/* i915_irq.c — imported from SVN (rev 3480); web-viewer navigation header removed. */
2351 | Serge | 1 | /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- |
2 | */ |
||
3 | /* |
||
4 | * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. |
||
5 | * All Rights Reserved. |
||
6 | * |
||
7 | * Permission is hereby granted, free of charge, to any person obtaining a |
||
8 | * copy of this software and associated documentation files (the |
||
9 | * "Software"), to deal in the Software without restriction, including |
||
10 | * without limitation the rights to use, copy, modify, merge, publish, |
||
11 | * distribute, sub license, and/or sell copies of the Software, and to |
||
12 | * permit persons to whom the Software is furnished to do so, subject to |
||
13 | * the following conditions: |
||
14 | * |
||
15 | * The above copyright notice and this permission notice (including the |
||
16 | * next paragraph) shall be included in all copies or substantial portions |
||
17 | * of the Software. |
||
18 | * |
||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS |
||
20 | * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
||
21 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. |
||
22 | * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR |
||
23 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, |
||
24 | * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE |
||
25 | * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
||
26 | * |
||
27 | */ |
||
28 | |||
3746 | Serge | 29 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
3031 | serge | 30 | |
31 | #include |
||
32 | #include |
||
33 | #include |
||
2351 | Serge | 34 | #include "i915_drv.h" |
35 | #include "i915_trace.h" |
||
36 | #include "intel_drv.h" |
||
37 | |||
/* Hotplug-detect bit for each HPD pin on IBX-generation PCHs (SDE_* south
 * display engine bits); indexed by the HPD_* pin enum.  Passed to
 * hotplug_irq_storm_detect() from ibx_irq_handler(). */
static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};
||
3031 | serge | 45 | |
/* Hotplug-detect bit for each HPD pin on CPT-class PCHs (the *_CPT variants
 * of the south display engine bits); indexed by the HPD_* pin enum.
 * Used by cpt_irq_handler() for storm detection. */
static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};
||
53 | |||
/* Hotplug interrupt *enable* bit for each HPD pin on pre-PCH (GMCH)
 * platforms; indexed by the HPD_* pin enum. */
static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};
||
62 | |||
/* Hotplug interrupt *status* bit for each HPD pin, g4x flavour of the
 * SDVO status bits; indexed by the HPD_* pin enum. */
static const u32 hpd_status_gen4[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
||
71 | |||
/* Hotplug interrupt *status* bit for each HPD pin, i965 flavour of the
 * SDVO status bits; indexed by the HPD_* pin enum. */
static const u32 hpd_status_i965[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I965,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I965,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
||
80 | |||
/* Hotplug interrupt *status* bit for each HPD pin; indexed by the HPD_*
 * pin enum.  Used by valleyview_irq_handler() for storm detection. */
static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
||
89 | |||
/* Forward declarations: per-platform HPD (re)setup routines invoked by the
 * interrupt handlers below when a hotplug storm is detected. */
static void ibx_hpd_irq_setup(struct drm_device *dev);
static void i915_hpd_irq_setup(struct drm_device *dev);
||
92 | |||
/* Minimal pr_err() for this port: routes through printk() with the
 * module-name prefix applied by pr_fmt() (defined at the top of the file). */
#define pr_err(fmt, ...) \
	printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
||
95 | |||
96 | |||
/* Legacy DRM wait-queue wrappers kept for this port; they map directly onto
 * the kernel wait-queue primitives. */
#define DRM_WAKEUP( queue )		wake_up( queue )
#define DRM_INIT_WAITQUEUE( queue )	init_waitqueue_head( queue )
||
99 | |||
2351 | Serge | 100 | #define MAX_NOPID ((u32)~0) |
101 | |||
102 | |||
103 | |||
104 | /* For display hotplug interrupt */ |
||
105 | static void |
||
106 | ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) |
||
107 | { |
||
108 | if ((dev_priv->irq_mask & mask) != 0) { |
||
109 | dev_priv->irq_mask &= ~mask; |
||
110 | I915_WRITE(DEIMR, dev_priv->irq_mask); |
||
111 | POSTING_READ(DEIMR); |
||
112 | } |
||
113 | } |
||
114 | |||
3746 | Serge | 115 | static void |
2351 | Serge | 116 | ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) |
117 | { |
||
118 | if ((dev_priv->irq_mask & mask) != mask) { |
||
119 | dev_priv->irq_mask |= mask; |
||
120 | I915_WRITE(DEIMR, dev_priv->irq_mask); |
||
121 | POSTING_READ(DEIMR); |
||
122 | } |
||
123 | } |
||
3031 | serge | 124 | |
125 | void |
||
126 | i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) |
||
127 | { |
||
128 | u32 reg = PIPESTAT(pipe); |
||
3746 | Serge | 129 | u32 pipestat = I915_READ(reg) & 0x7fff0000; |
3031 | serge | 130 | |
3746 | Serge | 131 | if ((pipestat & mask) == mask) |
132 | return; |
||
133 | |||
3031 | serge | 134 | /* Enable the interrupt, clear any pending status */ |
3746 | Serge | 135 | pipestat |= mask | (mask >> 16); |
136 | I915_WRITE(reg, pipestat); |
||
3031 | serge | 137 | POSTING_READ(reg); |
138 | } |
||
139 | |||
140 | void |
||
141 | i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) |
||
142 | { |
||
143 | u32 reg = PIPESTAT(pipe); |
||
3746 | Serge | 144 | u32 pipestat = I915_READ(reg) & 0x7fff0000; |
3031 | serge | 145 | |
3746 | Serge | 146 | if ((pipestat & mask) == 0) |
147 | return; |
||
148 | |||
149 | pipestat &= ~mask; |
||
150 | I915_WRITE(reg, pipestat); |
||
3031 | serge | 151 | POSTING_READ(reg); |
152 | } |
||
153 | |||
#if 0
/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 *
 * Compiled out in this port.  Enables the graphics-system-event (backlight)
 * interrupt: via the display-engine IMR on PCH-split platforms, via the
 * legacy BLC pipestat bits otherwise.
 */
void intel_enable_asle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	/* FIXME: opregion/asle for VLV */
	if (IS_VALLEYVIEW(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, DE_GSE);
	else {
		i915_enable_pipestat(dev_priv, 1,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);
		if (INTEL_INFO(dev)->gen >= 4)
			i915_enable_pipestat(dev_priv, 0,
					     PIPE_LEGACY_BLC_EVENT_ENABLE);
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
#endif
||
182 | |||
183 | /** |
||
184 | * i915_pipe_enabled - check if a pipe is enabled |
||
185 | * @dev: DRM device |
||
186 | * @pipe: pipe to check |
||
187 | * |
||
188 | * Reading certain registers when the pipe is disabled can hang the chip. |
||
189 | * Use this routine to make sure the PLL is running and the pipe is active |
||
190 | * before reading such registers if unsure. |
||
191 | */ |
||
192 | static int |
||
193 | i915_pipe_enabled(struct drm_device *dev, int pipe) |
||
194 | { |
||
195 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
||
3243 | Serge | 196 | enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, |
197 | pipe); |
||
198 | |||
199 | return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE; |
||
3031 | serge | 200 | } |
201 | |||
202 | /* Called from drm generic code, passed a 'crtc', which |
||
203 | * we use as a pipe index |
||
204 | */ |
||
205 | static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) |
||
206 | { |
||
207 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
||
208 | unsigned long high_frame; |
||
209 | unsigned long low_frame; |
||
210 | u32 high1, high2, low; |
||
211 | |||
212 | if (!i915_pipe_enabled(dev, pipe)) { |
||
213 | DRM_DEBUG_DRIVER("trying to get vblank count for disabled " |
||
214 | "pipe %c\n", pipe_name(pipe)); |
||
215 | return 0; |
||
216 | } |
||
217 | |||
218 | high_frame = PIPEFRAME(pipe); |
||
219 | low_frame = PIPEFRAMEPIXEL(pipe); |
||
220 | |||
221 | /* |
||
222 | * High & low register fields aren't synchronized, so make sure |
||
223 | * we get a low value that's stable across two reads of the high |
||
224 | * register. |
||
225 | */ |
||
226 | do { |
||
227 | high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; |
||
228 | low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK; |
||
229 | high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; |
||
230 | } while (high1 != high2); |
||
231 | |||
232 | high1 >>= PIPE_FRAME_HIGH_SHIFT; |
||
233 | low >>= PIPE_FRAME_LOW_SHIFT; |
||
234 | return (high1 << 8) | low; |
||
235 | } |
||
236 | |||
237 | static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) |
||
238 | { |
||
239 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
||
240 | int reg = PIPE_FRMCOUNT_GM45(pipe); |
||
241 | |||
242 | if (!i915_pipe_enabled(dev, pipe)) { |
||
243 | DRM_DEBUG_DRIVER("trying to get vblank count for disabled " |
||
244 | "pipe %c\n", pipe_name(pipe)); |
||
245 | return 0; |
||
246 | } |
||
247 | |||
248 | return I915_READ(reg); |
||
249 | } |
||
250 | |||
/*
 * Read the current scanout position for @pipe into *@vpos/*@hpos.
 * On gen4+ only the vertical position is available (PIPEDSL); earlier
 * hardware exposes a pixel counter that is decomposed into v/h here.
 * A position inside the vblank region is reported relative to the start
 * of the next frame (negative vpos).  Returns a DRM_SCANOUTPOS_* flag
 * mask (0 when the pipe is disabled).
 */
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(cpu_transcoder));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}
||
318 | |||
/*
 * Compute a precise vblank timestamp for @pipe.  Validates the pipe index
 * and its CRTC, then delegates to the DRM core scanout-position helper.
 * Returns a negative errno for an invalid or disabled CRTC; otherwise
 * whatever the DRM core helper returns.
 */
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}
||
348 | |||
/*
 * Handle hotplug events outside the interrupt handler proper.
 */
/* Delay before re-enabling a storm-disabled HPD line: 2 minutes, in ms. */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
353 | |||
/*
 * Deferred hotplug work: demote storm-flagged connectors from HPD to
 * polling, run each encoder's hot_plug() hook, then send a uevent so
 * userspace can react.  Scheduled from the IRQ handlers via
 * dev_priv->hotplug_work.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	/* irq_lock protects the hpd_stats[] storm bookkeeping. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		/* Upstream re-enables HPD after a delay; the timer is
		 * disabled in this port. */
//		mod_timer(&dev_priv->hotplug_reenable_timer,
//			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
		if (intel_encoder->hot_plug)
			intel_encoder->hot_plug(intel_encoder);

	mutex_unlock(&mode_config->mutex);

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}
||
409 | |||
3746 | Serge | 410 | static void ironlake_handle_rps_change(struct drm_device *dev) |
411 | { |
||
412 | drm_i915_private_t *dev_priv = dev->dev_private; |
||
413 | u32 busy_up, busy_down, max_avg, min_avg; |
||
414 | u8 new_delay; |
||
415 | unsigned long flags; |
||
416 | |||
417 | spin_lock_irqsave(&mchdev_lock, flags); |
||
418 | |||
419 | I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); |
||
420 | |||
421 | new_delay = dev_priv->ips.cur_delay; |
||
422 | |||
423 | I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); |
||
424 | busy_up = I915_READ(RCPREVBSYTUPAVG); |
||
425 | busy_down = I915_READ(RCPREVBSYTDNAVG); |
||
426 | max_avg = I915_READ(RCBMAXAVG); |
||
427 | min_avg = I915_READ(RCBMINAVG); |
||
428 | |||
429 | /* Handle RCS change request from hw */ |
||
430 | if (busy_up > max_avg) { |
||
431 | if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) |
||
432 | new_delay = dev_priv->ips.cur_delay - 1; |
||
433 | if (new_delay < dev_priv->ips.max_delay) |
||
434 | new_delay = dev_priv->ips.max_delay; |
||
435 | } else if (busy_down < min_avg) { |
||
436 | if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) |
||
437 | new_delay = dev_priv->ips.cur_delay + 1; |
||
438 | if (new_delay > dev_priv->ips.min_delay) |
||
439 | new_delay = dev_priv->ips.min_delay; |
||
440 | } |
||
441 | |||
442 | if (ironlake_set_drps(dev, new_delay)) |
||
443 | dev_priv->ips.cur_delay = new_delay; |
||
444 | |||
445 | spin_unlock_irqrestore(&mchdev_lock, flags); |
||
446 | |||
447 | return; |
||
448 | } |
||
449 | |||
/*
 * Ring interrupt bottom half: emit the request-complete tracepoint and wake
 * every waiter on @ring's IRQ wait queue.  The upstream hangcheck re-arm is
 * disabled in this port (commented out below).
 */
static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Ring has no backing object yet (not initialised) — nothing to do. */
	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
//	if (i915_enable_hangcheck) {
//		dev_priv->hangcheck_count = 0;
//		mod_timer(&dev_priv->hangcheck_timer,
//			  jiffies +
//			  msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
//	}
}
||
468 | |||
#if 0
/* Compiled out in this port: deferred gen6+ RPS work item.  Consumes the
 * PM IIR bits latched by gen6_queue_rps_work() and steps the GPU frequency
 * up or down by one, unless a sysfs request moved the limits meanwhile. */
static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir, pm_imr;
	u8 new_delay;

	/* Atomically take the latched IIR bits and clear the PM mask. */
	spin_lock_irq(&dev_priv->rps.lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps.lock);

	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
		new_delay = dev_priv->rps.cur_delay + 1;
	else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (!(new_delay > dev_priv->rps.max_delay ||
	      new_delay < dev_priv->rps.min_delay)) {
		gen6_set_rps(dev_priv->dev, new_delay);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}
504 | |||
505 | |||
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;
	unsigned long flags;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	/* Decode which row/bank/subbank faulted. */
	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	/* Clear the error and keep error reporting enabled. */
	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	/* Restore DOP clock gating. */
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	/* Re-unmask the parity interrupt now that it has been serviced. */
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = "L3_PARITY_ERROR=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
}
||
568 | |||
/* IRQ-context half of L3 parity handling: mask the parity interrupt so it
 * doesn't refire, then defer the register access and uevent to the
 * l3_parity.error_work work item. */
static void ivybridge_handle_parity_error(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long flags;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

#endif
||
586 | |||
/*
 * Dispatch gen6+ GT (render/video/blitter) interrupt bits: wake ring
 * waiters for user interrupts and report command-stream errors.  The L3
 * parity path is disabled in this port (commented out below).
 */
static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
		      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GEN6_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

//	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
//		ivybridge_handle_parity_error(dev);
}
||
610 | |||
/*
 * Latch PM interrupt bits for later processing and mask them in PMIMR.
 * Upstream queues dev_priv->rps.work here; the queue_work call is disabled
 * in this port, so the bits are only accumulated.
 */
static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
				u32 pm_iir)
{
	unsigned long flags;

	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * displays a case where we've unsafely cleared
	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it displays a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

//	queue_work(dev_priv->wq, &dev_priv->rps.work);
}
634 | |||
/* HPD storm detection: window length in ms, and the interrupt count within
 * one window above which a pin is considered storming. */
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5
||
637 | |||
/*
 * Count hotplug interrupts per HPD pin and report whether any pin should be
 * treated as storming.  @hpd maps pin -> trigger bit (one of the hpd_*
 * tables above).  The actual time-window/threshold logic is disabled in
 * this port (commented out), so this currently only increments the
 * per-pin counters and always returns false.
 */
static inline bool hotplug_irq_storm_detect(struct drm_device *dev,
					    u32 hotplug_trigger,
					    const u32 *hpd)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;
	int i;
	bool ret = false;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	/* Pin 0 is HPD_NONE, hence the loop starts at 1. */
	for (i = 1; i < HPD_NUM_PINS; i++) {

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

//		if (!time_in_range(GetTimerTicks(), dev_priv->hpd_stats[i].hpd_last_jiffies,
//				   dev_priv->hpd_stats[i].hpd_last_jiffies
//				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
//			dev_priv->hpd_stats[i].hpd_last_jiffies = GetTimerTicks;
//			dev_priv->hpd_stats[i].hpd_cnt = 0;
//		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
//			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
//			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
//			ret = true;
//		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
//		}
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return ret;
}
||
673 | |||
3480 | Serge | 674 | static void gmbus_irq_handler(struct drm_device *dev) |
675 | { |
||
676 | struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; |
||
677 | |||
678 | wake_up_all(&dev_priv->gmbus_wait_queue); |
||
679 | } |
||
680 | |||
681 | static void dp_aux_irq_handler(struct drm_device *dev) |
||
682 | { |
||
683 | struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; |
||
684 | |||
685 | wake_up_all(&dev_priv->gmbus_wait_queue); |
||
686 | } |
||
687 | |||
/*
 * Top-level interrupt handler for ValleyView.  Loops while any of VLV_IIR,
 * GTIIR or GEN6_PMIIR has bits set: dispatches GT interrupts, acks
 * PIPESTAT, handles hotplug/GMBUS, then acks the IIRs.  The vblank and
 * page-flip handling, and the RPS deferral, are disabled in this port.
 */
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

#if 0
		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}
#endif

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_trigger) {
				/* Re-program HPD if any pin was flagged as storming. */
				if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
					i915_hpd_irq_setup(dev);
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);
			}
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

//		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
//			gen6_queue_rps_work(dev_priv, pm_iir);

		/* Ack the interrupt sources last. */
		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}
||
772 | |||
/*
 * Service south-display (PCH) interrupts for IBX-generation PCHs:
 * hotplug (with storm detection), DP AUX, GMBUS, and a set of
 * debug-logged audio/FDI/transcoder events.
 */
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	if (hotplug_trigger) {
		/* Re-program HPD if any pin was flagged as storming. */
		if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_ibx))
			ibx_hpd_irq_setup(dev);
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
	}
	if (pch_iir & SDE_AUDIO_POWER_MASK)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
				 SDE_AUDIO_POWER_SHIFT);

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}
||
821 | |||
822 | static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) |
||
823 | { |
||
824 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
||
825 | int pipe; |
||
3746 | Serge | 826 | u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; |
3031 | serge | 827 | |
3746 | Serge | 828 | if (hotplug_trigger) { |
829 | if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_cpt)) |
||
830 | ibx_hpd_irq_setup(dev); |
||
3480 | Serge | 831 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); |
3746 | Serge | 832 | } |
3031 | serge | 833 | if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) |
834 | DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", |
||
835 | (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> |
||
836 | SDE_AUDIO_POWER_SHIFT_CPT); |
||
837 | |||
838 | if (pch_iir & SDE_AUX_MASK_CPT) |
||
3480 | Serge | 839 | dp_aux_irq_handler(dev); |
3031 | serge | 840 | |
841 | if (pch_iir & SDE_GMBUS_CPT) |
||
3480 | Serge | 842 | gmbus_irq_handler(dev); |
3031 | serge | 843 | |
844 | if (pch_iir & SDE_AUDIO_CP_REQ_CPT) |
||
845 | DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); |
||
846 | |||
847 | if (pch_iir & SDE_AUDIO_CP_CHG_CPT) |
||
848 | DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); |
||
849 | |||
850 | if (pch_iir & SDE_FDI_MASK_CPT) |
||
851 | for_each_pipe(pipe) |
||
852 | DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", |
||
853 | pipe_name(pipe), |
||
854 | I915_READ(FDI_RX_IIR(pipe))); |
||
855 | } |
||
856 | |||
3243 | Serge | 857 | static irqreturn_t ivybridge_irq_handler(int irq, void *arg) |
3031 | serge | 858 | { |
859 | struct drm_device *dev = (struct drm_device *) arg; |
||
860 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
||
3746 | Serge | 861 | u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0; |
3031 | serge | 862 | irqreturn_t ret = IRQ_NONE; |
863 | int i; |
||
864 | |||
865 | atomic_inc(&dev_priv->irq_received); |
||
866 | |||
867 | /* disable master interrupt before clearing iir */ |
||
868 | de_ier = I915_READ(DEIER); |
||
869 | I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); |
||
870 | |||
3480 | Serge | 871 | /* Disable south interrupts. We'll only write to SDEIIR once, so further |
872 | * interrupts will will be stored on its back queue, and then we'll be |
||
873 | * able to process them after we restore SDEIER (as soon as we restore |
||
874 | * it, we'll get an interrupt if SDEIIR still has something to process |
||
875 | * due to its back queue). */ |
||
3746 | Serge | 876 | if (!HAS_PCH_NOP(dev)) { |
3480 | Serge | 877 | sde_ier = I915_READ(SDEIER); |
878 | I915_WRITE(SDEIER, 0); |
||
879 | POSTING_READ(SDEIER); |
||
3746 | Serge | 880 | } |
3480 | Serge | 881 | |
3031 | serge | 882 | gt_iir = I915_READ(GTIIR); |
883 | if (gt_iir) { |
||
884 | snb_gt_irq_handler(dev, dev_priv, gt_iir); |
||
885 | I915_WRITE(GTIIR, gt_iir); |
||
886 | ret = IRQ_HANDLED; |
||
887 | } |
||
888 | |||
889 | de_iir = I915_READ(DEIIR); |
||
890 | if (de_iir) { |
||
3480 | Serge | 891 | if (de_iir & DE_AUX_CHANNEL_A_IVB) |
892 | dp_aux_irq_handler(dev); |
||
3031 | serge | 893 | #if 0 |
894 | if (de_iir & DE_GSE_IVB) |
||
895 | intel_opregion_gse_intr(dev); |
||
896 | |||
897 | for (i = 0; i < 3; i++) { |
||
898 | if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) |
||
899 | drm_handle_vblank(dev, i); |
||
900 | if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { |
||
901 | intel_prepare_page_flip(dev, i); |
||
902 | intel_finish_page_flip_plane(dev, i); |
||
903 | } |
||
904 | } |
||
905 | #endif |
||
906 | /* check event from PCH */ |
||
3746 | Serge | 907 | if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { |
3031 | serge | 908 | u32 pch_iir = I915_READ(SDEIIR); |
909 | |||
910 | cpt_irq_handler(dev, pch_iir); |
||
911 | |||
912 | /* clear PCH hotplug event before clear CPU irq */ |
||
913 | I915_WRITE(SDEIIR, pch_iir); |
||
914 | } |
||
915 | |||
916 | I915_WRITE(DEIIR, de_iir); |
||
917 | ret = IRQ_HANDLED; |
||
918 | } |
||
919 | |||
920 | pm_iir = I915_READ(GEN6_PMIIR); |
||
921 | if (pm_iir) { |
||
922 | // if (pm_iir & GEN6_PM_DEFERRED_EVENTS) |
||
923 | // gen6_queue_rps_work(dev_priv, pm_iir); |
||
924 | I915_WRITE(GEN6_PMIIR, pm_iir); |
||
925 | ret = IRQ_HANDLED; |
||
926 | } |
||
927 | |||
928 | I915_WRITE(DEIER, de_ier); |
||
929 | POSTING_READ(DEIER); |
||
3746 | Serge | 930 | if (!HAS_PCH_NOP(dev)) { |
3480 | Serge | 931 | I915_WRITE(SDEIER, sde_ier); |
932 | POSTING_READ(SDEIER); |
||
3746 | Serge | 933 | } |
3031 | serge | 934 | |
935 | return ret; |
||
936 | } |
||
937 | |||
938 | static void ilk_gt_irq_handler(struct drm_device *dev, |
||
939 | struct drm_i915_private *dev_priv, |
||
940 | u32 gt_iir) |
||
941 | { |
||
942 | if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) |
||
943 | notify_ring(dev, &dev_priv->ring[RCS]); |
||
944 | if (gt_iir & GT_BSD_USER_INTERRUPT) |
||
945 | notify_ring(dev, &dev_priv->ring[VCS]); |
||
946 | } |
||
947 | |||
3243 | Serge | 948 | static irqreturn_t ironlake_irq_handler(int irq, void *arg) |
3031 | serge | 949 | { |
950 | struct drm_device *dev = (struct drm_device *) arg; |
||
2351 | Serge | 951 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
952 | int ret = IRQ_NONE; |
||
3480 | Serge | 953 | u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier; |
2351 | Serge | 954 | |
955 | atomic_inc(&dev_priv->irq_received); |
||
956 | |||
957 | /* disable master interrupt before clearing iir */ |
||
958 | de_ier = I915_READ(DEIER); |
||
959 | I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); |
||
960 | POSTING_READ(DEIER); |
||
961 | |||
3480 | Serge | 962 | /* Disable south interrupts. We'll only write to SDEIIR once, so further |
963 | * interrupts will will be stored on its back queue, and then we'll be |
||
964 | * able to process them after we restore SDEIER (as soon as we restore |
||
965 | * it, we'll get an interrupt if SDEIIR still has something to process |
||
966 | * due to its back queue). */ |
||
967 | sde_ier = I915_READ(SDEIER); |
||
968 | I915_WRITE(SDEIER, 0); |
||
969 | POSTING_READ(SDEIER); |
||
970 | |||
2351 | Serge | 971 | de_iir = I915_READ(DEIIR); |
972 | gt_iir = I915_READ(GTIIR); |
||
973 | pm_iir = I915_READ(GEN6_PMIIR); |
||
974 | |||
3480 | Serge | 975 | if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0)) |
2351 | Serge | 976 | goto done; |
977 | |||
978 | ret = IRQ_HANDLED; |
||
979 | |||
3031 | serge | 980 | if (IS_GEN5(dev)) |
981 | ilk_gt_irq_handler(dev, dev_priv, gt_iir); |
||
982 | else |
||
983 | snb_gt_irq_handler(dev, dev_priv, gt_iir); |
||
3480 | Serge | 984 | |
985 | if (de_iir & DE_AUX_CHANNEL_A) |
||
986 | dp_aux_irq_handler(dev); |
||
987 | |||
3031 | serge | 988 | #if 0 |
989 | if (de_iir & DE_GSE) |
||
990 | intel_opregion_gse_intr(dev); |
||
2351 | Serge | 991 | |
3031 | serge | 992 | if (de_iir & DE_PIPEA_VBLANK) |
993 | drm_handle_vblank(dev, 0); |
||
2351 | Serge | 994 | |
3031 | serge | 995 | if (de_iir & DE_PIPEB_VBLANK) |
996 | drm_handle_vblank(dev, 1); |
||
2351 | Serge | 997 | |
3031 | serge | 998 | if (de_iir & DE_PLANEA_FLIP_DONE) { |
999 | intel_prepare_page_flip(dev, 0); |
||
1000 | intel_finish_page_flip_plane(dev, 0); |
||
1001 | } |
||
2351 | Serge | 1002 | |
3031 | serge | 1003 | if (de_iir & DE_PLANEB_FLIP_DONE) { |
1004 | intel_prepare_page_flip(dev, 1); |
||
1005 | intel_finish_page_flip_plane(dev, 1); |
||
1006 | } |
||
1007 | #endif |
||
2351 | Serge | 1008 | |
3031 | serge | 1009 | /* check event from PCH */ |
1010 | if (de_iir & DE_PCH_EVENT) { |
||
3480 | Serge | 1011 | u32 pch_iir = I915_READ(SDEIIR); |
1012 | |||
3031 | serge | 1013 | if (HAS_PCH_CPT(dev)) |
1014 | cpt_irq_handler(dev, pch_iir); |
||
1015 | else |
||
1016 | ibx_irq_handler(dev, pch_iir); |
||
3480 | Serge | 1017 | |
1018 | /* should clear PCH hotplug event before clear CPU irq */ |
||
1019 | I915_WRITE(SDEIIR, pch_iir); |
||
3031 | serge | 1020 | } |
1021 | #if 0 |
||
1022 | if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) |
||
1023 | ironlake_handle_rps_change(dev); |
||
2351 | Serge | 1024 | |
3031 | serge | 1025 | if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) |
1026 | gen6_queue_rps_work(dev_priv, pm_iir); |
||
1027 | #endif |
||
2351 | Serge | 1028 | I915_WRITE(GTIIR, gt_iir); |
1029 | I915_WRITE(DEIIR, de_iir); |
||
1030 | I915_WRITE(GEN6_PMIIR, pm_iir); |
||
1031 | |||
1032 | done: |
||
1033 | I915_WRITE(DEIER, de_ier); |
||
1034 | POSTING_READ(DEIER); |
||
3480 | Serge | 1035 | I915_WRITE(SDEIER, sde_ier); |
1036 | POSTING_READ(SDEIER); |
||
2351 | Serge | 1037 | |
1038 | return ret; |
||
1039 | } |
||
1040 | |||
1041 | |||
1042 | |||
1043 | |||
3031 | serge | 1044 | /* NB: please notice the memset */ |
1045 | static void i915_get_extra_instdone(struct drm_device *dev, |
||
1046 | uint32_t *instdone) |
||
1047 | { |
||
1048 | struct drm_i915_private *dev_priv = dev->dev_private; |
||
1049 | memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); |
||
2351 | Serge | 1050 | |
3031 | serge | 1051 | switch(INTEL_INFO(dev)->gen) { |
1052 | case 2: |
||
1053 | case 3: |
||
1054 | instdone[0] = I915_READ(INSTDONE); |
||
1055 | break; |
||
1056 | case 4: |
||
1057 | case 5: |
||
1058 | case 6: |
||
1059 | instdone[0] = I915_READ(INSTDONE_I965); |
||
1060 | instdone[1] = I915_READ(INSTDONE1); |
||
1061 | break; |
||
1062 | default: |
||
3480 | Serge | 1063 | WARN_ONCE(1, "Unsupported platform\n"); |
3031 | serge | 1064 | case 7: |
1065 | instdone[0] = I915_READ(GEN7_INSTDONE_1); |
||
1066 | instdone[1] = I915_READ(GEN7_SC_INSTDONE); |
||
1067 | instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); |
||
1068 | instdone[3] = I915_READ(GEN7_ROW_INSTDONE); |
||
1069 | break; |
||
1070 | } |
||
1071 | } |
||
2351 | Serge | 1072 | |
3031 | serge | 1073 | #ifdef CONFIG_DEBUG_FS |
1074 | static struct drm_i915_error_object * |
||
3746 | Serge | 1075 | i915_error_object_create_sized(struct drm_i915_private *dev_priv, |
1076 | struct drm_i915_gem_object *src, |
||
1077 | const int num_pages) |
||
3031 | serge | 1078 | { |
1079 | struct drm_i915_error_object *dst; |
||
3746 | Serge | 1080 | int i; |
3031 | serge | 1081 | u32 reloc_offset; |
2351 | Serge | 1082 | |
3031 | serge | 1083 | if (src == NULL || src->pages == NULL) |
1084 | return NULL; |
||
2351 | Serge | 1085 | |
3746 | Serge | 1086 | dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC); |
3031 | serge | 1087 | if (dst == NULL) |
1088 | return NULL; |
||
1089 | |||
1090 | reloc_offset = src->gtt_offset; |
||
3746 | Serge | 1091 | for (i = 0; i < num_pages; i++) { |
3031 | serge | 1092 | unsigned long flags; |
1093 | void *d; |
||
1094 | |||
1095 | d = kmalloc(PAGE_SIZE, GFP_ATOMIC); |
||
1096 | if (d == NULL) |
||
1097 | goto unwind; |
||
1098 | |||
1099 | local_irq_save(flags); |
||
3480 | Serge | 1100 | if (reloc_offset < dev_priv->gtt.mappable_end && |
3031 | serge | 1101 | src->has_global_gtt_mapping) { |
1102 | void __iomem *s; |
||
1103 | |||
1104 | /* Simply ignore tiling or any overlapping fence. |
||
1105 | * It's part of the error state, and this hopefully |
||
1106 | * captures what the GPU read. |
||
1107 | */ |
||
1108 | |||
3480 | Serge | 1109 | s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, |
3031 | serge | 1110 | reloc_offset); |
1111 | memcpy_fromio(d, s, PAGE_SIZE); |
||
1112 | io_mapping_unmap_atomic(s); |
||
3480 | Serge | 1113 | } else if (src->stolen) { |
1114 | unsigned long offset; |
||
1115 | |||
1116 | offset = dev_priv->mm.stolen_base; |
||
1117 | offset += src->stolen->start; |
||
1118 | offset += i << PAGE_SHIFT; |
||
1119 | |||
1120 | memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE); |
||
3031 | serge | 1121 | } else { |
1122 | struct page *page; |
||
1123 | void *s; |
||
1124 | |||
1125 | page = i915_gem_object_get_page(src, i); |
||
1126 | |||
1127 | drm_clflush_pages(&page, 1); |
||
1128 | |||
1129 | s = kmap_atomic(page); |
||
1130 | memcpy(d, s, PAGE_SIZE); |
||
1131 | kunmap_atomic(s); |
||
1132 | |||
1133 | drm_clflush_pages(&page, 1); |
||
1134 | } |
||
1135 | local_irq_restore(flags); |
||
1136 | |||
1137 | dst->pages[i] = d; |
||
1138 | |||
1139 | reloc_offset += PAGE_SIZE; |
||
1140 | } |
||
3746 | Serge | 1141 | dst->page_count = num_pages; |
3031 | serge | 1142 | dst->gtt_offset = src->gtt_offset; |
1143 | |||
1144 | return dst; |
||
1145 | |||
1146 | unwind: |
||
1147 | while (i--) |
||
1148 | kfree(dst->pages[i]); |
||
1149 | kfree(dst); |
||
1150 | return NULL; |
||
1151 | } |
||
3746 | Serge | 1152 | #define i915_error_object_create(dev_priv, src) \ |
1153 | i915_error_object_create_sized((dev_priv), (src), \ |
||
1154 | (src)->base.size>>PAGE_SHIFT) |
||
3031 | serge | 1155 | |
1156 | static void |
||
1157 | i915_error_object_free(struct drm_i915_error_object *obj) |
||
1158 | { |
||
1159 | int page; |
||
1160 | |||
1161 | if (obj == NULL) |
||
1162 | return; |
||
1163 | |||
1164 | for (page = 0; page < obj->page_count; page++) |
||
1165 | kfree(obj->pages[page]); |
||
1166 | |||
1167 | kfree(obj); |
||
1168 | } |
||
1169 | |||
1170 | void |
||
1171 | i915_error_state_free(struct kref *error_ref) |
||
1172 | { |
||
1173 | struct drm_i915_error_state *error = container_of(error_ref, |
||
1174 | typeof(*error), ref); |
||
1175 | int i; |
||
1176 | |||
1177 | for (i = 0; i < ARRAY_SIZE(error->ring); i++) { |
||
1178 | i915_error_object_free(error->ring[i].batchbuffer); |
||
1179 | i915_error_object_free(error->ring[i].ringbuffer); |
||
1180 | kfree(error->ring[i].requests); |
||
1181 | } |
||
1182 | |||
1183 | kfree(error->active_bo); |
||
1184 | kfree(error->overlay); |
||
1185 | kfree(error); |
||
1186 | } |
||
1187 | static void capture_bo(struct drm_i915_error_buffer *err, |
||
1188 | struct drm_i915_gem_object *obj) |
||
1189 | { |
||
1190 | err->size = obj->base.size; |
||
1191 | err->name = obj->base.name; |
||
1192 | err->rseqno = obj->last_read_seqno; |
||
1193 | err->wseqno = obj->last_write_seqno; |
||
1194 | err->gtt_offset = obj->gtt_offset; |
||
1195 | err->read_domains = obj->base.read_domains; |
||
1196 | err->write_domain = obj->base.write_domain; |
||
1197 | err->fence_reg = obj->fence_reg; |
||
1198 | err->pinned = 0; |
||
1199 | if (obj->pin_count > 0) |
||
1200 | err->pinned = 1; |
||
1201 | if (obj->user_pin_count > 0) |
||
1202 | err->pinned = -1; |
||
1203 | err->tiling = obj->tiling_mode; |
||
1204 | err->dirty = obj->dirty; |
||
1205 | err->purgeable = obj->madv != I915_MADV_WILLNEED; |
||
1206 | err->ring = obj->ring ? obj->ring->id : -1; |
||
1207 | err->cache_level = obj->cache_level; |
||
1208 | } |
||
1209 | |||
1210 | static u32 capture_active_bo(struct drm_i915_error_buffer *err, |
||
1211 | int count, struct list_head *head) |
||
1212 | { |
||
1213 | struct drm_i915_gem_object *obj; |
||
1214 | int i = 0; |
||
1215 | |||
1216 | list_for_each_entry(obj, head, mm_list) { |
||
1217 | capture_bo(err++, obj); |
||
1218 | if (++i == count) |
||
1219 | break; |
||
1220 | } |
||
1221 | |||
1222 | return i; |
||
1223 | } |
||
1224 | |||
1225 | static u32 capture_pinned_bo(struct drm_i915_error_buffer *err, |
||
1226 | int count, struct list_head *head) |
||
1227 | { |
||
1228 | struct drm_i915_gem_object *obj; |
||
1229 | int i = 0; |
||
1230 | |||
1231 | list_for_each_entry(obj, head, gtt_list) { |
||
1232 | if (obj->pin_count == 0) |
||
1233 | continue; |
||
1234 | |||
1235 | capture_bo(err++, obj); |
||
1236 | if (++i == count) |
||
1237 | break; |
||
1238 | } |
||
1239 | |||
1240 | return i; |
||
1241 | } |
||
1242 | |||
1243 | static void i915_gem_record_fences(struct drm_device *dev, |
||
1244 | struct drm_i915_error_state *error) |
||
1245 | { |
||
1246 | struct drm_i915_private *dev_priv = dev->dev_private; |
||
1247 | int i; |
||
1248 | |||
1249 | /* Fences */ |
||
1250 | switch (INTEL_INFO(dev)->gen) { |
||
1251 | case 7: |
||
1252 | case 6: |
||
3746 | Serge | 1253 | for (i = 0; i < dev_priv->num_fence_regs; i++) |
3031 | serge | 1254 | error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); |
1255 | break; |
||
1256 | case 5: |
||
1257 | case 4: |
||
1258 | for (i = 0; i < 16; i++) |
||
1259 | error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); |
||
1260 | break; |
||
1261 | case 3: |
||
1262 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) |
||
1263 | for (i = 0; i < 8; i++) |
||
1264 | error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); |
||
1265 | case 2: |
||
1266 | for (i = 0; i < 8; i++) |
||
1267 | error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); |
||
1268 | break; |
||
1269 | |||
3480 | Serge | 1270 | default: |
1271 | BUG(); |
||
3031 | serge | 1272 | } |
1273 | } |
||
1274 | |||
1275 | static struct drm_i915_error_object * |
||
1276 | i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, |
||
1277 | struct intel_ring_buffer *ring) |
||
1278 | { |
||
1279 | struct drm_i915_gem_object *obj; |
||
1280 | u32 seqno; |
||
1281 | |||
1282 | if (!ring->get_seqno) |
||
1283 | return NULL; |
||
1284 | |||
3480 | Serge | 1285 | if (HAS_BROKEN_CS_TLB(dev_priv->dev)) { |
1286 | u32 acthd = I915_READ(ACTHD); |
||
1287 | |||
1288 | if (WARN_ON(ring->id != RCS)) |
||
1289 | return NULL; |
||
1290 | |||
1291 | obj = ring->private; |
||
1292 | if (acthd >= obj->gtt_offset && |
||
1293 | acthd < obj->gtt_offset + obj->base.size) |
||
1294 | return i915_error_object_create(dev_priv, obj); |
||
1295 | } |
||
1296 | |||
3031 | serge | 1297 | seqno = ring->get_seqno(ring, false); |
1298 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { |
||
1299 | if (obj->ring != ring) |
||
1300 | continue; |
||
1301 | |||
1302 | if (i915_seqno_passed(seqno, obj->last_read_seqno)) |
||
1303 | continue; |
||
1304 | |||
1305 | if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) |
||
1306 | continue; |
||
1307 | |||
1308 | /* We need to copy these to an anonymous buffer as the simplest |
||
1309 | * method to avoid being overwritten by userspace. |
||
1310 | */ |
||
1311 | return i915_error_object_create(dev_priv, obj); |
||
1312 | } |
||
1313 | |||
1314 | return NULL; |
||
1315 | } |
||
1316 | |||
1317 | static void i915_record_ring_state(struct drm_device *dev, |
||
1318 | struct drm_i915_error_state *error, |
||
1319 | struct intel_ring_buffer *ring) |
||
1320 | { |
||
1321 | struct drm_i915_private *dev_priv = dev->dev_private; |
||
1322 | |||
1323 | if (INTEL_INFO(dev)->gen >= 6) { |
||
1324 | error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50); |
||
1325 | error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); |
||
1326 | error->semaphore_mboxes[ring->id][0] |
||
1327 | = I915_READ(RING_SYNC_0(ring->mmio_base)); |
||
1328 | error->semaphore_mboxes[ring->id][1] |
||
1329 | = I915_READ(RING_SYNC_1(ring->mmio_base)); |
||
3243 | Serge | 1330 | error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0]; |
1331 | error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1]; |
||
3031 | serge | 1332 | } |
1333 | |||
1334 | if (INTEL_INFO(dev)->gen >= 4) { |
||
1335 | error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base)); |
||
1336 | error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base)); |
||
1337 | error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); |
||
1338 | error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); |
||
1339 | error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); |
||
1340 | if (ring->id == RCS) |
||
1341 | error->bbaddr = I915_READ64(BB_ADDR); |
||
1342 | } else { |
||
1343 | error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); |
||
1344 | error->ipeir[ring->id] = I915_READ(IPEIR); |
||
1345 | error->ipehr[ring->id] = I915_READ(IPEHR); |
||
1346 | error->instdone[ring->id] = I915_READ(INSTDONE); |
||
1347 | } |
||
1348 | |||
1349 | error->waiting[ring->id] = waitqueue_active(&ring->irq_queue); |
||
1350 | error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); |
||
1351 | error->seqno[ring->id] = ring->get_seqno(ring, false); |
||
1352 | error->acthd[ring->id] = intel_ring_get_active_head(ring); |
||
1353 | error->head[ring->id] = I915_READ_HEAD(ring); |
||
1354 | error->tail[ring->id] = I915_READ_TAIL(ring); |
||
3243 | Serge | 1355 | error->ctl[ring->id] = I915_READ_CTL(ring); |
3031 | serge | 1356 | |
1357 | error->cpu_ring_head[ring->id] = ring->head; |
||
1358 | error->cpu_ring_tail[ring->id] = ring->tail; |
||
1359 | } |
||
1360 | |||
3746 | Serge | 1361 | |
1362 | static void i915_gem_record_active_context(struct intel_ring_buffer *ring, |
||
1363 | struct drm_i915_error_state *error, |
||
1364 | struct drm_i915_error_ring *ering) |
||
1365 | { |
||
1366 | struct drm_i915_private *dev_priv = ring->dev->dev_private; |
||
1367 | struct drm_i915_gem_object *obj; |
||
1368 | |||
1369 | /* Currently render ring is the only HW context user */ |
||
1370 | if (ring->id != RCS || !error->ccid) |
||
1371 | return; |
||
1372 | |||
1373 | list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { |
||
1374 | if ((error->ccid & PAGE_MASK) == obj->gtt_offset) { |
||
1375 | ering->ctx = i915_error_object_create_sized(dev_priv, |
||
1376 | obj, 1); |
||
1377 | } |
||
1378 | } |
||
1379 | } |
||
1380 | |||
3031 | serge | 1381 | static void i915_gem_record_rings(struct drm_device *dev, |
1382 | struct drm_i915_error_state *error) |
||
1383 | { |
||
1384 | struct drm_i915_private *dev_priv = dev->dev_private; |
||
1385 | struct intel_ring_buffer *ring; |
||
1386 | struct drm_i915_gem_request *request; |
||
1387 | int i, count; |
||
1388 | |||
1389 | for_each_ring(ring, dev_priv, i) { |
||
1390 | i915_record_ring_state(dev, error, ring); |
||
1391 | |||
1392 | error->ring[i].batchbuffer = |
||
1393 | i915_error_first_batchbuffer(dev_priv, ring); |
||
1394 | |||
1395 | error->ring[i].ringbuffer = |
||
1396 | i915_error_object_create(dev_priv, ring->obj); |
||
1397 | |||
3746 | Serge | 1398 | |
1399 | i915_gem_record_active_context(ring, error, &error->ring[i]); |
||
1400 | |||
3031 | serge | 1401 | count = 0; |
1402 | list_for_each_entry(request, &ring->request_list, list) |
||
1403 | count++; |
||
1404 | |||
1405 | error->ring[i].num_requests = count; |
||
1406 | error->ring[i].requests = |
||
1407 | kmalloc(count*sizeof(struct drm_i915_error_request), |
||
1408 | GFP_ATOMIC); |
||
1409 | if (error->ring[i].requests == NULL) { |
||
1410 | error->ring[i].num_requests = 0; |
||
1411 | continue; |
||
1412 | } |
||
1413 | |||
1414 | count = 0; |
||
1415 | list_for_each_entry(request, &ring->request_list, list) { |
||
1416 | struct drm_i915_error_request *erq; |
||
1417 | |||
1418 | erq = &error->ring[i].requests[count++]; |
||
1419 | erq->seqno = request->seqno; |
||
1420 | erq->jiffies = request->emitted_jiffies; |
||
1421 | erq->tail = request->tail; |
||
1422 | } |
||
1423 | } |
||
1424 | } |
||
1425 | |||
1426 | /** |
||
1427 | * i915_capture_error_state - capture an error record for later analysis |
||
1428 | * @dev: drm device |
||
1429 | * |
||
1430 | * Should be called when an error is detected (either a hang or an error |
||
1431 | * interrupt) to capture error state from the time of the error. Fills |
||
1432 | * out a structure which becomes available in debugfs for user level tools |
||
1433 | * to pick up. |
||
1434 | */ |
||
1435 | static void i915_capture_error_state(struct drm_device *dev) |
||
1436 | { |
||
1437 | struct drm_i915_private *dev_priv = dev->dev_private; |
||
1438 | struct drm_i915_gem_object *obj; |
||
1439 | struct drm_i915_error_state *error; |
||
1440 | unsigned long flags; |
||
1441 | int i, pipe; |
||
1442 | |||
3480 | Serge | 1443 | spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); |
1444 | error = dev_priv->gpu_error.first_error; |
||
1445 | spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); |
||
3031 | serge | 1446 | if (error) |
1447 | return; |
||
1448 | |||
1449 | /* Account for pipe specific data like PIPE*STAT */ |
||
1450 | error = kzalloc(sizeof(*error), GFP_ATOMIC); |
||
1451 | if (!error) { |
||
1452 | DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); |
||
1453 | return; |
||
1454 | } |
||
1455 | |||
3746 | Serge | 1456 | DRM_INFO("capturing error event; look for more information in " |
3480 | Serge | 1457 | "/sys/kernel/debug/dri/%d/i915_error_state\n", |
3031 | serge | 1458 | dev->primary->index); |
1459 | |||
1460 | kref_init(&error->ref); |
||
1461 | error->eir = I915_READ(EIR); |
||
1462 | error->pgtbl_er = I915_READ(PGTBL_ER); |
||
3746 | Serge | 1463 | if (HAS_HW_CONTEXTS(dev)) |
3031 | serge | 1464 | error->ccid = I915_READ(CCID); |
1465 | |||
1466 | if (HAS_PCH_SPLIT(dev)) |
||
1467 | error->ier = I915_READ(DEIER) | I915_READ(GTIER); |
||
1468 | else if (IS_VALLEYVIEW(dev)) |
||
1469 | error->ier = I915_READ(GTIER) | I915_READ(VLV_IER); |
||
1470 | else if (IS_GEN2(dev)) |
||
1471 | error->ier = I915_READ16(IER); |
||
1472 | else |
||
1473 | error->ier = I915_READ(IER); |
||
1474 | |||
3243 | Serge | 1475 | if (INTEL_INFO(dev)->gen >= 6) |
1476 | error->derrmr = I915_READ(DERRMR); |
||
1477 | |||
1478 | if (IS_VALLEYVIEW(dev)) |
||
1479 | error->forcewake = I915_READ(FORCEWAKE_VLV); |
||
1480 | else if (INTEL_INFO(dev)->gen >= 7) |
||
1481 | error->forcewake = I915_READ(FORCEWAKE_MT); |
||
1482 | else if (INTEL_INFO(dev)->gen == 6) |
||
1483 | error->forcewake = I915_READ(FORCEWAKE); |
||
1484 | |||
3746 | Serge | 1485 | if (!HAS_PCH_SPLIT(dev)) |
3031 | serge | 1486 | for_each_pipe(pipe) |
1487 | error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); |
||
1488 | |||
1489 | if (INTEL_INFO(dev)->gen >= 6) { |
||
1490 | error->error = I915_READ(ERROR_GEN6); |
||
1491 | error->done_reg = I915_READ(DONE_REG); |
||
1492 | } |
||
1493 | |||
1494 | if (INTEL_INFO(dev)->gen == 7) |
||
1495 | error->err_int = I915_READ(GEN7_ERR_INT); |
||
1496 | |||
1497 | i915_get_extra_instdone(dev, error->extra_instdone); |
||
1498 | |||
1499 | i915_gem_record_fences(dev, error); |
||
1500 | i915_gem_record_rings(dev, error); |
||
1501 | |||
1502 | /* Record buffers on the active and pinned lists. */ |
||
1503 | error->active_bo = NULL; |
||
1504 | error->pinned_bo = NULL; |
||
1505 | |||
1506 | i = 0; |
||
1507 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) |
||
1508 | i++; |
||
1509 | error->active_bo_count = i; |
||
1510 | list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) |
||
1511 | if (obj->pin_count) |
||
1512 | i++; |
||
1513 | error->pinned_bo_count = i - error->active_bo_count; |
||
1514 | |||
1515 | error->active_bo = NULL; |
||
1516 | error->pinned_bo = NULL; |
||
1517 | if (i) { |
||
1518 | error->active_bo = kmalloc(sizeof(*error->active_bo)*i, |
||
1519 | GFP_ATOMIC); |
||
1520 | if (error->active_bo) |
||
1521 | error->pinned_bo = |
||
1522 | error->active_bo + error->active_bo_count; |
||
1523 | } |
||
1524 | |||
1525 | if (error->active_bo) |
||
1526 | error->active_bo_count = |
||
1527 | capture_active_bo(error->active_bo, |
||
1528 | error->active_bo_count, |
||
1529 | &dev_priv->mm.active_list); |
||
1530 | |||
1531 | if (error->pinned_bo) |
||
1532 | error->pinned_bo_count = |
||
1533 | capture_pinned_bo(error->pinned_bo, |
||
1534 | error->pinned_bo_count, |
||
1535 | &dev_priv->mm.bound_list); |
||
1536 | |||
1537 | do_gettimeofday(&error->time); |
||
1538 | |||
1539 | error->overlay = intel_overlay_capture_error_state(dev); |
||
1540 | error->display = intel_display_capture_error_state(dev); |
||
1541 | |||
3480 | Serge | 1542 | spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); |
1543 | if (dev_priv->gpu_error.first_error == NULL) { |
||
1544 | dev_priv->gpu_error.first_error = error; |
||
3031 | serge | 1545 | error = NULL; |
1546 | } |
||
3480 | Serge | 1547 | spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); |
3031 | serge | 1548 | |
1549 | if (error) |
||
1550 | i915_error_state_free(&error->ref); |
||
1551 | } |
||
1552 | |||
1553 | void i915_destroy_error_state(struct drm_device *dev) |
||
1554 | { |
||
1555 | struct drm_i915_private *dev_priv = dev->dev_private; |
||
1556 | struct drm_i915_error_state *error; |
||
1557 | unsigned long flags; |
||
1558 | |||
3480 | Serge | 1559 | spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); |
1560 | error = dev_priv->gpu_error.first_error; |
||
1561 | dev_priv->gpu_error.first_error = NULL; |
||
1562 | spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); |
||
3031 | serge | 1563 | |
1564 | if (error) |
||
1565 | kref_put(&error->ref, i915_error_state_free); |
||
1566 | } |
||
1567 | #else |
||
1568 | #define i915_capture_error_state(x) |
||
1569 | #endif |
||
1570 | |||
1571 | static void i915_report_and_clear_eir(struct drm_device *dev) |
||
1572 | { |
||
1573 | struct drm_i915_private *dev_priv = dev->dev_private; |
||
1574 | uint32_t instdone[I915_NUM_INSTDONE_REG]; |
||
1575 | u32 eir = I915_READ(EIR); |
||
1576 | int pipe, i; |
||
1577 | |||
1578 | if (!eir) |
||
1579 | return; |
||
1580 | |||
1581 | pr_err("render error detected, EIR: 0x%08x\n", eir); |
||
1582 | |||
1583 | i915_get_extra_instdone(dev, instdone); |
||
1584 | |||
1585 | if (IS_G4X(dev)) { |
||
1586 | if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { |
||
1587 | u32 ipeir = I915_READ(IPEIR_I965); |
||
1588 | |||
1589 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); |
||
1590 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); |
||
1591 | for (i = 0; i < ARRAY_SIZE(instdone); i++) |
||
1592 | pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); |
||
1593 | pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); |
||
1594 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); |
||
1595 | I915_WRITE(IPEIR_I965, ipeir); |
||
1596 | POSTING_READ(IPEIR_I965); |
||
1597 | } |
||
1598 | if (eir & GM45_ERROR_PAGE_TABLE) { |
||
1599 | u32 pgtbl_err = I915_READ(PGTBL_ER); |
||
1600 | pr_err("page table error\n"); |
||
1601 | pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); |
||
1602 | I915_WRITE(PGTBL_ER, pgtbl_err); |
||
1603 | POSTING_READ(PGTBL_ER); |
||
1604 | } |
||
1605 | } |
||
1606 | |||
1607 | if (!IS_GEN2(dev)) { |
||
1608 | if (eir & I915_ERROR_PAGE_TABLE) { |
||
1609 | u32 pgtbl_err = I915_READ(PGTBL_ER); |
||
1610 | pr_err("page table error\n"); |
||
1611 | pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); |
||
1612 | I915_WRITE(PGTBL_ER, pgtbl_err); |
||
1613 | POSTING_READ(PGTBL_ER); |
||
1614 | } |
||
1615 | } |
||
1616 | |||
1617 | if (eir & I915_ERROR_MEMORY_REFRESH) { |
||
1618 | pr_err("memory refresh error:\n"); |
||
1619 | for_each_pipe(pipe) |
||
1620 | pr_err("pipe %c stat: 0x%08x\n", |
||
1621 | pipe_name(pipe), I915_READ(PIPESTAT(pipe))); |
||
1622 | /* pipestat has already been acked */ |
||
1623 | } |
||
1624 | if (eir & I915_ERROR_INSTRUCTION) { |
||
1625 | pr_err("instruction error\n"); |
||
1626 | pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); |
||
1627 | for (i = 0; i < ARRAY_SIZE(instdone); i++) |
||
1628 | pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); |
||
1629 | if (INTEL_INFO(dev)->gen < 4) { |
||
1630 | u32 ipeir = I915_READ(IPEIR); |
||
1631 | |||
1632 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); |
||
1633 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); |
||
1634 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); |
||
1635 | I915_WRITE(IPEIR, ipeir); |
||
1636 | POSTING_READ(IPEIR); |
||
1637 | } else { |
||
1638 | u32 ipeir = I915_READ(IPEIR_I965); |
||
1639 | |||
1640 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); |
||
1641 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); |
||
1642 | pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); |
||
1643 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); |
||
1644 | I915_WRITE(IPEIR_I965, ipeir); |
||
1645 | POSTING_READ(IPEIR_I965); |
||
1646 | } |
||
1647 | } |
||
1648 | |||
1649 | I915_WRITE(EIR, eir); |
||
1650 | POSTING_READ(EIR); |
||
1651 | eir = I915_READ(EIR); |
||
1652 | if (eir) { |
||
1653 | /* |
||
1654 | * some errors might have become stuck, |
||
1655 | * mask them. |
||
1656 | */ |
||
1657 | DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); |
||
1658 | I915_WRITE(EMR, I915_READ(EMR) | eir); |
||
1659 | I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); |
||
1660 | } |
||
1661 | } |
||
1662 | |||
/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 * @wedged: true if the GPU is hung and a full reset should be initiated
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		/* Advertise the pending reset before waking any waiters so
		 * they observe the reset-in-progress state when they run. */
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset work item
		 * doesn't deadlock trying to grab various locks.
		 */
		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);
	}

	/* NOTE(port): upstream queues the error/reset work here; this port
	 * has it disabled, so no reset work item is actually scheduled. */
//	queue_work(dev_priv->wq, &dev_priv->error_work);
}
||
1696 | |||
1697 | #if 0 |
||
1698 | |||
1699 | |||
/* Detect a page-flip stall on @pipe: if the display base register already
 * points at the pending flip's buffer but the flip-done interrupt never
 * arrived, assume the interrupt was missed and complete the flip manually.
 * Compiled out in this port (enclosing #if 0); marked __always_unused. */
static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	/* event_lock protects intel_crtc->unpin_work against the flip path */
	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL ||
	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
	    !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		/* gen4+: compare the high bits of the surface base address */
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
					obj->gtt_offset;
	} else {
		/* pre-gen4: DSPADDR holds base + line/pixel offset of (x, y) */
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}
||
1745 | |||
1746 | #endif |
||
1747 | |||
1748 | /* Called from drm generic code, passed 'crtc' which |
||
1749 | * we use as a pipe index |
||
1750 | */ |
||
/* Enable vblank interrupt delivery for @pipe on gen2-4 parts.
 * Returns 0 on success or -EINVAL if the pipe is not enabled. */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	/* irq_lock serializes PIPESTAT enable-bit updates */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		/* gen4 has a distinct start-of-vblank event */
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);

	/* maintain vblank delivery even in deep C-states */
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
||
1774 | |||
1775 | static int ironlake_enable_vblank(struct drm_device *dev, int pipe) |
||
1776 | { |
||
1777 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
||
1778 | unsigned long irqflags; |
||
1779 | |||
1780 | if (!i915_pipe_enabled(dev, pipe)) |
||
1781 | return -EINVAL; |
||
1782 | |||
1783 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
||
1784 | ironlake_enable_display_irq(dev_priv, (pipe == 0) ? |
||
1785 | DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); |
||
1786 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
||
1787 | |||
1788 | return 0; |
||
1789 | } |
||
1790 | |||
/* Enable vblank interrupt delivery for @pipe on Ivybridge.
 * The IVB per-pipe DE bits are spaced 5 bits apart, hence the shift.
 * Returns 0 on success or -EINVAL if the pipe is not enabled. */
static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv,
				    DE_PIPEA_VBLANK_IVB << (5 * pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
||
1806 | |||
1807 | static int valleyview_enable_vblank(struct drm_device *dev, int pipe) |
||
1808 | { |
||
1809 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
||
1810 | unsigned long irqflags; |
||
1811 | u32 imr; |
||
1812 | |||
1813 | if (!i915_pipe_enabled(dev, pipe)) |
||
1814 | return -EINVAL; |
||
1815 | |||
1816 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
||
1817 | imr = I915_READ(VLV_IMR); |
||
1818 | if (pipe == 0) |
||
1819 | imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; |
||
1820 | else |
||
1821 | imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; |
||
1822 | I915_WRITE(VLV_IMR, imr); |
||
1823 | i915_enable_pipestat(dev_priv, pipe, |
||
1824 | PIPE_START_VBLANK_INTERRUPT_ENABLE); |
||
1825 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
||
1826 | |||
1827 | return 0; |
||
1828 | } |
||
1829 | |||
1830 | /* Called from drm generic code, passed 'crtc' which |
||
1831 | * we use as a pipe index |
||
1832 | */ |
||
/* Disable vblank interrupt delivery for @pipe on gen2-4 parts. */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	/* gen3: re-allow AGPBUSY-based C-state entry now that vblank
	 * delivery no longer needs to be kept alive (pairs with enable) */
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));

	/* Clear both event flavours; only one was set depending on gen. */
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_ENABLE |
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
||
1847 | |||
1848 | static void ironlake_disable_vblank(struct drm_device *dev, int pipe) |
||
1849 | { |
||
1850 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
||
1851 | unsigned long irqflags; |
||
1852 | |||
1853 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
||
1854 | ironlake_disable_display_irq(dev_priv, (pipe == 0) ? |
||
1855 | DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); |
||
1856 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
||
1857 | } |
||
1858 | |||
/* Disable vblank interrupt delivery for @pipe on Ivybridge.
 * The IVB per-pipe DE bits are spaced 5 bits apart, hence the shift. */
static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv,
				     DE_PIPEA_VBLANK_IVB << (pipe * 5));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
||
1869 | |||
1870 | static void valleyview_disable_vblank(struct drm_device *dev, int pipe) |
||
1871 | { |
||
1872 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
||
1873 | unsigned long irqflags; |
||
1874 | u32 imr; |
||
1875 | |||
1876 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
||
1877 | i915_disable_pipestat(dev_priv, pipe, |
||
1878 | PIPE_START_VBLANK_INTERRUPT_ENABLE); |
||
1879 | imr = I915_READ(VLV_IMR); |
||
1880 | if (pipe == 0) |
||
1881 | imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; |
||
1882 | else |
||
1883 | imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; |
||
1884 | I915_WRITE(VLV_IMR, imr); |
||
1885 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
||
1886 | } |
||
1887 | |||
1888 | static u32 |
||
1889 | ring_last_seqno(struct intel_ring_buffer *ring) |
||
1890 | { |
||
1891 | return list_entry(ring->request_list.prev, |
||
1892 | struct drm_i915_gem_request, list)->seqno; |
||
1893 | } |
||
2351 | Serge | 1894 | /* drm_dma.h hooks |
1895 | */ |
||
/* Quiesce all interrupt sources (display engine, GT, and PCH/south
 * display) before the real IRQ handler is installed. */
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	/* Mask and disable all display-engine interrupts. */
	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	/* No PCH present (e.g. some SoC parts): nothing south to touch. */
	if (HAS_PCH_NOP(dev))
		return;

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	/*
	 * SDEIER is also touched by the interrupt handler to work around missed
	 * PCH interrupts. Hence we can't update it after the interrupt handler
	 * is enabled - instead we unconditionally enable all PCH interrupt
	 * sources here, but then only unmask them as needed with SDEIMR.
	 */
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}
||
1929 | |||
/* Quiesce all interrupt sources on Valleyview (display, rings, GT) and
 * clear any stale status before the real IRQ handler is installed. */
static void valleyview_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/* and GT */
	/* GTIIR is write-to-clear; written twice as in the upstream driver,
	 * presumably so bits re-asserted after the first clear are flushed
	 * too — TODO confirm against hw docs. */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	I915_WRITE(DPINVGTT, 0xff);

	/* Disable hotplug detection and ack any pending hotplug status. */
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}
||
1961 | |||
/* Configure PCH (south display) hotplug interrupts: unmask the SDEIMR
 * bits for every encoder whose hotplug pin is marked HPD_ENABLED, then
 * program the per-port hotplug enable/pulse-duration register. */
static void ibx_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 mask = ~I915_READ(SDEIMR);	/* currently-unmasked sources */
	u32 hotplug;

	if (HAS_PCH_IBX(dev)) {
		/* IBX and CPT+ use different hotplug bit layouts/tables. */
		mask &= ~SDE_HOTPLUG_MASK;
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				mask |= hpd_ibx[intel_encoder->hpd_pin];
	} else {
		mask &= ~SDE_HOTPLUG_MASK_CPT;
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				mask |= hpd_cpt[intel_encoder->hpd_pin];
	}

	I915_WRITE(SDEIMR, ~mask);

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 *
	 * This register is the same on all known PCH chips.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
||
1997 | |||
3480 | Serge | 1998 | static void ibx_irq_postinstall(struct drm_device *dev) |
1999 | { |
||
2000 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
||
2001 | u32 mask; |
||
2002 | |||
2003 | if (HAS_PCH_IBX(dev)) |
||
3746 | Serge | 2004 | mask = SDE_GMBUS | SDE_AUX_MASK; |
3480 | Serge | 2005 | else |
3746 | Serge | 2006 | mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; |
3480 | Serge | 2007 | |
3746 | Serge | 2008 | if (HAS_PCH_NOP(dev)) |
2009 | return; |
||
2010 | |||
3480 | Serge | 2011 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); |
2012 | I915_WRITE(SDEIMR, ~mask); |
||
2013 | } |
||
2014 | |||
/* Enable the always-on interrupt sources for Ironlake/Sandybridge:
 * display engine, GT (render/BSD/blitter), PCH, and on mobile Ironlake
 * the PCU events. Returns 0. */
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable kind of interrupts always enabled */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
			   DE_AUX_CHANNEL_A;
	u32 render_irqs;

	dev_priv->irq_mask = ~display_mask;

	/* should always can generate irq */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	/* vblank bits stay masked in DEIMR; enable/disable_vblank toggle them */
	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~0;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	/* Ring user-interrupt bits moved between gen5 and gen6. */
	if (IS_GEN6(dev))
		render_irqs =
			GT_USER_INTERRUPT |
			GEN6_BSD_USER_INTERRUPT |
			GEN6_BLITTER_USER_INTERRUPT;
	else
		render_irqs =
			GT_USER_INTERRUPT |
			GT_PIPE_NOTIFY |
			GT_BSD_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Clear & enable PCU event interrupts */
		I915_WRITE(DEIIR, DE_PCU_EVENT);
		I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
	}

	return 0;
}
||
2061 | |||
/* Enable the always-on interrupt sources for Ivybridge: display engine
 * (all three pipes' flip-done, GSE, AUX, PCH), GT rings, and the gen7
 * L3 parity error. Returns 0. */
static int ivybridge_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable kind of interrupts always enabled */
	u32 display_mask =
		DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
		DE_PLANEC_FLIP_DONE_IVB |
		DE_PLANEB_FLIP_DONE_IVB |
		DE_PLANEA_FLIP_DONE_IVB |
		DE_AUX_CHANNEL_A_IVB;
	u32 render_irqs;

	dev_priv->irq_mask = ~display_mask;

	/* should always can generate irq */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	/* vblank bits stay masked in DEIMR; enable/disable_vblank toggle them */
	I915_WRITE(DEIER,
		   display_mask |
		   DE_PIPEC_VBLANK_IVB |
		   DE_PIPEB_VBLANK_IVB |
		   DE_PIPEA_VBLANK_IVB);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
		GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	ibx_irq_postinstall(dev);

	return 0;
}
||
2100 | |||
2101 | static int valleyview_irq_postinstall(struct drm_device *dev) |
||
2102 | { |
||
2103 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
||
2104 | u32 enable_mask; |
||
2105 | u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; |
||
3243 | Serge | 2106 | u32 render_irqs; |
3031 | serge | 2107 | u16 msid; |
2108 | |||
2109 | enable_mask = I915_DISPLAY_PORT_INTERRUPT; |
||
2110 | enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | |
||
2111 | I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | |
||
2112 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | |
||
2113 | I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; |
||
2114 | |||
2115 | /* |
||
2116 | *Leave vblank interrupts masked initially. enable/disable will |
||
2117 | * toggle them based on usage. |
||
2118 | */ |
||
2119 | dev_priv->irq_mask = (~enable_mask) | |
||
2120 | I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | |
||
2121 | I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; |
||
2122 | |||
2123 | /* Hack for broken MSIs on VLV */ |
||
3243 | Serge | 2124 | // pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000); |
2125 | // pci_read_config_word(dev->pdev, 0x98, &msid); |
||
2126 | // msid &= 0xff; /* mask out delivery bits */ |
||
2127 | // msid |= (1<<14); |
||
2128 | // pci_write_config_word(dev_priv->dev->pdev, 0x98, msid); |
||
3031 | serge | 2129 | |
3480 | Serge | 2130 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
2131 | POSTING_READ(PORT_HOTPLUG_EN); |
||
2132 | |||
3031 | serge | 2133 | I915_WRITE(VLV_IMR, dev_priv->irq_mask); |
2134 | I915_WRITE(VLV_IER, enable_mask); |
||
2135 | I915_WRITE(VLV_IIR, 0xffffffff); |
||
2136 | I915_WRITE(PIPESTAT(0), 0xffff); |
||
2137 | I915_WRITE(PIPESTAT(1), 0xffff); |
||
2138 | POSTING_READ(VLV_IER); |
||
2139 | |||
2140 | i915_enable_pipestat(dev_priv, 0, pipestat_enable); |
||
3480 | Serge | 2141 | i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); |
3031 | serge | 2142 | i915_enable_pipestat(dev_priv, 1, pipestat_enable); |
2143 | |||
2144 | I915_WRITE(VLV_IIR, 0xffffffff); |
||
2145 | I915_WRITE(VLV_IIR, 0xffffffff); |
||
2146 | |||
2147 | I915_WRITE(GTIIR, I915_READ(GTIIR)); |
||
2148 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); |
||
3243 | Serge | 2149 | |
2150 | render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | |
||
2151 | GEN6_BLITTER_USER_INTERRUPT; |
||
2152 | I915_WRITE(GTIER, render_irqs); |
||
3031 | serge | 2153 | POSTING_READ(GTIER); |
2154 | |||
2155 | /* ack & enable invalid PTE error interrupts */ |
||
2156 | #if 0 /* FIXME: add support to irq handler for checking these bits */ |
||
2157 | I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); |
||
2158 | I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); |
||
2159 | #endif |
||
2160 | |||
2161 | I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); |
||
3480 | Serge | 2162 | |
2163 | return 0; |
||
2164 | } |
||
2165 | |||
/* Tear down Valleyview interrupt state: mask everything and ack all
 * pending status so nothing fires after the handler is removed. */
static void valleyview_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	/* PIPESTAT is cleared a second time, as in the upstream driver —
	 * presumably to ack events raised between the two passes. */
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}
||
2187 | |||
/* Tear down Ironlake/IVB interrupt state: mask and ack the display
 * engine, GT, and (when present) PCH interrupt registers. */
static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	/* No PCH: the SDE registers don't exist, skip them. */
	if (HAS_PCH_NOP(dev))
		return;

	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
}
||
2212 | |||
2213 | #if 0 |
||
2214 | |||
/* Quiesce gen2 interrupt sources before the handler is installed.
 * (Compiled out in this port — enclosing #if 0.) */
static void i8xx_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	/* gen2 IMR/IER are 16-bit registers */
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}
||
2228 | |||
/* Enable the always-on gen2 interrupt sources (pipe events, command
 * parser errors, user interrupts). Returns 0.
 * (Compiled out in this port — enclosing #if 0.) */
static int i8xx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	/* Note: flip-pending bits are unmasked in IMR but not enabled in
	 * IER; the irq handler polls them out of IIR instead. */
	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	return 0;
}
||
2254 | |||
/*
 * Returns true when a page flip has completed.
 *
 * NOTE(port): drm_handle_vblank() is commented out below, leaving a bare
 * unconditional "return false;" — so in this port the function always
 * returns false and everything after it is unreachable. The remaining
 * body is the upstream flip-done detection logic, kept for reference.
 */
static bool i8xx_handle_vblank(struct drm_device *dev,
			       int pipe, u16 iir)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);

//	if (!drm_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

//	intel_prepare_page_flip(dev, pipe);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}
||
2285 | |||
/* gen2 (i8xx) top-level interrupt handler: loop while IIR has non-flip
 * bits set, acking PIPESTAT before IIR (hardware-required order), then
 * dispatch ring notifications and vblank/flip handling.
 * (Compiled out in this port — enclosing #if 0.) */
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	unsigned long irqflags;
	/* NOTE(review): irq_received is written below but never read —
	 * dead store carried over from upstream. */
	int irq_received;
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		/* Ack IIR (except flip-pending bits, handled via ISR below). */
		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		/* Once a flip completes, stop filtering its pending bit. */
		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    i8xx_handle_vblank(dev, 0, iir))
			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);

		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    i8xx_handle_vblank(dev, 1, iir))
			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);

		iir = new_iir;
	}

	return IRQ_HANDLED;
}
||
2353 | |||
/* Tear down gen2 interrupt state: disable and ack all pipe status,
 * then mask and ack the 16-bit IMR/IER/IIR registers.
 * (Compiled out in this port — enclosing #if 0.) */
static void i8xx_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}
||
2368 | |||
2369 | #endif |
||
2370 | |||
/* Quiesce gen3/4 interrupt sources (hotplug, pipes, IMR/IER) before the
 * real IRQ handler is installed. */
static void i915_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	if (I915_HAS_HOTPLUG(dev)) {
		/* Disable hotplug detection and ack any pending status. */
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
||
2390 | |||
/* Enable the always-on gen3/4 interrupt sources (ASLE, pipe events,
 * parser errors, user interrupts) plus display-port hotplug when the
 * part supports it. Returns 0. */
static int i915_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	/* Note: flip-pending bits are unmasked in IMR but not enabled in
	 * IER; the irq handler polls them out of IIR instead. */
	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	/* NOTE(port): opregion ASLE enable is disabled in this port. */
//	intel_opregion_enable_asle(dev);

	return 0;
}
||
2432 | |||
/*
 * Returns true when a page flip has completed.
 *
 * NOTE(port): drm_handle_vblank() is commented out below, leaving a bare
 * unconditional "return false;" — so in this port the function always
 * returns false and everything after it is unreachable. The remaining
 * body is the upstream flip-done detection logic, kept for reference.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

//	if (!drm_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

//	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}
2463 | |||
/*
 * Top-half interrupt handler for gen3 (i915-class) hardware.
 *
 * Loops while IIR has bits set outside of the plane-flip-pending mask:
 * latches and clears PIPESTAT under irq_lock, consumes the hotplug port,
 * acknowledges IIR, then dispatches ring notifications and per-pipe
 * vblank/flip handling. Returns IRQ_HANDLED if any iteration did work.
 */
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	/* Flip-pending bits are masked out of the loop condition; they are
	 * only cleared from flip_mask once the flip is seen to complete. */
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
				  hotplug_status);
			if (hotplug_trigger) {
				/* An IRQ storm disables the offending pin;
				 * re-program the hotplug enables if so. */
				if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
					i915_hpd_irq_setup(dev);
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);
			}
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			POSTING_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			/* On mobile gen3 parts plane/pipe mapping is swapped. */
			int plane = pipe;
			if (IS_MOBILE(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		/* ASLE/opregion handling not wired up in this port. */
//		if (blc_event || (iir & I915_ASLE_INTERRUPT))
//			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
2572 | |||
/*
 * Tear down gen3 interrupt state: disable hotplug detection, mask all
 * interrupt sources (HWSTAM/IMR), disable delivery (IER) and acknowledge
 * any latched status so no stale events fire on the next install.
 */
static void i915_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		/* Writing the read-back value acks all latched hotplug bits. */
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	/* Gen3 HWSTAM is a 16-bit register, hence I915_WRITE16. */
	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	/* Ack anything still pending in IIR. */
	I915_WRITE(IIR, I915_READ(IIR));
}
2594 | |||
/*
 * Quiesce gen4/i965 interrupt hardware before installing the handler:
 * reset the received counter, ack hotplug status, mask everything and
 * disable delivery, finishing with a posting read to flush the writes.
 */
static void i965_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	/* Write-back of the read value acks latched hotplug status bits. */
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
2612 | |||
/*
 * Enable gen4/i965 interrupts after the handler is installed.
 *
 * Builds irq_mask (sources we never want masked in IMR) and enable_mask
 * (sources delivered via IER); flip-pending bits stay out of IER since
 * flips are tracked via PIPESTAT vblank events instead. Also programs
 * EMR error detection and leaves hotplug enables cleared — they are set
 * later by the hpd_irq_setup hook. Always returns 0.
 */
static int i965_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	/* Flip-pending completion is observed at vblank, not via IER. */
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* GMBUS events are routed through pipe 0's PIPESTAT. */
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	/* Opregion ASLE not wired up in this port. */
//	intel_opregion_enable_asle(dev);

	return 0;
}
2664 | |||
/*
 * (Re)program PORT_HOTPLUG_EN from per-pin hotplug state.
 *
 * Walks every encoder and enables hotplug detection for pins whose
 * storm-tracking state is HPD_ENABLED, then sets the CRT detection
 * voltage-compare parameters. Called at init and again whenever the
 * storm detector disables a pin. No-op on hardware without hotplug.
 */
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	if (I915_HAS_HOTPLUG(dev)) {
		/* Read-modify-write: start from a clean enable mask. */
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
		/* Note HDMI and DP share hotplug bits */
		/* enable bits are the same for all generations */
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
		/* Programming the CRT detection parameters tends
		   to generate a spurious hotplug event about three
		   seconds later.  So just do it once.
		*/
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

		/* Ignore TV since it's buggy */
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}
2693 | |||
/*
 * Top-half interrupt handler for gen4/i965-class hardware.
 *
 * Same structure as i915_irq_handler: loop while IIR has non-flip bits,
 * latch/clear PIPESTAT under irq_lock, consume the hotplug port (with
 * G4X vs i965 trigger masks), ack IIR, then dispatch RCS/VCS ring
 * notifications, per-pipe vblank/flip handling and GMBUS events.
 * Returns IRQ_HANDLED if any iteration did work.
 */
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;
	/* Flip-pending bits are excluded from the loop/ack until the
	 * corresponding flip is observed to complete at vblank. */
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	for (;;) {
		bool blc_event = false;

		irq_received = (iir & ~flip_mask) != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
								  HOTPLUG_INT_STATUS_G4X :
								  HOTPLUG_INT_STATUS_I965);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
				  hotplug_status);
			if (hotplug_trigger) {
				/* Storm detection may disable a pin; then
				 * re-program the hotplug enables. */
				if (hotplug_irq_storm_detect(dev, hotplug_trigger,
							     IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i965))
					i915_hpd_irq_setup(dev);
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);
			}
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(pipe) {
			/* On gen4 plane == pipe; no mobile swap needed. */
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}


		/* Opregion ASLE handling not wired up in this port. */
//		if (blc_event || (iir & I915_ASLE_INTERRUPT))
//			intel_opregion_asle_intr(dev);

		/* GMBUS events arrive via pipe 0's PIPESTAT (see postinstall). */
		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
2813 | |||
/*
 * Tear down gen4/i965 interrupt state: disable hotplug, mask and disable
 * all sources, then ack leftover PIPESTAT (status bits only, 0x8000ffff)
 * and IIR so nothing stale fires on a later install.
 */
static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	/* Write-back of the read value acks latched hotplug status. */
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);	/* clear enable bits first */
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
2836 | |||
/*
 * Select the per-generation IRQ entry points.
 *
 * Fills dev->driver's irq_handler/preinstall/postinstall and
 * dev_priv->display.hpd_irq_setup according to the detected hardware
 * generation, and initializes the hotplug work item. This port omits
 * the pm_qos request present upstream.
 */
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);

	/* PM QoS not available in this environment. */
//	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
		/* Share pre & uninstall handlers with ILK/SNB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			/* gen2 (i8xx) handlers are not hooked up in this port;
			 * the function pointers are left as-is. */
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
	}
}
3243 | Serge | 2878 | |
/*
 * Initialize hotplug-detect state and enable HPD interrupts.
 *
 * Resets the per-pin storm counters to HPD_ENABLED (pin 0 / HPD_NONE is
 * skipped), derives each connector's polling mode (HPD-capable connectors
 * that are not explicitly polled get DRM_CONNECTOR_POLL_HPD), then calls
 * the generation-specific hpd_irq_setup hook installed by intel_irq_init().
 */
void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
}
3480 | Serge | 2900 | |
/*
 * OS-facing interrupt trampoline.
 *
 * The host OS interrupt layer (see AttachIntHandler in drm_irq_install)
 * delivers only the device pointer, so forward to the generation-specific
 * handler selected by intel_irq_init() with a dummy irq number of 0.
 */
irqreturn_t intel_irq_handler(struct drm_device *dev)
{
	/* Debug tracing, kept disabled. */
//    printf("i915 irq\n");
//    printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ;

	return dev->driver->irq_handler(0, dev);
}
2910 | |||
2351 | Serge | 2911 | int drm_irq_install(struct drm_device *dev) |
2912 | { |
||
3051 | serge | 2913 | unsigned long sh_flags = 0; |
2351 | Serge | 2914 | int irq_line; |
2915 | int ret = 0; |
||
2916 | |||
3051 | serge | 2917 | char *irqname; |
2918 | |||
2351 | Serge | 2919 | mutex_lock(&dev->struct_mutex); |
2920 | |||
2921 | /* Driver must have been initialized */ |
||
2922 | if (!dev->dev_private) { |
||
3243 | Serge | 2923 | mutex_unlock(&dev->struct_mutex); |
2924 | return -EINVAL; |
||
2351 | Serge | 2925 | } |
2926 | |||
2927 | if (dev->irq_enabled) { |
||
3243 | Serge | 2928 | mutex_unlock(&dev->struct_mutex); |
2929 | return -EBUSY; |
||
2351 | Serge | 2930 | } |
2931 | dev->irq_enabled = 1; |
||
2932 | mutex_unlock(&dev->struct_mutex); |
||
2933 | |||
2934 | irq_line = drm_dev_to_irq(dev); |
||
2935 | |||
2936 | DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev)); |
||
2937 | |||
3051 | serge | 2938 | /* Before installing handler */ |
3243 | Serge | 2939 | if (dev->driver->irq_preinstall) |
2940 | dev->driver->irq_preinstall(dev); |
||
2351 | Serge | 2941 | |
3243 | Serge | 2942 | ret = AttachIntHandler(irq_line, intel_irq_handler, (u32)dev); |
2351 | Serge | 2943 | |
3051 | serge | 2944 | /* After installing handler */ |
3243 | Serge | 2945 | if (dev->driver->irq_postinstall) |
2946 | ret = dev->driver->irq_postinstall(dev); |
||
2351 | Serge | 2947 | |
3051 | serge | 2948 | if (ret < 0) { |
2949 | DRM_ERROR(__FUNCTION__); |
||
2950 | } |
||
2351 | Serge | 2951 | |
2952 | u16_t cmd = PciRead16(dev->pdev->busnr, dev->pdev->devfn, 4); |
||
2953 | cmd&= ~(1<<10); |
||
2954 | PciWrite16(dev->pdev->busnr, dev->pdev->devfn, 4, cmd); |
||
2955 | |||
2956 | return ret; |
||
2957 | }10); |
||
2958 |