Subversion Repositories Kolibri OS

/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#define assert_spin_locked(a)

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};


#define pr_err(fmt, ...) \
	printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)


#define DRM_WAKEUP( queue ) wake_up( queue )
#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )

#define MAX_NOPID ((u32)~0)



/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr &= ~mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr |= mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gtimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask &
						interrupt_mask);
		return;
	}

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}
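
/*
 * The interrupt_mask/enabled_irq_mask pair used by ilk_update_gt_irq()
 * above (and by its sibling helpers) selects which bits to touch and
 * what to set them to. For example, interrupt_mask = 0x6 with
 * enabled_irq_mask = 0x2 updates bits 1 and 2 only:
 *
 *	gt_irq_mask &= ~0x6;		// forget the old state of bits 1-2
 *	gt_irq_mask |= ~0x2 & 0x6;	// re-mask bit 2, leave bit 1 clear
 *
 * Since IMR registers disable an interrupt when its bit is 1, bit 1 ends
 * up enabled and bit 2 disabled, while bits outside interrupt_mask keep
 * their previous value.
 */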

void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask &
						     interrupt_mask);
		return;
	}

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}
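
/*
 * IVB/HSW have a single DE_ERR_INT_IVB enable bit that covers the error
 * interrupts of all pipes, so the scan above must prove that no pipe has
 * CPU FIFO underrun reporting disabled before the shared bit may be
 * unmasked. cpt_can_enable_serr_int() below applies the same rule to the
 * shared PCH SERR interrupt of the transcoders.
 */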

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);

		/* Change the state _after_ we've read out the current one. */
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (!was_enabled &&
		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
				      pipe_name(pipe));
		}
	}
}

static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable)
		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
	else
		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled &&
	    (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask &
						 interrupt_mask);
		return;
	}

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		uint32_t tmp = I915_READ(SERR_INT);
		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

		/* Change the state _after_ we've read out the current one. */
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (!was_enabled &&
		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
				      transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
384
 * reporting for one pipe may also disable all the other CPU error interruts for
385
 * the other pipes, due to the fact that there's just one interrupt mask/enable
386
 * bit for all the pipes.
387
 *
388
 * Returns the previous state of underrun reporting.
389
 */
390
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
391
					   enum pipe pipe, bool enable)
392
{
393
	struct drm_i915_private *dev_priv = dev->dev_private;
394
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
395
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
396
	unsigned long flags;
397
	bool ret;
398
 
399
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
400
 
401
	ret = !intel_crtc->cpu_fifo_underrun_disabled;
402
 
403
	if (enable == ret)
404
		goto done;
405
 
406
	intel_crtc->cpu_fifo_underrun_disabled = !enable;
407
 
408
	if (IS_GEN5(dev) || IS_GEN6(dev))
409
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
410
	else if (IS_GEN7(dev))
411
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
4560 Serge 412
	else if (IS_GEN8(dev))
413
		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
4104 Serge 414
 
415
done:
416
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
417
	return ret;
418
}
419
 
420
/**
421
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
422
 * @dev: drm device
423
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
424
 * @enable: true if we want to report FIFO underrun errors, false otherwise
425
 *
426
 * This function makes us disable or enable PCH fifo underruns for a specific
427
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
428
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's just
 * one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}


void
i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
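
/*
 * PIPESTAT keeps the interrupt enable bits in its upper 16 bits and the
 * matching sticky status bits in the lower 16, each status bit sitting
 * exactly 16 positions below its enable bit. Hence mask | (mask >> 16)
 * in i915_enable_pipestat() sets the enable bit and clears (by writing
 * 1 to) any stale status in a single write, and the reads above mask
 * with 0x7fff0000 to look at the enable half only.
 */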

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_ENABLE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				"pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
	} else {
		enum transcoder cpu_transcoder =
			intel_pipe_to_cpu_transcoder(dev_priv, pipe);
		u32 htotal;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;

		vbl_start *= htotal;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
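
/*
 * Worked example for the return above, with made-up mode numbers: the
 * low read returns the bottom 8 frame-counter bits plus the pixel
 * counter of the current frame. For htotal = 2200 and
 * crtc_vblank_start = 1084 lines, vbl_start = 1084 * 2200 = 2384800
 * pixels; the hardware frame counter only increments at the start of
 * the next active period, so once the pixel counter passes vbl_start we
 * are logically one vblank further and (pixel >= vbl_start) supplies
 * the missing +1. The result is truncated to the counter's 24 bits.
 */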

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
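
/*
 * These raw accessors are plain readl()/readw() on the register BAR and
 * skip the I915_READ() bookkeeping (forcewake handling and the uncore
 * spinlock). That is what makes them usable from the scanout position
 * code below, which already holds uncore.lock for its timing critical
 * reads and must not take it again; display registers need no forcewake.
 */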

static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t status;

	if (INTEL_INFO(dev)->gen < 7) {
		status = pipe == PIPE_A ?
			DE_PIPEA_VBLANK :
			DE_PIPEB_VBLANK;
	} else {
		switch (pipe) {
		default:
		case PIPE_A:
			status = DE_PIPEA_VBLANK_IVB;
			break;
		case PIPE_B:
			status = DE_PIPEB_VBLANK_IVB;
			break;
		case PIPE_C:
			status = DE_PIPEC_VBLANK_IVB;
			break;
		}
	}

	return __raw_i915_read32(dev_priv, DEISR) & status;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    void *stime, void *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */


	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		if (IS_GEN2(dev))
			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
		else
			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

		if (HAS_PCH_SPLIT(dev)) {
			/*
			 * The scanline counter increments at the leading edge
			 * of hsync, ie. it completely misses the active portion
			 * of the line. Fix up the counter at both edges of vblank
			 * to get a more accurate picture whether we're in vblank
			 * or not.
			 */
			in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
			if ((in_vbl && position == vbl_start - 1) ||
			    (!in_vbl && position == vbl_end - 1))
				position = (position + 1) % vtotal;
		} else {
			/*
			 * ISR vblank status bits don't work the way we'd want
			 * them to work on non-PCH platforms (for
			 * ilk_pipe_in_vblank_locked()), and there doesn't
			 * appear any other way to determine if we're currently
			 * in vblank.
			 *
			 * Instead let's assume that we're already in vblank if
			 * we got called from the vblank interrupt and the
			 * scanline counter value indicates that we're on the
			 * line just prior to vblank start. This should result
			 * in the correct answer, unless the vblank interrupt
			 * delivery really got delayed for almost exactly one
			 * full frame/field.
			 */
			if (flags & DRM_CALLED_FROM_VBLIRQ &&
			    position == vbl_start - 1) {
				position = (position + 1) % vtotal;

				/* Signal this correction as "applied". */
				ret |= 0x8;
			}
		}
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;
	}


	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}
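
/*
 * Illustrative numbers for the remapping above: for a progressive mode
 * with vtotal = 1125, vbl_start = 1084 and vbl_end = 1125, a raw
 * scanline of 1100 (inside vblank) becomes 1100 - 1125 = -25 and counts
 * up towards 0 at vbl_end, while a raw scanline of 500 (active area)
 * becomes 500 + (1125 - 1125) = 500, counting up from vbl_end.
 */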

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
			      int *max_error,
			      struct timeval *vblank_time,
			      unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config.adjusted_mode);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  GetTimerTicks() + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring);

	wake_up_all(&ring->irq_queue);
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);

	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;
		new_delay = dev_priv->rps.cur_delay + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
		else
			new_delay = dev_priv->rps.min_delay;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		new_delay = dev_priv->rps.cur_delay + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_delay;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_delay, dev_priv->rps.max_delay);
	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
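
/*
 * The adj bookkeeping above gives the RPS worker an exponential step:
 * consecutive up-threshold interrupts request +1, +2, +4, ... frequency
 * steps and consecutive down-threshold ones -1, -2, -4, ..., a down
 * timeout snaps straight to RPe (or the minimum) and resets the step,
 * and any change of direction starts over at a step of one. clamp_t()
 * keeps the result inside [min_delay, max_delay] regardless.
 */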


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	unsigned long flags;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			ret = IRQ_HANDLED;
			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[RCS]);
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[BCS]);
			I915_WRITE(GEN8_GT_IIR(0), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & GEN8_GT_VCS1_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VCS]);
			I915_WRITE(GEN8_GT_IIR(1), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VECS]);
			I915_WRITE(GEN8_GT_IIR(3), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	return ret;
}
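
/*
 * On GEN8 the GT interrupts live in banked IIR registers, one per engine
 * cluster, with the per-engine bits packed at fixed shifts inside each
 * bank. The master control value passed in says which banks have
 * something latched; writing a bank's value back to GEN8_GT_IIR(n)
 * acknowledges exactly the events that were just handled.
 */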

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5
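
/*
 * Storm detection, as implemented by intel_hpd_irq_handler() below: each
 * HPD pin keeps a counter that is reset once HPD_STORM_DETECT_PERIOD
 * (1000) ms have passed since the pin's last interrupt. If the counter
 * instead climbs past HPD_STORM_THRESHOLD (5) within the window, the pin
 * is marked HPD_MARK_DISABLED and the hotplug work function switches the
 * connector over to polling, re-enabling HPD only after
 * I915_REENABLE_HOTPLUG_DELAY (two minutes).
 */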
static inline void intel_hpd_irq_handler(struct drm_device *dev,
					    u32 hotplug_trigger,
					    const u32 *hpd)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	bool storm_detected = false;

	if (!hotplug_trigger)
		return;

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {

		WARN_ONCE(hpd[i] & hotplug_trigger &&
			  dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED,
			  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
			  hotplug_trigger, i, hpd[i]);

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		dev_priv->hpd_event_bits |= (1 << i);
		if (!time_in_range(GetTimerTicks(), dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = GetTimerTicks();
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	schedule_work(&dev_priv->hotplug_work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif
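
/*
 * The CRC entries above form a ring buffer: head is only advanced here
 * under pipe_crc->lock, tail only by the consumer on the debugfs side.
 * Both the CIRC_SPACE() fullness check and the
 * (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1) wrap-around assume that
 * INTEL_PIPE_CRC_ENTRIES_NR is a power of two.
 */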


static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & GEN6_PM_RPS_EVENTS) {
		spin_lock(&dev_priv->irq_lock);
		dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
		snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS);
		spin_unlock(&dev_priv->irq_lock);

		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
			i915_handle_error(dev_priv->dev, false);
		}
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		for_each_pipe(pipe) {
//			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
//				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
//				intel_prepare_page_flip(dev, pipe);
//				intel_finish_page_flip(dev, pipe);
			}

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);
		}

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

			if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
				dp_aux_irq_handler(dev);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1508
				 port_name(port));
3746 Serge 1509
	}
3031 serge 1510
 
3480 Serge 1511
	if (pch_iir & SDE_AUX_MASK)
1512
		dp_aux_irq_handler(dev);
1513
 
3031 serge 1514
	if (pch_iir & SDE_GMBUS)
3480 Serge 1515
		gmbus_irq_handler(dev);
3031 serge 1516
 
1517
	if (pch_iir & SDE_AUDIO_HDCP_MASK)
1518
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1519
 
1520
	if (pch_iir & SDE_AUDIO_TRANS_MASK)
1521
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1522
 
1523
	if (pch_iir & SDE_POISON)
1524
		DRM_ERROR("PCH poison interrupt\n");
1525
 
1526
	if (pch_iir & SDE_FDI_MASK)
1527
		for_each_pipe(pipe)
1528
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1529
					 pipe_name(pipe),
1530
					 I915_READ(FDI_RX_IIR(pipe)));
1531
 
1532
	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1533
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1534
 
1535
	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1536
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1537
 
4104 Serge 1538
	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1539
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
1540
							  false))
1541
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
1542
 
3031 serge 1543
	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
4104 Serge 1544
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
1545
							  false))
1546
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
3031 serge 1547
}
1548
 
4104 Serge 1549
static void ivb_err_int_handler(struct drm_device *dev)
1550
{
1551
	struct drm_i915_private *dev_priv = dev->dev_private;
1552
	u32 err_int = I915_READ(GEN7_ERR_INT);
4560 Serge 1553
	enum pipe pipe;
4104 Serge 1554
 
1555
	if (err_int & ERR_INT_POISON)
1556
		DRM_ERROR("Poison interrupt\n");
1557
 
4560 Serge 1558
	for_each_pipe(pipe) {
1559
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
1560
			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
1561
								  false))
1562
				DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
1563
						 pipe_name(pipe));
1564
		}
4104 Serge 1565
 
4560 Serge 1566
		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1567
			if (IS_IVYBRIDGE(dev))
1568
				ivb_pipe_crc_irq_handler(dev, pipe);
1569
			else
1570
				hsw_pipe_crc_irq_handler(dev, pipe);
1571
		}
1572
	}
4104 Serge 1573
 
1574
	I915_WRITE(GEN7_ERR_INT, err_int);
1575
}
1576
 
1577
static void cpt_serr_int_handler(struct drm_device *dev)
1578
{
1579
	struct drm_i915_private *dev_priv = dev->dev_private;
1580
	u32 serr_int = I915_READ(SERR_INT);
1581
 
1582
	if (serr_int & SERR_INT_POISON)
1583
		DRM_ERROR("PCH poison interrupt\n");
1584
 
1585
	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
1586
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
1587
							  false))
1588
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
1589
 
1590
	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
1591
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
1592
							  false))
1593
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
1594
 
1595
	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
1596
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
1597
							  false))
1598
			DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");
1599
 
1600
	I915_WRITE(SERR_INT, serr_int);
1601
}
1602
 
3031 serge 1603
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}

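/*
 * Main display-engine IIR decode for ILK/SNB. Dispatches AUX, opregion
 * (GSE), poison, per-pipe underrun/CRC and PCH events; on GEN5 a PCU event
 * additionally feeds the ironlake RPS logic. Vblank and page-flip delivery
 * is stubbed out in this port (the calls below are commented out).
 */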
static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(pipe) {
//		if (de_iir & DE_PIPE_VBLANK(pipe))
//			drm_handle_vblank(dev, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
						 pipe_name(pipe));

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
//			intel_prepare_page_flip(dev, pipe);
//			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* clear the PCH hotplug event before clearing the CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && (de_iir & DE_PCU_EVENT))
		ironlake_rps_change_irq_handler(dev);
}

static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe i;

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);

	for_each_pipe(i) {
//		if (de_iir & (DE_PIPE_VBLANK_IVB(i)))
//			drm_handle_vblank(dev, i);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) {
//			intel_prepare_page_flip(dev, i);
//			intel_finish_page_flip_plane(dev, i);
		}
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* clear the PCH hotplug event before clearing the CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

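/*
 * Top-level interrupt handler for ILK-HSW. The DE master enable and SDEIER
 * are turned off up front so that IIR writes cannot race with newly
 * arriving PCH interrupts; both are restored once the GT, DE and PM
 * sources have been drained.
 */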
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	intel_uncore_check_errors(dev);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		if (INTEL_INFO(dev)->gen >= 6)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (INTEL_INFO(dev)->gen >= 7)
			ivb_display_irq_handler(dev, de_iir);
		else
			ilk_display_irq_handler(dev, de_iir);
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			gen6_rps_irq_handler(dev_priv, pm_iir);
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
		}
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return ret;
}

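/*
 * BDW interrupt handler. GEN8 replaces the flat DEIIR layout with a
 * hierarchy: GEN8_MASTER_IRQ reports which of the GT, DE misc, DE port,
 * per-pipe and PCH domains fired, and each domain then has its own IIR to
 * decode and ack.
 */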
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl;
	irqreturn_t ret = IRQ_NONE;
	uint32_t tmp = 0;
	enum pipe pipe;

	atomic_inc(&dev_priv->irq_received);

	master_ctl = I915_READ(GEN8_MASTER_IRQ);
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
	if (!master_ctl)
		return IRQ_NONE;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		tmp = I915_READ(GEN8_DE_MISC_IIR);
		if (tmp & GEN8_DE_MISC_GSE)
			intel_opregion_asle_intr(dev);
		else if (tmp)
			DRM_ERROR("Unexpected DE Misc interrupt\n");
		else
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");

		if (tmp) {
			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
			ret = IRQ_HANDLED;
		}
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		tmp = I915_READ(GEN8_DE_PORT_IIR);
		if (tmp & GEN8_AUX_CHANNEL_A)
			dp_aux_irq_handler(dev);
		else if (tmp)
			DRM_ERROR("Unexpected DE Port interrupt\n");
		else
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");

		if (tmp) {
			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
			ret = IRQ_HANDLED;
		}
	}

	for_each_pipe(pipe) {
		uint32_t pipe_iir;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
//		if (pipe_iir & GEN8_PIPE_VBLANK)
//			drm_handle_vblank(dev, pipe);

		if (pipe_iir & GEN8_PIPE_FLIP_DONE) {
//			intel_prepare_page_flip(dev, pipe);
//			intel_finish_page_flip_plane(dev, pipe);
		}

		if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(dev, pipe);

		if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
								  false))
				DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
						 pipe_name(pipe));
		}

		if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
				  pipe_name(pipe),
				  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
		}

		if (pipe_iir) {
			ret = IRQ_HANDLED;
			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
		} else
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
	}

	if (!HAS_PCH_NOP(dev) && (master_ctl & GEN8_DE_PCH_IRQ)) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		if (pch_iir) {
			I915_WRITE(SDEIIR, pch_iir);
			ret = IRQ_HANDLED;
		}
	}

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return ret;
}

static void i915_error_wake_up(struct drm_i915_private *dev_priv,
			       bool reset_completed)
{
	struct intel_ring_buffer *ring;
	int i;

	/*
	 * Notify all waiters for GPU completion events that reset state has
	 * been changed, and that they need to restart their wait after
	 * checking for potential errors (and bail out to drop locks if there is
	 * a gpu reset pending so that i915_error_work_func can acquire them).
	 */

	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
	for_each_ring(ring, dev_priv, i)
		wake_up_all(&ring->irq_queue);

	/*
	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
	 * reset state is cleared.
	 */
	if (reset_completed)
		wake_up_all(&dev_priv->gpu_error.reset_queue);
}

/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
						    work);
	drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
						    gpu_error);
	struct drm_device *dev = dev_priv->dev;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	int ret = 0; /* i915_reset() is stubbed out in this port; assume success */

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");

		/*
		 * All state reset _must_ be completed before we update the
		 * reset counter, for otherwise waiters might miss the reset
		 * pending state and not properly drop locks, resulting in
		 * deadlocks with the reset work.
		 */
//		ret = i915_reset(dev);

//       intel_display_handle_reset(dev);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before the counter increment.
			 */
			atomic_inc(&dev_priv->gpu_error.reset_counter);

		} else {
			atomic_set_mask(I915_WEDGED, &error->reset_counter);
		}

		/*
		 * Note: The wake_up also serves as a memory barrier so that
		 * waiters see the updated value of the reset counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, true);
	}
}

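/*
 * Dump the render error state (EIR) to the log and write-to-clear it. Any
 * bits that survive the clear are considered stuck and are masked in EMR
 * so they cannot keep firing.
 */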
static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

//	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset work function
		 * i915_error_work_func doesn't deadlock trying to grab various
		 * locks. By bumping the reset counter first, the woken
		 * processes will see a reset in progress and back off,
		 * releasing their locks and then wait for the reset completion.
		 * We must do this for _all_ gpu waiters that might hold locks
		 * that the reset work needs to acquire.
		 *
		 * Note: The wake_up serves as the required memory barrier to
		 * ensure that the waiters see the updated value of the reset
		 * counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, false);
	}

	/*
	 * Our reset work can grab modeset locks (since it needs to reset the
	 * state of outstanding pageflips). Hence it must not be run on our own
	 * dev-priv->wq work queue for otherwise the flush_work in the pageflip
	 * code will deadlock.
	 */
	schedule_work(&dev_priv->gpu_error.work);
}

#if 0
static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL ||
	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
	    !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
					i915_gem_obj_ggtt_offset(obj);
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}

#endif

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);

	/* maintain vblank delivery even in deep C-states */
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	imr = I915_READ(VLV_IMR);
	if (pipe == PIPE_A)
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int gen8_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));

	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_ENABLE |
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	imr = I915_READ(VLV_IMR);
	if (pipe == PIPE_A)
		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void gen8_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

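/*
 * Hangcheck helpers: the functions below inspect per-ring state (last
 * completed seqno, ACTHD, pending semaphore waits) to decide whether a
 * ring is still making progress or is genuinely stuck.
 */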
static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}

static bool
ring_idle(struct intel_ring_buffer *ring, u32 seqno)
{
	return (list_empty(&ring->request_list) ||
		i915_seqno_passed(seqno, ring_last_seqno(ring)));
}

static struct intel_ring_buffer *
semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 cmd, ipehr, acthd, acthd_min;

	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
	if ((ipehr & ~(0x3 << 16)) !=
	    (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
		return NULL;

	/* ACTHD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX.
	 */
	acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
	acthd_min = max((int)acthd - 3 * 4, 0);
	do {
		cmd = ioread32(ring->virtual_start + acthd);
		if (cmd == ipehr)
			break;

		acthd -= 4;
		if (acthd < acthd_min)
			return NULL;
	} while (1);

	*seqno = ioread32(ring->virtual_start+acthd+4)+1;
	return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
}

static int semaphore_passed(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_ring_buffer *signaller;
	u32 seqno, ctl;

	ring->hangcheck.deadlock = true;

	signaller = semaphore_waits_for(ring, &seqno);
	if (signaller == NULL || signaller->hangcheck.deadlock)
		return -1;

	/* cursory check for an unkickable deadlock */
	ctl = I915_READ_CTL(signaller);
	if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
		return -1;

	return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
}

static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
	struct intel_ring_buffer *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		ring->hangcheck.deadlock = false;
}

static enum intel_ring_hangcheck_action
ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	if (ring->hangcheck.acthd != acthd)
		return HANGCHECK_ACTIVE;

	if (IS_GEN2(dev))
		return HANGCHECK_HUNG;

	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
	tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		DRM_ERROR("Kicking stuck wait on %s\n",
			  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return HANGCHECK_KICK;
	}

	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
		switch (semaphore_passed(ring)) {
		default:
			return HANGCHECK_HUNG;
		case 1:
			DRM_ERROR("Kicking stuck semaphore on %s\n",
				  ring->name);
			I915_WRITE_CTL(ring, tmp);
			return HANGCHECK_KICK;
		case 0:
			return HANGCHECK_WAIT;
		}
	}

	return HANGCHECK_HUNG;
}

/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track of per-ring seqno progress and
 * if there is no progress, the hangcheck score for that ring is increased.
 * Further, acthd is inspected to see if the ring is stuck. In the stuck case
 * we kick the ring. If we see no progress on three subsequent calls
 * we assume the chip is wedged and try to fix it by resetting the chip.
 */
static void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;
	int busy_count = 0, rings_hung = 0;
	bool stuck[I915_NUM_RINGS] = { 0 };
#define BUSY 1
#define KICK 5
#define HUNG 20
#define FIRE 30

	if (!i915_enable_hangcheck)
		return;

	for_each_ring(ring, dev_priv, i) {
		u32 seqno, acthd;
		bool busy = true;

		semaphore_clear_deadlocks(dev_priv);

		seqno = ring->get_seqno(ring, false);
		acthd = intel_ring_get_active_head(ring);

		if (ring->hangcheck.seqno == seqno) {
			if (ring_idle(ring, seqno)) {
//               if (waitqueue_active(&ring->irq_queue)) {
					/* Issue a wake-up to catch stuck h/w. */
//                   DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
//                         ring->name);
//                   wake_up_all(&ring->irq_queue);
//               } else
					busy = false;
			} else {
				/* We always increment the hangcheck score
				 * if the ring is busy and still processing
				 * the same request, so that no single request
				 * can run indefinitely (such as a chain of
				 * batches). The only time we do not increment
				 * the hangcheck score on this ring is when
				 * the ring is in a legitimate wait for another
				 * ring. In that case the waiting ring is a
				 * victim and we want to be sure we catch the
				 * right culprit. Then every time we do kick
				 * the ring, add a small increment to the
				 * score so that we can catch a batch that is
				 * being repeatedly kicked and so responsible
				 * for stalling the machine.
				 */
				ring->hangcheck.action = ring_stuck(ring,
								    acthd);

				switch (ring->hangcheck.action) {
				case HANGCHECK_IDLE:
				case HANGCHECK_WAIT:
					break;
				case HANGCHECK_ACTIVE:
					ring->hangcheck.score += BUSY;
					break;
				case HANGCHECK_KICK:
					ring->hangcheck.score += KICK;
					break;
				case HANGCHECK_HUNG:
					ring->hangcheck.score += HUNG;
					stuck[i] = true;
					break;
				}
			}
		} else {
			ring->hangcheck.action = HANGCHECK_ACTIVE;

			/* Gradually reduce the count so that we catch DoS
			 * attempts across multiple batches.
			 */
			if (ring->hangcheck.score > 0)
				ring->hangcheck.score--;
		}

		ring->hangcheck.seqno = seqno;
		ring->hangcheck.acthd = acthd;
		busy_count += busy;
	}

	for_each_ring(ring, dev_priv, i) {
		if (ring->hangcheck.score > FIRE) {
			DRM_INFO("%s on %s\n",
				 stuck[i] ? "stuck" : "no progress",
				 ring->name);
			rings_hung++;
		}
	}

//   if (rings_hung)
//       return i915_handle_error(dev, true);

}

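/*
 * Interrupt setup. The *_irq_preinstall hooks below mask and ack every
 * interrupt source; the matching *_irq_postinstall hooks then unmask only
 * the sources each platform actually uses.
 */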
static void ibx_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	/*
	 * SDEIER is also touched by the interrupt handler to work around missed
	 * PCH interrupts. Hence we can't update it after the interrupt handler
	 * is enabled - instead we unconditionally enable all PCH interrupt
	 * sources here, but then only unmask them as needed with SDEIMR.
	 */
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}

static void gen5_gt_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	if (INTEL_INFO(dev)->gen >= 6) {
		/* and PM */
		I915_WRITE(GEN6_PMIMR, 0xffffffff);
		I915_WRITE(GEN6_PMIER, 0x0);
		POSTING_READ(GEN6_PMIER);
	}
}

/* drm_dma.h hooks */
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(HWSTAM, 0xeffe);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	gen5_gt_irq_preinstall(dev);

	ibx_irq_preinstall(dev);
}

static void valleyview_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/* and GT */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	gen5_gt_irq_preinstall(dev);

	I915_WRITE(DPINVGTT, 0xff);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

static void gen8_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	/* IIR can theoretically queue up two events. Be paranoid */
#define GEN8_IRQ_INIT_NDX(type, which) do { \
		I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
		POSTING_READ(GEN8_##type##_IMR(which)); \
		I915_WRITE(GEN8_##type##_IER(which), 0); \
		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
		POSTING_READ(GEN8_##type##_IIR(which)); \
		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	} while (0)

#define GEN8_IRQ_INIT(type) do { \
		I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
		POSTING_READ(GEN8_##type##_IMR); \
		I915_WRITE(GEN8_##type##_IER, 0); \
		I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
		POSTING_READ(GEN8_##type##_IIR); \
		I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
	} while (0)

	GEN8_IRQ_INIT_NDX(GT, 0);
	GEN8_IRQ_INIT_NDX(GT, 1);
	GEN8_IRQ_INIT_NDX(GT, 2);
	GEN8_IRQ_INIT_NDX(GT, 3);

	for_each_pipe(pipe) {
		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe);
	}

	GEN8_IRQ_INIT(DE_PORT);
	GEN8_IRQ_INIT(DE_MISC);
	GEN8_IRQ_INIT(PCU);
#undef GEN8_IRQ_INIT
#undef GEN8_IRQ_INIT_NDX

	POSTING_READ(GEN8_PCU_IIR);

	ibx_irq_preinstall(dev);
}

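/*
 * Build the PCH hot-plug interrupt mask from the encoders whose pins are
 * currently marked HPD_ENABLED, then program the shared hot-plug detect
 * pulse-duration register.
 */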
static void ibx_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 hotplug_irqs, hotplug, enabled_irqs = 0;

	if (HAS_PCH_IBX(dev)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 *
	 * This register is the same on all known PCH chips.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void ibx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 mask;

	if (HAS_PCH_NOP(dev))
		return;

	if (HAS_PCH_IBX(dev)) {
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
		       SDE_TRANSA_FIFO_UNDER | SDE_POISON;
	} else {
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;

		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
	}

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, ~mask);
}

static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	dev_priv->gt_irq_mask = ~0;
	if (HAS_L3_DPF(dev)) {
		/* L3 parity interrupt is always unmasked. */
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
		gt_irqs |= GT_PARITY_ERROR(dev);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev)) {
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
			   ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	I915_WRITE(GTIER, gt_irqs);
	POSTING_READ(GTIER);

	if (INTEL_INFO(dev)->gen >= 6) {
		pm_irqs |= GEN6_PM_RPS_EVENTS;

		if (HAS_VEBOX(dev))
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;

		dev_priv->pm_irq_mask = 0xffffffff;
		I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		I915_WRITE(GEN6_PMIER, pm_irqs);
		POSTING_READ(GEN6_PMIER);
	}
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
	unsigned long irqflags;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 display_mask, extra_mask;

	if (INTEL_INFO(dev)->gen >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
				DE_PLANEB_FLIP_DONE_IVB |
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
				DE_ERR_INT_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB);

		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
				DE_AUX_CHANNEL_A |
				DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
				DE_POISON);
		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
	}

	dev_priv->irq_mask = ~display_mask;

	/* these registers should always be able to generate an irq */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER, display_mask | extra_mask);
	POSTING_READ(DEIER);

	gen5_gt_irq_postinstall(dev);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	}

	return 0;
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV |
		PIPE_CRC_DONE_ENABLE;
	unsigned long irqflags;

	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	/*
	 * Leave vblank interrupts masked initially. Enable/disable will
	 * toggle them based on usage.
	 */
	dev_priv->irq_mask = (~enable_mask) |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, enable_mask);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(PIPESTAT(0), 0xffff);
	I915_WRITE(PIPESTAT(1), 0xffff);
	POSTING_READ(VLV_IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
	i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

	gen5_gt_irq_postinstall(dev);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}

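/*
 * GEN8 postinstall: program the four GT IIR banks and the per-pipe, port
 * and misc display banks, warning if any event slipped through the
 * preinstall masking.
 */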
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	int i;

	/* These are interrupts we'll toggle with the ring mask register */
	uint32_t gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
	};

	for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) {
		u32 tmp = I915_READ(GEN8_GT_IIR(i));
		if (tmp)
			DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
				  i, tmp);
		I915_WRITE(GEN8_GT_IMR(i), ~gt_interrupts[i]);
		I915_WRITE(GEN8_GT_IER(i), gt_interrupts[i]);
	}
	POSTING_READ(GEN8_GT_IER(0));
}

static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE |
		GEN8_PIPE_CDCLK_CRC_DONE |
		GEN8_PIPE_FIFO_UNDERRUN |
		GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK;
	int pipe;

	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;

	for_each_pipe(pipe) {
		u32 tmp = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (tmp)
			DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
				  pipe, tmp);
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		I915_WRITE(GEN8_DE_PIPE_IER(pipe), de_pipe_enables);
	}
	POSTING_READ(GEN8_DE_PIPE_ISR(0));

	I915_WRITE(GEN8_DE_PORT_IMR, ~GEN8_AUX_CHANNEL_A);
	I915_WRITE(GEN8_DE_PORT_IER, GEN8_AUX_CHANNEL_A);
	POSTING_READ(GEN8_DE_PORT_IER);
}

static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	ibx_irq_postinstall(dev);

	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static void gen8_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(GEN8_MASTER_IRQ, 0);

#define GEN8_IRQ_FINI_NDX(type, which) do { \
		I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
		I915_WRITE(GEN8_##type##_IER(which), 0); \
		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	} while (0)

#define GEN8_IRQ_FINI(type) do { \
		I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
		I915_WRITE(GEN8_##type##_IER, 0); \
		I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
	} while (0)

	GEN8_IRQ_FINI_NDX(GT, 0);
	GEN8_IRQ_FINI_NDX(GT, 1);
	GEN8_IRQ_FINI_NDX(GT, 2);
	GEN8_IRQ_FINI_NDX(GT, 3);

	for_each_pipe(pipe) {
		GEN8_IRQ_FINI_NDX(DE_PIPE, pipe);
	}

	GEN8_IRQ_FINI(DE_PORT);
	GEN8_IRQ_FINI(DE_MISC);
	GEN8_IRQ_FINI(PCU);
#undef GEN8_IRQ_FINI
#undef GEN8_IRQ_FINI_NDX

	POSTING_READ(GEN8_PCU_IIR);
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	if (IS_GEN7(dev))
		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	if (HAS_PCH_NOP(dev))
		return;

	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
}

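/*
 * The gen2 (i8xx) paths below are compiled out in this port and are kept
 * only for reference against the upstream driver.
 */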
#if 0

static void i8xx_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

//   if (!drm_handle_vblank(dev, pipe))
	return false; /* vblank delivery is stubbed out in this port, so
		       * this function currently always reports no flip;
		       * the code below is kept for reference. */

	if ((iir & flip_pending) == 0)
		return false;

//   intel_prepare_page_flip(dev, pipe);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}

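/*
 * Gen2 exposes 16-bit IIR/IMR/IER registers, hence the I915_WRITE16 and
 * I915_READ16 accessors used throughout this handler.
 */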
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	unsigned long irqflags;
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i8xx_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);
		}

		iir = new_iir;
	}

	return IRQ_HANDLED;
}

static void i8xx_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

#endif

static void i915_irq_preinstall(struct drm_device * dev)
3229
{
3230
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3231
	int pipe;
3232
 
3233
	atomic_set(&dev_priv->irq_received, 0);
3234
 
3235
	if (I915_HAS_HOTPLUG(dev)) {
3236
		I915_WRITE(PORT_HOTPLUG_EN, 0);
3237
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3238
	}
3239
 
3240
	I915_WRITE16(HWSTAM, 0xeffe);
3241
	for_each_pipe(pipe)
3242
		I915_WRITE(PIPESTAT(pipe), 0);
3243
	I915_WRITE(IMR, 0xffffffff);
3244
	I915_WRITE(IER, 0x0);
3245
	POSTING_READ(IER);
3246
}
3247
 
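/* Gen3 (i915) postinstall: program EMR first so that only page-table
 * and memory-refresh errors are left unmasked as error sources, then
 * unmask the always-wanted events in IMR and enable them in IER.
 * Hotplug is wired up only when the chipset actually has the port. */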
static int i915_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	unsigned long irqflags;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
 
/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	/* KolibriOS: drm_handle_vblank() and intel_prepare_page_flip() are
	 * not wired up in this port, so every flip is reported as still
	 * pending here.  The upstream logic is kept below for reference. */
//   if (!drm_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

//   intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the PendingFlip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}
 
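/* Gen3 (i915) top-level interrupt handler.  Same drain-IIR-until-quiet
 * structure as the i8xx path, but with 32-bit registers and optional
 * hotplug handling.  Re-reading IIR and looping matters for MSI, where
 * an interrupt only fires on a zero to nonzero transition of IIR (see
 * the comment at the bottom of the loop). */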
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			POSTING_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
 
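/* Gen3 (i915) teardown: stop the hotplug reenable timer first so it
 * cannot reprogram PORT_HOTPLUG_EN after everything has been masked. */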
static void i915_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}
 
static void i965_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
 
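/* Gen4 (i965/G4X) postinstall: IER is derived from the inverse of the
 * IMR mask, with the flip-pending bits kept IMR-only; G4X additionally
 * enables its BSD ring user interrupt and GM45-specific error sources. */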
static int i965_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 error_mask;
	unsigned long irqflags;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}
 
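/* Program PORT_HOTPLUG_EN from the per-pin hpd_stats state; pins marked
 * HPD_DISABLED (e.g. after a hotplug interrupt storm) stay off until
 * i915_reenable_hotplug_timer_func() flips them back to HPD_ENABLED. */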
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
		/* Note HDMI and DP share hotplug bits */
		/* enable bits are the same for all generations */
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
		/* Programming the CRT detection parameters tends
		   to generate a spurious hotplug event about three
		   seconds later.  So just do it once.
		   */
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

		/* Ignore TV since it's buggy */
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}
 
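/* Gen4 (i965/G4X) top-level interrupt handler.  Adds hotplug, DP AUX
 * (G4X only), GMBUS and BSD-ring handling on top of the common
 * drain-IIR-until-quiet loop used by the older paths. */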
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	for (;;) {
		bool blc_event = false;

		irq_received = (iir & ~flip_mask) != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
								  HOTPLUG_INT_STATUS_G4X :
								  HOTPLUG_INT_STATUS_I915);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger,
					      IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915);

			if (IS_G4X(dev) &&
			    (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X))
				dp_aux_irq_handler(dev);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
 
static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
 
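/* Timer callback that re-arms hotplug pins previously marked
 * HPD_DISABLED: each affected connector's polling mode is restored and
 * the hardware reprogrammed via the platform's hpd_irq_setup hook. */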
static void i915_reenable_hotplug_timer_func(unsigned long data)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	unsigned long irqflags;
	int i;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 drm_get_connector_name(connector));
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
 
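/* One-time IRQ setup: pick the vblank counter and timestamping hooks,
 * then install the per-generation preinstall/postinstall/uninstall and
 * handler callbacks for this device. */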
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
		    (unsigned long) dev_priv);

	if (IS_GEN2(dev)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
	}

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_GEN8(dev)) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_preinstall;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			/* The gen2 (i8xx) handlers are compiled out of this
			 * port, so gen2 gets no IRQ callbacks here. */
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
 
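/* (Re)initialize hotplug state: mark every pin enabled, restore each
 * connector's preferred polling mode, then let the platform hook
 * reprogram the hardware under irq_lock. */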
void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	unsigned long irqflags;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
 
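/* The live IMR/IER state is stashed in dev_priv->pc8.regsave so that
 * hsw_pc8_restore_interrupts() can undo exactly this masking later. */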
/* Disable interrupts so we can allow Package C8+. */
void hsw_pc8_disable_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	dev_priv->pc8.regsave.deimr = I915_READ(DEIMR);
	dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR);
	dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR);
	dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
	dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);

	ironlake_disable_display_irq(dev_priv, 0xffffffff);
	ibx_disable_display_interrupt(dev_priv, 0xffffffff);
	ilk_disable_gt_irq(dev_priv, 0xffffffff);
	snb_disable_pm_irq(dev_priv, 0xffffffff);

	dev_priv->pc8.irqs_disabled = true;

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
 
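/* The WARNs below sanity-check that everything is still fully masked,
 * as hsw_pc8_disable_interrupts() left it, before the saved state is
 * re-applied. */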
/* Restore interrupts so we can recover from Package C8+. */
void hsw_pc8_restore_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t val;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	val = I915_READ(DEIMR);
	WARN(val != 0xffffffff, "DEIMR is 0x%08x\n", val);

	val = I915_READ(SDEIMR);
	WARN(val != 0xffffffff, "SDEIMR is 0x%08x\n", val);

	val = I915_READ(GTIMR);
	WARN(val != 0xffffffff, "GTIMR is 0x%08x\n", val);

	val = I915_READ(GEN6_PMIMR);
	WARN(val != 0xffffffff, "GEN6_PMIMR is 0x%08x\n", val);

	dev_priv->pc8.irqs_disabled = false;

	ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
	ibx_enable_display_interrupt(dev_priv, ~dev_priv->pc8.regsave.sdeimr);
	ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
	snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
	I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
 
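/* KolibriOS entry point: forward the interrupt to the per-generation
 * handler installed by intel_irq_init(). */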
irqreturn_t intel_irq_handler(struct drm_device *dev)
{
//    printf("i915 irq\n");
//    printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ;

	return dev->driver->irq_handler(0, dev);
}