/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#define assert_spin_locked(a)
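
/* Per-pin hotplug lookup tables: hpd_ibx/hpd_cpt give the south display
 * engine (PCH) hotplug bits for each hpd_pin, while hpd_mask_i915 and the
 * hpd_status_* tables give the GMCH PORT_HOTPLUG_EN/PORT_HOTPLUG_STAT bits.
 */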

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_gen4[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};


#define pr_err(fmt, ...) \
        printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)


#define DRM_WAKEUP( queue ) wake_up( queue )
#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )

#define MAX_NOPID ((u32)~0)
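
/*
 * Note on the *IMR helpers below: in the hardware interrupt mask registers
 * (DEIMR, GTIMR, GEN6_PMIMR, SDEIMR) a set bit masks the interrupt off, so
 * the "enable" paths clear bits and the "disable" paths set them; each write
 * is followed by a POSTING_READ to flush it to the device while
 * dev_priv->irq_lock is still held.
 */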

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr &= ~mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr |= mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gtimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask &
						interrupt_mask);
		return;
	}

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}
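
/* Only bits set in interrupt_mask are touched: update(dev_priv, bit, bit)
 * unmasks a bit in GTIMR and update(dev_priv, bit, 0) masks it again, which
 * is exactly what the ilk_{enable,disable}_gt_irq wrappers below do. */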

void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask &
						     interrupt_mask);
		return;
	}

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);

		/* Change the state _after_ we've read out the current one. */
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (!was_enabled &&
		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
				      pipe_name(pipe));
		}
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled &&
	    (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask &
						 interrupt_mask);
		return;
	}

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)
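
/* Same two-mask convention as ilk_update_gt_irq: passing (bits, bits)
 * unmasks the SDEIMR bits, passing (bits, 0) masks them. */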

static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		uint32_t tmp = I915_READ(SERR_INT);
		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

		/* Change the state _after_ we've read out the current one. */
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (!was_enabled &&
		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
				      transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts for
 * the other pipes, due to the fact that there's just one interrupt mask/enable
 * bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's just
 * one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}


void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
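
/* PIPESTAT packs enable bits in the high word and sticky status bits in the
 * low word (a status bit is acked by writing 1 to it), so the
 * mask | (mask >> 16) above enables the interrupt and clears any stale
 * status in a single write. */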

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

#if 0
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
#endif

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				"pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
			     int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(cpu_transcoder));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}
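
/* Worked example for the pre-gen4 branch: with htotal = 800, a pixelcount of
 * 1650 decodes to *vpos = 1650 / 800 = 2 and *hpos = 1650 - 2 * 800 = 50;
 * inside the vblank, vpos is then rebased by -vtotal so callers see a small
 * negative distance to the start of the next active frame. */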

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
			      int *max_error,
			      struct timeval *vblank_time,
			      unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}

static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      old_status, connector->status);
	return (old_status != connector->status);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
		}
	}
	 /* if there were no outputs to poll, poll was disabled,
	  * therefore make sure it's enabled when disabling HPD on
	  * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
//       mod_timer(&dev_priv->hotplug_reenable_timer,
//             jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}
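
/* On ILK the delay value runs opposite to frequency: max_delay is the
 * numerically smallest value, so decrementing new_delay (busy_up > max_avg)
 * raises the GPU frequency and incrementing it lowers the frequency, as the
 * clamping above shows. */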

static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
}
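
/* Wakes everyone sleeping on ring->irq_queue (the seqno waiters); each
 * waiter then re-checks the ring's current seqno against its request. */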

#if 0
static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir;
	u8 new_delay;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);

	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		new_delay = dev_priv->rps.cur_delay + 1;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (IS_VALLEYVIEW(dev_priv->dev) &&
		    dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
	} else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (new_delay >= dev_priv->rps.min_delay &&
	    new_delay <= dev_priv->rps.max_delay) {
		if (IS_VALLEYVIEW(dev_priv->dev))
			valleyview_set_rps(dev_priv->dev, new_delay);
		else
			gen6_set_rps(dev_priv->dev, new_delay);
	}

	if (IS_VALLEYVIEW(dev_priv->dev)) {
		/*
		 * On VLV, when we enter RC6 we may not be at the minimum
		 * voltage level, so arm a timer to check.  It should only
		 * fire when there's activity or once after we've entered
		 * RC6, and then won't be re-armed until the next RPS interrupt.
		 */
		mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
				 msecs_to_jiffies(100));
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;
	unsigned long flags;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = I915_L3_PARITY_UEVENT "=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	ilk_disable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

#endif

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
//       i915_handle_error(dev, false);
	}

//	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
//		ivybridge_handle_parity_error(dev);
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5
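/* More than HPD_STORM_THRESHOLD interrupts from one pin inside an
 * HPD_STORM_DETECT_PERIOD (in ms) window is treated as an interrupt storm;
 * the bookkeeping is stubbed out in this port (see the commented
 * time_in_range block in intel_hpd_irq_handler below). */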

static inline void intel_hpd_irq_handler(struct drm_device *dev,
					    u32 hotplug_trigger,
					    const u32 *hpd)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	bool storm_detected = false;

	if (!hotplug_trigger)
		return;

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {

		WARN(((hpd[i] & hotplug_trigger) &&
		      dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED),
		     "Received HPD interrupt although disabled\n");

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		dev_priv->hpd_event_bits |= (1 << i);
//        if (!time_in_range(GetTimerTicks(), dev_priv->hpd_stats[i].hpd_last_jiffies,
//                  dev_priv->hpd_stats[i].hpd_last_jiffies
//                  + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
//            dev_priv->hpd_stats[i].hpd_last_jiffies = GetTimerTicks;
//           dev_priv->hpd_stats[i].hpd_cnt = 0;
//       } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
//           dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
//           DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
//           ret = true;
//       } else {
			dev_priv->hpd_stats[i].hpd_cnt++;
//       }
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}
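
/* GMBUS and DP AUX completions share gmbus_wait_queue; a waiter re-checks
 * its own status register after the wake, so the shared queue is enough. */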

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & GEN6_PM_RPS_EVENTS) {
		spin_lock(&dev_priv->irq_lock);
		dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
		snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS);
		spin_unlock(&dev_priv->irq_lock);

		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
//           i915_handle_error(dev_priv->dev, false);
		}
	}
}
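
/* Note that the VEBOX ring reports through the PM interrupt registers
 * rather than GTIIR, which is why its user/error bits are handled here. */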

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

#if 0
		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}
#endif

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

//        if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
//            gen6_queue_rps_work(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}
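
/* The loop above keeps draining VLV_IIR, GTIIR and GEN6_PMIIR until all
 * three read back zero, acking each source as it is handled, so events that
 * arrive while the handler runs are picked up on the next pass. */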

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
}

static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_A)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_B)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_C)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
			DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}

static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

#if 0
	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");
#endif

	if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");

	if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
#if 0
	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}
#endif

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev);
}

static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

//	if (de_iir & DE_ERR_INT_IVB)
//		ivb_err_int_handler(dev);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);
#if 0
	for (i = 0; i < 3; i++) {
		if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
			drm_handle_vblank(dev, i);
		if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
			intel_prepare_page_flip(dev, i);
			intel_finish_page_flip_plane(dev, i);
		}
	}
#endif

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;
	bool err_int_reenable = false;

	atomic_inc(&dev_priv->irq_received);

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	intel_uncore_check_errors(dev);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* On Haswell, also mask ERR_INT because we don't want to risk
	 * generating "unclaimed register" interrupts from inside the interrupt
	 * handler. */
	if (IS_HASWELL(dev)) {
		spin_lock(&dev_priv->irq_lock);
		err_int_reenable = ~dev_priv->irq_mask & DE_ERR_INT_IVB;
		if (err_int_reenable)
			ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
		spin_unlock(&dev_priv->irq_lock);
	}

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		if (INTEL_INFO(dev)->gen >= 6)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (INTEL_INFO(dev)->gen >= 7)
			ivb_display_irq_handler(dev, de_iir);
		else
			ilk_display_irq_handler(dev, de_iir);
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			gen6_rps_irq_handler(dev_priv, pm_iir);
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
		}
	}

	if (err_int_reenable) {
		spin_lock(&dev_priv->irq_lock);
		if (ivb_can_enable_err_int(dev))
			ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
		spin_unlock(&dev_priv->irq_lock);
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return ret;
}
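
/* Restoring DEIER's master enable (and SDEIER) last re-asserts the interrupt
 * if any IIR bits became pending while the handler ran, so such events are
 * not lost. */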

static void i915_error_wake_up(struct drm_i915_private *dev_priv,
			       bool reset_completed)
{
	struct intel_ring_buffer *ring;
	int i;

	/*
	 * Notify all waiters for GPU completion events that reset state has
	 * been changed, and that they need to restart their wait after
	 * checking for potential errors (and bail out to drop locks if there is
	 * a gpu reset pending so that i915_error_work_func can acquire them).
	 */

	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
	for_each_ring(ring, dev_priv, i)
		wake_up_all(&ring->irq_queue);

	/*
	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
	 * reset state is cleared.
	 */
	if (reset_completed)
		wake_up_all(&dev_priv->gpu_error.reset_queue);
}

#if 0
/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
						    work);
	drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
						    gpu_error);
	struct drm_device *dev = dev_priv->dev;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	int ret;

	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
				   reset_event);

		/*
		 * All state reset _must_ be completed before we update the
		 * reset counter, for otherwise waiters might miss the reset
		 * pending state and not properly drop locks, resulting in
		 * deadlocks with the reset work.
		 */
		ret = i915_reset(dev);

		intel_display_handle_reset(dev);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before
			 * the counter increment.
			 */
			smp_mb__before_atomic_inc();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

			kobject_uevent_env(&dev->primary->kdev.kobj,
					   KOBJ_CHANGE, reset_done_event);
		} else {
			atomic_set(&error->reset_counter, I915_WEDGED);
		}

		/*
		 * Note: The wake_up also serves as a memory barrier so that
		 * waiters see the updated value of the reset counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, true);
	}
}

static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

1678
/**
1679
 * i915_handle_error - handle an error interrupt
1680
 * @dev: drm device
1681
 *
1682
 * Do some basic checking of register state at error interrupt time and
1683
 * dump it to the syslog.  Also call i915_capture_error_state() to make
1684
 * sure we get a record and make it available in debugfs.  Fire a uevent
1685
 * so userspace knows something bad happened (should trigger collection
1686
 * of a ring dump etc.).
1687
 */
1688
void i915_handle_error(struct drm_device *dev, bool wedged)
1689
{
1690
	struct drm_i915_private *dev_priv = dev->dev_private;
1691
 
1692
	i915_capture_error_state(dev);
1693
	i915_report_and_clear_eir(dev);
1694
 
1695
	if (wedged) {
3480 Serge 1696
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
1697
				&dev_priv->gpu_error.reset_counter);
3031 serge 1698
 
1699
		/*
4104 Serge 1700
		 * Wakeup waiting processes so that the reset work function
1701
		 * i915_error_work_func doesn't deadlock trying to grab various
1702
		 * locks. By bumping the reset counter first, the woken
1703
		 * processes will see a reset in progress and back off,
1704
		 * releasing their locks and then wait for the reset completion.
1705
		 * We must do this for _all_ gpu waiters that might hold locks
1706
		 * that the reset work needs to acquire.
1707
		 *
1708
		 * Note: The wake_up serves as the required memory barrier to
1709
		 * ensure that the waiters see the updated value of the reset
1710
		 * counter atomic_t.
3031 serge 1711
		 */
4104 Serge 1712
		i915_error_wake_up(dev_priv, false);
3031 serge 1713
	}
1714
 
4104 Serge 1715
	/*
1716
	 * Our reset work can grab modeset locks (since it needs to reset the
1717
	 * state of outstanding pageflips). Hence it must not be run on our own
1718
	 * dev_priv->wq work queue for otherwise the flush_work in the pageflip
1719
	 * code will deadlock.
1720
	 */
1721
	schedule_work(&dev_priv->gpu_error.work);
3031 serge 1722
}
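/*
 * Typical call sites, as seen elsewhere in this file: the hangcheck path
 * escalates with i915_handle_error(dev, true) once a ring is declared
 * hung, while the IRQ handlers use i915_handle_error(dev, false) on
 * command-parser errors to capture state without forcing a reset (both
 * calls are currently stubbed out in this port).
 */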
1723
 
3746 Serge 1724
static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
3031 serge 1725
{
1726
	drm_i915_private_t *dev_priv = dev->dev_private;
1727
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1728
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1729
	struct drm_i915_gem_object *obj;
1730
	struct intel_unpin_work *work;
1731
	unsigned long flags;
1732
	bool stall_detected;
1733
 
1734
	/* Ignore early vblank irqs */
1735
	if (intel_crtc == NULL)
1736
		return;
1737
 
1738
	spin_lock_irqsave(&dev->event_lock, flags);
1739
	work = intel_crtc->unpin_work;
1740
 
3243 Serge 1741
	if (work == NULL ||
1742
	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
1743
	    !work->enable_stall_check) {
3031 serge 1744
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
1745
		spin_unlock_irqrestore(&dev->event_lock, flags);
1746
		return;
1747
	}
1748
 
1749
	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
1750
	obj = work->pending_flip_obj;
1751
	if (INTEL_INFO(dev)->gen >= 4) {
1752
		int dspsurf = DSPSURF(intel_crtc->plane);
1753
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
4104 Serge 1754
					i915_gem_obj_ggtt_offset(obj);
3031 serge 1755
	} else {
1756
		int dspaddr = DSPADDR(intel_crtc->plane);
4104 Serge 1757
		stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
3031 serge 1758
							crtc->y * crtc->fb->pitches[0] +
1759
							crtc->x * crtc->fb->bits_per_pixel/8);
1760
	}
1761
 
1762
	spin_unlock_irqrestore(&dev->event_lock, flags);
1763
 
1764
	if (stall_detected) {
1765
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
1766
		intel_prepare_page_flip(dev, intel_crtc->plane);
1767
	}
1768
}
1769
 
1770
#endif
1771
 
1772
/* Called from drm generic code, passed 'crtc' which
1773
 * we use as a pipe index
1774
 */
1775
static int i915_enable_vblank(struct drm_device *dev, int pipe)
1776
{
1777
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1778
	unsigned long irqflags;
1779
 
1780
	if (!i915_pipe_enabled(dev, pipe))
1781
		return -EINVAL;
1782
 
1783
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1784
	if (INTEL_INFO(dev)->gen >= 4)
1785
		i915_enable_pipestat(dev_priv, pipe,
1786
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
1787
	else
1788
		i915_enable_pipestat(dev_priv, pipe,
1789
				     PIPE_VBLANK_INTERRUPT_ENABLE);
1790
 
1791
	/* maintain vblank delivery even in deep C-states */
1792
	if (dev_priv->info->gen == 3)
1793
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
1794
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1795
 
1796
	return 0;
1797
}
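/*
 * INSTPM above is a "masked" register: the high 16 bits of a write select
 * which of the low 16 bits actually change, avoiding a read-modify-write.
 * A sketch of the helper macros (defined in i915_reg.h, reproduced here
 * only for illustration):
 */
#if 0
#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
#define _MASKED_BIT_DISABLE(a)	((a) << 16)
#endif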
1798
 
1799
static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
1800
{
1801
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1802
	unsigned long irqflags;
4104 Serge 1803
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
1804
						     DE_PIPE_VBLANK_ILK(pipe);
3031 serge 1805
 
1806
	if (!i915_pipe_enabled(dev, pipe))
1807
		return -EINVAL;
1808
 
1809
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4104 Serge 1810
	ironlake_enable_display_irq(dev_priv, bit);
3031 serge 1811
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1812
 
1813
	return 0;
1814
}
1815
 
1816
static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
1817
{
1818
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1819
	unsigned long irqflags;
1820
	u32 imr;
1821
 
1822
	if (!i915_pipe_enabled(dev, pipe))
1823
		return -EINVAL;
1824
 
1825
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1826
	imr = I915_READ(VLV_IMR);
1827
	if (pipe == 0)
1828
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1829
	else
1830
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1831
	I915_WRITE(VLV_IMR, imr);
1832
	i915_enable_pipestat(dev_priv, pipe,
1833
			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
1834
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1835
 
1836
	return 0;
1837
}
1838
 
1839
/* Called from drm generic code, passed 'crtc' which
1840
 * we use as a pipe index
1841
 */
1842
static void i915_disable_vblank(struct drm_device *dev, int pipe)
1843
{
1844
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1845
	unsigned long irqflags;
1846
 
1847
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1848
	if (dev_priv->info->gen == 3)
1849
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
1850
 
1851
	i915_disable_pipestat(dev_priv, pipe,
1852
			      PIPE_VBLANK_INTERRUPT_ENABLE |
1853
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
1854
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1855
}
1856
 
1857
static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
1858
{
1859
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1860
	unsigned long irqflags;
4104 Serge 1861
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
1862
						     DE_PIPE_VBLANK_ILK(pipe);
3031 serge 1863
 
1864
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4104 Serge 1865
	ironlake_disable_display_irq(dev_priv, bit);
3031 serge 1866
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1867
}
1868
 
1869
static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
1870
{
1871
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1872
	unsigned long irqflags;
1873
	u32 imr;
1874
 
1875
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1876
	i915_disable_pipestat(dev_priv, pipe,
1877
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
1878
	imr = I915_READ(VLV_IMR);
1879
	if (pipe == 0)
1880
		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1881
	else
1882
		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1883
	I915_WRITE(VLV_IMR, imr);
1884
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1885
}
1886
 
1887
static u32
1888
ring_last_seqno(struct intel_ring_buffer *ring)
1889
{
1890
	return list_entry(ring->request_list.prev,
1891
			  struct drm_i915_gem_request, list)->seqno;
1892
}
4104 Serge 1893
 
1894
static bool
1895
ring_idle(struct intel_ring_buffer *ring, u32 seqno)
2351 Serge 1896
{
4104 Serge 1897
	return (list_empty(&ring->request_list) ||
1898
		i915_seqno_passed(seqno, ring_last_seqno(ring)));
1899
}
2351 Serge 1900
 
4104 Serge 1901
static struct intel_ring_buffer *
1902
semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
1903
{
1904
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
1905
	u32 cmd, ipehr, acthd, acthd_min;
2351 Serge 1906
 
4104 Serge 1907
	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
1908
	if ((ipehr & ~(0x3 << 16)) !=
1909
	    (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
1910
		return NULL;
2351 Serge 1911
 
4104 Serge 1912
	/* ACTHD is likely pointing to the dword after the actual command,
1913
	 * so scan backwards until we find the MBOX.
1914
	 */
1915
	acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
1916
	acthd_min = max((int)acthd - 3 * 4, 0);
1917
	do {
1918
		cmd = ioread32(ring->virtual_start + acthd);
1919
		if (cmd == ipehr)
1920
			break;
2351 Serge 1921
 
4104 Serge 1922
		acthd -= 4;
1923
		if (acthd < acthd_min)
1924
			return NULL;
1925
	} while (1);
2351 Serge 1926
 
4104 Serge 1927
	*seqno = ioread32(ring->virtual_start+acthd+4)+1;
1928
	return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
1929
}
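/*
 * Worked example of the index math above, assuming the usual ring enum
 * (RCS = 0, VCS = 1, BCS = 2): a semaphore wait on the render ring with
 * bit 17 of IPEHR clear resolves to ring (0 + (0 + 1)) % 3 = 1, the video
 * ring; with bit 17 set it resolves to (0 + 2) % 3 = 2, the blitter ring.
 */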
2351 Serge 1930
 
4104 Serge 1931
static int semaphore_passed(struct intel_ring_buffer *ring)
1932
{
1933
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
1934
	struct intel_ring_buffer *signaller;
1935
	u32 seqno, ctl;
1936
 
1937
	ring->hangcheck.deadlock = true;
1938
 
1939
	signaller = semaphore_waits_for(ring, &seqno);
1940
	if (signaller == NULL || signaller->hangcheck.deadlock)
1941
		return -1;
1942
 
1943
	/* cursory check for an unkickable deadlock */
1944
	ctl = I915_READ_CTL(signaller);
1945
	if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
1946
		return -1;
1947
 
1948
	return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
1949
}
1950
 
1951
static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
1952
{
1953
	struct intel_ring_buffer *ring;
1954
	int i;
1955
 
1956
	for_each_ring(ring, dev_priv, i)
1957
		ring->hangcheck.deadlock = false;
1958
}
1959
 
1960
static enum intel_ring_hangcheck_action
1961
ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
1962
{
1963
	struct drm_device *dev = ring->dev;
1964
	struct drm_i915_private *dev_priv = dev->dev_private;
1965
	u32 tmp;
1966
 
1967
	if (ring->hangcheck.acthd != acthd)
1968
		return HANGCHECK_ACTIVE;
1969
 
1970
	if (IS_GEN2(dev))
1971
		return HANGCHECK_HUNG;
1972
 
1973
	/* Is the chip hanging on a WAIT_FOR_EVENT?
1974
	 * If so we can simply poke the RB_WAIT bit
1975
	 * and break the hang. This should work on
1976
	 * all but the second generation chipsets.
1977
	 */
1978
	tmp = I915_READ_CTL(ring);
1979
	if (tmp & RING_WAIT) {
1980
		DRM_ERROR("Kicking stuck wait on %s\n",
1981
			  ring->name);
1982
		I915_WRITE_CTL(ring, tmp);
1983
		return HANGCHECK_KICK;
1984
	}
1985
 
1986
	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
1987
		switch (semaphore_passed(ring)) {
1988
		default:
1989
			return HANGCHECK_HUNG;
1990
		case 1:
1991
			DRM_ERROR("Kicking stuck semaphore on %s\n",
1992
				  ring->name);
1993
			I915_WRITE_CTL(ring, tmp);
1994
			return HANGCHECK_KICK;
1995
		case 0:
1996
			return HANGCHECK_WAIT;
1997
		}
1998
	}
1999
 
2000
	return HANGCHECK_HUNG;
2001
}
2002
 
2003
/**
2004
 * This is called when the chip hasn't reported back with completed
2005
 * batchbuffers in a long time. We keep track of seqno progress per ring, and
2006
 * if there is no progress the hangcheck score for that ring is increased.
2007
 * Further, acthd is inspected to see if the ring is stuck. If it is, we
2008
 * kick the ring. If we see no progress on three subsequent calls
2009
 * we assume the chip is wedged and try to fix it by resetting it.
2010
 */
2011
static void i915_hangcheck_elapsed(unsigned long data)
2012
{
2013
	struct drm_device *dev = (struct drm_device *)data;
2014
	drm_i915_private_t *dev_priv = dev->dev_private;
2015
	struct intel_ring_buffer *ring;
2016
	int i;
2017
	int busy_count = 0, rings_hung = 0;
2018
	bool stuck[I915_NUM_RINGS] = { 0 };
2019
#define BUSY 1
2020
#define KICK 5
2021
#define HUNG 20
2022
#define FIRE 30
2023
 
2024
	if (!i915_enable_hangcheck)
2025
		return;
2026
 
2027
	for_each_ring(ring, dev_priv, i) {
2028
		u32 seqno, acthd;
2029
		bool busy = true;
2030
 
2031
		semaphore_clear_deadlocks(dev_priv);
2032
 
2033
		seqno = ring->get_seqno(ring, false);
2034
		acthd = intel_ring_get_active_head(ring);
2035
 
2036
		if (ring->hangcheck.seqno == seqno) {
2037
			if (ring_idle(ring, seqno)) {
2038
//               if (waitqueue_active(&ring->irq_queue)) {
2039
					/* Issue a wake-up to catch stuck h/w. */
2040
//                   DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2041
//                         ring->name);
2042
//                   wake_up_all(&ring->irq_queue);
2043
//                   ring->hangcheck.score += HUNG;
2044
//               } else
2045
					busy = false;
2046
			} else {
2047
				/* We always increment the hangcheck score
2048
				 * if the ring is busy and still processing
2049
				 * the same request, so that no single request
2050
				 * can run indefinitely (such as a chain of
2051
				 * batches). The only time we do not increment
2052
				 * the hangcheck score on this ring, if this
2053
				 * ring is in a legitimate wait for another
2054
				 * ring. In that case the waiting ring is a
2055
				 * victim and we want to be sure we catch the
2056
				 * right culprit. Then every time we do kick
2057
				 * the ring, add a small increment to the
2058
				 * score so that we can catch a batch that is
2059
				 * being repeatedly kicked and so responsible
2060
				 * for stalling the machine.
2061
				 */
2062
				ring->hangcheck.action = ring_stuck(ring,
2063
								    acthd);
2064
 
2065
				switch (ring->hangcheck.action) {
2066
				case HANGCHECK_WAIT:
2067
					break;
2068
				case HANGCHECK_ACTIVE:
2069
					ring->hangcheck.score += BUSY;
2070
					break;
2071
				case HANGCHECK_KICK:
2072
					ring->hangcheck.score += KICK;
2073
					break;
2074
				case HANGCHECK_HUNG:
2075
					ring->hangcheck.score += HUNG;
2076
					stuck[i] = true;
2077
					break;
2078
				}
2079
			}
2080
		} else {
2081
			/* Gradually reduce the count so that we catch DoS
2082
			 * attempts across multiple batches.
2083
			 */
2084
			if (ring->hangcheck.score > 0)
2085
				ring->hangcheck.score--;
2086
		}
2087
 
2088
		ring->hangcheck.seqno = seqno;
2089
		ring->hangcheck.acthd = acthd;
2090
		busy_count += busy;
2091
	}
2092
 
2093
	for_each_ring(ring, dev_priv, i) {
2094
		if (ring->hangcheck.score > FIRE) {
2095
			DRM_INFO("%s on %s\n",
2096
				  stuck[i] ? "stuck" : "no progress",
2097
				  ring->name);
2098
			rings_hung++;
2099
		}
2100
	}
2101
 
2102
//   if (rings_hung)
2103
//       return i915_handle_error(dev, true);
2104
 
2105
}
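/*
 * In contemporary upstream kernels this function runs off a timer that is
 * re-armed whenever new work is submitted; a sketch of that re-arm helper,
 * assuming the gpu_error.hangcheck_timer and DRM_I915_HANGCHECK_JIFFIES
 * wiring of the upstream driver (the timer is not used in this port):
 */
#if 0
void i915_queue_hangcheck(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!i915_enable_hangcheck)
		return;

	mod_timer(&dev_priv->gpu_error.hangcheck_timer,
		  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
#endif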
2106
 
2107
static void ibx_irq_preinstall(struct drm_device *dev)
2108
{
2109
	struct drm_i915_private *dev_priv = dev->dev_private;
2110
 
3746 Serge 2111
	if (HAS_PCH_NOP(dev))
2112
		return;
2113
 
4104 Serge 2114
	/* south display irq */
2115
	I915_WRITE(SDEIMR, 0xffffffff);
3746 Serge 2116
	/*
2117
	 * SDEIER is also touched by the interrupt handler to work around missed
2118
	 * PCH interrupts. Hence we can't update it after the interrupt handler
2119
	 * is enabled - instead we unconditionally enable all PCH interrupt
2120
	 * sources here, but then only unmask them as needed with SDEIMR.
2121
	 */
2122
	I915_WRITE(SDEIER, 0xffffffff);
4104 Serge 2123
	POSTING_READ(SDEIER);
2351 Serge 2124
}
2125
 
4104 Serge 2126
static void gen5_gt_irq_preinstall(struct drm_device *dev)
2127
{
2128
	struct drm_i915_private *dev_priv = dev->dev_private;
2129
 
2130
	/* and GT */
2131
	I915_WRITE(GTIMR, 0xffffffff);
2132
	I915_WRITE(GTIER, 0x0);
2133
	POSTING_READ(GTIER);
2134
 
2135
	if (INTEL_INFO(dev)->gen >= 6) {
2136
		/* and PM */
2137
		I915_WRITE(GEN6_PMIMR, 0xffffffff);
2138
		I915_WRITE(GEN6_PMIER, 0x0);
2139
		POSTING_READ(GEN6_PMIER);
2140
	}
2141
}
2142
 
2143
/* drm_dma.h hooks
2144
*/
2145
static void ironlake_irq_preinstall(struct drm_device *dev)
2146
{
2147
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2148
 
2149
	atomic_set(&dev_priv->irq_received, 0);
2150
 
2151
	I915_WRITE(HWSTAM, 0xeffe);
2152
 
2153
	I915_WRITE(DEIMR, 0xffffffff);
2154
	I915_WRITE(DEIER, 0x0);
2155
	POSTING_READ(DEIER);
2156
 
2157
	gen5_gt_irq_preinstall(dev);
2158
 
2159
	ibx_irq_preinstall(dev);
2160
}
2161
 
3031 serge 2162
static void valleyview_irq_preinstall(struct drm_device *dev)
2163
{
2164
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2165
	int pipe;
2166
 
2167
	atomic_set(&dev_priv->irq_received, 0);
2168
 
2169
	/* VLV magic */
2170
	I915_WRITE(VLV_IMR, 0);
2171
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
2172
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
2173
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
2174
 
2175
	/* and GT */
2176
	I915_WRITE(GTIIR, I915_READ(GTIIR));
2177
	I915_WRITE(GTIIR, I915_READ(GTIIR));
2178
 
4104 Serge 2179
	gen5_gt_irq_preinstall(dev);
2180
 
3031 serge 2181
	I915_WRITE(DPINVGTT, 0xff);
2182
 
2183
	I915_WRITE(PORT_HOTPLUG_EN, 0);
2184
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2185
	for_each_pipe(pipe)
2186
		I915_WRITE(PIPESTAT(pipe), 0xffff);
2187
	I915_WRITE(VLV_IIR, 0xffffffff);
2188
	I915_WRITE(VLV_IMR, 0xffffffff);
2189
	I915_WRITE(VLV_IER, 0x0);
2190
	POSTING_READ(VLV_IER);
2191
}
2192
 
3746 Serge 2193
static void ibx_hpd_irq_setup(struct drm_device *dev)
2194
{
2195
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2196
	struct drm_mode_config *mode_config = &dev->mode_config;
2197
	struct intel_encoder *intel_encoder;
4104 Serge 2198
	u32 hotplug_irqs, hotplug, enabled_irqs = 0;
3746 Serge 2199
 
2200
	if (HAS_PCH_IBX(dev)) {
4104 Serge 2201
		hotplug_irqs = SDE_HOTPLUG_MASK;
3746 Serge 2202
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2203
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
4104 Serge 2204
				enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
3746 Serge 2205
	} else {
4104 Serge 2206
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3746 Serge 2207
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2208
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
4104 Serge 2209
				enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
3746 Serge 2210
	}
2211
 
4104 Serge 2212
	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3746 Serge 2213
 
2214
	/*
2351 Serge 2215
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
2216
	 * duration to 2ms (which is the minimum in the Display Port spec)
2217
	 *
2218
	 * This register is the same on all known PCH chips.
2219
	 */
2220
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
2221
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
2222
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
2223
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
2224
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
2225
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
2226
}
2227
 
3480 Serge 2228
static void ibx_irq_postinstall(struct drm_device *dev)
2229
{
2230
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2231
	u32 mask;
2232
 
3746 Serge 2233
	if (HAS_PCH_NOP(dev))
2234
		return;
2235
 
4104 Serge 2236
	if (HAS_PCH_IBX(dev)) {
2237
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
2238
		       SDE_TRANSA_FIFO_UNDER | SDE_POISON;
2239
	} else {
2240
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;
2241
 
2242
		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
2243
	}
2244
 
3480 Serge 2245
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2246
	I915_WRITE(SDEIMR, ~mask);
2247
}
2248
 
4104 Serge 2249
static void gen5_gt_irq_postinstall(struct drm_device *dev)
2351 Serge 2250
{
4104 Serge 2251
	struct drm_i915_private *dev_priv = dev->dev_private;
2252
	u32 pm_irqs, gt_irqs;
2351 Serge 2253
 
4104 Serge 2254
	pm_irqs = gt_irqs = 0;
2351 Serge 2255
 
2256
	dev_priv->gt_irq_mask = ~0;
4104 Serge 2257
	if (HAS_L3_GPU_CACHE(dev)) {
2258
		/* L3 parity interrupt is always unmasked. */
2259
		dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2260
		gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2261
	}
2351 Serge 2262
 
4104 Serge 2263
	gt_irqs |= GT_RENDER_USER_INTERRUPT;
2264
	if (IS_GEN5(dev)) {
2265
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
2266
			   ILK_BSD_USER_INTERRUPT;
2267
	} else {
2268
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
2269
	}
2351 Serge 2270
 
4104 Serge 2271
	I915_WRITE(GTIIR, I915_READ(GTIIR));
2272
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2273
	I915_WRITE(GTIER, gt_irqs);
2351 Serge 2274
	POSTING_READ(GTIER);
2275
 
4104 Serge 2276
	if (INTEL_INFO(dev)->gen >= 6) {
2277
		pm_irqs |= GEN6_PM_RPS_EVENTS;
2351 Serge 2278
 
4104 Serge 2279
		if (HAS_VEBOX(dev))
2280
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
2281
 
2282
		dev_priv->pm_irq_mask = 0xffffffff;
2283
		I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
2284
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
2285
		I915_WRITE(GEN6_PMIER, pm_irqs);
2286
		POSTING_READ(GEN6_PMIER);
2351 Serge 2287
	}
2288
}
2289
 
4104 Serge 2290
static int ironlake_irq_postinstall(struct drm_device *dev)
3031 serge 2291
{
4104 Serge 2292
	unsigned long irqflags;
3031 serge 2293
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
4104 Serge 2294
	u32 display_mask, extra_mask;
2295
 
2296
	if (INTEL_INFO(dev)->gen >= 7) {
2297
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
2298
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3031 serge 2299
		DE_PLANEB_FLIP_DONE_IVB |
4104 Serge 2300
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
2301
				DE_ERR_INT_IVB);
2302
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
2303
			      DE_PIPEA_VBLANK_IVB);
2351 Serge 2304
 
4104 Serge 2305
		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
2306
	} else {
2307
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
2308
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
2309
				DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
2310
				DE_PIPEA_FIFO_UNDERRUN | DE_POISON);
2311
		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
2312
	}
2313
 
3031 serge 2314
	dev_priv->irq_mask = ~display_mask;
2315
 
2316
	/* should always be able to generate an irq */
2317
	I915_WRITE(DEIIR, I915_READ(DEIIR));
2318
	I915_WRITE(DEIMR, dev_priv->irq_mask);
4104 Serge 2319
	I915_WRITE(DEIER, display_mask | extra_mask);
3031 serge 2320
	POSTING_READ(DEIER);
2321
 
4104 Serge 2322
	gen5_gt_irq_postinstall(dev);
3031 serge 2323
 
4104 Serge 2324
	ibx_irq_postinstall(dev);
3031 serge 2325
 
4104 Serge 2326
	if (IS_IRONLAKE_M(dev)) {
2327
		/* Enable PCU event interrupts
2328
		 *
2329
		 * spinlocking not required here for correctness since interrupt
2330
		 * setup is guaranteed to run in single-threaded context. But we
2331
		 * need it to make the assert_spin_locked happy. */
2332
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2333
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
2334
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2335
	}
3031 serge 2336
 
2337
	return 0;
2338
}
2339
 
2340
static int valleyview_irq_postinstall(struct drm_device *dev)
2341
{
2342
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2343
	u32 enable_mask;
2344
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
4104 Serge 2345
	unsigned long irqflags;
3031 serge 2346
 
2347
	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
2348
	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2349
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2350
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2351
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2352
 
2353
	/*
2354
	 *Leave vblank interrupts masked initially.  enable/disable will
2355
	 * toggle them based on usage.
2356
	 */
2357
	dev_priv->irq_mask = (~enable_mask) |
2358
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2359
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2360
 
3480 Serge 2361
	I915_WRITE(PORT_HOTPLUG_EN, 0);
2362
	POSTING_READ(PORT_HOTPLUG_EN);
2363
 
3031 serge 2364
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
2365
	I915_WRITE(VLV_IER, enable_mask);
2366
	I915_WRITE(VLV_IIR, 0xffffffff);
2367
	I915_WRITE(PIPESTAT(0), 0xffff);
2368
	I915_WRITE(PIPESTAT(1), 0xffff);
2369
	POSTING_READ(VLV_IER);
2370
 
4104 Serge 2371
	/* Interrupt setup is already guaranteed to be single-threaded, this is
2372
	 * just to make the assert_spin_locked check happy. */
2373
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3031 serge 2374
	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
3480 Serge 2375
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
3031 serge 2376
	i915_enable_pipestat(dev_priv, 1, pipestat_enable);
4104 Serge 2377
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3031 serge 2378
 
2379
	I915_WRITE(VLV_IIR, 0xffffffff);
2380
	I915_WRITE(VLV_IIR, 0xffffffff);
2381
 
4104 Serge 2382
	gen5_gt_irq_postinstall(dev);
3243 Serge 2383
 
3031 serge 2384
	/* ack & enable invalid PTE error interrupts */
2385
#if 0 /* FIXME: add support to irq handler for checking these bits */
2386
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
2387
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
2388
#endif
2389
 
2390
	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3480 Serge 2391
 
2392
	return 0;
2393
}
2394
 
3031 serge 2395
static void valleyview_irq_uninstall(struct drm_device *dev)
2396
{
2397
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2398
	int pipe;
2399
 
2400
	if (!dev_priv)
2401
		return;
2402
 
2403
	for_each_pipe(pipe)
2404
		I915_WRITE(PIPESTAT(pipe), 0xffff);
2405
 
2406
	I915_WRITE(HWSTAM, 0xffffffff);
2407
	I915_WRITE(PORT_HOTPLUG_EN, 0);
2408
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2409
	for_each_pipe(pipe)
2410
		I915_WRITE(PIPESTAT(pipe), 0xffff);
2411
	I915_WRITE(VLV_IIR, 0xffffffff);
2412
	I915_WRITE(VLV_IMR, 0xffffffff);
2413
	I915_WRITE(VLV_IER, 0x0);
2414
	POSTING_READ(VLV_IER);
2415
}
2416
 
2417
static void ironlake_irq_uninstall(struct drm_device *dev)
2418
{
2419
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2420
 
2421
	if (!dev_priv)
2422
		return;
2423
 
2424
	I915_WRITE(HWSTAM, 0xffffffff);
2425
 
2426
	I915_WRITE(DEIMR, 0xffffffff);
2427
	I915_WRITE(DEIER, 0x0);
2428
	I915_WRITE(DEIIR, I915_READ(DEIIR));
4104 Serge 2429
	if (IS_GEN7(dev))
2430
		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
3031 serge 2431
 
2432
	I915_WRITE(GTIMR, 0xffffffff);
2433
	I915_WRITE(GTIER, 0x0);
2434
	I915_WRITE(GTIIR, I915_READ(GTIIR));
2435
 
3746 Serge 2436
	if (HAS_PCH_NOP(dev))
2437
		return;
2438
 
3031 serge 2439
	I915_WRITE(SDEIMR, 0xffffffff);
2440
	I915_WRITE(SDEIER, 0x0);
2441
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
4104 Serge 2442
	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
2443
		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
3031 serge 2444
}
2445
 
2446
#if 0
2447
 
2448
static void i8xx_irq_preinstall(struct drm_device * dev)
2449
{
2450
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2451
	int pipe;
2452
 
2453
	atomic_set(&dev_priv->irq_received, 0);
2454
 
2455
	for_each_pipe(pipe)
2456
		I915_WRITE(PIPESTAT(pipe), 0);
2457
	I915_WRITE16(IMR, 0xffff);
2458
	I915_WRITE16(IER, 0x0);
2459
	POSTING_READ16(IER);
2460
}
2461
 
2462
static int i8xx_irq_postinstall(struct drm_device *dev)
2463
{
2464
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2465
 
2466
	I915_WRITE16(EMR,
2467
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2468
 
2469
	/* Unmask the interrupts that we always want on. */
2470
	dev_priv->irq_mask =
2471
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2472
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2473
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2474
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2475
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2476
	I915_WRITE16(IMR, dev_priv->irq_mask);
2477
 
2478
	I915_WRITE16(IER,
2479
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2480
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2481
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2482
		     I915_USER_INTERRUPT);
2483
	POSTING_READ16(IER);
2484
 
2485
	return 0;
2486
}
2487
 
3746 Serge 2488
/*
2489
 * Returns true when a page flip has completed.
2490
 */
2491
static bool i8xx_handle_vblank(struct drm_device *dev,
2492
			       int pipe, u16 iir)
2493
{
2494
	drm_i915_private_t *dev_priv = dev->dev_private;
2495
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
2496
 
2497
//   if (!drm_handle_vblank(dev, pipe))
2498
	return false;
2499
 
2500
	if ((iir & flip_pending) == 0)
2501
		return false;
2502
 
2503
//   intel_prepare_page_flip(dev, pipe);
2504
 
2505
	/* We detect FlipDone by looking for the change in PendingFlip from '1'
2506
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
2507
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
2508
	 * the flip is completed (no longer pending). Since this doesn't raise
2509
	 * an interrupt per se, we watch for the change at vblank.
2510
	 */
2511
	if (I915_READ16(ISR) & flip_pending)
2512
		return false;
2513
 
2514
	intel_finish_page_flip(dev, pipe);
2515
 
2516
	return true;
2517
}
2518
 
3243 Serge 2519
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3031 serge 2520
{
2521
	struct drm_device *dev = (struct drm_device *) arg;
2522
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2523
	u16 iir, new_iir;
2524
	u32 pipe_stats[2];
2525
	unsigned long irqflags;
2526
	int pipe;
2527
	u16 flip_mask =
2528
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2529
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2530
 
2531
	atomic_inc(&dev_priv->irq_received);
2532
 
2533
	iir = I915_READ16(IIR);
2534
	if (iir == 0)
2535
		return IRQ_NONE;
2536
 
2537
	while (iir & ~flip_mask) {
2538
		/* Can't rely on pipestat interrupt bit in iir as it might
2539
		 * have been cleared after the pipestat interrupt was received.
2540
		 * It doesn't set the bit in iir again, but it still produces
2541
		 * interrupts (for non-MSI).
2542
		 */
2543
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4104 Serge 2544
//       if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2545
//           i915_handle_error(dev, false);
3031 serge 2546
 
2547
		for_each_pipe(pipe) {
2548
			int reg = PIPESTAT(pipe);
2549
			pipe_stats[pipe] = I915_READ(reg);
2550
 
2551
			/*
2552
			 * Clear the PIPE*STAT regs before the IIR
2553
			 */
2554
			if (pipe_stats[pipe] & 0x8000ffff) {
2555
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2556
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
2557
							 pipe_name(pipe));
2558
				I915_WRITE(reg, pipe_stats[pipe]);
2559
			}
2560
		}
2561
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2562
 
2563
		I915_WRITE16(IIR, iir & ~flip_mask);
2564
		new_iir = I915_READ16(IIR); /* Flush posted writes */
2565
 
2566
		i915_update_dri1_breadcrumb(dev);
2567
 
2568
		if (iir & I915_USER_INTERRUPT)
2569
			notify_ring(dev, &dev_priv->ring[RCS]);
2570
 
2571
		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
3746 Serge 2572
		    i8xx_handle_vblank(dev, 0, iir))
2573
			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
3031 serge 2574
 
2575
		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
3746 Serge 2576
		    i8xx_handle_vblank(dev, 1, iir))
2577
			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);
3031 serge 2578
 
2579
		iir = new_iir;
2580
	}
2581
 
2582
	return IRQ_HANDLED;
2583
}
2584
 
2585
static void i8xx_irq_uninstall(struct drm_device * dev)
2586
{
2587
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2588
	int pipe;
2589
 
2590
	for_each_pipe(pipe) {
2591
		/* Clear enable bits; then clear status bits */
2592
		I915_WRITE(PIPESTAT(pipe), 0);
2593
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2594
	}
2595
	I915_WRITE16(IMR, 0xffff);
2596
	I915_WRITE16(IER, 0x0);
2597
	I915_WRITE16(IIR, I915_READ16(IIR));
2598
}
2599
 
2600
#endif
2601
 
2602
static void i915_irq_preinstall(struct drm_device * dev)
2603
{
2604
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2605
	int pipe;
2606
 
2607
	atomic_set(&dev_priv->irq_received, 0);
2608
 
2609
	if (I915_HAS_HOTPLUG(dev)) {
2610
		I915_WRITE(PORT_HOTPLUG_EN, 0);
2611
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2612
	}
2613
 
2614
	I915_WRITE16(HWSTAM, 0xeffe);
2615
	for_each_pipe(pipe)
2616
		I915_WRITE(PIPESTAT(pipe), 0);
2617
	I915_WRITE(IMR, 0xffffffff);
2618
	I915_WRITE(IER, 0x0);
2619
	POSTING_READ(IER);
2620
}
2621
 
2622
static int i915_irq_postinstall(struct drm_device *dev)
2623
{
2624
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2625
	u32 enable_mask;
2626
 
2627
	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2628
 
2629
	/* Unmask the interrupts that we always want on. */
2630
	dev_priv->irq_mask =
2631
		~(I915_ASLE_INTERRUPT |
2632
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2633
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2634
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2635
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2636
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2637
 
2638
	enable_mask =
2639
		I915_ASLE_INTERRUPT |
2640
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2641
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2642
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2643
		I915_USER_INTERRUPT;
3480 Serge 2644
 
3031 serge 2645
	if (I915_HAS_HOTPLUG(dev)) {
3480 Serge 2646
		I915_WRITE(PORT_HOTPLUG_EN, 0);
2647
		POSTING_READ(PORT_HOTPLUG_EN);
2648
 
3031 serge 2649
		/* Enable in IER... */
2650
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
2651
		/* and unmask in IMR */
2652
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
2653
	}
2654
 
2655
	I915_WRITE(IMR, dev_priv->irq_mask);
2656
	I915_WRITE(IER, enable_mask);
2657
	POSTING_READ(IER);
2658
 
3480 Serge 2659
//	intel_opregion_enable_asle(dev);
2660
 
2661
	return 0;
2662
}
2663
 
3746 Serge 2664
/*
2665
 * Returns true when a page flip has completed.
2666
 */
2667
static bool i915_handle_vblank(struct drm_device *dev,
2668
			       int plane, int pipe, u32 iir)
3480 Serge 2669
{
3746 Serge 2670
	drm_i915_private_t *dev_priv = dev->dev_private;
2671
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3480 Serge 2672
 
3746 Serge 2673
//   if (!drm_handle_vblank(dev, pipe))
2674
	return false;
3480 Serge 2675
 
3746 Serge 2676
	if ((iir & flip_pending) == 0)
2677
		return false;
3480 Serge 2678
 
3746 Serge 2679
//   intel_prepare_page_flip(dev, plane);
3031 serge 2680
 
3746 Serge 2681
	/* We detect FlipDone by looking for the change in PendingFlip from '1'
2682
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
2683
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
2684
	 * the flip is completed (no longer pending). Since this doesn't raise
2685
	 * an interrupt per se, we watch for the change at vblank.
2686
	 */
2687
	if (I915_READ(ISR) & flip_pending)
2688
		return false;
2689
 
2690
	intel_finish_page_flip(dev, pipe);
2691
 
2692
	return true;
3031 serge 2693
}
2694
 
3243 Serge 2695
static irqreturn_t i915_irq_handler(int irq, void *arg)
3031 serge 2696
{
2697
	struct drm_device *dev = (struct drm_device *) arg;
2698
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2699
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
2700
	unsigned long irqflags;
2701
	u32 flip_mask =
2702
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2703
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2704
	int pipe, ret = IRQ_NONE;
2705
 
2706
	atomic_inc(&dev_priv->irq_received);
2707
 
2708
	iir = I915_READ(IIR);
2709
	do {
2710
		bool irq_received = (iir & ~flip_mask) != 0;
2711
		bool blc_event = false;
2712
 
2713
		/* Can't rely on pipestat interrupt bit in iir as it might
2714
		 * have been cleared after the pipestat interrupt was received.
2715
		 * It doesn't set the bit in iir again, but it still produces
2716
		 * interrupts (for non-MSI).
2717
		 */
2718
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4104 Serge 2719
//       if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2720
//           i915_handle_error(dev, false);
3031 serge 2721
 
2722
		for_each_pipe(pipe) {
2723
			int reg = PIPESTAT(pipe);
2724
			pipe_stats[pipe] = I915_READ(reg);
2725
 
2726
			/* Clear the PIPE*STAT regs before the IIR */
2727
			if (pipe_stats[pipe] & 0x8000ffff) {
2728
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2729
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
2730
							 pipe_name(pipe));
2731
				I915_WRITE(reg, pipe_stats[pipe]);
2732
				irq_received = true;
2733
			}
2734
		}
2735
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2736
 
2737
		if (!irq_received)
2738
			break;
2739
 
2740
		/* Consume port.  Then clear IIR or we'll miss events */
2741
		if ((I915_HAS_HOTPLUG(dev)) &&
2742
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
2743
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
3746 Serge 2744
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
3031 serge 2745
 
2746
			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2747
				  hotplug_status);
4104 Serge 2748
 
2749
			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
2750
 
3031 serge 2751
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2752
			POSTING_READ(PORT_HOTPLUG_STAT);
2753
		}
2754
 
2755
		I915_WRITE(IIR, iir & ~flip_mask);
2756
		new_iir = I915_READ(IIR); /* Flush posted writes */
2757
 
2758
		if (iir & I915_USER_INTERRUPT)
2759
			notify_ring(dev, &dev_priv->ring[RCS]);
2760
 
2761
		for_each_pipe(pipe) {
2762
			int plane = pipe;
2763
			if (IS_MOBILE(dev))
2764
				plane = !plane;
2765
 
3746 Serge 2766
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
2767
			    i915_handle_vblank(dev, plane, pipe, iir))
2768
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
2769
 
3031 serge 2770
			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2771
				blc_event = true;
2772
		}
2773
 
2774
//		if (blc_event || (iir & I915_ASLE_INTERRUPT))
2775
//			intel_opregion_asle_intr(dev);
2776
 
2777
		/* With MSI, interrupts are only generated when iir
2778
		 * transitions from zero to nonzero.  If another bit got
2779
		 * set while we were handling the existing iir bits, then
2780
		 * we would never get another interrupt.
2781
		 *
2782
		 * This is fine on non-MSI as well, as if we hit this path
2783
		 * we avoid exiting the interrupt handler only to generate
2784
		 * another one.
2785
		 *
2786
		 * Note that for MSI this could cause a stray interrupt report
2787
		 * if an interrupt landed in the time between writing IIR and
2788
		 * the posting read.  This should be rare enough to never
2789
		 * trigger the 99% of 100,000 interrupts test for disabling
2790
		 * stray interrupts.
2791
		 */
2792
		ret = IRQ_HANDLED;
2793
		iir = new_iir;
2794
	} while (iir & ~flip_mask);
2795
 
2796
	i915_update_dri1_breadcrumb(dev);
2797
 
2798
	return ret;
2799
}
2800
 
2801
static void i915_irq_uninstall(struct drm_device * dev)
2802
{
2803
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2804
	int pipe;
2805
 
2806
	if (I915_HAS_HOTPLUG(dev)) {
2807
		I915_WRITE(PORT_HOTPLUG_EN, 0);
2808
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2809
	}
2810
 
2811
	I915_WRITE16(HWSTAM, 0xffff);
2812
	for_each_pipe(pipe) {
2813
		/* Clear enable bits; then clear status bits */
2814
		I915_WRITE(PIPESTAT(pipe), 0);
2815
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2816
	}
2817
	I915_WRITE(IMR, 0xffffffff);
2818
	I915_WRITE(IER, 0x0);
2819
 
2820
	I915_WRITE(IIR, I915_READ(IIR));
2821
}
2822
 
2823
static void i965_irq_preinstall(struct drm_device * dev)
2824
{
2825
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2826
	int pipe;
2827
 
2828
	atomic_set(&dev_priv->irq_received, 0);
2829
 
2830
	I915_WRITE(PORT_HOTPLUG_EN, 0);
2831
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2832
 
2833
	I915_WRITE(HWSTAM, 0xeffe);
2834
	for_each_pipe(pipe)
2835
		I915_WRITE(PIPESTAT(pipe), 0);
2836
	I915_WRITE(IMR, 0xffffffff);
2837
	I915_WRITE(IER, 0x0);
2838
	POSTING_READ(IER);
2839
}
2840
 
2841
static int i965_irq_postinstall(struct drm_device *dev)
2842
{
2843
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2844
	u32 enable_mask;
2845
	u32 error_mask;
4104 Serge 2846
	unsigned long irqflags;
3031 serge 2847
 
2848
	/* Unmask the interrupts that we always want on. */
2849
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
2850
			       I915_DISPLAY_PORT_INTERRUPT |
2851
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2852
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2853
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2854
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2855
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2856
 
2857
	enable_mask = ~dev_priv->irq_mask;
3746 Serge 2858
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2859
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3031 serge 2860
	enable_mask |= I915_USER_INTERRUPT;
2861
 
2862
	if (IS_G4X(dev))
2863
		enable_mask |= I915_BSD_USER_INTERRUPT;
2864
 
4104 Serge 2865
	/* Interrupt setup is already guaranteed to be single-threaded, this is
2866
	 * just to make the assert_spin_locked check happy. */
2867
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3480 Serge 2868
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
4104 Serge 2869
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3031 serge 2870
 
2871
	/*
2872
	 * Enable some error detection, note the instruction error mask
2873
	 * bit is reserved, so we leave it masked.
2874
	 */
2875
	if (IS_G4X(dev)) {
2876
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
2877
			       GM45_ERROR_MEM_PRIV |
2878
			       GM45_ERROR_CP_PRIV |
2879
			       I915_ERROR_MEMORY_REFRESH);
2880
	} else {
2881
		error_mask = ~(I915_ERROR_PAGE_TABLE |
2882
			       I915_ERROR_MEMORY_REFRESH);
2883
	}
2884
	I915_WRITE(EMR, error_mask);
2885
 
2886
	I915_WRITE(IMR, dev_priv->irq_mask);
2887
	I915_WRITE(IER, enable_mask);
2888
	POSTING_READ(IER);
2889
 
3480 Serge 2890
	I915_WRITE(PORT_HOTPLUG_EN, 0);
2891
	POSTING_READ(PORT_HOTPLUG_EN);
2892
 
2893
//	intel_opregion_enable_asle(dev);
2894
 
2895
	return 0;
2896
}
2897
 
3746 Serge 2898
static void i915_hpd_irq_setup(struct drm_device *dev)
3480 Serge 2899
{
2900
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3746 Serge 2901
	struct drm_mode_config *mode_config = &dev->mode_config;
2902
	struct intel_encoder *intel_encoder;
3480 Serge 2903
	u32 hotplug_en;
2904
 
4104 Serge 2905
	assert_spin_locked(&dev_priv->irq_lock);
2906
 
3746 Serge 2907
	if (I915_HAS_HOTPLUG(dev)) {
2908
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
2909
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
3031 serge 2910
		/* Note HDMI and DP share hotplug bits */
3746 Serge 2911
		/* enable bits are the same for all generations */
2912
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2913
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2914
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
3031 serge 2915
		/* Programming the CRT detection parameters tends
2916
		   to generate a spurious hotplug event about three
2917
		   seconds later.  So just do it once.
2918
		   */
2919
		if (IS_G4X(dev))
2920
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
3746 Serge 2921
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
3031 serge 2922
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
3480 Serge 2923
 
3031 serge 2924
		/* Ignore TV since it's buggy */
2925
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
3746 Serge 2926
	}
3031 serge 2927
}
2928
 
3243 Serge 2929
static irqreturn_t i965_irq_handler(int irq, void *arg)
3031 serge 2930
{
2931
	struct drm_device *dev = (struct drm_device *) arg;
2932
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2933
	u32 iir, new_iir;
2934
	u32 pipe_stats[I915_MAX_PIPES];
2935
	unsigned long irqflags;
2936
	int irq_received;
2937
	int ret = IRQ_NONE, pipe;
3746 Serge 2938
	u32 flip_mask =
2939
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2940
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3031 serge 2941
 
2942
	atomic_inc(&dev_priv->irq_received);
2943
 
2944
	iir = I915_READ(IIR);
2945
 
2946
	for (;;) {
2947
		bool blc_event = false;
2948
 
3746 Serge 2949
		irq_received = (iir & ~flip_mask) != 0;
3031 serge 2950
 
2951
		/* Can't rely on pipestat interrupt bit in iir as it might
2952
		 * have been cleared after the pipestat interrupt was received.
2953
		 * It doesn't set the bit in iir again, but it still produces
2954
		 * interrupts (for non-MSI).
2955
		 */
2956
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4104 Serge 2957
//       if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2958
//           i915_handle_error(dev, false);
3031 serge 2959
 
2960
		for_each_pipe(pipe) {
2961
			int reg = PIPESTAT(pipe);
2962
			pipe_stats[pipe] = I915_READ(reg);
2963
 
2964
			/*
2965
			 * Clear the PIPE*STAT regs before the IIR
2966
			 */
2967
			if (pipe_stats[pipe] & 0x8000ffff) {
2968
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2969
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
2970
							 pipe_name(pipe));
2971
				I915_WRITE(reg, pipe_stats[pipe]);
2972
				irq_received = 1;
2973
			}
2974
		}
2975
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2976
 
2977
		if (!irq_received)
2978
			break;
2979
 
2980
		ret = IRQ_HANDLED;
2981
 
2982
		/* Consume port.  Then clear IIR or we'll miss events */
2983
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
2984
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
3746 Serge 2985
			u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
2986
								  HOTPLUG_INT_STATUS_G4X :
4104 Serge 2987
								  HOTPLUG_INT_STATUS_I915);
3031 serge 2988
 
2989
			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2990
				  hotplug_status);
4104 Serge 2991
 
2992
			intel_hpd_irq_handler(dev, hotplug_trigger,
2993
					      IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915);
2994
 
3031 serge 2995
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2996
			I915_READ(PORT_HOTPLUG_STAT);
2997
		}
2998
 
3746 Serge 2999
		I915_WRITE(IIR, iir & ~flip_mask);
3031 serge 3000
		new_iir = I915_READ(IIR); /* Flush posted writes */
3001
 
3002
		if (iir & I915_USER_INTERRUPT)
3003
			notify_ring(dev, &dev_priv->ring[RCS]);
3004
		if (iir & I915_BSD_USER_INTERRUPT)
3005
			notify_ring(dev, &dev_priv->ring[VCS]);
3006
 
3007
		for_each_pipe(pipe) {
3746 Serge 3008
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
3009
			    i915_handle_vblank(dev, pipe, pipe, iir))
3010
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
3031 serge 3011
 
3012
			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3013
				blc_event = true;
3014
		}
3015
 
3016
 
3017
//		if (blc_event || (iir & I915_ASLE_INTERRUPT))
3018
//			intel_opregion_asle_intr(dev);
3019
 
3480 Serge 3020
		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
3021
			gmbus_irq_handler(dev);
3022
 
3031 serge 3023
		/* With MSI, interrupts are only generated when iir
3024
		 * transitions from zero to nonzero.  If another bit got
3025
		 * set while we were handling the existing iir bits, then
3026
		 * we would never get another interrupt.
3027
		 *
3028
		 * This is fine on non-MSI as well, as if we hit this path
3029
		 * we avoid exiting the interrupt handler only to generate
3030
		 * another one.
3031
		 *
3032
		 * Note that for MSI this could cause a stray interrupt report
3033
		 * if an interrupt landed in the time between writing IIR and
3034
		 * the posting read.  This should be rare enough to never
3035
		 * trigger the 99% of 100,000 interrupts test for disabling
3036
		 * stray interrupts.
3037
		 */
3038
		iir = new_iir;
3039
	}
3040
 
3041
	i915_update_dri1_breadcrumb(dev);
3042
 
3043
	return ret;
3044
}
3045
 
3046
static void i965_irq_uninstall(struct drm_device * dev)
3047
{
3048
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3049
	int pipe;
3050
 
3051
	if (!dev_priv)
3052
		return;
3053
 
3054
	I915_WRITE(PORT_HOTPLUG_EN, 0);
3055
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3056
 
3057
	I915_WRITE(HWSTAM, 0xffffffff);
3058
	for_each_pipe(pipe)
3059
		I915_WRITE(PIPESTAT(pipe), 0);
3060
	I915_WRITE(IMR, 0xffffffff);
3061
	I915_WRITE(IER, 0x0);
3062
 
3063
	for_each_pipe(pipe)
3064
		I915_WRITE(PIPESTAT(pipe),
3065
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
3066
	I915_WRITE(IIR, I915_READ(IIR));
3067
}
3068
 
2351 Serge 3069
void intel_irq_init(struct drm_device *dev)
3070
{
3031 serge 3071
	struct drm_i915_private *dev_priv = dev->dev_private;
3072
 
3480 Serge 3073
	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
3074
 
3075
//	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
3076
 
3077
 
4104 Serge 3078
//	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
3480 Serge 3079
 
3031 serge 3080
	if (IS_VALLEYVIEW(dev)) {
3243 Serge 3081
		dev->driver->irq_handler = valleyview_irq_handler;
3082
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
3083
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
3746 Serge 3084
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
2351 Serge 3085
	} else if (HAS_PCH_SPLIT(dev)) {
3243 Serge 3086
		dev->driver->irq_handler = ironlake_irq_handler;
3087
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
3088
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
3746 Serge 3089
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
2351 Serge 3090
	} else {
3031 serge 3091
		if (INTEL_INFO(dev)->gen == 2) {
3092
		} else if (INTEL_INFO(dev)->gen == 3) {
3243 Serge 3093
			dev->driver->irq_preinstall = i915_irq_preinstall;
3094
			dev->driver->irq_postinstall = i915_irq_postinstall;
3095
			dev->driver->irq_handler = i915_irq_handler;
3480 Serge 3096
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3031 serge 3097
		} else {
3243 Serge 3098
			dev->driver->irq_preinstall = i965_irq_preinstall;
3099
			dev->driver->irq_postinstall = i965_irq_postinstall;
3100
			dev->driver->irq_handler = i965_irq_handler;
3746 Serge 3101
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3031 serge 3102
		}
2351 Serge 3103
	}
3480 Serge 3104
}
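/*
 * For context: in the stock DRM core these hooks are driven by
 * drm_irq_install(), roughly in this order (sketch of the upstream flow;
 * this port instead enters through intel_irq_handler() below):
 */
#if 0
	dev->driver->irq_preinstall(dev);
	ret = request_irq(drm_dev_to_irq(dev), dev->driver->irq_handler,
			  IRQF_SHARED, dev->driver->name, dev);
	dev->driver->irq_postinstall(dev);
#endif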
3243 Serge 3105
 
3480 Serge 3106
void intel_hpd_init(struct drm_device *dev)
3107
{
3108
	struct drm_i915_private *dev_priv = dev->dev_private;
3746 Serge 3109
	struct drm_mode_config *mode_config = &dev->mode_config;
3110
	struct drm_connector *connector;
4104 Serge 3111
	unsigned long irqflags;
3746 Serge 3112
	int i;
3480 Serge 3113
 
3746 Serge 3114
	for (i = 1; i < HPD_NUM_PINS; i++) {
3115
		dev_priv->hpd_stats[i].hpd_cnt = 0;
3116
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3117
	}
3118
	list_for_each_entry(connector, &mode_config->connector_list, head) {
3119
		struct intel_connector *intel_connector = to_intel_connector(connector);
3120
		connector->polled = intel_connector->polled;
3121
		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
3122
			connector->polled = DRM_CONNECTOR_POLL_HPD;
3123
	}
4104 Serge 3124
 
3125
	/* Interrupt setup is already guaranteed to be single-threaded, this is
3126
	 * just to make the assert_spin_locked checks happy. */
3127
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3480 Serge 3128
	if (dev_priv->display.hpd_irq_setup)
3129
		dev_priv->display.hpd_irq_setup(dev);
4104 Serge 3130
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2351 Serge 3131
}
3132
 
4104 Serge 3133
/* Disable interrupts so we can allow Package C8+. */
3134
void hsw_pc8_disable_interrupts(struct drm_device *dev)
3243 Serge 3135
{
4104 Serge 3136
	struct drm_i915_private *dev_priv = dev->dev_private;
3137
	unsigned long irqflags;
2351 Serge 3138
 
4104 Serge 3139
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3243 Serge 3140
 
4104 Serge 3141
	dev_priv->pc8.regsave.deimr = I915_READ(DEIMR);
3142
	dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR);
3143
	dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR);
3144
	dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
3145
	dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
3243 Serge 3146
 
4104 Serge 3147
	ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB);
3148
	ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT);
3149
	ilk_disable_gt_irq(dev_priv, 0xffffffff);
3150
	snb_disable_pm_irq(dev_priv, 0xffffffff);
3151
 
3152
	dev_priv->pc8.irqs_disabled = true;
3153
 
3154
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3243 Serge 3155
}
3156
 
4104 Serge 3157
/* Restore interrupts so we can recover from Package C8+. */
3158
void hsw_pc8_restore_interrupts(struct drm_device *dev)
2351 Serge 3159
{
4104 Serge 3160
	struct drm_i915_private *dev_priv = dev->dev_private;
3161
	unsigned long irqflags;
3162
	uint32_t val, expected;
2351 Serge 3163
 
4104 Serge 3164
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3051 serge 3165
 
4104 Serge 3166
	val = I915_READ(DEIMR);
3167
	expected = ~DE_PCH_EVENT_IVB;
3168
	WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected);
2351 Serge 3169
 
4104 Serge 3170
	val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT;
3171
	expected = ~SDE_HOTPLUG_MASK_CPT;
3172
	WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n",
3173
	     val, expected);
2351 Serge 3174
 
4104 Serge 3175
	val = I915_READ(GTIMR);
3176
	expected = 0xffffffff;
3177
	WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected);
2351 Serge 3178
 
4104 Serge 3179
	val = I915_READ(GEN6_PMIMR);
3180
	expected = 0xffffffff;
3181
	WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val,
3182
	     expected);
2351 Serge 3183
 
4104 Serge 3184
	dev_priv->pc8.irqs_disabled = false;
2351 Serge 3185
 
4104 Serge 3186
	ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
3187
	ibx_enable_display_interrupt(dev_priv,
3188
				     ~dev_priv->pc8.regsave.sdeimr &
3189
				     ~SDE_HOTPLUG_MASK_CPT);
3190
	ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
3191
	snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
3192
	I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);
2351 Serge 3193
 
4104 Serge 3194
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3195
}
2351 Serge 3196
 
3197
 
4104 Serge 3198
irqreturn_t intel_irq_handler(struct drm_device *dev)
3199
{
2351 Serge 3200
 
4104 Serge 3201
//    printf("i915 irq\n");
2351 Serge 3202
 
4104 Serge 3203
//    printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ;
3204
 
3205
	return dev->driver->irq_handler(0, dev);
2351 Serge 3206
}
3207