Subversion Repositories Kolibri OS

Rev Author Line No. Line
2351 Serge 1
/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2
 */
3
/*
4
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5
 * All Rights Reserved.
6
 *
7
 * Permission is hereby granted, free of charge, to any person obtaining a
8
 * copy of this software and associated documentation files (the
9
 * "Software"), to deal in the Software without restriction, including
10
 * without limitation the rights to use, copy, modify, merge, publish,
11
 * distribute, sub license, and/or sell copies of the Software, and to
12
 * permit persons to whom the Software is furnished to do so, subject to
13
 * the following conditions:
14
 *
15
 * The above copyright notice and this permission notice (including the
16
 * next paragraph) shall be included in all copies or substantial portions
17
 * of the Software.
18
 *
19
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
 *
27
 */
28
 
3746 Serge 29
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3031 serge 30
 
31
#include 
32
#include 
33
#include 
2351 Serge 34
#include "i915_drv.h"
35
#include "i915_trace.h"
36
#include "intel_drv.h"
37
 
5354 serge 38
/**
39
 * DOC: interrupt handling
40
 *
41
 * These functions provide the basic support for enabling and disabling the
42
 * interrupt handling support. There's a lot more functionality in i915_irq.c
43
 * and related files, but that will be described in separate chapters.
44
 */
4104 Serge 45
 
3746 Serge 46
static const u32 hpd_ibx[] = {
47
	[HPD_CRT] = SDE_CRT_HOTPLUG,
48
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
49
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
50
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
51
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
52
};
3031 serge 53
 
3746 Serge 54
static const u32 hpd_cpt[] = {
55
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
56
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
57
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
58
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
59
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
60
};
61
 
62
static const u32 hpd_mask_i915[] = {
63
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
64
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
65
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
66
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
67
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
68
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
69
};
70
 
4560 Serge 71
static const u32 hpd_status_g4x[] = {
3746 Serge 72
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
73
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
74
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
75
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
76
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
77
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
78
};
79
 
80
static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
81
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
82
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
83
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
84
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
85
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
86
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
87
};
88
 
5060 serge 89
/* IIR can theoretically queue up two events. Be paranoid. */
90
#define GEN8_IRQ_RESET_NDX(type, which) do { \
91
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
92
	POSTING_READ(GEN8_##type##_IMR(which)); \
93
	I915_WRITE(GEN8_##type##_IER(which), 0); \
94
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
95
	POSTING_READ(GEN8_##type##_IIR(which)); \
96
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
97
	POSTING_READ(GEN8_##type##_IIR(which)); \
98
} while (0)
3746 Serge 99
 
5060 serge 100
#define GEN5_IRQ_RESET(type) do { \
101
	I915_WRITE(type##IMR, 0xffffffff); \
102
	POSTING_READ(type##IMR); \
103
	I915_WRITE(type##IER, 0); \
104
	I915_WRITE(type##IIR, 0xffffffff); \
105
	POSTING_READ(type##IIR); \
106
	I915_WRITE(type##IIR, 0xffffffff); \
107
	POSTING_READ(type##IIR); \
108
} while (0)
109
 
110
/*
111
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
112
 */
113
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
114
	u32 val = I915_READ(reg); \
115
	if (val) { \
116
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
117
		     (reg), val); \
118
		I915_WRITE((reg), 0xffffffff); \
119
		POSTING_READ(reg); \
120
		I915_WRITE((reg), 0xffffffff); \
121
		POSTING_READ(reg); \
122
	} \
123
} while (0)
124
 
125
#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
126
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
5354 serge 127
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
5060 serge 128
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
5354 serge 129
	POSTING_READ(GEN8_##type##_IMR(which)); \
5060 serge 130
} while (0)
131
 
132
#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
133
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
5354 serge 134
	I915_WRITE(type##IER, (ier_val)); \
5060 serge 135
	I915_WRITE(type##IMR, (imr_val)); \
5354 serge 136
	POSTING_READ(type##IMR); \
5060 serge 137
} while (0)
138
 
5354 serge 139
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
3031 serge 140
 
2351 Serge 141
/* For display hotplug interrupt */
5354 serge 142
void
5060 serge 143
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
2351 Serge 144
{
4104 Serge 145
	assert_spin_locked(&dev_priv->irq_lock);
146
 
5060 serge 147
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
4104 Serge 148
		return;
149
 
2351 Serge 150
	if ((dev_priv->irq_mask & mask) != 0) {
151
		dev_priv->irq_mask &= ~mask;
152
		I915_WRITE(DEIMR, dev_priv->irq_mask);
153
		POSTING_READ(DEIMR);
154
	}
155
}
156
 
5354 serge 157
void
5060 serge 158
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
2351 Serge 159
{
4104 Serge 160
	assert_spin_locked(&dev_priv->irq_lock);
161
 
5354 serge 162
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
4104 Serge 163
		return;
164
 
2351 Serge 165
	if ((dev_priv->irq_mask & mask) != mask) {
166
		dev_priv->irq_mask |= mask;
167
		I915_WRITE(DEIMR, dev_priv->irq_mask);
168
		POSTING_READ(DEIMR);
169
	}
170
}
3031 serge 171
 
4104 Serge 172
/**
173
 * ilk_update_gt_irq - update GTIMR
174
 * @dev_priv: driver private
175
 * @interrupt_mask: mask of interrupt bits to update
176
 * @enabled_irq_mask: mask of interrupt bits to enable
177
 */
178
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
179
			      uint32_t interrupt_mask,
180
			      uint32_t enabled_irq_mask)
181
{
182
	assert_spin_locked(&dev_priv->irq_lock);
183
 
5060 serge 184
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
4104 Serge 185
		return;
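	/*
	 * GTIMR is a mask register: a set bit blocks delivery of that interrupt.
	 * The update below clears every bit named in interrupt_mask and then
	 * re-sets the ones not present in enabled_irq_mask, leaving them masked.
	 */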
186
 
187
	dev_priv->gt_irq_mask &= ~interrupt_mask;
188
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
189
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
190
	POSTING_READ(GTIMR);
191
}
192
 
5060 serge 193
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
4104 Serge 194
{
195
	ilk_update_gt_irq(dev_priv, mask, mask);
196
}
197
 
5060 serge 198
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
4104 Serge 199
{
200
	ilk_update_gt_irq(dev_priv, mask, 0);
201
}
202
 
5354 serge 203
static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
4104 Serge 204
{
5354 serge 205
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
4104 Serge 206
}
207
 
5354 serge 208
static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
4104 Serge 209
{
5354 serge 210
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
4104 Serge 211
}
212
 
5354 serge 213
static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
4104 Serge 214
{
5354 serge 215
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
4104 Serge 216
}
217
 
5060 serge 218
/**
5354 serge 219
  * snb_update_pm_irq - update GEN6_PMIMR
5060 serge 220
  * @dev_priv: driver private
221
  * @interrupt_mask: mask of interrupt bits to update
222
  * @enabled_irq_mask: mask of interrupt bits to enable
223
  */
5354 serge 224
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
5060 serge 225
			      uint32_t interrupt_mask,
226
			      uint32_t enabled_irq_mask)
227
{
228
	uint32_t new_val;
229
 
230
	assert_spin_locked(&dev_priv->irq_lock);
231
 
232
	new_val = dev_priv->pm_irq_mask;
233
	new_val &= ~interrupt_mask;
234
	new_val |= (~enabled_irq_mask & interrupt_mask);
235
 
236
	if (new_val != dev_priv->pm_irq_mask) {
237
		dev_priv->pm_irq_mask = new_val;
5354 serge 238
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
239
		POSTING_READ(gen6_pm_imr(dev_priv));
5060 serge 240
	}
241
}
242
 
5354 serge 243
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
5060 serge 244
{
5354 serge 245
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
246
		return;
247
 
248
	snb_update_pm_irq(dev_priv, mask, mask);
5060 serge 249
}
250
 
5354 serge 251
static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
252
				  uint32_t mask)
5060 serge 253
{
5354 serge 254
	snb_update_pm_irq(dev_priv, mask, 0);
5060 serge 255
}
256
 
5354 serge 257
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
4104 Serge 258
{
5354 serge 259
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
260
		return;
4104 Serge 261
 
5354 serge 262
	__gen6_disable_pm_irq(dev_priv, mask);
4104 Serge 263
}
264
 
5354 serge 265
void gen6_reset_rps_interrupts(struct drm_device *dev)
5060 serge 266
{
267
	struct drm_i915_private *dev_priv = dev->dev_private;
5354 serge 268
	uint32_t reg = gen6_pm_iir(dev_priv);
5060 serge 269
 
5354 serge 270
	spin_lock_irq(&dev_priv->irq_lock);
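	/*
	 * IIR can latch a second event behind the one being cleared, so the
	 * register is written twice - the same "be paranoid" rule the IRQ
	 * reset macros above follow.
	 */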
271
	I915_WRITE(reg, dev_priv->pm_rps_events);
272
	I915_WRITE(reg, dev_priv->pm_rps_events);
5060 serge 273
	POSTING_READ(reg);
5354 serge 274
	spin_unlock_irq(&dev_priv->irq_lock);
5060 serge 275
}
276
 
5354 serge 277
void gen6_enable_rps_interrupts(struct drm_device *dev)
5060 serge 278
{
279
	struct drm_i915_private *dev_priv = dev->dev_private;
280
 
5354 serge 281
	spin_lock_irq(&dev_priv->irq_lock);
5060 serge 282
 
5354 serge 283
	WARN_ON(dev_priv->rps.pm_iir);
284
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
285
	dev_priv->rps.interrupts_enabled = true;
286
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
287
				dev_priv->pm_rps_events);
288
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
289
 
290
	spin_unlock_irq(&dev_priv->irq_lock);
5060 serge 291
}
292
 
5354 serge 293
void gen6_disable_rps_interrupts(struct drm_device *dev)
4104 Serge 294
{
295
	struct drm_i915_private *dev_priv = dev->dev_private;
296
 
5354 serge 297
	spin_lock_irq(&dev_priv->irq_lock);
298
	dev_priv->rps.interrupts_enabled = false;
299
	spin_unlock_irq(&dev_priv->irq_lock);
4104 Serge 300
 
5354 serge 301
	cancel_work_sync(&dev_priv->rps.work);
4104 Serge 302
 
5354 serge 303
	spin_lock_irq(&dev_priv->irq_lock);
4104 Serge 304
 
5354 serge 305
	I915_WRITE(GEN6_PMINTRMSK, INTEL_INFO(dev_priv)->gen >= 8 ?
306
		   ~GEN8_PMINTR_REDIRECT_TO_NON_DISP : ~0);
4104 Serge 307
 
5354 serge 308
	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
309
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
310
				~dev_priv->pm_rps_events);
311
	I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
312
	I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
4104 Serge 313
 
5354 serge 314
	dev_priv->rps.pm_iir = 0;
4560 Serge 315
 
5354 serge 316
	spin_unlock_irq(&dev_priv->irq_lock);
4560 Serge 317
}
318
 
4104 Serge 319
/**
320
 * ibx_display_interrupt_update - update SDEIMR
321
 * @dev_priv: driver private
322
 * @interrupt_mask: mask of interrupt bits to update
323
 * @enabled_irq_mask: mask of interrupt bits to enable
324
 */
5354 serge 325
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
4104 Serge 326
					 uint32_t interrupt_mask,
327
					 uint32_t enabled_irq_mask)
328
{
329
	uint32_t sdeimr = I915_READ(SDEIMR);
330
	sdeimr &= ~interrupt_mask;
331
	sdeimr |= (~enabled_irq_mask & interrupt_mask);
332
 
333
	assert_spin_locked(&dev_priv->irq_lock);
334
 
5060 serge 335
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
4104 Serge 336
		return;
337
 
338
	I915_WRITE(SDEIMR, sdeimr);
339
	POSTING_READ(SDEIMR);
340
}
341
 
5060 serge 342
static void
343
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
344
		       u32 enable_mask, u32 status_mask)
3031 serge 345
{
346
		u32 reg = PIPESTAT(pipe);
5060 serge 347
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
3031 serge 348
 
4104 Serge 349
	assert_spin_locked(&dev_priv->irq_lock);
5354 serge 350
	WARN_ON(!intel_irqs_enabled(dev_priv));
4104 Serge 351
 
5060 serge 352
	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
353
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
354
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
355
		      pipe_name(pipe), enable_mask, status_mask))
3746 Serge 356
		return;
357
 
5060 serge 358
	if ((pipestat & enable_mask) == enable_mask)
359
		return;
360
 
361
	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
362
 
3031 serge 363
		/* Enable the interrupt, clear any pending status */
5060 serge 364
	pipestat |= enable_mask | status_mask;
3746 Serge 365
	I915_WRITE(reg, pipestat);
3031 serge 366
		POSTING_READ(reg);
367
}
368
 
5060 serge 369
static void
370
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
371
		        u32 enable_mask, u32 status_mask)
3031 serge 372
{
373
		u32 reg = PIPESTAT(pipe);
5060 serge 374
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
3031 serge 375
 
4104 Serge 376
	assert_spin_locked(&dev_priv->irq_lock);
5354 serge 377
	WARN_ON(!intel_irqs_enabled(dev_priv));
4104 Serge 378
 
5060 serge 379
	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
380
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
381
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
382
		      pipe_name(pipe), enable_mask, status_mask))
3746 Serge 383
		return;
384
 
5060 serge 385
	if ((pipestat & enable_mask) == 0)
386
		return;
387
 
388
	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
389
 
390
	pipestat &= ~enable_mask;
3746 Serge 391
	I915_WRITE(reg, pipestat);
3031 serge 392
		POSTING_READ(reg);
393
}
394
 
5060 serge 395
static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
396
{
397
	u32 enable_mask = status_mask << 16;
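	/*
	 * PIPESTAT keeps the status bits in the low 16 bits and the matching
	 * interrupt enable bits in the high 16 bits, hence the shift above.
	 */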
398
 
399
	/*
400
	 * On pipe A we don't support the PSR interrupt yet,
401
	 * on pipe B and C the same bit MBZ.
402
	 */
403
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
404
		return 0;
405
	/*
406
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
407
	 * A the same bit is for perf counters which we don't use either.
408
	 */
409
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
410
		return 0;
411
 
412
	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
413
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
414
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
415
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
416
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
417
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
418
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
419
 
420
	return enable_mask;
421
}
422
 
423
void
424
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
425
		     u32 status_mask)
426
{
427
	u32 enable_mask;
428
 
429
	if (IS_VALLEYVIEW(dev_priv->dev))
430
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
431
							   status_mask);
432
	else
433
		enable_mask = status_mask << 16;
434
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
435
}
436
 
437
void
438
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
439
		      u32 status_mask)
440
{
441
	u32 enable_mask;
442
 
443
	if (IS_VALLEYVIEW(dev_priv->dev))
444
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
445
							   status_mask);
446
	else
447
		enable_mask = status_mask << 16;
448
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
449
}
450
 
3031 serge 451
/**
4104 Serge 452
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
3031 serge 453
 */
4104 Serge 454
static void i915_enable_asle_pipestat(struct drm_device *dev)
3031 serge 455
{
5060 serge 456
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 457
 
4104 Serge 458
	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
3031 serge 459
		return;
460
 
5354 serge 461
	spin_lock_irq(&dev_priv->irq_lock);
3031 serge 462
 
5060 serge 463
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
3031 serge 464
	if (INTEL_INFO(dev)->gen >= 4)
4560 Serge 465
		i915_enable_pipestat(dev_priv, PIPE_A,
5060 serge 466
				     PIPE_LEGACY_BLC_EVENT_STATUS);
3031 serge 467
 
5354 serge 468
	spin_unlock_irq(&dev_priv->irq_lock);
3031 serge 469
}
470
 
471
/**
472
 * i915_pipe_enabled - check if a pipe is enabled
473
 * @dev: DRM device
474
 * @pipe: pipe to check
475
 *
476
 * Reading certain registers when the pipe is disabled can hang the chip.
477
 * Use this routine to make sure the PLL is running and the pipe is active
478
 * before reading such registers if unsure.
479
 */
480
static int
481
i915_pipe_enabled(struct drm_device *dev, int pipe)
482
{
5060 serge 483
	struct drm_i915_private *dev_priv = dev->dev_private;
3243 Serge 484
 
4104 Serge 485
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
486
		/* Locking is horribly broken here, but whatever. */
487
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
488
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
489
 
490
		return intel_crtc->active;
491
	} else {
492
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
493
	}
3031 serge 494
}
495
 
5060 serge 496
/*
497
 * This timing diagram depicts the video signal in and
498
 * around the vertical blanking period.
499
 *
500
 * Assumptions about the fictitious mode used in this example:
501
 *  vblank_start >= 3
502
 *  vsync_start = vblank_start + 1
503
 *  vsync_end = vblank_start + 2
504
 *  vtotal = vblank_start + 3
505
 *
506
 *           start of vblank:
507
 *           latch double buffered registers
508
 *           increment frame counter (ctg+)
509
 *           generate start of vblank interrupt (gen4+)
510
 *           |
511
 *           |          frame start:
512
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
513
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
514
 *           |          |
515
 *           |          |  start of vsync:
516
 *           |          |  generate vsync interrupt
517
 *           |          |  |
518
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
519
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
520
 * ----va---> <-----------------vb--------------------> <--------va-------------
521
 *       |          |       <----vs----->                     |
522
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
523
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
524
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
525
 *       |          |                                         |
526
 *       last visible pixel                                   first visible pixel
527
 *                  |                                         increment frame counter (gen3/4)
528
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
529
 *
530
 * x  = horizontal active
531
 * _  = horizontal blanking
532
 * hs = horizontal sync
533
 * va = vertical active
534
 * vb = vertical blanking
535
 * vs = vertical sync
536
 * vbs = vblank_start (number)
537
 *
538
 * Summary:
539
 * - most events happen at the start of horizontal sync
540
 * - frame start happens at the start of horizontal blank, 1-4 lines
541
 *   (depending on PIPECONF settings) after the start of vblank
542
 * - gen3/4 pixel and frame counter are synchronized with the start
543
 *   of horizontal active on the first line of vertical active
544
 */
545
 
4560 Serge 546
static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
547
{
548
	/* Gen2 doesn't have a hardware frame counter */
549
	return 0;
550
}
551
 
3031 serge 552
/* Called from drm generic code, passed a 'crtc', which
553
 * we use as a pipe index
554
 */
555
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
556
{
5060 serge 557
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 558
	unsigned long high_frame;
559
	unsigned long low_frame;
5060 serge 560
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
3031 serge 561
 
562
	if (!i915_pipe_enabled(dev, pipe)) {
563
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
564
				"pipe %c\n", pipe_name(pipe));
565
		return 0;
566
	}
567
 
4560 Serge 568
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
569
		struct intel_crtc *intel_crtc =
570
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
571
		const struct drm_display_mode *mode =
572
			&intel_crtc->config.adjusted_mode;
573
 
5060 serge 574
		htotal = mode->crtc_htotal;
575
		hsync_start = mode->crtc_hsync_start;
576
		vbl_start = mode->crtc_vblank_start;
577
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
578
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
4560 Serge 579
	} else {
5060 serge 580
		enum transcoder cpu_transcoder = (enum transcoder) pipe;
4560 Serge 581
 
582
		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
5060 serge 583
		hsync_start = (I915_READ(HSYNC(cpu_transcoder))  & 0x1fff) + 1;
4560 Serge 584
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
5060 serge 585
		if ((I915_READ(PIPECONF(cpu_transcoder)) &
586
		     PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
587
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
588
	}
4560 Serge 589
 
5060 serge 590
	/* Convert to pixel count */
4560 Serge 591
		vbl_start *= htotal;
592
 
5060 serge 593
	/* Start of vblank event occurs at start of hsync */
594
	vbl_start -= htotal - hsync_start;
595
 
3031 serge 596
	high_frame = PIPEFRAME(pipe);
597
	low_frame = PIPEFRAMEPIXEL(pipe);
598
 
599
	/*
600
	 * High & low register fields aren't synchronized, so make sure
601
	 * we get a low value that's stable across two reads of the high
602
	 * register.
603
	 */
604
	do {
605
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
4560 Serge 606
		low   = I915_READ(low_frame);
3031 serge 607
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
608
	} while (high1 != high2);
609
 
610
	high1 >>= PIPE_FRAME_HIGH_SHIFT;
4560 Serge 611
	pixel = low & PIPE_PIXEL_MASK;
3031 serge 612
	low >>= PIPE_FRAME_LOW_SHIFT;
4560 Serge 613
 
614
	/*
615
	 * The frame counter increments at beginning of active.
616
	 * Cook up a vblank counter by also checking the pixel
617
	 * counter against vblank start.
618
	 */
619
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
3031 serge 620
}
621
 
622
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
623
{
5060 serge 624
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 625
	int reg = PIPE_FRMCOUNT_GM45(pipe);
626
 
627
	if (!i915_pipe_enabled(dev, pipe)) {
628
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
629
				 "pipe %c\n", pipe_name(pipe));
630
		return 0;
631
	}
632
 
633
	return I915_READ(reg);
634
}
635
 
4560 Serge 636
/* raw reads, only for fast reads of display block, no need for forcewake etc. */
637
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
638
 
5060 serge 639
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
4560 Serge 640
{
5060 serge 641
	struct drm_device *dev = crtc->base.dev;
4560 Serge 642
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 643
	const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
644
	enum pipe pipe = crtc->pipe;
645
	int position, vtotal;
4560 Serge 646
 
5060 serge 647
	vtotal = mode->crtc_vtotal;
648
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
649
		vtotal /= 2;
4560 Serge 650
 
5060 serge 651
	if (IS_GEN2(dev))
652
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
653
	else
654
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
655
 
656
	/*
657
	 * See update_scanline_offset() for the details on the
658
	 * scanline_offset adjustment.
659
	 */
660
	return (position + crtc->scanline_offset) % vtotal;
4560 Serge 661
}
662
 
3746 Serge 663
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
4560 Serge 664
				    unsigned int flags, int *vpos, int *hpos,
5060 serge 665
				    void *stime, void *etime)
3746 Serge 666
{
4560 Serge 667
	struct drm_i915_private *dev_priv = dev->dev_private;
668
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
669
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
670
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
671
	int position;
5060 serge 672
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
3746 Serge 673
	bool in_vbl = true;
674
	int ret = 0;
4560 Serge 675
	unsigned long irqflags;
3746 Serge 676
 
4560 Serge 677
	if (!intel_crtc->active) {
3746 Serge 678
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
679
				 "pipe %c\n", pipe_name(pipe));
680
		return 0;
681
	}
682
 
4560 Serge 683
	htotal = mode->crtc_htotal;
5060 serge 684
	hsync_start = mode->crtc_hsync_start;
4560 Serge 685
	vtotal = mode->crtc_vtotal;
686
	vbl_start = mode->crtc_vblank_start;
687
	vbl_end = mode->crtc_vblank_end;
3746 Serge 688
 
4560 Serge 689
	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
690
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
691
		vbl_end /= 2;
692
		vtotal /= 2;
693
	}
694
 
695
	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
696
 
697
	/*
698
	 * Lock uncore.lock, as we will do multiple timing critical raw
699
	 * register reads, potentially with preemption disabled, so the
700
	 * following code must not block on uncore.lock.
701
	 */
702
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
703
 
704
	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
705
 
706
 
707
	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
3746 Serge 708
		/* No obvious pixelcount register. Only query vertical
709
		 * scanout position from Display scan line register.
710
		 */
5060 serge 711
		position = __intel_get_crtc_scanline(intel_crtc);
3746 Serge 712
	} else {
713
		/* Have access to pixelcount since start of frame.
714
		 * We can split this into vertical and horizontal
715
		 * scanout position.
716
		 */
4560 Serge 717
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
3746 Serge 718
 
4560 Serge 719
		/* convert to pixel counts */
720
		vbl_start *= htotal;
721
		vbl_end *= htotal;
722
		vtotal *= htotal;
5060 serge 723
 
724
		/*
725
		 * In interlaced modes, the pixel counter counts all pixels,
726
		 * so one field will have htotal more pixels. In order to avoid
727
		 * the reported position from jumping backwards when the pixel
728
		 * counter is beyond the length of the shorter field, just
729
		 * clamp the position the length of the shorter field. This
730
		 * matches how the scanline counter based position works since
731
		 * the scanline counter doesn't count the two half lines.
732
		 */
733
		if (position >= vtotal)
734
			position = vtotal - 1;
735
 
736
		/*
737
		 * Start of vblank interrupt is triggered at start of hsync,
738
		 * just prior to the first active line of vblank. However we
739
		 * consider lines to start at the leading edge of horizontal
740
		 * active. So, should we get here before we've crossed into
741
		 * the horizontal active of the first line in vblank, we would
742
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
743
		 * always add htotal-hsync_start to the current pixel position.
744
		 */
745
		position = (position + htotal - hsync_start) % vtotal;
3746 Serge 746
	}
747
 
748
 
4560 Serge 749
	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
3746 Serge 750
 
4560 Serge 751
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
3746 Serge 752
 
4560 Serge 753
	in_vbl = position >= vbl_start && position < vbl_end;
3746 Serge 754
 
4560 Serge 755
	/*
756
	 * While in vblank, position will be negative
757
	 * counting up towards 0 at vbl_end. And outside
758
	 * vblank, position will be positive counting
759
	 * up since vbl_end.
760
	 */
761
	if (position >= vbl_start)
762
		position -= vbl_end;
763
	else
764
		position += vtotal - vbl_end;
3746 Serge 765
 
4560 Serge 766
	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
767
		*vpos = position;
768
		*hpos = 0;
769
	} else {
770
		*vpos = position / htotal;
771
		*hpos = position - (*vpos * htotal);
772
	}
773
 
3746 Serge 774
	/* In vblank? */
775
	if (in_vbl)
5354 serge 776
		ret |= DRM_SCANOUTPOS_IN_VBLANK;
3746 Serge 777
 
778
	return ret;
779
}
780
 
5060 serge 781
int intel_get_crtc_scanline(struct intel_crtc *crtc)
782
{
783
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
784
	unsigned long irqflags;
785
	int position;
786
 
787
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
788
	position = __intel_get_crtc_scanline(crtc);
789
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
790
 
791
	return position;
792
}
793
 
3746 Serge 794
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
795
			      int *max_error,
796
			      struct timeval *vblank_time,
797
			      unsigned flags)
798
{
799
	struct drm_crtc *crtc;
800
 
801
	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
802
		DRM_ERROR("Invalid crtc %d\n", pipe);
803
		return -EINVAL;
804
	}
805
 
806
	/* Get drm_crtc to timestamp: */
807
	crtc = intel_get_crtc_for_pipe(dev, pipe);
808
	if (crtc == NULL) {
809
		DRM_ERROR("Invalid crtc %d\n", pipe);
810
		return -EINVAL;
811
	}
812
 
813
	if (!crtc->enabled) {
814
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
815
		return -EBUSY;
816
	}
817
 
818
	/* Helper routine in DRM core does all the work: */
819
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
820
						     vblank_time, flags,
4560 Serge 821
						     crtc,
822
						     &to_intel_crtc(crtc)->config.adjusted_mode);
3746 Serge 823
}
824
 
4560 Serge 825
static bool intel_hpd_irq_event(struct drm_device *dev,
826
				struct drm_connector *connector)
4104 Serge 827
{
828
	enum drm_connector_status old_status;
829
 
830
	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
831
	old_status = connector->status;
832
 
833
	connector->status = connector->funcs->detect(connector, false);
4560 Serge 834
	if (old_status == connector->status)
835
		return false;
836
 
837
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
4104 Serge 838
		      connector->base.id,
5060 serge 839
		      connector->name,
4560 Serge 840
		      drm_get_connector_status_name(old_status),
841
		      drm_get_connector_status_name(connector->status));
842
 
843
	return true;
4104 Serge 844
}
845
 
3480 Serge 846
/*
847
 * Handle hotplug events outside the interrupt handler proper.
848
 */
3746 Serge 849
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
850
 
3480 Serge 851
static void i915_hotplug_work_func(struct work_struct *work)
852
{
5060 serge 853
	struct drm_i915_private *dev_priv =
854
		container_of(work, struct drm_i915_private, hotplug_work);
3480 Serge 855
	struct drm_device *dev = dev_priv->dev;
856
	struct drm_mode_config *mode_config = &dev->mode_config;
3746 Serge 857
	struct intel_connector *intel_connector;
858
	struct intel_encoder *intel_encoder;
859
	struct drm_connector *connector;
860
	bool hpd_disabled = false;
4104 Serge 861
	bool changed = false;
862
	u32 hpd_event_bits;
3031 serge 863
 
3480 Serge 864
	mutex_lock(&mode_config->mutex);
865
	DRM_DEBUG_KMS("running encoder hotplug functions\n");
866
 
5354 serge 867
	spin_lock_irq(&dev_priv->irq_lock);
4104 Serge 868
 
869
	hpd_event_bits = dev_priv->hpd_event_bits;
870
	dev_priv->hpd_event_bits = 0;
3746 Serge 871
	list_for_each_entry(connector, &mode_config->connector_list, head) {
872
		intel_connector = to_intel_connector(connector);
5060 serge 873
		if (!intel_connector->encoder)
874
			continue;
3746 Serge 875
		intel_encoder = intel_connector->encoder;
876
		if (intel_encoder->hpd_pin > HPD_NONE &&
877
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
878
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
879
			DRM_INFO("HPD interrupt storm detected on connector %s: "
880
				 "switching from hotplug detection to polling\n",
5060 serge 881
				connector->name);
3746 Serge 882
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
883
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
884
				| DRM_CONNECTOR_POLL_DISCONNECT;
885
			hpd_disabled = true;
886
		}
4104 Serge 887
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
888
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
5060 serge 889
				      connector->name, intel_encoder->hpd_pin);
4104 Serge 890
		}
3746 Serge 891
	}
892
	 /* if there were no outputs to poll, poll was disabled,
893
	  * therefore make sure it's enabled when disabling HPD on
894
	  * some connectors */
3480 Serge 895
 
5354 serge 896
	spin_unlock_irq(&dev_priv->irq_lock);
3746 Serge 897
 
4104 Serge 898
	list_for_each_entry(connector, &mode_config->connector_list, head) {
899
		intel_connector = to_intel_connector(connector);
5060 serge 900
		if (!intel_connector->encoder)
901
			continue;
4104 Serge 902
		intel_encoder = intel_connector->encoder;
903
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
3746 Serge 904
			if (intel_encoder->hot_plug)
905
				intel_encoder->hot_plug(intel_encoder);
4104 Serge 906
			if (intel_hpd_irq_event(dev, connector))
907
				changed = true;
908
		}
909
	}
3480 Serge 910
	mutex_unlock(&mode_config->mutex);
911
 
912
}
913
 
4104 Serge 914
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
3746 Serge 915
{
5060 serge 916
	struct drm_i915_private *dev_priv = dev->dev_private;
3746 Serge 917
	u32 busy_up, busy_down, max_avg, min_avg;
918
	u8 new_delay;
919
 
4104 Serge 920
	spin_lock(&mchdev_lock);
3746 Serge 921
 
922
	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
923
 
924
	new_delay = dev_priv->ips.cur_delay;
925
 
926
	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
927
	busy_up = I915_READ(RCPREVBSYTUPAVG);
928
	busy_down = I915_READ(RCPREVBSYTDNAVG);
929
	max_avg = I915_READ(RCBMAXAVG);
930
	min_avg = I915_READ(RCBMINAVG);
931
 
932
	/* Handle RCS change request from hw */
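	/*
	 * Note that a smaller "delay" value corresponds to a higher frequency
	 * here, so ips.max_delay is the numerically smallest step allowed and
	 * ips.min_delay the largest.
	 */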
933
	if (busy_up > max_avg) {
934
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
935
			new_delay = dev_priv->ips.cur_delay - 1;
936
		if (new_delay < dev_priv->ips.max_delay)
937
			new_delay = dev_priv->ips.max_delay;
938
	} else if (busy_down < min_avg) {
939
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
940
			new_delay = dev_priv->ips.cur_delay + 1;
941
		if (new_delay > dev_priv->ips.min_delay)
942
			new_delay = dev_priv->ips.min_delay;
943
	}
944
 
945
	if (ironlake_set_drps(dev, new_delay))
946
		dev_priv->ips.cur_delay = new_delay;
947
 
4104 Serge 948
	spin_unlock(&mchdev_lock);
3746 Serge 949
 
950
	return;
951
}
952
 
2352 Serge 953
static void notify_ring(struct drm_device *dev,
5060 serge 954
			struct intel_engine_cs *ring)
2352 Serge 955
{
5060 serge 956
	if (!intel_ring_initialized(ring))
2352 Serge 957
		return;
2351 Serge 958
 
4560 Serge 959
	trace_i915_gem_request_complete(ring);
2351 Serge 960
 
2352 Serge 961
	wake_up_all(&ring->irq_queue);
962
}
963
 
5060 serge 964
static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
965
			    struct intel_rps_ei *rps_ei)
966
{
967
	u32 cz_ts, cz_freq_khz;
968
	u32 render_count, media_count;
969
	u32 elapsed_render, elapsed_media, elapsed_time;
970
	u32 residency = 0;
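	/*
	 * Residency is reported as the share of the elapsed EI window that the
	 * busier of the render/media wells spent in C0, as a percentage.
	 */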
971
 
972
	cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
973
	cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);
974
 
975
	render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
976
	media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);
977
 
978
	if (rps_ei->cz_clock == 0) {
979
		rps_ei->cz_clock = cz_ts;
980
		rps_ei->render_c0 = render_count;
981
		rps_ei->media_c0 = media_count;
982
 
983
		return dev_priv->rps.cur_freq;
984
	}
985
 
986
	elapsed_time = cz_ts - rps_ei->cz_clock;
987
	rps_ei->cz_clock = cz_ts;
988
 
989
	elapsed_render = render_count - rps_ei->render_c0;
990
	rps_ei->render_c0 = render_count;
991
 
992
	elapsed_media = media_count - rps_ei->media_c0;
993
	rps_ei->media_c0 = media_count;
994
 
995
	/* Convert all the counters into a common unit of milliseconds */
996
	elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
997
	elapsed_render /=  cz_freq_khz;
998
	elapsed_media /= cz_freq_khz;
999
 
1000
	/*
1001
	 * Calculate overall C0 residency percentage
1002
	 * only if elapsed time is non zero
1003
	 */
1004
	if (elapsed_time) {
1005
		residency =
1006
			((max(elapsed_render, elapsed_media) * 100)
1007
				/ elapsed_time);
1008
	}
1009
 
1010
	return residency;
1011
}
1012
 
1013
/**
1014
 * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
1015
 * busy-ness calculated from C0 counters of render & media power wells
1016
 * @dev_priv: DRM device private
1017
 *
1018
 */
5354 serge 1019
static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
5060 serge 1020
{
1021
	u32 residency_C0_up = 0, residency_C0_down = 0;
5354 serge 1022
	int new_delay, adj;
5060 serge 1023
 
1024
	dev_priv->rps.ei_interrupt_count++;
1025
 
1026
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
1027
 
1028
 
1029
	if (dev_priv->rps.up_ei.cz_clock == 0) {
1030
		vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
1031
		vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
1032
		return dev_priv->rps.cur_freq;
1033
	}
1034
 
1035
 
1036
	/*
1037
	 * To down throttle, C0 residency should be less than down threshold
1038
	 * for continous EI intervals. So calculate down EI counters
1039
	 * once in VLV_INT_COUNT_FOR_DOWN_EI
1040
	 */
1041
	if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {
1042
 
1043
		dev_priv->rps.ei_interrupt_count = 0;
1044
 
1045
		residency_C0_down = vlv_c0_residency(dev_priv,
1046
						     &dev_priv->rps.down_ei);
1047
	} else {
1048
		residency_C0_up = vlv_c0_residency(dev_priv,
1049
						   &dev_priv->rps.up_ei);
1050
	}
1051
 
1052
	new_delay = dev_priv->rps.cur_freq;
1053
 
1054
	adj = dev_priv->rps.last_adj;
1055
	/* C0 residency is greater than UP threshold. Increase Frequency */
1056
	if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
1057
		if (adj > 0)
1058
			adj *= 2;
1059
		else
1060
			adj = 1;
1061
 
1062
		if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
1063
			new_delay = dev_priv->rps.cur_freq + adj;
1064
 
1065
		/*
1066
		 * For better performance, jump directly
1067
		 * to RPe if we're below it.
1068
		 */
1069
		if (new_delay < dev_priv->rps.efficient_freq)
1070
			new_delay = dev_priv->rps.efficient_freq;
1071
 
1072
	} else if (!dev_priv->rps.ei_interrupt_count &&
1073
			(residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
1074
		if (adj < 0)
1075
			adj *= 2;
1076
		else
1077
			adj = -1;
1078
		/*
1079
		 * This means, C0 residency is less than down threshold over
1080
		 * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq
1081
		 */
1082
		if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
1083
			new_delay = dev_priv->rps.cur_freq + adj;
1084
	}
1085
 
1086
	return new_delay;
1087
}
1088
 
3031 serge 1089
static void gen6_pm_rps_work(struct work_struct *work)
1090
{
5060 serge 1091
	struct drm_i915_private *dev_priv =
1092
		container_of(work, struct drm_i915_private, rps.work);
4104 Serge 1093
	u32 pm_iir;
4560 Serge 1094
	int new_delay, adj;
2352 Serge 1095
 
4104 Serge 1096
	spin_lock_irq(&dev_priv->irq_lock);
5354 serge 1097
	/* Speed up work cancelation during disabling rps interrupts. */
1098
	if (!dev_priv->rps.interrupts_enabled) {
1099
		spin_unlock_irq(&dev_priv->irq_lock);
1100
		return;
1101
	}
3031 serge 1102
	pm_iir = dev_priv->rps.pm_iir;
1103
	dev_priv->rps.pm_iir = 0;
5354 serge 1104
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
5060 serge 1105
		gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
4104 Serge 1106
	spin_unlock_irq(&dev_priv->irq_lock);
2352 Serge 1107
 
4104 Serge 1108
	/* Make sure we didn't queue anything we're not going to process. */
5060 serge 1109
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
4104 Serge 1110
 
5060 serge 1111
	if ((pm_iir & dev_priv->pm_rps_events) == 0)
3031 serge 1112
		return;
1113
 
3243 Serge 1114
	mutex_lock(&dev_priv->rps.hw_lock);
3031 serge 1115
 
4560 Serge 1116
	adj = dev_priv->rps.last_adj;
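	/*
	 * last_adj doubles while consecutive interrupts keep pushing the
	 * frequency in the same direction, giving an exponential ramp; it is
	 * reset to 0 on a down-timeout.
	 */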
4104 Serge 1117
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
4560 Serge 1118
		if (adj > 0)
1119
			adj *= 2;
5060 serge 1120
		else {
1121
			/* CHV needs even encode values */
1122
			adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
1123
		}
1124
		new_delay = dev_priv->rps.cur_freq + adj;
4104 Serge 1125
 
1126
		/*
1127
		 * For better performance, jump directly
1128
		 * to RPe if we're below it.
1129
		 */
5060 serge 1130
		if (new_delay < dev_priv->rps.efficient_freq)
1131
			new_delay = dev_priv->rps.efficient_freq;
4560 Serge 1132
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
5060 serge 1133
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
1134
			new_delay = dev_priv->rps.efficient_freq;
4560 Serge 1135
		else
5060 serge 1136
			new_delay = dev_priv->rps.min_freq_softlimit;
4560 Serge 1137
		adj = 0;
5060 serge 1138
	} else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
1139
		new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
4560 Serge 1140
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1141
		if (adj < 0)
1142
			adj *= 2;
5060 serge 1143
		else {
1144
			/* CHV needs even encode values */
1145
			adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
1146
		}
1147
		new_delay = dev_priv->rps.cur_freq + adj;
4560 Serge 1148
	} else { /* unknown event */
5060 serge 1149
		new_delay = dev_priv->rps.cur_freq;
4560 Serge 1150
	}
3031 serge 1151
 
1152
	/* sysfs frequency interfaces may have snuck in while servicing the
1153
	 * interrupt
1154
	 */
4560 Serge 1155
	new_delay = clamp_t(int, new_delay,
5060 serge 1156
			    dev_priv->rps.min_freq_softlimit,
1157
			    dev_priv->rps.max_freq_softlimit);
4560 Serge 1158
 
5060 serge 1159
	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;
1160
 
4104 Serge 1161
	if (IS_VALLEYVIEW(dev_priv->dev))
1162
		valleyview_set_rps(dev_priv->dev, new_delay);
1163
	else
3031 serge 1164
		gen6_set_rps(dev_priv->dev, new_delay);
1165
 
3243 Serge 1166
	mutex_unlock(&dev_priv->rps.hw_lock);
3031 serge 1167
}
1168
 
1169
 
1170
/**
1171
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
1172
 * occurred.
1173
 * @work: workqueue struct
1174
 *
1175
 * Doesn't actually do anything except notify userspace. As a consequence of
1176
 * this event, userspace should try to remap the bad rows since statistically
1177
 * the same row is more likely to go bad again.
1178
 */
1179
static void ivybridge_parity_work(struct work_struct *work)
2351 Serge 1180
{
5060 serge 1181
	struct drm_i915_private *dev_priv =
1182
		container_of(work, struct drm_i915_private, l3_parity.error_work);
3031 serge 1183
	u32 error_status, row, bank, subbank;
4560 Serge 1184
	char *parity_event[6];
3031 serge 1185
	uint32_t misccpctl;
4560 Serge 1186
	uint8_t slice = 0;
3031 serge 1187
 
1188
	/* We must turn off DOP level clock gating to access the L3 registers.
1189
	 * In order to prevent a get/put style interface, acquire struct mutex
1190
	 * any time we access those registers.
1191
	 */
1192
	mutex_lock(&dev_priv->dev->struct_mutex);
1193
 
4560 Serge 1194
	/* If we've screwed up tracking, just let the interrupt fire again */
1195
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
1196
		goto out;
1197
 
3031 serge 1198
	misccpctl = I915_READ(GEN7_MISCCPCTL);
1199
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1200
	POSTING_READ(GEN7_MISCCPCTL);
1201
 
4560 Serge 1202
	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1203
		u32 reg;
1204
 
1205
		slice--;
1206
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
1207
			break;
1208
 
1209
		dev_priv->l3_parity.which_slice &= ~(1<<slice);
1210
 
1211
		reg = GEN7_L3CDERRST1 + (slice * 0x200);
1212
 
1213
		error_status = I915_READ(reg);
3031 serge 1214
		row = GEN7_PARITY_ERROR_ROW(error_status);
1215
		bank = GEN7_PARITY_ERROR_BANK(error_status);
1216
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1217
 
4560 Serge 1218
		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1219
		POSTING_READ(reg);
3031 serge 1220
 
4560 Serge 1221
		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1222
			  slice, row, bank, subbank);
1223
 
1224
	}
1225
 
3031 serge 1226
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1227
 
4560 Serge 1228
out:
1229
	WARN_ON(dev_priv->l3_parity.which_slice);
5354 serge 1230
	spin_lock_irq(&dev_priv->irq_lock);
5060 serge 1231
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
5354 serge 1232
	spin_unlock_irq(&dev_priv->irq_lock);
3031 serge 1233
 
1234
	mutex_unlock(&dev_priv->dev->struct_mutex);
1235
}
1236
 
4560 Serge 1237
static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
3031 serge 1238
{
5060 serge 1239
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 1240
 
4560 Serge 1241
	if (!HAS_L3_DPF(dev))
3031 serge 1242
		return;
1243
 
4104 Serge 1244
	spin_lock(&dev_priv->irq_lock);
5060 serge 1245
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
4104 Serge 1246
	spin_unlock(&dev_priv->irq_lock);
3031 serge 1247
 
4560 Serge 1248
	iir &= GT_PARITY_ERROR(dev);
1249
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1250
		dev_priv->l3_parity.which_slice |= 1 << 1;
1251
 
1252
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1253
		dev_priv->l3_parity.which_slice |= 1 << 0;
1254
 
3243 Serge 1255
	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
3031 serge 1256
}
1257
 
4104 Serge 1258
static void ilk_gt_irq_handler(struct drm_device *dev,
1259
			       struct drm_i915_private *dev_priv,
1260
			       u32 gt_iir)
1261
{
1262
	if (gt_iir &
1263
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1264
		notify_ring(dev, &dev_priv->ring[RCS]);
1265
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
1266
		notify_ring(dev, &dev_priv->ring[VCS]);
1267
}
1268
 
3031 serge 1269
static void snb_gt_irq_handler(struct drm_device *dev,
1270
			       struct drm_i915_private *dev_priv,
1271
			       u32 gt_iir)
1272
{
1273
 
4104 Serge 1274
	if (gt_iir &
1275
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
3031 serge 1276
		notify_ring(dev, &dev_priv->ring[RCS]);
4104 Serge 1277
	if (gt_iir & GT_BSD_USER_INTERRUPT)
3031 serge 1278
		notify_ring(dev, &dev_priv->ring[VCS]);
4104 Serge 1279
	if (gt_iir & GT_BLT_USER_INTERRUPT)
3031 serge 1280
		notify_ring(dev, &dev_priv->ring[BCS]);
1281
 
4104 Serge 1282
	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1283
		      GT_BSD_CS_ERROR_INTERRUPT |
5354 serge 1284
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1285
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
3031 serge 1286
 
4560 Serge 1287
	if (gt_iir & GT_PARITY_ERROR(dev))
1288
		ivybridge_parity_error_irq_handler(dev, gt_iir);
3031 serge 1289
}
1290
 
4560 Serge 1291
static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
1292
				       struct drm_i915_private *dev_priv,
1293
				       u32 master_ctl)
1294
{
5354 serge 1295
	struct intel_engine_cs *ring;
4560 Serge 1296
	u32 rcs, bcs, vcs;
1297
	uint32_t tmp = 0;
1298
	irqreturn_t ret = IRQ_NONE;
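	/*
	 * GEN8 spreads the GT interrupts over four IIR banks: 0 = render and
	 * blitter, 1 = the two video (BSD) rings, 2 = PM/RPS events and
	 * 3 = video enhancement. Each bank is handled in turn below.
	 */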
1299
 
1300
	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1301
		tmp = I915_READ(GEN8_GT_IIR(0));
1302
		if (tmp) {
5060 serge 1303
			I915_WRITE(GEN8_GT_IIR(0), tmp);
4560 Serge 1304
			ret = IRQ_HANDLED;
5354 serge 1305
 
4560 Serge 1306
			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
5354 serge 1307
			ring = &dev_priv->ring[RCS];
1308
			if (rcs & GT_RENDER_USER_INTERRUPT)
1309
				notify_ring(dev, ring);
1310
			if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
1311
				intel_execlists_handle_ctx_events(ring);
1312
 
4560 Serge 1313
			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
5354 serge 1314
			ring = &dev_priv->ring[BCS];
4560 Serge 1315
			if (bcs & GT_RENDER_USER_INTERRUPT)
5354 serge 1316
				notify_ring(dev, ring);
1317
			if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
1318
				intel_execlists_handle_ctx_events(ring);
4560 Serge 1319
		} else
1320
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
1321
	}
1322
 
5060 serge 1323
	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
4560 Serge 1324
		tmp = I915_READ(GEN8_GT_IIR(1));
1325
		if (tmp) {
5060 serge 1326
			I915_WRITE(GEN8_GT_IIR(1), tmp);
4560 Serge 1327
			ret = IRQ_HANDLED;
5354 serge 1328
 
4560 Serge 1329
			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
5354 serge 1330
			ring = &dev_priv->ring[VCS];
4560 Serge 1331
			if (vcs & GT_RENDER_USER_INTERRUPT)
5354 serge 1332
				notify_ring(dev, ring);
1333
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
1334
				intel_execlists_handle_ctx_events(ring);
1335
 
5060 serge 1336
			vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
5354 serge 1337
			ring = &dev_priv->ring[VCS2];
5060 serge 1338
			if (vcs & GT_RENDER_USER_INTERRUPT)
5354 serge 1339
				notify_ring(dev, ring);
1340
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
1341
				intel_execlists_handle_ctx_events(ring);
4560 Serge 1342
		} else
1343
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
1344
	}
1345
 
5060 serge 1346
	if (master_ctl & GEN8_GT_PM_IRQ) {
1347
		tmp = I915_READ(GEN8_GT_IIR(2));
1348
		if (tmp & dev_priv->pm_rps_events) {
1349
			I915_WRITE(GEN8_GT_IIR(2),
1350
				   tmp & dev_priv->pm_rps_events);
1351
			ret = IRQ_HANDLED;
5354 serge 1352
			gen6_rps_irq_handler(dev_priv, tmp);
5060 serge 1353
		} else
1354
			DRM_ERROR("The master control interrupt lied (PM)!\n");
1355
	}
1356
 
4560 Serge 1357
	if (master_ctl & GEN8_GT_VECS_IRQ) {
1358
		tmp = I915_READ(GEN8_GT_IIR(3));
1359
		if (tmp) {
5060 serge 1360
			I915_WRITE(GEN8_GT_IIR(3), tmp);
4560 Serge 1361
			ret = IRQ_HANDLED;
5354 serge 1362
 
4560 Serge 1363
			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
5354 serge 1364
			ring = &dev_priv->ring[VECS];
4560 Serge 1365
			if (vcs & GT_RENDER_USER_INTERRUPT)
5354 serge 1366
				notify_ring(dev, ring);
1367
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
1368
				intel_execlists_handle_ctx_events(ring);
4560 Serge 1369
		} else
1370
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
1371
	}
1372
 
1373
	return ret;
1374
}
1375
 
3746 Serge 1376
#define HPD_STORM_DETECT_PERIOD 1000
1377
#define HPD_STORM_THRESHOLD 5
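/*
 * More than HPD_STORM_THRESHOLD interrupts on one pin inside an
 * HPD_STORM_DETECT_PERIOD ms window is treated as an interrupt storm: the pin
 * is marked disabled and the hotplug work switches that connector to polling.
 */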
1378
 
5354 serge 1379
static int pch_port_to_hotplug_shift(enum port port)
5060 serge 1380
{
1381
	switch (port) {
1382
	case PORT_A:
1383
	case PORT_E:
1384
	default:
1385
		return -1;
1386
	case PORT_B:
1387
		return 0;
1388
	case PORT_C:
1389
		return 8;
1390
	case PORT_D:
1391
		return 16;
1392
	}
1393
}
1394
 
5354 serge 1395
static int i915_port_to_hotplug_shift(enum port port)
5060 serge 1396
{
1397
	switch (port) {
1398
	case PORT_A:
1399
	case PORT_E:
1400
	default:
1401
		return -1;
1402
	case PORT_B:
1403
		return 17;
1404
	case PORT_C:
1405
		return 19;
1406
	case PORT_D:
1407
		return 21;
1408
	}
1409
}
1410
 
1411
static inline enum port get_port_from_pin(enum hpd_pin pin)
1412
{
1413
	switch (pin) {
1414
	case HPD_PORT_B:
1415
		return PORT_B;
1416
	case HPD_PORT_C:
1417
		return PORT_C;
1418
	case HPD_PORT_D:
1419
		return PORT_D;
1420
	default:
1421
		return PORT_A; /* no hpd */
1422
	}
1423
}
1424
 
4104 Serge 1425
static inline void intel_hpd_irq_handler(struct drm_device *dev,
3746 Serge 1426
					    u32 hotplug_trigger,
5060 serge 1427
					 u32 dig_hotplug_reg,
3746 Serge 1428
					    const u32 *hpd)
1429
{
5060 serge 1430
	struct drm_i915_private *dev_priv = dev->dev_private;
3746 Serge 1431
	int i;
5060 serge 1432
	enum port port;
4104 Serge 1433
	bool storm_detected = false;
5060 serge 1434
	bool queue_dig = false, queue_hp = false;
1435
	u32 dig_shift;
1436
	u32 dig_port_mask = 0;
3746 Serge 1437
 
4104 Serge 1438
	if (!hotplug_trigger)
1439
		return;
3746 Serge 1440
 
5060 serge 1441
	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
1442
			 hotplug_trigger, dig_hotplug_reg);
1443
 
4104 Serge 1444
	spin_lock(&dev_priv->irq_lock);
3746 Serge 1445
	for (i = 1; i < HPD_NUM_PINS; i++) {
5060 serge 1446
		if (!(hpd[i] & hotplug_trigger))
1447
			continue;
3746 Serge 1448
 
5060 serge 1449
		port = get_port_from_pin(i);
1450
		if (port && dev_priv->hpd_irq_port[port]) {
1451
			bool long_hpd;
1452
 
5354 serge 1453
			if (HAS_PCH_SPLIT(dev)) {
1454
				dig_shift = pch_port_to_hotplug_shift(port);
1455
				long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
1456
			} else {
1457
				dig_shift = i915_port_to_hotplug_shift(port);
5060 serge 1458
				long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
1459
			}
1460
 
5354 serge 1461
			DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
1462
					 port_name(port),
1463
					 long_hpd ? "long" : "short");
5060 serge 1464
			/* for long HPD pulses we want to have the digital queue happen,
1465
			   but we still want HPD storm detection to function. */
1466
			if (long_hpd) {
1467
				dev_priv->long_hpd_port_mask |= (1 << port);
1468
				dig_port_mask |= hpd[i];
1469
			} else {
1470
				/* for short HPD just trigger the digital queue */
1471
				dev_priv->short_hpd_port_mask |= (1 << port);
1472
				hotplug_trigger &= ~hpd[i];
1473
			}
1474
			queue_dig = true;
1475
		}
1476
	}
1477
 
1478
	for (i = 1; i < HPD_NUM_PINS; i++) {
1479
		if (hpd[i] & hotplug_trigger &&
1480
		    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
1481
			/*
1482
			 * On GMCH platforms the interrupt mask bits only
1483
			 * prevent irq generation, not the setting of the
1484
			 * hotplug bits itself. So only WARN about unexpected
1485
			 * interrupts on saner platforms.
1486
			 */
1487
			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
4560 Serge 1488
			  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
1489
			  hotplug_trigger, i, hpd[i]);
4104 Serge 1490
 
5060 serge 1491
			continue;
1492
		}
1493
 
3746 Serge 1494
		if (!(hpd[i] & hotplug_trigger) ||
1495
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
1496
			continue;
1497
 
5060 serge 1498
		if (!(dig_port_mask & hpd[i])) {
4104 Serge 1499
			dev_priv->hpd_event_bits |= (1 << i);
5060 serge 1500
			queue_hp = true;
1501
		}
1502
 
1503
		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
4126 Serge 1504
				   dev_priv->hpd_stats[i].hpd_last_jiffies
1505
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
5060 serge 1506
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
4126 Serge 1507
			dev_priv->hpd_stats[i].hpd_cnt = 0;
1508
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
1509
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
1510
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
1511
			dev_priv->hpd_event_bits &= ~(1 << i);
1512
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
1513
			storm_detected = true;
1514
		} else {
3746 Serge 1515
			dev_priv->hpd_stats[i].hpd_cnt++;
4126 Serge 1516
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
1517
				      dev_priv->hpd_stats[i].hpd_cnt);
1518
		}
3746 Serge 1519
	}
1520
 
4104 Serge 1521
	if (storm_detected)
1522
		dev_priv->display.hpd_irq_setup(dev);
1523
	spin_unlock(&dev_priv->irq_lock);
3746 Serge 1524
 
4126 Serge 1525
	/*
1526
	 * Our hotplug handler can grab modeset locks (by calling down into the
1527
	 * fb helpers). Hence it must not be run on our own dev_priv->wq work
1528
	 * queue for otherwise the flush_work in the pageflip code will
1529
	 * deadlock.
1530
	 */
5060 serge 1531
	if (queue_hp)
1532
		schedule_work(&dev_priv->hotplug_work);
3746 Serge 1533
}
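/*
 * The loop above doubles as a per-pin rate limiter. A distilled sketch of
 * that storm check follows; it is an illustration only, assuming the
 * HPD_STORM_DETECT_PERIOD (in ms) and HPD_STORM_THRESHOLD values from
 * i915_drv.h, with a made-up stats struct standing in for the fields kept
 * in dev_priv->hpd_stats[].
 */
#if 0
static bool hpd_pin_is_storming(struct hpd_pin_stats *stats)
{
	unsigned long window = msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);

	if (!time_in_range(jiffies, stats->hpd_last_jiffies,
			   stats->hpd_last_jiffies + window)) {
		/* outside the window: restart counting from this pulse */
		stats->hpd_last_jiffies = jiffies;
		stats->hpd_cnt = 0;
		return false;
	}

	/* too many pulses inside one window: the pin is storming */
	return ++stats->hpd_cnt > HPD_STORM_THRESHOLD;
}
#endif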
1534
 
3480 Serge 1535
static void gmbus_irq_handler(struct drm_device *dev)
1536
{
5060 serge 1537
	struct drm_i915_private *dev_priv = dev->dev_private;
3480 Serge 1538
 
1539
	wake_up_all(&dev_priv->gmbus_wait_queue);
1540
}
1541
 
1542
static void dp_aux_irq_handler(struct drm_device *dev)
1543
{
5060 serge 1544
	struct drm_i915_private *dev_priv = dev->dev_private;
3480 Serge 1545
 
1546
	wake_up_all(&dev_priv->gmbus_wait_queue);
1547
}
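/*
 * Both handlers above only wake dev_priv->gmbus_wait_queue: the GMBUS and DP
 * AUX transfer code sleeps on that queue with a timeout and re-reads the
 * hardware status itself, so no payload has to be handed over here. A sketch
 * of the waiting side is below; it is an illustration only, and
 * transfer_done() is a stand-in for the real status check.
 */
#if 0
	long ret;

	ret = wait_event_timeout(dev_priv->gmbus_wait_queue,
				 transfer_done(dev_priv),
				 msecs_to_jiffies(50));
	if (ret == 0)
		DRM_DEBUG_KMS("gmbus/aux completion wait timed out\n");
#endif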
1548
 
4560 Serge 1549
#if defined(CONFIG_DEBUG_FS)
1550
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1551
					 uint32_t crc0, uint32_t crc1,
1552
					 uint32_t crc2, uint32_t crc3,
1553
					 uint32_t crc4)
1554
{
1555
	struct drm_i915_private *dev_priv = dev->dev_private;
1556
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1557
	struct intel_pipe_crc_entry *entry;
1558
	int head, tail;
1559
 
1560
	spin_lock(&pipe_crc->lock);
1561
 
1562
	if (!pipe_crc->entries) {
1563
		spin_unlock(&pipe_crc->lock);
5354 serge 1564
		DRM_DEBUG_KMS("spurious interrupt\n");
4560 Serge 1565
		return;
1566
	}
1567
 
1568
	head = pipe_crc->head;
1569
	tail = pipe_crc->tail;
1570
 
1571
	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1572
		spin_unlock(&pipe_crc->lock);
1573
		DRM_ERROR("CRC buffer overflowing\n");
1574
		return;
1575
	}
1576
 
1577
	entry = &pipe_crc->entries[head];
1578
 
1579
	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
1580
	entry->crc[0] = crc0;
1581
	entry->crc[1] = crc1;
1582
	entry->crc[2] = crc2;
1583
	entry->crc[3] = crc3;
1584
	entry->crc[4] = crc4;
1585
 
1586
	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1587
	pipe_crc->head = head;
1588
 
1589
	spin_unlock(&pipe_crc->lock);
1590
 
1591
	wake_up_interruptible(&pipe_crc->wq);
1592
}
1593
#else
1594
static inline void
1595
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1596
			     uint32_t crc0, uint32_t crc1,
1597
			     uint32_t crc2, uint32_t crc3,
1598
			     uint32_t crc4) {}
1599
#endif
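/*
 * display_pipe_crc_irq_handler() above is the producer half of a power-of-two
 * circular buffer guarded by pipe_crc->lock; a reader drains it from the tail
 * (in the real driver that consumer lives in the debugfs code). A minimal
 * consumer sketch, assuming the same INTEL_PIPE_CRC_ENTRIES_NR sizing; the
 * helper name is made up.
 */
#if 0
static bool pipe_crc_pop(struct intel_pipe_crc *pipe_crc,
			 struct intel_pipe_crc_entry *out)
{
	bool have_entry = false;

	spin_lock_irq(&pipe_crc->lock);
	if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
		     INTEL_PIPE_CRC_ENTRIES_NR) >= 1) {
		*out = pipe_crc->entries[pipe_crc->tail];
		pipe_crc->tail = (pipe_crc->tail + 1) &
				 (INTEL_PIPE_CRC_ENTRIES_NR - 1);
		have_entry = true;
	}
	spin_unlock_irq(&pipe_crc->lock);

	return have_entry;
}
#endif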
1600
 
1601
 
1602
static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1603
{
1604
	struct drm_i915_private *dev_priv = dev->dev_private;
1605
 
1606
	display_pipe_crc_irq_handler(dev, pipe,
1607
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1608
				     0, 0, 0, 0);
1609
}
1610
 
1611
static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1612
{
1613
	struct drm_i915_private *dev_priv = dev->dev_private;
1614
 
1615
	display_pipe_crc_irq_handler(dev, pipe,
1616
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1617
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1618
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1619
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1620
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1621
}
1622
 
1623
static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1624
{
1625
	struct drm_i915_private *dev_priv = dev->dev_private;
1626
	uint32_t res1, res2;
1627
 
1628
	if (INTEL_INFO(dev)->gen >= 3)
1629
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1630
	else
1631
		res1 = 0;
1632
 
1633
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1634
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1635
	else
1636
		res2 = 0;
1637
 
1638
	display_pipe_crc_irq_handler(dev, pipe,
1639
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
1640
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1641
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1642
				     res1, res2);
1643
}
1644
 
4104 Serge 1645
/* The RPS events need forcewake, so we add them to a work queue and mask their
1646
 * IMR bits until the work is done. Other interrupts can be processed without
1647
 * the work queue. */
1648
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1649
{
5354 serge 1650
	/* TODO: RPS on GEN9+ is not supported yet. */
1651
	if (WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
1652
		      "GEN9+: unexpected RPS IRQ\n"))
1653
		return;
1654
 
5060 serge 1655
	if (pm_iir & dev_priv->pm_rps_events) {
4104 Serge 1656
		spin_lock(&dev_priv->irq_lock);
5354 serge 1657
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1658
		if (dev_priv->rps.interrupts_enabled) {
5060 serge 1659
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
5354 serge 1660
			queue_work(dev_priv->wq, &dev_priv->rps.work);
1661
		}
4104 Serge 1662
		spin_unlock(&dev_priv->irq_lock);
1663
	}
1664
 
5354 serge 1665
	if (INTEL_INFO(dev_priv)->gen >= 8)
1666
		return;
1667
 
4104 Serge 1668
	if (HAS_VEBOX(dev_priv->dev)) {
1669
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1670
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
1671
 
5354 serge 1672
		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1673
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
4104 Serge 1674
	}
1675
}
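/*
 * The handler above masks the RPS bits in the PM IMR, stashes the IIR value
 * in dev_priv->rps.pm_iir and defers the heavy lifting to dev_priv->rps.work.
 * The work item later collects the stashed bits under irq_lock and unmasks
 * the interrupts again, roughly as sketched below. This is an illustration
 * only; the real bottom half is the RPS work function elsewhere in this
 * file, and gen6_enable_pm_irq() is assumed to be the counterpart of the
 * gen6_disable_pm_irq() call used above.
 */
#if 0
	u32 pm_iir;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* events collected, let new RPS interrupts through again */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);
#endif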
1676
 
5354 serge 1677
static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
1678
{
1679
	if (!drm_handle_vblank(dev, pipe))
1680
		return false;
1681
 
1682
	return true;
1683
}
1684
 
5060 serge 1685
static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
3031 serge 1686
{
5060 serge 1687
	struct drm_i915_private *dev_priv = dev->dev_private;
1688
	u32 pipe_stats[I915_MAX_PIPES] = { };
3031 serge 1689
	int pipe;
1690
 
5060 serge 1691
	spin_lock(&dev_priv->irq_lock);
5354 serge 1692
	for_each_pipe(dev_priv, pipe) {
5060 serge 1693
		int reg;
1694
		u32 mask, iir_bit = 0;
3031 serge 1695
 
5060 serge 1696
		/*
1697
		 * PIPESTAT bits get signalled even when the interrupt is
1698
		 * disabled with the mask bits, and some of the status bits do
1699
		 * not generate interrupts at all (like the underrun bit). Hence
1700
		 * we need to be careful that we only handle what we want to
1701
		 * handle.
1702
		 */
3031 serge 1703
 
5354 serge 1704
		/* fifo underruns are filtered in the underrun handler. */
1705
		mask = PIPE_FIFO_UNDERRUN_STATUS;
1706
 
5060 serge 1707
		switch (pipe) {
1708
		case PIPE_A:
1709
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1710
			break;
1711
		case PIPE_B:
1712
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1713
			break;
1714
		case PIPE_C:
1715
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1716
			break;
1717
		}
1718
		if (iir & iir_bit)
1719
			mask |= dev_priv->pipestat_irq_mask[pipe];
3031 serge 1720
 
5060 serge 1721
		if (!mask)
1722
			continue;
3031 serge 1723
 
5060 serge 1724
		reg = PIPESTAT(pipe);
1725
		mask |= PIPESTAT_INT_ENABLE_MASK;
1726
		pipe_stats[pipe] = I915_READ(reg) & mask;
3031 serge 1727
 
1728
			/*
1729
			 * Clear the PIPE*STAT regs before the IIR
1730
			 */
5060 serge 1731
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
1732
					PIPESTAT_INT_STATUS_MASK))
3031 serge 1733
			I915_WRITE(reg, pipe_stats[pipe]);
1734
	}
5060 serge 1735
	spin_unlock(&dev_priv->irq_lock);
3031 serge 1736
 
5354 serge 1737
	for_each_pipe(dev_priv, pipe) {
3031 serge 1738
 
4560 Serge 1739
 
1740
		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1741
			i9xx_pipe_crc_irq_handler(dev, pipe);
5060 serge 1742
 
5354 serge 1743
		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1744
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
3031 serge 1745
	}
1746
 
5060 serge 1747
	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1748
		gmbus_irq_handler(dev);
1749
}
3031 serge 1750
 
5060 serge 1751
static void i9xx_hpd_irq_handler(struct drm_device *dev)
1752
{
1753
	struct drm_i915_private *dev_priv = dev->dev_private;
1754
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
4104 Serge 1755
 
5060 serge 1756
	if (hotplug_status) {
1757
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1758
		/*
1759
		 * Make sure hotplug status is cleared before we clear IIR, or else we
1760
		 * may miss hotplug events.
1761
		 */
1762
		POSTING_READ(PORT_HOTPLUG_STAT);
4104 Serge 1763
 
5060 serge 1764
	if (IS_G4X(dev)) {
1765
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
4560 Serge 1766
 
5060 serge 1767
			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
1768
	} else {
1769
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1770
 
1771
			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
1772
	}
1773
 
1774
	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
1775
	    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1776
		dp_aux_irq_handler(dev);
1777
	}
1778
}
1779
 
1780
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1781
{
1782
	struct drm_device *dev = arg;
1783
	struct drm_i915_private *dev_priv = dev->dev_private;
1784
	u32 iir, gt_iir, pm_iir;
1785
	irqreturn_t ret = IRQ_NONE;
1786
 
1787
	while (true) {
1788
		/* Find, clear, then process each source of interrupt */
1789
 
1790
		gt_iir = I915_READ(GTIIR);
1791
		if (gt_iir)
1792
			I915_WRITE(GTIIR, gt_iir);
1793
 
1794
		pm_iir = I915_READ(GEN6_PMIIR);
1795
		if (pm_iir)
1796
			I915_WRITE(GEN6_PMIIR, pm_iir);
1797
 
1798
		iir = I915_READ(VLV_IIR);
1799
		if (iir) {
1800
			/* Consume port before clearing IIR or we'll miss events */
1801
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
1802
				i9xx_hpd_irq_handler(dev);
1803
			I915_WRITE(VLV_IIR, iir);
3031 serge 1804
		}
1805
 
5060 serge 1806
		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1807
			goto out;
3031 serge 1808
 
5060 serge 1809
		ret = IRQ_HANDLED;
1810
 
1811
		if (gt_iir)
1812
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
4126 Serge 1813
		if (pm_iir)
1814
			gen6_rps_irq_handler(dev_priv, pm_iir);
5060 serge 1815
		/* Call regardless, as some status bits might not be
1816
		 * signalled in iir */
1817
		valleyview_pipestat_irq_handler(dev, iir);
3031 serge 1818
	}
1819
 
1820
out:
1821
	return ret;
1822
}
1823
 
5060 serge 1824
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1825
{
1826
	struct drm_device *dev = arg;
1827
	struct drm_i915_private *dev_priv = dev->dev_private;
1828
	u32 master_ctl, iir;
1829
	irqreturn_t ret = IRQ_NONE;
1830
 
1831
	for (;;) {
1832
		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1833
		iir = I915_READ(VLV_IIR);
1834
 
1835
		if (master_ctl == 0 && iir == 0)
1836
			break;
1837
 
1838
		ret = IRQ_HANDLED;
1839
 
1840
		I915_WRITE(GEN8_MASTER_IRQ, 0);
1841
 
1842
		/* Find, clear, then process each source of interrupt */
1843
 
1844
		if (iir) {
1845
			/* Consume port before clearing IIR or we'll miss events */
1846
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
1847
				i9xx_hpd_irq_handler(dev);
1848
			I915_WRITE(VLV_IIR, iir);
1849
		}
1850
 
1851
		gen8_gt_irq_handler(dev, dev_priv, master_ctl);
1852
 
1853
		/* Call regardless, as some status bits might not be
1854
		 * signalled in iir */
1855
		valleyview_pipestat_irq_handler(dev, iir);
1856
 
1857
		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
1858
		POSTING_READ(GEN8_MASTER_IRQ);
1859
	}
1860
 
1861
	return ret;
1862
}
1863
 
3031 serge 1864
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1865
{
5060 serge 1866
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 1867
	int pipe;
3746 Serge 1868
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
5060 serge 1869
	u32 dig_hotplug_reg;
3031 serge 1870
 
5060 serge 1871
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1872
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
4104 Serge 1873
 
5060 serge 1874
	intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
1875
 
4104 Serge 1876
	if (pch_iir & SDE_AUDIO_POWER_MASK) {
1877
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1878
			       SDE_AUDIO_POWER_SHIFT);
1879
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
1880
				 port_name(port));
3746 Serge 1881
	}
3031 serge 1882
 
3480 Serge 1883
	if (pch_iir & SDE_AUX_MASK)
1884
		dp_aux_irq_handler(dev);
1885
 
3031 serge 1886
	if (pch_iir & SDE_GMBUS)
3480 Serge 1887
		gmbus_irq_handler(dev);
3031 serge 1888
 
1889
	if (pch_iir & SDE_AUDIO_HDCP_MASK)
1890
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1891
 
1892
	if (pch_iir & SDE_AUDIO_TRANS_MASK)
1893
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1894
 
1895
	if (pch_iir & SDE_POISON)
1896
		DRM_ERROR("PCH poison interrupt\n");
1897
 
1898
	if (pch_iir & SDE_FDI_MASK)
5354 serge 1899
		for_each_pipe(dev_priv, pipe)
3031 serge 1900
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1901
					 pipe_name(pipe),
1902
					 I915_READ(FDI_RX_IIR(pipe)));
1903
 
1904
	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1905
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1906
 
1907
	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1908
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1909
 
4104 Serge 1910
	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
5354 serge 1911
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
4104 Serge 1912
 
3031 serge 1913
	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
5354 serge 1914
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
3031 serge 1915
}
1916
 
4104 Serge 1917
static void ivb_err_int_handler(struct drm_device *dev)
1918
{
1919
	struct drm_i915_private *dev_priv = dev->dev_private;
1920
	u32 err_int = I915_READ(GEN7_ERR_INT);
4560 Serge 1921
	enum pipe pipe;
4104 Serge 1922
 
1923
	if (err_int & ERR_INT_POISON)
1924
		DRM_ERROR("Poison interrupt\n");
1925
 
5354 serge 1926
	for_each_pipe(dev_priv, pipe) {
1927
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1928
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4104 Serge 1929
 
4560 Serge 1930
		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1931
			if (IS_IVYBRIDGE(dev))
1932
				ivb_pipe_crc_irq_handler(dev, pipe);
1933
			else
1934
				hsw_pipe_crc_irq_handler(dev, pipe);
1935
		}
1936
	}
4104 Serge 1937
 
1938
	I915_WRITE(GEN7_ERR_INT, err_int);
1939
}
1940
 
1941
static void cpt_serr_int_handler(struct drm_device *dev)
1942
{
1943
	struct drm_i915_private *dev_priv = dev->dev_private;
1944
	u32 serr_int = I915_READ(SERR_INT);
1945
 
1946
	if (serr_int & SERR_INT_POISON)
1947
		DRM_ERROR("PCH poison interrupt\n");
1948
 
1949
	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
5354 serge 1950
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
4104 Serge 1951
 
1952
	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
5354 serge 1953
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
4104 Serge 1954
 
1955
	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
5354 serge 1956
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
4104 Serge 1957
 
1958
	I915_WRITE(SERR_INT, serr_int);
1959
}
1960
 
3031 serge 1961
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1962
{
5060 serge 1963
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 1964
	int pipe;
3746 Serge 1965
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
5060 serge 1966
	u32 dig_hotplug_reg;
3031 serge 1967
 
5060 serge 1968
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1969
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
4104 Serge 1970
 
5060 serge 1971
	intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
1972
 
4104 Serge 1973
	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1974
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1975
			       SDE_AUDIO_POWER_SHIFT_CPT);
1976
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1977
				 port_name(port));
3746 Serge 1978
	}
3031 serge 1979
 
1980
	if (pch_iir & SDE_AUX_MASK_CPT)
3480 Serge 1981
		dp_aux_irq_handler(dev);
3031 serge 1982
 
1983
	if (pch_iir & SDE_GMBUS_CPT)
3480 Serge 1984
		gmbus_irq_handler(dev);
3031 serge 1985
 
1986
	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1987
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
1988
 
1989
	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1990
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
1991
 
1992
	if (pch_iir & SDE_FDI_MASK_CPT)
5354 serge 1993
		for_each_pipe(dev_priv, pipe)
3031 serge 1994
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1995
					 pipe_name(pipe),
1996
					 I915_READ(FDI_RX_IIR(pipe)));
1997
 
4104 Serge 1998
	if (pch_iir & SDE_ERROR_CPT)
1999
		cpt_serr_int_handler(dev);
4539 Serge 2000
}
3480 Serge 2001
 
4104 Serge 2002
static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
3031 serge 2003
{
4104 Serge 2004
	struct drm_i915_private *dev_priv = dev->dev_private;
4560 Serge 2005
	enum pipe pipe;
3031 serge 2006
 
3480 Serge 2007
	if (de_iir & DE_AUX_CHANNEL_A)
2008
		dp_aux_irq_handler(dev);
2009
 
3031 serge 2010
	if (de_iir & DE_GSE)
4104 Serge 2011
		intel_opregion_asle_intr(dev);
2351 Serge 2012
 
4104 Serge 2013
	if (de_iir & DE_POISON)
2014
		DRM_ERROR("Poison interrupt\n");
2015
 
5354 serge 2016
	for_each_pipe(dev_priv, pipe) {
4104 Serge 2017
 
4560 Serge 2018
		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
5354 serge 2019
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2351 Serge 2020
 
4560 Serge 2021
		if (de_iir & DE_PIPE_CRC_DONE(pipe))
2022
			i9xx_pipe_crc_irq_handler(dev, pipe);
2023
 
2024
		/* plane/pipes map 1:1 on ilk+ */
2025
		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2026
//			intel_prepare_page_flip(dev, pipe);
2027
//			intel_finish_page_flip_plane(dev, pipe);
2028
		}
3031 serge 2029
	}
2351 Serge 2030
 
3031 serge 2031
	/* check event from PCH */
2032
	if (de_iir & DE_PCH_EVENT) {
3480 Serge 2033
		u32 pch_iir = I915_READ(SDEIIR);
2034
 
3031 serge 2035
		if (HAS_PCH_CPT(dev))
2036
			cpt_irq_handler(dev, pch_iir);
2037
		else
2038
			ibx_irq_handler(dev, pch_iir);
3480 Serge 2039
 
2040
		/* should clear PCH hotplug event before clear CPU irq */
2041
		I915_WRITE(SDEIIR, pch_iir);
3031 serge 2042
	}
4104 Serge 2043
 
3031 serge 2044
	if (IS_GEN5(dev) &&  de_iir & DE_PCU_EVENT)
4104 Serge 2045
		ironlake_rps_change_irq_handler(dev);
2351 Serge 2046
}
2047
 
4104 Serge 2048
static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
3031 serge 2049
{
2050
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 2051
	enum pipe pipe;
2351 Serge 2052
 
4126 Serge 2053
	if (de_iir & DE_ERR_INT_IVB)
2054
		ivb_err_int_handler(dev);
2351 Serge 2055
 
4104 Serge 2056
	if (de_iir & DE_AUX_CHANNEL_A_IVB)
2057
		dp_aux_irq_handler(dev);
3031 serge 2058
 
4104 Serge 2059
	if (de_iir & DE_GSE_IVB)
2060
		intel_opregion_asle_intr(dev);
4560 Serge 2061
 
5354 serge 2062
	for_each_pipe(dev_priv, pipe) {
4560 Serge 2063
 
2064
		/* plane/pipes map 1:1 on ilk+ */
5060 serge 2065
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2066
//			intel_prepare_page_flip(dev, pipe);
2067
//			intel_finish_page_flip_plane(dev, pipe);
3031 serge 2068
		}
2069
	}
2070
 
4104 Serge 2071
	/* check event from PCH */
2072
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2073
		u32 pch_iir = I915_READ(SDEIIR);
3031 serge 2074
 
4104 Serge 2075
		cpt_irq_handler(dev, pch_iir);
3031 serge 2076
 
4104 Serge 2077
		/* clear PCH hotplug event before clear CPU irq */
2078
		I915_WRITE(SDEIIR, pch_iir);
4539 Serge 2079
	}
3031 serge 2080
}
2081
 
5060 serge 2082
/*
2083
 * To handle irqs with the minimum potential races with fresh interrupts, we:
2084
 * 1 - Disable Master Interrupt Control.
2085
 * 2 - Find the source(s) of the interrupt.
2086
 * 3 - Clear the Interrupt Identity bits (IIR).
2087
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2088
 * 5 - Re-enable Master Interrupt Control.
2089
 */
4104 Serge 2090
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
3031 serge 2091
{
5060 serge 2092
	struct drm_device *dev = arg;
2093
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 2094
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2095
	irqreturn_t ret = IRQ_NONE;
3031 serge 2096
 
4104 Serge 2097
	/* We get interrupts on unclaimed registers, so check for this before we
2098
	 * do any I915_{READ,WRITE}. */
2099
	intel_uncore_check_errors(dev);
3031 serge 2100
 
4104 Serge 2101
	/* disable master interrupt before clearing iir  */
2102
	de_ier = I915_READ(DEIER);
2103
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2104
	POSTING_READ(DEIER);
3031 serge 2105
 
4104 Serge 2106
	/* Disable south interrupts. We'll only write to SDEIIR once, so further
2107
	 * interrupts will be stored on its back queue, and then we'll be
2108
	 * able to process them after we restore SDEIER (as soon as we restore
2109
	 * it, we'll get an interrupt if SDEIIR still has something to process
2110
	 * due to its back queue). */
2111
	if (!HAS_PCH_NOP(dev)) {
2112
		sde_ier = I915_READ(SDEIER);
2113
		I915_WRITE(SDEIER, 0);
2114
		POSTING_READ(SDEIER);
3031 serge 2115
	}
2116
 
5060 serge 2117
	/* Find, clear, then process each source of interrupt */
2118
 
4104 Serge 2119
	gt_iir = I915_READ(GTIIR);
2120
	if (gt_iir) {
5060 serge 2121
		I915_WRITE(GTIIR, gt_iir);
2122
		ret = IRQ_HANDLED;
4104 Serge 2123
		if (INTEL_INFO(dev)->gen >= 6)
2124
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
2125
		else
2126
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
4539 Serge 2127
	}
3031 serge 2128
 
4104 Serge 2129
	de_iir = I915_READ(DEIIR);
2130
	if (de_iir) {
5060 serge 2131
		I915_WRITE(DEIIR, de_iir);
2132
		ret = IRQ_HANDLED;
4104 Serge 2133
		if (INTEL_INFO(dev)->gen >= 7)
2134
			ivb_display_irq_handler(dev, de_iir);
2135
		else
2136
			ilk_display_irq_handler(dev, de_iir);
3480 Serge 2137
	}
2138
 
4104 Serge 2139
	if (INTEL_INFO(dev)->gen >= 6) {
2140
		u32 pm_iir = I915_READ(GEN6_PMIIR);
2141
		if (pm_iir) {
2142
			I915_WRITE(GEN6_PMIIR, pm_iir);
2143
			ret = IRQ_HANDLED;
5060 serge 2144
			gen6_rps_irq_handler(dev_priv, pm_iir);
4560 Serge 2145
		}
3031 serge 2146
	}
2147
 
4104 Serge 2148
	I915_WRITE(DEIER, de_ier);
2149
	POSTING_READ(DEIER);
2150
	if (!HAS_PCH_NOP(dev)) {
2151
		I915_WRITE(SDEIER, sde_ier);
2152
		POSTING_READ(SDEIER);
3031 serge 2153
	}
2154
 
4104 Serge 2155
	return ret;
3031 serge 2156
}
2157
 
4560 Serge 2158
static irqreturn_t gen8_irq_handler(int irq, void *arg)
2159
{
2160
	struct drm_device *dev = arg;
2161
	struct drm_i915_private *dev_priv = dev->dev_private;
2162
	u32 master_ctl;
2163
	irqreturn_t ret = IRQ_NONE;
2164
	uint32_t tmp = 0;
2165
	enum pipe pipe;
5354 serge 2166
	u32 aux_mask = GEN8_AUX_CHANNEL_A;
4560 Serge 2167
 
5354 serge 2168
	if (IS_GEN9(dev))
2169
		aux_mask |=  GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
2170
			GEN9_AUX_CHANNEL_D;
2171
 
4560 Serge 2172
	master_ctl = I915_READ(GEN8_MASTER_IRQ);
2173
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2174
	if (!master_ctl)
2175
		return IRQ_NONE;
2176
 
2177
	I915_WRITE(GEN8_MASTER_IRQ, 0);
2178
	POSTING_READ(GEN8_MASTER_IRQ);
2179
 
5060 serge 2180
	/* Find, clear, then process each source of interrupt */
2181
 
4560 Serge 2182
	ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
2183
 
2184
	if (master_ctl & GEN8_DE_MISC_IRQ) {
2185
		tmp = I915_READ(GEN8_DE_MISC_IIR);
5060 serge 2186
		if (tmp) {
2187
			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2188
			ret = IRQ_HANDLED;
4560 Serge 2189
			if (tmp & GEN8_DE_MISC_GSE)
2190
				intel_opregion_asle_intr(dev);
5060 serge 2191
			else
4560 Serge 2192
			DRM_ERROR("Unexpected DE Misc interrupt\n");
5060 serge 2193
		}
4560 Serge 2194
		else
2195
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2196
	}
2197
 
2198
	if (master_ctl & GEN8_DE_PORT_IRQ) {
2199
		tmp = I915_READ(GEN8_DE_PORT_IIR);
5060 serge 2200
		if (tmp) {
2201
			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2202
			ret = IRQ_HANDLED;
5354 serge 2203
 
2204
			if (tmp & aux_mask)
4560 Serge 2205
				dp_aux_irq_handler(dev);
5060 serge 2206
			else
4560 Serge 2207
			DRM_ERROR("Unexpected DE Port interrupt\n");
5060 serge 2208
		}
4560 Serge 2209
		else
2210
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2211
	}
2212
 
5354 serge 2213
	for_each_pipe(dev_priv, pipe) {
2214
		uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
4560 Serge 2215
 
2216
		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2217
			continue;
2218
 
2219
		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
5060 serge 2220
		if (pipe_iir) {
2221
			ret = IRQ_HANDLED;
2222
			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
4560 Serge 2223
 
2224
 
5354 serge 2225
			if (IS_GEN9(dev))
2226
				flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2227
			else
2228
				flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2229
 
2230
 
4560 Serge 2231
			if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2232
				hsw_pipe_crc_irq_handler(dev, pipe);
2233
 
5354 serge 2234
			if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
2235
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
2236
								    pipe);
4560 Serge 2237
 
5354 serge 2238
 
2239
			if (IS_GEN9(dev))
2240
				fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2241
			else
2242
				fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2243
 
2244
			if (fault_errors)
4560 Serge 2245
			DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
2246
				  pipe_name(pipe),
2247
				  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2248
		} else
2249
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2250
	}
2251
 
2252
	if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
2253
		/*
2254
		 * FIXME(BDW): Assume for now that the new interrupt handling
2255
		 * scheme also closed the SDE interrupt handling race we've seen
2256
		 * on older pch-split platforms. But this needs testing.
2257
		 */
2258
		u32 pch_iir = I915_READ(SDEIIR);
2259
		if (pch_iir) {
2260
			I915_WRITE(SDEIIR, pch_iir);
2261
			ret = IRQ_HANDLED;
5060 serge 2262
			cpt_irq_handler(dev, pch_iir);
2263
		} else
2264
			DRM_ERROR("The master control interrupt lied (SDE)!\n");
2265
 
4560 Serge 2266
	}
2267
 
2268
	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2269
	POSTING_READ(GEN8_MASTER_IRQ);
2270
 
2271
	return ret;
2272
}
2273
 
4104 Serge 2274
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2275
			       bool reset_completed)
3746 Serge 2276
{
5060 serge 2277
	struct intel_engine_cs *ring;
4104 Serge 2278
	int i;
3031 serge 2279
 
4104 Serge 2280
	/*
2281
	 * Notify all waiters for GPU completion events that reset state has
2282
	 * been changed, and that they need to restart their wait after
2283
	 * checking for potential errors (and bail out to drop locks if there is
2284
	 * a gpu reset pending so that i915_error_work_func can acquire them).
2285
	 */
3031 serge 2286
 
4104 Serge 2287
	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2288
	for_each_ring(ring, dev_priv, i)
2289
		wake_up_all(&ring->irq_queue);
3031 serge 2290
 
2291
 
4104 Serge 2292
	/*
2293
	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2294
	 * reset state is cleared.
2295
	 */
2296
	if (reset_completed)
2297
		wake_up_all(&dev_priv->gpu_error.reset_queue);
3031 serge 2298
}
2299
 
2300
/**
4104 Serge 2301
 * i915_error_work_func - do process context error handling work
2302
 * @work: work struct
3031 serge 2303
 *
4104 Serge 2304
 * Fire an error uevent so userspace can see that a hang or error
2305
 * was detected.
3031 serge 2306
 */
4104 Serge 2307
static void i915_error_work_func(struct work_struct *work)
3031 serge 2308
{
4104 Serge 2309
	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
2310
						    work);
5060 serge 2311
	struct drm_i915_private *dev_priv =
2312
		container_of(error, struct drm_i915_private, gpu_error);
4104 Serge 2313
	struct drm_device *dev = dev_priv->dev;
2314
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2315
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2316
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2317
	int ret;
3031 serge 2318
 
4104 Serge 2319
	/*
2320
	 * Note that there's only one work item which does gpu resets, so we
2321
	 * need not worry about concurrent gpu resets potentially incrementing
2322
	 * error->reset_counter twice. We only need to take care of another
2323
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
2324
	 * quick check for that is good enough: schedule_work ensures the
2325
	 * correct ordering between hang detection and this work item, and since
2326
	 * the reset in-progress bit is only ever set by code outside of this
2327
	 * work we don't need to worry about any other races.
2328
	 */
2329
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2330
		DRM_DEBUG_DRIVER("resetting chip\n");
3031 serge 2331
 
4104 Serge 2332
		/*
2333
		 * All state reset _must_ be completed before we update the
2334
		 * reset counter, for otherwise waiters might miss the reset
2335
		 * pending state and not properly drop locks, resulting in
2336
		 * deadlocks with the reset work.
2337
		 */
4560 Serge 2338
//		ret = i915_reset(dev);
3031 serge 2339
 
4126 Serge 2340
//       intel_display_handle_reset(dev);
3031 serge 2341
 
4104 Serge 2342
		if (ret == 0) {
2343
			/*
2344
			 * After all the gem state is reset, increment the reset
2345
			 * counter and wake up everyone waiting for the reset to
2346
			 * complete.
2347
			 *
2348
			 * Since unlock operations are a one-sided barrier only,
2349
			 * we need to insert a barrier here to order any seqno
2350
			 * updates before
2351
			 * the counter increment.
2352
			 */
2353
			atomic_inc(&dev_priv->gpu_error.reset_counter);
3031 serge 2354
 
4104 Serge 2355
		} else {
4560 Serge 2356
			atomic_set_mask(I915_WEDGED, &error->reset_counter);
3031 serge 2357
	}
2358
 
4104 Serge 2359
		/*
2360
		 * Note: The wake_up also serves as a memory barrier so that
2361
		 * waiters see the update value of the reset counter atomic_t.
2362
		 */
2363
		i915_error_wake_up(dev_priv, true);
3031 serge 2364
	}
2365
}
2366
 
2367
static void i915_report_and_clear_eir(struct drm_device *dev)
2368
{
2369
	struct drm_i915_private *dev_priv = dev->dev_private;
2370
	uint32_t instdone[I915_NUM_INSTDONE_REG];
2371
	u32 eir = I915_READ(EIR);
2372
	int pipe, i;
2373
 
2374
	if (!eir)
2375
		return;
2376
 
2377
	pr_err("render error detected, EIR: 0x%08x\n", eir);
2378
 
2379
	i915_get_extra_instdone(dev, instdone);
2380
 
2381
	if (IS_G4X(dev)) {
2382
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2383
			u32 ipeir = I915_READ(IPEIR_I965);
2384
 
2385
			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2386
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2387
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
2388
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2389
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2390
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2391
			I915_WRITE(IPEIR_I965, ipeir);
2392
			POSTING_READ(IPEIR_I965);
2393
		}
2394
		if (eir & GM45_ERROR_PAGE_TABLE) {
2395
			u32 pgtbl_err = I915_READ(PGTBL_ER);
2396
			pr_err("page table error\n");
2397
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2398
			I915_WRITE(PGTBL_ER, pgtbl_err);
2399
			POSTING_READ(PGTBL_ER);
2400
		}
2401
	}
2402
 
2403
	if (!IS_GEN2(dev)) {
2404
		if (eir & I915_ERROR_PAGE_TABLE) {
2405
			u32 pgtbl_err = I915_READ(PGTBL_ER);
2406
			pr_err("page table error\n");
2407
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2408
			I915_WRITE(PGTBL_ER, pgtbl_err);
2409
			POSTING_READ(PGTBL_ER);
2410
		}
2411
	}
2412
 
2413
	if (eir & I915_ERROR_MEMORY_REFRESH) {
2414
		pr_err("memory refresh error:\n");
5354 serge 2415
		for_each_pipe(dev_priv, pipe)
3031 serge 2416
			pr_err("pipe %c stat: 0x%08x\n",
2417
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2418
		/* pipestat has already been acked */
2419
	}
2420
	if (eir & I915_ERROR_INSTRUCTION) {
2421
		pr_err("instruction error\n");
2422
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
2423
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
2424
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2425
		if (INTEL_INFO(dev)->gen < 4) {
2426
			u32 ipeir = I915_READ(IPEIR);
2427
 
2428
			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
2429
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
2430
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
2431
			I915_WRITE(IPEIR, ipeir);
2432
			POSTING_READ(IPEIR);
2433
		} else {
2434
			u32 ipeir = I915_READ(IPEIR_I965);
2435
 
2436
			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2437
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2438
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2439
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2440
			I915_WRITE(IPEIR_I965, ipeir);
2441
			POSTING_READ(IPEIR_I965);
2442
		}
2443
	}
2444
 
2445
	I915_WRITE(EIR, eir);
2446
	POSTING_READ(EIR);
2447
	eir = I915_READ(EIR);
2448
	if (eir) {
2449
		/*
2450
		 * some errors might have become stuck,
2451
		 * mask them.
2452
		 */
2453
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2454
		I915_WRITE(EMR, I915_READ(EMR) | eir);
2455
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2456
	}
2457
}
2458
 
2459
/**
2460
 * i915_handle_error - handle an error interrupt
2461
 * @dev: drm device
2462
 *
2463
 * Do some basic checking of register state at error interrupt time and
2464
 * dump it to the syslog.  Also call i915_capture_error_state() to make
2465
 * sure we get a record and make it available in debugfs.  Fire a uevent
2466
 * so userspace knows something bad happened (should trigger collection
2467
 * of a ring dump etc.).
2468
 */
5060 serge 2469
void i915_handle_error(struct drm_device *dev, bool wedged,
2470
		       const char *fmt, ...)
3031 serge 2471
{
2472
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 2473
	va_list args;
2474
	char error_msg[80];
3031 serge 2475
 
5060 serge 2476
	va_start(args, fmt);
2477
	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2478
	va_end(args);
2479
 
4560 Serge 2480
//	i915_capture_error_state(dev);
3031 serge 2481
	i915_report_and_clear_eir(dev);
2482
 
2483
	if (wedged) {
3480 Serge 2484
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2485
				&dev_priv->gpu_error.reset_counter);
3031 serge 2486
 
2487
		/*
4104 Serge 2488
		 * Wake up waiting processes so that the reset work function
2489
		 * i915_error_work_func doesn't deadlock trying to grab various
2490
		 * locks. By bumping the reset counter first, the woken
2491
		 * processes will see a reset in progress and back off,
2492
		 * releasing their locks and then wait for the reset completion.
2493
		 * We must do this for _all_ gpu waiters that might hold locks
2494
		 * that the reset work needs to acquire.
2495
		 *
2496
		 * Note: The wake_up serves as the required memory barrier to
2497
		 * ensure that the waiters see the updated value of the reset
2498
		 * counter atomic_t.
3031 serge 2499
		 */
4104 Serge 2500
		i915_error_wake_up(dev_priv, false);
3031 serge 2501
	}
2502
 
4104 Serge 2503
	/*
2504
	 * Our reset work can grab modeset locks (since it needs to reset the
2505
	 * state of outstanding pageflips). Hence it must not be run on our own
2506
	 * dev_priv->wq work queue for otherwise the flush_work in the pageflip
2507
	 * code will deadlock.
2508
	 */
2509
	schedule_work(&dev_priv->gpu_error.work);
3031 serge 2510
}
2511
 
2512
/* Called from drm generic code, passed 'crtc' which
2513
 * we use as a pipe index
2514
 */
2515
static int i915_enable_vblank(struct drm_device *dev, int pipe)
2516
{
5060 serge 2517
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 2518
	unsigned long irqflags;
2519
 
2520
	if (!i915_pipe_enabled(dev, pipe))
2521
		return -EINVAL;
2522
 
2523
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2524
	if (INTEL_INFO(dev)->gen >= 4)
2525
		i915_enable_pipestat(dev_priv, pipe,
5060 serge 2526
				     PIPE_START_VBLANK_INTERRUPT_STATUS);
3031 serge 2527
	else
2528
		i915_enable_pipestat(dev_priv, pipe,
5060 serge 2529
				     PIPE_VBLANK_INTERRUPT_STATUS);
3031 serge 2530
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2531
 
2532
	return 0;
2533
}
2534
 
2535
static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2536
{
5060 serge 2537
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 2538
	unsigned long irqflags;
4104 Serge 2539
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
4560 Serge 2540
						     DE_PIPE_VBLANK(pipe);
3031 serge 2541
 
2542
	if (!i915_pipe_enabled(dev, pipe))
2543
		return -EINVAL;
2544
 
2545
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4104 Serge 2546
	ironlake_enable_display_irq(dev_priv, bit);
3031 serge 2547
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2548
 
2549
	return 0;
2550
}
2551
 
2552
static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2553
{
5060 serge 2554
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 2555
	unsigned long irqflags;
2556
 
2557
	if (!i915_pipe_enabled(dev, pipe))
2558
		return -EINVAL;
2559
 
2560
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2561
	i915_enable_pipestat(dev_priv, pipe,
5060 serge 2562
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
3031 serge 2563
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2564
 
2565
	return 0;
2566
}
2567
 
4560 Serge 2568
static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2569
{
2570
	struct drm_i915_private *dev_priv = dev->dev_private;
2571
	unsigned long irqflags;
2572
 
2573
	if (!i915_pipe_enabled(dev, pipe))
2574
		return -EINVAL;
2575
 
2576
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2577
	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2578
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2579
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2580
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2581
	return 0;
2582
}
2583
 
3031 serge 2584
/* Called from drm generic code, passed 'crtc' which
2585
 * we use as a pipe index
2586
 */
2587
static void i915_disable_vblank(struct drm_device *dev, int pipe)
2588
{
5060 serge 2589
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 2590
	unsigned long irqflags;
2591
 
2592
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2593
	i915_disable_pipestat(dev_priv, pipe,
5060 serge 2594
			      PIPE_VBLANK_INTERRUPT_STATUS |
2595
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
3031 serge 2596
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2597
}
2598
 
2599
static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2600
{
5060 serge 2601
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 2602
	unsigned long irqflags;
4104 Serge 2603
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
4560 Serge 2604
						     DE_PIPE_VBLANK(pipe);
3031 serge 2605
 
2606
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4104 Serge 2607
	ironlake_disable_display_irq(dev_priv, bit);
3031 serge 2608
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2609
}
2610
 
2611
static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2612
{
5060 serge 2613
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 2614
	unsigned long irqflags;
2615
 
2616
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2617
	i915_disable_pipestat(dev_priv, pipe,
5060 serge 2618
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
3031 serge 2619
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2620
}
2621
 
4560 Serge 2622
static void gen8_disable_vblank(struct drm_device *dev, int pipe)
2623
{
2624
	struct drm_i915_private *dev_priv = dev->dev_private;
2625
	unsigned long irqflags;
2626
 
2627
	if (!i915_pipe_enabled(dev, pipe))
2628
		return;
2629
 
2630
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2631
	dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2632
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2633
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2634
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2635
}
2636
 
3031 serge 2637
static u32
5060 serge 2638
ring_last_seqno(struct intel_engine_cs *ring)
3031 serge 2639
{
2640
	return list_entry(ring->request_list.prev,
2641
			  struct drm_i915_gem_request, list)->seqno;
2642
}
4104 Serge 2643
 
2644
static bool
5060 serge 2645
ring_idle(struct intel_engine_cs *ring, u32 seqno)
2351 Serge 2646
{
4104 Serge 2647
	return (list_empty(&ring->request_list) ||
2648
		i915_seqno_passed(seqno, ring_last_seqno(ring)));
2649
}
2351 Serge 2650
 
5060 serge 2651
static bool
2652
ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
4104 Serge 2653
{
5060 serge 2654
	if (INTEL_INFO(dev)->gen >= 8) {
2655
		return (ipehr >> 23) == 0x1c;
2656
	} else {
2657
		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2658
		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2659
				 MI_SEMAPHORE_REGISTER);
2660
	}
2661
}
2662
 
2663
static struct intel_engine_cs *
2664
semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
2665
{
4104 Serge 2666
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
5060 serge 2667
	struct intel_engine_cs *signaller;
2668
	int i;
2351 Serge 2669
 
5060 serge 2670
	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2671
		for_each_ring(signaller, dev_priv, i) {
2672
			if (ring == signaller)
2673
				continue;
2674
 
2675
			if (offset == signaller->semaphore.signal_ggtt[ring->id])
2676
				return signaller;
2677
		}
2678
	} else {
2679
		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2680
 
2681
		for_each_ring(signaller, dev_priv, i) {
2682
			if(ring == signaller)
2683
				continue;
2684
 
2685
			if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
2686
				return signaller;
2687
		}
2688
	}
2689
 
2690
	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2691
		  ring->id, ipehr, offset);
2692
 
2693
	return NULL;
2694
}
2695
 
2696
static struct intel_engine_cs *
2697
semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2698
{
2699
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
2700
	u32 cmd, ipehr, head;
2701
	u64 offset = 0;
2702
	int i, backwards;
2703
 
4104 Serge 2704
	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
5060 serge 2705
	if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
4104 Serge 2706
		return NULL;
2351 Serge 2707
 
5060 serge 2708
	/*
2709
	 * HEAD is likely pointing to the dword after the actual command,
2710
	 * so scan backwards until we find the MBOX. But limit it to just 3
2711
	 * or 4 dwords depending on the semaphore wait command size.
2712
	 * Note that we don't care about ACTHD here since that might
2713
	 * point at a batch, and semaphores are always emitted into the
2714
	 * ringbuffer itself.
4104 Serge 2715
	 */
5060 serge 2716
	head = I915_READ_HEAD(ring) & HEAD_ADDR;
2717
	backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
2718
 
2719
	for (i = backwards; i; --i) {
2720
		/*
2721
		 * Be paranoid and presume the hw has gone off into the wild -
2722
		 * our ring is smaller than what the hardware (and hence
2723
		 * HEAD_ADDR) allows. Also handles wrap-around.
2724
		 */
2725
		head &= ring->buffer->size - 1;
2726
 
2727
		/* This here seems to blow up */
2728
		cmd = ioread32(ring->buffer->virtual_start + head);
4104 Serge 2729
		if (cmd == ipehr)
2730
			break;
2351 Serge 2731
 
5060 serge 2732
		head -= 4;
2733
	}
2734
 
2735
	if (!i)
4104 Serge 2736
			return NULL;
2351 Serge 2737
 
5060 serge 2738
	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2739
	if (INTEL_INFO(ring->dev)->gen >= 8) {
2740
		offset = ioread32(ring->buffer->virtual_start + head + 12);
2741
		offset <<= 32;
2742
		offset |= ioread32(ring->buffer->virtual_start + head + 8);
2743
	}
2744
	return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
4104 Serge 2745
}
2351 Serge 2746
 
5060 serge 2747
static int semaphore_passed(struct intel_engine_cs *ring)
4104 Serge 2748
{
2749
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
5060 serge 2750
	struct intel_engine_cs *signaller;
2751
	u32 seqno;
4104 Serge 2752
 
5060 serge 2753
	ring->hangcheck.deadlock++;
4104 Serge 2754
 
2755
	signaller = semaphore_waits_for(ring, &seqno);
5060 serge 2756
	if (signaller == NULL)
4104 Serge 2757
		return -1;
2758
 
5060 serge 2759
	/* Prevent pathological recursion due to driver bugs */
2760
	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
2761
		return -1;
2762
 
2763
	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2764
		return 1;
2765
 
4104 Serge 2766
	/* cursory check for an unkickable deadlock */
5060 serge 2767
	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2768
	    semaphore_passed(signaller) < 0)
4104 Serge 2769
		return -1;
2770
 
5060 serge 2771
	return 0;
4104 Serge 2772
}
2773
 
2774
static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2775
{
5060 serge 2776
	struct intel_engine_cs *ring;
4104 Serge 2777
	int i;
2778
 
2779
	for_each_ring(ring, dev_priv, i)
5060 serge 2780
		ring->hangcheck.deadlock = 0;
4104 Serge 2781
}
2782
 
2783
static enum intel_ring_hangcheck_action
5060 serge 2784
ring_stuck(struct intel_engine_cs *ring, u64 acthd)
4104 Serge 2785
{
2786
	struct drm_device *dev = ring->dev;
2787
	struct drm_i915_private *dev_priv = dev->dev_private;
2788
	u32 tmp;
2789
 
5060 serge 2790
	if (acthd != ring->hangcheck.acthd) {
2791
		if (acthd > ring->hangcheck.max_acthd) {
2792
			ring->hangcheck.max_acthd = acthd;
4104 Serge 2793
		return HANGCHECK_ACTIVE;
5060 serge 2794
		}
4104 Serge 2795
 
5060 serge 2796
		return HANGCHECK_ACTIVE_LOOP;
2797
	}
2798
 
4104 Serge 2799
	if (IS_GEN2(dev))
2800
		return HANGCHECK_HUNG;
2801
 
2802
	/* Is the chip hanging on a WAIT_FOR_EVENT?
2803
	 * If so we can simply poke the RB_WAIT bit
2804
	 * and break the hang. This should work on
2805
	 * all but the second generation chipsets.
2806
	 */
2807
	tmp = I915_READ_CTL(ring);
2808
	if (tmp & RING_WAIT) {
5060 serge 2809
		i915_handle_error(dev, false,
2810
				  "Kicking stuck wait on %s",
4104 Serge 2811
			  ring->name);
2812
		I915_WRITE_CTL(ring, tmp);
2813
		return HANGCHECK_KICK;
2814
	}
2815
 
2816
	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2817
		switch (semaphore_passed(ring)) {
2818
		default:
2819
			return HANGCHECK_HUNG;
2820
		case 1:
5060 serge 2821
			i915_handle_error(dev, false,
2822
					  "Kicking stuck semaphore on %s",
4104 Serge 2823
				  ring->name);
2824
			I915_WRITE_CTL(ring, tmp);
2825
			return HANGCHECK_KICK;
2826
		case 0:
2827
			return HANGCHECK_WAIT;
2828
		}
2829
	}
2830
 
2831
	return HANGCHECK_HUNG;
2832
}
2833
 
2834
/**
2835
 * This is called when the chip hasn't reported back with completed
2836
 * batchbuffers in a long time. We keep track per ring seqno progress and
2837
 * if there are no progress, hangcheck score for that ring is increased.
2838
 * Further, acthd is inspected to see if the ring is stuck. On stuck case
2839
 * we kick the ring. If we see no progress on three subsequent calls
2840
 * we assume chip is wedged and try to fix it by resetting the chip.
2841
 */
2842
static void i915_hangcheck_elapsed(unsigned long data)
2843
{
2844
	struct drm_device *dev = (struct drm_device *)data;
5060 serge 2845
	struct drm_i915_private *dev_priv = dev->dev_private;
2846
	struct intel_engine_cs *ring;
4104 Serge 2847
	int i;
2848
	int busy_count = 0, rings_hung = 0;
2849
	bool stuck[I915_NUM_RINGS] = { 0 };
2850
#define BUSY 1
2851
#define KICK 5
2852
#define HUNG 20
2853
 
5060 serge 2854
	if (!i915.enable_hangcheck)
4104 Serge 2855
		return;
2856
 
2857
	for_each_ring(ring, dev_priv, i) {
5060 serge 2858
		u64 acthd;
2859
		u32 seqno;
4104 Serge 2860
		bool busy = true;
2861
 
2862
		semaphore_clear_deadlocks(dev_priv);
2863
 
2864
		seqno = ring->get_seqno(ring, false);
2865
		acthd = intel_ring_get_active_head(ring);
2866
 
2867
		if (ring->hangcheck.seqno == seqno) {
2868
			if (ring_idle(ring, seqno)) {
5060 serge 2869
				ring->hangcheck.action = HANGCHECK_IDLE;
2870
 
4104 Serge 2871
//               if (waitqueue_active(&ring->irq_queue)) {
2872
					/* Issue a wake-up to catch stuck h/w. */
2873
//                   DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2874
//                         ring->name);
2875
//                   wake_up_all(&ring->irq_queue);
2876
//               } else
2877
					busy = false;
2878
			} else {
2879
				/* We always increment the hangcheck score
2880
				 * if the ring is busy and still processing
2881
				 * the same request, so that no single request
2882
				 * can run indefinitely (such as a chain of
2883
				 * batches). The only time we do not increment
2884
				 * the hangcheck score on this ring is if this
2885
				 * ring is in a legitimate wait for another
2886
				 * ring. In that case the waiting ring is a
2887
				 * victim and we want to be sure we catch the
2888
				 * right culprit. Then every time we do kick
2889
				 * the ring, add a small increment to the
2890
				 * score so that we can catch a batch that is
2891
				 * being repeatedly kicked and so responsible
2892
				 * for stalling the machine.
2893
				 */
2894
				ring->hangcheck.action = ring_stuck(ring,
2895
								    acthd);
2896
 
2897
				switch (ring->hangcheck.action) {
4560 Serge 2898
				case HANGCHECK_IDLE:
4104 Serge 2899
				case HANGCHECK_WAIT:
5060 serge 2900
				case HANGCHECK_ACTIVE:
4104 Serge 2901
					break;
5060 serge 2902
				case HANGCHECK_ACTIVE_LOOP:
4104 Serge 2903
					ring->hangcheck.score += BUSY;
2904
					break;
2905
				case HANGCHECK_KICK:
2906
					ring->hangcheck.score += KICK;
2907
					break;
2908
				case HANGCHECK_HUNG:
2909
					ring->hangcheck.score += HUNG;
2910
					stuck[i] = true;
2911
					break;
2912
				}
2913
			}
2914
		} else {
4560 Serge 2915
			ring->hangcheck.action = HANGCHECK_ACTIVE;
2916
 
4104 Serge 2917
			/* Gradually reduce the count so that we catch DoS
2918
			 * attempts across multiple batches.
2919
			 */
2920
			if (ring->hangcheck.score > 0)
2921
				ring->hangcheck.score--;
5060 serge 2922
 
2923
			ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
4104 Serge 2924
		}
2925
 
2926
		ring->hangcheck.seqno = seqno;
2927
		ring->hangcheck.acthd = acthd;
2928
		busy_count += busy;
2929
	}
2930
 
2931
	for_each_ring(ring, dev_priv, i) {
5060 serge 2932
		if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
4104 Serge 2933
			DRM_INFO("%s on %s\n",
2934
				  stuck[i] ? "stuck" : "no progress",
2935
				  ring->name);
2936
			rings_hung++;
2937
		}
2938
	}
2939
 
2940
//   if (rings_hung)
2941
//       return i915_handle_error(dev, true);
2942
 
2943
}
5060 serge 2944
static void ibx_irq_reset(struct drm_device *dev)
2945
{
2946
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 2947
 
5060 serge 2948
	if (HAS_PCH_NOP(dev))
2949
		return;
2950
 
2951
	GEN5_IRQ_RESET(SDE);
2952
 
2953
	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
2954
		I915_WRITE(SERR_INT, 0xffffffff);
2955
}
2956
 
2957
/*
2958
 * SDEIER is also touched by the interrupt handler to work around missed PCH
2959
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
2960
 * instead we unconditionally enable all PCH interrupt sources here, but then
2961
 * only unmask them as needed with SDEIMR.
2962
 *
2963
 * This function needs to be called before interrupts are enabled.
2964
 */
2965
static void ibx_irq_pre_postinstall(struct drm_device *dev)
4104 Serge 2966
{
2967
	struct drm_i915_private *dev_priv = dev->dev_private;
2968
 
3746 Serge 2969
	if (HAS_PCH_NOP(dev))
2970
		return;
2971
 
5060 serge 2972
	WARN_ON(I915_READ(SDEIER) != 0);
3746 Serge 2973
	I915_WRITE(SDEIER, 0xffffffff);
4104 Serge 2974
	POSTING_READ(SDEIER);
2351 Serge 2975
}
2976
 
5060 serge 2977
static void gen5_gt_irq_reset(struct drm_device *dev)
4104 Serge 2978
{
2979
	struct drm_i915_private *dev_priv = dev->dev_private;
2980
 
5060 serge 2981
	GEN5_IRQ_RESET(GT);
2982
	if (INTEL_INFO(dev)->gen >= 6)
2983
		GEN5_IRQ_RESET(GEN6_PM);
4104 Serge 2984
}
2985
 
2986
/* drm_dma.h hooks
2987
*/
5060 serge 2988
static void ironlake_irq_reset(struct drm_device *dev)
4104 Serge 2989
{
5060 serge 2990
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 2991
 
5060 serge 2992
	I915_WRITE(HWSTAM, 0xffffffff);
4104 Serge 2993
 
5060 serge 2994
	GEN5_IRQ_RESET(DE);
2995
	if (IS_GEN7(dev))
2996
		I915_WRITE(GEN7_ERR_INT, 0xffffffff);
4104 Serge 2997
 
5060 serge 2998
	gen5_gt_irq_reset(dev);
4104 Serge 2999
 
5060 serge 3000
	ibx_irq_reset(dev);
4104 Serge 3001
}
3002
 
5354 serge 3003
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3004
{
3005
	enum pipe pipe;
3006
 
3007
	I915_WRITE(PORT_HOTPLUG_EN, 0);
3008
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3009
 
3010
	for_each_pipe(dev_priv, pipe)
3011
		I915_WRITE(PIPESTAT(pipe), 0xffff);
3012
 
3013
	GEN5_IRQ_RESET(VLV_);
3014
}
3015
 
3031 serge 3016
static void valleyview_irq_preinstall(struct drm_device *dev)
3017
{
5060 serge 3018
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 3019
 
3020
	/* VLV magic */
3021
	I915_WRITE(VLV_IMR, 0);
3022
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3023
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3024
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3025
 
5060 serge 3026
	gen5_gt_irq_reset(dev);
4104 Serge 3027
 
5354 serge 3028
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3031 serge 3029
 
5354 serge 3030
	vlv_display_irq_reset(dev_priv);
3031 serge 3031
}
3032
 
5060 serge 3033
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
4560 Serge 3034
{
5060 serge 3035
	GEN8_IRQ_RESET_NDX(GT, 0);
3036
	GEN8_IRQ_RESET_NDX(GT, 1);
3037
	GEN8_IRQ_RESET_NDX(GT, 2);
3038
	GEN8_IRQ_RESET_NDX(GT, 3);
3039
}
3040
 
3041
static void gen8_irq_reset(struct drm_device *dev)
3042
{
4560 Serge 3043
	struct drm_i915_private *dev_priv = dev->dev_private;
3044
	int pipe;
3045
 
3046
	I915_WRITE(GEN8_MASTER_IRQ, 0);
3047
	POSTING_READ(GEN8_MASTER_IRQ);
3048
 
5060 serge 3049
	gen8_gt_irq_reset(dev_priv);
4560 Serge 3050
 
5354 serge 3051
	for_each_pipe(dev_priv, pipe)
3052
		if (intel_display_power_is_enabled(dev_priv,
5060 serge 3053
						POWER_DOMAIN_PIPE(pipe)))
3054
		GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
4560 Serge 3055
 
5060 serge 3056
	GEN5_IRQ_RESET(GEN8_DE_PORT_);
3057
	GEN5_IRQ_RESET(GEN8_DE_MISC_);
3058
	GEN5_IRQ_RESET(GEN8_PCU_);
4560 Serge 3059
 
5060 serge 3060
	ibx_irq_reset(dev);
3061
}
4560 Serge 3062
 
5060 serge 3063
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
3064
{
5354 serge 3065
	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
4560 Serge 3066
 
5354 serge 3067
	spin_lock_irq(&dev_priv->irq_lock);
5060 serge 3068
	GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
5354 serge 3069
			  ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
5060 serge 3070
	GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
5354 serge 3071
			  ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
3072
	spin_unlock_irq(&dev_priv->irq_lock);
5060 serge 3073
}
3074
 
3075
static void cherryview_irq_preinstall(struct drm_device *dev)
3076
{
3077
	struct drm_i915_private *dev_priv = dev->dev_private;
3078
 
3079
	I915_WRITE(GEN8_MASTER_IRQ, 0);
3080
	POSTING_READ(GEN8_MASTER_IRQ);
3081
 
3082
	gen8_gt_irq_reset(dev_priv);
3083
 
3084
	GEN5_IRQ_RESET(GEN8_PCU_);
3085
 
3086
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3087
 
5354 serge 3088
	vlv_display_irq_reset(dev_priv);
4560 Serge 3089
}
3090
 
3746 Serge 3091
static void ibx_hpd_irq_setup(struct drm_device *dev)
3092
{
5060 serge 3093
	struct drm_i915_private *dev_priv = dev->dev_private;
3746 Serge 3094
	struct intel_encoder *intel_encoder;
4104 Serge 3095
	u32 hotplug_irqs, hotplug, enabled_irqs = 0;
3746 Serge 3096
 
3097
	if (HAS_PCH_IBX(dev)) {
4104 Serge 3098
		hotplug_irqs = SDE_HOTPLUG_MASK;
5354 serge 3099
		for_each_intel_encoder(dev, intel_encoder)
3746 Serge 3100
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
4104 Serge 3101
				enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
3746 Serge 3102
	} else {
4104 Serge 3103
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
5354 serge 3104
		for_each_intel_encoder(dev, intel_encoder)
3746 Serge 3105
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
4104 Serge 3106
				enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
3746 Serge 3107
	}
3108
 
4104 Serge 3109
	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3746 Serge 3110
 
3111
	/*
2351 Serge 3112
 * Enable digital hotplug on the PCH, and configure the DP short pulse
3113
 * duration to 2ms (which is the minimum in the Display Port spec)
3114
 *
3115
 * This register is the same on all known PCH chips.
3116
 */
3117
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3118
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3119
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3120
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3121
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3122
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3123
}
3124
 
3480 Serge 3125
static void ibx_irq_postinstall(struct drm_device *dev)
3126
{
5060 serge 3127
	struct drm_i915_private *dev_priv = dev->dev_private;
3480 Serge 3128
	u32 mask;
3129
 
3746 Serge 3130
	if (HAS_PCH_NOP(dev))
3131
		return;
3132
 
5060 serge 3133
	if (HAS_PCH_IBX(dev))
3134
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3135
	else
3136
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
4104 Serge 3137
 
5060 serge 3138
	GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
3480 Serge 3139
	I915_WRITE(SDEIMR, ~mask);
3140
}
3141
 
4104 Serge 3142
static void gen5_gt_irq_postinstall(struct drm_device *dev)
2351 Serge 3143
{
4104 Serge 3144
	struct drm_i915_private *dev_priv = dev->dev_private;
3145
	u32 pm_irqs, gt_irqs;
2351 Serge 3146
 
4104 Serge 3147
	pm_irqs = gt_irqs = 0;
2351 Serge 3148
 
3149
	dev_priv->gt_irq_mask = ~0;
4560 Serge 3150
	if (HAS_L3_DPF(dev)) {
4104 Serge 3151
		/* L3 parity interrupt is always unmasked. */
4560 Serge 3152
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3153
		gt_irqs |= GT_PARITY_ERROR(dev);
4104 Serge 3154
	}
2351 Serge 3155
 
4104 Serge 3156
	gt_irqs |= GT_RENDER_USER_INTERRUPT;
3157
	if (IS_GEN5(dev)) {
3158
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3159
			   ILK_BSD_USER_INTERRUPT;
3160
	} else {
3161
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3162
	}
2351 Serge 3163
 
5060 serge 3164
	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
2351 Serge 3165
 
4104 Serge 3166
	if (INTEL_INFO(dev)->gen >= 6) {
5354 serge 3167
		/*
3168
		 * RPS interrupts will get enabled/disabled on demand when RPS
3169
		 * itself is enabled/disabled.
3170
		 */
4104 Serge 3171
		if (HAS_VEBOX(dev))
3172
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3173
 
3174
		dev_priv->pm_irq_mask = 0xffffffff;
5060 serge 3175
		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
2351 Serge 3176
	}
3177
}
3178
 
4104 Serge 3179
static int ironlake_irq_postinstall(struct drm_device *dev)
3031 serge 3180
{
5060 serge 3181
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 3182
	u32 display_mask, extra_mask;
3183
 
3184
	if (INTEL_INFO(dev)->gen >= 7) {
3185
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3186
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3031 serge 3187
				DE_PLANEB_FLIP_DONE_IVB |
5060 serge 3188
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
4104 Serge 3189
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
5060 serge 3190
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
4104 Serge 3191
	} else {
3192
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3193
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
4560 Serge 3194
				DE_AUX_CHANNEL_A |
3195
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3196
				DE_POISON);
5060 serge 3197
		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3198
				DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
4104 Serge 3199
	}
3200
 
3031 serge 3201
	dev_priv->irq_mask = ~display_mask;
3202
 
5060 serge 3203
	I915_WRITE(HWSTAM, 0xeffe);
3031 serge 3204
 
5060 serge 3205
	ibx_irq_pre_postinstall(dev);
3206
 
3207
	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3208
 
4104 Serge 3209
	gen5_gt_irq_postinstall(dev);
3031 serge 3210
 
4104 Serge 3211
	ibx_irq_postinstall(dev);
3031 serge 3212
 
4104 Serge 3213
	if (IS_IRONLAKE_M(dev)) {
3214
		/* Enable PCU event interrupts
3215
		 *
3216
		 * spinlocking not required here for correctness since interrupt
3217
		 * setup is guaranteed to run in single-threaded context. But we
3218
		 * need it to make the assert_spin_locked happy. */
5354 serge 3219
		spin_lock_irq(&dev_priv->irq_lock);
4104 Serge 3220
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
5354 serge 3221
		spin_unlock_irq(&dev_priv->irq_lock);
4104 Serge 3222
	}
3031 serge 3223
 
3224
	return 0;
3225
}
3226
 
5060 serge 3227
static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3228
{
3229
	u32 pipestat_mask;
3230
	u32 iir_mask;
5354 serge 3231
	enum pipe pipe;
5060 serge 3232
 
3233
	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3234
			PIPE_FIFO_UNDERRUN_STATUS;
3235
 
5354 serge 3236
	for_each_pipe(dev_priv, pipe)
3237
		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
5060 serge 3238
	POSTING_READ(PIPESTAT(PIPE_A));
3239
 
3240
	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3241
			PIPE_CRC_DONE_INTERRUPT_STATUS;
3242
 
5354 serge 3243
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3244
	for_each_pipe(dev_priv, pipe)
3245
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
5060 serge 3246
 
3247
	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3248
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3249
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
5354 serge 3250
	if (IS_CHERRYVIEW(dev_priv))
3251
		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
5060 serge 3252
	dev_priv->irq_mask &= ~iir_mask;
3253
 
3254
	I915_WRITE(VLV_IIR, iir_mask);
3255
	I915_WRITE(VLV_IIR, iir_mask);
5354 serge 3256
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
5060 serge 3257
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
5354 serge 3258
	POSTING_READ(VLV_IMR);
5060 serge 3259
}
3260
 
3261
static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3262
{
3263
	u32 pipestat_mask;
3264
	u32 iir_mask;
5354 serge 3265
	enum pipe pipe;
5060 serge 3266
 
3267
	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3268
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3269
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
5354 serge 3270
	if (IS_CHERRYVIEW(dev_priv))
3271
		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
5060 serge 3272
 
3273
	dev_priv->irq_mask |= iir_mask;
5354 serge 3274
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
5060 serge 3275
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3276
	I915_WRITE(VLV_IIR, iir_mask);
3277
	I915_WRITE(VLV_IIR, iir_mask);
3278
	POSTING_READ(VLV_IIR);
3279
 
3280
	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3281
			PIPE_CRC_DONE_INTERRUPT_STATUS;
3282
 
5354 serge 3283
	i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3284
	for_each_pipe(dev_priv, pipe)
3285
		i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
5060 serge 3286
 
3287
	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3288
			PIPE_FIFO_UNDERRUN_STATUS;
5354 serge 3289
 
3290
	for_each_pipe(dev_priv, pipe)
3291
		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
5060 serge 3292
	POSTING_READ(PIPESTAT(PIPE_A));
3293
}
3294
 
3295
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3296
{
3297
	assert_spin_locked(&dev_priv->irq_lock);
3298
 
3299
	if (dev_priv->display_irqs_enabled)
3300
		return;
3301
 
3302
	dev_priv->display_irqs_enabled = true;
3303
 
5354 serge 3304
	if (intel_irqs_enabled(dev_priv))
5060 serge 3305
		valleyview_display_irqs_install(dev_priv);
3306
}
3307
 
3308
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3309
{
3310
	assert_spin_locked(&dev_priv->irq_lock);
3311
 
3312
	if (!dev_priv->display_irqs_enabled)
3313
		return;
3314
 
3315
	dev_priv->display_irqs_enabled = false;
3316
 
5354 serge 3317
	if (intel_irqs_enabled(dev_priv))
5060 serge 3318
		valleyview_display_irqs_uninstall(dev_priv);
3319
}
3320
 
5354 serge 3321
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3031 serge 3322
{
5060 serge 3323
	dev_priv->irq_mask = ~0;
3031 serge 3324
 
3480 Serge 3325
	I915_WRITE(PORT_HOTPLUG_EN, 0);
3326
	POSTING_READ(PORT_HOTPLUG_EN);
3327
 
5354 serge 3328
	I915_WRITE(VLV_IIR, 0xffffffff);
3329
	I915_WRITE(VLV_IIR, 0xffffffff);
3330
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3031 serge 3331
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
5354 serge 3332
	POSTING_READ(VLV_IMR);
3031 serge 3333
 
4104 Serge 3334
	/* Interrupt setup is already guaranteed to be single-threaded, this is
3335
	 * just to make the assert_spin_locked check happy. */
5354 serge 3336
	spin_lock_irq(&dev_priv->irq_lock);
5060 serge 3337
	if (dev_priv->display_irqs_enabled)
3338
		valleyview_display_irqs_install(dev_priv);
5354 serge 3339
	spin_unlock_irq(&dev_priv->irq_lock);
3340
}
3031 serge 3341
 
5354 serge 3342
static int valleyview_irq_postinstall(struct drm_device *dev)
3343
{
3344
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 3345
 
5354 serge 3346
	vlv_display_irq_postinstall(dev_priv);
3347
 
4104 Serge 3348
	gen5_gt_irq_postinstall(dev);
3243 Serge 3349
 
3031 serge 3350
	/* ack & enable invalid PTE error interrupts */
3351
#if 0 /* FIXME: add support to irq handler for checking these bits */
3352
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3353
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3354
#endif
3355
 
3356
	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3480 Serge 3357
 
3358
	return 0;
3359
}
3360
 
4560 Serge 3361
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3362
{
3363
	/* These are interrupts we'll toggle with the ring mask register */
3364
	uint32_t gt_interrupts[] = {
3365
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
5354 serge 3366
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
4560 Serge 3367
			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
5354 serge 3368
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3369
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
4560 Serge 3370
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
5354 serge 3371
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3372
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3373
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
4560 Serge 3374
		0,
5354 serge 3375
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3376
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
4560 Serge 3377
		};
3378
 
5060 serge 3379
	dev_priv->pm_irq_mask = 0xffffffff;
5354 serge 3380
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3381
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3382
	/*
3383
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
3384
	 * is enabled/disabled.
3385
	 */
3386
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
3387
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
4560 Serge 3388
}
3389
 
3390
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3391
{
5354 serge 3392
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3393
	uint32_t de_pipe_enables;
3394
	int pipe;
3395
	u32 aux_en = GEN8_AUX_CHANNEL_A;
3396
 
3397
	if (IS_GEN9(dev_priv)) {
3398
		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3399
				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3400
		aux_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3401
			GEN9_AUX_CHANNEL_D;
3402
	} else
3403
		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
4560 Serge 3404
				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
5354 serge 3405
 
3406
	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
5060 serge 3407
		GEN8_PIPE_FIFO_UNDERRUN;
5354 serge 3408
 
4560 Serge 3409
	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3410
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3411
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3412
 
5354 serge 3413
	for_each_pipe(dev_priv, pipe)
3414
		if (intel_display_power_is_enabled(dev_priv,
5060 serge 3415
				POWER_DOMAIN_PIPE(pipe)))
3416
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3417
					  dev_priv->de_irq_mask[pipe],
3418
					  de_pipe_enables);
4560 Serge 3419
 
5354 serge 3420
	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~aux_en, aux_en);
4560 Serge 3421
}
3422
 
3423
static int gen8_irq_postinstall(struct drm_device *dev)
3424
{
3425
	struct drm_i915_private *dev_priv = dev->dev_private;
3426
 
5060 serge 3427
	ibx_irq_pre_postinstall(dev);
3428
 
4560 Serge 3429
	gen8_gt_irq_postinstall(dev_priv);
3430
	gen8_de_irq_postinstall(dev_priv);
3431
 
3432
	ibx_irq_postinstall(dev);
3433
 
3434
	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3435
	POSTING_READ(GEN8_MASTER_IRQ);
3436
 
3437
	return 0;
3438
}
3439
 
5060 serge 3440
static int cherryview_irq_postinstall(struct drm_device *dev)
4560 Serge 3441
{
3442
	struct drm_i915_private *dev_priv = dev->dev_private;
3443
 
5354 serge 3444
	vlv_display_irq_postinstall(dev_priv);
4560 Serge 3445
 
5060 serge 3446
	gen8_gt_irq_postinstall(dev_priv);
4560 Serge 3447
 
5060 serge 3448
	I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3449
	POSTING_READ(GEN8_MASTER_IRQ);
4560 Serge 3450
 
5060 serge 3451
	return 0;
3452
}
4560 Serge 3453
 
5060 serge 3454
static void gen8_irq_uninstall(struct drm_device *dev)
3455
{
3456
	struct drm_i915_private *dev_priv = dev->dev_private;
4560 Serge 3457
 
5060 serge 3458
	if (!dev_priv)
3459
		return;
3460
 
3461
	gen8_irq_reset(dev);
4560 Serge 3462
}
3463
 
5354 serge 3464
static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
3465
{
3466
	/* Interrupt setup is already guaranteed to be single-threaded, this is
3467
	 * just to make the assert_spin_locked check happy. */
3468
	spin_lock_irq(&dev_priv->irq_lock);
3469
	if (dev_priv->display_irqs_enabled)
3470
		valleyview_display_irqs_uninstall(dev_priv);
3471
	spin_unlock_irq(&dev_priv->irq_lock);
3472
 
3473
	vlv_display_irq_reset(dev_priv);
3474
 
3475
	dev_priv->irq_mask = ~0;
3476
}
3477
 
3031 serge 3478
static void valleyview_irq_uninstall(struct drm_device *dev)
3479
{
5060 serge 3480
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 3481
 
3482
	if (!dev_priv)
3483
		return;
3484
 
5060 serge 3485
	I915_WRITE(VLV_MASTER_IER, 0);
4293 Serge 3486
 
5354 serge 3487
	gen5_gt_irq_reset(dev);
3031 serge 3488
 
3489
	I915_WRITE(HWSTAM, 0xffffffff);
5060 serge 3490
 
5354 serge 3491
	vlv_display_irq_uninstall(dev_priv);
3031 serge 3492
}
3493
 
5060 serge 3494
static void cherryview_irq_uninstall(struct drm_device *dev)
3031 serge 3495
{
5060 serge 3496
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 3497
 
3498
	if (!dev_priv)
3499
		return;
3500
 
5060 serge 3501
	I915_WRITE(GEN8_MASTER_IRQ, 0);
3502
	POSTING_READ(GEN8_MASTER_IRQ);
4293 Serge 3503
 
5354 serge 3504
	gen8_gt_irq_reset(dev_priv);
3031 serge 3505
 
5354 serge 3506
	GEN5_IRQ_RESET(GEN8_PCU_);
3031 serge 3507
 
5354 serge 3508
	vlv_display_irq_uninstall(dev_priv);
5060 serge 3509
}
3510
 
3511
static void ironlake_irq_uninstall(struct drm_device *dev)
3512
{
3513
	struct drm_i915_private *dev_priv = dev->dev_private;
3514
 
3515
	if (!dev_priv)
3746 Serge 3516
		return;
3517
 
5060 serge 3518
	ironlake_irq_reset(dev);
3031 serge 3519
}
3520
 
3521
#if 0
3522
static void i8xx_irq_preinstall(struct drm_device * dev)
3523
{
5060 serge 3524
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 3525
	int pipe;
3526
 
5354 serge 3527
	for_each_pipe(dev_priv, pipe)
3031 serge 3528
		I915_WRITE(PIPESTAT(pipe), 0);
3529
	I915_WRITE16(IMR, 0xffff);
3530
	I915_WRITE16(IER, 0x0);
3531
	POSTING_READ16(IER);
3532
}
3533
 
3534
static int i8xx_irq_postinstall(struct drm_device *dev)
3535
{
5060 serge 3536
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 3537
 
3538
	I915_WRITE16(EMR,
3539
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3540
 
3541
	/* Unmask the interrupts that we always want on. */
3542
	dev_priv->irq_mask =
3543
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3544
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3545
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3546
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3547
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3548
	I915_WRITE16(IMR, dev_priv->irq_mask);
3549
 
3550
	I915_WRITE16(IER,
3551
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3552
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3553
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3554
		     I915_USER_INTERRUPT);
3555
	POSTING_READ16(IER);
3556
 
4560 Serge 3557
	/* Interrupt setup is already guaranteed to be single-threaded, this is
3558
	 * just to make the assert_spin_locked check happy. */
5354 serge 3559
	spin_lock_irq(&dev_priv->irq_lock);
5060 serge 3560
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3561
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
5354 serge 3562
	spin_unlock_irq(&dev_priv->irq_lock);
4560 Serge 3563
 
3031 serge 3564
	return 0;
3565
}
3566
 
3746 Serge 3567
/*
3568
 * Returns true when a page flip has completed.
3569
 */
3570
static bool i8xx_handle_vblank(struct drm_device *dev,
4560 Serge 3571
			       int plane, int pipe, u32 iir)
3746 Serge 3572
{
5060 serge 3573
	struct drm_i915_private *dev_priv = dev->dev_private;
4560 Serge 3574
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3746 Serge 3575
 
3576
//   if (!drm_handle_vblank(dev, pipe))
3577
       return false;
3578
 
3579
	if ((iir & flip_pending) == 0)
5354 serge 3580
		goto check_page_flip;
3746 Serge 3581
 
3582
//   intel_prepare_page_flip(dev, pipe);
3583
 
3584
	/* We detect FlipDone by looking for the change in PendingFlip from '1'
3585
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3586
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3587
	 * the flip is completed (no longer pending). Since this doesn't raise
3588
	 * an interrupt per se, we watch for the change at vblank.
3589
	 */
3590
	if (I915_READ16(ISR) & flip_pending)
5354 serge 3591
		goto check_page_flip;
3746 Serge 3592
 
3593
	intel_finish_page_flip(dev, pipe);
5354 serge 3594
	return true;
3746 Serge 3595
 
5354 serge 3596
check_page_flip:
3597
//	intel_check_page_flip(dev, pipe);
3598
	return false;
3746 Serge 3599
}
3600
 
3243 Serge 3601
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3031 serge 3602
{
5060 serge 3603
	struct drm_device *dev = arg;
3604
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 3605
	u16 iir, new_iir;
3606
	u32 pipe_stats[2];
3607
	int pipe;
3608
	u16 flip_mask =
3609
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3610
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3611
 
3612
	iir = I915_READ16(IIR);
3613
	if (iir == 0)
3614
		return IRQ_NONE;
3615
 
3616
	while (iir & ~flip_mask) {
3617
		/* Can't rely on pipestat interrupt bit in iir as it might
3618
		 * have been cleared after the pipestat interrupt was received.
3619
		 * It doesn't set the bit in iir again, but it still produces
3620
		 * interrupts (for non-MSI).
3621
		 */
5354 serge 3622
		spin_lock(&dev_priv->irq_lock);
4126 Serge 3623
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
5354 serge 3624
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3031 serge 3625
 
5354 serge 3626
		for_each_pipe(dev_priv, pipe) {
3031 serge 3627
			int reg = PIPESTAT(pipe);
3628
			pipe_stats[pipe] = I915_READ(reg);
3629
 
3630
			/*
3631
			 * Clear the PIPE*STAT regs before the IIR
3632
			 */
5060 serge 3633
			if (pipe_stats[pipe] & 0x8000ffff)
3031 serge 3634
				I915_WRITE(reg, pipe_stats[pipe]);
3635
			}
5354 serge 3636
		spin_unlock(&dev_priv->irq_lock);
3031 serge 3637
 
3638
		I915_WRITE16(IIR, iir & ~flip_mask);
3639
		new_iir = I915_READ16(IIR); /* Flush posted writes */
3640
 
3641
		if (iir & I915_USER_INTERRUPT)
3642
			notify_ring(dev, &dev_priv->ring[RCS]);
3643
 
5354 serge 3644
		for_each_pipe(dev_priv, pipe) {
4560 Serge 3645
			int plane = pipe;
3646
			if (HAS_FBC(dev))
3647
				plane = !plane;
3031 serge 3648
 
4560 Serge 3649
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3650
			    i8xx_handle_vblank(dev, plane, pipe, iir))
3651
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3031 serge 3652
 
4560 Serge 3653
			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3654
				i9xx_pipe_crc_irq_handler(dev, pipe);
5060 serge 3655
 
5354 serge 3656
			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3657
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
3658
								    pipe);
4560 Serge 3659
		}
3660
 
3031 serge 3661
		iir = new_iir;
3662
	}
3663
 
3664
	return IRQ_HANDLED;
3665
}
3666
 
3667
static void i8xx_irq_uninstall(struct drm_device * dev)
3668
{
5060 serge 3669
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 3670
	int pipe;
3671
 
5354 serge 3672
	for_each_pipe(dev_priv, pipe) {
3031 serge 3673
		/* Clear enable bits; then clear status bits */
3674
		I915_WRITE(PIPESTAT(pipe), 0);
3675
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3676
	}
3677
	I915_WRITE16(IMR, 0xffff);
3678
	I915_WRITE16(IER, 0x0);
3679
	I915_WRITE16(IIR, I915_READ16(IIR));
3680
}
3681
 
3682
#endif
3683
 
3684
static void i915_irq_preinstall(struct drm_device * dev)
3685
{
5060 serge 3686
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 3687
	int pipe;
3688
 
3689
	if (I915_HAS_HOTPLUG(dev)) {
3690
		I915_WRITE(PORT_HOTPLUG_EN, 0);
3691
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3692
	}
3693
 
3694
	I915_WRITE16(HWSTAM, 0xeffe);
5354 serge 3695
	for_each_pipe(dev_priv, pipe)
3031 serge 3696
		I915_WRITE(PIPESTAT(pipe), 0);
3697
	I915_WRITE(IMR, 0xffffffff);
3698
	I915_WRITE(IER, 0x0);
3699
	POSTING_READ(IER);
3700
}
3701
 
3702
static int i915_irq_postinstall(struct drm_device *dev)
3703
{
5060 serge 3704
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 3705
	u32 enable_mask;
3706
 
3707
	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3708
 
3709
	/* Unmask the interrupts that we always want on. */
3710
	dev_priv->irq_mask =
3711
		~(I915_ASLE_INTERRUPT |
3712
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3713
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3714
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3715
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3716
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3717
 
3718
	enable_mask =
3719
		I915_ASLE_INTERRUPT |
3720
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3721
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3722
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3723
		I915_USER_INTERRUPT;
3480 Serge 3724
 
3031 serge 3725
	if (I915_HAS_HOTPLUG(dev)) {
3480 Serge 3726
		I915_WRITE(PORT_HOTPLUG_EN, 0);
3727
		POSTING_READ(PORT_HOTPLUG_EN);
3728
 
3031 serge 3729
		/* Enable in IER... */
3730
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3731
		/* and unmask in IMR */
3732
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3733
	}
3734
 
3735
	I915_WRITE(IMR, dev_priv->irq_mask);
3736
	I915_WRITE(IER, enable_mask);
3737
	POSTING_READ(IER);
3738
 
4126 Serge 3739
	i915_enable_asle_pipestat(dev);
3480 Serge 3740
 
4560 Serge 3741
	/* Interrupt setup is already guaranteed to be single-threaded, this is
3742
	 * just to make the assert_spin_locked check happy. */
5354 serge 3743
	spin_lock_irq(&dev_priv->irq_lock);
5060 serge 3744
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3745
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
5354 serge 3746
	spin_unlock_irq(&dev_priv->irq_lock);
4560 Serge 3747
 
3480 Serge 3748
	return 0;
3749
}
3750
 
3746 Serge 3751
/*
3752
 * Returns true when a page flip has completed.
3753
 */
3754
static bool i915_handle_vblank(struct drm_device *dev,
3755
			       int plane, int pipe, u32 iir)
3480 Serge 3756
{
5060 serge 3757
	struct drm_i915_private *dev_priv = dev->dev_private;
3746 Serge 3758
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3480 Serge 3759
 
3746 Serge 3760
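	/*
	 * drm_handle_vblank() is stubbed out in this port, so bail out here;
	 * page flips are never reported as completed from this path.
	 */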
//   if (!drm_handle_vblank(dev, pipe))
3761
		return false;
3480 Serge 3762
 
3746 Serge 3763
	if ((iir & flip_pending) == 0)
5354 serge 3764
		goto check_page_flip;
3480 Serge 3765
 
3746 Serge 3766
//   intel_prepare_page_flip(dev, plane);
3031 serge 3767
 
3746 Serge 3768
	/* We detect FlipDone by looking for the change in PendingFlip from '1'
3769
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3770
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3771
	 * the flip is completed (no longer pending). Since this doesn't raise
3772
	 * an interrupt per se, we watch for the change at vblank.
3773
	 */
3774
	if (I915_READ(ISR) & flip_pending)
5354 serge 3775
		goto check_page_flip;
3746 Serge 3776
 
3777
	intel_finish_page_flip(dev, pipe);
5354 serge 3778
	return true;
3746 Serge 3779
 
5354 serge 3780
check_page_flip:
3781
//	intel_check_page_flip(dev, pipe);
3782
	return false;
3031 serge 3783
}
3784
 
3243 Serge 3785
static irqreturn_t i915_irq_handler(int irq, void *arg)
3031 serge 3786
{
5060 serge 3787
	struct drm_device *dev = arg;
3788
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 3789
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
3790
	u32 flip_mask =
3791
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3792
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3793
	int pipe, ret = IRQ_NONE;
3794
 
3795
	iir = I915_READ(IIR);
3796
	do {
3797
		bool irq_received = (iir & ~flip_mask) != 0;
3798
		bool blc_event = false;
3799
 
3800
		/* Can't rely on pipestat interrupt bit in iir as it might
3801
		 * have been cleared after the pipestat interrupt was received.
3802
		 * It doesn't set the bit in iir again, but it still produces
3803
		 * interrupts (for non-MSI).
3804
		 */
5354 serge 3805
		spin_lock(&dev_priv->irq_lock);
4126 Serge 3806
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
5354 serge 3807
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3031 serge 3808
 
5354 serge 3809
		for_each_pipe(dev_priv, pipe) {
3031 serge 3810
			int reg = PIPESTAT(pipe);
3811
			pipe_stats[pipe] = I915_READ(reg);
3812
 
3813
			/* Clear the PIPE*STAT regs before the IIR */
3814
			if (pipe_stats[pipe] & 0x8000ffff) {
3815
				I915_WRITE(reg, pipe_stats[pipe]);
3816
				irq_received = true;
3817
			}
3818
		}
5354 serge 3819
		spin_unlock(&dev_priv->irq_lock);
3031 serge 3820
 
3821
		if (!irq_received)
3822
			break;
3823
 
3824
		/* Consume port.  Then clear IIR or we'll miss events */
5060 serge 3825
		if (I915_HAS_HOTPLUG(dev) &&
3826
		    iir & I915_DISPLAY_PORT_INTERRUPT)
3827
			i9xx_hpd_irq_handler(dev);
3031 serge 3828
 
3829
		I915_WRITE(IIR, iir & ~flip_mask);
3830
		new_iir = I915_READ(IIR); /* Flush posted writes */
3831
 
3832
		if (iir & I915_USER_INTERRUPT)
3833
			notify_ring(dev, &dev_priv->ring[RCS]);
3834
 
5354 serge 3835
		for_each_pipe(dev_priv, pipe) {
3031 serge 3836
			int plane = pipe;
4560 Serge 3837
			if (HAS_FBC(dev))
3031 serge 3838
				plane = !plane;
3839
 
3746 Serge 3840
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3841
			    i915_handle_vblank(dev, plane, pipe, iir))
3842
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3843
 
3031 serge 3844
			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3845
				blc_event = true;
4560 Serge 3846
 
3847
			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3848
				i9xx_pipe_crc_irq_handler(dev, pipe);
5060 serge 3849
 
5354 serge 3850
			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3851
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
3852
								    pipe);
3031 serge 3853
		}
3854
 
4126 Serge 3855
		if (blc_event || (iir & I915_ASLE_INTERRUPT))
3856
			intel_opregion_asle_intr(dev);
3031 serge 3857
 
3858
		/* With MSI, interrupts are only generated when iir
3859
		 * transitions from zero to nonzero.  If another bit got
3860
		 * set while we were handling the existing iir bits, then
3861
		 * we would never get another interrupt.
3862
		 *
3863
		 * This is fine on non-MSI as well, as if we hit this path
3864
		 * we avoid exiting the interrupt handler only to generate
3865
		 * another one.
3866
		 *
3867
		 * Note that for MSI this could cause a stray interrupt report
3868
		 * if an interrupt landed in the time between writing IIR and
3869
		 * the posting read.  This should be rare enough to never
3870
		 * trigger the 99% of 100,000 interrupts test for disabling
3871
		 * stray interrupts.
3872
		 */
3873
		ret = IRQ_HANDLED;
3874
		iir = new_iir;
3875
	} while (iir & ~flip_mask);
3876
 
3877
	return ret;
3878
}
3879
 
3880
static void i915_irq_uninstall(struct drm_device * dev)
3881
{
5060 serge 3882
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 3883
	int pipe;
3884
 
3885
	if (I915_HAS_HOTPLUG(dev)) {
3886
		I915_WRITE(PORT_HOTPLUG_EN, 0);
3887
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3888
	}
3889
 
3890
	I915_WRITE16(HWSTAM, 0xffff);
5354 serge 3891
	for_each_pipe(dev_priv, pipe) {
3031 serge 3892
		/* Clear enable bits; then clear status bits */
3893
		I915_WRITE(PIPESTAT(pipe), 0);
3894
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3895
	}
3896
	I915_WRITE(IMR, 0xffffffff);
3897
	I915_WRITE(IER, 0x0);
3898
 
3899
	I915_WRITE(IIR, I915_READ(IIR));
3900
}
3901
 
3902
static void i965_irq_preinstall(struct drm_device * dev)
3903
{
5060 serge 3904
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 3905
	int pipe;
3906
 
3907
	I915_WRITE(PORT_HOTPLUG_EN, 0);
3908
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3909
 
3910
	I915_WRITE(HWSTAM, 0xeffe);
5354 serge 3911
	for_each_pipe(dev_priv, pipe)
3031 serge 3912
		I915_WRITE(PIPESTAT(pipe), 0);
3913
	I915_WRITE(IMR, 0xffffffff);
3914
	I915_WRITE(IER, 0x0);
3915
	POSTING_READ(IER);
3916
}
3917
 
3918
static int i965_irq_postinstall(struct drm_device *dev)
3919
{
5060 serge 3920
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 3921
	u32 enable_mask;
3922
	u32 error_mask;
3923
 
3924
	/* Unmask the interrupts that we always want on. */
3925
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
3926
			       I915_DISPLAY_PORT_INTERRUPT |
3927
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3928
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3929
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3930
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3931
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3932
 
3933
	enable_mask = ~dev_priv->irq_mask;
3746 Serge 3934
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3935
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3031 serge 3936
	enable_mask |= I915_USER_INTERRUPT;
3937
 
3938
	if (IS_G4X(dev))
3939
		enable_mask |= I915_BSD_USER_INTERRUPT;
3940
 
4104 Serge 3941
	/* Interrupt setup is already guaranteed to be single-threaded, this is
3942
	 * just to make the assert_spin_locked check happy. */
5354 serge 3943
	spin_lock_irq(&dev_priv->irq_lock);
5060 serge 3944
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3945
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3946
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
5354 serge 3947
	spin_unlock_irq(&dev_priv->irq_lock);
3031 serge 3948
 
3949
	/*
3950
	 * Enable some error detection, note the instruction error mask
3951
	 * bit is reserved, so we leave it masked.
3952
	 */
3953
	if (IS_G4X(dev)) {
3954
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
3955
			       GM45_ERROR_MEM_PRIV |
3956
			       GM45_ERROR_CP_PRIV |
3957
			       I915_ERROR_MEMORY_REFRESH);
3958
	} else {
3959
		error_mask = ~(I915_ERROR_PAGE_TABLE |
3960
			       I915_ERROR_MEMORY_REFRESH);
3961
	}
3962
	I915_WRITE(EMR, error_mask);
3963
 
3964
	I915_WRITE(IMR, dev_priv->irq_mask);
3965
	I915_WRITE(IER, enable_mask);
3966
	POSTING_READ(IER);
3967
 
3480 Serge 3968
	I915_WRITE(PORT_HOTPLUG_EN, 0);
3969
	POSTING_READ(PORT_HOTPLUG_EN);
3970
 
4126 Serge 3971
	i915_enable_asle_pipestat(dev);
3480 Serge 3972
 
3973
	return 0;
3974
}
3975
 
3746 Serge 3976
static void i915_hpd_irq_setup(struct drm_device *dev)
3480 Serge 3977
{
5060 serge 3978
	struct drm_i915_private *dev_priv = dev->dev_private;
3746 Serge 3979
	struct intel_encoder *intel_encoder;
3480 Serge 3980
	u32 hotplug_en;
3981
 
4104 Serge 3982
	assert_spin_locked(&dev_priv->irq_lock);
3983
 
3746 Serge 3984
	if (I915_HAS_HOTPLUG(dev)) {
3985
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
3986
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
3031 serge 3987
		/* Note HDMI and DP share hotplug bits */
3746 Serge 3988
		/* enable bits are the same for all generations */
5354 serge 3989
		for_each_intel_encoder(dev, intel_encoder)
3746 Serge 3990
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3991
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
3031 serge 3992
		/* Programming the CRT detection parameters tends
3993
		   to generate a spurious hotplug event about three
3994
		   seconds later.  So just do it once.
3995
		   */
3996
		if (IS_G4X(dev))
3997
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
3746 Serge 3998
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
3031 serge 3999
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
3480 Serge 4000
 
3031 serge 4001
		/* Ignore TV since it's buggy */
4002
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
3746 Serge 4003
	}
3031 serge 4004
}
4005
 
3243 Serge 4006
static irqreturn_t i965_irq_handler(int irq, void *arg)
3031 serge 4007
{
5060 serge 4008
	struct drm_device *dev = arg;
4009
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 4010
	u32 iir, new_iir;
4011
	u32 pipe_stats[I915_MAX_PIPES];
4012
	int ret = IRQ_NONE, pipe;
3746 Serge 4013
	u32 flip_mask =
4014
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4015
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3031 serge 4016
 
4017
	iir = I915_READ(IIR);
4018
 
4019
	for (;;) {
5060 serge 4020
		bool irq_received = (iir & ~flip_mask) != 0;
3031 serge 4021
		bool blc_event = false;
4022
 
4023
		/* Can't rely on pipestat interrupt bit in iir as it might
4024
		 * have been cleared after the pipestat interrupt was received.
4025
		 * It doesn't set the bit in iir again, but it still produces
4026
		 * interrupts (for non-MSI).
4027
		 */
5354 serge 4028
		spin_lock(&dev_priv->irq_lock);
4126 Serge 4029
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
5354 serge 4030
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3031 serge 4031
 
5354 serge 4032
		for_each_pipe(dev_priv, pipe) {
3031 serge 4033
			int reg = PIPESTAT(pipe);
4034
			pipe_stats[pipe] = I915_READ(reg);
4035
 
4036
			/*
4037
			 * Clear the PIPE*STAT regs before the IIR
4038
			 */
4039
			if (pipe_stats[pipe] & 0x8000ffff) {
4040
				I915_WRITE(reg, pipe_stats[pipe]);
5060 serge 4041
				irq_received = true;
3031 serge 4042
			}
4043
		}
5354 serge 4044
		spin_unlock(&dev_priv->irq_lock);
3031 serge 4045
 
4046
		if (!irq_received)
4047
			break;
4048
 
4049
		ret = IRQ_HANDLED;
4050
 
4051
		/* Consume port.  Then clear IIR or we'll miss events */
5060 serge 4052
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
4053
			i9xx_hpd_irq_handler(dev);
3031 serge 4054
 
3746 Serge 4055
		I915_WRITE(IIR, iir & ~flip_mask);
3031 serge 4056
		new_iir = I915_READ(IIR); /* Flush posted writes */
4057
 
4058
		if (iir & I915_USER_INTERRUPT)
4059
			notify_ring(dev, &dev_priv->ring[RCS]);
4060
		if (iir & I915_BSD_USER_INTERRUPT)
4061
			notify_ring(dev, &dev_priv->ring[VCS]);
4062
 
5354 serge 4063
		for_each_pipe(dev_priv, pipe) {
3746 Serge 4064
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4065
			    i915_handle_vblank(dev, pipe, pipe, iir))
4066
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
3031 serge 4067
 
4068
			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4069
				blc_event = true;
4560 Serge 4070
 
4071
			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4072
				i9xx_pipe_crc_irq_handler(dev, pipe);
5060 serge 4073
 
5354 serge 4074
			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4075
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
3031 serge 4076
		}
4077
 
4126 Serge 4078
		if (blc_event || (iir & I915_ASLE_INTERRUPT))
4079
			intel_opregion_asle_intr(dev);
3031 serge 4080
 
3480 Serge 4081
		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4082
			gmbus_irq_handler(dev);
4083
 
3031 serge 4084
		/* With MSI, interrupts are only generated when iir
4085
		 * transitions from zero to nonzero.  If another bit got
4086
		 * set while we were handling the existing iir bits, then
4087
		 * we would never get another interrupt.
4088
		 *
4089
		 * This is fine on non-MSI as well, as if we hit this path
4090
		 * we avoid exiting the interrupt handler only to generate
4091
		 * another one.
4092
		 *
4093
		 * Note that for MSI this could cause a stray interrupt report
4094
		 * if an interrupt landed in the time between writing IIR and
4095
		 * the posting read.  This should be rare enough to never
4096
		 * trigger the 99% of 100,000 interrupts test for disabling
4097
		 * stray interrupts.
4098
		 */
4099
		iir = new_iir;
4100
	}
4101
 
4102
	return ret;
4103
}
4104
 
4105
static void i965_irq_uninstall(struct drm_device * dev)
4106
{
5060 serge 4107
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 4108
	int pipe;
4109
 
4110
	if (!dev_priv)
4111
		return;
4112
 
4113
	I915_WRITE(PORT_HOTPLUG_EN, 0);
4114
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4115
 
4116
	I915_WRITE(HWSTAM, 0xffffffff);
5354 serge 4117
	for_each_pipe(dev_priv, pipe)
3031 serge 4118
		I915_WRITE(PIPESTAT(pipe), 0);
4119
	I915_WRITE(IMR, 0xffffffff);
4120
	I915_WRITE(IER, 0x0);
4121
 
5354 serge 4122
	for_each_pipe(dev_priv, pipe)
3031 serge 4123
		I915_WRITE(PIPESTAT(pipe),
4124
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4125
	I915_WRITE(IIR, I915_READ(IIR));
4126
}
4127
 
5354 serge 4128
static void intel_hpd_irq_reenable_work(struct work_struct *work)
4126 Serge 4129
{
5060 serge 4130
	struct drm_i915_private *dev_priv =
4131
		container_of(work, typeof(*dev_priv),
4132
			     hotplug_reenable_work.work);
4126 Serge 4133
	struct drm_device *dev = dev_priv->dev;
4134
	struct drm_mode_config *mode_config = &dev->mode_config;
4135
	int i;
4136
 
5354 serge 4137
	intel_runtime_pm_get(dev_priv);
4138
 
4139
	spin_lock_irq(&dev_priv->irq_lock);
4126 Serge 4140
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
4141
		struct drm_connector *connector;
4142
 
4143
		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
4144
			continue;
4145
 
4146
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
4147
 
4148
		list_for_each_entry(connector, &mode_config->connector_list, head) {
4149
			struct intel_connector *intel_connector = to_intel_connector(connector);
4150
 
4151
			if (intel_connector->encoder->hpd_pin == i) {
4152
				if (connector->polled != intel_connector->polled)
4153
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
5060 serge 4154
							 connector->name);
4126 Serge 4155
				connector->polled = intel_connector->polled;
4156
				if (!connector->polled)
4157
					connector->polled = DRM_CONNECTOR_POLL_HPD;
4158
			}
4159
		}
4160
	}
4161
	if (dev_priv->display.hpd_irq_setup)
4162
		dev_priv->display.hpd_irq_setup(dev);
5354 serge 4163
	spin_unlock_irq(&dev_priv->irq_lock);
4164
 
4165
	intel_runtime_pm_put(dev_priv);
4126 Serge 4166
}
4167
 
5354 serge 4168
/**
4169
 * intel_irq_init - initializes irq support
4170
 * @dev_priv: i915 device instance
4171
 *
4172
 * This function initializes all the irq support including work items, timers
4173
 * and all the vtables. It does not set up the interrupt itself though.
4174
 */
4175
void intel_irq_init(struct drm_i915_private *dev_priv)
2351 Serge 4176
{
5354 serge 4177
	struct drm_device *dev = dev_priv->dev;
3031 serge 4178
 
3480 Serge 4179
	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
5354 serge 4180
//	INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
4126 Serge 4181
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
4182
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4183
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
3480 Serge 4184
 
5060 serge 4185
	/* Let's track the enabled rps events */
5354 serge 4186
	if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
4187
		/* WaGsvRC0ResidencyMethod:vlv */
5060 serge 4188
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
4189
	else
4190
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
3480 Serge 4191
 
4560 Serge 4192
 
5354 serge 4193
 
4194
	if (IS_GEN2(dev_priv)) {
4560 Serge 4195
		dev->max_vblank_count = 0;
4196
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
5354 serge 4197
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4560 Serge 4198
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4199
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
4200
	} else {
4293 Serge 4201
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
4202
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4203
	}
3480 Serge 4204
 
5354 serge 4205
	/*
4206
	 * Opt out of the vblank disable timer on everything except gen2.
4207
	 * Gen2 doesn't have a hardware frame counter and so depends on
4208
	 * vblank interrupts to produce sane vblank sequence numbers.
4209
	 */
4210
	if (!IS_GEN2(dev_priv))
4211
		dev->vblank_disable_immediate = true;
4212
 
4560 Serge 4213
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
4293 Serge 4214
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4215
		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4560 Serge 4216
	}
3480 Serge 4217
 
5354 serge 4218
	if (IS_CHERRYVIEW(dev_priv)) {
5060 serge 4219
		dev->driver->irq_handler = cherryview_irq_handler;
4220
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
4221
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
4222
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
4223
		dev->driver->enable_vblank = valleyview_enable_vblank;
4224
		dev->driver->disable_vblank = valleyview_disable_vblank;
4225
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
5354 serge 4226
	} else if (IS_VALLEYVIEW(dev_priv)) {
3243 Serge 4227
		dev->driver->irq_handler = valleyview_irq_handler;
4228
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
4229
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
4293 Serge 4230
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
4231
		dev->driver->enable_vblank = valleyview_enable_vblank;
4232
		dev->driver->disable_vblank = valleyview_disable_vblank;
3746 Serge 4233
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
5354 serge 4234
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
4560 Serge 4235
		dev->driver->irq_handler = gen8_irq_handler;
5060 serge 4236
		dev->driver->irq_preinstall = gen8_irq_reset;
4560 Serge 4237
		dev->driver->irq_postinstall = gen8_irq_postinstall;
4238
		dev->driver->irq_uninstall = gen8_irq_uninstall;
4239
		dev->driver->enable_vblank = gen8_enable_vblank;
4240
		dev->driver->disable_vblank = gen8_disable_vblank;
4241
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
2351 Serge 4242
	} else if (HAS_PCH_SPLIT(dev)) {
3243 Serge 4243
		dev->driver->irq_handler = ironlake_irq_handler;
5060 serge 4244
		dev->driver->irq_preinstall = ironlake_irq_reset;
3243 Serge 4245
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
4293 Serge 4246
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
4247
		dev->driver->enable_vblank = ironlake_enable_vblank;
4248
		dev->driver->disable_vblank = ironlake_disable_vblank;
3746 Serge 4249
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
2351 Serge 4250
	} else {
5354 serge 4251
		if (INTEL_INFO(dev_priv)->gen == 2) {
4252
		} else if (INTEL_INFO(dev_priv)->gen == 3) {
3243 Serge 4253
			dev->driver->irq_preinstall = i915_irq_preinstall;
4254
			dev->driver->irq_postinstall = i915_irq_postinstall;
4293 Serge 4255
			dev->driver->irq_uninstall = i915_irq_uninstall;
3243 Serge 4256
			dev->driver->irq_handler = i915_irq_handler;
3480 Serge 4257
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3031 serge 4258
		} else {
3243 Serge 4259
			dev->driver->irq_preinstall = i965_irq_preinstall;
4260
			dev->driver->irq_postinstall = i965_irq_postinstall;
4293 Serge 4261
			dev->driver->irq_uninstall = i965_irq_uninstall;
3243 Serge 4262
			dev->driver->irq_handler = i965_irq_handler;
3746 Serge 4263
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3031 serge 4264
		}
4293 Serge 4265
		dev->driver->enable_vblank = i915_enable_vblank;
4266
		dev->driver->disable_vblank = i915_disable_vblank;
2351 Serge 4267
	}
3480 Serge 4268
}
3243 Serge 4269
 
5354 serge 4270
/**
4271
 * intel_hpd_init - initializes and enables hpd support
4272
 * @dev_priv: i915 device instance
4273
 *
4274
 * This function enables the hotplug support. It requires that interrupts have
4275
 * already been enabled with intel_irq_install(). From this point on hotplug and
4276
 * poll requests can run concurrently with other code, so locking rules must be
4277
 * obeyed.
4278
 *
4279
 * This is a separate step from interrupt enabling to simplify the locking rules
4280
 * in the driver load and resume code.
4281
 */
4282
void intel_hpd_init(struct drm_i915_private *dev_priv)
3480 Serge 4283
{
5354 serge 4284
	struct drm_device *dev = dev_priv->dev;
3746 Serge 4285
	struct drm_mode_config *mode_config = &dev->mode_config;
4286
	struct drm_connector *connector;
4287
	int i;
3480 Serge 4288
 
3746 Serge 4289
	for (i = 1; i < HPD_NUM_PINS; i++) {
4290
		dev_priv->hpd_stats[i].hpd_cnt = 0;
4291
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
4292
	}
4293
	list_for_each_entry(connector, &mode_config->connector_list, head) {
4294
		struct intel_connector *intel_connector = to_intel_connector(connector);
4295
		connector->polled = intel_connector->polled;
5060 serge 4296
		if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
3746 Serge 4297
			connector->polled = DRM_CONNECTOR_POLL_HPD;
5060 serge 4298
		if (intel_connector->mst_port)
4299
			connector->polled = DRM_CONNECTOR_POLL_HPD;
3746 Serge 4300
	}
4104 Serge 4301
 
4302
	/* Interrupt setup is already guaranteed to be single-threaded, this is
4303
	 * just to make the assert_spin_locked checks happy. */
5354 serge 4304
	spin_lock_irq(&dev_priv->irq_lock);
3480 Serge 4305
	if (dev_priv->display.hpd_irq_setup)
4306
		dev_priv->display.hpd_irq_setup(dev);
5354 serge 4307
	spin_unlock_irq(&dev_priv->irq_lock);
2351 Serge 4308
}
4309
 
5354 serge 4310
/**
4311
 * intel_irq_install - enables the hardware interrupt
4312
 * @dev_priv: i915 device instance
4313
 *
4314
 * This function enables the hardware interrupt handling, but leaves the hotplug
4315
 * handling still disabled. It is called after intel_irq_init().
4316
 *
4317
 * In the driver load and resume code we need working interrupts in a few places
4318
 * but don't want to deal with the hassle of concurrent probe and hotplug
4319
 * workers. Hence the split into this two-stage approach.
4320
 */
4321
int intel_irq_install(struct drm_i915_private *dev_priv)
3243 Serge 4322
{
5354 serge 4323
	/*
4324
	 * We enable some interrupt sources in our postinstall hooks, so mark
4325
	 * interrupts as enabled _before_ actually enabling them to avoid
4326
	 * special cases in our ordering checks.
4327
	 */
4328
	dev_priv->pm.irqs_enabled = true;
2351 Serge 4329
 
5354 serge 4330
	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
3243 Serge 4331
}
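/*
 * Illustrative sketch of the two-stage bring-up described above; the actual
 * call sites live in the loader, this only restates the kerneldoc ordering
 * of intel_irq_init(), intel_irq_install() and intel_hpd_init():
 *
 *	intel_irq_init(dev_priv);	// vtables and work items, no hw touched
 *	intel_irq_install(dev_priv);	// request the IRQ, run postinstall hooks
 *	intel_hpd_init(dev_priv);	// only now enable hotplug handling
 */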
4332
 
5354 serge 4333
/**
4334
 * intel_irq_uninstall - finalizes all irq handling
4335
 * @dev_priv: i915 device instance
4336
 *
4337
 * This stops interrupt and hotplug handling and unregisters and frees all
4338
 * resources acquired in the init functions.
4339
 */
4340
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
2351 Serge 4341
{
5354 serge 4342
//	drm_irq_uninstall(dev_priv->dev);
4343
//	intel_hpd_cancel_work(dev_priv);
4344
	dev_priv->pm.irqs_enabled = false;
4345
}
2351 Serge 4346
 
5354 serge 4347
/**
4348
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4349
 * @dev_priv: i915 device instance
4350
 *
4351
 * This function is used to disable interrupts at runtime, both in the runtime
4352
 * pm and the system suspend/resume code.
4353
 */
4354
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4355
{
4356
	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
4357
	dev_priv->pm.irqs_enabled = false;
4104 Serge 4358
}
2351 Serge 4359
 
5354 serge 4360
/**
4361
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4362
 * @dev_priv: i915 device instance
4363
 *
4364
 * This function is used to enable interrupts at runtime, both in the runtime
4365
 * pm and the system suspend/resume code.
4366
 */
4367
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4368
{
4369
	dev_priv->pm.irqs_enabled = true;
4370
	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
4371
	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
4372
}
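/*
 * Illustrative pairing of the two runtime-pm helpers above; a sketch of how
 * the suspend/resume paths are expected to call them, not code taken from
 * those paths:
 *
 *	intel_runtime_pm_disable_interrupts(dev_priv);	// before powering down
 *	// ... device is powered down and later powered back up ...
 *	intel_runtime_pm_enable_interrupts(dev_priv);	// after powering back up
 */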
2351 Serge 4373
 
4104 Serge 4374
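/*
 * Port-specific entry point: forward the interrupt to whichever
 * per-generation handler intel_irq_init() installed in
 * dev->driver->irq_handler.
 */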
irqreturn_t intel_irq_handler(struct drm_device *dev)
4375
{
2351 Serge 4376
 
4104 Serge 4377
//    printf("i915 irq\n");
4378
//    printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ;
4379
 
4380
    return dev->driver->irq_handler(0, dev);
2351 Serge 4381
}
4382