/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling
 * interrupt handling. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)
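
/*
 * Quick orientation for the register triplets used by these macros
 * (a summary, not a spec quote): IER enables an interrupt source, IMR
 * masks it from asserting the CPU interrupt (1 = masked), and IIR
 * latches pending events. IIR can hold one more event behind the
 * currently signalled one, which is why both reset macros clear it twice.
 */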

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To keep concurrent read-modify-write
 * cycles from interfering, these bits are protected by a spinlock.
 * Since this function is usually not called from a context where
 * the lock is held already, this function acquires the lock itself.
 * A non-locking version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   uint32_t mask,
				   uint32_t bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    uint32_t interrupt_mask,
			    uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
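
/*
 * Illustrative example of the masking math above (made-up bit values):
 * with interrupt_mask = 0x3 and enabled_irq_mask = 0x1, bit 0 is cleared
 * in irq_mask (unmasked, i.e. enabled) while bit 1 is set (masked),
 * because DEIMR uses "1 = masked" semantics. Bits outside interrupt_mask
 * are left untouched.
 */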

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}
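
/*
 * Note: pm_irq_mask is a software cache of the PM IMR contents, so the
 * update path above can skip the MMIO write when nothing changes. On
 * gen8+ the PM interrupts live in the GT interrupt block, which is why
 * the gen6_pm_iir/imr/ier() helpers return GEN8_GT_{IIR,IMR,IER}(2)
 * there instead of the GEN6_PM* registers.
 */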

void gen6_reset_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);

	WARN_ON(dev_priv->rps.pm_iir);
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
				dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
	/*
	 * SNB and IVB will hard hang, and VLV and CHV may hard hang, on a
	 * looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;

	return mask;
}

void gen6_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	spin_lock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
				~dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);

	synchronize_irq(dev->irq);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 uint32_t interrupt_mask,
			 uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}
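
/*
 * Design note: bdw_update_port_irq reads GEN8_DE_PORT_IMR back from the
 * hardware, while bdw_update_pipe_irq works from the cached
 * de_irq_mask[pipe] value; both end up doing the same masked
 * read-modify-write under dev_priv->irq_lock.
 */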

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}
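
/*
 * PIPESTAT packs the enable bits in the upper 16 bits and the
 * corresponding status bits in the lower 16, which is why the default
 * enable mask is simply "status_mask << 16". The VLV/CHV helper above
 * exists because a few sprite/PSR bits break that 1:1 layout.
 */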

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev: drm device
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
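
/*
 * The pieces combined above: high1 carries the upper bits of the frame
 * counter and low the lower 8, giving a 24-bit counter after the final
 * "& 0xffffff". Adding (pixel >= vbl_start) converts the hardware's
 * "increment at start of active" behaviour into a counter that appears
 * to increment at the start of vblank instead.
 */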

static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->base.hwmode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank.  So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem.  We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
				DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime,
				    const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}
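
/*
 * Worked example of the return convention (illustrative numbers): for a
 * mode with vbl_start = 768, vbl_end = 795 and vtotal = 798, a scanline
 * of 770 is reported as *vpos = 770 - 795 = -25 (inside vblank, counting
 * up towards 0), while a scanline of 100 is reported as
 * 100 + 798 - 795 = 103 (outside vblank, counting up since vbl_end).
 */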

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
			      int *max_error,
			      struct timeval *vblank_time,
			      unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	if (!crtc->hwmode.crtc_clock) {
		DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     &crtc->hwmode);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}
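
/*
 * Note the inverted sense of "delay" on ILK (as used by the clamping
 * above): a numerically smaller delay value means a higher performance
 * state, so ips.max_delay is the numeric lower bound and ips.min_delay
 * the numeric upper bound.
 */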

static void notify_ring(struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_notify(ring);

	wake_up_all(&ring->irq_queue);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

static bool vlv_c0_above(struct drm_i915_private *dev_priv,
			 const struct intel_rps_ei *old,
			 const struct intel_rps_ei *now,
			 int threshold)
{
	u64 time, c0;
	unsigned int mul = 100;

	if (old->cz_clock == 0)
		return false;

	if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
		mul <<= 8;

	time = now->cz_clock - old->cz_clock;
	time *= threshold * dev_priv->czclk_freq;

	/* Workload can be split between render + media, e.g. SwapBuffers
	 * being blitted in X after being rendered in mesa. To account for
	 * this we need to combine both engines into our activity counter.
	 */
	c0 = now->render_c0 - old->render_c0;
	c0 += now->media_c0 - old->media_c0;
	c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;

	return c0 >= time;
}
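
/*
 * Units sketch for the comparison above (informal): time accumulates
 * cz timestamp ticks scaled by threshold (in percent) and czclk_freq,
 * while c0 accumulates busy C0 residency scaled by 100 (or 100 << 8 in
 * high-range mode) and VLV_CZ_CLOCK_TO_MILLI_SEC, so "c0 >= time"
 * effectively asks whether the render+media engines were busy for at
 * least "threshold" percent of the elapsed evaluation interval.
 */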

void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);
	if (now.cz_clock == 0)
		return 0;

	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
		if (!vlv_c0_above(dev_priv,
				  &dev_priv->rps.down_ei, &now,
				  dev_priv->rps.down_threshold))
			events |= GEN6_PM_RP_DOWN_THRESHOLD;
		dev_priv->rps.down_ei = now;
	}

	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		if (vlv_c0_above(dev_priv,
				 &dev_priv->rps.up_ei, &now,
				 dev_priv->rps.up_threshold))
			events |= GEN6_PM_RP_UP_THRESHOLD;
		dev_priv->rps.up_ei = now;
	}

	return events;
}

static bool any_waiters(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		if (ring->irq_refcount)
			return true;

	return false;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	bool client_boost;
	int new_delay, adj, min, max;
	u32 pm_iir;

	spin_lock_irq(&dev_priv->irq_lock);
	/* Speed up work cancelation during disabling rps interrupts. */
	if (!dev_priv->rps.interrupts_enabled) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	/*
	 * The RPS work is synced during runtime suspend, we don't require a
	 * wakeref. TODO: instead of disabling the asserts make sure that we
	 * always hold an RPM reference while the work is running.
	 */
	DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);

	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	client_boost = dev_priv->rps.client_boost;
	dev_priv->rps.client_boost = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		goto out;

	mutex_lock(&dev_priv->rps.hw_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = dev_priv->rps.last_adj;
	new_delay = dev_priv->rps.cur_freq;
	min = dev_priv->rps.min_freq_softlimit;
	max = dev_priv->rps.max_freq_softlimit;

	if (client_boost) {
		new_delay = dev_priv->rps.max_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq - adj) {
			new_delay = dev_priv->rps.efficient_freq;
			adj = 0;
		}
	} else if (any_waiters(dev_priv)) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
	} else { /* unknown event */
		adj = 0;
	}

	dev_priv->rps.last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	intel_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
out:
	ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
}
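
/*
 * Summary of the frequency heuristic above: consecutive up (or down)
 * threshold events double the step size via last_adj, a client boost
 * jumps straight to the softlimit maximum, a down timeout falls back to
 * the efficient frequency (RPe) or the minimum, and any pending waiters
 * freeze the current frequency; the result is always clamped to the
 * [min, max] softlimits before intel_set_rps() is called.
 */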

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(&dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(&dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static __always_inline void
gen8_cs_irq_handler(struct intel_engine_cs *ring, u32 iir, int test_shift)
{
	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
		notify_ring(ring);
	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
		intel_lrc_irq_handler(ring);
}
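
/*
 * On gen8 each GT IIR register carries interrupt bits for two engines,
 * shifted into separate lanes; test_shift (e.g. GEN8_RCS_IRQ_SHIFT vs
 * GEN8_BCS_IRQ_SHIFT) selects which engine's lane the common bit
 * definitions are tested against.
 */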

static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		u32 iir = I915_READ_FW(GEN8_GT_IIR(0));
		if (iir) {
			I915_WRITE_FW(GEN8_GT_IIR(0), iir);
			ret = IRQ_HANDLED;

			gen8_cs_irq_handler(&dev_priv->ring[RCS],
					iir, GEN8_RCS_IRQ_SHIFT);

			gen8_cs_irq_handler(&dev_priv->ring[BCS],
					iir, GEN8_BCS_IRQ_SHIFT);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		u32 iir = I915_READ_FW(GEN8_GT_IIR(1));
		if (iir) {
			I915_WRITE_FW(GEN8_GT_IIR(1), iir);
			ret = IRQ_HANDLED;

			gen8_cs_irq_handler(&dev_priv->ring[VCS],
					iir, GEN8_VCS1_IRQ_SHIFT);

			gen8_cs_irq_handler(&dev_priv->ring[VCS2],
					iir, GEN8_VCS2_IRQ_SHIFT);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		u32 iir = I915_READ_FW(GEN8_GT_IIR(3));
		if (iir) {
			I915_WRITE_FW(GEN8_GT_IIR(3), iir);
			ret = IRQ_HANDLED;

			gen8_cs_irq_handler(&dev_priv->ring[VECS],
					iir, GEN8_VECS_IRQ_SHIFT);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		u32 iir = I915_READ_FW(GEN8_GT_IIR(2));
		if (iir & dev_priv->pm_rps_events) {
			I915_WRITE_FW(GEN8_GT_IIR(2),
				      iir & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, iir);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	return ret;
}

static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum port port, u32 val))
{
	enum port port;
	int i;

	for_each_hpd_pin(i) {
		if ((hpd[i] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(i);

		if (!intel_hpd_pin_to_port(i, &port))
			continue;

		if (long_pulse_detect(port, dig_hotplug_reg))
			*long_mask |= BIT(i);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
}
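
/*
 * Typical call pattern (illustrative, using the SPT tables above):
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
 *			   dig_hotplug_reg, hpd_spt,
 *			   spt_port_hotplug_long_detect);
 *
 * with the accumulated masks then handed on to the hotplug machinery.
 */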

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_DEBUG_KMS("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif

static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}
1596
 
4104 Serge 1597
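/*
 * The masked-off RPS bits are re-enabled by the bottom half (the rps.work
 * item) once it has consumed dev_priv->rps.pm_iir, so no events are lost
 * while the work is pending.
 */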
/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			queue_work(dev_priv->wq, &dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
	if (!drm_handle_vblank(dev, pipe))
		return false;

	return true;
}

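/*
 * PIPESTAT handling is split in two passes: first latch and clear the
 * status bits for every pipe under irq_lock, then act on the cached
 * pipe_stats[] values without the lock held, since the handlers called
 * below may take their own locks.
 */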
static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}

static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
	u32 pin_mask = 0, long_mask = 0;

	if (!hotplug_status)
		return;

	I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
	/*
	 * Make sure hotplug status is cleared before we clear IIR, or else we
	 * may miss hotplug events.
	 */
	POSTING_READ(PORT_HOTPLUG_STAT);

	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_g4x,
					   i9xx_port_hotplug_long_detect);

			intel_hpd_irq_handler(dev, pin_mask, long_mask);
		}

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_i915,
					   i9xx_port_hotplug_long_detect);
			intel_hpd_irq_handler(dev, pin_mask, long_mask);
		}
	}
}

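/*
 * Loop until one pass reads all three IIRs (GT, PM and display) as zero,
 * so that events arriving while earlier ones are being processed are not
 * dropped.
 */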
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	while (true) {
		/* Find, clear, then process each source of interrupt */

		gt_iir = I915_READ(GTIIR);
		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);

		pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		iir = I915_READ(VLV_IIR);
		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		if (gt_iir)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);
		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);
	}

out:
	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

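/*
 * CHV uses the gen8-style master interrupt control and GT interrupt
 * layout but keeps the VLV-style display/pipestat path, hence the mix of
 * gen8_gt_irq_handler() and valleyview_pipestat_irq_handler() below.
 */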
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl, iir;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	for (;;) {
		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		I915_WRITE(GEN8_MASTER_IRQ, 0);

		/* Find, clear, then process each source of interrupt */

		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		gen8_gt_irq_handler(dev_priv, master_ctl);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);

		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);
	}

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/*
	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
	 * unless we touch the hotplug register, even if hotplug_trigger is
	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
	 * errors.
	 */
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	if (!hotplug_trigger) {
		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
			PORTD_HOTPLUG_STATUS_MASK |
			PORTC_HOTPLUG_STATUS_MASK |
			PORTB_HOTPLUG_STATUS_MASK;
		dig_hotplug_reg &= ~mask;
	}

	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
	if (!hotplug_trigger)
		return;

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   pch_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev, pin_mask, long_mask);
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
}

static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev))
				ivb_pipe_crc_irq_handler(dev, pipe);
			else
				hsw_pipe_crc_irq_handler(dev, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}

static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
		~SDE_PORTE_HOTPLUG_SPT;
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
	u32 pin_mask = 0, long_mask = 0;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug_long_detect);
	}

	if (hotplug2_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
				   dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug2_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);
}

static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   ilk_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev, pin_mask, long_mask);
}

static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe) &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev);
}

static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
		if (INTEL_INFO(dev)->gen >= 6)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
		if (INTEL_INFO(dev)->gen >= 7)
			ivb_display_irq_handler(dev, de_iir);
		else
			ilk_display_irq_handler(dev, de_iir);
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, pm_iir);
		}
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   bxt_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev, pin_mask, long_mask);
}

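/*
 * On gen8+ the display engine splits its interrupts across several IIRs:
 * one for miscellaneous events, one for ports (AUX/hotplug), one per
 * pipe, plus the shared PCH IIR. master_ctl tells us which of them need
 * servicing at all.
 */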
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
	struct drm_device *dev = dev_priv->dev;
	irqreturn_t ret = IRQ_NONE;
	u32 iir;
	enum pipe pipe;

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = I915_READ(GEN8_DE_MISC_IIR);
		if (iir) {
			I915_WRITE(GEN8_DE_MISC_IIR, iir);
			ret = IRQ_HANDLED;
			if (iir & GEN8_DE_MISC_GSE)
				intel_opregion_asle_intr(dev);
			else
				DRM_ERROR("Unexpected DE Misc interrupt\n");
		}
		else
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = I915_READ(GEN8_DE_PORT_IIR);
		if (iir) {
			u32 tmp_mask;
			bool found = false;

			I915_WRITE(GEN8_DE_PORT_IIR, iir);
			ret = IRQ_HANDLED;

			tmp_mask = GEN8_AUX_CHANNEL_A;
			if (INTEL_INFO(dev_priv)->gen >= 9)
				tmp_mask |= GEN9_AUX_CHANNEL_B |
					    GEN9_AUX_CHANNEL_C |
					    GEN9_AUX_CHANNEL_D;

			if (iir & tmp_mask) {
				dp_aux_irq_handler(dev);
				found = true;
			}

			if (IS_BROXTON(dev_priv)) {
				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
				if (tmp_mask) {
					bxt_hpd_irq_handler(dev, tmp_mask, hpd_bxt);
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
				if (tmp_mask) {
					ilk_hpd_irq_handler(dev, tmp_mask, hpd_bdw);
					found = true;
				}
			}

			if (IS_BROXTON(dev) && (iir & BXT_DE_PORT_GMBUS)) {
				gmbus_irq_handler(dev);
				found = true;
			}

			if (!found)
				DRM_ERROR("Unexpected DE Port interrupt\n");
		}
		else
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
	}

	for_each_pipe(dev_priv, pipe) {
		u32 flip_done, fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
			continue;
		}

		ret = IRQ_HANDLED;
		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);

		if (iir & GEN8_PIPE_VBLANK &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		flip_done = iir;
		if (INTEL_INFO(dev_priv)->gen >= 9)
			flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
		else
			flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;

		if (flip_done) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}

		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(dev, pipe);

		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		fault_errors = iir;
		if (INTEL_INFO(dev_priv)->gen >= 9)
			fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		else
			fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;

		if (fault_errors)
			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
				  pipe_name(pipe),
				  fault_errors);
	}

	if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		iir = I915_READ(SDEIIR);
		if (iir) {
			I915_WRITE(SDEIIR, iir);
			ret = IRQ_HANDLED;

			if (HAS_PCH_SPT(dev_priv))
				spt_irq_handler(dev, iir);
			else
				cpt_irq_handler(dev, iir);
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
			 */
			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
		}
	}

	return ret;
}

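/*
 * The _FW register accessors are used here to skip the uncore lock and
 * forcewake bookkeeping; the master IRQ control register doesn't need
 * forcewake, and the raw accessors keep the hot irq path cheap.
 */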
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl;
	irqreturn_t ret;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
	if (!master_ctl)
		return IRQ_NONE;

	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	/* Find, clear, then process each source of interrupt */
	ret = gen8_gt_irq_handler(dev_priv, master_ctl);
	ret |= gen8_de_irq_handler(dev_priv, master_ctl);

	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ_FW(GEN8_MASTER_IRQ);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i915_error_wake_up(struct drm_i915_private *dev_priv,
			       bool reset_completed)
{
	struct intel_engine_cs *ring;
	int i;

	/*
	 * Notify all waiters for GPU completion events that reset state has
	 * been changed, and that they need to restart their wait after
	 * checking for potential errors (and bail out to drop locks if there is
	 * a gpu reset pending so that i915_error_work_func can acquire them).
	 */

	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
	for_each_ring(ring, dev_priv, i)
		wake_up_all(&ring->irq_queue);

	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
	wake_up_all(&dev_priv->pending_flip_queue);

	/*
	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
	 * reset state is cleared.
	 */
	if (reset_completed)
		wake_up_all(&dev_priv->gpu_error.reset_queue);
}

/**
 * i915_reset_and_wakeup - do process context error handling work
 * @dev: drm device
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_reset_and_wakeup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_gpu_error *error = &dev_priv->gpu_error;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	int ret;

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		/*
		 * In most cases it's guaranteed that we get here with an RPM
		 * reference held, for example because there is a pending GPU
		 * request that won't finish until the reset is done. This
		 * isn't the case at least when we get here by doing a
		 * simulated reset via debugfs, so get an RPM reference.
		 */
		intel_runtime_pm_get(dev_priv);

		intel_prepare_reset(dev);

		/*
		 * All state reset _must_ be completed before we update the
		 * reset counter, for otherwise waiters might miss the reset
		 * pending state and not properly drop locks, resulting in
		 * deadlocks with the reset work.
		 */
		ret = i915_reset(dev);

		intel_finish_reset(dev);

		intel_runtime_pm_put(dev_priv);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before the counter increment.
			 */
			smp_mb__before_atomic();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

		} else {
			atomic_or(I915_WEDGED, &error->reset_counter);
		}

		/*
		 * Note: The wake_up also serves as a memory barrier so that
		 * waiters see the updated value of the reset counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, true);
	}
}

static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(dev_priv, pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle a gpu error
 * @dev: drm device
 *
 * Do some basic checking of register state at error time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged,
		       const char *fmt, ...)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	va_list args;
	char error_msg[80];

	va_start(args, fmt);
	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
	va_end(args);

	i915_capture_error_state(dev, wedged, error_msg);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_or(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset function
		 * i915_reset_and_wakeup doesn't deadlock trying to grab
		 * various locks. By bumping the reset counter first, the woken
		 * processes will see a reset in progress and back off,
		 * releasing their locks and then wait for the reset completion.
		 * We must do this for _all_ gpu waiters that might hold locks
		 * that the reset work needs to acquire.
		 *
		 * Note: The wake_up serves as the required memory barrier to
		 * ensure that the waiters see the updated value of the reset
		 * counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, false);
	}

	i915_reset_and_wakeup(dev);
}

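/*
 * The vblank enable/disable hooks below only flip interrupt enable bits
 * under irq_lock; pre-gen5 parts use the PIPESTAT mechanism, ilk+ the DE
 * IMR, and gen8+ the per-pipe IMR registers.
 */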
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_STATUS);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_STATUS |
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static bool
ring_idle(struct intel_engine_cs *ring, u32 seqno)
{
	return (list_empty(&ring->request_list) ||
		i915_seqno_passed(seqno, ring->last_submitted_seqno));
}

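/*
 * On gen8+ the check below matches the MI opcode field (bits 28:23 of the
 * command header) against 0x1c, which should be MI_SEMAPHORE_WAIT; older
 * gens compare against the MI_SEMAPHORE_MBOX encoding instead.
 */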
static bool
ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
{
	if (INTEL_INFO(dev)->gen >= 8) {
		return (ipehr >> 23) == 0x1c;
	} else {
		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
				 MI_SEMAPHORE_REGISTER);
	}
}

static struct intel_engine_cs *
semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_engine_cs *signaller;
	int i;

	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
		for_each_ring(signaller, dev_priv, i) {
			if (ring == signaller)
				continue;

			if (offset == signaller->semaphore.signal_ggtt[ring->id])
				return signaller;
		}
	} else {
		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;

		for_each_ring(signaller, dev_priv, i) {
			if (ring == signaller)
				continue;

			if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
				return signaller;
		}
	}

	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
		  ring->id, ipehr, offset);

	return NULL;
}

static struct intel_engine_cs *
semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 cmd, ipehr, head;
	u64 offset = 0;
	int i, backwards;

	/*
	 * This function does not support execlist mode - any attempt to
	 * proceed further into this function will result in a kernel panic
	 * when dereferencing ring->buffer, which is not set up in execlist
	 * mode.
	 *
	 * The correct way of doing it would be to derive the currently
	 * executing ring buffer from the current context, which is derived
	 * from the currently running request. Unfortunately, to get the
	 * current request we would have to grab the struct_mutex before doing
	 * anything else, which would be ill-advised since some other thread
	 * might have grabbed it already and managed to hang itself, causing
	 * the hang checker to deadlock.
	 *
	 * Therefore, this function does not support execlist mode in its
	 * current form. Just return NULL and move on.
	 */
	if (ring->buffer == NULL)
		return NULL;

	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
	if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
		return NULL;

	/*
	 * HEAD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX. But limit it to just 3
	 * or 4 dwords depending on the semaphore wait command size.
	 * Note that we don't care about ACTHD here since that might
	 * point at the batch, and semaphores are always emitted into the
	 * ringbuffer itself.
	 */
	head = I915_READ_HEAD(ring) & HEAD_ADDR;
	backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;

	for (i = backwards; i; --i) {
		/*
		 * Be paranoid and presume the hw has gone off into the wild -
		 * our ring is smaller than what the hardware (and hence
		 * HEAD_ADDR) allows. Also handles wrap-around.
		 */
		head &= ring->buffer->size - 1;

		/* This here seems to blow up */
		cmd = ioread32(ring->buffer->virtual_start + head);
		if (cmd == ipehr)
			break;

		head -= 4;
	}

	if (!i)
		return NULL;

	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
	if (INTEL_INFO(ring->dev)->gen >= 8) {
		offset = ioread32(ring->buffer->virtual_start + head + 12);
		offset <<= 32;
		offset |= ioread32(ring->buffer->virtual_start + head + 8);
	}
	return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
}

static int semaphore_passed(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_engine_cs *signaller;
	u32 seqno;

	ring->hangcheck.deadlock++;

	signaller = semaphore_waits_for(ring, &seqno);
	if (signaller == NULL)
		return -1;

	/* Prevent pathological recursion due to driver bugs */
	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
		return -1;

	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
		return 1;

	/* cursory check for an unkickable deadlock */
	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
	    semaphore_passed(signaller) < 0)
		return -1;

	return 0;
}

static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		ring->hangcheck.deadlock = 0;
}

static bool subunits_stuck(struct intel_engine_cs *ring)
{
	u32 instdone[I915_NUM_INSTDONE_REG];
	bool stuck;
	int i;

	if (ring->id != RCS)
		return true;

	i915_get_extra_instdone(ring->dev, instdone);

	/* There might be unstable subunit states even when
	 * actual head is not moving. Filter out the unstable ones by
	 * accumulating the undone -> done transitions and only
	 * consider those as progress.
	 */
	stuck = true;
	for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
		const u32 tmp = instdone[i] | ring->hangcheck.instdone[i];

		if (tmp != ring->hangcheck.instdone[i])
			stuck = false;

		ring->hangcheck.instdone[i] |= tmp;
	}

	return stuck;
}

static enum intel_ring_hangcheck_action
head_stuck(struct intel_engine_cs *ring, u64 acthd)
{
	if (acthd != ring->hangcheck.acthd) {

		/* Clear subunit states on head movement */
		memset(ring->hangcheck.instdone, 0,
		       sizeof(ring->hangcheck.instdone));

		if (acthd > ring->hangcheck.max_acthd) {
			ring->hangcheck.max_acthd = acthd;
			return HANGCHECK_ACTIVE;
		}

		return HANGCHECK_ACTIVE_LOOP;
	}

	if (!subunits_stuck(ring))
		return HANGCHECK_ACTIVE;

	return HANGCHECK_HUNG;
}

static enum intel_ring_hangcheck_action
ring_stuck(struct intel_engine_cs *ring, u64 acthd)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_ring_hangcheck_action ha;
	u32 tmp;

	ha = head_stuck(ring, acthd);
	if (ha != HANGCHECK_HUNG)
		return ha;

	if (IS_GEN2(dev))
		return HANGCHECK_HUNG;

	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
	tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		i915_handle_error(dev, false,
				  "Kicking stuck wait on %s",
				  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return HANGCHECK_KICK;
	}

	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
		switch (semaphore_passed(ring)) {
		default:
			return HANGCHECK_HUNG;
		case 1:
			i915_handle_error(dev, false,
					  "Kicking stuck semaphore on %s",
					  ring->name);
			I915_WRITE_CTL(ring, tmp);
			return HANGCHECK_KICK;
		case 0:
			return HANGCHECK_WAIT;
		}
	}

	return HANGCHECK_HUNG;
}

/*
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track of per-ring seqno progress and
 * if there is no progress, the hangcheck score for that ring is increased.
 * Further, acthd is inspected to see if the ring is stuck. In the stuck case
 * we kick the ring. If we see no progress on three subsequent calls
 * we assume the chip is wedged and try to fix it by resetting the chip.
 */
static void i915_hangcheck_elapsed(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     gpu_error.hangcheck_work.work);
	struct drm_device *dev = dev_priv->dev;
	struct intel_engine_cs *ring;
	int i;
	int busy_count = 0, rings_hung = 0;
	bool stuck[I915_NUM_RINGS] = { 0 };
#define BUSY 1
#define KICK 5
#define HUNG 20
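
	/*
	 * Rough scoring weights: BUSY for a ring merely making no forward
	 * progress, KICK when we had to poke it out of a wait, HUNG when it
	 * looks truly stuck. A ring whose score reaches
	 * HANGCHECK_SCORE_RING_HUNG (see i915_drv.h) is reported as hung.
	 */
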
	if (!i915.enable_hangcheck)
4104 Serge 3066
		return;
3067
 
6937 serge 3068
	/*
3069
	 * The hangcheck work is synced during runtime suspend, we don't
3070
	 * require a wakeref. TODO: instead of disabling the asserts make
3071
	 * sure that we hold a reference when this work is running.
3072
	 */
3073
	DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
3074
 
7144 serge 3075
	/* As enabling the GPU requires fairly extensive mmio access,
3076
	 * periodically arm the mmio checker to see if we are triggering
3077
	 * any invalid access.
3078
	 */
3079
	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
3080
 
4104 Serge 3081
	for_each_ring(ring, dev_priv, i) {
5060 serge 3082
		u64 acthd;
3083
		u32 seqno;
4104 Serge 3084
		bool busy = true;
3085
 
3086
		semaphore_clear_deadlocks(dev_priv);
3087
 
3088
		seqno = ring->get_seqno(ring, false);
3089
		acthd = intel_ring_get_active_head(ring);
3090
 
3091
		if (ring->hangcheck.seqno == seqno) {
3092
			if (ring_idle(ring, seqno)) {
5060 serge 3093
				ring->hangcheck.action = HANGCHECK_IDLE;
3094
 
6084 serge 3095
				if (waitqueue_active(&ring->irq_queue)) {
4104 Serge 3096
					/* Issue a wake-up to catch stuck h/w. */
6084 serge 3097
					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
3098
						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
3099
							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
3100
								  ring->name);
3101
						else
3102
							DRM_INFO("Fake missed irq on %s\n",
3103
								 ring->name);
3104
						wake_up_all(&ring->irq_queue);
3105
					}
3106
					/* Safeguard against driver failure */
3107
					ring->hangcheck.score += BUSY;
3108
				} else
4104 Serge 3109
					busy = false;
3110
			} else {
3111
				/* We always increment the hangcheck score
3112
				 * if the ring is busy and still processing
3113
				 * the same request, so that no single request
3114
				 * can run indefinitely (such as a chain of
3115
				 * batches). The only time we do not increment
3116
				 * the hangcheck score on this ring is if this
3117
				 * ring is in a legitimate wait for another
3118
				 * ring. In that case the waiting ring is a
3119
				 * victim and we want to be sure we catch the
3120
				 * right culprit. Then every time we do kick
3121
				 * the ring, add a small increment to the
3122
				 * score so that we can catch a batch that is
3123
				 * being repeatedly kicked and so responsible
3124
				 * for stalling the machine.
3125
				 */
3126
				ring->hangcheck.action = ring_stuck(ring,
3127
								    acthd);
3128
 
3129
				switch (ring->hangcheck.action) {
4560 Serge 3130
				case HANGCHECK_IDLE:
4104 Serge 3131
				case HANGCHECK_WAIT:
5060 serge 3132
				case HANGCHECK_ACTIVE:
4104 Serge 3133
					break;
5060 serge 3134
				case HANGCHECK_ACTIVE_LOOP:
4104 Serge 3135
					ring->hangcheck.score += BUSY;
3136
					break;
3137
				case HANGCHECK_KICK:
3138
					ring->hangcheck.score += KICK;
3139
					break;
3140
				case HANGCHECK_HUNG:
3141
					ring->hangcheck.score += HUNG;
3142
					stuck[i] = true;
3143
					break;
3144
				}
3145
			}
3146
		} else {
4560 Serge 3147
			ring->hangcheck.action = HANGCHECK_ACTIVE;
3148
 
4104 Serge 3149
			/* Gradually reduce the count so that we catch DoS
3150
			 * attempts across multiple batches.
3151
			 */
3152
			if (ring->hangcheck.score > 0)
3153
				ring->hangcheck.score--;
5060 serge 3154
 
7144 serge 3155
			/* Clear head and subunit states on seqno movement */
5060 serge 3156
			ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
7144 serge 3157
 
3158
			memset(ring->hangcheck.instdone, 0,
3159
			       sizeof(ring->hangcheck.instdone));
4104 Serge 3160
		}
3161
 
3162
		ring->hangcheck.seqno = seqno;
3163
		ring->hangcheck.acthd = acthd;
3164
		busy_count += busy;
3165
	}
3166
 
3167
	for_each_ring(ring, dev_priv, i) {
5060 serge 3168
		if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
4104 Serge 3169
			DRM_INFO("%s on %s\n",
6084 serge 3170
				 stuck[i] ? "stuck" : "no progress",
3171
				 ring->name);
4104 Serge 3172
			rings_hung++;
3173
		}
3174
	}
3175
 
7144 serge 3176
	if (rings_hung) {
3177
		i915_handle_error(dev, true, "Ring hung");
3178
		goto out;
3179
	}
4104 Serge 3180
 
7144 serge 3181
	if (busy_count)
3182
		/* Reset the timer in case the chip hangs without another
3183
		 * request being added */
3184
		i915_queue_hangcheck(dev);
3185
 
3186
out:
3187
	ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
4104 Serge 3188
}
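/* A worked example of the scoring above (an illustrative sketch, not driver
 * code; it assumes the upstream threshold HANGCHECK_SCORE_RING_HUNG == 31):
 * a dead ring (HUNG) crosses the threshold in two samples, a repeatedly
 * kicked ring (KICK) in seven, and a merely busy ring (BUSY) in 31.
 */
#if 0
static int hangcheck_samples_until_hung(int per_sample_increment)
{
	int score = 0, samples = 0;

	while (score < HANGCHECK_SCORE_RING_HUNG) {
		score += per_sample_increment;
		samples++;
	}
	return samples;	/* HUNG(20) -> 2, KICK(5) -> 7, BUSY(1) -> 31 */
}
#endif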
6088 serge 3189
 
7144 serge 3190
void i915_queue_hangcheck(struct drm_device *dev)
3191
{
3192
	struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
3193
 
3194
	if (!i915.enable_hangcheck)
3195
		return;
3196
 
3197
	/* Don't continually defer the hangcheck so that it is always run at
3198
	 * least once after work has been scheduled on any ring. Otherwise,
3199
	 * we will ignore a hung ring if a second ring is kept busy.
3200
	 */
3201
 
3202
	queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
3203
			   round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
3204
}
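/* A sketch of the resulting cadence (assuming the upstream period of
 * 1500ms behind DRM_I915_HANGCHECK_JIFFIES): submitting work arms the
 * checker once, and only the checker itself re-arms it, from
 * i915_hangcheck_elapsed(), while any ring stays busy.
 */
#if 0
	/* caller side, e.g. after queuing a request: */
	i915_queue_hangcheck(dev);	/* no-op if a check is already pending */

	/* ~1.5s later i915_hangcheck_elapsed() runs; if busy_count was
	 * non-zero it calls i915_queue_hangcheck(dev) again itself. */
#endif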
3205
 
5060 serge 3206
static void ibx_irq_reset(struct drm_device *dev)
3207
{
3208
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 3209
 
5060 serge 3210
	if (HAS_PCH_NOP(dev))
3211
		return;
3212
 
3213
	GEN5_IRQ_RESET(SDE);
3214
 
3215
	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3216
		I915_WRITE(SERR_INT, 0xffffffff);
3217
}
3218
 
3219
/*
3220
 * SDEIER is also touched by the interrupt handler to work around missed PCH
3221
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3222
 * instead we unconditionally enable all PCH interrupt sources here, but then
3223
 * only unmask them as needed with SDEIMR.
3224
 *
3225
 * This function needs to be called before interrupts are enabled.
3226
 */
3227
static void ibx_irq_pre_postinstall(struct drm_device *dev)
4104 Serge 3228
{
3229
	struct drm_i915_private *dev_priv = dev->dev_private;
3230
 
3746 Serge 3231
	if (HAS_PCH_NOP(dev))
3232
		return;
3233
 
5060 serge 3234
	WARN_ON(I915_READ(SDEIER) != 0);
3746 Serge 3235
	I915_WRITE(SDEIER, 0xffffffff);
4104 Serge 3236
	POSTING_READ(SDEIER);
2351 Serge 3237
}
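/* The enable-all-then-mask pattern described above, as a sketch (the
 * 'mask' value is hypothetical here; the real one is computed in
 * ibx_irq_postinstall()):
 */
#if 0
	I915_WRITE(SDEIER, 0xffffffff);	/* every PCH source may latch into IIR */
	I915_WRITE(SDEIMR, ~mask);	/* but only 'mask' raises an interrupt */
	/* the handler can now toggle SDEIER to replay missed PCH interrupts
	 * without disturbing the unmask state programmed via SDEIMR */
#endif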
3238
 
5060 serge 3239
static void gen5_gt_irq_reset(struct drm_device *dev)
4104 Serge 3240
{
3241
	struct drm_i915_private *dev_priv = dev->dev_private;
3242
 
5060 serge 3243
	GEN5_IRQ_RESET(GT);
3244
	if (INTEL_INFO(dev)->gen >= 6)
3245
		GEN5_IRQ_RESET(GEN6_PM);
4104 Serge 3246
}
3247
 
3248
/* drm_dma.h hooks
3249
*/
5060 serge 3250
static void ironlake_irq_reset(struct drm_device *dev)
4104 Serge 3251
{
5060 serge 3252
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 3253
 
5060 serge 3254
	I915_WRITE(HWSTAM, 0xffffffff);
4104 Serge 3255
 
5060 serge 3256
	GEN5_IRQ_RESET(DE);
3257
	if (IS_GEN7(dev))
3258
		I915_WRITE(GEN7_ERR_INT, 0xffffffff);
4104 Serge 3259
 
5060 serge 3260
	gen5_gt_irq_reset(dev);
4104 Serge 3261
 
5060 serge 3262
	ibx_irq_reset(dev);
4104 Serge 3263
}
3264
 
5354 serge 3265
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3266
{
3267
	enum pipe pipe;
3268
 
6084 serge 3269
	i915_hotplug_interrupt_update(dev_priv, 0xFFFFFFFF, 0);
5354 serge 3270
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3271
 
3272
	for_each_pipe(dev_priv, pipe)
3273
		I915_WRITE(PIPESTAT(pipe), 0xffff);
3274
 
3275
	GEN5_IRQ_RESET(VLV_);
3276
}
3277
 
3031 serge 3278
static void valleyview_irq_preinstall(struct drm_device *dev)
3279
{
5060 serge 3280
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 3281
 
3282
	/* VLV magic */
3283
	I915_WRITE(VLV_IMR, 0);
3284
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3285
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3286
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3287
 
5060 serge 3288
	gen5_gt_irq_reset(dev);
4104 Serge 3289
 
5354 serge 3290
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3031 serge 3291
 
5354 serge 3292
	vlv_display_irq_reset(dev_priv);
3031 serge 3293
}
3294
 
5060 serge 3295
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
4560 Serge 3296
{
5060 serge 3297
	GEN8_IRQ_RESET_NDX(GT, 0);
3298
	GEN8_IRQ_RESET_NDX(GT, 1);
3299
	GEN8_IRQ_RESET_NDX(GT, 2);
3300
	GEN8_IRQ_RESET_NDX(GT, 3);
3301
}
3302
 
3303
static void gen8_irq_reset(struct drm_device *dev)
3304
{
4560 Serge 3305
	struct drm_i915_private *dev_priv = dev->dev_private;
3306
	int pipe;
3307
 
3308
	I915_WRITE(GEN8_MASTER_IRQ, 0);
3309
	POSTING_READ(GEN8_MASTER_IRQ);
3310
 
5060 serge 3311
	gen8_gt_irq_reset(dev_priv);
4560 Serge 3312
 
5354 serge 3313
	for_each_pipe(dev_priv, pipe)
3314
		if (intel_display_power_is_enabled(dev_priv,
6084 serge 3315
						   POWER_DOMAIN_PIPE(pipe)))
3316
			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
4560 Serge 3317
 
5060 serge 3318
	GEN5_IRQ_RESET(GEN8_DE_PORT_);
3319
	GEN5_IRQ_RESET(GEN8_DE_MISC_);
3320
	GEN5_IRQ_RESET(GEN8_PCU_);
4560 Serge 3321
 
6084 serge 3322
	if (HAS_PCH_SPLIT(dev))
3323
		ibx_irq_reset(dev);
5060 serge 3324
}
4560 Serge 3325
 
6084 serge 3326
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3327
				     unsigned int pipe_mask)
5060 serge 3328
{
5354 serge 3329
	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
7144 serge 3330
	enum pipe pipe;
4560 Serge 3331
 
5354 serge 3332
	spin_lock_irq(&dev_priv->irq_lock);
7144 serge 3333
	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3334
		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3335
				  dev_priv->de_irq_mask[pipe],
3336
				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
5354 serge 3337
	spin_unlock_irq(&dev_priv->irq_lock);
5060 serge 3338
}
3339
 
7144 serge 3340
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3341
				     unsigned int pipe_mask)
3342
{
3343
	enum pipe pipe;
3344
 
3345
	spin_lock_irq(&dev_priv->irq_lock);
3346
	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3347
		GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3348
	spin_unlock_irq(&dev_priv->irq_lock);
3349
 
3350
	/* make sure we're done processing display irqs */
3351
	synchronize_irq(dev_priv->dev->irq);
3352
}
3353
 
5060 serge 3354
static void cherryview_irq_preinstall(struct drm_device *dev)
3355
{
3356
	struct drm_i915_private *dev_priv = dev->dev_private;
3357
 
3358
	I915_WRITE(GEN8_MASTER_IRQ, 0);
3359
	POSTING_READ(GEN8_MASTER_IRQ);
3360
 
3361
	gen8_gt_irq_reset(dev_priv);
3362
 
3363
	GEN5_IRQ_RESET(GEN8_PCU_);
3364
 
3365
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3366
 
5354 serge 3367
	vlv_display_irq_reset(dev_priv);
4560 Serge 3368
}
3369
 
6084 serge 3370
static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
3371
				  const u32 hpd[HPD_NUM_PINS])
3372
{
3373
	struct drm_i915_private *dev_priv = to_i915(dev);
3374
	struct intel_encoder *encoder;
3375
	u32 enabled_irqs = 0;
3376
 
3377
	for_each_intel_encoder(dev, encoder)
3378
		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3379
			enabled_irqs |= hpd[encoder->hpd_pin];
3380
 
3381
	return enabled_irqs;
3382
}
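/* Usage sketch: combined with the hpd_* tables at the top of this file,
 * the helper turns per-pin software state into a register bitmask. On a
 * hypothetical ibx board where only port B is in state HPD_ENABLED:
 */
#if 0
	u32 bits = intel_hpd_enabled_irqs(dev, hpd_ibx);
	/* bits == SDE_PORTB_HOTPLUG, everything else stays masked */
#endif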
3383
 
3746 Serge 3384
static void ibx_hpd_irq_setup(struct drm_device *dev)
3385
{
5060 serge 3386
	struct drm_i915_private *dev_priv = dev->dev_private;
6084 serge 3387
	u32 hotplug_irqs, hotplug, enabled_irqs;
3746 Serge 3388
 
3389
	if (HAS_PCH_IBX(dev)) {
4104 Serge 3390
		hotplug_irqs = SDE_HOTPLUG_MASK;
6084 serge 3391
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
3746 Serge 3392
	} else {
4104 Serge 3393
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
6084 serge 3394
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
3746 Serge 3395
	}
3396
 
4104 Serge 3397
	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3746 Serge 3398
 
3399
	/*
6084 serge 3400
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
3401
	 * duration to 2ms (which is the minimum in the Display Port spec).
3402
	 * The pulse duration bits are reserved on LPT+.
3403
	 */
2351 Serge 3404
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3405
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3406
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3407
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3408
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
6084 serge 3409
	/*
3410
	 * When CPU and PCH are on the same package, port A
3411
	 * HPD must be enabled in both north and south.
3412
	 */
3413
	if (HAS_PCH_LPT_LP(dev))
3414
		hotplug |= PORTA_HOTPLUG_ENABLE;
2351 Serge 3415
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3416
}
3417
 
6084 serge 3418
static void spt_hpd_irq_setup(struct drm_device *dev)
3419
{
3420
	struct drm_i915_private *dev_priv = dev->dev_private;
3421
	u32 hotplug_irqs, hotplug, enabled_irqs;
3422
 
3423
	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3424
	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);
3425
 
3426
	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3427
 
3428
	/* Enable digital hotplug on the PCH */
3429
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3430
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
3431
		PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
3432
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3433
 
3434
	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3435
	hotplug |= PORTE_HOTPLUG_ENABLE;
3436
	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3437
}
3438
 
3439
static void ilk_hpd_irq_setup(struct drm_device *dev)
3440
{
3441
	struct drm_i915_private *dev_priv = dev->dev_private;
3442
	u32 hotplug_irqs, hotplug, enabled_irqs;
3443
 
3444
	if (INTEL_INFO(dev)->gen >= 8) {
3445
		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3446
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw);
3447
 
3448
		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3449
	} else if (INTEL_INFO(dev)->gen >= 7) {
3450
		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3451
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb);
3452
 
3453
		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3454
	} else {
3455
		hotplug_irqs = DE_DP_A_HOTPLUG;
3456
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk);
3457
 
3458
		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3459
	}
3460
 
3461
	/*
3462
	 * Enable digital hotplug on the CPU, and configure the DP short pulse
3463
	 * duration to 2ms (which is the minimum in the Display Port spec)
3464
	 * The pulse duration bits are reserved on HSW+.
3465
	 */
3466
	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3467
	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3468
	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
3469
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3470
 
3471
	ibx_hpd_irq_setup(dev);
3472
}
3473
 
3474
static void bxt_hpd_irq_setup(struct drm_device *dev)
3475
{
3476
	struct drm_i915_private *dev_priv = dev->dev_private;
3477
	u32 hotplug_irqs, hotplug, enabled_irqs;
3478
 
3479
	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt);
3480
	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3481
 
3482
	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3483
 
3484
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3485
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
3486
		PORTA_HOTPLUG_ENABLE;
3487
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3488
}
3489
 
3480 Serge 3490
static void ibx_irq_postinstall(struct drm_device *dev)
3491
{
5060 serge 3492
	struct drm_i915_private *dev_priv = dev->dev_private;
3480 Serge 3493
	u32 mask;
3494
 
3746 Serge 3495
	if (HAS_PCH_NOP(dev))
3496
		return;
3497
 
5060 serge 3498
	if (HAS_PCH_IBX(dev))
3499
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3500
	else
3501
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
4104 Serge 3502
 
6084 serge 3503
	gen5_assert_iir_is_zero(dev_priv, SDEIIR);
3480 Serge 3504
	I915_WRITE(SDEIMR, ~mask);
3505
}
3506
 
4104 Serge 3507
static void gen5_gt_irq_postinstall(struct drm_device *dev)
2351 Serge 3508
{
4104 Serge 3509
	struct drm_i915_private *dev_priv = dev->dev_private;
3510
	u32 pm_irqs, gt_irqs;
2351 Serge 3511
 
4104 Serge 3512
	pm_irqs = gt_irqs = 0;
2351 Serge 3513
 
3514
	dev_priv->gt_irq_mask = ~0;
4560 Serge 3515
	if (HAS_L3_DPF(dev)) {
4104 Serge 3516
		/* L3 parity interrupt is always unmasked. */
4560 Serge 3517
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3518
		gt_irqs |= GT_PARITY_ERROR(dev);
4104 Serge 3519
	}
2351 Serge 3520
 
4104 Serge 3521
	gt_irqs |= GT_RENDER_USER_INTERRUPT;
3522
	if (IS_GEN5(dev)) {
3523
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3524
			   ILK_BSD_USER_INTERRUPT;
3525
	} else {
3526
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3527
	}
2351 Serge 3528
 
5060 serge 3529
	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
2351 Serge 3530
 
4104 Serge 3531
	if (INTEL_INFO(dev)->gen >= 6) {
5354 serge 3532
		/*
3533
		 * RPS interrupts will get enabled/disabled on demand when RPS
3534
		 * itself is enabled/disabled.
3535
		 */
4104 Serge 3536
		if (HAS_VEBOX(dev))
3537
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3538
 
3539
		dev_priv->pm_irq_mask = 0xffffffff;
5060 serge 3540
		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
6084 serge 3541
	}
2351 Serge 3542
}
3543
 
4104 Serge 3544
static int ironlake_irq_postinstall(struct drm_device *dev)
3031 serge 3545
{
5060 serge 3546
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 3547
	u32 display_mask, extra_mask;
3548
 
3549
	if (INTEL_INFO(dev)->gen >= 7) {
3550
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3551
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
6084 serge 3552
				DE_PLANEB_FLIP_DONE_IVB |
5060 serge 3553
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
4104 Serge 3554
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
6084 serge 3555
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3556
			      DE_DP_A_HOTPLUG_IVB);
4104 Serge 3557
	} else {
3558
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3559
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
4560 Serge 3560
				DE_AUX_CHANNEL_A |
3561
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3562
				DE_POISON);
6084 serge 3563
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3564
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3565
			      DE_DP_A_HOTPLUG);
4104 Serge 3566
	}
3567
 
3031 serge 3568
	dev_priv->irq_mask = ~display_mask;
3569
 
5060 serge 3570
	I915_WRITE(HWSTAM, 0xeffe);
3031 serge 3571
 
5060 serge 3572
	ibx_irq_pre_postinstall(dev);
3573
 
3574
	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3575
 
4104 Serge 3576
	gen5_gt_irq_postinstall(dev);
3031 serge 3577
 
4104 Serge 3578
	ibx_irq_postinstall(dev);
3031 serge 3579
 
4104 Serge 3580
	if (IS_IRONLAKE_M(dev)) {
3581
		/* Enable PCU event interrupts
3582
		 *
3583
		 * spinlocking not required here for correctness since interrupt
3584
		 * setup is guaranteed to run in single-threaded context. But we
3585
		 * need it to make the assert_spin_locked happy. */
5354 serge 3586
		spin_lock_irq(&dev_priv->irq_lock);
6937 serge 3587
		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
5354 serge 3588
		spin_unlock_irq(&dev_priv->irq_lock);
4104 Serge 3589
	}
3031 serge 3590
 
3591
	return 0;
3592
}
3593
 
5060 serge 3594
static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3595
{
3596
	u32 pipestat_mask;
3597
	u32 iir_mask;
5354 serge 3598
	enum pipe pipe;
5060 serge 3599
 
3600
	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3601
			PIPE_FIFO_UNDERRUN_STATUS;
3602
 
5354 serge 3603
	for_each_pipe(dev_priv, pipe)
3604
		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
5060 serge 3605
	POSTING_READ(PIPESTAT(PIPE_A));
3606
 
3607
	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3608
			PIPE_CRC_DONE_INTERRUPT_STATUS;
3609
 
5354 serge 3610
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3611
	for_each_pipe(dev_priv, pipe)
3612
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
5060 serge 3613
 
3614
	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3615
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3616
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
5354 serge 3617
	if (IS_CHERRYVIEW(dev_priv))
3618
		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
5060 serge 3619
	dev_priv->irq_mask &= ~iir_mask;
3620
 
3621
	I915_WRITE(VLV_IIR, iir_mask);
3622
	I915_WRITE(VLV_IIR, iir_mask);
5354 serge 3623
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
5060 serge 3624
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
5354 serge 3625
	POSTING_READ(VLV_IMR);
5060 serge 3626
}
3627
 
3628
static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3629
{
3630
	u32 pipestat_mask;
3631
	u32 iir_mask;
5354 serge 3632
	enum pipe pipe;
5060 serge 3633
 
3634
	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3635
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3636
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
5354 serge 3637
	if (IS_CHERRYVIEW(dev_priv))
3638
		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
5060 serge 3639
 
3640
	dev_priv->irq_mask |= iir_mask;
5354 serge 3641
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
5060 serge 3642
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3643
	I915_WRITE(VLV_IIR, iir_mask);
3644
	I915_WRITE(VLV_IIR, iir_mask);
3645
	POSTING_READ(VLV_IIR);
3646
 
3647
	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3648
			PIPE_CRC_DONE_INTERRUPT_STATUS;
3649
 
5354 serge 3650
	i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3651
	for_each_pipe(dev_priv, pipe)
3652
		i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
5060 serge 3653
 
3654
	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3655
			PIPE_FIFO_UNDERRUN_STATUS;
5354 serge 3656
 
3657
	for_each_pipe(dev_priv, pipe)
3658
		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
5060 serge 3659
	POSTING_READ(PIPESTAT(PIPE_A));
3660
}
3661
 
3662
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3663
{
3664
	assert_spin_locked(&dev_priv->irq_lock);
3665
 
3666
	if (dev_priv->display_irqs_enabled)
3667
		return;
3668
 
3669
	dev_priv->display_irqs_enabled = true;
3670
 
5354 serge 3671
	if (intel_irqs_enabled(dev_priv))
5060 serge 3672
		valleyview_display_irqs_install(dev_priv);
3673
}
3674
 
3675
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3676
{
3677
	assert_spin_locked(&dev_priv->irq_lock);
3678
 
3679
	if (!dev_priv->display_irqs_enabled)
3680
		return;
3681
 
3682
	dev_priv->display_irqs_enabled = false;
3683
 
5354 serge 3684
	if (intel_irqs_enabled(dev_priv))
5060 serge 3685
		valleyview_display_irqs_uninstall(dev_priv);
3686
}
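/* The two helpers above implement a deferred-enable pattern: the
 * display_irqs_enabled flag records intent, and the register writes are
 * postponed until driver interrupts are live. A caller sketch:
 */
#if 0
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);	/* flag only if IRQs are off */
	spin_unlock_irq(&dev_priv->irq_lock);

	/* later, vlv_display_irq_postinstall() sees the flag and performs
	 * the deferred valleyview_display_irqs_install() */
#endif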
3687
 
5354 serge 3688
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3031 serge 3689
{
5060 serge 3690
	dev_priv->irq_mask = ~0;
3031 serge 3691
 
6084 serge 3692
	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3480 Serge 3693
	POSTING_READ(PORT_HOTPLUG_EN);
3694
 
5354 serge 3695
	I915_WRITE(VLV_IIR, 0xffffffff);
3696
	I915_WRITE(VLV_IIR, 0xffffffff);
3697
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3031 serge 3698
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
5354 serge 3699
	POSTING_READ(VLV_IMR);
3031 serge 3700
 
4104 Serge 3701
	/* Interrupt setup is already guaranteed to be single-threaded; this is
3702
	 * just to make the assert_spin_locked check happy. */
5354 serge 3703
	spin_lock_irq(&dev_priv->irq_lock);
5060 serge 3704
	if (dev_priv->display_irqs_enabled)
3705
		valleyview_display_irqs_install(dev_priv);
5354 serge 3706
	spin_unlock_irq(&dev_priv->irq_lock);
3707
}
3031 serge 3708
 
5354 serge 3709
static int valleyview_irq_postinstall(struct drm_device *dev)
3710
{
3711
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 3712
 
5354 serge 3713
	vlv_display_irq_postinstall(dev_priv);
3714
 
4104 Serge 3715
	gen5_gt_irq_postinstall(dev);
3243 Serge 3716
 
3031 serge 3717
	/* ack & enable invalid PTE error interrupts */
3718
#if 0 /* FIXME: add support to irq handler for checking these bits */
3719
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3720
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3721
#endif
3722
 
3723
	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3480 Serge 3724
 
3725
	return 0;
3726
}
3727
 
4560 Serge 3728
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3729
{
3730
	/* These are interrupts we'll toggle with the ring mask register */
3731
	uint32_t gt_interrupts[] = {
3732
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
5354 serge 3733
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
4560 Serge 3734
			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
5354 serge 3735
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3736
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
4560 Serge 3737
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
5354 serge 3738
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3739
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3740
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
4560 Serge 3741
		0,
5354 serge 3742
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3743
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
4560 Serge 3744
		};
3745
 
5060 serge 3746
	dev_priv->pm_irq_mask = 0xffffffff;
5354 serge 3747
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3748
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3749
	/*
3750
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
3751
	 * is enabled/disabled.
3752
	 */
3753
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
3754
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
4560 Serge 3755
}
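/* Layout of the gt_interrupts[] packing above (a sketch assuming the
 * upstream GEN8_*_IRQ_SHIFT values of 0 and 16): each 32-bit GT register
 * carries two engines' interrupt bits in its low and high halves.
 */
#if 0
	/* GT(0): RCS  in bits  0..15, BCS  in bits 16..31 */
	/* GT(1): VCS1 in bits  0..15, VCS2 in bits 16..31 */
	/* GT(2): PM/RPS events, armed on demand elsewhere  */
	/* GT(3): VECS in bits 16..31                        */
#endif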
3756
 
3757
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3758
{
5354 serge 3759
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3760
	uint32_t de_pipe_enables;
6084 serge 3761
	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
3762
	u32 de_port_enables;
3763
	enum pipe pipe;
5354 serge 3764
 
6084 serge 3765
	if (INTEL_INFO(dev_priv)->gen >= 9) {
5354 serge 3766
		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3767
				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
6084 serge 3768
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3769
				  GEN9_AUX_CHANNEL_D;
3770
		if (IS_BROXTON(dev_priv))
3771
			de_port_masked |= BXT_DE_PORT_GMBUS;
3772
	} else {
5354 serge 3773
		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
6084 serge 3774
				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3775
	}
5354 serge 3776
 
3777
	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
6084 serge 3778
					   GEN8_PIPE_FIFO_UNDERRUN;
5354 serge 3779
 
6084 serge 3780
	de_port_enables = de_port_masked;
3781
	if (IS_BROXTON(dev_priv))
3782
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3783
	else if (IS_BROADWELL(dev_priv))
3784
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
3785
 
4560 Serge 3786
	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3787
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3788
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3789
 
5354 serge 3790
	for_each_pipe(dev_priv, pipe)
3791
		if (intel_display_power_is_enabled(dev_priv,
5060 serge 3792
				POWER_DOMAIN_PIPE(pipe)))
3793
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3794
					  dev_priv->de_irq_mask[pipe],
6084 serge 3795
					  de_pipe_enables);
4560 Serge 3796
 
6084 serge 3797
	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
4560 Serge 3798
}
3799
 
3800
static int gen8_irq_postinstall(struct drm_device *dev)
3801
{
3802
	struct drm_i915_private *dev_priv = dev->dev_private;
3803
 
6084 serge 3804
	if (HAS_PCH_SPLIT(dev))
3805
		ibx_irq_pre_postinstall(dev);
5060 serge 3806
 
4560 Serge 3807
	gen8_gt_irq_postinstall(dev_priv);
3808
	gen8_de_irq_postinstall(dev_priv);
3809
 
6084 serge 3810
	if (HAS_PCH_SPLIT(dev))
3811
		ibx_irq_postinstall(dev);
4560 Serge 3812
 
3813
	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3814
	POSTING_READ(GEN8_MASTER_IRQ);
3815
 
3816
	return 0;
3817
}
3818
 
5060 serge 3819
static int cherryview_irq_postinstall(struct drm_device *dev)
4560 Serge 3820
{
3821
	struct drm_i915_private *dev_priv = dev->dev_private;
3822
 
5354 serge 3823
	vlv_display_irq_postinstall(dev_priv);
4560 Serge 3824
 
5060 serge 3825
	gen8_gt_irq_postinstall(dev_priv);
4560 Serge 3826
 
5060 serge 3827
	I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3828
	POSTING_READ(GEN8_MASTER_IRQ);
4560 Serge 3829
 
5060 serge 3830
	return 0;
3831
}
4560 Serge 3832
 
5060 serge 3833
static void gen8_irq_uninstall(struct drm_device *dev)
3834
{
3835
	struct drm_i915_private *dev_priv = dev->dev_private;
4560 Serge 3836
 
5060 serge 3837
	if (!dev_priv)
3838
		return;
3839
 
3840
	gen8_irq_reset(dev);
4560 Serge 3841
}
3842
 
5354 serge 3843
static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
3844
{
3845
	/* Interrupt setup is already guaranteed to be single-threaded; this is
3846
	 * just to make the assert_spin_locked check happy. */
3847
	spin_lock_irq(&dev_priv->irq_lock);
3848
	if (dev_priv->display_irqs_enabled)
3849
		valleyview_display_irqs_uninstall(dev_priv);
3850
	spin_unlock_irq(&dev_priv->irq_lock);
3851
 
3852
	vlv_display_irq_reset(dev_priv);
3853
 
3854
	dev_priv->irq_mask = ~0;
3855
}
3856
 
3031 serge 3857
static void valleyview_irq_uninstall(struct drm_device *dev)
3858
{
5060 serge 3859
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 3860
 
3861
	if (!dev_priv)
3862
		return;
3863
 
5060 serge 3864
	I915_WRITE(VLV_MASTER_IER, 0);
4293 Serge 3865
 
5354 serge 3866
	gen5_gt_irq_reset(dev);
3031 serge 3867
 
3868
	I915_WRITE(HWSTAM, 0xffffffff);
5060 serge 3869
 
5354 serge 3870
	vlv_display_irq_uninstall(dev_priv);
3031 serge 3871
}
3872
 
5060 serge 3873
static void cherryview_irq_uninstall(struct drm_device *dev)
3031 serge 3874
{
5060 serge 3875
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 3876
 
3877
	if (!dev_priv)
3878
		return;
3879
 
5060 serge 3880
	I915_WRITE(GEN8_MASTER_IRQ, 0);
3881
	POSTING_READ(GEN8_MASTER_IRQ);
4293 Serge 3882
 
5354 serge 3883
	gen8_gt_irq_reset(dev_priv);
3031 serge 3884
 
5354 serge 3885
	GEN5_IRQ_RESET(GEN8_PCU_);
3031 serge 3886
 
5354 serge 3887
	vlv_display_irq_uninstall(dev_priv);
5060 serge 3888
}
3889
 
3890
static void ironlake_irq_uninstall(struct drm_device *dev)
3891
{
3892
	struct drm_i915_private *dev_priv = dev->dev_private;
3893
 
3894
	if (!dev_priv)
3746 Serge 3895
		return;
3896
 
5060 serge 3897
	ironlake_irq_reset(dev);
3031 serge 3898
}
3899
 
3900
#if 0
3901
static void i8xx_irq_preinstall(struct drm_device * dev)
3902
{
5060 serge 3903
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 3904
	int pipe;
3905
 
5354 serge 3906
	for_each_pipe(dev_priv, pipe)
3031 serge 3907
		I915_WRITE(PIPESTAT(pipe), 0);
3908
	I915_WRITE16(IMR, 0xffff);
3909
	I915_WRITE16(IER, 0x0);
3910
	POSTING_READ16(IER);
3911
}
3912
 
3913
static int i8xx_irq_postinstall(struct drm_device *dev)
3914
{
5060 serge 3915
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 3916
 
3917
	I915_WRITE16(EMR,
3918
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3919
 
3920
	/* Unmask the interrupts that we always want on. */
3921
	dev_priv->irq_mask =
3922
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3923
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3924
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
6084 serge 3925
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3031 serge 3926
	I915_WRITE16(IMR, dev_priv->irq_mask);
3927
 
3928
	I915_WRITE16(IER,
3929
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3930
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3931
		     I915_USER_INTERRUPT);
3932
	POSTING_READ16(IER);
3933
 
4560 Serge 3934
	/* Interrupt setup is already guaranteed to be single-threaded; this is
3935
	 * just to make the assert_spin_locked check happy. */
5354 serge 3936
	spin_lock_irq(&dev_priv->irq_lock);
5060 serge 3937
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3938
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
5354 serge 3939
	spin_unlock_irq(&dev_priv->irq_lock);
4560 Serge 3940
 
3031 serge 3941
	return 0;
3942
}
3943
 
3746 Serge 3944
/*
3945
 * Returns true when a page flip has completed.
3946
 */
3947
static bool i8xx_handle_vblank(struct drm_device *dev,
4560 Serge 3948
			       int plane, int pipe, u32 iir)
3746 Serge 3949
{
5060 serge 3950
	struct drm_i915_private *dev_priv = dev->dev_private;
4560 Serge 3951
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3746 Serge 3952
 
6084 serge 3953
	if (!intel_pipe_handle_vblank(dev, pipe))
3954
		return false;
3746 Serge 3955
 
3956
	if ((iir & flip_pending) == 0)
5354 serge 3957
		goto check_page_flip;
3746 Serge 3958
 
3959
	/* We detect FlipDone by looking for the change in PendingFlip from '1'
3960
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3961
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3962
	 * the flip is completed (no longer pending). Since this doesn't raise
3963
	 * an interrupt per se, we watch for the change at vblank.
3964
	 */
3965
	if (I915_READ16(ISR) & flip_pending)
5354 serge 3966
		goto check_page_flip;
3746 Serge 3967
 
6320 serge 3968
	intel_prepare_page_flip(dev, plane);
3969
	intel_finish_page_flip(dev, pipe);
5354 serge 3970
	return true;
3746 Serge 3971
 
5354 serge 3972
check_page_flip:
6320 serge 3973
	intel_check_page_flip(dev, pipe);
5354 serge 3974
	return false;
3746 Serge 3975
}
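/* Decision table for the check above (a sketch): IIR pending + ISR still
 * pending -> the flip is still in flight, fall through to check_page_flip;
 * IIR pending + ISR clear -> the flip completed since the last vblank, so
 * intel_finish_page_flip() reports it.
 */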
3976
 
3243 Serge 3977
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3031 serge 3978
{
5060 serge 3979
	struct drm_device *dev = arg;
3980
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 3981
	u16 iir, new_iir;
3982
	u32 pipe_stats[2];
3983
	int pipe;
3984
	u16 flip_mask =
3985
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3986
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
6937 serge 3987
	irqreturn_t ret;
3031 serge 3988
 
6084 serge 3989
	if (!intel_irqs_enabled(dev_priv))
3990
		return IRQ_NONE;
3991
 
6937 serge 3992
	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
3993
	disable_rpm_wakeref_asserts(dev_priv);
3994
 
3995
	ret = IRQ_NONE;
3031 serge 3996
	iir = I915_READ16(IIR);
3997
	if (iir == 0)
6937 serge 3998
		goto out;
3031 serge 3999
 
4000
	while (iir & ~flip_mask) {
4001
		/* Can't rely on pipestat interrupt bit in iir as it might
4002
		 * have been cleared after the pipestat interrupt was received.
4003
		 * It doesn't set the bit in iir again, but it still produces
4004
		 * interrupts (for non-MSI).
4005
		 */
5354 serge 4006
		spin_lock(&dev_priv->irq_lock);
4126 Serge 4007
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
5354 serge 4008
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3031 serge 4009
 
5354 serge 4010
		for_each_pipe(dev_priv, pipe) {
6937 serge 4011
			i915_reg_t reg = PIPESTAT(pipe);
3031 serge 4012
			pipe_stats[pipe] = I915_READ(reg);
4013
 
4014
			/*
4015
			 * Clear the PIPE*STAT regs before the IIR
4016
			 */
5060 serge 4017
			if (pipe_stats[pipe] & 0x8000ffff)
3031 serge 4018
				I915_WRITE(reg, pipe_stats[pipe]);
6084 serge 4019
		}
5354 serge 4020
		spin_unlock(&dev_priv->irq_lock);
3031 serge 4021
 
4022
		I915_WRITE16(IIR, iir & ~flip_mask);
4023
		new_iir = I915_READ16(IIR); /* Flush posted writes */
4024
 
4025
		if (iir & I915_USER_INTERRUPT)
6084 serge 4026
			notify_ring(&dev_priv->ring[RCS]);
3031 serge 4027
 
5354 serge 4028
		for_each_pipe(dev_priv, pipe) {
4560 Serge 4029
			int plane = pipe;
4030
			if (HAS_FBC(dev))
4031
				plane = !plane;
3031 serge 4032
 
4560 Serge 4033
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4034
			    i8xx_handle_vblank(dev, plane, pipe, iir))
4035
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3031 serge 4036
 
4560 Serge 4037
			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4038
				i9xx_pipe_crc_irq_handler(dev, pipe);
5060 serge 4039
 
5354 serge 4040
			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4041
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
4042
								    pipe);
4560 Serge 4043
		}
4044
 
3031 serge 4045
		iir = new_iir;
4046
	}
6937 serge 4047
	ret = IRQ_HANDLED;
3031 serge 4048
 
6937 serge 4049
out:
4050
	enable_rpm_wakeref_asserts(dev_priv);
4051
 
4052
	return ret;
3031 serge 4053
}
4054
 
4055
static void i8xx_irq_uninstall(struct drm_device * dev)
4056
{
5060 serge 4057
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 4058
	int pipe;
4059
 
5354 serge 4060
	for_each_pipe(dev_priv, pipe) {
3031 serge 4061
		/* Clear enable bits; then clear status bits */
4062
		I915_WRITE(PIPESTAT(pipe), 0);
4063
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4064
	}
4065
	I915_WRITE16(IMR, 0xffff);
4066
	I915_WRITE16(IER, 0x0);
4067
	I915_WRITE16(IIR, I915_READ16(IIR));
4068
}
4069
 
4070
#endif
4071
 
4072
static void i915_irq_preinstall(struct drm_device * dev)
4073
{
5060 serge 4074
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 4075
	int pipe;
4076
 
4077
	if (I915_HAS_HOTPLUG(dev)) {
6084 serge 4078
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3031 serge 4079
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4080
	}
4081
 
4082
	I915_WRITE16(HWSTAM, 0xeffe);
5354 serge 4083
	for_each_pipe(dev_priv, pipe)
3031 serge 4084
		I915_WRITE(PIPESTAT(pipe), 0);
4085
	I915_WRITE(IMR, 0xffffffff);
4086
	I915_WRITE(IER, 0x0);
4087
	POSTING_READ(IER);
4088
}
4089
 
4090
static int i915_irq_postinstall(struct drm_device *dev)
4091
{
5060 serge 4092
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 4093
	u32 enable_mask;
4094
 
4095
	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
4096
 
4097
	/* Unmask the interrupts that we always want on. */
4098
	dev_priv->irq_mask =
4099
		~(I915_ASLE_INTERRUPT |
4100
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4101
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4102
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
6084 serge 4103
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3031 serge 4104
 
4105
	enable_mask =
4106
		I915_ASLE_INTERRUPT |
4107
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4108
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4109
		I915_USER_INTERRUPT;
3480 Serge 4110
 
3031 serge 4111
	if (I915_HAS_HOTPLUG(dev)) {
6084 serge 4112
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3480 Serge 4113
		POSTING_READ(PORT_HOTPLUG_EN);
4114
 
3031 serge 4115
		/* Enable in IER... */
4116
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4117
		/* and unmask in IMR */
4118
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4119
	}
4120
 
4121
	I915_WRITE(IMR, dev_priv->irq_mask);
4122
	I915_WRITE(IER, enable_mask);
4123
	POSTING_READ(IER);
4124
 
4126 Serge 4125
	i915_enable_asle_pipestat(dev);
3480 Serge 4126
 
4560 Serge 4127
	/* Interrupt setup is already guaranteed to be single-threaded; this is
4128
	 * just to make the assert_spin_locked check happy. */
5354 serge 4129
	spin_lock_irq(&dev_priv->irq_lock);
5060 serge 4130
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4131
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
5354 serge 4132
	spin_unlock_irq(&dev_priv->irq_lock);
4560 Serge 4133
 
3480 Serge 4134
	return 0;
4135
}
4136
 
3746 Serge 4137
/*
4138
 * Returns true when a page flip has completed.
4139
 */
4140
static bool i915_handle_vblank(struct drm_device *dev,
4141
			       int plane, int pipe, u32 iir)
3480 Serge 4142
{
5060 serge 4143
	struct drm_i915_private *dev_priv = dev->dev_private;
3746 Serge 4144
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3480 Serge 4145
 
6088 serge 4146
	if (!intel_pipe_handle_vblank(dev, pipe))
4147
		return false;
3480 Serge 4148
 
3746 Serge 4149
	if ((iir & flip_pending) == 0)
5354 serge 4150
		goto check_page_flip;
3480 Serge 4151
 
3746 Serge 4152
	/* We detect FlipDone by looking for the change in PendingFlip from '1'
4153
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
4154
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
4155
	 * the flip is completed (no longer pending). Since this doesn't raise
4156
	 * an interrupt per se, we watch for the change at vblank.
4157
	 */
4158
	if (I915_READ(ISR) & flip_pending)
5354 serge 4159
		goto check_page_flip;
3746 Serge 4160
 
6320 serge 4161
	intel_prepare_page_flip(dev, plane);
4162
	intel_finish_page_flip(dev, pipe);
5354 serge 4163
	return true;
3746 Serge 4164
 
5354 serge 4165
check_page_flip:
6320 serge 4166
	intel_check_page_flip(dev, pipe);
5354 serge 4167
	return false;
3031 serge 4168
}
4169
 
3243 Serge 4170
static irqreturn_t i915_irq_handler(int irq, void *arg)
3031 serge 4171
{
5060 serge 4172
	struct drm_device *dev = arg;
4173
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 4174
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
4175
	u32 flip_mask =
4176
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4177
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4178
	int pipe, ret = IRQ_NONE;
4179
 
6084 serge 4180
	if (!intel_irqs_enabled(dev_priv))
4181
		return IRQ_NONE;
4182
 
6937 serge 4183
	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
4184
	disable_rpm_wakeref_asserts(dev_priv);
4185
 
3031 serge 4186
	iir = I915_READ(IIR);
4187
	do {
4188
		bool irq_received = (iir & ~flip_mask) != 0;
4189
		bool blc_event = false;
4190
 
4191
		/* Can't rely on pipestat interrupt bit in iir as it might
4192
		 * have been cleared after the pipestat interrupt was received.
4193
		 * It doesn't set the bit in iir again, but it still produces
4194
		 * interrupts (for non-MSI).
4195
		 */
5354 serge 4196
		spin_lock(&dev_priv->irq_lock);
4126 Serge 4197
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
5354 serge 4198
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3031 serge 4199
 
5354 serge 4200
		for_each_pipe(dev_priv, pipe) {
6937 serge 4201
			i915_reg_t reg = PIPESTAT(pipe);
3031 serge 4202
			pipe_stats[pipe] = I915_READ(reg);
4203
 
4204
			/* Clear the PIPE*STAT regs before the IIR */
4205
			if (pipe_stats[pipe] & 0x8000ffff) {
4206
				I915_WRITE(reg, pipe_stats[pipe]);
4207
				irq_received = true;
4208
			}
4209
		}
5354 serge 4210
		spin_unlock(&dev_priv->irq_lock);
3031 serge 4211
 
4212
		if (!irq_received)
4213
			break;
4214
 
4215
		/* Consume port.  Then clear IIR or we'll miss events */
5060 serge 4216
		if (I915_HAS_HOTPLUG(dev) &&
4217
		    iir & I915_DISPLAY_PORT_INTERRUPT)
4218
			i9xx_hpd_irq_handler(dev);
3031 serge 4219
 
4220
		I915_WRITE(IIR, iir & ~flip_mask);
4221
		new_iir = I915_READ(IIR); /* Flush posted writes */
4222
 
4223
		if (iir & I915_USER_INTERRUPT)
6084 serge 4224
			notify_ring(&dev_priv->ring[RCS]);
3031 serge 4225
 
5354 serge 4226
		for_each_pipe(dev_priv, pipe) {
3031 serge 4227
			int plane = pipe;
4560 Serge 4228
			if (HAS_FBC(dev))
3031 serge 4229
				plane = !plane;
4230
 
3746 Serge 4231
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4232
			    i915_handle_vblank(dev, plane, pipe, iir))
4233
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4234
 
3031 serge 4235
			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4236
				blc_event = true;
4560 Serge 4237
 
4238
			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4239
				i9xx_pipe_crc_irq_handler(dev, pipe);
5060 serge 4240
 
5354 serge 4241
			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4242
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
4243
								    pipe);
3031 serge 4244
		}
4245
 
4126 Serge 4246
		if (blc_event || (iir & I915_ASLE_INTERRUPT))
4247
			intel_opregion_asle_intr(dev);
3031 serge 4248
 
4249
		/* With MSI, interrupts are only generated when iir
4250
		 * transitions from zero to nonzero.  If another bit got
4251
		 * set while we were handling the existing iir bits, then
4252
		 * we would never get another interrupt.
4253
		 *
4254
		 * This is fine on non-MSI as well, as if we hit this path
4255
		 * we avoid exiting the interrupt handler only to generate
4256
		 * another one.
4257
		 *
4258
		 * Note that for MSI this could cause a stray interrupt report
4259
		 * if an interrupt landed in the time between writing IIR and
4260
		 * the posting read.  This should be rare enough to never
4261
		 * trigger the 99% of 100,000 interrupts test for disabling
4262
		 * stray interrupts.
4263
		 */
4264
		ret = IRQ_HANDLED;
4265
		iir = new_iir;
4266
	} while (iir & ~flip_mask);
4267
 
6937 serge 4268
	enable_rpm_wakeref_asserts(dev_priv);
4269
 
3031 serge 4270
	return ret;
4271
}
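/* The iir/new_iir dance above, reduced to its core loop (a sketch that
 * drops the flip_mask handling):
 */
#if 0
	iir = I915_READ(IIR);
	do {
		I915_WRITE(IIR, iir);		/* ack what we have seen */
		new_iir = I915_READ(IIR);	/* catch bits that arrived meanwhile */
		/* ... handle 'iir' ... */
		iir = new_iir;
	} while (iir);		/* MSI only fires on a 0 -> nonzero transition */
#endif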
4272
 
4273
static void i915_irq_uninstall(struct drm_device * dev)
4274
{
5060 serge 4275
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 4276
	int pipe;
4277
 
4278
	if (I915_HAS_HOTPLUG(dev)) {
6084 serge 4279
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3031 serge 4280
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4281
	}
4282
 
4283
	I915_WRITE16(HWSTAM, 0xffff);
5354 serge 4284
	for_each_pipe(dev_priv, pipe) {
3031 serge 4285
		/* Clear enable bits; then clear status bits */
4286
		I915_WRITE(PIPESTAT(pipe), 0);
4287
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4288
	}
4289
	I915_WRITE(IMR, 0xffffffff);
4290
	I915_WRITE(IER, 0x0);
4291
 
4292
	I915_WRITE(IIR, I915_READ(IIR));
4293
}
4294
 
4295
static void i965_irq_preinstall(struct drm_device * dev)
4296
{
5060 serge 4297
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 4298
	int pipe;
4299
 
6084 serge 4300
	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3031 serge 4301
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4302
 
4303
	I915_WRITE(HWSTAM, 0xeffe);
5354 serge 4304
	for_each_pipe(dev_priv, pipe)
3031 serge 4305
		I915_WRITE(PIPESTAT(pipe), 0);
4306
	I915_WRITE(IMR, 0xffffffff);
4307
	I915_WRITE(IER, 0x0);
4308
	POSTING_READ(IER);
4309
}
4310
 
4311
static int i965_irq_postinstall(struct drm_device *dev)
4312
{
5060 serge 4313
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 4314
	u32 enable_mask;
4315
	u32 error_mask;
4316
 
4317
	/* Unmask the interrupts that we always want on. */
4318
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4319
			       I915_DISPLAY_PORT_INTERRUPT |
4320
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4321
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4322
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4323
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4324
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4325
 
4326
	enable_mask = ~dev_priv->irq_mask;
3746 Serge 4327
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4328
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3031 serge 4329
	enable_mask |= I915_USER_INTERRUPT;
4330
 
4331
	if (IS_G4X(dev))
4332
		enable_mask |= I915_BSD_USER_INTERRUPT;
4333
 
4104 Serge 4334
	/* Interrupt setup is already guaranteed to be single-threaded; this is
4335
	 * just to make the assert_spin_locked check happy. */
5354 serge 4336
	spin_lock_irq(&dev_priv->irq_lock);
5060 serge 4337
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4338
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4339
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
5354 serge 4340
	spin_unlock_irq(&dev_priv->irq_lock);
3031 serge 4341
 
4342
	/*
4343
	 * Enable some error detection, note the instruction error mask
4344
	 * bit is reserved, so we leave it masked.
4345
	 */
4346
	if (IS_G4X(dev)) {
4347
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
4348
			       GM45_ERROR_MEM_PRIV |
4349
			       GM45_ERROR_CP_PRIV |
4350
			       I915_ERROR_MEMORY_REFRESH);
4351
	} else {
4352
		error_mask = ~(I915_ERROR_PAGE_TABLE |
4353
			       I915_ERROR_MEMORY_REFRESH);
4354
	}
4355
	I915_WRITE(EMR, error_mask);
4356
 
4357
	I915_WRITE(IMR, dev_priv->irq_mask);
4358
	I915_WRITE(IER, enable_mask);
4359
	POSTING_READ(IER);
4360
 
6084 serge 4361
	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3480 Serge 4362
	POSTING_READ(PORT_HOTPLUG_EN);
4363
 
4126 Serge 4364
	i915_enable_asle_pipestat(dev);
3480 Serge 4365
 
4366
	return 0;
4367
}
4368
 
3746 Serge 4369
static void i915_hpd_irq_setup(struct drm_device *dev)
3480 Serge 4370
{
5060 serge 4371
	struct drm_i915_private *dev_priv = dev->dev_private;
3480 Serge 4372
	u32 hotplug_en;
4373
 
4104 Serge 4374
	assert_spin_locked(&dev_priv->irq_lock);
4375
 
3031 serge 4376
	/* Note HDMI and DP share hotplug bits */
6084 serge 4377
	/* enable bits are the same for all generations */
4378
	hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915);
4379
	/* Programming the CRT detection parameters tends
4380
	   to generate a spurious hotplug event about three
4381
	   seconds later.  So just do it once.
4382
	*/
4383
	if (IS_G4X(dev))
4384
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4385
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
3480 Serge 4386
 
3031 serge 4387
	/* Ignore TV since it's buggy */
6084 serge 4388
	i915_hotplug_interrupt_update_locked(dev_priv,
4389
					     HOTPLUG_INT_EN_MASK |
4390
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4391
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4392
					     hotplug_en);
3031 serge 4393
}
4394
 
3243 Serge 4395
static irqreturn_t i965_irq_handler(int irq, void *arg)
3031 serge 4396
{
5060 serge 4397
	struct drm_device *dev = arg;
4398
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 4399
	u32 iir, new_iir;
4400
	u32 pipe_stats[I915_MAX_PIPES];
4401
	int ret = IRQ_NONE, pipe;
3746 Serge 4402
	u32 flip_mask =
4403
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4404
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3031 serge 4405
 
6084 serge 4406
	if (!intel_irqs_enabled(dev_priv))
4407
		return IRQ_NONE;
4408
 
6937 serge 4409
	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
4410
	disable_rpm_wakeref_asserts(dev_priv);
4411
 
3031 serge 4412
	iir = I915_READ(IIR);
4413
 
4414
	for (;;) {
5060 serge 4415
		bool irq_received = (iir & ~flip_mask) != 0;
3031 serge 4416
		bool blc_event = false;
4417
 
4418
		/* Can't rely on pipestat interrupt bit in iir as it might
4419
		 * have been cleared after the pipestat interrupt was received.
4420
		 * It doesn't set the bit in iir again, but it still produces
4421
		 * interrupts (for non-MSI).
4422
		 */
5354 serge 4423
		spin_lock(&dev_priv->irq_lock);
4126 Serge 4424
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
5354 serge 4425
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3031 serge 4426
 
5354 serge 4427
		for_each_pipe(dev_priv, pipe) {
6937 serge 4428
			i915_reg_t reg = PIPESTAT(pipe);
3031 serge 4429
			pipe_stats[pipe] = I915_READ(reg);
4430
 
4431
			/*
4432
			 * Clear the PIPE*STAT regs before the IIR
4433
			 */
4434
			if (pipe_stats[pipe] & 0x8000ffff) {
4435
				I915_WRITE(reg, pipe_stats[pipe]);
5060 serge 4436
				irq_received = true;
3031 serge 4437
			}
4438
		}
5354 serge 4439
		spin_unlock(&dev_priv->irq_lock);
3031 serge 4440
 
4441
		if (!irq_received)
4442
			break;
4443
 
4444
		ret = IRQ_HANDLED;
4445
 
4446
		/* Consume port.  Then clear IIR or we'll miss events */
5060 serge 4447
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
4448
			i9xx_hpd_irq_handler(dev);
3031 serge 4449
 
3746 Serge 4450
		I915_WRITE(IIR, iir & ~flip_mask);
3031 serge 4451
		new_iir = I915_READ(IIR); /* Flush posted writes */
4452
 
4453
		if (iir & I915_USER_INTERRUPT)
6084 serge 4454
			notify_ring(&dev_priv->ring[RCS]);
3031 serge 4455
		if (iir & I915_BSD_USER_INTERRUPT)
6084 serge 4456
			notify_ring(&dev_priv->ring[VCS]);
3031 serge 4457
 
5354 serge 4458
		for_each_pipe(dev_priv, pipe) {
3746 Serge 4459
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4460
			    i915_handle_vblank(dev, pipe, pipe, iir))
4461
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
3031 serge 4462
 
4463
			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4464
				blc_event = true;
4560 Serge 4465
 
4466
			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4467
				i9xx_pipe_crc_irq_handler(dev, pipe);
5060 serge 4468
 
5354 serge 4469
			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4470
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
3031 serge 4471
		}
4472
 
4126 Serge 4473
		if (blc_event || (iir & I915_ASLE_INTERRUPT))
4474
			intel_opregion_asle_intr(dev);
3031 serge 4475
 
3480 Serge 4476
		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4477
			gmbus_irq_handler(dev);
4478
 
3031 serge 4479
		/* With MSI, interrupts are only generated when iir
4480
		 * transitions from zero to nonzero.  If another bit got
4481
		 * set while we were handling the existing iir bits, then
4482
		 * we would never get another interrupt.
4483
		 *
4484
		 * This is fine on non-MSI as well, as if we hit this path
4485
		 * we avoid exiting the interrupt handler only to generate
4486
		 * another one.
4487
		 *
4488
		 * Note that for MSI this could cause a stray interrupt report
4489
		 * if an interrupt landed in the time between writing IIR and
4490
		 * the posting read.  This should be rare enough to never
4491
		 * trigger the 99% of 100,000 interrupts test for disabling
4492
		 * stray interrupts.
4493
		 */
4494
		iir = new_iir;
4495
	}
4496
 
6937 serge 4497
	enable_rpm_wakeref_asserts(dev_priv);
4498
 
3031 serge 4499
	return ret;
4500
}
4501
 
4502
static void i965_irq_uninstall(struct drm_device * dev)
4503
{
5060 serge 4504
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 4505
	int pipe;
4506
 
4507
	if (!dev_priv)
4508
		return;
4509
 
6084 serge 4510
	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3031 serge 4511
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4512
 
4513
	I915_WRITE(HWSTAM, 0xffffffff);
5354 serge 4514
	for_each_pipe(dev_priv, pipe)
3031 serge 4515
		I915_WRITE(PIPESTAT(pipe), 0);
4516
	I915_WRITE(IMR, 0xffffffff);
4517
	I915_WRITE(IER, 0x0);
4518
 
5354 serge 4519
	for_each_pipe(dev_priv, pipe)
3031 serge 4520
		I915_WRITE(PIPESTAT(pipe),
4521
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4522
	I915_WRITE(IIR, I915_READ(IIR));
4523
}
4524
 
5354 serge 4525
/**
4526
 * intel_irq_init - initializes irq support
4527
 * @dev_priv: i915 device instance
4528
 *
4529
 * This function initializes all the irq support including work items, timers
4530
 * and all the vtables. It does not set up the interrupt itself though.
4531
 */
4532
void intel_irq_init(struct drm_i915_private *dev_priv)
2351 Serge 4533
{
5354 serge 4534
	struct drm_device *dev = dev_priv->dev;
3031 serge 4535
 
6296 serge 4536
	intel_hpd_init_work(dev_priv);
6084 serge 4537
 
4126 Serge 4538
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4539
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
3480 Serge 4540
 
5060 serge 4541
	/* Let's track the enabled rps events */
6937 serge 4542
	if (IS_VALLEYVIEW(dev_priv))
5354 serge 4543
		/* WaGsvRC0ResidencyMethod:vlv */
6937 serge 4544
		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
5060 serge 4545
	else
6084 serge 4546
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
3480 Serge 4547
 
6084 serge 4548
	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
4549
			  i915_hangcheck_elapsed);
4560 Serge 4550
 
5354 serge 4551
 
4552
	if (IS_GEN2(dev_priv)) {
4560 Serge 4553
		dev->max_vblank_count = 0;
4554
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
5354 serge 4555
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4560 Serge 4556
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
6084 serge 4557
		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4560 Serge 4558
	} else {
6084 serge 4559
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
4560
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4293 Serge 4561
	}
3480 Serge 4562
 
5354 serge 4563
	/*
4564
	 * Opt out of the vblank disable timer on everything except gen2.
4565
	 * Gen2 doesn't have a hardware frame counter and so depends on
4566
	 * vblank interrupts to produce sane vblank sequence numbers.
4567
	 */
4568
	if (!IS_GEN2(dev_priv))
4569
		dev->vblank_disable_immediate = true;
4570
 
6084 serge 4571
	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4293 Serge 4572
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
3480 Serge 4573
 
5354 serge 4574
	if (IS_CHERRYVIEW(dev_priv)) {
5060 serge 4575
		dev->driver->irq_handler = cherryview_irq_handler;
4576
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
4577
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
4578
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
4579
		dev->driver->enable_vblank = valleyview_enable_vblank;
4580
		dev->driver->disable_vblank = valleyview_disable_vblank;
4581
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
5354 serge 4582
	} else if (IS_VALLEYVIEW(dev_priv)) {
3243 Serge 4583
		dev->driver->irq_handler = valleyview_irq_handler;
4584
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
4585
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
4293 Serge 4586
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
4587
		dev->driver->enable_vblank = valleyview_enable_vblank;
4588
		dev->driver->disable_vblank = valleyview_disable_vblank;
3746 Serge 4589
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
5354 serge 4590
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
4560 Serge 4591
		dev->driver->irq_handler = gen8_irq_handler;
5060 serge 4592
		dev->driver->irq_preinstall = gen8_irq_reset;
4560 Serge 4593
		dev->driver->irq_postinstall = gen8_irq_postinstall;
4594
		dev->driver->irq_uninstall = gen8_irq_uninstall;
4595
		dev->driver->enable_vblank = gen8_enable_vblank;
4596
		dev->driver->disable_vblank = gen8_disable_vblank;
6084 serge 4597
		if (IS_BROXTON(dev))
4598
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4599
		else if (HAS_PCH_SPT(dev))
4600
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
4601
		else
4602
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
2351 Serge 4603
	} else if (HAS_PCH_SPLIT(dev)) {
3243 Serge 4604
		dev->driver->irq_handler = ironlake_irq_handler;
5060 serge 4605
		dev->driver->irq_preinstall = ironlake_irq_reset;
3243 Serge 4606
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
4293 Serge 4607
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
4608
		dev->driver->enable_vblank = ironlake_enable_vblank;
4609
		dev->driver->disable_vblank = ironlake_disable_vblank;
6084 serge 4610
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
2351 Serge 4611
	} else {
5354 serge 4612
		if (INTEL_INFO(dev_priv)->gen == 2) {
4613
		} else if (INTEL_INFO(dev_priv)->gen == 3) {
3243 Serge 4614
			dev->driver->irq_preinstall = i915_irq_preinstall;
4615
			dev->driver->irq_postinstall = i915_irq_postinstall;
4293 Serge 4616
			dev->driver->irq_uninstall = i915_irq_uninstall;
3243 Serge 4617
			dev->driver->irq_handler = i915_irq_handler;
3031 serge 4618
		} else {
3243 Serge 4619
			dev->driver->irq_preinstall = i965_irq_preinstall;
4620
			dev->driver->irq_postinstall = i965_irq_postinstall;
4293 Serge 4621
			dev->driver->irq_uninstall = i965_irq_uninstall;
3243 Serge 4622
			dev->driver->irq_handler = i965_irq_handler;
6084 serge 4623
		}
4624
		if (I915_HAS_HOTPLUG(dev_priv))
3746 Serge 4625
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4293 Serge 4626
		dev->driver->enable_vblank = i915_enable_vblank;
4627
		dev->driver->disable_vblank = i915_disable_vblank;
2351 Serge 4628
	}
3480 Serge 4629
}
3243 Serge 4630
 
5354 serge 4631
/**
4632
 * intel_irq_install - enables the hardware interrupt
4633
 * @dev_priv: i915 device instance
4634
 *
4635
 * This function enables the hardware interrupt handling, but leaves the hotplug
4636
 * handling still disabled. It is called after intel_irq_init().
4637
 *
4638
 * In the driver load and resume code we need working interrupts in a few places
4639
 * but don't want to deal with the hassle of concurrent probe and hotplug
4640
 * workers. Hence the split into this two-stage approach.
4641
 */
4642
int intel_irq_install(struct drm_i915_private *dev_priv)
3243 Serge 4643
{
5354 serge 4644
	/*
4645
	 * We enable some interrupt sources in our postinstall hooks, so mark
4646
	 * interrupts as enabled _before_ actually enabling them to avoid
4647
	 * special cases in our ordering checks.
4648
	 */
4649
	dev_priv->pm.irqs_enabled = true;
2351 Serge 4650
 
5354 serge 4651
	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
3243 Serge 4652
}
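/* Load-path usage of the two-stage scheme described above (a sketch; the
 * calling code lives in the driver load path, and intel_hpd_init() is the
 * matching third stage for hotplug):
 */
#if 0
	intel_irq_init(dev_priv);		/* vtables and work items, no IRQs yet */
	int ret = intel_irq_install(dev_priv);	/* hardware interrupt goes live */
	/* ... modeset initialization ... */
	intel_hpd_init(dev_priv);		/* hotplug once probing is safe */
#endif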
4653
 
5354 serge 4654
/**
4655
 * intel_irq_uninstall - finalizes all irq handling
4656
 * @dev_priv: i915 device instance
4657
 *
4658
 * This stops interrupt and hotplug handling and unregisters and frees all
4659
 * resources acquired in the init functions.
4660
 */
4661
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
2351 Serge 4662
{
5354 serge 4663
//	drm_irq_uninstall(dev_priv->dev);
6320 serge 4664
	intel_hpd_cancel_work(dev_priv);
5354 serge 4665
	dev_priv->pm.irqs_enabled = false;
4666
}
2351 Serge 4667
 
5354 serge 4668
/**
4669
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4670
 * @dev_priv: i915 device instance
4671
 *
4672
 * This function is used to disable interrupts at runtime, both in the runtime
4673
 * pm and the system suspend/resume code.
4674
 */
4675
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4676
{
4677
	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
4678
	dev_priv->pm.irqs_enabled = false;
7144 serge 4679
	synchronize_irq(dev_priv->dev->irq);
4104 Serge 4680
}
2351 Serge 4681
 
5354 serge 4682
/**
4683
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4684
 * @dev_priv: i915 device instance
4685
 *
4686
 * This function is used to enable interrupts at runtime, both in the runtime
4687
 * pm and the system suspend/resume code.
4688
 */
4689
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4690
{
4691
	dev_priv->pm.irqs_enabled = true;
4692
	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
4693
	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
4694
}
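 
/* Runtime-pm pairing for the two helpers above (a sketch; the real
 * callers sit in the suspend/resume paths of the driver core):
 */
#if 0
	/* runtime suspend: */
	intel_runtime_pm_disable_interrupts(dev_priv);
	/* ... device power gated ... */

	/* runtime resume: */
	intel_runtime_pm_enable_interrupts(dev_priv);
#endif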