Subversion Repositories Kolibri OS


Rev Author Line No. Line
2351 Serge 1
/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2
 */
3
/*
4
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5
 * All Rights Reserved.
6
 *
7
 * Permission is hereby granted, free of charge, to any person obtaining a
8
 * copy of this software and associated documentation files (the
9
 * "Software"), to deal in the Software without restriction, including
10
 * without limitation the rights to use, copy, modify, merge, publish,
11
 * distribute, sub license, and/or sell copies of the Software, and to
12
 * permit persons to whom the Software is furnished to do so, subject to
13
 * the following conditions:
14
 *
15
 * The above copyright notice and this permission notice (including the
16
 * next paragraph) shall be included in all copies or substantial portions
17
 * of the Software.
18
 *
19
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
 *
27
 */
28
 
3746 Serge 29
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3031 serge 30
 
6103 serge 31
#include 
3031 serge 32
#include 
6088 serge 33
#include 
3031 serge 34
#include 
35
#include 
2351 Serge 36
#include "i915_drv.h"
37
#include "i915_trace.h"
38
#include "intel_drv.h"
39
 
5354 serge 40
/**
41
 * DOC: interrupt handling
42
 *
43
 * These functions provide the basic support for enabling and disabling the
44
 * interrupt handling support. There's a lot more functionality in i915_irq.c
45
 * and related files, but that will be described in separate chapters.
46
 */
4104 Serge 47
 
6084 serge 48
static const u32 hpd_ilk[HPD_NUM_PINS] = {
49
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
50
};
51
 
52
static const u32 hpd_ivb[HPD_NUM_PINS] = {
53
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
54
};
55
 
56
static const u32 hpd_bdw[HPD_NUM_PINS] = {
57
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
58
};
59
 
60
static const u32 hpd_ibx[HPD_NUM_PINS] = {
3746 Serge 61
	[HPD_CRT] = SDE_CRT_HOTPLUG,
62
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
63
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
64
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
65
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
66
};
3031 serge 67
 
6084 serge 68
static const u32 hpd_cpt[HPD_NUM_PINS] = {
3746 Serge 69
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
70
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
71
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
72
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
73
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
74
};
75
 
6084 serge 76
static const u32 hpd_spt[HPD_NUM_PINS] = {
77
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
78
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
79
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
80
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
81
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
82
};
83
 
84
static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
3746 Serge 85
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
86
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
87
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
88
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
89
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
90
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
91
};
92
 
6084 serge 93
static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
3746 Serge 94
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
95
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
96
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
97
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
98
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
99
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
100
};
101
 
6084 serge 102
static const u32 hpd_status_i915[HPD_NUM_PINS] = {
3746 Serge 103
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
104
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
105
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
106
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
107
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
108
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
109
};
110
 
6084 serge 111
/* BXT hpd list */
112
static const u32 hpd_bxt[HPD_NUM_PINS] = {
113
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
114
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
115
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
116
};
117
 
5060 serge 118
/* IIR can theoretically queue up two events. Be paranoid. */
119
#define GEN8_IRQ_RESET_NDX(type, which) do { \
120
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
121
	POSTING_READ(GEN8_##type##_IMR(which)); \
122
	I915_WRITE(GEN8_##type##_IER(which), 0); \
123
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
124
	POSTING_READ(GEN8_##type##_IIR(which)); \
125
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
126
	POSTING_READ(GEN8_##type##_IIR(which)); \
127
} while (0)
3746 Serge 128
 
5060 serge 129
#define GEN5_IRQ_RESET(type) do { \
130
	I915_WRITE(type##IMR, 0xffffffff); \
131
	POSTING_READ(type##IMR); \
132
	I915_WRITE(type##IER, 0); \
133
	I915_WRITE(type##IIR, 0xffffffff); \
134
	POSTING_READ(type##IIR); \
135
	I915_WRITE(type##IIR, 0xffffffff); \
136
	POSTING_READ(type##IIR); \
137
} while (0)
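/*
 * Note the reset sequence in the two macros above: mask everything (IMR),
 * disable delivery (IER), then clear IIR twice with a posting read after
 * each write, since a second event may have latched while the first one
 * was being cleared.
 */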
138
 
139
/*
140
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
141
 */
6084 serge 142
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, u32 reg)
143
{
144
	u32 val = I915_READ(reg);
5060 serge 145
 
6084 serge 146
	if (val == 0)
147
		return;
148
 
149
	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
150
	     reg, val);
151
	I915_WRITE(reg, 0xffffffff);
152
	POSTING_READ(reg);
153
	I915_WRITE(reg, 0xffffffff);
154
	POSTING_READ(reg);
155
}
156
 
5060 serge 157
#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
6084 serge 158
	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
5354 serge 159
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
5060 serge 160
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
5354 serge 161
	POSTING_READ(GEN8_##type##_IMR(which)); \
5060 serge 162
} while (0)
163
 
164
#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
6084 serge 165
	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
5354 serge 166
	I915_WRITE(type##IER, (ier_val)); \
5060 serge 167
	I915_WRITE(type##IMR, (imr_val)); \
5354 serge 168
	POSTING_READ(type##IMR); \
5060 serge 169
} while (0)
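/*
 * The init macros mirror the reset macros: first assert that IIR is already
 * clear, then program IER and IMR, and finish with a posting read so the
 * writes are flushed before an interrupt can fire.
 */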
170
 
5354 serge 171
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
3031 serge 172
 
2351 Serge 173
/* For display hotplug interrupt */
6084 serge 174
static inline void
175
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
176
				     uint32_t mask,
177
				     uint32_t bits)
2351 Serge 178
{
6084 serge 179
	uint32_t val;
180
 
4104 Serge 181
	assert_spin_locked(&dev_priv->irq_lock);
6084 serge 182
	WARN_ON(bits & ~mask);
4104 Serge 183
 
6084 serge 184
	val = I915_READ(PORT_HOTPLUG_EN);
185
	val &= ~mask;
186
	val |= bits;
187
	I915_WRITE(PORT_HOTPLUG_EN, val);
188
}
4104 Serge 189
 
6084 serge 190
/**
191
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
192
 * @dev_priv: driver private
193
 * @mask: bits to update
194
 * @bits: bits to enable
195
 * NOTE: the HPD enable bits are modified both inside and outside
196
 * of an interrupt context. To prevent read-modify-write cycles from
197
 * interfering, these bits are protected by a spinlock. Since this
198
 * function is usually not called from a context where the lock is
199
 * held already, this function acquires the lock itself. A non-locking
200
 * version is also available.
201
 */
202
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
203
				   uint32_t mask,
204
				   uint32_t bits)
205
{
206
	spin_lock_irq(&dev_priv->irq_lock);
207
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
208
	spin_unlock_irq(&dev_priv->irq_lock);
2351 Serge 209
}
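/*
 * Illustrative example (not taken from this file): to enable just the port B
 * hotplug interrupt while leaving the other HPD enable bits untouched, a
 * caller could do
 *
 *	i915_hotplug_interrupt_update(dev_priv, PORTB_HOTPLUG_INT_EN,
 *				      PORTB_HOTPLUG_INT_EN);
 *
 * and pass 0 as @bits to disable it again.
 */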
210
 
6084 serge 211
/**
212
 * ilk_update_display_irq - update DEIMR
213
 * @dev_priv: driver private
214
 * @interrupt_mask: mask of interrupt bits to update
215
 * @enabled_irq_mask: mask of interrupt bits to enable
216
 */
217
static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
218
				   uint32_t interrupt_mask,
219
				   uint32_t enabled_irq_mask)
2351 Serge 220
{
6084 serge 221
	uint32_t new_val;
222
 
4104 Serge 223
	assert_spin_locked(&dev_priv->irq_lock);
224
 
6084 serge 225
	WARN_ON(enabled_irq_mask & ~interrupt_mask);
226
 
5354 serge 227
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
4104 Serge 228
		return;
229
 
6084 serge 230
	new_val = dev_priv->irq_mask;
231
	new_val &= ~interrupt_mask;
232
	new_val |= (~enabled_irq_mask & interrupt_mask);
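	/*
	 * DEIMR has inverted sense: a set bit masks (disables) the interrupt,
	 * which is why the bits being enabled are cleared in new_val above.
	 */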
233
 
234
	if (new_val != dev_priv->irq_mask) {
235
		dev_priv->irq_mask = new_val;
236
		I915_WRITE(DEIMR, dev_priv->irq_mask);
237
		POSTING_READ(DEIMR);
238
	}
2351 Serge 239
}
3031 serge 240
 
6084 serge 241
void
242
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
243
{
244
	ilk_update_display_irq(dev_priv, mask, mask);
245
}
246
 
247
void
248
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
249
{
250
	ilk_update_display_irq(dev_priv, mask, 0);
251
}
252
 
4104 Serge 253
/**
254
 * ilk_update_gt_irq - update GTIMR
255
 * @dev_priv: driver private
256
 * @interrupt_mask: mask of interrupt bits to update
257
 * @enabled_irq_mask: mask of interrupt bits to enable
258
 */
259
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
260
			      uint32_t interrupt_mask,
261
			      uint32_t enabled_irq_mask)
262
{
263
	assert_spin_locked(&dev_priv->irq_lock);
264
 
6084 serge 265
	WARN_ON(enabled_irq_mask & ~interrupt_mask);
266
 
5060 serge 267
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
4104 Serge 268
		return;
269
 
270
	dev_priv->gt_irq_mask &= ~interrupt_mask;
271
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
272
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
273
	POSTING_READ(GTIMR);
274
}
275
 
5060 serge 276
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
4104 Serge 277
{
278
	ilk_update_gt_irq(dev_priv, mask, mask);
279
}
280
 
5060 serge 281
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
4104 Serge 282
{
283
	ilk_update_gt_irq(dev_priv, mask, 0);
284
}
285
 
5354 serge 286
static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
4104 Serge 287
{
5354 serge 288
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
4104 Serge 289
}
290
 
5354 serge 291
static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
4104 Serge 292
{
5354 serge 293
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
4104 Serge 294
}
295
 
5354 serge 296
static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
4104 Serge 297
{
5354 serge 298
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
4104 Serge 299
}
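/*
 * On gen8+ the PM/RPS interrupt bits live in GT interrupt register bank 2
 * (GEN8_GT_IIR/IMR/IER(2)) rather than in the dedicated GEN6_PM* registers;
 * the three helpers above hide that difference from the callers below.
 */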
300
 
5060 serge 301
/**
5354 serge 302
  * snb_update_pm_irq - update GEN6_PMIMR
5060 serge 303
  * @dev_priv: driver private
304
  * @interrupt_mask: mask of interrupt bits to update
305
  * @enabled_irq_mask: mask of interrupt bits to enable
306
  */
5354 serge 307
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
5060 serge 308
			      uint32_t interrupt_mask,
309
			      uint32_t enabled_irq_mask)
310
{
311
	uint32_t new_val;
312
 
6084 serge 313
	WARN_ON(enabled_irq_mask & ~interrupt_mask);
314
 
5060 serge 315
	assert_spin_locked(&dev_priv->irq_lock);
316
 
317
	new_val = dev_priv->pm_irq_mask;
318
	new_val &= ~interrupt_mask;
319
	new_val |= (~enabled_irq_mask & interrupt_mask);
320
 
321
	if (new_val != dev_priv->pm_irq_mask) {
322
		dev_priv->pm_irq_mask = new_val;
5354 serge 323
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
324
		POSTING_READ(gen6_pm_imr(dev_priv));
5060 serge 325
	}
326
}
327
 
5354 serge 328
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
5060 serge 329
{
5354 serge 330
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
331
		return;
332
 
333
	snb_update_pm_irq(dev_priv, mask, mask);
5060 serge 334
}
335
 
5354 serge 336
static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
337
				  uint32_t mask)
5060 serge 338
{
5354 serge 339
	snb_update_pm_irq(dev_priv, mask, 0);
5060 serge 340
}
341
 
5354 serge 342
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
4104 Serge 343
{
5354 serge 344
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
345
		return;
4104 Serge 346
 
5354 serge 347
	__gen6_disable_pm_irq(dev_priv, mask);
4104 Serge 348
}
349
 
5354 serge 350
void gen6_reset_rps_interrupts(struct drm_device *dev)
5060 serge 351
{
352
	struct drm_i915_private *dev_priv = dev->dev_private;
5354 serge 353
	uint32_t reg = gen6_pm_iir(dev_priv);
5060 serge 354
 
5354 serge 355
	spin_lock_irq(&dev_priv->irq_lock);
356
	I915_WRITE(reg, dev_priv->pm_rps_events);
357
	I915_WRITE(reg, dev_priv->pm_rps_events);
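	/* Written twice: IIR can queue up two events (see the reset macros above). */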
6084 serge 358
	POSTING_READ(reg);
359
	dev_priv->rps.pm_iir = 0;
5354 serge 360
	spin_unlock_irq(&dev_priv->irq_lock);
5060 serge 361
}
362
 
5354 serge 363
void gen6_enable_rps_interrupts(struct drm_device *dev)
5060 serge 364
{
365
	struct drm_i915_private *dev_priv = dev->dev_private;
366
 
5354 serge 367
	spin_lock_irq(&dev_priv->irq_lock);
5060 serge 368
 
5354 serge 369
	WARN_ON(dev_priv->rps.pm_iir);
370
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
371
	dev_priv->rps.interrupts_enabled = true;
372
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
373
				dev_priv->pm_rps_events);
374
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
375
 
376
	spin_unlock_irq(&dev_priv->irq_lock);
5060 serge 377
}
378
 
6084 serge 379
u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
380
{
381
	/*
382
	 * SNB,IVB can hard hang, while VLV,CHV may hard hang, on a looping batchbuffer
383
	 * if GEN6_PM_UP_EI_EXPIRED is masked.
384
	 *
385
	 * TODO: verify if this can be reproduced on VLV,CHV.
386
	 */
387
	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
388
		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
389
 
390
	if (INTEL_INFO(dev_priv)->gen >= 8)
391
		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
392
 
393
	return mask;
394
}
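/*
 * Typical use (see gen6_disable_rps_interrupts() below): the result of
 * gen6_sanitize_rps_pm_mask(dev_priv, ~0) is written to GEN6_PMINTRMSK so
 * that everything is masked except the bits that must stay unmasked to
 * avoid the hang described above.
 */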
395
 
5354 serge 396
void gen6_disable_rps_interrupts(struct drm_device *dev)
4104 Serge 397
{
398
	struct drm_i915_private *dev_priv = dev->dev_private;
399
 
5354 serge 400
	spin_lock_irq(&dev_priv->irq_lock);
401
	dev_priv->rps.interrupts_enabled = false;
402
	spin_unlock_irq(&dev_priv->irq_lock);
4104 Serge 403
 
5354 serge 404
	cancel_work_sync(&dev_priv->rps.work);
4104 Serge 405
 
5354 serge 406
	spin_lock_irq(&dev_priv->irq_lock);
4104 Serge 407
 
6084 serge 408
	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
4104 Serge 409
 
5354 serge 410
	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
411
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
412
				~dev_priv->pm_rps_events);
4104 Serge 413
 
6084 serge 414
	spin_unlock_irq(&dev_priv->irq_lock);
4560 Serge 415
 
416
}
417
 
4104 Serge 418
/**
6084 serge 419
  * bdw_update_port_irq - update DE port interrupt
420
  * @dev_priv: driver private
421
  * @interrupt_mask: mask of interrupt bits to update
422
  * @enabled_irq_mask: mask of interrupt bits to enable
423
  */
424
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
425
				uint32_t interrupt_mask,
426
				uint32_t enabled_irq_mask)
427
{
428
	uint32_t new_val;
429
	uint32_t old_val;
430
 
431
	assert_spin_locked(&dev_priv->irq_lock);
432
 
433
	WARN_ON(enabled_irq_mask & ~interrupt_mask);
434
 
435
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
436
		return;
437
 
438
	old_val = I915_READ(GEN8_DE_PORT_IMR);
439
 
440
	new_val = old_val;
441
	new_val &= ~interrupt_mask;
442
	new_val |= (~enabled_irq_mask & interrupt_mask);
443
 
444
	if (new_val != old_val) {
445
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
446
		POSTING_READ(GEN8_DE_PORT_IMR);
447
	}
448
}
449
 
450
/**
4104 Serge 451
 * ibx_display_interrupt_update - update SDEIMR
452
 * @dev_priv: driver private
453
 * @interrupt_mask: mask of interrupt bits to update
454
 * @enabled_irq_mask: mask of interrupt bits to enable
455
 */
5354 serge 456
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
6084 serge 457
				  uint32_t interrupt_mask,
458
				  uint32_t enabled_irq_mask)
4104 Serge 459
{
460
	uint32_t sdeimr = I915_READ(SDEIMR);
461
	sdeimr &= ~interrupt_mask;
462
	sdeimr |= (~enabled_irq_mask & interrupt_mask);
463
 
6084 serge 464
	WARN_ON(enabled_irq_mask & ~interrupt_mask);
465
 
4104 Serge 466
	assert_spin_locked(&dev_priv->irq_lock);
467
 
5060 serge 468
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
4104 Serge 469
		return;
470
 
471
	I915_WRITE(SDEIMR, sdeimr);
472
	POSTING_READ(SDEIMR);
473
}
474
 
5060 serge 475
static void
476
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
477
		       u32 enable_mask, u32 status_mask)
3031 serge 478
{
6084 serge 479
	u32 reg = PIPESTAT(pipe);
5060 serge 480
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
3031 serge 481
 
4104 Serge 482
	assert_spin_locked(&dev_priv->irq_lock);
5354 serge 483
	WARN_ON(!intel_irqs_enabled(dev_priv));
4104 Serge 484
 
5060 serge 485
	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
486
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
487
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
488
		      pipe_name(pipe), enable_mask, status_mask))
3746 Serge 489
		return;
490
 
5060 serge 491
	if ((pipestat & enable_mask) == enable_mask)
492
		return;
493
 
494
	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
495
 
6084 serge 496
	/* Enable the interrupt, clear any pending status */
5060 serge 497
	pipestat |= enable_mask | status_mask;
3746 Serge 498
	I915_WRITE(reg, pipestat);
6084 serge 499
	POSTING_READ(reg);
3031 serge 500
}
501
 
5060 serge 502
static void
503
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
504
		        u32 enable_mask, u32 status_mask)
3031 serge 505
{
6084 serge 506
	u32 reg = PIPESTAT(pipe);
5060 serge 507
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
3031 serge 508
 
4104 Serge 509
	assert_spin_locked(&dev_priv->irq_lock);
5354 serge 510
	WARN_ON(!intel_irqs_enabled(dev_priv));
4104 Serge 511
 
5060 serge 512
	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
513
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
514
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
515
		      pipe_name(pipe), enable_mask, status_mask))
3746 Serge 516
		return;
517
 
5060 serge 518
	if ((pipestat & enable_mask) == 0)
519
		return;
520
 
521
	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
522
 
523
	pipestat &= ~enable_mask;
3746 Serge 524
	I915_WRITE(reg, pipestat);
6084 serge 525
	POSTING_READ(reg);
3031 serge 526
}
527
 
5060 serge 528
static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
529
{
530
	u32 enable_mask = status_mask << 16;
531
 
532
	/*
533
	 * On pipe A we don't support the PSR interrupt yet,
534
	 * on pipe B and C the same bit MBZ.
535
	 */
536
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
537
		return 0;
538
	/*
539
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
540
	 * A the same bit is for perf counters which we don't use either.
541
	 */
542
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
543
		return 0;
544
 
545
	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
546
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
547
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
548
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
549
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
550
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
551
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
552
 
553
	return enable_mask;
554
}
555
 
556
void
557
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
558
		     u32 status_mask)
559
{
560
	u32 enable_mask;
561
 
562
	if (IS_VALLEYVIEW(dev_priv->dev))
563
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
564
							   status_mask);
565
	else
566
		enable_mask = status_mask << 16;
567
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
568
}
569
 
570
void
571
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
572
		      u32 status_mask)
573
{
574
	u32 enable_mask;
575
 
576
	if (IS_VALLEYVIEW(dev_priv->dev))
577
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
578
							   status_mask);
579
	else
580
		enable_mask = status_mask << 16;
581
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
582
}
583
 
3031 serge 584
/**
4104 Serge 585
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
6084 serge 586
 * @dev: drm device
3031 serge 587
 */
4104 Serge 588
static void i915_enable_asle_pipestat(struct drm_device *dev)
3031 serge 589
{
5060 serge 590
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 591
 
4104 Serge 592
	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
3031 serge 593
		return;
594
 
5354 serge 595
	spin_lock_irq(&dev_priv->irq_lock);
3031 serge 596
 
5060 serge 597
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
6084 serge 598
	if (INTEL_INFO(dev)->gen >= 4)
4560 Serge 599
		i915_enable_pipestat(dev_priv, PIPE_A,
5060 serge 600
				     PIPE_LEGACY_BLC_EVENT_STATUS);
3031 serge 601
 
5354 serge 602
	spin_unlock_irq(&dev_priv->irq_lock);
3031 serge 603
}
604
 
5060 serge 605
/*
606
 * This timing diagram depicts the video signal in and
607
 * around the vertical blanking period.
608
 *
609
 * Assumptions about the fictitious mode used in this example:
610
 *  vblank_start >= 3
611
 *  vsync_start = vblank_start + 1
612
 *  vsync_end = vblank_start + 2
613
 *  vtotal = vblank_start + 3
614
 *
615
 *           start of vblank:
616
 *           latch double buffered registers
617
 *           increment frame counter (ctg+)
618
 *           generate start of vblank interrupt (gen4+)
619
 *           |
620
 *           |          frame start:
621
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
622
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
623
 *           |          |
624
 *           |          |  start of vsync:
625
 *           |          |  generate vsync interrupt
626
 *           |          |  |
627
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
628
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
629
 * ----va---> <-----------------vb--------------------> <--------va-------------
630
 *       |          |       <----vs----->                     |
631
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
632
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
633
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
634
 *       |          |                                         |
635
 *       last visible pixel                                   first visible pixel
636
 *                  |                                         increment frame counter (gen3/4)
637
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
638
 *
639
 * x  = horizontal active
640
 * _  = horizontal blanking
641
 * hs = horizontal sync
642
 * va = vertical active
643
 * vb = vertical blanking
644
 * vs = vertical sync
645
 * vbs = vblank_start (number)
646
 *
647
 * Summary:
648
 * - most events happen at the start of horizontal sync
649
 * - frame start happens at the start of horizontal blank, 1-4 lines
650
 *   (depending on PIPECONF settings) after the start of vblank
651
 * - gen3/4 pixel and frame counter are synchronized with the start
652
 *   of horizontal active on the first line of vertical active
653
 */
654
 
6084 serge 655
static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
4560 Serge 656
{
657
	/* Gen2 doesn't have a hardware frame counter */
658
	return 0;
659
}
660
 
3031 serge 661
/* Called from drm generic code, passed a 'crtc', which
662
 * we use as a pipe index
663
 */
6084 serge 664
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
3031 serge 665
{
5060 serge 666
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 667
	unsigned long high_frame;
668
	unsigned long low_frame;
5060 serge 669
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
6084 serge 670
	struct intel_crtc *intel_crtc =
671
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
672
	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
3031 serge 673
 
6084 serge 674
	htotal = mode->crtc_htotal;
675
	hsync_start = mode->crtc_hsync_start;
676
	vbl_start = mode->crtc_vblank_start;
677
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
678
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
3031 serge 679
 
5060 serge 680
	/* Convert to pixel count */
6084 serge 681
	vbl_start *= htotal;
4560 Serge 682
 
5060 serge 683
	/* Start of vblank event occurs at start of hsync */
684
	vbl_start -= htotal - hsync_start;
685
 
3031 serge 686
	high_frame = PIPEFRAME(pipe);
687
	low_frame = PIPEFRAMEPIXEL(pipe);
688
 
689
	/*
690
	 * High & low register fields aren't synchronized, so make sure
691
	 * we get a low value that's stable across two reads of the high
692
	 * register.
693
	 */
694
	do {
695
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
4560 Serge 696
		low   = I915_READ(low_frame);
3031 serge 697
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
698
	} while (high1 != high2);
699
 
700
	high1 >>= PIPE_FRAME_HIGH_SHIFT;
4560 Serge 701
	pixel = low & PIPE_PIXEL_MASK;
3031 serge 702
	low >>= PIPE_FRAME_LOW_SHIFT;
4560 Serge 703
 
704
	/*
705
	 * The frame counter increments at beginning of active.
706
	 * Cook up a vblank counter by also checking the pixel
707
	 * counter against vblank start.
708
	 */
709
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
3031 serge 710
}
711
 
6084 serge 712
static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
3031 serge 713
{
5060 serge 714
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 715
 
6084 serge 716
	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
3031 serge 717
}
718
 
4560 Serge 719
/* raw reads, only for fast reads of display block, no need for forcewake etc. */
720
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
721
 
5060 serge 722
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
4560 Serge 723
{
5060 serge 724
	struct drm_device *dev = crtc->base.dev;
4560 Serge 725
	struct drm_i915_private *dev_priv = dev->dev_private;
6084 serge 726
	const struct drm_display_mode *mode = &crtc->base.hwmode;
5060 serge 727
	enum pipe pipe = crtc->pipe;
728
	int position, vtotal;
4560 Serge 729
 
5060 serge 730
	vtotal = mode->crtc_vtotal;
731
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
732
		vtotal /= 2;
4560 Serge 733
 
5060 serge 734
	if (IS_GEN2(dev))
735
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
736
	else
737
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
738
 
739
	/*
6084 serge 740
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
741
	 * read it just before the start of vblank.  So try it again
742
	 * so we don't accidentally end up spanning a vblank frame
743
	 * increment, causing the pipe_update_end() code to squawk at us.
744
	 *
745
	 * The nature of this problem means we can't simply check the ISR
746
	 * bit and return the vblank start value; nor can we use the scanline
747
	 * debug register in the transcoder as it appears to have the same
748
	 * problem.  We may need to extend this to include other platforms,
749
	 * but so far testing only shows the problem on HSW.
750
	 */
751
	if (HAS_DDI(dev) && !position) {
752
		int i, temp;
753
 
754
		for (i = 0; i < 100; i++) {
755
			udelay(1);
756
			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
757
				DSL_LINEMASK_GEN3;
758
			if (temp != position) {
759
				position = temp;
760
				break;
761
			}
762
		}
763
	}
764
 
765
	/*
5060 serge 766
	 * See update_scanline_offset() for the details on the
767
	 * scanline_offset adjustment.
768
	 */
769
	return (position + crtc->scanline_offset) % vtotal;
4560 Serge 770
}
771
 
6084 serge 772
static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
4560 Serge 773
				    unsigned int flags, int *vpos, int *hpos,
6084 serge 774
				    ktime_t *stime, ktime_t *etime,
775
				    const struct drm_display_mode *mode)
3746 Serge 776
{
4560 Serge 777
	struct drm_i915_private *dev_priv = dev->dev_private;
778
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
779
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
780
	int position;
5060 serge 781
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
3746 Serge 782
	bool in_vbl = true;
783
	int ret = 0;
4560 Serge 784
	unsigned long irqflags;
3746 Serge 785
 
6084 serge 786
	if (WARN_ON(!mode->crtc_clock)) {
3746 Serge 787
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
788
				 "pipe %c\n", pipe_name(pipe));
789
		return 0;
790
	}
791
 
4560 Serge 792
	htotal = mode->crtc_htotal;
5060 serge 793
	hsync_start = mode->crtc_hsync_start;
4560 Serge 794
	vtotal = mode->crtc_vtotal;
795
	vbl_start = mode->crtc_vblank_start;
796
	vbl_end = mode->crtc_vblank_end;
3746 Serge 797
 
4560 Serge 798
	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
799
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
800
		vbl_end /= 2;
801
		vtotal /= 2;
802
	}
803
 
804
	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
805
 
806
	/*
807
	 * Lock uncore.lock, as we will do multiple timing critical raw
808
	 * register reads, potentially with preemption disabled, so the
809
	 * following code must not block on uncore.lock.
810
	 */
811
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
812
 
813
	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
814
 
6084 serge 815
	/* Get optional system timestamp before query. */
816
	if (stime)
817
		*stime = ktime_get();
4560 Serge 818
 
819
	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
3746 Serge 820
		/* No obvious pixelcount register. Only query vertical
821
		 * scanout position from Display scan line register.
822
		 */
5060 serge 823
		position = __intel_get_crtc_scanline(intel_crtc);
3746 Serge 824
	} else {
825
		/* Have access to pixelcount since start of frame.
826
		 * We can split this into vertical and horizontal
827
		 * scanout position.
828
		 */
4560 Serge 829
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
3746 Serge 830
 
4560 Serge 831
		/* convert to pixel counts */
832
		vbl_start *= htotal;
833
		vbl_end *= htotal;
834
		vtotal *= htotal;
5060 serge 835
 
836
		/*
837
		 * In interlaced modes, the pixel counter counts all pixels,
838
		 * so one field will have htotal more pixels. In order to avoid
839
		 * the reported position from jumping backwards when the pixel
840
		 * counter is beyond the length of the shorter field, just
841
		 * clamp the position the length of the shorter field. This
842
		 * matches how the scanline counter based position works since
843
		 * the scanline counter doesn't count the two half lines.
844
		 */
845
		if (position >= vtotal)
846
			position = vtotal - 1;
847
 
848
		/*
849
		 * Start of vblank interrupt is triggered at start of hsync,
850
		 * just prior to the first active line of vblank. However we
851
		 * consider lines to start at the leading edge of horizontal
852
		 * active. So, should we get here before we've crossed into
853
		 * the horizontal active of the first line in vblank, we would
854
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
855
		 * always add htotal-hsync_start to the current pixel position.
856
		 */
857
		position = (position + htotal - hsync_start) % vtotal;
3746 Serge 858
	}
859
 
6084 serge 860
	/* Get optional system timestamp after query. */
861
	if (etime)
862
		*etime = ktime_get();
3746 Serge 863
 
4560 Serge 864
	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
3746 Serge 865
 
4560 Serge 866
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
3746 Serge 867
 
4560 Serge 868
	in_vbl = position >= vbl_start && position < vbl_end;
3746 Serge 869
 
4560 Serge 870
	/*
871
	 * While in vblank, position will be negative
872
	 * counting up towards 0 at vbl_end. And outside
873
	 * vblank, position will be positive counting
874
	 * up since vbl_end.
875
	 */
876
	if (position >= vbl_start)
877
		position -= vbl_end;
878
	else
879
		position += vtotal - vbl_end;
3746 Serge 880
 
4560 Serge 881
	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
882
		*vpos = position;
883
		*hpos = 0;
884
	} else {
885
		*vpos = position / htotal;
886
		*hpos = position - (*vpos * htotal);
887
	}
888
 
3746 Serge 889
	/* In vblank? */
890
	if (in_vbl)
5354 serge 891
		ret |= DRM_SCANOUTPOS_IN_VBLANK;
3746 Serge 892
 
893
	return ret;
894
}
895
 
5060 serge 896
int intel_get_crtc_scanline(struct intel_crtc *crtc)
897
{
898
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
899
	unsigned long irqflags;
900
	int position;
901
 
902
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
903
	position = __intel_get_crtc_scanline(crtc);
904
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
905
 
906
	return position;
907
}
908
 
6084 serge 909
static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
3746 Serge 910
			      int *max_error,
911
			      struct timeval *vblank_time,
912
			      unsigned flags)
913
{
914
	struct drm_crtc *crtc;
915
 
6084 serge 916
	if (pipe >= INTEL_INFO(dev)->num_pipes) {
917
		DRM_ERROR("Invalid crtc %u\n", pipe);
3746 Serge 918
		return -EINVAL;
919
	}
920
 
921
	/* Get drm_crtc to timestamp: */
922
	crtc = intel_get_crtc_for_pipe(dev, pipe);
923
	if (crtc == NULL) {
6084 serge 924
		DRM_ERROR("Invalid crtc %u\n", pipe);
3746 Serge 925
		return -EINVAL;
926
	}
927
 
6084 serge 928
	if (!crtc->hwmode.crtc_clock) {
929
		DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
3746 Serge 930
		return -EBUSY;
931
	}
932
 
933
	/* Helper routine in DRM core does all the work: */
934
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
935
						     vblank_time, flags,
6084 serge 936
						     &crtc->hwmode);
3746 Serge 937
}
938
 
4104 Serge 939
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
3746 Serge 940
{
5060 serge 941
	struct drm_i915_private *dev_priv = dev->dev_private;
3746 Serge 942
	u32 busy_up, busy_down, max_avg, min_avg;
943
	u8 new_delay;
944
 
4104 Serge 945
	spin_lock(&mchdev_lock);
3746 Serge 946
 
947
	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
948
 
949
	new_delay = dev_priv->ips.cur_delay;
950
 
951
	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
952
	busy_up = I915_READ(RCPREVBSYTUPAVG);
953
	busy_down = I915_READ(RCPREVBSYTDNAVG);
954
	max_avg = I915_READ(RCBMAXAVG);
955
	min_avg = I915_READ(RCBMINAVG);
956
 
957
	/* Handle RCS change request from hw */
958
	if (busy_up > max_avg) {
959
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
960
			new_delay = dev_priv->ips.cur_delay - 1;
961
		if (new_delay < dev_priv->ips.max_delay)
962
			new_delay = dev_priv->ips.max_delay;
963
	} else if (busy_down < min_avg) {
964
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
965
			new_delay = dev_priv->ips.cur_delay + 1;
966
		if (new_delay > dev_priv->ips.min_delay)
967
			new_delay = dev_priv->ips.min_delay;
968
	}
969
 
970
	if (ironlake_set_drps(dev, new_delay))
971
		dev_priv->ips.cur_delay = new_delay;
972
 
4104 Serge 973
	spin_unlock(&mchdev_lock);
3746 Serge 974
 
975
	return;
976
}
977
 
6084 serge 978
static void notify_ring(struct intel_engine_cs *ring)
2352 Serge 979
{
5060 serge 980
	if (!intel_ring_initialized(ring))
2352 Serge 981
		return;
2351 Serge 982
 
6084 serge 983
	trace_i915_gem_request_notify(ring);
2351 Serge 984
 
2352 Serge 985
	wake_up_all(&ring->irq_queue);
986
}
987
 
6084 serge 988
static void vlv_c0_read(struct drm_i915_private *dev_priv,
989
			struct intel_rps_ei *ei)
5060 serge 990
{
6084 serge 991
	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
992
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
993
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
994
}
5060 serge 995
 
6084 serge 996
static bool vlv_c0_above(struct drm_i915_private *dev_priv,
997
			 const struct intel_rps_ei *old,
998
			 const struct intel_rps_ei *now,
999
			 int threshold)
1000
{
1001
	u64 time, c0;
1002
	unsigned int mul = 100;
5060 serge 1003
 
6084 serge 1004
	if (old->cz_clock == 0)
1005
		return false;
5060 serge 1006
 
6084 serge 1007
	if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
1008
		mul <<= 8;
5060 serge 1009
 
6084 serge 1010
	time = now->cz_clock - old->cz_clock;
1011
	time *= threshold * dev_priv->czclk_freq;
5060 serge 1012
 
6084 serge 1013
	/* Workload can be split between render + media, e.g. SwapBuffers
1014
	 * being blitted in X after being rendered in mesa. To account for
1015
	 * this we need to combine both engines into our activity counter.
5060 serge 1016
	 */
6084 serge 1017
	c0 = now->render_c0 - old->render_c0;
1018
	c0 += now->media_c0 - old->media_c0;
1019
	c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
5060 serge 1020
 
6084 serge 1021
	return c0 >= time;
5060 serge 1022
}
1023
 
6084 serge 1024
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
5060 serge 1025
{
6084 serge 1026
	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
1027
	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
1028
}
5060 serge 1029
 
6084 serge 1030
static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
1031
{
1032
	struct intel_rps_ei now;
1033
	u32 events = 0;
5060 serge 1034
 
6084 serge 1035
	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
1036
		return 0;
5060 serge 1037
 
6084 serge 1038
	vlv_c0_read(dev_priv, &now);
1039
	if (now.cz_clock == 0)
1040
		return 0;
5060 serge 1041
 
6084 serge 1042
	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
1043
		if (!vlv_c0_above(dev_priv,
1044
				  &dev_priv->rps.down_ei, &now,
1045
				  dev_priv->rps.down_threshold))
1046
			events |= GEN6_PM_RP_DOWN_THRESHOLD;
1047
		dev_priv->rps.down_ei = now;
5060 serge 1048
	}
1049
 
6084 serge 1050
	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
1051
		if (vlv_c0_above(dev_priv,
1052
				 &dev_priv->rps.up_ei, &now,
1053
				 dev_priv->rps.up_threshold))
1054
			events |= GEN6_PM_RP_UP_THRESHOLD;
1055
		dev_priv->rps.up_ei = now;
5060 serge 1056
	}
1057
 
6084 serge 1058
	return events;
1059
}
5060 serge 1060
 
6084 serge 1061
static bool any_waiters(struct drm_i915_private *dev_priv)
1062
{
1063
	struct intel_engine_cs *ring;
1064
	int i;
5060 serge 1065
 
6084 serge 1066
	for_each_ring(ring, dev_priv, i)
1067
		if (ring->irq_refcount)
1068
			return true;
5060 serge 1069
 
6084 serge 1070
	return false;
5060 serge 1071
}
1072
 
3031 serge 1073
static void gen6_pm_rps_work(struct work_struct *work)
1074
{
5060 serge 1075
	struct drm_i915_private *dev_priv =
1076
		container_of(work, struct drm_i915_private, rps.work);
6084 serge 1077
	bool client_boost;
1078
	int new_delay, adj, min, max;
4104 Serge 1079
	u32 pm_iir;
2352 Serge 1080
 
4104 Serge 1081
	spin_lock_irq(&dev_priv->irq_lock);
5354 serge 1082
	/* Speed up work cancelation during disabling rps interrupts. */
1083
	if (!dev_priv->rps.interrupts_enabled) {
1084
		spin_unlock_irq(&dev_priv->irq_lock);
1085
		return;
1086
	}
3031 serge 1087
	pm_iir = dev_priv->rps.pm_iir;
1088
	dev_priv->rps.pm_iir = 0;
5354 serge 1089
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
6084 serge 1090
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
1091
	client_boost = dev_priv->rps.client_boost;
1092
	dev_priv->rps.client_boost = false;
4104 Serge 1093
	spin_unlock_irq(&dev_priv->irq_lock);
2352 Serge 1094
 
4104 Serge 1095
	/* Make sure we didn't queue anything we're not going to process. */
5060 serge 1096
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
4104 Serge 1097
 
6084 serge 1098
	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
3031 serge 1099
		return;
1100
 
3243 Serge 1101
	mutex_lock(&dev_priv->rps.hw_lock);
3031 serge 1102
 
6084 serge 1103
	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
1104
 
4560 Serge 1105
	adj = dev_priv->rps.last_adj;
6084 serge 1106
	new_delay = dev_priv->rps.cur_freq;
1107
	min = dev_priv->rps.min_freq_softlimit;
1108
	max = dev_priv->rps.max_freq_softlimit;
1109
 
1110
	if (client_boost) {
1111
		new_delay = dev_priv->rps.max_freq_softlimit;
1112
		adj = 0;
1113
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
4560 Serge 1114
		if (adj > 0)
1115
			adj *= 2;
6084 serge 1116
		else /* CHV needs even encode values */
1117
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
4104 Serge 1118
		/*
1119
		 * For better performance, jump directly
1120
		 * to RPe if we're below it.
1121
		 */
6084 serge 1122
		if (new_delay < dev_priv->rps.efficient_freq - adj) {
5060 serge 1123
			new_delay = dev_priv->rps.efficient_freq;
6084 serge 1124
			adj = 0;
1125
		}
1126
	} else if (any_waiters(dev_priv)) {
1127
		adj = 0;
4560 Serge 1128
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
5060 serge 1129
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
1130
			new_delay = dev_priv->rps.efficient_freq;
4560 Serge 1131
		else
5060 serge 1132
			new_delay = dev_priv->rps.min_freq_softlimit;
4560 Serge 1133
		adj = 0;
1134
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1135
		if (adj < 0)
1136
			adj *= 2;
6084 serge 1137
		else /* CHV needs even encode values */
1138
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
4560 Serge 1139
	} else { /* unknown event */
6084 serge 1140
		adj = 0;
4560 Serge 1141
	}
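	/*
	 * Net effect of the ladder above: consecutive events in the same
	 * direction double the step size, a direction change restarts it at
	 * +/-1 (+/-2 on CHV), and timeouts, waiters, client boosts and
	 * unknown events reset it to 0.
	 */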
3031 serge 1142
 
6084 serge 1143
	dev_priv->rps.last_adj = adj;
1144
 
3031 serge 1145
	/* sysfs frequency interfaces may have snuck in while servicing the
1146
	 * interrupt
1147
	 */
6084 serge 1148
	new_delay += adj;
1149
	new_delay = clamp_t(int, new_delay, min, max);
4560 Serge 1150
 
6084 serge 1151
	intel_set_rps(dev_priv->dev, new_delay);
5060 serge 1152
 
3243 Serge 1153
	mutex_unlock(&dev_priv->rps.hw_lock);
3031 serge 1154
}
1155
 
1156
 
1157
/**
1158
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
1159
 * occurred.
1160
 * @work: workqueue struct
1161
 *
1162
 * Doesn't actually do anything except notify userspace. As a consequence of
1163
 * this event, userspace should try to remap the bad rows since statistically
1164
 * it is likely that the same row will go bad again.
1165
 */
1166
static void ivybridge_parity_work(struct work_struct *work)
2351 Serge 1167
{
5060 serge 1168
	struct drm_i915_private *dev_priv =
1169
		container_of(work, struct drm_i915_private, l3_parity.error_work);
3031 serge 1170
	u32 error_status, row, bank, subbank;
4560 Serge 1171
	char *parity_event[6];
3031 serge 1172
	uint32_t misccpctl;
4560 Serge 1173
	uint8_t slice = 0;
3031 serge 1174
 
1175
	/* We must turn off DOP level clock gating to access the L3 registers.
1176
	 * In order to prevent a get/put style interface, acquire struct mutex
1177
	 * any time we access those registers.
1178
	 */
1179
	mutex_lock(&dev_priv->dev->struct_mutex);
1180
 
4560 Serge 1181
	/* If we've screwed up tracking, just let the interrupt fire again */
1182
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
1183
		goto out;
1184
 
3031 serge 1185
	misccpctl = I915_READ(GEN7_MISCCPCTL);
1186
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1187
	POSTING_READ(GEN7_MISCCPCTL);
1188
 
4560 Serge 1189
	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1190
		u32 reg;
1191
 
1192
		slice--;
1193
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
1194
			break;
1195
 
1196
		dev_priv->l3_parity.which_slice &= ~(1<<slice);
1197
 
1198
		reg = GEN7_L3CDERRST1 + (slice * 0x200);
1199
 
1200
		error_status = I915_READ(reg);
6084 serge 1201
		row = GEN7_PARITY_ERROR_ROW(error_status);
1202
		bank = GEN7_PARITY_ERROR_BANK(error_status);
1203
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
3031 serge 1204
 
4560 Serge 1205
		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1206
		POSTING_READ(reg);
3031 serge 1207
 
4560 Serge 1208
		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1209
			  slice, row, bank, subbank);
1210
 
1211
	}
1212
 
3031 serge 1213
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1214
 
4560 Serge 1215
out:
1216
	WARN_ON(dev_priv->l3_parity.which_slice);
5354 serge 1217
	spin_lock_irq(&dev_priv->irq_lock);
5060 serge 1218
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
5354 serge 1219
	spin_unlock_irq(&dev_priv->irq_lock);
3031 serge 1220
 
1221
	mutex_unlock(&dev_priv->dev->struct_mutex);
1222
}
1223
 
4560 Serge 1224
static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
3031 serge 1225
{
5060 serge 1226
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 1227
 
4560 Serge 1228
	if (!HAS_L3_DPF(dev))
3031 serge 1229
		return;
1230
 
4104 Serge 1231
	spin_lock(&dev_priv->irq_lock);
5060 serge 1232
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
4104 Serge 1233
	spin_unlock(&dev_priv->irq_lock);
3031 serge 1234
 
4560 Serge 1235
	iir &= GT_PARITY_ERROR(dev);
1236
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1237
		dev_priv->l3_parity.which_slice |= 1 << 1;
1238
 
1239
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1240
		dev_priv->l3_parity.which_slice |= 1 << 0;
1241
 
3243 Serge 1242
	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
3031 serge 1243
}
1244
 
4104 Serge 1245
static void ilk_gt_irq_handler(struct drm_device *dev,
1246
			       struct drm_i915_private *dev_priv,
1247
			       u32 gt_iir)
1248
{
1249
	if (gt_iir &
1250
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
6084 serge 1251
		notify_ring(&dev_priv->ring[RCS]);
4104 Serge 1252
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
6084 serge 1253
		notify_ring(&dev_priv->ring[VCS]);
4104 Serge 1254
}
1255
 
3031 serge 1256
static void snb_gt_irq_handler(struct drm_device *dev,
1257
			       struct drm_i915_private *dev_priv,
1258
			       u32 gt_iir)
1259
{
1260
 
4104 Serge 1261
	if (gt_iir &
1262
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
6084 serge 1263
		notify_ring(&dev_priv->ring[RCS]);
4104 Serge 1264
	if (gt_iir & GT_BSD_USER_INTERRUPT)
6084 serge 1265
		notify_ring(&dev_priv->ring[VCS]);
4104 Serge 1266
	if (gt_iir & GT_BLT_USER_INTERRUPT)
6084 serge 1267
		notify_ring(&dev_priv->ring[BCS]);
3031 serge 1268
 
4104 Serge 1269
	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1270
		      GT_BSD_CS_ERROR_INTERRUPT |
5354 serge 1271
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1272
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
3031 serge 1273
 
4560 Serge 1274
	if (gt_iir & GT_PARITY_ERROR(dev))
1275
		ivybridge_parity_error_irq_handler(dev, gt_iir);
3031 serge 1276
}
1277
 
6084 serge 1278
static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
4560 Serge 1279
				       u32 master_ctl)
1280
{
1281
	irqreturn_t ret = IRQ_NONE;
1282
 
1283
	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
6084 serge 1284
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(0));
4560 Serge 1285
		if (tmp) {
6084 serge 1286
			I915_WRITE_FW(GEN8_GT_IIR(0), tmp);
4560 Serge 1287
			ret = IRQ_HANDLED;
5354 serge 1288
 
6084 serge 1289
			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
1290
				intel_lrc_irq_handler(&dev_priv->ring[RCS]);
1291
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
1292
				notify_ring(&dev_priv->ring[RCS]);
5354 serge 1293
 
6084 serge 1294
			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
1295
				intel_lrc_irq_handler(&dev_priv->ring[BCS]);
1296
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
1297
				notify_ring(&dev_priv->ring[BCS]);
4560 Serge 1298
		} else
1299
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
1300
	}
1301
 
5060 serge 1302
	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
6084 serge 1303
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(1));
4560 Serge 1304
		if (tmp) {
6084 serge 1305
			I915_WRITE_FW(GEN8_GT_IIR(1), tmp);
4560 Serge 1306
			ret = IRQ_HANDLED;
5354 serge 1307
 
6084 serge 1308
			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
1309
				intel_lrc_irq_handler(&dev_priv->ring[VCS]);
1310
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
1311
				notify_ring(&dev_priv->ring[VCS]);
5354 serge 1312
 
6084 serge 1313
			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
1314
				intel_lrc_irq_handler(&dev_priv->ring[VCS2]);
1315
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
1316
				notify_ring(&dev_priv->ring[VCS2]);
4560 Serge 1317
		} else
1318
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
1319
	}
1320
 
6084 serge 1321
	if (master_ctl & GEN8_GT_VECS_IRQ) {
1322
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(3));
1323
		if (tmp) {
1324
			I915_WRITE_FW(GEN8_GT_IIR(3), tmp);
1325
			ret = IRQ_HANDLED;
1326
 
1327
			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
1328
				intel_lrc_irq_handler(&dev_priv->ring[VECS]);
1329
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
1330
				notify_ring(&dev_priv->ring[VECS]);
1331
		} else
1332
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
1333
	}
1334
 
5060 serge 1335
	if (master_ctl & GEN8_GT_PM_IRQ) {
6084 serge 1336
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(2));
5060 serge 1337
		if (tmp & dev_priv->pm_rps_events) {
6084 serge 1338
			I915_WRITE_FW(GEN8_GT_IIR(2),
1339
				      tmp & dev_priv->pm_rps_events);
5060 serge 1340
			ret = IRQ_HANDLED;
5354 serge 1341
			gen6_rps_irq_handler(dev_priv, tmp);
5060 serge 1342
		} else
1343
			DRM_ERROR("The master control interrupt lied (PM)!\n");
1344
	}
1345
 
6084 serge 1346
	return ret;
1347
}
5354 serge 1348
 
6084 serge 1349
static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
1350
{
1351
	switch (port) {
1352
	case PORT_A:
1353
		return val & PORTA_HOTPLUG_LONG_DETECT;
1354
	case PORT_B:
1355
		return val & PORTB_HOTPLUG_LONG_DETECT;
1356
	case PORT_C:
1357
		return val & PORTC_HOTPLUG_LONG_DETECT;
1358
	default:
1359
		return false;
4560 Serge 1360
	}
6084 serge 1361
}
4560 Serge 1362
 
6084 serge 1363
static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
1364
{
1365
	switch (port) {
1366
	case PORT_E:
1367
		return val & PORTE_HOTPLUG_LONG_DETECT;
1368
	default:
1369
		return false;
1370
	}
4560 Serge 1371
}
1372
 
6084 serge 1373
static bool spt_port_hotplug_long_detect(enum port port, u32 val)
5060 serge 1374
{
1375
	switch (port) {
1376
	case PORT_A:
6084 serge 1377
		return val & PORTA_HOTPLUG_LONG_DETECT;
5060 serge 1378
	case PORT_B:
6084 serge 1379
		return val & PORTB_HOTPLUG_LONG_DETECT;
5060 serge 1380
	case PORT_C:
6084 serge 1381
		return val & PORTC_HOTPLUG_LONG_DETECT;
5060 serge 1382
	case PORT_D:
6084 serge 1383
		return val & PORTD_HOTPLUG_LONG_DETECT;
1384
	default:
1385
		return false;
5060 serge 1386
	}
1387
}
1388
 
6084 serge 1389
static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
5060 serge 1390
{
1391
	switch (port) {
1392
	case PORT_A:
6084 serge 1393
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
5060 serge 1394
	default:
6084 serge 1395
		return false;
1396
	}
1397
}
1398
 
1399
static bool pch_port_hotplug_long_detect(enum port port, u32 val)
1400
{
1401
	switch (port) {
5060 serge 1402
	case PORT_B:
6084 serge 1403
		return val & PORTB_HOTPLUG_LONG_DETECT;
5060 serge 1404
	case PORT_C:
6084 serge 1405
		return val & PORTC_HOTPLUG_LONG_DETECT;
5060 serge 1406
	case PORT_D:
6084 serge 1407
		return val & PORTD_HOTPLUG_LONG_DETECT;
1408
	default:
1409
		return false;
5060 serge 1410
	}
1411
}
1412
 
6084 serge 1413
static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
5060 serge 1414
{
6084 serge 1415
	switch (port) {
1416
	case PORT_B:
1417
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1418
	case PORT_C:
1419
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1420
	case PORT_D:
1421
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
5060 serge 1422
	default:
6084 serge 1423
		return false;
5060 serge 1424
	}
1425
}
1426
 
6084 serge 1427
/*
1428
 * Get a bit mask of pins that have triggered, and which ones may be long.
1429
 * This can be called multiple times with the same masks to accumulate
1430
 * hotplug detection results from several registers.
1431
 *
1432
 * Note that the caller is expected to zero out the masks initially.
1433
 */
1434
static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
1435
			     u32 hotplug_trigger, u32 dig_hotplug_reg,
1436
			     const u32 hpd[HPD_NUM_PINS],
1437
			     bool long_pulse_detect(enum port port, u32 val))
3746 Serge 1438
{
6084 serge 1439
	enum port port;
3746 Serge 1440
	int i;
1441
 
6084 serge 1442
	for_each_hpd_pin(i) {
1443
		if ((hpd[i] & hotplug_trigger) == 0)
5060 serge 1444
			continue;
3746 Serge 1445
 
6084 serge 1446
		*pin_mask |= BIT(i);
5060 serge 1447
 
6296 serge 1448
		if (!intel_hpd_pin_to_port(i, &port))
6131 serge 1449
			continue;
5060 serge 1450
 
6084 serge 1451
		if (long_pulse_detect(port, dig_hotplug_reg))
1452
			*long_mask |= BIT(i);
3746 Serge 1453
	}
1454
 
6084 serge 1455
	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
1456
			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
3746 Serge 1457
 
1458
}
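/*
 * Illustrative caller sketch (hedged, not part of this excerpt): a PCH
 * hotplug handler would typically zero both masks and then accumulate
 * results per register, e.g.
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
 *			   dig_hotplug_reg, hpd_ibx,
 *			   pch_port_hotplug_long_detect);
 *
 * before handing both masks on to the hotplug code.
 */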
1459
 
3480 Serge 1460
static void gmbus_irq_handler(struct drm_device *dev)
1461
{
5060 serge 1462
	struct drm_i915_private *dev_priv = dev->dev_private;
3480 Serge 1463
 
1464
	wake_up_all(&dev_priv->gmbus_wait_queue);
1465
}
1466
 
1467
static void dp_aux_irq_handler(struct drm_device *dev)
1468
{
5060 serge 1469
	struct drm_i915_private *dev_priv = dev->dev_private;
3480 Serge 1470
 
1471
	wake_up_all(&dev_priv->gmbus_wait_queue);
1472
}
1473
 
4560 Serge 1474
#if defined(CONFIG_DEBUG_FS)
1475
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1476
					 uint32_t crc0, uint32_t crc1,
1477
					 uint32_t crc2, uint32_t crc3,
1478
					 uint32_t crc4)
1479
{
1480
	struct drm_i915_private *dev_priv = dev->dev_private;
1481
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1482
	struct intel_pipe_crc_entry *entry;
1483
	int head, tail;
1484
 
1485
	spin_lock(&pipe_crc->lock);
1486
 
1487
	if (!pipe_crc->entries) {
1488
		spin_unlock(&pipe_crc->lock);
5354 serge 1489
		DRM_DEBUG_KMS("spurious interrupt\n");
4560 Serge 1490
		return;
1491
	}
1492
 
1493
	head = pipe_crc->head;
1494
	tail = pipe_crc->tail;
1495
 
1496
	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1497
		spin_unlock(&pipe_crc->lock);
1498
		DRM_ERROR("CRC buffer overflowing\n");
1499
		return;
1500
	}
1501
 
1502
	entry = &pipe_crc->entries[head];
1503
 
1504
	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
1505
	entry->crc[0] = crc0;
1506
	entry->crc[1] = crc1;
1507
	entry->crc[2] = crc2;
1508
	entry->crc[3] = crc3;
1509
	entry->crc[4] = crc4;
1510
 
1511
	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1512
	pipe_crc->head = head;
1513
 
1514
	spin_unlock(&pipe_crc->lock);
1515
 
1516
	wake_up_interruptible(&pipe_crc->wq);
1517
}
1518
#else
1519
static inline void
1520
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1521
			     uint32_t crc0, uint32_t crc1,
1522
			     uint32_t crc2, uint32_t crc3,
1523
			     uint32_t crc4) {}
1524
#endif
1525
 
1526
 
1527
static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1528
{
1529
	struct drm_i915_private *dev_priv = dev->dev_private;
1530
 
1531
	display_pipe_crc_irq_handler(dev, pipe,
1532
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1533
				     0, 0, 0, 0);
1534
}
1535
 
1536
static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1537
{
1538
	struct drm_i915_private *dev_priv = dev->dev_private;
1539
 
1540
	display_pipe_crc_irq_handler(dev, pipe,
1541
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1542
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1543
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1544
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1545
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1546
}
1547
 
1548
static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1549
{
1550
	struct drm_i915_private *dev_priv = dev->dev_private;
1551
	uint32_t res1, res2;
1552
 
1553
	if (INTEL_INFO(dev)->gen >= 3)
1554
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1555
	else
1556
		res1 = 0;
1557
 
1558
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1559
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1560
	else
1561
		res2 = 0;
1562
 
1563
	display_pipe_crc_irq_handler(dev, pipe,
1564
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
1565
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1566
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1567
				     res1, res2);
1568
}
1569
 
4104 Serge 1570
/* The RPS events need forcewake, so we add them to a work queue and mask their
1571
 * IMR bits until the work is done. Other interrupts can be processed without
1572
 * the work queue. */
1573
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1574
{
5060 serge 1575
	if (pm_iir & dev_priv->pm_rps_events) {
4104 Serge 1576
		spin_lock(&dev_priv->irq_lock);
5354 serge 1577
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1578
		if (dev_priv->rps.interrupts_enabled) {
6084 serge 1579
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
5354 serge 1580
			queue_work(dev_priv->wq, &dev_priv->rps.work);
1581
		}
4104 Serge 1582
		spin_unlock(&dev_priv->irq_lock);
1583
	}
1584
 
5354 serge 1585
	if (INTEL_INFO(dev_priv)->gen >= 8)
1586
		return;
1587
 
4104 Serge 1588
	if (HAS_VEBOX(dev_priv->dev)) {
1589
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
6084 serge 1590
			notify_ring(&dev_priv->ring[VECS]);
4104 Serge 1591
 
5354 serge 1592
		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1593
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
4104 Serge 1594
	}
1595
}
1596
 
5354 serge 1597
static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
1598
{
6088 serge 1599
	if (!drm_handle_vblank(dev, pipe))
1600
		return false;
5354 serge 1601
 
1602
	return true;
1603
}
1604
 
5060 serge 1605
static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
3031 serge 1606
{
5060 serge 1607
	struct drm_i915_private *dev_priv = dev->dev_private;
1608
	u32 pipe_stats[I915_MAX_PIPES] = { };
3031 serge 1609
	int pipe;
1610
 
5060 serge 1611
	spin_lock(&dev_priv->irq_lock);
5354 serge 1612
	for_each_pipe(dev_priv, pipe) {
5060 serge 1613
		int reg;
1614
		u32 mask, iir_bit = 0;
3031 serge 1615
 
5060 serge 1616
		/*
1617
		 * PIPESTAT bits get signalled even when the interrupt is
1618
		 * disabled with the mask bits, and some of the status bits do
1619
		 * not generate interrupts at all (like the underrun bit). Hence
1620
		 * we need to be careful that we only handle what we want to
1621
		 * handle.
1622
		 */
3031 serge 1623
 
5354 serge 1624
		/* fifo underruns are filtered in the underrun handler. */
1625
		mask = PIPE_FIFO_UNDERRUN_STATUS;
1626
 
5060 serge 1627
		switch (pipe) {
1628
		case PIPE_A:
1629
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1630
			break;
1631
		case PIPE_B:
1632
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1633
			break;
1634
		case PIPE_C:
1635
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1636
			break;
1637
		}
1638
		if (iir & iir_bit)
1639
			mask |= dev_priv->pipestat_irq_mask[pipe];
3031 serge 1640
 
5060 serge 1641
		if (!mask)
1642
			continue;
3031 serge 1643
 
5060 serge 1644
		reg = PIPESTAT(pipe);
1645
		mask |= PIPESTAT_INT_ENABLE_MASK;
1646
		pipe_stats[pipe] = I915_READ(reg) & mask;
3031 serge 1647
 
6084 serge 1648
		/*
1649
		 * Clear the PIPE*STAT regs before the IIR
1650
		 */
5060 serge 1651
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
1652
					PIPESTAT_INT_STATUS_MASK))
6084 serge 1653
			I915_WRITE(reg, pipe_stats[pipe]);
1654
	}
5060 serge 1655
	spin_unlock(&dev_priv->irq_lock);
3031 serge 1656
 
5354 serge 1657
	for_each_pipe(dev_priv, pipe) {
6084 serge 1658
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
1659
		    intel_pipe_handle_vblank(dev, pipe))
1660
            /*intel_check_page_flip(dev, pipe)*/;
3031 serge 1661
 
6084 serge 1662
		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
1663
//           intel_prepare_page_flip(dev, pipe);
1664
//           intel_finish_page_flip(dev, pipe);
1665
		}
4560 Serge 1666
 
6084 serge 1667
		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1668
			i9xx_pipe_crc_irq_handler(dev, pipe);
5060 serge 1669
 
5354 serge 1670
		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1671
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
6084 serge 1672
	}
3031 serge 1673
 
5060 serge 1674
	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1675
		gmbus_irq_handler(dev);
1676
}
3031 serge 1677
 
5060 serge 1678
static void i9xx_hpd_irq_handler(struct drm_device *dev)
1679
{
1680
	struct drm_i915_private *dev_priv = dev->dev_private;
1681
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
6084 serge 1682
	u32 pin_mask = 0, long_mask = 0;
4104 Serge 1683
 
6084 serge 1684
	if (!hotplug_status)
1685
		return;
4104 Serge 1686
 
6084 serge 1687
	I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1688
	/*
1689
	 * Make sure hotplug status is cleared before we clear IIR, or else we
1690
	 * may miss hotplug events.
1691
	 */
1692
	POSTING_READ(PORT_HOTPLUG_STAT);
1693
 
1694
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
5060 serge 1695
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
4560 Serge 1696
 
6084 serge 1697
		if (hotplug_trigger) {
1698
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1699
					   hotplug_trigger, hpd_status_g4x,
1700
					   i9xx_port_hotplug_long_detect);
1701
 
6296 serge 1702
			intel_hpd_irq_handler(dev, pin_mask, long_mask);
6084 serge 1703
		}
1704
 
1705
		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1706
			dp_aux_irq_handler(dev);
5060 serge 1707
	} else {
1708
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1709
 
6084 serge 1710
		if (hotplug_trigger) {
1711
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1712
					   hotplug_trigger, hpd_status_i915,
1713
					   i9xx_port_hotplug_long_detect);
6296 serge 1714
			intel_hpd_irq_handler(dev, pin_mask, long_mask);
6084 serge 1715
		}
5060 serge 1716
	}
1717
}
1718
 
1719
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1720
{
1721
	struct drm_device *dev = arg;
1722
	struct drm_i915_private *dev_priv = dev->dev_private;
1723
	u32 iir, gt_iir, pm_iir;
1724
	irqreturn_t ret = IRQ_NONE;
1725
 
6084 serge 1726
	if (!intel_irqs_enabled(dev_priv))
1727
		return IRQ_NONE;
1728
 
5060 serge 1729
	while (true) {
1730
		/* Find, clear, then process each source of interrupt */
1731
 
1732
		gt_iir = I915_READ(GTIIR);
1733
		if (gt_iir)
1734
			I915_WRITE(GTIIR, gt_iir);
1735
 
1736
		pm_iir = I915_READ(GEN6_PMIIR);
1737
		if (pm_iir)
1738
			I915_WRITE(GEN6_PMIIR, pm_iir);
1739
 
1740
		iir = I915_READ(VLV_IIR);
1741
		if (iir) {
1742
			/* Consume port before clearing IIR or we'll miss events */
1743
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
1744
				i9xx_hpd_irq_handler(dev);
1745
			I915_WRITE(VLV_IIR, iir);
3031 serge 1746
		}
1747
 
5060 serge 1748
		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1749
			goto out;
3031 serge 1750
 
5060 serge 1751
		ret = IRQ_HANDLED;
1752
 
1753
		if (gt_iir)
6084 serge 1754
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
4126 Serge 1755
		if (pm_iir)
1756
			gen6_rps_irq_handler(dev_priv, pm_iir);
5060 serge 1757
		/* Call regardless, as some status bits might not be
1758
		 * signalled in iir */
1759
		valleyview_pipestat_irq_handler(dev, iir);
3031 serge 1760
	}
1761
 
1762
out:
1763
	return ret;
1764
}
1765
 
5060 serge 1766
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1767
{
1768
	struct drm_device *dev = arg;
1769
	struct drm_i915_private *dev_priv = dev->dev_private;
1770
	u32 master_ctl, iir;
1771
	irqreturn_t ret = IRQ_NONE;
1772
 
6084 serge 1773
	if (!intel_irqs_enabled(dev_priv))
1774
		return IRQ_NONE;
1775
 
5060 serge 1776
	for (;;) {
1777
		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1778
		iir = I915_READ(VLV_IIR);
1779
 
1780
		if (master_ctl == 0 && iir == 0)
1781
			break;
1782
 
1783
		ret = IRQ_HANDLED;
1784
 
1785
		I915_WRITE(GEN8_MASTER_IRQ, 0);
1786
 
1787
		/* Find, clear, then process each source of interrupt */
1788
 
1789
		if (iir) {
1790
			/* Consume port before clearing IIR or we'll miss events */
1791
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
1792
				i9xx_hpd_irq_handler(dev);
1793
			I915_WRITE(VLV_IIR, iir);
1794
		}
1795
 
6084 serge 1796
		gen8_gt_irq_handler(dev_priv, master_ctl);
5060 serge 1797
 
1798
		/* Call regardless, as some status bits might not be
1799
		 * signalled in iir */
1800
		valleyview_pipestat_irq_handler(dev, iir);
1801
 
1802
		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
1803
		POSTING_READ(GEN8_MASTER_IRQ);
1804
	}
1805
 
1806
	return ret;
1807
}
1808
 
6084 serge 1809
static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
1810
				const u32 hpd[HPD_NUM_PINS])
1811
{
1812
	struct drm_i915_private *dev_priv = to_i915(dev);
1813
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1814
 
1815
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1816
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
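	/* Writing the just-read value back acks the latched per-port pulse
	 * status; the snapshot in dig_hotplug_reg is still used below to tell
	 * long pulses from short ones. */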
1817
 
1818
	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1819
			   dig_hotplug_reg, hpd,
1820
			   pch_port_hotplug_long_detect);
1821
 
6296 serge 1822
	intel_hpd_irq_handler(dev, pin_mask, long_mask);
6084 serge 1823
}
1824
 
3031 serge 1825
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1826
{
5060 serge 1827
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 1828
	int pipe;
3746 Serge 1829
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
3031 serge 1830
 
6084 serge 1831
	if (hotplug_trigger)
1832
		ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
4104 Serge 1833
 
1834
	if (pch_iir & SDE_AUDIO_POWER_MASK) {
1835
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1836
			       SDE_AUDIO_POWER_SHIFT);
1837
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
1838
				 port_name(port));
3746 Serge 1839
	}
3031 serge 1840
 
3480 Serge 1841
	if (pch_iir & SDE_AUX_MASK)
1842
		dp_aux_irq_handler(dev);
1843
 
3031 serge 1844
	if (pch_iir & SDE_GMBUS)
3480 Serge 1845
		gmbus_irq_handler(dev);
3031 serge 1846
 
1847
	if (pch_iir & SDE_AUDIO_HDCP_MASK)
1848
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1849
 
1850
	if (pch_iir & SDE_AUDIO_TRANS_MASK)
1851
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1852
 
1853
	if (pch_iir & SDE_POISON)
1854
		DRM_ERROR("PCH poison interrupt\n");
1855
 
1856
	if (pch_iir & SDE_FDI_MASK)
5354 serge 1857
		for_each_pipe(dev_priv, pipe)
3031 serge 1858
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1859
					 pipe_name(pipe),
1860
					 I915_READ(FDI_RX_IIR(pipe)));
1861
 
1862
	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1863
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1864
 
1865
	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1866
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1867
 
4104 Serge 1868
	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
5354 serge 1869
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
4104 Serge 1870
 
3031 serge 1871
	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
5354 serge 1872
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
3031 serge 1873
}
1874
 
4104 Serge 1875
static void ivb_err_int_handler(struct drm_device *dev)
1876
{
1877
	struct drm_i915_private *dev_priv = dev->dev_private;
1878
	u32 err_int = I915_READ(GEN7_ERR_INT);
4560 Serge 1879
	enum pipe pipe;
4104 Serge 1880
 
1881
	if (err_int & ERR_INT_POISON)
1882
		DRM_ERROR("Poison interrupt\n");
1883
 
5354 serge 1884
	for_each_pipe(dev_priv, pipe) {
1885
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1886
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4104 Serge 1887
 
4560 Serge 1888
		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1889
			if (IS_IVYBRIDGE(dev))
1890
				ivb_pipe_crc_irq_handler(dev, pipe);
1891
			else
1892
				hsw_pipe_crc_irq_handler(dev, pipe);
1893
		}
1894
	}
4104 Serge 1895
 
1896
	I915_WRITE(GEN7_ERR_INT, err_int);
1897
}
1898
 
1899
static void cpt_serr_int_handler(struct drm_device *dev)
1900
{
1901
	struct drm_i915_private *dev_priv = dev->dev_private;
1902
	u32 serr_int = I915_READ(SERR_INT);
1903
 
1904
	if (serr_int & SERR_INT_POISON)
1905
		DRM_ERROR("PCH poison interrupt\n");
1906
 
1907
	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
5354 serge 1908
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
4104 Serge 1909
 
1910
	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
5354 serge 1911
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
4104 Serge 1912
 
1913
	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
5354 serge 1914
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
4104 Serge 1915
 
1916
	I915_WRITE(SERR_INT, serr_int);
1917
}
1918
 
3031 serge 1919
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1920
{
5060 serge 1921
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 1922
	int pipe;
3746 Serge 1923
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
3031 serge 1924
 
6084 serge 1925
	if (hotplug_trigger)
1926
		ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
4104 Serge 1927
 
1928
	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1929
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1930
			       SDE_AUDIO_POWER_SHIFT_CPT);
1931
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1932
				 port_name(port));
3746 Serge 1933
	}
3031 serge 1934
 
1935
	if (pch_iir & SDE_AUX_MASK_CPT)
3480 Serge 1936
		dp_aux_irq_handler(dev);
3031 serge 1937
 
1938
	if (pch_iir & SDE_GMBUS_CPT)
3480 Serge 1939
		gmbus_irq_handler(dev);
3031 serge 1940
 
1941
	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1942
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
1943
 
1944
	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1945
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
1946
 
1947
	if (pch_iir & SDE_FDI_MASK_CPT)
5354 serge 1948
		for_each_pipe(dev_priv, pipe)
3031 serge 1949
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1950
					 pipe_name(pipe),
1951
					 I915_READ(FDI_RX_IIR(pipe)));
1952
 
4104 Serge 1953
	if (pch_iir & SDE_ERROR_CPT)
1954
		cpt_serr_int_handler(dev);
4539 Serge 1955
}
3480 Serge 1956
 
6084 serge 1957
static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
1958
{
1959
	struct drm_i915_private *dev_priv = dev->dev_private;
1960
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
1961
		~SDE_PORTE_HOTPLUG_SPT;
1962
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
1963
	u32 pin_mask = 0, long_mask = 0;
1964
 
1965
	if (hotplug_trigger) {
1966
		u32 dig_hotplug_reg;
1967
 
1968
		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1969
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1970
 
1971
		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1972
				   dig_hotplug_reg, hpd_spt,
1973
				   spt_port_hotplug_long_detect);
1974
	}
1975
 
1976
	if (hotplug2_trigger) {
1977
		u32 dig_hotplug_reg;
1978
 
1979
		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
1980
		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
1981
 
1982
		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
1983
				   dig_hotplug_reg, hpd_spt,
1984
				   spt_port_hotplug2_long_detect);
1985
	}
1986
 
6296 serge 1987
	if (pin_mask)
1988
		intel_hpd_irq_handler(dev, pin_mask, long_mask);
1989
 
6084 serge 1990
	if (pch_iir & SDE_GMBUS_CPT)
1991
		gmbus_irq_handler(dev);
1992
}
1993
 
1994
static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
1995
				const u32 hpd[HPD_NUM_PINS])
1996
{
1997
	struct drm_i915_private *dev_priv = to_i915(dev);
1998
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1999
 
2000
	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2001
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2002
 
2003
	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2004
			   dig_hotplug_reg, hpd,
2005
			   ilk_port_hotplug_long_detect);
2006
 
6296 serge 2007
	intel_hpd_irq_handler(dev, pin_mask, long_mask);
6084 serge 2008
}
2009
 
4104 Serge 2010
static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
3031 serge 2011
{
4104 Serge 2012
	struct drm_i915_private *dev_priv = dev->dev_private;
4560 Serge 2013
	enum pipe pipe;
6084 serge 2014
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
3031 serge 2015
 
6296 serge 2016
	if (hotplug_trigger)
2017
		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);
6084 serge 2018
 
3480 Serge 2019
	if (de_iir & DE_AUX_CHANNEL_A)
2020
		dp_aux_irq_handler(dev);
2021
 
3031 serge 2022
	if (de_iir & DE_GSE)
4104 Serge 2023
		intel_opregion_asle_intr(dev);
2351 Serge 2024
 
4104 Serge 2025
	if (de_iir & DE_POISON)
2026
		DRM_ERROR("Poison interrupt\n");
2027
 
5354 serge 2028
	for_each_pipe(dev_priv, pipe) {
6084 serge 2029
		if (de_iir & DE_PIPE_VBLANK(pipe) &&
2030
		    intel_pipe_handle_vblank(dev, pipe))
2031
            /*intel_check_page_flip(dev, pipe)*/;
4104 Serge 2032
 
4560 Serge 2033
		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
5354 serge 2034
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2351 Serge 2035
 
4560 Serge 2036
		if (de_iir & DE_PIPE_CRC_DONE(pipe))
2037
			i9xx_pipe_crc_irq_handler(dev, pipe);
2038
 
2039
		/* plane/pipes map 1:1 on ilk+ */
2040
		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2041
//			intel_prepare_page_flip(dev, pipe);
2042
//			intel_finish_page_flip_plane(dev, pipe);
2043
		}
3031 serge 2044
	}
2351 Serge 2045
 
3031 serge 2046
	/* check event from PCH */
2047
	if (de_iir & DE_PCH_EVENT) {
3480 Serge 2048
		u32 pch_iir = I915_READ(SDEIIR);
2049
 
3031 serge 2050
		if (HAS_PCH_CPT(dev))
2051
			cpt_irq_handler(dev, pch_iir);
2052
		else
2053
			ibx_irq_handler(dev, pch_iir);
3480 Serge 2054
 
2055
		/* should clear PCH hotplug event before clearing the CPU irq */
2056
		I915_WRITE(SDEIIR, pch_iir);
3031 serge 2057
	}
4104 Serge 2058
 
6084 serge 2059
	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
4104 Serge 2060
		ironlake_rps_change_irq_handler(dev);
2351 Serge 2061
}
2062
 
4104 Serge 2063
static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
3031 serge 2064
{
2065
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 2066
	enum pipe pipe;
6084 serge 2067
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2351 Serge 2068
 
6084 serge 2069
	if (hotplug_trigger)
2070
		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb);
2071
 
4126 Serge 2072
	if (de_iir & DE_ERR_INT_IVB)
2073
		ivb_err_int_handler(dev);
2351 Serge 2074
 
4104 Serge 2075
	if (de_iir & DE_AUX_CHANNEL_A_IVB)
2076
		dp_aux_irq_handler(dev);
3031 serge 2077
 
4104 Serge 2078
	if (de_iir & DE_GSE_IVB)
2079
		intel_opregion_asle_intr(dev);
4560 Serge 2080
 
5354 serge 2081
	for_each_pipe(dev_priv, pipe) {
6084 serge 2082
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2083
		    intel_pipe_handle_vblank(dev, pipe))
2084
            /*intel_check_page_flip(dev, pipe)*/;
4560 Serge 2085
 
2086
		/* plane/pipes map 1:1 on ilk+ */
5060 serge 2087
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2088
//			intel_prepare_page_flip(dev, pipe);
2089
//			intel_finish_page_flip_plane(dev, pipe);
3031 serge 2090
		}
2091
	}
2092
 
4104 Serge 2093
	/* check event from PCH */
2094
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2095
		u32 pch_iir = I915_READ(SDEIIR);
3031 serge 2096
 
4104 Serge 2097
		cpt_irq_handler(dev, pch_iir);
3031 serge 2098
 
4104 Serge 2099
		/* clear PCH hotplug event before clearing the CPU irq */
2100
		I915_WRITE(SDEIIR, pch_iir);
4539 Serge 2101
	}
3031 serge 2102
}
2103
 
5060 serge 2104
/*
2105
 * To handle irqs with the minimum potential races with fresh interrupts, we:
2106
 * 1 - Disable Master Interrupt Control.
2107
 * 2 - Find the source(s) of the interrupt.
2108
 * 3 - Clear the Interrupt Identity bits (IIR).
2109
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2110
 * 5 - Re-enable Master Interrupt Control.
2111
 */
4104 Serge 2112
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
3031 serge 2113
{
5060 serge 2114
	struct drm_device *dev = arg;
2115
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 2116
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2117
	irqreturn_t ret = IRQ_NONE;
3031 serge 2118
 
6084 serge 2119
	if (!intel_irqs_enabled(dev_priv))
2120
		return IRQ_NONE;
2121
 
4104 Serge 2122
	/* We get interrupts on unclaimed registers, so check for this before we
2123
	 * do any I915_{READ,WRITE}. */
2124
	intel_uncore_check_errors(dev);
3031 serge 2125
 
4104 Serge 2126
	/* disable master interrupt before clearing iir  */
2127
	de_ier = I915_READ(DEIER);
2128
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2129
	POSTING_READ(DEIER);
3031 serge 2130
 
4104 Serge 2131
	/* Disable south interrupts. We'll only write to SDEIIR once, so further
2132
	 * interrupts will be stored on its back queue, and then we'll be
2133
	 * able to process them after we restore SDEIER (as soon as we restore
2134
	 * it, we'll get an interrupt if SDEIIR still has something to process
2135
	 * due to its back queue). */
2136
	if (!HAS_PCH_NOP(dev)) {
2137
		sde_ier = I915_READ(SDEIER);
2138
		I915_WRITE(SDEIER, 0);
2139
		POSTING_READ(SDEIER);
3031 serge 2140
	}
2141
 
5060 serge 2142
	/* Find, clear, then process each source of interrupt */
2143
 
4104 Serge 2144
	gt_iir = I915_READ(GTIIR);
2145
	if (gt_iir) {
5060 serge 2146
		I915_WRITE(GTIIR, gt_iir);
2147
		ret = IRQ_HANDLED;
4104 Serge 2148
		if (INTEL_INFO(dev)->gen >= 6)
2149
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
2150
		else
2151
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
4539 Serge 2152
	}
3031 serge 2153
 
4104 Serge 2154
	de_iir = I915_READ(DEIIR);
2155
	if (de_iir) {
5060 serge 2156
		I915_WRITE(DEIIR, de_iir);
2157
		ret = IRQ_HANDLED;
4104 Serge 2158
		if (INTEL_INFO(dev)->gen >= 7)
2159
			ivb_display_irq_handler(dev, de_iir);
2160
		else
2161
			ilk_display_irq_handler(dev, de_iir);
3480 Serge 2162
	}
2163
 
4104 Serge 2164
	if (INTEL_INFO(dev)->gen >= 6) {
2165
		u32 pm_iir = I915_READ(GEN6_PMIIR);
2166
		if (pm_iir) {
2167
			I915_WRITE(GEN6_PMIIR, pm_iir);
2168
			ret = IRQ_HANDLED;
5060 serge 2169
			gen6_rps_irq_handler(dev_priv, pm_iir);
4560 Serge 2170
		}
3031 serge 2171
	}
2172
 
4104 Serge 2173
	I915_WRITE(DEIER, de_ier);
2174
	POSTING_READ(DEIER);
2175
	if (!HAS_PCH_NOP(dev)) {
2176
		I915_WRITE(SDEIER, sde_ier);
2177
		POSTING_READ(SDEIER);
3031 serge 2178
	}
2179
 
4104 Serge 2180
	return ret;
3031 serge 2181
}
2182
 
6084 serge 2183
static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
2184
				const u32 hpd[HPD_NUM_PINS])
2185
{
2186
	struct drm_i915_private *dev_priv = to_i915(dev);
2187
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2188
 
2189
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2190
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2191
 
2192
	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2193
			   dig_hotplug_reg, hpd,
2194
			   bxt_port_hotplug_long_detect);
2195
 
6296 serge 2196
	intel_hpd_irq_handler(dev, pin_mask, long_mask);
6084 serge 2197
}
2198
 
4560 Serge 2199
static irqreturn_t gen8_irq_handler(int irq, void *arg)
2200
{
2201
	struct drm_device *dev = arg;
2202
	struct drm_i915_private *dev_priv = dev->dev_private;
2203
	u32 master_ctl;
2204
	irqreturn_t ret = IRQ_NONE;
2205
	uint32_t tmp = 0;
2206
	enum pipe pipe;
5354 serge 2207
	u32 aux_mask = GEN8_AUX_CHANNEL_A;
4560 Serge 2208
 
6084 serge 2209
	if (!intel_irqs_enabled(dev_priv))
2210
		return IRQ_NONE;
2211
 
2212
	if (INTEL_INFO(dev_priv)->gen >= 9)
5354 serge 2213
		aux_mask |=  GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
2214
			GEN9_AUX_CHANNEL_D;
2215
 
6084 serge 2216
	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
4560 Serge 2217
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2218
	if (!master_ctl)
2219
		return IRQ_NONE;
2220
 
6084 serge 2221
	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
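	/* Master interrupt control is now disabled, so no new top-level
	 * interrupts fire while the individual IIR registers are read and
	 * acked below; it is re-enabled at the end of the handler. */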
4560 Serge 2222
 
5060 serge 2223
	/* Find, clear, then process each source of interrupt */
2224
 
6084 serge 2225
	ret = gen8_gt_irq_handler(dev_priv, master_ctl);
4560 Serge 2226
 
2227
	if (master_ctl & GEN8_DE_MISC_IRQ) {
2228
		tmp = I915_READ(GEN8_DE_MISC_IIR);
5060 serge 2229
		if (tmp) {
2230
			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2231
			ret = IRQ_HANDLED;
6084 serge 2232
			if (tmp & GEN8_DE_MISC_GSE)
2233
				intel_opregion_asle_intr(dev);
5060 serge 2234
			else
6084 serge 2235
				DRM_ERROR("Unexpected DE Misc interrupt\n");
5060 serge 2236
		}
4560 Serge 2237
		else
2238
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2239
	}
2240
 
2241
	if (master_ctl & GEN8_DE_PORT_IRQ) {
2242
		tmp = I915_READ(GEN8_DE_PORT_IIR);
5060 serge 2243
		if (tmp) {
6084 serge 2244
			bool found = false;
2245
			u32 hotplug_trigger = 0;
2246
 
2247
			if (IS_BROXTON(dev_priv))
2248
				hotplug_trigger = tmp & BXT_DE_PORT_HOTPLUG_MASK;
2249
			else if (IS_BROADWELL(dev_priv))
2250
				hotplug_trigger = tmp & GEN8_PORT_DP_A_HOTPLUG;
2251
 
5060 serge 2252
			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2253
			ret = IRQ_HANDLED;
5354 serge 2254
 
6084 serge 2255
			if (tmp & aux_mask) {
2256
				dp_aux_irq_handler(dev);
2257
				found = true;
2258
			}
2259
 
2260
			if (hotplug_trigger) {
2261
				if (IS_BROXTON(dev))
2262
					bxt_hpd_irq_handler(dev, hotplug_trigger, hpd_bxt);
2263
				else
2264
					ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_bdw);
2265
				found = true;
2266
			}
2267
 
2268
			if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
2269
				gmbus_irq_handler(dev);
2270
				found = true;
2271
			}
2272
 
2273
			if (!found)
2274
				DRM_ERROR("Unexpected DE Port interrupt\n");
5060 serge 2275
		}
4560 Serge 2276
		else
2277
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2278
	}
2279
 
5354 serge 2280
	for_each_pipe(dev_priv, pipe) {
2281
		uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
4560 Serge 2282
 
2283
		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2284
			continue;
2285
 
2286
		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
5060 serge 2287
		if (pipe_iir) {
2288
			ret = IRQ_HANDLED;
2289
			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
4560 Serge 2290
 
6088 serge 2291
			if (pipe_iir & GEN8_PIPE_VBLANK &&
2292
			    intel_pipe_handle_vblank(dev, pipe))
2293
			/*	intel_check_page_flip(dev, pipe)*/;
4560 Serge 2294
 
6084 serge 2295
			if (INTEL_INFO(dev_priv)->gen >= 9)
5354 serge 2296
				flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2297
			else
2298
				flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2299
 
2300
 
6084 serge 2301
			if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2302
				hsw_pipe_crc_irq_handler(dev, pipe);
4560 Serge 2303
 
5354 serge 2304
			if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
2305
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
2306
								    pipe);
4560 Serge 2307
 
5354 serge 2308
 
6084 serge 2309
			if (INTEL_INFO(dev_priv)->gen >= 9)
5354 serge 2310
				fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2311
			else
2312
				fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2313
 
2314
			if (fault_errors)
6084 serge 2315
				DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2316
					  pipe_name(pipe),
2317
					  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
4560 Serge 2318
		} else
2319
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2320
	}
2321
 
6084 serge 2322
	if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
2323
	    master_ctl & GEN8_DE_PCH_IRQ) {
4560 Serge 2324
		/*
2325
		 * FIXME(BDW): Assume for now that the new interrupt handling
2326
		 * scheme also closed the SDE interrupt handling race we've seen
2327
		 * on older pch-split platforms. But this needs testing.
2328
		 */
2329
		u32 pch_iir = I915_READ(SDEIIR);
2330
		if (pch_iir) {
2331
			I915_WRITE(SDEIIR, pch_iir);
2332
			ret = IRQ_HANDLED;
6084 serge 2333
 
2334
			if (HAS_PCH_SPT(dev_priv))
2335
				spt_irq_handler(dev, pch_iir);
2336
			else
2337
				cpt_irq_handler(dev, pch_iir);
5060 serge 2338
		} else
2339
			DRM_ERROR("The master control interrupt lied (SDE)!\n");
2340
 
4560 Serge 2341
	}
2342
 
6084 serge 2343
	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2344
	POSTING_READ_FW(GEN8_MASTER_IRQ);
4560 Serge 2345
 
2346
	return ret;
2347
}
2348
 
4104 Serge 2349
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2350
			       bool reset_completed)
3746 Serge 2351
{
5060 serge 2352
	struct intel_engine_cs *ring;
4104 Serge 2353
	int i;
3031 serge 2354
 
4104 Serge 2355
	/*
2356
	 * Notify all waiters for GPU completion events that reset state has
2357
	 * been changed, and that they need to restart their wait after
2358
	 * checking for potential errors (and bail out to drop locks if there is
2359
	 * a gpu reset pending so that i915_error_work_func can acquire them).
2360
	 */
3031 serge 2361
 
4104 Serge 2362
	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2363
	for_each_ring(ring, dev_priv, i)
2364
		wake_up_all(&ring->irq_queue);
3031 serge 2365
 
2366
 
4104 Serge 2367
	/*
2368
	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2369
	 * reset state is cleared.
2370
	 */
2371
	if (reset_completed)
2372
		wake_up_all(&dev_priv->gpu_error.reset_queue);
3031 serge 2373
}
2374
 
2375
/**
6084 serge 2376
 * i915_reset_and_wakeup - do process context error handling work
2377
 * @dev: drm device
3031 serge 2378
 *
4104 Serge 2379
 * Fire an error uevent so userspace can see that a hang or error
2380
 * was detected.
3031 serge 2381
 */
6084 serge 2382
static void i915_reset_and_wakeup(struct drm_device *dev)
3031 serge 2383
{
6084 serge 2384
	struct drm_i915_private *dev_priv = to_i915(dev);
2385
	struct i915_gpu_error *error = &dev_priv->gpu_error;
4104 Serge 2386
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2387
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2388
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2389
	int ret = 0; /* i915_reset() below is stubbed out in this port; treat as success */
3031 serge 2390
 
4104 Serge 2391
	/*
2392
	 * Note that there's only one work item which does gpu resets, so we
2393
	 * need not worry about concurrent gpu resets potentially incrementing
2394
	 * error->reset_counter twice. We only need to take care of another
2395
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
2396
	 * quick check for that is good enough: schedule_work ensures the
2397
	 * correct ordering between hang detection and this work item, and since
2398
	 * the reset in-progress bit is only ever set by code outside of this
2399
	 * work we don't need to worry about any other races.
2400
	 */
2401
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2402
		DRM_DEBUG_DRIVER("resetting chip\n");
6084 serge 2403
		intel_runtime_pm_get(dev_priv);
3031 serge 2404
 
4104 Serge 2405
		/*
2406
		 * All state reset _must_ be completed before we update the
2407
		 * reset counter, for otherwise waiters might miss the reset
2408
		 * pending state and not properly drop locks, resulting in
2409
		 * deadlocks with the reset work.
2410
		 */
4560 Serge 2411
//		ret = i915_reset(dev);
3031 serge 2412
 
6084 serge 2413
//		intel_finish_reset(dev);
3031 serge 2414
 
6084 serge 2415
		intel_runtime_pm_put(dev_priv);
2416
 
4104 Serge 2417
		if (ret == 0) {
2418
			/*
2419
			 * After all the gem state is reset, increment the reset
2420
			 * counter and wake up everyone waiting for the reset to
2421
			 * complete.
2422
			 *
2423
			 * Since unlock operations are a one-sided barrier only,
2424
			 * we need to insert a barrier here to order any seqno
2425
			 * updates before
2426
			 * the counter increment.
2427
			 */
6084 serge 2428
			smp_mb__before_atomic();
4104 Serge 2429
			atomic_inc(&dev_priv->gpu_error.reset_counter);
3031 serge 2430
 
4104 Serge 2431
		} else {
6088 serge 2432
			atomic_or(I915_WEDGED, &error->reset_counter);
2433
		}
3031 serge 2434
 
4104 Serge 2435
		/*
2436
		 * Note: The wake_up also serves as a memory barrier so that
2437
		 * waiters see the updated value of the reset counter atomic_t.
2438
		 */
2439
		i915_error_wake_up(dev_priv, true);
3031 serge 2440
	}
2441
}
2442
 
2443
static void i915_report_and_clear_eir(struct drm_device *dev)
2444
{
2445
	struct drm_i915_private *dev_priv = dev->dev_private;
2446
	uint32_t instdone[I915_NUM_INSTDONE_REG];
2447
	u32 eir = I915_READ(EIR);
2448
	int pipe, i;
2449
 
2450
	if (!eir)
2451
		return;
2452
 
2453
	pr_err("render error detected, EIR: 0x%08x\n", eir);
2454
 
2455
	i915_get_extra_instdone(dev, instdone);
2456
 
2457
	if (IS_G4X(dev)) {
2458
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2459
			u32 ipeir = I915_READ(IPEIR_I965);
2460
 
2461
			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2462
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2463
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
2464
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2465
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2466
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2467
			I915_WRITE(IPEIR_I965, ipeir);
2468
			POSTING_READ(IPEIR_I965);
2469
		}
2470
		if (eir & GM45_ERROR_PAGE_TABLE) {
2471
			u32 pgtbl_err = I915_READ(PGTBL_ER);
2472
			pr_err("page table error\n");
2473
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2474
			I915_WRITE(PGTBL_ER, pgtbl_err);
2475
			POSTING_READ(PGTBL_ER);
2476
		}
2477
	}
2478
 
2479
	if (!IS_GEN2(dev)) {
2480
		if (eir & I915_ERROR_PAGE_TABLE) {
2481
			u32 pgtbl_err = I915_READ(PGTBL_ER);
2482
			pr_err("page table error\n");
2483
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2484
			I915_WRITE(PGTBL_ER, pgtbl_err);
2485
			POSTING_READ(PGTBL_ER);
2486
		}
2487
	}
2488
 
2489
	if (eir & I915_ERROR_MEMORY_REFRESH) {
2490
		pr_err("memory refresh error:\n");
5354 serge 2491
		for_each_pipe(dev_priv, pipe)
3031 serge 2492
			pr_err("pipe %c stat: 0x%08x\n",
2493
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2494
		/* pipestat has already been acked */
2495
	}
2496
	if (eir & I915_ERROR_INSTRUCTION) {
2497
		pr_err("instruction error\n");
2498
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
2499
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
2500
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2501
		if (INTEL_INFO(dev)->gen < 4) {
2502
			u32 ipeir = I915_READ(IPEIR);
2503
 
2504
			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
2505
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
2506
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
2507
			I915_WRITE(IPEIR, ipeir);
2508
			POSTING_READ(IPEIR);
2509
		} else {
2510
			u32 ipeir = I915_READ(IPEIR_I965);
2511
 
2512
			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2513
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2514
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2515
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2516
			I915_WRITE(IPEIR_I965, ipeir);
2517
			POSTING_READ(IPEIR_I965);
2518
		}
2519
	}
2520
 
2521
	I915_WRITE(EIR, eir);
2522
	POSTING_READ(EIR);
2523
	eir = I915_READ(EIR);
2524
	if (eir) {
2525
		/*
2526
		 * some errors might have become stuck,
2527
		 * mask them.
2528
		 */
2529
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2530
		I915_WRITE(EMR, I915_READ(EMR) | eir);
2531
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2532
	}
2533
}
2534
 
2535
/**
6084 serge 2536
 * i915_handle_error - handle a gpu error
3031 serge 2537
 * @dev: drm device
2538
 *
6084 serge 2539
 * Do some basic checking of register state at error time and
3031 serge 2540
 * dump it to the syslog.  Also call i915_capture_error_state() to make
2541
 * sure we get a record and make it available in debugfs.  Fire a uevent
2542
 * so userspace knows something bad happened (should trigger collection
2543
 * of a ring dump etc.).
2544
 */
5060 serge 2545
void i915_handle_error(struct drm_device *dev, bool wedged,
2546
		       const char *fmt, ...)
3031 serge 2547
{
2548
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 2549
	va_list args;
2550
	char error_msg[80];
3031 serge 2551
 
5060 serge 2552
	va_start(args, fmt);
2553
	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2554
	va_end(args);
2555
 
4560 Serge 2556
//	i915_capture_error_state(dev);
3031 serge 2557
	i915_report_and_clear_eir(dev);
2558
 
2559
	if (wedged) {
6084 serge 2560
		atomic_or(I915_RESET_IN_PROGRESS_FLAG,
3480 Serge 2561
				&dev_priv->gpu_error.reset_counter);
3031 serge 2562
 
2563
		/*
6084 serge 2564
		 * Wakeup waiting processes so that the reset function
2565
		 * i915_reset_and_wakeup doesn't deadlock trying to grab
2566
		 * various locks. By bumping the reset counter first, the woken
4104 Serge 2567
		 * processes will see a reset in progress and back off,
2568
		 * releasing their locks and then wait for the reset completion.
2569
		 * We must do this for _all_ gpu waiters that might hold locks
2570
		 * that the reset work needs to acquire.
2571
		 *
2572
		 * Note: The wake_up serves as the required memory barrier to
2573
		 * ensure that the waiters see the updated value of the reset
2574
		 * counter atomic_t.
3031 serge 2575
		 */
4104 Serge 2576
		i915_error_wake_up(dev_priv, false);
3031 serge 2577
	}
2578
 
6084 serge 2579
	i915_reset_and_wakeup(dev);
3031 serge 2580
}
2581
 
2582
/* Called from drm generic code, passed 'crtc' which
2583
 * we use as a pipe index
2584
 */
6084 serge 2585
static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
3031 serge 2586
{
5060 serge 2587
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 2588
	unsigned long irqflags;
2589
 
2590
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2591
	if (INTEL_INFO(dev)->gen >= 4)
2592
		i915_enable_pipestat(dev_priv, pipe,
5060 serge 2593
				     PIPE_START_VBLANK_INTERRUPT_STATUS);
3031 serge 2594
	else
2595
		i915_enable_pipestat(dev_priv, pipe,
5060 serge 2596
				     PIPE_VBLANK_INTERRUPT_STATUS);
3031 serge 2597
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2598
 
2599
	return 0;
2600
}
2601
 
6084 serge 2602
static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
3031 serge 2603
{
5060 serge 2604
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 2605
	unsigned long irqflags;
4104 Serge 2606
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
4560 Serge 2607
						     DE_PIPE_VBLANK(pipe);
3031 serge 2608
 
2609
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4104 Serge 2610
	ironlake_enable_display_irq(dev_priv, bit);
3031 serge 2611
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2612
 
2613
	return 0;
2614
}
2615
 
6084 serge 2616
static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
3031 serge 2617
{
5060 serge 2618
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 2619
	unsigned long irqflags;
2620
 
2621
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2622
	i915_enable_pipestat(dev_priv, pipe,
5060 serge 2623
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
3031 serge 2624
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2625
 
2626
	return 0;
2627
}
2628
 
6084 serge 2629
static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
4560 Serge 2630
{
2631
	struct drm_i915_private *dev_priv = dev->dev_private;
2632
	unsigned long irqflags;
2633
 
2634
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2635
	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2636
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2637
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2638
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2639
	return 0;
2640
}
2641
 
3031 serge 2642
/* Called from drm generic code, passed 'crtc' which
2643
 * we use as a pipe index
2644
 */
6084 serge 2645
static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
3031 serge 2646
{
5060 serge 2647
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 2648
	unsigned long irqflags;
2649
 
2650
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2651
	i915_disable_pipestat(dev_priv, pipe,
5060 serge 2652
			      PIPE_VBLANK_INTERRUPT_STATUS |
2653
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
3031 serge 2654
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2655
}
2656
 
6084 serge 2657
static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
3031 serge 2658
{
5060 serge 2659
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 2660
	unsigned long irqflags;
4104 Serge 2661
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
4560 Serge 2662
						     DE_PIPE_VBLANK(pipe);
3031 serge 2663
 
2664
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4104 Serge 2665
	ironlake_disable_display_irq(dev_priv, bit);
3031 serge 2666
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2667
}
2668
 
6084 serge 2669
static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
3031 serge 2670
{
5060 serge 2671
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 2672
	unsigned long irqflags;
2673
 
2674
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2675
	i915_disable_pipestat(dev_priv, pipe,
5060 serge 2676
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
3031 serge 2677
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2678
}
2679
 
6084 serge 2680
static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
4560 Serge 2681
{
2682
	struct drm_i915_private *dev_priv = dev->dev_private;
2683
	unsigned long irqflags;
2684
 
2685
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2686
	dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2687
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2688
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2689
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2690
}
2691
 
4104 Serge 2692
static bool
5060 serge 2693
ring_idle(struct intel_engine_cs *ring, u32 seqno)
2351 Serge 2694
{
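	/* Idle means no requests are outstanding, or the hardware seqno has
	 * already passed the last request submitted to this ring. */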
4104 Serge 2695
	return (list_empty(&ring->request_list) ||
6084 serge 2696
		i915_seqno_passed(seqno, ring->last_submitted_seqno));
4104 Serge 2697
}
2351 Serge 2698
 
5060 serge 2699
static bool
2700
ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
4104 Serge 2701
{
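	/* gen8+ uses the dedicated MI_SEMAPHORE_WAIT opcode (0x1c in the bits
	 * above bit 23); earlier gens encode the wait as MI_SEMAPHORE_MBOX
	 * with the compare/register flags set. */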
5060 serge 2702
	if (INTEL_INFO(dev)->gen >= 8) {
2703
		return (ipehr >> 23) == 0x1c;
2704
	} else {
2705
		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2706
		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2707
				 MI_SEMAPHORE_REGISTER);
2708
	}
2709
}
2710
 
2711
static struct intel_engine_cs *
2712
semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
2713
{
4104 Serge 2714
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
5060 serge 2715
	struct intel_engine_cs *signaller;
2716
	int i;
2351 Serge 2717
 
5060 serge 2718
	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2719
		for_each_ring(signaller, dev_priv, i) {
2720
			if (ring == signaller)
2721
				continue;
2722
 
2723
			if (offset == signaller->semaphore.signal_ggtt[ring->id])
2724
				return signaller;
2725
		}
2726
	} else {
2727
		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2728
 
2729
		for_each_ring(signaller, dev_priv, i) {
2730
			if(ring == signaller)
2731
				continue;
2732
 
2733
			if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
2734
				return signaller;
2735
		}
2736
	}
2737
 
2738
	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2739
		  ring->id, ipehr, offset);
2740
 
2741
	return NULL;
2742
}
2743
 
2744
static struct intel_engine_cs *
2745
semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2746
{
2747
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
2748
	u32 cmd, ipehr, head;
2749
	u64 offset = 0;
2750
	int i, backwards;
2751
 
6084 serge 2752
	/*
2753
	 * This function does not support execlist mode - any attempt to
2754
	 * proceed further into this function will result in a kernel panic
2755
	 * when dereferencing ring->buffer, which is not set up in execlist
2756
	 * mode.
2757
	 *
2758
	 * The correct way of doing it would be to derive the currently
2759
	 * executing ring buffer from the current context, which is derived
2760
	 * from the currently running request. Unfortunately, to get the
2761
	 * current request we would have to grab the struct_mutex before doing
2762
	 * anything else, which would be ill-advised since some other thread
2763
	 * might have grabbed it already and managed to hang itself, causing
2764
	 * the hang checker to deadlock.
2765
	 *
2766
	 * Therefore, this function does not support execlist mode in its
2767
	 * current form. Just return NULL and move on.
2768
	 */
2769
	if (ring->buffer == NULL)
2770
		return NULL;
2771
 
4104 Serge 2772
	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
5060 serge 2773
	if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
4104 Serge 2774
		return NULL;
2351 Serge 2775
 
5060 serge 2776
	/*
2777
	 * HEAD is likely pointing to the dword after the actual command,
2778
	 * so scan backwards until we find the MBOX. But limit it to just 3
2779
	 * or 4 dwords depending on the semaphore wait command size.
2780
	 * Note that we don't care about ACTHD here since that might
2781
	 * point at a batch, and semaphores are always emitted into the
2782
	 * ringbuffer itself.
4104 Serge 2783
	 */
5060 serge 2784
	head = I915_READ_HEAD(ring) & HEAD_ADDR;
2785
	backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
2786
 
2787
	for (i = backwards; i; --i) {
2788
		/*
2789
		 * Be paranoid and presume the hw has gone off into the wild -
2790
		 * our ring is smaller than what the hardware (and hence
2791
		 * HEAD_ADDR) allows. Also handles wrap-around.
2792
		 */
2793
		head &= ring->buffer->size - 1;
2794
 
2795
		/* This here seems to blow up */
2796
		cmd = ioread32(ring->buffer->virtual_start + head);
4104 Serge 2797
		if (cmd == ipehr)
2798
			break;
2351 Serge 2799
 
5060 serge 2800
		head -= 4;
2801
	}
2802
 
2803
	if (!i)
6084 serge 2804
		return NULL;
2351 Serge 2805
 
5060 serge 2806
	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2807
	if (INTEL_INFO(ring->dev)->gen >= 8) {
2808
		offset = ioread32(ring->buffer->virtual_start + head + 12);
2809
		offset <<= 32;
2810
		offset |= ioread32(ring->buffer->virtual_start + head + 8);
2811
	}
2812
	return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
4104 Serge 2813
}
2351 Serge 2814
 
5060 serge 2815
static int semaphore_passed(struct intel_engine_cs *ring)
4104 Serge 2816
{
2817
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
5060 serge 2818
	struct intel_engine_cs *signaller;
2819
	u32 seqno;
4104 Serge 2820
 
5060 serge 2821
	ring->hangcheck.deadlock++;
4104 Serge 2822
 
2823
	signaller = semaphore_waits_for(ring, &seqno);
5060 serge 2824
	if (signaller == NULL)
4104 Serge 2825
		return -1;
2826
 
5060 serge 2827
	/* Prevent pathological recursion due to driver bugs */
2828
	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
2829
		return -1;
2830
 
2831
	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2832
		return 1;
2833
 
4104 Serge 2834
	/* cursory check for an unkickable deadlock */
5060 serge 2835
	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2836
	    semaphore_passed(signaller) < 0)
4104 Serge 2837
		return -1;
2838
 
5060 serge 2839
	return 0;
4104 Serge 2840
}
2841
 
2842
static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2843
{
5060 serge 2844
	struct intel_engine_cs *ring;
4104 Serge 2845
	int i;
2846
 
2847
	for_each_ring(ring, dev_priv, i)
5060 serge 2848
		ring->hangcheck.deadlock = 0;
4104 Serge 2849
}
2850
 
2851
static enum intel_ring_hangcheck_action
5060 serge 2852
ring_stuck(struct intel_engine_cs *ring, u64 acthd)
4104 Serge 2853
{
2854
	struct drm_device *dev = ring->dev;
2855
	struct drm_i915_private *dev_priv = dev->dev_private;
2856
	u32 tmp;
2857
 
5060 serge 2858
	if (acthd != ring->hangcheck.acthd) {
2859
		if (acthd > ring->hangcheck.max_acthd) {
2860
			ring->hangcheck.max_acthd = acthd;
6084 serge 2861
			return HANGCHECK_ACTIVE;
5060 serge 2862
		}
4104 Serge 2863
 
5060 serge 2864
		return HANGCHECK_ACTIVE_LOOP;
2865
	}
2866
 
4104 Serge 2867
	if (IS_GEN2(dev))
2868
		return HANGCHECK_HUNG;
2869
 
2870
	/* Is the chip hanging on a WAIT_FOR_EVENT?
2871
	 * If so we can simply poke the RB_WAIT bit
2872
	 * and break the hang. This should work on
2873
	 * all but the second generation chipsets.
2874
	 */
2875
	tmp = I915_READ_CTL(ring);
2876
	if (tmp & RING_WAIT) {
5060 serge 2877
		i915_handle_error(dev, false,
2878
				  "Kicking stuck wait on %s",
6084 serge 2879
				  ring->name);
4104 Serge 2880
		I915_WRITE_CTL(ring, tmp);
2881
		return HANGCHECK_KICK;
2882
	}
2883
 
2884
	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2885
		switch (semaphore_passed(ring)) {
2886
		default:
2887
			return HANGCHECK_HUNG;
2888
		case 1:
5060 serge 2889
			i915_handle_error(dev, false,
2890
					  "Kicking stuck semaphore on %s",
6084 serge 2891
					  ring->name);
4104 Serge 2892
			I915_WRITE_CTL(ring, tmp);
2893
			return HANGCHECK_KICK;
2894
		case 0:
2895
			return HANGCHECK_WAIT;
2896
		}
2897
	}
2898
 
2899
	return HANGCHECK_HUNG;
2900
}
2901
 
6084 serge 2902
/*
4104 Serge 2903
 * This is called when the chip hasn't reported back with completed
2904
 * batchbuffers in a long time. We keep track of per-ring seqno progress and
2905
 * if there is no progress, the hangcheck score for that ring is increased.
2906
 * Further, acthd is inspected to see if the ring is stuck. In the stuck case
2907
 * we kick the ring. If we see no progress on three subsequent calls
2908
 * we assume the chip is wedged and try to fix it by resetting the chip.
2909
 */
6084 serge 2910
static void i915_hangcheck_elapsed(struct work_struct *work)
4104 Serge 2911
{
6084 serge 2912
	struct drm_i915_private *dev_priv =
2913
		container_of(work, typeof(*dev_priv),
2914
			     gpu_error.hangcheck_work.work);
2915
	struct drm_device *dev = dev_priv->dev;
5060 serge 2916
	struct intel_engine_cs *ring;
4104 Serge 2917
	int i;
2918
	int busy_count = 0, rings_hung = 0;
2919
	bool stuck[I915_NUM_RINGS] = { 0 };
2920
#define BUSY 1
2921
#define KICK 5
2922
#define HUNG 20
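/* Scores accumulate across hangcheck ticks; a ring is reported hung once its
 * score reaches HANGCHECK_SCORE_RING_HUNG in the loop at the bottom. */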
2923
 
5060 serge 2924
	if (!i915.enable_hangcheck)
4104 Serge 2925
		return;
2926
 
2927
	for_each_ring(ring, dev_priv, i) {
5060 serge 2928
		u64 acthd;
2929
		u32 seqno;
4104 Serge 2930
		bool busy = true;
2931
 
2932
		semaphore_clear_deadlocks(dev_priv);
2933
 
2934
		seqno = ring->get_seqno(ring, false);
2935
		acthd = intel_ring_get_active_head(ring);
2936
 
2937
		if (ring->hangcheck.seqno == seqno) {
2938
			if (ring_idle(ring, seqno)) {
5060 serge 2939
				ring->hangcheck.action = HANGCHECK_IDLE;
2940
 
6084 serge 2941
				if (waitqueue_active(&ring->irq_queue)) {
4104 Serge 2942
					/* Issue a wake-up to catch stuck h/w. */
6084 serge 2943
					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
2944
						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2945
							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2946
								  ring->name);
2947
						else
2948
							DRM_INFO("Fake missed irq on %s\n",
2949
								 ring->name);
2950
						wake_up_all(&ring->irq_queue);
2951
					}
2952
					/* Safeguard against driver failure */
2953
					ring->hangcheck.score += BUSY;
2954
				} else
4104 Serge 2955
					busy = false;
2956
			} else {
2957
				/* We always increment the hangcheck score
2958
				 * if the ring is busy and still processing
2959
				 * the same request, so that no single request
2960
				 * can run indefinitely (such as a chain of
2961
				 * batches). The only time we do not increment
2962
				 * the hangcheck score on this ring, if this
2963
				 * ring is in a legitimate wait for another
2964
				 * ring. In that case the waiting ring is a
2965
				 * victim and we want to be sure we catch the
2966
				 * right culprit. Then every time we do kick
2967
				 * the ring, add a small increment to the
2968
				 * score so that we can catch a batch that is
2969
				 * being repeatedly kicked and so responsible
2970
				 * for stalling the machine.
2971
				 */
2972
				ring->hangcheck.action = ring_stuck(ring,
2973
								    acthd);
2974
 
2975
				switch (ring->hangcheck.action) {
4560 Serge 2976
				case HANGCHECK_IDLE:
4104 Serge 2977
				case HANGCHECK_WAIT:
5060 serge 2978
				case HANGCHECK_ACTIVE:
4104 Serge 2979
					break;
5060 serge 2980
				case HANGCHECK_ACTIVE_LOOP:
4104 Serge 2981
					ring->hangcheck.score += BUSY;
2982
					break;
2983
				case HANGCHECK_KICK:
2984
					ring->hangcheck.score += KICK;
2985
					break;
2986
				case HANGCHECK_HUNG:
2987
					ring->hangcheck.score += HUNG;
2988
					stuck[i] = true;
2989
					break;
2990
				}
2991
			}
2992
		} else {
4560 Serge 2993
			ring->hangcheck.action = HANGCHECK_ACTIVE;
2994
 
4104 Serge 2995
			/* Gradually reduce the count so that we catch DoS
2996
			 * attempts across multiple batches.
2997
			 */
2998
			if (ring->hangcheck.score > 0)
2999
				ring->hangcheck.score--;
5060 serge 3000
 
3001
			ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
4104 Serge 3002
		}
3003
 
3004
		ring->hangcheck.seqno = seqno;
3005
		ring->hangcheck.acthd = acthd;
3006
		busy_count += busy;
3007
	}
3008
 
3009
	for_each_ring(ring, dev_priv, i) {
5060 serge 3010
		if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
4104 Serge 3011
			DRM_INFO("%s on %s\n",
6084 serge 3012
				 stuck[i] ? "stuck" : "no progress",
3013
				 ring->name);
4104 Serge 3014
			rings_hung++;
3015
		}
3016
	}
3017
 
3018
//   if (rings_hung)
3019
//       return i915_handle_error(dev, true);
3020
 
3021
}
6088 serge 3022
 
5060 serge 3023
static void ibx_irq_reset(struct drm_device *dev)
3024
{
3025
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 3026
 
5060 serge 3027
	if (HAS_PCH_NOP(dev))
3028
		return;
3029
 
3030
	GEN5_IRQ_RESET(SDE);
3031
 
3032
	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3033
		I915_WRITE(SERR_INT, 0xffffffff);
3034
}
3035
 
3036
/*
3037
 * SDEIER is also touched by the interrupt handler to work around missed PCH
3038
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3039
 * instead we unconditionally enable all PCH interrupt sources here, but then
3040
 * only unmask them as needed with SDEIMR.
3041
 *
3042
 * This function needs to be called before interrupts are enabled.
3043
 */
3044
static void ibx_irq_pre_postinstall(struct drm_device *dev)
4104 Serge 3045
{
3046
	struct drm_i915_private *dev_priv = dev->dev_private;
3047
 
3746 Serge 3048
	if (HAS_PCH_NOP(dev))
3049
		return;
3050
 
5060 serge 3051
	WARN_ON(I915_READ(SDEIER) != 0);
3746 Serge 3052
	I915_WRITE(SDEIER, 0xffffffff);
4104 Serge 3053
	POSTING_READ(SDEIER);
2351 Serge 3054
}
3055
 
5060 serge 3056
static void gen5_gt_irq_reset(struct drm_device *dev)
4104 Serge 3057
{
3058
	struct drm_i915_private *dev_priv = dev->dev_private;
3059
 
5060 serge 3060
	GEN5_IRQ_RESET(GT);
3061
	if (INTEL_INFO(dev)->gen >= 6)
3062
		GEN5_IRQ_RESET(GEN6_PM);
4104 Serge 3063
}
3064
 
3065
/* drm_dma.h hooks
3066
*/
5060 serge 3067
static void ironlake_irq_reset(struct drm_device *dev)
4104 Serge 3068
{
5060 serge 3069
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 3070
 
5060 serge 3071
	I915_WRITE(HWSTAM, 0xffffffff);
4104 Serge 3072
 
5060 serge 3073
	GEN5_IRQ_RESET(DE);
3074
	if (IS_GEN7(dev))
3075
		I915_WRITE(GEN7_ERR_INT, 0xffffffff);
4104 Serge 3076
 
5060 serge 3077
	gen5_gt_irq_reset(dev);
4104 Serge 3078
 
5060 serge 3079
	ibx_irq_reset(dev);
4104 Serge 3080
}
3081
 
5354 serge 3082
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3083
{
3084
	enum pipe pipe;
3085
 
6084 serge 3086
	i915_hotplug_interrupt_update(dev_priv, 0xFFFFFFFF, 0);
5354 serge 3087
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3088
 
3089
	for_each_pipe(dev_priv, pipe)
3090
		I915_WRITE(PIPESTAT(pipe), 0xffff);
3091
 
3092
	GEN5_IRQ_RESET(VLV_);
3093
}
3094
 
3031 serge 3095
static void valleyview_irq_preinstall(struct drm_device *dev)
3096
{
5060 serge 3097
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 3098
 
3099
	/* VLV magic */
3100
	I915_WRITE(VLV_IMR, 0);
3101
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3102
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3103
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3104
 
5060 serge 3105
	gen5_gt_irq_reset(dev);
4104 Serge 3106
 
5354 serge 3107
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3031 serge 3108
 
5354 serge 3109
	vlv_display_irq_reset(dev_priv);
3031 serge 3110
}
3111
 
5060 serge 3112
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
4560 Serge 3113
{
5060 serge 3114
	GEN8_IRQ_RESET_NDX(GT, 0);
3115
	GEN8_IRQ_RESET_NDX(GT, 1);
3116
	GEN8_IRQ_RESET_NDX(GT, 2);
3117
	GEN8_IRQ_RESET_NDX(GT, 3);
3118
}
3119
 
3120
static void gen8_irq_reset(struct drm_device *dev)
3121
{
4560 Serge 3122
	struct drm_i915_private *dev_priv = dev->dev_private;
3123
	int pipe;
3124
 
3125
	I915_WRITE(GEN8_MASTER_IRQ, 0);
3126
	POSTING_READ(GEN8_MASTER_IRQ);
3127
 
5060 serge 3128
	gen8_gt_irq_reset(dev_priv);
4560 Serge 3129
 
5354 serge 3130
	for_each_pipe(dev_priv, pipe)
3131
		if (intel_display_power_is_enabled(dev_priv,
6084 serge 3132
						   POWER_DOMAIN_PIPE(pipe)))
3133
			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
4560 Serge 3134
 
5060 serge 3135
	GEN5_IRQ_RESET(GEN8_DE_PORT_);
3136
	GEN5_IRQ_RESET(GEN8_DE_MISC_);
3137
	GEN5_IRQ_RESET(GEN8_PCU_);
4560 Serge 3138
 
6084 serge 3139
	if (HAS_PCH_SPLIT(dev))
3140
		ibx_irq_reset(dev);
5060 serge 3141
}
4560 Serge 3142
 
6084 serge 3143
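/*
 * Re-program the DE pipe interrupt registers for the pipes covered by
 * pipe_mask after their power well has been enabled again, since those
 * registers lose their contents while the well is powered down.
 */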
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3144
				     unsigned int pipe_mask)
5060 serge 3145
{
5354 serge 3146
	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
4560 Serge 3147
 
5354 serge 3148
	spin_lock_irq(&dev_priv->irq_lock);
6084 serge 3149
	if (pipe_mask & 1 << PIPE_A)
3150
		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
3151
				  dev_priv->de_irq_mask[PIPE_A],
3152
				  ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
3153
	if (pipe_mask & 1 << PIPE_B)
3154
		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
3155
				  dev_priv->de_irq_mask[PIPE_B],
3156
				  ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
3157
	if (pipe_mask & 1 << PIPE_C)
3158
		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
3159
				  dev_priv->de_irq_mask[PIPE_C],
3160
				  ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
5354 serge 3161
	spin_unlock_irq(&dev_priv->irq_lock);
5060 serge 3162
}
3163
 
3164
static void cherryview_irq_preinstall(struct drm_device *dev)
3165
{
3166
	struct drm_i915_private *dev_priv = dev->dev_private;
3167
 
3168
	I915_WRITE(GEN8_MASTER_IRQ, 0);
3169
	POSTING_READ(GEN8_MASTER_IRQ);
3170
 
3171
	gen8_gt_irq_reset(dev_priv);
3172
 
3173
	GEN5_IRQ_RESET(GEN8_PCU_);
3174
 
3175
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3176
 
5354 serge 3177
	vlv_display_irq_reset(dev_priv);
4560 Serge 3178
}
3179
 
6084 serge 3180
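/*
 * Build the bitmask of hotplug interrupt bits to unmask: walk all encoders
 * and OR in the per-platform hpd[] bit for every pin currently marked
 * HPD_ENABLED.
 */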
static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
3181
				  const u32 hpd[HPD_NUM_PINS])
3182
{
3183
	struct drm_i915_private *dev_priv = to_i915(dev);
3184
	struct intel_encoder *encoder;
3185
	u32 enabled_irqs = 0;
3186
 
3187
	for_each_intel_encoder(dev, encoder)
3188
		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3189
			enabled_irqs |= hpd[encoder->hpd_pin];
3190
 
3191
	return enabled_irqs;
3192
}
3193
 
3746 Serge 3194
static void ibx_hpd_irq_setup(struct drm_device *dev)
3195
{
5060 serge 3196
	struct drm_i915_private *dev_priv = dev->dev_private;
6084 serge 3197
	u32 hotplug_irqs, hotplug, enabled_irqs;
3746 Serge 3198
 
3199
	if (HAS_PCH_IBX(dev)) {
4104 Serge 3200
		hotplug_irqs = SDE_HOTPLUG_MASK;
6084 serge 3201
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
3746 Serge 3202
	} else {
4104 Serge 3203
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
6084 serge 3204
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
3746 Serge 3205
	}
3206
 
4104 Serge 3207
	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3746 Serge 3208
 
3209
	/*
6084 serge 3210
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
3211
	 * duration to 2ms (which is the minimum in the Display Port spec).
3212
	 * The pulse duration bits are reserved on LPT+.
3213
	 */
2351 Serge 3214
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3215
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3216
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3217
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3218
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
6084 serge 3219
	/*
3220
	 * When CPU and PCH are on the same package, port A
3221
	 * HPD must be enabled in both north and south.
3222
	 */
3223
	if (HAS_PCH_LPT_LP(dev))
3224
		hotplug |= PORTA_HOTPLUG_ENABLE;
2351 Serge 3225
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3226
}
3227
 
6084 serge 3228
static void spt_hpd_irq_setup(struct drm_device *dev)
3229
{
3230
	struct drm_i915_private *dev_priv = dev->dev_private;
3231
	u32 hotplug_irqs, hotplug, enabled_irqs;
3232
 
3233
	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3234
	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);
3235
 
3236
	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3237
 
3238
	/* Enable digital hotplug on the PCH */
3239
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3240
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
3241
		PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
3242
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3243
 
3244
	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3245
	hotplug |= PORTE_HOTPLUG_ENABLE;
3246
	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3247
}
3248
 
3249
static void ilk_hpd_irq_setup(struct drm_device *dev)
3250
{
3251
	struct drm_i915_private *dev_priv = dev->dev_private;
3252
	u32 hotplug_irqs, hotplug, enabled_irqs;
3253
 
3254
	if (INTEL_INFO(dev)->gen >= 8) {
3255
		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3256
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw);
3257
 
3258
		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3259
	} else if (INTEL_INFO(dev)->gen >= 7) {
3260
		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3261
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb);
3262
 
3263
		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3264
	} else {
3265
		hotplug_irqs = DE_DP_A_HOTPLUG;
3266
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk);
3267
 
3268
		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3269
	}
3270
 
3271
	/*
3272
	 * Enable digital hotplug on the CPU, and configure the DP short pulse
3273
	 * duration to 2ms (which is the minimum in the Display Port spec).
3274
	 * The pulse duration bits are reserved on HSW+.
3275
	 */
3276
	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3277
	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3278
	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
3279
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3280
 
3281
	ibx_hpd_irq_setup(dev);
3282
}
3283
 
3284
static void bxt_hpd_irq_setup(struct drm_device *dev)
3285
{
3286
	struct drm_i915_private *dev_priv = dev->dev_private;
3287
	u32 hotplug_irqs, hotplug, enabled_irqs;
3288
 
3289
	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt);
3290
	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3291
 
3292
	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3293
 
3294
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3295
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
3296
		PORTA_HOTPLUG_ENABLE;
3297
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3298
}
3299
 
3480 Serge 3300
static void ibx_irq_postinstall(struct drm_device *dev)
3301
{
5060 serge 3302
	struct drm_i915_private *dev_priv = dev->dev_private;
3480 Serge 3303
	u32 mask;
3304
 
3746 Serge 3305
	if (HAS_PCH_NOP(dev))
3306
		return;
3307
 
5060 serge 3308
	if (HAS_PCH_IBX(dev))
3309
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3310
	else
3311
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
4104 Serge 3312
 
6084 serge 3313
	gen5_assert_iir_is_zero(dev_priv, SDEIIR);
3480 Serge 3314
	I915_WRITE(SDEIMR, ~mask);
3315
}
3316
 
4104 Serge 3317
static void gen5_gt_irq_postinstall(struct drm_device *dev)
2351 Serge 3318
{
4104 Serge 3319
	struct drm_i915_private *dev_priv = dev->dev_private;
3320
	u32 pm_irqs, gt_irqs;
2351 Serge 3321
 
4104 Serge 3322
	pm_irqs = gt_irqs = 0;
2351 Serge 3323
 
3324
	dev_priv->gt_irq_mask = ~0;
4560 Serge 3325
	if (HAS_L3_DPF(dev)) {
4104 Serge 3326
		/* L3 parity interrupt is always unmasked. */
4560 Serge 3327
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3328
		gt_irqs |= GT_PARITY_ERROR(dev);
4104 Serge 3329
	}
2351 Serge 3330
 
4104 Serge 3331
	gt_irqs |= GT_RENDER_USER_INTERRUPT;
3332
	if (IS_GEN5(dev)) {
3333
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3334
			   ILK_BSD_USER_INTERRUPT;
3335
	} else {
3336
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3337
	}
2351 Serge 3338
 
5060 serge 3339
	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
2351 Serge 3340
 
4104 Serge 3341
	if (INTEL_INFO(dev)->gen >= 6) {
5354 serge 3342
		/*
3343
		 * RPS interrupts will get enabled/disabled on demand when RPS
3344
		 * itself is enabled/disabled.
3345
		 */
4104 Serge 3346
		if (HAS_VEBOX(dev))
3347
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3348
 
3349
		dev_priv->pm_irq_mask = 0xffffffff;
5060 serge 3350
		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
6084 serge 3351
	}
2351 Serge 3352
}
3353
 
4104 Serge 3354
static int ironlake_irq_postinstall(struct drm_device *dev)
3031 serge 3355
{
5060 serge 3356
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 3357
	u32 display_mask, extra_mask;
3358
 
3359
	if (INTEL_INFO(dev)->gen >= 7) {
3360
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3361
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
6084 serge 3362
				DE_PLANEB_FLIP_DONE_IVB |
5060 serge 3363
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
4104 Serge 3364
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
6084 serge 3365
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3366
			      DE_DP_A_HOTPLUG_IVB);
4104 Serge 3367
	} else {
3368
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3369
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
4560 Serge 3370
				DE_AUX_CHANNEL_A |
3371
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3372
				DE_POISON);
6084 serge 3373
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3374
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3375
			      DE_DP_A_HOTPLUG);
4104 Serge 3376
	}
3377
 
3031 serge 3378
	dev_priv->irq_mask = ~display_mask;
3379
 
5060 serge 3380
	I915_WRITE(HWSTAM, 0xeffe);
3031 serge 3381
 
5060 serge 3382
	ibx_irq_pre_postinstall(dev);
3383
 
3384
	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3385
 
4104 Serge 3386
	gen5_gt_irq_postinstall(dev);
3031 serge 3387
 
4104 Serge 3388
	ibx_irq_postinstall(dev);
3031 serge 3389
 
4104 Serge 3390
	if (IS_IRONLAKE_M(dev)) {
3391
		/* Enable PCU event interrupts
3392
		 *
3393
		 * spinlocking not required here for correctness since interrupt
3394
		 * setup is guaranteed to run in single-threaded context. But we
3395
		 * need it to make the assert_spin_locked happy. */
5354 serge 3396
		spin_lock_irq(&dev_priv->irq_lock);
4104 Serge 3397
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
5354 serge 3398
		spin_unlock_irq(&dev_priv->irq_lock);
4104 Serge 3399
	}
3031 serge 3400
 
3401
	return 0;
3402
}
3403
 
5060 serge 3404
static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3405
{
3406
	u32 pipestat_mask;
3407
	u32 iir_mask;
5354 serge 3408
	enum pipe pipe;
5060 serge 3409
 
3410
	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3411
			PIPE_FIFO_UNDERRUN_STATUS;
3412
 
5354 serge 3413
	for_each_pipe(dev_priv, pipe)
3414
		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
5060 serge 3415
	POSTING_READ(PIPESTAT(PIPE_A));
3416
 
3417
	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3418
			PIPE_CRC_DONE_INTERRUPT_STATUS;
3419
 
5354 serge 3420
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3421
	for_each_pipe(dev_priv, pipe)
3422
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
5060 serge 3423
 
3424
	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3425
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3426
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
5354 serge 3427
	if (IS_CHERRYVIEW(dev_priv))
3428
		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
5060 serge 3429
	dev_priv->irq_mask &= ~iir_mask;
3430
 
3431
	I915_WRITE(VLV_IIR, iir_mask);
3432
	I915_WRITE(VLV_IIR, iir_mask);
5354 serge 3433
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
5060 serge 3434
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
5354 serge 3435
	POSTING_READ(VLV_IMR);
5060 serge 3436
}
3437
 
3438
static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3439
{
3440
	u32 pipestat_mask;
3441
	u32 iir_mask;
5354 serge 3442
	enum pipe pipe;
5060 serge 3443
 
3444
	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3445
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3446
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
5354 serge 3447
	if (IS_CHERRYVIEW(dev_priv))
3448
		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
5060 serge 3449
 
3450
	dev_priv->irq_mask |= iir_mask;
5354 serge 3451
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
5060 serge 3452
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3453
	I915_WRITE(VLV_IIR, iir_mask);
3454
	I915_WRITE(VLV_IIR, iir_mask);
3455
	POSTING_READ(VLV_IIR);
3456
 
3457
	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3458
			PIPE_CRC_DONE_INTERRUPT_STATUS;
3459
 
5354 serge 3460
	i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3461
	for_each_pipe(dev_priv, pipe)
3462
		i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
5060 serge 3463
 
3464
	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3465
			PIPE_FIFO_UNDERRUN_STATUS;
5354 serge 3466
 
3467
	for_each_pipe(dev_priv, pipe)
3468
		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
5060 serge 3469
	POSTING_READ(PIPESTAT(PIPE_A));
3470
}
3471
 
3472
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3473
{
3474
	assert_spin_locked(&dev_priv->irq_lock);
3475
 
3476
	if (dev_priv->display_irqs_enabled)
3477
		return;
3478
 
3479
	dev_priv->display_irqs_enabled = true;
3480
 
5354 serge 3481
	if (intel_irqs_enabled(dev_priv))
5060 serge 3482
		valleyview_display_irqs_install(dev_priv);
3483
}
3484
 
3485
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3486
{
3487
	assert_spin_locked(&dev_priv->irq_lock);
3488
 
3489
	if (!dev_priv->display_irqs_enabled)
3490
		return;
3491
 
3492
	dev_priv->display_irqs_enabled = false;
3493
 
5354 serge 3494
	if (intel_irqs_enabled(dev_priv))
5060 serge 3495
		valleyview_display_irqs_uninstall(dev_priv);
3496
}
3497
 
5354 serge 3498
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3031 serge 3499
{
5060 serge 3500
	dev_priv->irq_mask = ~0;
3031 serge 3501
 
6084 serge 3502
	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3480 Serge 3503
	POSTING_READ(PORT_HOTPLUG_EN);
3504
 
5354 serge 3505
	I915_WRITE(VLV_IIR, 0xffffffff);
3506
	I915_WRITE(VLV_IIR, 0xffffffff);
3507
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3031 serge 3508
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
5354 serge 3509
	POSTING_READ(VLV_IMR);
3031 serge 3510
 
4104 Serge 3511
	/* Interrupt setup is already guaranteed to be single-threaded; this is
3512
	 * just to make the assert_spin_locked check happy. */
5354 serge 3513
	spin_lock_irq(&dev_priv->irq_lock);
5060 serge 3514
	if (dev_priv->display_irqs_enabled)
3515
		valleyview_display_irqs_install(dev_priv);
5354 serge 3516
	spin_unlock_irq(&dev_priv->irq_lock);
3517
}
3031 serge 3518
 
5354 serge 3519
static int valleyview_irq_postinstall(struct drm_device *dev)
3520
{
3521
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 3522
 
5354 serge 3523
	vlv_display_irq_postinstall(dev_priv);
3524
 
4104 Serge 3525
	gen5_gt_irq_postinstall(dev);
3243 Serge 3526
 
3031 serge 3527
	/* ack & enable invalid PTE error interrupts */
3528
#if 0 /* FIXME: add support to irq handler for checking these bits */
3529
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3530
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3531
#endif
3532
 
3533
	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3480 Serge 3534
 
3535
	return 0;
3536
}
3537
 
4560 Serge 3538
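/*
 * The four GT interrupt banks are grouped per engine pair: 0 = RCS/BCS,
 * 1 = VCS1/VCS2, 2 = PM/RPS (left masked here and enabled on demand by RPS),
 * 3 = VECS.
 */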
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3539
{
3540
	/* These are interrupts we'll toggle with the ring mask register */
3541
	uint32_t gt_interrupts[] = {
3542
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
5354 serge 3543
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
4560 Serge 3544
			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
5354 serge 3545
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3546
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
4560 Serge 3547
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
5354 serge 3548
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3549
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3550
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
4560 Serge 3551
		0,
5354 serge 3552
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3553
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
4560 Serge 3554
		};
3555
 
5060 serge 3556
	dev_priv->pm_irq_mask = 0xffffffff;
5354 serge 3557
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3558
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3559
	/*
3560
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
3561
	 * is enabled/disabled.
3562
	 */
3563
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
3564
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
4560 Serge 3565
}
3566
 
3567
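/*
 * Display engine (DE) setup: the pipe fault/flip-done bits and the AUX/port
 * bits differ between gen8, gen9+ and Broxton. Only pipes whose power well
 * is currently enabled are programmed here; the remaining pipes are picked
 * up by gen8_irq_power_well_post_enable() when their well comes back up.
 */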
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3568
{
5354 serge 3569
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3570
	uint32_t de_pipe_enables;
6084 serge 3571
	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
3572
	u32 de_port_enables;
3573
	enum pipe pipe;
5354 serge 3574
 
6084 serge 3575
	if (INTEL_INFO(dev_priv)->gen >= 9) {
5354 serge 3576
		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3577
				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
6084 serge 3578
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3579
				  GEN9_AUX_CHANNEL_D;
3580
		if (IS_BROXTON(dev_priv))
3581
			de_port_masked |= BXT_DE_PORT_GMBUS;
3582
	} else {
5354 serge 3583
		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
6084 serge 3584
				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3585
	}
5354 serge 3586
 
3587
	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
6084 serge 3588
					   GEN8_PIPE_FIFO_UNDERRUN;
5354 serge 3589
 
6084 serge 3590
	de_port_enables = de_port_masked;
3591
	if (IS_BROXTON(dev_priv))
3592
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3593
	else if (IS_BROADWELL(dev_priv))
3594
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
3595
 
4560 Serge 3596
	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3597
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3598
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3599
 
5354 serge 3600
	for_each_pipe(dev_priv, pipe)
3601
		if (intel_display_power_is_enabled(dev_priv,
5060 serge 3602
				POWER_DOMAIN_PIPE(pipe)))
3603
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3604
					  dev_priv->de_irq_mask[pipe],
6084 serge 3605
					  de_pipe_enables);
4560 Serge 3606
 
6084 serge 3607
	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
4560 Serge 3608
}
3609
 
3610
static int gen8_irq_postinstall(struct drm_device *dev)
3611
{
3612
	struct drm_i915_private *dev_priv = dev->dev_private;
3613
 
6084 serge 3614
	if (HAS_PCH_SPLIT(dev))
3615
		ibx_irq_pre_postinstall(dev);
5060 serge 3616
 
4560 Serge 3617
	gen8_gt_irq_postinstall(dev_priv);
3618
	gen8_de_irq_postinstall(dev_priv);
3619
 
6084 serge 3620
	if (HAS_PCH_SPLIT(dev))
3621
		ibx_irq_postinstall(dev);
4560 Serge 3622
 
3623
	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3624
	POSTING_READ(GEN8_MASTER_IRQ);
3625
 
3626
	return 0;
3627
}
3628
 
5060 serge 3629
static int cherryview_irq_postinstall(struct drm_device *dev)
4560 Serge 3630
{
3631
	struct drm_i915_private *dev_priv = dev->dev_private;
3632
 
5354 serge 3633
	vlv_display_irq_postinstall(dev_priv);
4560 Serge 3634
 
5060 serge 3635
	gen8_gt_irq_postinstall(dev_priv);
4560 Serge 3636
 
5060 serge 3637
	I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3638
	POSTING_READ(GEN8_MASTER_IRQ);
4560 Serge 3639
 
5060 serge 3640
	return 0;
3641
}
4560 Serge 3642
 
5060 serge 3643
static void gen8_irq_uninstall(struct drm_device *dev)
3644
{
3645
	struct drm_i915_private *dev_priv = dev->dev_private;
4560 Serge 3646
 
5060 serge 3647
	if (!dev_priv)
3648
		return;
3649
 
3650
	gen8_irq_reset(dev);
4560 Serge 3651
}
3652
 
5354 serge 3653
static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
3654
{
3655
	/* Interrupt setup is already guaranteed to be single-threaded; this is
3656
	 * just to make the assert_spin_locked check happy. */
3657
	spin_lock_irq(&dev_priv->irq_lock);
3658
	if (dev_priv->display_irqs_enabled)
3659
		valleyview_display_irqs_uninstall(dev_priv);
3660
	spin_unlock_irq(&dev_priv->irq_lock);
3661
 
3662
	vlv_display_irq_reset(dev_priv);
3663
 
3664
	dev_priv->irq_mask = ~0;
3665
}
3666
 
3031 serge 3667
static void valleyview_irq_uninstall(struct drm_device *dev)
3668
{
5060 serge 3669
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 3670
 
3671
	if (!dev_priv)
3672
		return;
3673
 
5060 serge 3674
	I915_WRITE(VLV_MASTER_IER, 0);
4293 Serge 3675
 
5354 serge 3676
	gen5_gt_irq_reset(dev);
3031 serge 3677
 
3678
	I915_WRITE(HWSTAM, 0xffffffff);
5060 serge 3679
 
5354 serge 3680
	vlv_display_irq_uninstall(dev_priv);
3031 serge 3681
}
3682
 
5060 serge 3683
static void cherryview_irq_uninstall(struct drm_device *dev)
3031 serge 3684
{
5060 serge 3685
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 3686
 
3687
	if (!dev_priv)
3688
		return;
3689
 
5060 serge 3690
	I915_WRITE(GEN8_MASTER_IRQ, 0);
3691
	POSTING_READ(GEN8_MASTER_IRQ);
4293 Serge 3692
 
5354 serge 3693
	gen8_gt_irq_reset(dev_priv);
3031 serge 3694
 
5354 serge 3695
	GEN5_IRQ_RESET(GEN8_PCU_);
3031 serge 3696
 
5354 serge 3697
	vlv_display_irq_uninstall(dev_priv);
5060 serge 3698
}
3699
 
3700
static void ironlake_irq_uninstall(struct drm_device *dev)
3701
{
3702
	struct drm_i915_private *dev_priv = dev->dev_private;
3703
 
3704
	if (!dev_priv)
3746 Serge 3705
		return;
3706
 
5060 serge 3707
	ironlake_irq_reset(dev);
3031 serge 3708
}
3709
 
3710
#if 0
3711
static void i8xx_irq_preinstall(struct drm_device * dev)
3712
{
5060 serge 3713
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 3714
	int pipe;
3715
 
5354 serge 3716
	for_each_pipe(dev_priv, pipe)
3031 serge 3717
		I915_WRITE(PIPESTAT(pipe), 0);
3718
	I915_WRITE16(IMR, 0xffff);
3719
	I915_WRITE16(IER, 0x0);
3720
	POSTING_READ16(IER);
3721
}
3722
 
3723
static int i8xx_irq_postinstall(struct drm_device *dev)
3724
{
5060 serge 3725
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 3726
 
3727
	I915_WRITE16(EMR,
3728
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3729
 
3730
	/* Unmask the interrupts that we always want on. */
3731
	dev_priv->irq_mask =
3732
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3733
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3734
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
6084 serge 3735
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3031 serge 3736
	I915_WRITE16(IMR, dev_priv->irq_mask);
3737
 
3738
	I915_WRITE16(IER,
3739
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3740
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3741
		     I915_USER_INTERRUPT);
3742
	POSTING_READ16(IER);
3743
 
4560 Serge 3744
	/* Interrupt setup is already guaranteed to be single-threaded; this is
3745
	 * just to make the assert_spin_locked check happy. */
5354 serge 3746
	spin_lock_irq(&dev_priv->irq_lock);
5060 serge 3747
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3748
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
5354 serge 3749
	spin_unlock_irq(&dev_priv->irq_lock);
4560 Serge 3750
 
3031 serge 3751
	return 0;
3752
}
3753
 
3746 Serge 3754
/*
3755
 * Returns true when a page flip has completed.
3756
 */
3757
static bool i8xx_handle_vblank(struct drm_device *dev,
4560 Serge 3758
			       int plane, int pipe, u32 iir)
3746 Serge 3759
{
5060 serge 3760
	struct drm_i915_private *dev_priv = dev->dev_private;
4560 Serge 3761
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3746 Serge 3762
 
6084 serge 3763
	if (!intel_pipe_handle_vblank(dev, pipe))
3764
		return false;
3746 Serge 3765
 
3766
	if ((iir & flip_pending) == 0)
5354 serge 3767
		goto check_page_flip;
3746 Serge 3768
 
3769
	/* We detect FlipDone by looking for the change in PendingFlip from '1'
3770
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3771
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3772
	 * the flip is completed (no longer pending). Since this doesn't raise
3773
	 * an interrupt per se, we watch for the change at vblank.
3774
	 */
3775
	if (I915_READ16(ISR) & flip_pending)
5354 serge 3776
		goto check_page_flip;
3746 Serge 3777
 
6084 serge 3778
//   intel_prepare_page_flip(dev, plane);
3779
//   intel_finish_page_flip(dev, pipe);
5354 serge 3780
	return true;
3746 Serge 3781
 
5354 serge 3782
check_page_flip:
6084 serge 3783
//   intel_check_page_flip(dev, pipe);
5354 serge 3784
	return false;
3746 Serge 3785
}
3786
 
3243 Serge 3787
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3031 serge 3788
{
5060 serge 3789
	struct drm_device *dev = arg;
3790
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 3791
	u16 iir, new_iir;
3792
	u32 pipe_stats[2];
3793
	int pipe;
3794
	u16 flip_mask =
3795
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3796
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3797
 
6084 serge 3798
	if (!intel_irqs_enabled(dev_priv))
3799
		return IRQ_NONE;
3800
 
3031 serge 3801
	iir = I915_READ16(IIR);
3802
	if (iir == 0)
3803
		return IRQ_NONE;
3804
 
3805
	while (iir & ~flip_mask) {
3806
		/* Can't rely on pipestat interrupt bit in iir as it might
3807
		 * have been cleared after the pipestat interrupt was received.
3808
		 * It doesn't set the bit in iir again, but it still produces
3809
		 * interrupts (for non-MSI).
3810
		 */
5354 serge 3811
		spin_lock(&dev_priv->irq_lock);
4126 Serge 3812
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
5354 serge 3813
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3031 serge 3814
 
5354 serge 3815
		for_each_pipe(dev_priv, pipe) {
3031 serge 3816
			int reg = PIPESTAT(pipe);
3817
			pipe_stats[pipe] = I915_READ(reg);
3818
 
3819
			/*
3820
			 * Clear the PIPE*STAT regs before the IIR
3821
			 */
5060 serge 3822
			if (pipe_stats[pipe] & 0x8000ffff)
3031 serge 3823
				I915_WRITE(reg, pipe_stats[pipe]);
6084 serge 3824
		}
5354 serge 3825
		spin_unlock(&dev_priv->irq_lock);
3031 serge 3826
 
3827
		I915_WRITE16(IIR, iir & ~flip_mask);
3828
		new_iir = I915_READ16(IIR); /* Flush posted writes */
3829
 
3830
		if (iir & I915_USER_INTERRUPT)
6084 serge 3831
			notify_ring(&dev_priv->ring[RCS]);
3031 serge 3832
 
5354 serge 3833
		for_each_pipe(dev_priv, pipe) {
4560 Serge 3834
			int plane = pipe;
3835
			if (HAS_FBC(dev))
3836
				plane = !plane;
3031 serge 3837
 
4560 Serge 3838
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3839
			    i8xx_handle_vblank(dev, plane, pipe, iir))
3840
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3031 serge 3841
 
4560 Serge 3842
			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3843
				i9xx_pipe_crc_irq_handler(dev, pipe);
5060 serge 3844
 
5354 serge 3845
			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3846
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
3847
								    pipe);
4560 Serge 3848
		}
3849
 
3031 serge 3850
		iir = new_iir;
3851
	}
3852
 
3853
	return IRQ_HANDLED;
3854
}
3855
 
3856
static void i8xx_irq_uninstall(struct drm_device * dev)
3857
{
5060 serge 3858
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 3859
	int pipe;
3860
 
5354 serge 3861
	for_each_pipe(dev_priv, pipe) {
3031 serge 3862
		/* Clear enable bits; then clear status bits */
3863
		I915_WRITE(PIPESTAT(pipe), 0);
3864
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3865
	}
3866
	I915_WRITE16(IMR, 0xffff);
3867
	I915_WRITE16(IER, 0x0);
3868
	I915_WRITE16(IIR, I915_READ16(IIR));
3869
}
3870
 
3871
#endif
3872
 
3873
static void i915_irq_preinstall(struct drm_device * dev)
3874
{
5060 serge 3875
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 3876
	int pipe;
3877
 
3878
	if (I915_HAS_HOTPLUG(dev)) {
6084 serge 3879
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3031 serge 3880
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3881
	}
3882
 
3883
	I915_WRITE16(HWSTAM, 0xeffe);
5354 serge 3884
	for_each_pipe(dev_priv, pipe)
3031 serge 3885
		I915_WRITE(PIPESTAT(pipe), 0);
3886
	I915_WRITE(IMR, 0xffffffff);
3887
	I915_WRITE(IER, 0x0);
3888
	POSTING_READ(IER);
3889
}
3890
 
3891
static int i915_irq_postinstall(struct drm_device *dev)
3892
{
5060 serge 3893
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 3894
	u32 enable_mask;
3895
 
3896
	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3897
 
3898
	/* Unmask the interrupts that we always want on. */
3899
	dev_priv->irq_mask =
3900
		~(I915_ASLE_INTERRUPT |
3901
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3902
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3903
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
6084 serge 3904
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3031 serge 3905
 
3906
	enable_mask =
3907
		I915_ASLE_INTERRUPT |
3908
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3909
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3910
		I915_USER_INTERRUPT;
3480 Serge 3911
 
3031 serge 3912
	if (I915_HAS_HOTPLUG(dev)) {
6084 serge 3913
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3480 Serge 3914
		POSTING_READ(PORT_HOTPLUG_EN);
3915
 
3031 serge 3916
		/* Enable in IER... */
3917
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3918
		/* and unmask in IMR */
3919
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3920
	}
3921
 
3922
	I915_WRITE(IMR, dev_priv->irq_mask);
3923
	I915_WRITE(IER, enable_mask);
3924
	POSTING_READ(IER);
3925
 
4126 Serge 3926
	i915_enable_asle_pipestat(dev);
3480 Serge 3927
 
4560 Serge 3928
	/* Interrupt setup is already guaranteed to be single-threaded; this is
3929
	 * just to make the assert_spin_locked check happy. */
5354 serge 3930
	spin_lock_irq(&dev_priv->irq_lock);
5060 serge 3931
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3932
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
5354 serge 3933
	spin_unlock_irq(&dev_priv->irq_lock);
4560 Serge 3934
 
3480 Serge 3935
	return 0;
3936
}
3937
 
3746 Serge 3938
/*
3939
 * Returns true when a page flip has completed.
3940
 */
3941
static bool i915_handle_vblank(struct drm_device *dev,
3942
			       int plane, int pipe, u32 iir)
3480 Serge 3943
{
5060 serge 3944
	struct drm_i915_private *dev_priv = dev->dev_private;
3746 Serge 3945
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3480 Serge 3946
 
6088 serge 3947
	if (!intel_pipe_handle_vblank(dev, pipe))
3948
		return false;
3480 Serge 3949
 
3746 Serge 3950
	if ((iir & flip_pending) == 0)
5354 serge 3951
		goto check_page_flip;
3480 Serge 3952
 
3746 Serge 3953
	/* We detect FlipDone by looking for the change in PendingFlip from '1'
3954
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3955
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3956
	 * the flip is completed (no longer pending). Since this doesn't raise
3957
	 * an interrupt per se, we watch for the change at vblank.
3958
	 */
3959
	if (I915_READ(ISR) & flip_pending)
5354 serge 3960
		goto check_page_flip;
3746 Serge 3961
 
5354 serge 3962
	return true;
3746 Serge 3963
 
5354 serge 3964
check_page_flip:
3965
	return false;
3031 serge 3966
}
3967
 
3243 Serge 3968
static irqreturn_t i915_irq_handler(int irq, void *arg)
3031 serge 3969
{
5060 serge 3970
	struct drm_device *dev = arg;
3971
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 3972
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
3973
	u32 flip_mask =
3974
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3975
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3976
	int pipe, ret = IRQ_NONE;
3977
 
6084 serge 3978
	if (!intel_irqs_enabled(dev_priv))
3979
		return IRQ_NONE;
3980
 
3031 serge 3981
	iir = I915_READ(IIR);
3982
	do {
3983
		bool irq_received = (iir & ~flip_mask) != 0;
3984
		bool blc_event = false;
3985
 
3986
		/* Can't rely on pipestat interrupt bit in iir as it might
3987
		 * have been cleared after the pipestat interrupt was received.
3988
		 * It doesn't set the bit in iir again, but it still produces
3989
		 * interrupts (for non-MSI).
3990
		 */
5354 serge 3991
		spin_lock(&dev_priv->irq_lock);
4126 Serge 3992
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
5354 serge 3993
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3031 serge 3994
 
5354 serge 3995
		for_each_pipe(dev_priv, pipe) {
3031 serge 3996
			int reg = PIPESTAT(pipe);
3997
			pipe_stats[pipe] = I915_READ(reg);
3998
 
3999
			/* Clear the PIPE*STAT regs before the IIR */
4000
			if (pipe_stats[pipe] & 0x8000ffff) {
4001
				I915_WRITE(reg, pipe_stats[pipe]);
4002
				irq_received = true;
4003
			}
4004
		}
5354 serge 4005
		spin_unlock(&dev_priv->irq_lock);
3031 serge 4006
 
4007
		if (!irq_received)
4008
			break;
4009
 
4010
		/* Consume port.  Then clear IIR or we'll miss events */
5060 serge 4011
		if (I915_HAS_HOTPLUG(dev) &&
4012
		    iir & I915_DISPLAY_PORT_INTERRUPT)
4013
			i9xx_hpd_irq_handler(dev);
3031 serge 4014
 
4015
		I915_WRITE(IIR, iir & ~flip_mask);
4016
		new_iir = I915_READ(IIR); /* Flush posted writes */
4017
 
4018
		if (iir & I915_USER_INTERRUPT)
6084 serge 4019
			notify_ring(&dev_priv->ring[RCS]);
3031 serge 4020
 
5354 serge 4021
		for_each_pipe(dev_priv, pipe) {
3031 serge 4022
			int plane = pipe;
4560 Serge 4023
			if (HAS_FBC(dev))
3031 serge 4024
				plane = !plane;
4025
 
3746 Serge 4026
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4027
			    i915_handle_vblank(dev, plane, pipe, iir))
4028
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4029
 
3031 serge 4030
			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4031
				blc_event = true;
4560 Serge 4032
 
4033
			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4034
				i9xx_pipe_crc_irq_handler(dev, pipe);
5060 serge 4035
 
5354 serge 4036
			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4037
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
4038
								    pipe);
3031 serge 4039
		}
4040
 
4126 Serge 4041
		if (blc_event || (iir & I915_ASLE_INTERRUPT))
4042
			intel_opregion_asle_intr(dev);
3031 serge 4043
 
4044
		/* With MSI, interrupts are only generated when iir
4045
		 * transitions from zero to nonzero.  If another bit got
4046
		 * set while we were handling the existing iir bits, then
4047
		 * we would never get another interrupt.
4048
		 *
4049
		 * This is fine on non-MSI as well, as if we hit this path
4050
		 * we avoid exiting the interrupt handler only to generate
4051
		 * another one.
4052
		 *
4053
		 * Note that for MSI this could cause a stray interrupt report
4054
		 * if an interrupt landed in the time between writing IIR and
4055
		 * the posting read.  This should be rare enough to never
4056
		 * trigger the 99% of 100,000 interrupts test for disabling
4057
		 * stray interrupts.
4058
		 */
4059
		ret = IRQ_HANDLED;
4060
		iir = new_iir;
4061
	} while (iir & ~flip_mask);
4062
 
4063
	return ret;
4064
}
4065
 
4066
static void i915_irq_uninstall(struct drm_device * dev)
4067
{
5060 serge 4068
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 4069
	int pipe;
4070
 
4071
	if (I915_HAS_HOTPLUG(dev)) {
6084 serge 4072
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3031 serge 4073
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4074
	}
4075
 
4076
	I915_WRITE16(HWSTAM, 0xffff);
5354 serge 4077
	for_each_pipe(dev_priv, pipe) {
3031 serge 4078
		/* Clear enable bits; then clear status bits */
4079
		I915_WRITE(PIPESTAT(pipe), 0);
4080
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4081
	}
4082
	I915_WRITE(IMR, 0xffffffff);
4083
	I915_WRITE(IER, 0x0);
4084
 
4085
	I915_WRITE(IIR, I915_READ(IIR));
4086
}
4087
 
4088
static void i965_irq_preinstall(struct drm_device * dev)
4089
{
5060 serge 4090
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 4091
	int pipe;
4092
 
6084 serge 4093
	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3031 serge 4094
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4095
 
4096
	I915_WRITE(HWSTAM, 0xeffe);
5354 serge 4097
	for_each_pipe(dev_priv, pipe)
3031 serge 4098
		I915_WRITE(PIPESTAT(pipe), 0);
4099
	I915_WRITE(IMR, 0xffffffff);
4100
	I915_WRITE(IER, 0x0);
4101
	POSTING_READ(IER);
4102
}
4103
 
4104
static int i965_irq_postinstall(struct drm_device *dev)
4105
{
5060 serge 4106
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 4107
	u32 enable_mask;
4108
	u32 error_mask;
4109
 
4110
	/* Unmask the interrupts that we always want on. */
4111
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4112
			       I915_DISPLAY_PORT_INTERRUPT |
4113
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4114
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4115
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4116
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4117
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4118
 
4119
	enable_mask = ~dev_priv->irq_mask;
3746 Serge 4120
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4121
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3031 serge 4122
	enable_mask |= I915_USER_INTERRUPT;
4123
 
4124
	if (IS_G4X(dev))
4125
		enable_mask |= I915_BSD_USER_INTERRUPT;
4126
 
4104 Serge 4127
	/* Interrupt setup is already guaranteed to be single-threaded; this is
4128
	 * just to make the assert_spin_locked check happy. */
5354 serge 4129
	spin_lock_irq(&dev_priv->irq_lock);
5060 serge 4130
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4131
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4132
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
5354 serge 4133
	spin_unlock_irq(&dev_priv->irq_lock);
3031 serge 4134
 
4135
	/*
4136
	 * Enable some error detection, note the instruction error mask
4137
	 * bit is reserved, so we leave it masked.
4138
	 */
4139
	if (IS_G4X(dev)) {
4140
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
4141
			       GM45_ERROR_MEM_PRIV |
4142
			       GM45_ERROR_CP_PRIV |
4143
			       I915_ERROR_MEMORY_REFRESH);
4144
	} else {
4145
		error_mask = ~(I915_ERROR_PAGE_TABLE |
4146
			       I915_ERROR_MEMORY_REFRESH);
4147
	}
4148
	I915_WRITE(EMR, error_mask);
4149
 
4150
	I915_WRITE(IMR, dev_priv->irq_mask);
4151
	I915_WRITE(IER, enable_mask);
4152
	POSTING_READ(IER);
4153
 
6084 serge 4154
	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3480 Serge 4155
	POSTING_READ(PORT_HOTPLUG_EN);
4156
 
4126 Serge 4157
	i915_enable_asle_pipestat(dev);
3480 Serge 4158
 
4159
	return 0;
4160
}
4161
 
3746 Serge 4162
static void i915_hpd_irq_setup(struct drm_device *dev)
3480 Serge 4163
{
5060 serge 4164
	struct drm_i915_private *dev_priv = dev->dev_private;
3480 Serge 4165
	u32 hotplug_en;
4166
 
4104 Serge 4167
	assert_spin_locked(&dev_priv->irq_lock);
4168
 
3031 serge 4169
	/* Note HDMI and DP share hotplug bits */
6084 serge 4170
	/* enable bits are the same for all generations */
4171
	hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915);
4172
	/* Programming the CRT detection parameters tends
4173
	   to generate a spurious hotplug event about three
4174
	   seconds later.  So just do it once.
4175
	*/
4176
	if (IS_G4X(dev))
4177
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4178
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
3480 Serge 4179
 
3031 serge 4180
	/* Ignore TV since it's buggy */
6084 serge 4181
	i915_hotplug_interrupt_update_locked(dev_priv,
4182
					     HOTPLUG_INT_EN_MASK |
4183
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4184
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4185
					     hotplug_en);
3031 serge 4186
}
4187
 
3243 Serge 4188
static irqreturn_t i965_irq_handler(int irq, void *arg)
3031 serge 4189
{
5060 serge 4190
	struct drm_device *dev = arg;
4191
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 4192
	u32 iir, new_iir;
4193
	u32 pipe_stats[I915_MAX_PIPES];
4194
	int ret = IRQ_NONE, pipe;
3746 Serge 4195
	u32 flip_mask =
4196
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4197
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3031 serge 4198
 
6084 serge 4199
	if (!intel_irqs_enabled(dev_priv))
4200
		return IRQ_NONE;
4201
 
3031 serge 4202
	iir = I915_READ(IIR);
4203
 
4204
	for (;;) {
5060 serge 4205
		bool irq_received = (iir & ~flip_mask) != 0;
3031 serge 4206
		bool blc_event = false;
4207
 
4208
		/* Can't rely on pipestat interrupt bit in iir as it might
4209
		 * have been cleared after the pipestat interrupt was received.
4210
		 * It doesn't set the bit in iir again, but it still produces
4211
		 * interrupts (for non-MSI).
4212
		 */
5354 serge 4213
		spin_lock(&dev_priv->irq_lock);
4126 Serge 4214
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
5354 serge 4215
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3031 serge 4216
 
5354 serge 4217
		for_each_pipe(dev_priv, pipe) {
3031 serge 4218
			int reg = PIPESTAT(pipe);
4219
			pipe_stats[pipe] = I915_READ(reg);
4220
 
4221
			/*
4222
			 * Clear the PIPE*STAT regs before the IIR
4223
			 */
4224
			if (pipe_stats[pipe] & 0x8000ffff) {
4225
				I915_WRITE(reg, pipe_stats[pipe]);
5060 serge 4226
				irq_received = true;
3031 serge 4227
			}
4228
		}
5354 serge 4229
		spin_unlock(&dev_priv->irq_lock);
3031 serge 4230
 
4231
		if (!irq_received)
4232
			break;
4233
 
4234
		ret = IRQ_HANDLED;
4235
 
4236
		/* Consume port.  Then clear IIR or we'll miss events */
5060 serge 4237
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
4238
			i9xx_hpd_irq_handler(dev);
3031 serge 4239
 
3746 Serge 4240
		I915_WRITE(IIR, iir & ~flip_mask);
3031 serge 4241
		new_iir = I915_READ(IIR); /* Flush posted writes */
4242
 
4243
		if (iir & I915_USER_INTERRUPT)
6084 serge 4244
			notify_ring(&dev_priv->ring[RCS]);
3031 serge 4245
		if (iir & I915_BSD_USER_INTERRUPT)
6084 serge 4246
			notify_ring(&dev_priv->ring[VCS]);
3031 serge 4247
 
5354 serge 4248
		for_each_pipe(dev_priv, pipe) {
3746 Serge 4249
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4250
			    i915_handle_vblank(dev, pipe, pipe, iir))
4251
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
3031 serge 4252
 
4253
			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4254
				blc_event = true;
4560 Serge 4255
 
4256
			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4257
				i9xx_pipe_crc_irq_handler(dev, pipe);
5060 serge 4258
 
5354 serge 4259
			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4260
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
3031 serge 4261
		}
4262
 
4126 Serge 4263
		if (blc_event || (iir & I915_ASLE_INTERRUPT))
4264
			intel_opregion_asle_intr(dev);
3031 serge 4265
 
3480 Serge 4266
		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4267
			gmbus_irq_handler(dev);
4268
 
3031 serge 4269
		/* With MSI, interrupts are only generated when iir
4270
		 * transitions from zero to nonzero.  If another bit got
4271
		 * set while we were handling the existing iir bits, then
4272
		 * we would never get another interrupt.
4273
		 *
4274
		 * This is fine on non-MSI as well, as if we hit this path
4275
		 * we avoid exiting the interrupt handler only to generate
4276
		 * another one.
4277
		 *
4278
		 * Note that for MSI this could cause a stray interrupt report
4279
		 * if an interrupt landed in the time between writing IIR and
4280
		 * the posting read.  This should be rare enough to never
4281
		 * trigger the 99% of 100,000 interrupts test for disabling
4282
		 * stray interrupts.
4283
		 */
4284
		iir = new_iir;
4285
	}
4286
 
4287
	return ret;
4288
}
4289
 
4290
static void i965_irq_uninstall(struct drm_device * dev)
4291
{
5060 serge 4292
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 4293
	int pipe;
4294
 
4295
	if (!dev_priv)
4296
		return;
4297
 
6084 serge 4298
	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3031 serge 4299
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4300
 
4301
	I915_WRITE(HWSTAM, 0xffffffff);
5354 serge 4302
	for_each_pipe(dev_priv, pipe)
3031 serge 4303
		I915_WRITE(PIPESTAT(pipe), 0);
4304
	I915_WRITE(IMR, 0xffffffff);
4305
	I915_WRITE(IER, 0x0);
4306
 
5354 serge 4307
	for_each_pipe(dev_priv, pipe)
3031 serge 4308
		I915_WRITE(PIPESTAT(pipe),
4309
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4310
	I915_WRITE(IIR, I915_READ(IIR));
4311
}
4312
 
5354 serge 4313
/**
4314
 * intel_irq_init - initializes irq support
4315
 * @dev_priv: i915 device instance
4316
 *
4317
 * This function initializes all the irq support including work items, timers
4318
 * and all the vtables. It does not setup the interrupt itself though.
4319
 */
4320
void intel_irq_init(struct drm_i915_private *dev_priv)
2351 Serge 4321
{
5354 serge 4322
	struct drm_device *dev = dev_priv->dev;
3031 serge 4323
 
6296 serge 4324
	intel_hpd_init_work(dev_priv);
6084 serge 4325
 
4126 Serge 4326
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4327
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
3480 Serge 4328
 
5060 serge 4329
	/* Let's track the enabled rps events */
5354 serge 4330
	if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
4331
		/* WaGsvRC0ResidencyMethod:vlv */
6084 serge 4332
		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
5060 serge 4333
	else
6084 serge 4334
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
3480 Serge 4335
 
6084 serge 4336
	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
4337
			  i915_hangcheck_elapsed);
4560 Serge 4338
 
5354 serge 4339
 
4340
	if (IS_GEN2(dev_priv)) {
4560 Serge 4341
		dev->max_vblank_count = 0;
4342
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
5354 serge 4343
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4560 Serge 4344
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
6084 serge 4345
		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4560 Serge 4346
	} else {
6084 serge 4347
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
4348
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4293 Serge 4349
	}
3480 Serge 4350
 
5354 serge 4351
	/*
4352
	 * Opt out of the vblank disable timer on everything except gen2.
4353
	 * Gen2 doesn't have a hardware frame counter and so depends on
4354
	 * vblank interrupts to produce sane vblank sequence numbers.
4355
	 */
4356
	if (!IS_GEN2(dev_priv))
4357
		dev->vblank_disable_immediate = true;
4358
 
6084 serge 4359
	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4293 Serge 4360
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
3480 Serge 4361
 
5354 serge 4362
	if (IS_CHERRYVIEW(dev_priv)) {
5060 serge 4363
		dev->driver->irq_handler = cherryview_irq_handler;
4364
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
4365
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
4366
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
4367
		dev->driver->enable_vblank = valleyview_enable_vblank;
4368
		dev->driver->disable_vblank = valleyview_disable_vblank;
4369
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
5354 serge 4370
	} else if (IS_VALLEYVIEW(dev_priv)) {
3243 Serge 4371
		dev->driver->irq_handler = valleyview_irq_handler;
4372
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
4373
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
4293 Serge 4374
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
4375
		dev->driver->enable_vblank = valleyview_enable_vblank;
4376
		dev->driver->disable_vblank = valleyview_disable_vblank;
3746 Serge 4377
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
5354 serge 4378
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
4560 Serge 4379
		dev->driver->irq_handler = gen8_irq_handler;
5060 serge 4380
		dev->driver->irq_preinstall = gen8_irq_reset;
4560 Serge 4381
		dev->driver->irq_postinstall = gen8_irq_postinstall;
4382
		dev->driver->irq_uninstall = gen8_irq_uninstall;
4383
		dev->driver->enable_vblank = gen8_enable_vblank;
4384
		dev->driver->disable_vblank = gen8_disable_vblank;
6084 serge 4385
		if (IS_BROXTON(dev))
4386
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4387
		else if (HAS_PCH_SPT(dev))
4388
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
4389
		else
4390
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
2351 Serge 4391
	} else if (HAS_PCH_SPLIT(dev)) {
3243 Serge 4392
		dev->driver->irq_handler = ironlake_irq_handler;
5060 serge 4393
		dev->driver->irq_preinstall = ironlake_irq_reset;
3243 Serge 4394
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
4293 Serge 4395
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
4396
		dev->driver->enable_vblank = ironlake_enable_vblank;
4397
		dev->driver->disable_vblank = ironlake_disable_vblank;
6084 serge 4398
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
2351 Serge 4399
	} else {
5354 serge 4400
		if (INTEL_INFO(dev_priv)->gen == 2) {
4401
		} else if (INTEL_INFO(dev_priv)->gen == 3) {
3243 Serge 4402
			dev->driver->irq_preinstall = i915_irq_preinstall;
4403
			dev->driver->irq_postinstall = i915_irq_postinstall;
4293 Serge 4404
			dev->driver->irq_uninstall = i915_irq_uninstall;
3243 Serge 4405
			dev->driver->irq_handler = i915_irq_handler;
3031 serge 4406
		} else {
3243 Serge 4407
			dev->driver->irq_preinstall = i965_irq_preinstall;
4408
			dev->driver->irq_postinstall = i965_irq_postinstall;
4293 Serge 4409
			dev->driver->irq_uninstall = i965_irq_uninstall;
3243 Serge 4410
			dev->driver->irq_handler = i965_irq_handler;
6084 serge 4411
		}
4412
		if (I915_HAS_HOTPLUG(dev_priv))
3746 Serge 4413
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4293 Serge 4414
		dev->driver->enable_vblank = i915_enable_vblank;
4415
		dev->driver->disable_vblank = i915_disable_vblank;
2351 Serge 4416
	}
3480 Serge 4417
}
3243 Serge 4418
 
5354 serge 4419
/**
4420
 * intel_irq_install - enables the hardware interrupt
4421
 * @dev_priv: i915 device instance
4422
 *
4423
 * This function enables the hardware interrupt handling, but leaves the hotplug
4424
 * handling still disabled. It is called after intel_irq_init().
4425
 *
4426
 * In the driver load and resume code we need working interrupts in a few places
4427
 * but don't want to deal with the hassle of concurrent probe and hotplug
4428
 * workers. Hence the split into this two-stage approach.
4429
 */
4430
int intel_irq_install(struct drm_i915_private *dev_priv)
3243 Serge 4431
{
5354 serge 4432
	/*
4433
	 * We enable some interrupt sources in our postinstall hooks, so mark
4434
	 * interrupts as enabled _before_ actually enabling them to avoid
4435
	 * special cases in our ordering checks.
4436
	 */
4437
	dev_priv->pm.irqs_enabled = true;
2351 Serge 4438
 
5354 serge 4439
	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
3243 Serge 4440
}
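/*
 * Minimal sketch of the expected bring-up order during driver load
 * (assuming the usual i915 load path; error handling omitted):
 *
 *	intel_irq_init(dev_priv);	- set up vtables, work items, timers
 *	intel_irq_install(dev_priv);	- request the IRQ and run postinstall
 *	intel_hpd_init(dev_priv);	- hotplug handling comes up last (upstream)
 */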
4441
 
5354 serge 4442
/**
4443
 * intel_irq_uninstall - finalizes all irq handling
4444
 * @dev_priv: i915 device instance
4445
 *
4446
 * This stops interrupt and hotplug handling and unregisters and frees all
4447
 * resources acquired in the init functions.
4448
 */
4449
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
2351 Serge 4450
{
5354 serge 4451
//	drm_irq_uninstall(dev_priv->dev);
4452
//	intel_hpd_cancel_work(dev_priv);
4453
	dev_priv->pm.irqs_enabled = false;
4454
}
2351 Serge 4455
 
5354 serge 4456
/**
4457
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4458
 * @dev_priv: i915 device instance
4459
 *
4460
 * This function is used to disable interrupts at runtime, both in the runtime
4461
 * pm and the system suspend/resume code.
4462
 */
4463
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4464
{
4465
	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
4466
	dev_priv->pm.irqs_enabled = false;
4104 Serge 4467
}
2351 Serge 4468
 
5354 serge 4469
/**
4470
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4471
 * @dev_priv: i915 device instance
4472
 *
4473
 * This function is used to enable interrupts at runtime, both in the runtime
4474
 * pm and the system suspend/resume code.
4475
 */
4476
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4477
{
4478
	dev_priv->pm.irqs_enabled = true;
4479
	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
4480
	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
4481
}