/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/* The header names inside the angle brackets were eaten by the HTML view;
 * restored here to the standard i915 set of this kernel era. */
#include <linux/sysrq.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

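/*
 * Illustrative sketch (not from the driver itself): these tables map HPD
 * pins to per-platform hotplug register bits. intel_get_hpd_pins() further
 * down walks them the other way, e.g. on an IBX PCH a triggered
 * SDE_PORTB_HOTPLUG bit is translated back to HPD_PORT_B roughly like so:
 *
 *	for_each_hpd_pin(i)
 *		if (hpd_ibx[i] & sde_iir_trigger)
 *			*pin_mask |= BIT(i);
 */
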
/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

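/*
 * For reference, a hand expansion (not part of the driver): GEN5_IRQ_RESET(DE)
 * pastes the type token and becomes
 *
 *	I915_WRITE(DEIMR, 0xffffffff);
 *	POSTING_READ(DEIMR);
 *	I915_WRITE(DEIER, 0);
 *	I915_WRITE(DEIIR, 0xffffffff);
 *	POSTING_READ(DEIIR);
 *	I915_WRITE(DEIIR, 0xffffffff);
 *	POSTING_READ(DEIIR);
 *
 * IIR is written (and posted) twice because the hardware can latch a second
 * event behind the first; a single clear could leave that second event
 * pending while every source is masked.
 */
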
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     reg, val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

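/*
 * Companion sketch for the INIT side (illustrative): a caller such as a
 * postinstall hook would do, e.g.
 *
 *	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
 *
 * which first asserts that GTIIR is clean (nothing latched while the
 * interrupt was off), then enables sources in GTIER before unmasking them
 * in GTIMR; in the other order there would be a window where sources are
 * unmasked but not yet enabled consistently.
 */
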
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To prevent concurrent read-modify-write
 * cycles from interfering, these bits are protected by a spinlock.
 * Since this function is usually not called from a context where
 * the lock is held already, this function acquires the lock itself.
 * A non-locking version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   uint32_t mask,
				   uint32_t bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
				   uint32_t interrupt_mask,
				   uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

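/*
 * Worked example (illustrative only) of the update rule used above and in
 * the other *_update_*_irq() helpers: with interrupt_mask = 0b1100 and
 * enabled_irq_mask = 0b0100, bits outside interrupt_mask keep their old
 * IMR value, bit 2 ends up unmasked (enabled) and bit 3 masked (disabled),
 * since a set IMR bit disables the corresponding interrupt:
 *
 *	new_val = (old & ~0b1100) | (~0b0100 & 0b1100);
 *	        = (old & ~0b1100) | 0b1000;
 */
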
void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	ilk_update_display_irq(dev_priv, mask, mask);
}

void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	ilk_update_display_irq(dev_priv, mask, 0);
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}

void gen6_reset_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);

	WARN_ON(dev_priv->rps.pm_iir);
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
				dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
	/*
	 * SNB and IVB will (and VLV and CHV may) hard hang on a looping
	 * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;

	return mask;
}

void gen6_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->rps.work);

	spin_lock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
				~dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

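/*
 * Layout note with a hypothetical example (not from the code): PIPESTAT
 * packs enable bits in the high half and status bits in the low half, so on
 * most platforms an enable mask is simply status_mask << 16, e.g. a status
 * bit at bit 2 is enabled via bit 18. The VLV helper above exists because a
 * few bits (PSR, sprite flip done) break that symmetry.
 */
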
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev: drm device
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

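/*
 * Concrete instance of the assumptions above (illustrative): picking
 * vblank_start = 10 gives vsync_start = 11, vsync_end = 12 and vtotal = 13;
 * the scanline counter then runs 0..12, vertical active covers lines 0..9,
 * and the start-of-vblank events fire around the transition into line 10
 * (shifted 1-3 lines later for the gmch frame start interrupt, per the
 * PIPECONF note in the diagram).
 */
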
static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

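/*
 * Numeric sketch of the "cook up" above (illustrative values): with
 * htotal = 100, hsync_start = 90 and vblank_start = 10, vbl_start becomes
 * 10 * 100 - (100 - 90) = 990 pixels. A hardware frame count of N read with
 * pixel counter 995 then yields N + 1 (we are already past the start of
 * vblank, where the next frame's counter value logically begins), while a
 * pixel counter of 985 still yields N.
 */
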
static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->base.hwmode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank.  So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem.  We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
				DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime,
				    const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	if (!crtc->hwmode.crtc_clock) {
		DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     &crtc->hwmode);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_notify(ring);

	wake_up_all(&ring->irq_queue);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

static bool vlv_c0_above(struct drm_i915_private *dev_priv,
			 const struct intel_rps_ei *old,
			 const struct intel_rps_ei *now,
			 int threshold)
{
	u64 time, c0;
	unsigned int mul = 100;

	if (old->cz_clock == 0)
		return false;

	if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
		mul <<= 8;

	time = now->cz_clock - old->cz_clock;
	time *= threshold * dev_priv->czclk_freq;

	/* Workload can be split between render + media, e.g. SwapBuffers
	 * being blitted in X after being rendered in mesa. To account for
	 * this we need to combine both engines into our activity counter.
	 */
	c0 = now->render_c0 - old->render_c0;
	c0 += now->media_c0 - old->media_c0;
	c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;

	return c0 >= time;
}

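/*
 * Units sketch for vlv_c0_above() (illustrative, with made-up numbers):
 * both sides are scaled so they become comparable products, and the
 * comparison effectively asks whether the combined render+media C0
 * residency exceeded 'threshold' percent of the elapsed CZ-timestamp
 * interval. E.g. with threshold = 90, c0 >= time only comes out true when
 * the engines were busy for more than roughly 90% of the evaluation
 * interval, which is how the up/down threshold callers use it.
 */
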
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);
	if (now.cz_clock == 0)
		return 0;

	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
		if (!vlv_c0_above(dev_priv,
				  &dev_priv->rps.down_ei, &now,
				  dev_priv->rps.down_threshold))
			events |= GEN6_PM_RP_DOWN_THRESHOLD;
		dev_priv->rps.down_ei = now;
	}

	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		if (vlv_c0_above(dev_priv,
				 &dev_priv->rps.up_ei, &now,
				 dev_priv->rps.up_threshold))
			events |= GEN6_PM_RP_UP_THRESHOLD;
		dev_priv->rps.up_ei = now;
	}

	return events;
}

static bool any_waiters(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		if (ring->irq_refcount)
			return true;

	return false;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	bool client_boost;
	int new_delay, adj, min, max;
	u32 pm_iir;

	spin_lock_irq(&dev_priv->irq_lock);
	/* Speed up work cancellation during disabling rps interrupts. */
	if (!dev_priv->rps.interrupts_enabled) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	client_boost = dev_priv->rps.client_boost;
	dev_priv->rps.client_boost = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = dev_priv->rps.last_adj;
	new_delay = dev_priv->rps.cur_freq;
	min = dev_priv->rps.min_freq_softlimit;
	max = dev_priv->rps.max_freq_softlimit;

	if (client_boost) {
		new_delay = dev_priv->rps.max_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq - adj) {
			new_delay = dev_priv->rps.efficient_freq;
			adj = 0;
		}
	} else if (any_waiters(dev_priv)) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
	} else { /* unknown event */
		adj = 0;
	}

	dev_priv->rps.last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	intel_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}


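/*
 * Illustrative trace of the adjustment logic above (hypothetical event
 * stream): consecutive UP_THRESHOLD interrupts grow adj as 1, 2, 4, ... so
 * the frequency ramps exponentially toward max_freq_softlimit; a single
 * DOWN, waiter or unknown event resets adj to 0; and clamp_t() finally pins
 * new_delay inside [min_freq_softlimit, max_freq_softlimit] whatever the
 * accumulated step was.
 */
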
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(&dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(&dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(0));
		if (tmp) {
			I915_WRITE_FW(GEN8_GT_IIR(0), tmp);
			ret = IRQ_HANDLED;

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[RCS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[RCS]);

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[BCS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[BCS]);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(1));
		if (tmp) {
			I915_WRITE_FW(GEN8_GT_IIR(1), tmp);
			ret = IRQ_HANDLED;

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[VCS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[VCS]);

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[VCS2]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[VCS2]);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(3));
		if (tmp) {
			I915_WRITE_FW(GEN8_GT_IIR(3), tmp);
			ret = IRQ_HANDLED;

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[VECS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[VECS]);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(2));
		if (tmp & dev_priv->pm_rps_events) {
			I915_WRITE_FW(GEN8_GT_IIR(2),
				      tmp & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, tmp);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	return ret;
}

static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum port port, u32 val))
{
	enum port port;
	int i;

	for_each_hpd_pin(i) {
		if ((hpd[i] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(i);

//		if (!intel_hpd_pin_to_port(i, &port))
//			continue;

		if (long_pulse_detect(port, dig_hotplug_reg))
			*long_mask |= BIT(i);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
}

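/*
 * Example decode (illustrative): called with hpd_ibx and an SDEIIR trigger
 * of SDE_PORTB_HOTPLUG | SDE_PORTD_HOTPLUG, the loop above sets
 * BIT(HPD_PORT_B) and BIT(HPD_PORT_D) in *pin_mask, and additionally mirrors
 * each pin into *long_mask when the platform's long_pulse_detect() callback
 * reports a long pulse in dig_hotplug_reg. Note that in this port the
 * pin-to-port lookup is stubbed out above, so 'port' is never assigned
 * before long_pulse_detect() reads it.
 */
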
static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_DEBUG_KMS("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif


static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
1570
 * IMR bits until the work is done. Other interrupts can be processed without
1571
 * the work queue. */
1572
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1573
{
5060 serge 1574
	if (pm_iir & dev_priv->pm_rps_events) {
4104 Serge 1575
		spin_lock(&dev_priv->irq_lock);
5354 serge 1576
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1577
		if (dev_priv->rps.interrupts_enabled) {
6084 serge 1578
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
5354 serge 1579
			queue_work(dev_priv->wq, &dev_priv->rps.work);
1580
		}
4104 Serge 1581
		spin_unlock(&dev_priv->irq_lock);
1582
	}
1583
 
5354 serge 1584
	if (INTEL_INFO(dev_priv)->gen >= 8)
1585
		return;
1586
 
4104 Serge 1587
	if (HAS_VEBOX(dev_priv->dev)) {
1588
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
6084 serge 1589
			notify_ring(&dev_priv->ring[VECS]);
4104 Serge 1590
 
5354 serge 1591
		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1592
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
4104 Serge 1593
	}
1594
}

static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
	if (!drm_handle_vblank(dev, pipe))
		return false;

	return true;
}

static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	spin_lock(&dev_priv->irq_lock);
	for_each_pipe(dev_priv, pipe) {
		int reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev, pipe))
			/*intel_check_page_flip(dev, pipe)*/;

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
//			intel_prepare_page_flip(dev, pipe);
//			intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}

static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
	u32 pin_mask = 0, long_mask = 0;

	if (!hotplug_status)
		return;

	I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
	/*
	 * Make sure hotplug status is cleared before we clear IIR, or else we
	 * may miss hotplug events.
	 */
	POSTING_READ(PORT_HOTPLUG_STAT);

	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_g4x,
					   i9xx_port_hotplug_long_detect);

//           intel_hpd_irq_handler(dev, pin_mask, long_mask);
		}

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_i915,
					   i9xx_port_hotplug_long_detect);
//           intel_hpd_irq_handler(dev, pin_mask, long_mask);
		}
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	while (true) {
		/* Find, clear, then process each source of interrupt */

		gt_iir = I915_READ(GTIIR);
		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);

		pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		iir = I915_READ(VLV_IIR);
		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		if (gt_iir)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);
		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);
	}

out:
	return ret;
}

static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl, iir;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	for (;;) {
		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		I915_WRITE(GEN8_MASTER_IRQ, 0);

		/* Find, clear, then process each source of interrupt */

		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		gen8_gt_irq_handler(dev_priv, master_ctl);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);

		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);
	}

	return ret;
}
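
/*
 * Both interrupt loops above follow the same discipline: disable the master
 * interrupt (write 0 to GEN8_MASTER_IRQ), read and clear the individual IIR
 * sources, process them, and only then re-enable the master control bit with
 * a posting read, so that a source that latched meanwhile raises a fresh
 * interrupt instead of being lost.
 */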

static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   pch_port_hotplug_long_detect);

//   intel_hpd_irq_handler(dev, pin_mask, long_mask);
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	if (hotplug_trigger)
		ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
}

static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev))
				ivb_pipe_crc_irq_handler(dev, pipe);
			else
				hsw_pipe_crc_irq_handler(dev, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	if (hotplug_trigger)
		ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}

static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
		~SDE_PORTE_HOTPLUG_SPT;
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
	u32 pin_mask = 0, long_mask = 0;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug_long_detect);
	}

	if (hotplug2_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
				   dig_hotplug_reg, hpd_spt,
				   spt_port_hotplug2_long_detect);
	}

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);
}

static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   ilk_port_hotplug_long_detect);

}

static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe) &&
		    intel_pipe_handle_vblank(dev, pipe))
            /*intel_check_page_flip(dev, pipe)*/;

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
//			intel_prepare_page_flip(dev, pipe);
//			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev);
}

static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
		    intel_pipe_handle_vblank(dev, pipe))
            /*intel_check_page_flip(dev, pipe)*/;

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
//			intel_prepare_page_flip(dev, pipe);
//			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	intel_uncore_check_errors(dev);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
		if (INTEL_INFO(dev)->gen >= 6)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
		if (INTEL_INFO(dev)->gen >= 7)
			ivb_display_irq_handler(dev, de_iir);
		else
			ilk_display_irq_handler(dev, de_iir);
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, pm_iir);
		}
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return ret;
}

static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   bxt_port_hotplug_long_detect);

}

static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl;
	irqreturn_t ret = IRQ_NONE;
	uint32_t tmp = 0;
	enum pipe pipe;
	u32 aux_mask = GEN8_AUX_CHANNEL_A;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	if (INTEL_INFO(dev_priv)->gen >= 9)
		aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;

	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
	if (!master_ctl)
		return IRQ_NONE;

	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);

	/* Find, clear, then process each source of interrupt */

	ret = gen8_gt_irq_handler(dev_priv, master_ctl);

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		tmp = I915_READ(GEN8_DE_MISC_IIR);
		if (tmp) {
			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
			ret = IRQ_HANDLED;
			if (tmp & GEN8_DE_MISC_GSE)
				intel_opregion_asle_intr(dev);
			else
				DRM_ERROR("Unexpected DE Misc interrupt\n");
		}
		else
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		tmp = I915_READ(GEN8_DE_PORT_IIR);
		if (tmp) {
			bool found = false;
			u32 hotplug_trigger = 0;

			if (IS_BROXTON(dev_priv))
				hotplug_trigger = tmp & BXT_DE_PORT_HOTPLUG_MASK;
			else if (IS_BROADWELL(dev_priv))
				hotplug_trigger = tmp & GEN8_PORT_DP_A_HOTPLUG;

			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
			ret = IRQ_HANDLED;

			if (tmp & aux_mask) {
				dp_aux_irq_handler(dev);
				found = true;
			}

			if (hotplug_trigger) {
				if (IS_BROXTON(dev))
					bxt_hpd_irq_handler(dev, hotplug_trigger, hpd_bxt);
				else
					ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_bdw);
				found = true;
			}

			if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
				gmbus_irq_handler(dev);
				found = true;
			}

			if (!found)
				DRM_ERROR("Unexpected DE Port interrupt\n");
		}
		else
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
	}

	for_each_pipe(dev_priv, pipe) {
		uint32_t pipe_iir, flip_done = 0, fault_errors = 0;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (pipe_iir) {
			ret = IRQ_HANDLED;
			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);

			if (pipe_iir & GEN8_PIPE_VBLANK &&
			    intel_pipe_handle_vblank(dev, pipe))
			/*	intel_check_page_flip(dev, pipe)*/;

			if (INTEL_INFO(dev_priv)->gen >= 9)
				flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
			else
				flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;

			if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
				hsw_pipe_crc_irq_handler(dev, pipe);

			if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);

			if (INTEL_INFO(dev_priv)->gen >= 9)
				fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
			else
				fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;

			if (fault_errors)
				DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
					  pipe_name(pipe),
					  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
		} else
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
	}

	if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		u32 pch_iir = I915_READ(SDEIIR);
		if (pch_iir) {
			I915_WRITE(SDEIIR, pch_iir);
			ret = IRQ_HANDLED;

			if (HAS_PCH_SPT(dev_priv))
				spt_irq_handler(dev, pch_iir);
			else
				cpt_irq_handler(dev, pch_iir);
		} else
			DRM_ERROR("The master control interrupt lied (SDE)!\n");

	}

	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ_FW(GEN8_MASTER_IRQ);

	return ret;
}
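
/*
 * Side note: gen8_irq_handler() uses the _FW accessors for GEN8_MASTER_IRQ.
 * These are the raw mmio variants that skip the usual forcewake/uncore
 * bookkeeping, presumably to keep the hard irq path as short as possible;
 * the individual IIR registers are still accessed through the ordinary
 * I915_READ/I915_WRITE helpers.
 */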

static void i915_error_wake_up(struct drm_i915_private *dev_priv,
			       bool reset_completed)
{
	struct intel_engine_cs *ring;
	int i;

	/*
	 * Notify all waiters for GPU completion events that reset state has
	 * been changed, and that they need to restart their wait after
	 * checking for potential errors (and bail out to drop locks if there is
	 * a gpu reset pending so that i915_error_work_func can acquire them).
	 */

	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
	for_each_ring(ring, dev_priv, i)
		wake_up_all(&ring->irq_queue);

	/*
	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
	 * reset state is cleared.
	 */
	if (reset_completed)
		wake_up_all(&dev_priv->gpu_error.reset_queue);
}

/**
 * i915_reset_and_wakeup - do process context error handling work
 * @dev: drm device
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_reset_and_wakeup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_gpu_error *error = &dev_priv->gpu_error;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	int ret = 0;	/* i915_reset() is stubbed out in this port; treat the reset as successful */

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		intel_runtime_pm_get(dev_priv);

		/*
		 * All state reset _must_ be completed before we update the
		 * reset counter, for otherwise waiters might miss the reset
		 * pending state and not properly drop locks, resulting in
		 * deadlocks with the reset work.
		 */
//		ret = i915_reset(dev);

//		intel_finish_reset(dev);

		intel_runtime_pm_put(dev_priv);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before
			 * the counter increment.
			 */
			smp_mb__before_atomic();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

		} else {
			atomic_or(I915_WEDGED, &error->reset_counter);
		}

		/*
		 * Note: The wake_up also serves as a memory barrier so that
		 * waiters see the updated value of the reset counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, true);
	}
}

static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(dev_priv, pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle a gpu error
 * @dev: drm device
 *
 * Do some basic checking of register state at error time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged,
		       const char *fmt, ...)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	va_list args;
	char error_msg[80];

	va_start(args, fmt);
	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
	va_end(args);

//	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_or(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset function
		 * i915_reset_and_wakeup doesn't deadlock trying to grab
		 * various locks. By bumping the reset counter first, the woken
		 * processes will see a reset in progress and back off,
		 * releasing their locks and then wait for the reset completion.
		 * We must do this for _all_ gpu waiters that might hold locks
		 * that the reset work needs to acquire.
		 *
		 * Note: The wake_up serves as the required memory barrier to
		 * ensure that the waiters see the updated value of the reset
		 * counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, false);
	}

	i915_reset_and_wakeup(dev);
}
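
/*
 * Usage example: i915_handle_error() takes a printf-style reason string;
 * the hangcheck code below calls it as
 *	i915_handle_error(dev, false, "Kicking stuck wait on %s", ring->name);
 * with wedged == false, so only the error state is reported and no reset
 * is requested.
 */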

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_STATUS);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_STATUS |
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static bool
ring_idle(struct intel_engine_cs *ring, u32 seqno)
{
	return (list_empty(&ring->request_list) ||
		i915_seqno_passed(seqno, ring->last_submitted_seqno));
}

static bool
ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
{
	if (INTEL_INFO(dev)->gen >= 8) {
		return (ipehr >> 23) == 0x1c;
	} else {
		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
				 MI_SEMAPHORE_REGISTER);
	}
}

static struct intel_engine_cs *
semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_engine_cs *signaller;
	int i;

	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
		for_each_ring(signaller, dev_priv, i) {
			if (ring == signaller)
				continue;

			if (offset == signaller->semaphore.signal_ggtt[ring->id])
				return signaller;
		}
	} else {
		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;

		for_each_ring(signaller, dev_priv, i) {
			if (ring == signaller)
				continue;

			if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
				return signaller;
		}
	}

	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
		  ring->id, ipehr, offset);

	return NULL;
}

static struct intel_engine_cs *
semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 cmd, ipehr, head;
	u64 offset = 0;
	int i, backwards;

	/*
	 * This function does not support execlist mode - any attempt to
	 * proceed further into this function will result in a kernel panic
	 * when dereferencing ring->buffer, which is not set up in execlist
	 * mode.
	 *
	 * The correct way of doing it would be to derive the currently
	 * executing ring buffer from the current context, which is derived
	 * from the currently running request. Unfortunately, to get the
	 * current request we would have to grab the struct_mutex before doing
	 * anything else, which would be ill-advised since some other thread
	 * might have grabbed it already and managed to hang itself, causing
	 * the hang checker to deadlock.
	 *
	 * Therefore, this function does not support execlist mode in its
	 * current form. Just return NULL and move on.
	 */
	if (ring->buffer == NULL)
		return NULL;

	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
	if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
		return NULL;

	/*
	 * HEAD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX. But limit it to just 3
	 * or 4 dwords depending on the semaphore wait command size.
	 * Note that we don't care about ACTHD here since that might
	 * point at a batch, and semaphores are always emitted into the
	 * ringbuffer itself.
	 */
	head = I915_READ_HEAD(ring) & HEAD_ADDR;
	backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;

	for (i = backwards; i; --i) {
		/*
		 * Be paranoid and presume the hw has gone off into the wild -
		 * our ring is smaller than what the hardware (and hence
		 * HEAD_ADDR) allows. Also handles wrap-around.
		 */
		head &= ring->buffer->size - 1;

		/* This here seems to blow up */
		cmd = ioread32(ring->buffer->virtual_start + head);
		if (cmd == ipehr)
			break;

		head -= 4;
	}

	if (!i)
		return NULL;

	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
	if (INTEL_INFO(ring->dev)->gen >= 8) {
		offset = ioread32(ring->buffer->virtual_start + head + 12);
		offset <<= 32;
		offset |= ioread32(ring->buffer->virtual_start + head + 8);
	}
	return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
}
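
/*
 * The scan above walks backwards from HEAD one dword at a time (head -= 4)
 * looking for the MI_SEMAPHORE wait command whose value is still latched in
 * IPEHR. Once found, the awaited seqno sits in the following dword (+4), and
 * on gen8+ the 64-bit semaphore address is reassembled from the two dwords
 * at +8 (low half) and +12 (high half).
 */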
2351 Serge 2808
 
5060 serge 2809
static int semaphore_passed(struct intel_engine_cs *ring)
4104 Serge 2810
{
2811
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
5060 serge 2812
	struct intel_engine_cs *signaller;
2813
	u32 seqno;
4104 Serge 2814
 
5060 serge 2815
	ring->hangcheck.deadlock++;
4104 Serge 2816
 
2817
	signaller = semaphore_waits_for(ring, &seqno);
5060 serge 2818
	if (signaller == NULL)
4104 Serge 2819
		return -1;
2820
 
5060 serge 2821
	/* Prevent pathological recursion due to driver bugs */
2822
	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
2823
		return -1;
2824
 
2825
	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2826
		return 1;
2827
 
4104 Serge 2828
	/* cursory check for an unkickable deadlock */
5060 serge 2829
	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2830
	    semaphore_passed(signaller) < 0)
4104 Serge 2831
		return -1;
2832
 
5060 serge 2833
	return 0;
4104 Serge 2834
}
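
/*
 * semaphore_passed() return values: 1 means the signaller has already passed
 * the awaited seqno (the wait should resolve by itself), 0 means the wait is
 * still legitimate, and -1 flags a likely deadlock or an undecodable wait.
 * The per-ring deadlock counter bounds the recursion to I915_NUM_RINGS hops.
 */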

static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		ring->hangcheck.deadlock = 0;
}

static enum intel_ring_hangcheck_action
ring_stuck(struct intel_engine_cs *ring, u64 acthd)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	if (acthd != ring->hangcheck.acthd) {
		if (acthd > ring->hangcheck.max_acthd) {
			ring->hangcheck.max_acthd = acthd;
			return HANGCHECK_ACTIVE;
		}

		return HANGCHECK_ACTIVE_LOOP;
	}

	if (IS_GEN2(dev))
		return HANGCHECK_HUNG;

	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
	tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		i915_handle_error(dev, false,
				  "Kicking stuck wait on %s",
				  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return HANGCHECK_KICK;
	}

	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
		switch (semaphore_passed(ring)) {
		default:
			return HANGCHECK_HUNG;
		case 1:
			i915_handle_error(dev, false,
					  "Kicking stuck semaphore on %s",
					  ring->name);
			I915_WRITE_CTL(ring, tmp);
			return HANGCHECK_KICK;
		case 0:
			return HANGCHECK_WAIT;
		}
	}

	return HANGCHECK_HUNG;
}

/*
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track of per-ring seqno progress and
 * if there is no progress, the hangcheck score for that ring is increased.
 * Further, acthd is inspected to see if the ring is stuck. On stuck case
 * we kick the ring. If we see no progress on three subsequent calls
 * we assume chip is wedged and try to fix it by resetting the chip.
 */
static void i915_hangcheck_elapsed(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     gpu_error.hangcheck_work.work);
	struct drm_device *dev = dev_priv->dev;
	struct intel_engine_cs *ring;
	int i;
	int busy_count = 0, rings_hung = 0;
	bool stuck[I915_NUM_RINGS] = { 0 };
#define BUSY 1
#define KICK 5
#define HUNG 20

	if (!i915.enable_hangcheck)
		return;

	for_each_ring(ring, dev_priv, i) {
		u64 acthd;
		u32 seqno;
		bool busy = true;

		semaphore_clear_deadlocks(dev_priv);

		seqno = ring->get_seqno(ring, false);
		acthd = intel_ring_get_active_head(ring);

		if (ring->hangcheck.seqno == seqno) {
			if (ring_idle(ring, seqno)) {
				ring->hangcheck.action = HANGCHECK_IDLE;

				if (waitqueue_active(&ring->irq_queue)) {
					/* Issue a wake-up to catch stuck h/w. */
					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
								  ring->name);
						else
							DRM_INFO("Fake missed irq on %s\n",
								 ring->name);
						wake_up_all(&ring->irq_queue);
					}
					/* Safeguard against driver failure */
					ring->hangcheck.score += BUSY;
				} else
					busy = false;
			} else {
				/* We always increment the hangcheck score
				 * if the ring is busy and still processing
				 * the same request, so that no single request
				 * can run indefinitely (such as a chain of
				 * batches). The only time we do not increment
				 * the hangcheck score on this ring is if this
				 * ring is in a legitimate wait for another
				 * ring. In that case the waiting ring is a
				 * victim and we want to be sure we catch the
				 * right culprit. Then every time we do kick
				 * the ring, add a small increment to the
				 * score so that we can catch a batch that is
				 * being repeatedly kicked and so responsible
				 * for stalling the machine.
				 */
				ring->hangcheck.action = ring_stuck(ring,
								    acthd);

				switch (ring->hangcheck.action) {
				case HANGCHECK_IDLE:
				case HANGCHECK_WAIT:
				case HANGCHECK_ACTIVE:
					break;
				case HANGCHECK_ACTIVE_LOOP:
					ring->hangcheck.score += BUSY;
					break;
				case HANGCHECK_KICK:
					ring->hangcheck.score += KICK;
					break;
				case HANGCHECK_HUNG:
					ring->hangcheck.score += HUNG;
					stuck[i] = true;
					break;
				}
			}
		} else {
			ring->hangcheck.action = HANGCHECK_ACTIVE;

			/* Gradually reduce the count so that we catch DoS
			 * attempts across multiple batches.
			 */
			if (ring->hangcheck.score > 0)
				ring->hangcheck.score--;

			ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
		}

		ring->hangcheck.seqno = seqno;
		ring->hangcheck.acthd = acthd;
		busy_count += busy;
	}

	for_each_ring(ring, dev_priv, i) {
		if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
			DRM_INFO("%s on %s\n",
				 stuck[i] ? "stuck" : "no progress",
				 ring->name);
			rings_hung++;
		}
	}

//   if (rings_hung)
//       return i915_handle_error(dev, true);

}
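
/*
 * Scoring sketch: each hangcheck period a ring that is merely busy adds
 * BUSY (1), a kicked ring adds KICK (5) and a hung ring adds HUNG (20),
 * while a ring that made progress decays by 1. A ring stuck in
 * HANGCHECK_HUNG therefore crosses HANGCHECK_SCORE_RING_HUNG (31 in the
 * matching upstream headers) on the second consecutive check.
 */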

static void ibx_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	GEN5_IRQ_RESET(SDE);

	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
		I915_WRITE(SERR_INT, 0xffffffff);
}

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * This function needs to be called before interrupts are enabled.
 */
static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}
3049
 
5060 serge 3050
static void gen5_gt_irq_reset(struct drm_device *dev)
4104 Serge 3051
{
3052
	struct drm_i915_private *dev_priv = dev->dev_private;
3053
 
5060 serge 3054
	GEN5_IRQ_RESET(GT);
3055
	if (INTEL_INFO(dev)->gen >= 6)
3056
		GEN5_IRQ_RESET(GEN6_PM);
4104 Serge 3057
}
3058
 
3059
/* drm_dma.h hooks
3060
*/
5060 serge 3061
static void ironlake_irq_reset(struct drm_device *dev)
4104 Serge 3062
{
5060 serge 3063
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 3064
 
5060 serge 3065
	I915_WRITE(HWSTAM, 0xffffffff);
4104 Serge 3066
 
5060 serge 3067
	GEN5_IRQ_RESET(DE);
3068
	if (IS_GEN7(dev))
3069
		I915_WRITE(GEN7_ERR_INT, 0xffffffff);
4104 Serge 3070
 
5060 serge 3071
	gen5_gt_irq_reset(dev);
4104 Serge 3072
 
5060 serge 3073
	ibx_irq_reset(dev);
4104 Serge 3074
}
3075
 
5354 serge 3076
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3077
{
3078
	enum pipe pipe;
3079
 
6084 serge 3080
	i915_hotplug_interrupt_update(dev_priv, 0xFFFFFFFF, 0);
5354 serge 3081
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3082
 
3083
	for_each_pipe(dev_priv, pipe)
3084
		I915_WRITE(PIPESTAT(pipe), 0xffff);
3085
 
3086
	GEN5_IRQ_RESET(VLV_);
3087
}
3088
 
3031 serge 3089
static void valleyview_irq_preinstall(struct drm_device *dev)
3090
{
5060 serge 3091
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 3092
 
3093
	/* VLV magic */
3094
	I915_WRITE(VLV_IMR, 0);
3095
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3096
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3097
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3098
 
5060 serge 3099
	gen5_gt_irq_reset(dev);
4104 Serge 3100
 
5354 serge 3101
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3031 serge 3102
 
5354 serge 3103
	vlv_display_irq_reset(dev_priv);
3031 serge 3104
}
3105
 
5060 serge 3106
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	GEN8_IRQ_RESET_NDX(GT, 0);
	GEN8_IRQ_RESET_NDX(GT, 1);
	GEN8_IRQ_RESET_NDX(GT, 2);
	GEN8_IRQ_RESET_NDX(GT, 3);
}

static void gen8_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);

	GEN5_IRQ_RESET(GEN8_DE_PORT_);
	GEN5_IRQ_RESET(GEN8_DE_MISC_);
	GEN5_IRQ_RESET(GEN8_PCU_);

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_reset(dev);
}

void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
				     unsigned int pipe_mask)
{
	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;

	spin_lock_irq(&dev_priv->irq_lock);
	if (pipe_mask & 1 << PIPE_A)
		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
				  dev_priv->de_irq_mask[PIPE_A],
				  ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
	if (pipe_mask & 1 << PIPE_B)
		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
				  dev_priv->de_irq_mask[PIPE_B],
				  ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
	if (pipe_mask & 1 << PIPE_C)
		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
				  dev_priv->de_irq_mask[PIPE_C],
				  ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
	spin_unlock_irq(&dev_priv->irq_lock);
}

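/*
 * Usage sketch (illustrative, not from the driver): a power-well enable
 * path that just brought pipes B and C back up would restore their
 * interrupt registers with something like
 *
 *	gen8_irq_power_well_post_enable(dev_priv,
 *					(1 << PIPE_B) | (1 << PIPE_C));
 *
 * since the DE_PIPE IMR/IER contents do not survive the well being off.
 */
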
static void cherryview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);

	vlv_display_irq_reset(dev_priv);
}

static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	u32 enabled_irqs = 0;

	for_each_intel_encoder(dev, encoder)
		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
			enabled_irqs |= hpd[encoder->hpd_pin];

	return enabled_irqs;
}

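/*
 * Worked example: on an IBX PCH where only the port B encoder's pin is
 * currently in the HPD_ENABLED state, intel_hpd_enabled_irqs(dev, hpd_ibx)
 * returns just SDE_PORTB_HOTPLUG; pins in any other state (e.g. disabled
 * after an interrupt storm) contribute nothing to the mask.
 */
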
static void ibx_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_irqs, hotplug, enabled_irqs;

	if (HAS_PCH_IBX(dev)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on LPT+.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	/*
	 * When CPU and PCH are on the same package, port A
	 * HPD must be enabled in both north and south.
	 */
	if (HAS_PCH_LPT_LP(dev))
		hotplug |= PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

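/*
 * Background note (general DisplayPort behaviour, not i915-specific): a
 * short HPD pulse signals a sink IRQ such as a link-status change, while
 * a long pulse signals connect/disconnect; the duration fields programmed
 * above set the threshold the hardware uses to tell the two apart.
 */
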
static void spt_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_irqs, hotplug, enabled_irqs;

	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/* Enable digital hotplug on the PCH */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
		PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);

	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
	hotplug |= PORTE_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}

static void ilk_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_irqs, hotplug, enabled_irqs;

	if (INTEL_INFO(dev)->gen >= 8) {
		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw);

		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else if (INTEL_INFO(dev)->gen >= 7) {
		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else {
		hotplug_irqs = DE_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	}

	/*
	 * Enable digital hotplug on the CPU, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on HSW+.
	 */
	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);

	ibx_hpd_irq_setup(dev);
}

static void bxt_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_irqs, hotplug, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt);
	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;

	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
		PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void ibx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 mask;

	if (HAS_PCH_NOP(dev))
		return;

	if (HAS_PCH_IBX(dev))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;

	gen5_assert_iir_is_zero(dev_priv, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);
}

static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	dev_priv->gt_irq_mask = ~0;
	if (HAS_L3_DPF(dev)) {
		/* L3 parity interrupt is always unmasked. */
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
		gt_irqs |= GT_PARITY_ERROR(dev);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev)) {
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
			   ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);

	if (INTEL_INFO(dev)->gen >= 6) {
		/*
		 * RPS interrupts will get enabled/disabled on demand when RPS
		 * itself is enabled/disabled.
		 */
		if (HAS_VEBOX(dev))
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;

		dev_priv->pm_irq_mask = 0xffffffff;
		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
	}
}

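/*
 * Note on the GEN6_PM block above: IER starts out with at most the VEBOX
 * user interrupt while IMR masks everything (pm_irq_mask = 0xffffffff);
 * the RPS threshold events in dev_priv->pm_rps_events are unmasked later,
 * from the RPS enable path, and masked again when RPS is turned off.
 */
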
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 display_mask, extra_mask;

	if (INTEL_INFO(dev)->gen >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
				DE_PLANEB_FLIP_DONE_IVB |
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
				DE_AUX_CHANNEL_A |
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
				DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_DP_A_HOTPLUG);
	}

	dev_priv->irq_mask = ~display_mask;

	I915_WRITE(HWSTAM, 0xeffe);

	ibx_irq_pre_postinstall(dev);

	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);

	gen5_gt_irq_postinstall(dev);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irq(&dev_priv->irq_lock);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irq(&dev_priv->irq_lock);
	}

	return 0;
}

static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 iir_mask;
	enum pipe pipe;

	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
			PIPE_FIFO_UNDERRUN_STATUS;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
	POSTING_READ(PIPESTAT(PIPE_A));

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
	if (IS_CHERRYVIEW(dev_priv))
		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
	dev_priv->irq_mask &= ~iir_mask;

	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	POSTING_READ(VLV_IMR);
}

static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 iir_mask;
	enum pipe pipe;

	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
	if (IS_CHERRYVIEW(dev_priv))
		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;

	dev_priv->irq_mask |= iir_mask;
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	POSTING_READ(VLV_IIR);

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_disable_pipestat(dev_priv, pipe, pipestat_mask);

	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
			PIPE_FIFO_UNDERRUN_STATUS;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
	POSTING_READ(PIPESTAT(PIPE_A));
}

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv))
		valleyview_display_irqs_install(dev_priv);
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		valleyview_display_irqs_uninstall(dev_priv);
}

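/*
 * Both helpers above must run under irq_lock; when the driver interrupt
 * is not yet installed they only flip the display_irqs_enabled flag and
 * leave the register programming to vlv_display_irq_postinstall(). A
 * caller would typically do (sketch):
 *
 *	spin_lock_irq(&dev_priv->irq_lock);
 *	valleyview_enable_display_irqs(dev_priv);
 *	spin_unlock_irq(&dev_priv->irq_lock);
 */
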
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	dev_priv->irq_mask = ~0;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	POSTING_READ(VLV_IMR);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		valleyview_display_irqs_install(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	vlv_display_irq_postinstall(dev_priv);

	gen5_gt_irq_postinstall(dev);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}

static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	/* These are interrupts we'll toggle with the ring mask register */
	uint32_t gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
		};

	dev_priv->pm_irq_mask = 0xffffffff;
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled.
	 */
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}

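/*
 * Decoding one entry of gt_interrupts[] above: index 0 programs the GT(0)
 * register pair, so it carries the user and context-switch interrupts for
 * both the render (RCS) and blitter (BCS) engines, each shifted into its
 * half of the register. Index 2 is deliberately 0 because the PM/RPS bits
 * living there are unmasked on demand, as the in-function comment notes.
 */
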
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	uint32_t de_pipe_enables;
	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
	u32 de_port_enables;
	enum pipe pipe;

	if (INTEL_INFO(dev_priv)->gen >= 9) {
		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
				  GEN9_AUX_CHANNEL_D;
		if (IS_BROXTON(dev_priv))
			de_port_masked |= BXT_DE_PORT_GMBUS;
	} else {
		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	}

	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
					   GEN8_PIPE_FIFO_UNDERRUN;

	de_port_enables = de_port_masked;
	if (IS_BROXTON(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;

	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);

	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
}

static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_pre_postinstall(dev);

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_postinstall(dev);

	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static int cherryview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	vlv_display_irq_postinstall(dev_priv);

	gen8_gt_irq_postinstall(dev_priv);

	I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static void gen8_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	gen8_irq_reset(dev);
}

static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
{
	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		valleyview_display_irqs_uninstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	vlv_display_irq_reset(dev_priv);

	dev_priv->irq_mask = ~0;
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(VLV_MASTER_IER, 0);

	gen5_gt_irq_reset(dev);

	I915_WRITE(HWSTAM, 0xffffffff);

	vlv_display_irq_uninstall(dev_priv);
}

static void cherryview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	vlv_display_irq_uninstall(dev_priv);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	ironlake_irq_reset(dev);
}

#if 0
static void i8xx_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		goto check_page_flip;

//   intel_prepare_page_flip(dev, plane);
//   intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
//   intel_check_page_flip(dev, pipe);
	return false;
}

static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff)
				I915_WRITE(reg, pipe_stats[pipe]);
		}
		spin_unlock(&dev_priv->irq_lock);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i8xx_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		iir = new_iir;
	}

	return IRQ_HANDLED;
}

static void i8xx_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

#endif

static void i915_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		goto check_page_flip;

	return true;

check_page_flip:
	return false;
}

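/*
 * Timeline example for the detection scheme above (illustrative): right
 * after MI_DISPLAY_FLIP executes, the PendingFlip bit reads as '1' in both
 * ISR and IIR. At the vblank where the hardware latches the new base
 * address, the live ISR bit drops to '0' while IIR still holds the latched
 * '1'; seeing that IIR=1/ISR=0 combination is the only indication that the
 * flip completed, since no dedicated interrupt is raised for it.
 */
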
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	return ret;
}

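/*
 * The read/ack/re-read dance above exists because MSI is edge-triggered:
 * after acking IIR, it is immediately re-read, and any source that fired
 * in that window is handled on the next loop pass instead of being lost
 * (no new edge would be generated while IIR stayed nonzero).
 */
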
static void i915_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}

static void i915_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915);
	/* Programming the CRT detection parameters tends
	   to generate a spurious hotplug event about three
	   seconds later.  So just do it once.
	*/
	if (IS_G4X(dev))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	return ret;
}

static void i965_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

//   intel_hpd_init_work(dev_priv);

	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
			  i915_hangcheck_elapsed);

	if (IS_GEN2(dev_priv)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (IS_BROXTON(dev))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (HAS_PCH_SPT(dev))
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev_priv)->gen == 2) {
		} else if (INTEL_INFO(dev_priv)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
}

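/*
 * Typical load-time sequence implied by the two-stage split above (sketch,
 * error handling omitted): intel_irq_init(dev_priv) first sets up vtables,
 * work items and timers, then intel_irq_install(dev_priv) actually requests
 * the interrupt; hotplug handling is brought up separately once probing is
 * done.
 */
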
/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
//	drm_irq_uninstall(dev_priv->dev);
//	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
	dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
}

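/*
 * The two functions above are meant to be used as a pair around a
 * low-power period: disable before suspending, enable after resuming.
 * The enable side replays the full preinstall/postinstall sequence
 * because the interrupt registers generally need reprogramming once the
 * device powers back up.
 */
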
irqreturn_t intel_irq_handler(struct drm_device *dev)
{

//    printf("i915 irq\n");
//    printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ;

    return dev->driver->irq_handler(0, dev);
}