Subversion Repositories: KolibriOS


Diff of Rev 3746 (left/old) against Rev 4104 (right/new). In the hunks below, lines marked '-' exist only in Rev 3746, lines marked '+' only in Rev 4104; unmarked lines are common to both.
Line 33 (Rev 3746) ... Line 33 (Rev 4104)
  #include 
  #include "i915_drv.h"
  #include "i915_trace.h"
  #include "intel_drv.h"

Line - (Rev 3746) ... Line 37 (Rev 4104)
+
+ #define assert_spin_locked(a)

  static const u32 hpd_ibx[] = {
  	[HPD_CRT] = SDE_CRT_HOTPLUG,
  	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
  	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,

Line 67 (Rev 3746) ... Line 69 (Rev 4104)
  	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
  	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
  	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
  };

Line 71 (Rev 3746) ... Line - (Rev 4104)
-
- static const u32 hpd_status_i965[] = {
- 	 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
- 	 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I965,
- 	 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I965,
- 	 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
- 	 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
- 	 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
- };

  static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
  	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
  	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
  	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
  	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
  	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
  	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS

Line 88 (Rev 3746) ... Line - (Rev 4104)
- };
-

Line 90 (Rev 3746) ... Line 81 (Rev 4104)
- static void ibx_hpd_irq_setup(struct drm_device *dev);
- static void i915_hpd_irq_setup(struct drm_device *dev);
+ };
+
Line 103 (Rev 3746) ... Line 94 (Rev 4104)

  /* For display hotplug interrupt */
  static void
  ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
  {
+ 	assert_spin_locked(&dev_priv->irq_lock);
+
+ 	if (dev_priv->pc8.irqs_disabled) {
+ 		WARN(1, "IRQs disabled\n");
+ 		dev_priv->pc8.regsave.deimr &= ~mask;
+ 		return;
+ 	}
+
      if ((dev_priv->irq_mask & mask) != 0) {
          dev_priv->irq_mask &= ~mask;
          I915_WRITE(DEIMR, dev_priv->irq_mask);
          POSTING_READ(DEIMR);
      }
Line 113 (Rev 3746) ... Line 112 (Rev 4104)
  }

  static void
  ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
  {
+ 	assert_spin_locked(&dev_priv->irq_lock);
+
+ 	if (dev_priv->pc8.irqs_disabled) {
+ 		WARN(1, "IRQs disabled\n");
+ 		dev_priv->pc8.regsave.deimr |= mask;
+ 		return;
+ 	}
+
      if ((dev_priv->irq_mask & mask) != mask) {
          dev_priv->irq_mask |= mask;
          I915_WRITE(DEIMR, dev_priv->irq_mask);
          POSTING_READ(DEIMR);
Line - (Rev 3746) ... Line 129 (Rev 4104)
+     }
+ }
+
+ /**
+  * ilk_update_gt_irq - update GTIMR
+  * @dev_priv: driver private
+  * @interrupt_mask: mask of interrupt bits to update
+  * @enabled_irq_mask: mask of interrupt bits to enable
+  */
+ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
+ 			      uint32_t interrupt_mask,
+ 			      uint32_t enabled_irq_mask)
+ {
+ 	assert_spin_locked(&dev_priv->irq_lock);
+
+ 	if (dev_priv->pc8.irqs_disabled) {
+ 		WARN(1, "IRQs disabled\n");
+ 		dev_priv->pc8.regsave.gtimr &= ~interrupt_mask;
+ 		dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask &
+ 						interrupt_mask);
+ 		return;
+ 	}
+
+ 	dev_priv->gt_irq_mask &= ~interrupt_mask;
+ 	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
+ 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ 	POSTING_READ(GTIMR);
+ }
+
+ void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+ {
+ 	ilk_update_gt_irq(dev_priv, mask, mask);
+ }
+
+ void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+ {
+ 	ilk_update_gt_irq(dev_priv, mask, 0);
+ }
+
+ /**
+   * snb_update_pm_irq - update GEN6_PMIMR
+   * @dev_priv: driver private
+   * @interrupt_mask: mask of interrupt bits to update
+   * @enabled_irq_mask: mask of interrupt bits to enable
+   */
+ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
+ 			      uint32_t interrupt_mask,
+ 			      uint32_t enabled_irq_mask)
+ {
+ 	uint32_t new_val;
+
+ 	assert_spin_locked(&dev_priv->irq_lock);
+
+ 	if (dev_priv->pc8.irqs_disabled) {
+ 		WARN(1, "IRQs disabled\n");
+ 		dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask;
+ 		dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask &
+ 						     interrupt_mask);
+ 		return;
+ 	}
+
+ 	new_val = dev_priv->pm_irq_mask;
+ 	new_val &= ~interrupt_mask;
+ 	new_val |= (~enabled_irq_mask & interrupt_mask);
+
+ 	if (new_val != dev_priv->pm_irq_mask) {
+ 		dev_priv->pm_irq_mask = new_val;
+ 		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
+ 		POSTING_READ(GEN6_PMIMR);
+ 	}
+ }
+
+ void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+ {
+ 	snb_update_pm_irq(dev_priv, mask, mask);
+ }
+
+ void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+ {
+ 	snb_update_pm_irq(dev_priv, mask, 0);
+ }
+
+ static bool ivb_can_enable_err_int(struct drm_device *dev)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct intel_crtc *crtc;
+ 	enum pipe pipe;
+
+ 	assert_spin_locked(&dev_priv->irq_lock);
+
+ 	for_each_pipe(pipe) {
+ 		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+
+ 		if (crtc->cpu_fifo_underrun_disabled)
+ 			return false;
+ 	}
+
+ 	return true;
+ }
+
+ static bool cpt_can_enable_serr_int(struct drm_device *dev)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	enum pipe pipe;
+ 	struct intel_crtc *crtc;
+
+ 	assert_spin_locked(&dev_priv->irq_lock);
+
+ 	for_each_pipe(pipe) {
+ 		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+
+ 		if (crtc->pch_fifo_underrun_disabled)
+ 			return false;
+ 	}
+
+ 	return true;
+ }
+
+ static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
+ 						 enum pipe pipe, bool enable)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
+ 					  DE_PIPEB_FIFO_UNDERRUN;
+
+ 	if (enable)
+ 		ironlake_enable_display_irq(dev_priv, bit);
+ 	else
+ 		ironlake_disable_display_irq(dev_priv, bit);
+ }
+
+ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
+ 						  enum pipe pipe, bool enable)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	if (enable) {
+ 		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
+
+ 		if (!ivb_can_enable_err_int(dev))
+ 			return;
+
+ 		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
+ 	} else {
+ 		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);
+
+ 		/* Change the state _after_ we've read out the current one. */
+ 		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+
+ 		if (!was_enabled &&
+ 		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
+ 			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
+ 				      pipe_name(pipe));
+ 	}
+ }
+ }
+
+ /**
+  * ibx_display_interrupt_update - update SDEIMR
+  * @dev_priv: driver private
+  * @interrupt_mask: mask of interrupt bits to update
+  * @enabled_irq_mask: mask of interrupt bits to enable
+  */
+ static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
+ 					 uint32_t interrupt_mask,
+ 					 uint32_t enabled_irq_mask)
+ {
+ 	uint32_t sdeimr = I915_READ(SDEIMR);
+ 	sdeimr &= ~interrupt_mask;
+ 	sdeimr |= (~enabled_irq_mask & interrupt_mask);
+
+ 	assert_spin_locked(&dev_priv->irq_lock);
+
+ 	if (dev_priv->pc8.irqs_disabled &&
+ 	    (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
+ 		WARN(1, "IRQs disabled\n");
+ 		dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask;
+ 		dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask &
+ 						 interrupt_mask);
+ 		return;
+ 	}
+
+ 	I915_WRITE(SDEIMR, sdeimr);
+ 	POSTING_READ(SDEIMR);
+ }
+ #define ibx_enable_display_interrupt(dev_priv, bits) \
+ 	ibx_display_interrupt_update((dev_priv), (bits), (bits))
+ #define ibx_disable_display_interrupt(dev_priv, bits) \
+ 	ibx_display_interrupt_update((dev_priv), (bits), 0)
+
+ static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
+ 					    enum transcoder pch_transcoder,
+ 					    bool enable)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
+ 		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
+
+ 	if (enable)
+ 		ibx_enable_display_interrupt(dev_priv, bit);
+ 	else
+ 		ibx_disable_display_interrupt(dev_priv, bit);
+ }
+
+ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
+ 					    enum transcoder pch_transcoder,
+ 					    bool enable)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+
+ 	if (enable) {
+ 		I915_WRITE(SERR_INT,
+ 			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
+
+ 		if (!cpt_can_enable_serr_int(dev))
+ 			return;
+
+ 		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+ 	} else {
+ 		uint32_t tmp = I915_READ(SERR_INT);
+ 		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);
+
+ 		/* Change the state _after_ we've read out the current one. */
+ 		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+
+ 		if (!was_enabled &&
+ 		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
+ 			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
+ 				      transcoder_name(pch_transcoder));
+ 		}
+ 	}
+ }
+
+ /**
+  * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
+  * @dev: drm device
+  * @pipe: pipe
+  * @enable: true if we want to report FIFO underrun errors, false otherwise
+  *
+  * This function makes us disable or enable CPU fifo underruns for a specific
+  * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
+  * reporting for one pipe may also disable all the other CPU error interruts for
+  * the other pipes, due to the fact that there's just one interrupt mask/enable
+  * bit for all the pipes.
+  *
+  * Returns the previous state of underrun reporting.
+  */
+ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+ 					   enum pipe pipe, bool enable)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ 	unsigned long flags;
+ 	bool ret;
+
+ 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+
+ 	ret = !intel_crtc->cpu_fifo_underrun_disabled;
+
+ 	if (enable == ret)
+ 		goto done;
+
+ 	intel_crtc->cpu_fifo_underrun_disabled = !enable;
+
+ 	if (IS_GEN5(dev) || IS_GEN6(dev))
+ 		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
+ 	else if (IS_GEN7(dev))
+ 		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
+
+ done:
+ 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ 	return ret;
+ }
+
+ /**
+  * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
+  * @dev: drm device
+  * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
+  * @enable: true if we want to report FIFO underrun errors, false otherwise
+  *
+  * This function makes us disable or enable PCH fifo underruns for a specific
+  * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
+  * underrun reporting for one transcoder may also disable all the other PCH
+  * error interruts for the other transcoders, due to the fact that there's just
+  * one interrupt mask/enable bit for all the transcoders.
+  *
+  * Returns the previous state of underrun reporting.
+  */
+ bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
+ 					   enum transcoder pch_transcoder,
+ 					   bool enable)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
+ 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ 	unsigned long flags;
+ 	bool ret;
+
+ 	/*
+ 	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
+ 	 * has only one pch transcoder A that all pipes can use. To avoid racy
+ 	 * pch transcoder -> pipe lookups from interrupt code simply store the
+ 	 * underrun statistics in crtc A. Since we never expose this anywhere
+ 	 * nor use it outside of the fifo underrun code here using the "wrong"
+ 	 * crtc on LPT won't cause issues.
+ 	 */
+
+ 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+
+ 	ret = !intel_crtc->pch_fifo_underrun_disabled;
+
+ 	if (enable == ret)
+ 		goto done;
+
+ 	intel_crtc->pch_fifo_underrun_disabled = !enable;
+
+ 	if (HAS_PCH_IBX(dev))
+ 		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
+ 	else
+ 		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
+
+ done:
+ 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ 	return ret;
-     }
- }
+ }
+

  void
  i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
Line - (Rev 3746) ... Line 458 (Rev 4104)
  {
  		u32 reg = PIPESTAT(pipe);
  	u32 pipestat = I915_READ(reg) & 0x7fff0000;

Line 129 (Rev 3746) ... Line 462 (Rev 4104)
+ 	assert_spin_locked(&dev_priv->irq_lock);
+

Line 141 (Rev 3746) ... Line 474 (Rev 4104)
  i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
  {
  		u32 reg = PIPESTAT(pipe);
  	u32 pipestat = I915_READ(reg) & 0x7fff0000;

Line - (Rev 3746) ... Line 478 (Rev 4104)
+
+ 	assert_spin_locked(&dev_priv->irq_lock);

  	if ((pipestat & mask) == 0)

Line 147 (Rev 3746) ... Line 482 (Rev 4104)
  		return;

  	pipestat &= ~mask;
  	I915_WRITE(reg, pipestat);

Line 151 (Rev 3746) ... Line 486 (Rev 4104)
  		POSTING_READ(reg);
  }

  #if 0
  /**
-  * intel_enable_asle - enable ASLE interrupt for OpRegion
+  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
   */
- void intel_enable_asle(struct drm_device *dev)
+ static void i915_enable_asle_pipestat(struct drm_device *dev)
Line 159... Line 494...
159
{
494
{
160
	drm_i915_private_t *dev_priv = dev->dev_private;
-
 
161
	unsigned long irqflags;
495
	drm_i915_private_t *dev_priv = dev->dev_private;
Line 162... Line 496...
162
 
496
	unsigned long irqflags;
Line 163... Line -...
163
	/* FIXME: opregion/asle for VLV */
-
 
164
	if (IS_VALLEYVIEW(dev))
-
 
165
		return;
-
 
166
 
-
 
167
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
497
 
168
 
498
	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
169
	if (HAS_PCH_SPLIT(dev))
-
 
170
		ironlake_enable_display_irq(dev_priv, DE_GSE);
499
		return;
171
	else {
-
 
Line 172... Line 500...
172
		i915_enable_pipestat(dev_priv, 1,
500
 
173
				     PIPE_LEGACY_BLC_EVENT_ENABLE);
501
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
174
		if (INTEL_INFO(dev)->gen >= 4)
502
 
Line 191... Line 519...
191
 */
519
 */
192
static int
520
static int
193
i915_pipe_enabled(struct drm_device *dev, int pipe)
521
i915_pipe_enabled(struct drm_device *dev, int pipe)
194
{
522
{
195
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
523
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
196
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
-
 
197
								      pipe);
-
 
Line -... Line 524...
-
 
524
 
-
 
525
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-
 
526
		/* Locking is horribly broken here, but whatever. */
-
 
527
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
-
 
528
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
 
529
 
-
 
530
		return intel_crtc->active;
198
 
531
	} else {
-
 
532
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
199
	return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
533
	}
Line 200... Line 534...
200
}
534
}
201
 
535
 
202
/* Called from drm generic code, passed a 'crtc', which
536
/* Called from drm generic code, passed a 'crtc', which
Line 344... Line 678...
344
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
678
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
345
						     vblank_time, flags,
679
						     vblank_time, flags,
346
						     crtc);
680
						     crtc);
347
}
681
}
Line -... Line 682...
-
 
682
 
-
 
683
static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
-
 
684
{
-
 
685
	enum drm_connector_status old_status;
-
 
686
 
-
 
687
	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
-
 
688
	old_status = connector->status;
-
 
689
 
-
 
690
	connector->status = connector->funcs->detect(connector, false);
-
 
691
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
-
 
692
		      connector->base.id,
-
 
693
		      drm_get_connector_name(connector),
-
 
694
		      old_status, connector->status);
-
 
695
	return (old_status != connector->status);
-
 
696
}
348
 
697
 
349
/*
698
/*
350
 * Handle hotplug events outside the interrupt handler proper.
699
 * Handle hotplug events outside the interrupt handler proper.
351
 */
700
 */
Line 360... Line 709...
360
	struct intel_connector *intel_connector;
709
	struct intel_connector *intel_connector;
361
	struct intel_encoder *intel_encoder;
710
	struct intel_encoder *intel_encoder;
362
	struct drm_connector *connector;
711
	struct drm_connector *connector;
363
	unsigned long irqflags;
712
	unsigned long irqflags;
364
	bool hpd_disabled = false;
713
	bool hpd_disabled = false;
-
 
714
	bool changed = false;
-
 
715
	u32 hpd_event_bits;
Line 365... Line 716...
365
 
716
 
366
	/* HPD irq before everything is fully set up. */
717
	/* HPD irq before everything is fully set up. */
367
	if (!dev_priv->enable_hotplug_processing)
718
	if (!dev_priv->enable_hotplug_processing)
Line 368... Line 719...
368
		return;
719
		return;
369
 
720
 
Line 370... Line 721...
370
	mutex_lock(&mode_config->mutex);
721
	mutex_lock(&mode_config->mutex);
-
 
722
	DRM_DEBUG_KMS("running encoder hotplug functions\n");
-
 
723
 
-
 
724
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
371
	DRM_DEBUG_KMS("running encoder hotplug functions\n");
725
 
372
 
726
	hpd_event_bits = dev_priv->hpd_event_bits;
373
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
727
	dev_priv->hpd_event_bits = 0;
374
	list_for_each_entry(connector, &mode_config->connector_list, head) {
728
	list_for_each_entry(connector, &mode_config->connector_list, head) {
375
		intel_connector = to_intel_connector(connector);
729
		intel_connector = to_intel_connector(connector);
Line 383... Line 737...
383
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
737
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
384
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
738
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
385
				| DRM_CONNECTOR_POLL_DISCONNECT;
739
				| DRM_CONNECTOR_POLL_DISCONNECT;
386
			hpd_disabled = true;
740
			hpd_disabled = true;
387
		}
741
		}
-
 
742
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
-
 
743
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
-
 
744
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
-
 
745
		}
388
	}
746
	}
389
	 /* if there were no outputs to poll, poll was disabled,
747
	 /* if there were no outputs to poll, poll was disabled,
390
	  * therefore make sure it's enabled when disabling HPD on
748
	  * therefore make sure it's enabled when disabling HPD on
391
	  * some connectors */
749
	  * some connectors */
392
	if (hpd_disabled) {
750
	if (hpd_disabled) {
Line 395... Line 753...
395
//             jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
753
//             jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
396
	}
754
	}
Line 397... Line 755...
397
 
755
 
Line 398... Line 756...
398
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
756
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-
 
757
 
-
 
758
	list_for_each_entry(connector, &mode_config->connector_list, head) {
-
 
759
		intel_connector = to_intel_connector(connector);
399
 
760
		intel_encoder = intel_connector->encoder;
400
	list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
761
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
-
 
762
		if (intel_encoder->hot_plug)
-
 
763
			intel_encoder->hot_plug(intel_encoder);
-
 
764
			if (intel_hpd_irq_event(dev, connector))
401
		if (intel_encoder->hot_plug)
765
				changed = true;
402
			intel_encoder->hot_plug(intel_encoder);
766
		}
Line 403... Line 767...
403
 
767
	}
404
	mutex_unlock(&mode_config->mutex);
768
	mutex_unlock(&mode_config->mutex);
405
 
769
 
Line 406... Line 770...
406
	/* Just fire off a uevent and let userspace tell us what to do */
770
	if (changed)
407
	drm_helper_hpd_irq_event(dev);
771
		drm_kms_helper_hotplug_event(dev);
408
}
772
}
409
 
773
 
410
static void ironlake_handle_rps_change(struct drm_device *dev)
774
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
411
{
-
 
Line 412... Line 775...
412
	drm_i915_private_t *dev_priv = dev->dev_private;
775
{
Line 413... Line 776...
413
	u32 busy_up, busy_down, max_avg, min_avg;
776
	drm_i915_private_t *dev_priv = dev->dev_private;
Line 414... Line 777...
414
	u8 new_delay;
777
	u32 busy_up, busy_down, max_avg, min_avg;
Line 440... Line 803...
440
	}
803
	}
Line 441... Line 804...
441
 
804
 
442
	if (ironlake_set_drps(dev, new_delay))
805
	if (ironlake_set_drps(dev, new_delay))
Line 443... Line 806...
443
		dev_priv->ips.cur_delay = new_delay;
806
		dev_priv->ips.cur_delay = new_delay;
Line 444... Line 807...
444
 
807
 
445
	spin_unlock_irqrestore(&mchdev_lock, flags);
808
	spin_unlock(&mchdev_lock);
Line 446... Line 809...
446
 
809
 
447
	return;
810
	return;
448
}
811
}
449
 
-
 
450
static void notify_ring(struct drm_device *dev,
-
 
451
			struct intel_ring_buffer *ring)
812
 
452
{
813
static void notify_ring(struct drm_device *dev,
Line 453... Line 814...
453
	struct drm_i915_private *dev_priv = dev->dev_private;
814
			struct intel_ring_buffer *ring)
Line 454... Line 815...
454
 
815
{
455
	if (ring->obj == NULL)
-
 
456
		return;
-
 
457
 
-
 
458
	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
-
 
459
 
-
 
460
	wake_up_all(&ring->irq_queue);
-
 
461
//   if (i915_enable_hangcheck) {
816
	if (ring->obj == NULL)
Line 462... Line 817...
462
//       dev_priv->hangcheck_count = 0;
817
		return;
463
//       mod_timer(&dev_priv->hangcheck_timer,
818
 
464
//             jiffies +
819
	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
465
//             msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
820
 
466
//   }
821
	wake_up_all(&ring->irq_queue);
467
}
822
}
468
 
823
 
Line 469... Line 824...
469
#if 0
824
#if 0
470
static void gen6_pm_rps_work(struct work_struct *work)
825
static void gen6_pm_rps_work(struct work_struct *work)
471
{
826
{
472
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
827
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
473
						    rps.work);
828
						    rps.work);
474
	u32 pm_iir, pm_imr;
829
	u32 pm_iir;
-
 
830
	u8 new_delay;
-
 
831
 
-
 
832
	spin_lock_irq(&dev_priv->irq_lock);
Line 475... Line 833...
475
	u8 new_delay;
833
	pm_iir = dev_priv->rps.pm_iir;
476
 
834
	dev_priv->rps.pm_iir = 0;
Line 477... Line 835...
477
	spin_lock_irq(&dev_priv->rps.lock);
835
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
Line 478... Line 836...
478
	pm_iir = dev_priv->rps.pm_iir;
836
	snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
479
	dev_priv->rps.pm_iir = 0;
837
	spin_unlock_irq(&dev_priv->irq_lock);
-
 
838
 
-
 
839
	/* Make sure we didn't queue anything we're not going to process. */
-
 
840
	WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);
-
 
841
 
-
 
842
	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
-
 
843
		return;
-
 
844
 
-
 
845
	mutex_lock(&dev_priv->rps.hw_lock);
480
	pm_imr = I915_READ(GEN6_PMIMR);
846
 
481
	I915_WRITE(GEN6_PMIMR, 0);
847
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
Line 482... Line 848...
482
	spin_unlock_irq(&dev_priv->rps.lock);
848
		new_delay = dev_priv->rps.cur_delay + 1;
483
 
849
 
484
	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
850
		/*
485
		return;
851
		 * For better performance, jump directly
486
 
852
		 * to RPe if we're below it.
-
 
853
		 */
-
 
854
		if (IS_VALLEYVIEW(dev_priv->dev) &&
-
 
855
		    dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
487
	mutex_lock(&dev_priv->rps.hw_lock);
856
			new_delay = dev_priv->rps.rpe_delay;
488
 
857
	} else
Line -... Line 858...
-
 
858
		new_delay = dev_priv->rps.cur_delay - 1;
-
 
859
 
-
 
860
	/* sysfs frequency interfaces may have snuck in while servicing the
-
 
861
	 * interrupt
-
 
862
	 */
-
 
863
	if (new_delay >= dev_priv->rps.min_delay &&
-
 
864
	    new_delay <= dev_priv->rps.max_delay) {
-
 
865
		if (IS_VALLEYVIEW(dev_priv->dev))
-
 
866
			valleyview_set_rps(dev_priv->dev, new_delay);
-
 
867
		else
-
 
868
		gen6_set_rps(dev_priv->dev, new_delay);
489
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
869
	}
490
		new_delay = dev_priv->rps.cur_delay + 1;
870
 
Line 491... Line 871...
491
	else
871
	if (IS_VALLEYVIEW(dev_priv->dev)) {
Line 541... Line 921...
541
	POSTING_READ(GEN7_L3CDERRST1);
921
	POSTING_READ(GEN7_L3CDERRST1);
Line 542... Line 922...
542
 
922
 
Line 543... Line 923...
543
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
923
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
544
 
924
 
545
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-
 
546
	dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
925
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
Line 547... Line 926...
547
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
926
	ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
Line 548... Line 927...
548
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
927
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
549
 
928
 
550
	mutex_unlock(&dev_priv->dev->struct_mutex);
929
	mutex_unlock(&dev_priv->dev->struct_mutex);
551
 
930
 
552
	parity_event[0] = "L3_PARITY_ERROR=1";
931
	parity_event[0] = I915_L3_PARITY_UEVENT "=1";
Line 564... Line 943...
564
	kfree(parity_event[3]);
943
	kfree(parity_event[3]);
565
	kfree(parity_event[2]);
944
	kfree(parity_event[2]);
566
	kfree(parity_event[1]);
945
	kfree(parity_event[1]);
567
}
946
}
Line 568... Line 947...
568
 
947
 
569
static void ivybridge_handle_parity_error(struct drm_device *dev)
948
static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
570
{
949
{
571
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
 
Line 572... Line 950...
572
	unsigned long flags;
950
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
573
 
951
 
Line 574... Line 952...
574
	if (!HAS_L3_GPU_CACHE(dev))
952
	if (!HAS_L3_GPU_CACHE(dev))
575
		return;
953
		return;
576
 
-
 
577
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
954
 
Line 578... Line 955...
578
	dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
955
	spin_lock(&dev_priv->irq_lock);
579
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
956
	ilk_disable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
Line 580... Line 957...
580
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
957
	spin_unlock(&dev_priv->irq_lock);
Line -... Line 958...
-
 
958
 
-
 
959
	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
-
 
960
}
-
 
961
 
-
 
962
#endif
-
 
963
 
-
 
964
static void ilk_gt_irq_handler(struct drm_device *dev,
-
 
965
			       struct drm_i915_private *dev_priv,
-
 
966
			       u32 gt_iir)
-
 
967
{
-
 
968
	if (gt_iir &
581
 
969
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
582
	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
970
		notify_ring(dev, &dev_priv->ring[RCS]);
583
}
971
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
584
 
972
		notify_ring(dev, &dev_priv->ring[VCS]);
Line 585... Line 973...
585
#endif
973
}
586
 
974
 
587
static void snb_gt_irq_handler(struct drm_device *dev,
975
static void snb_gt_irq_handler(struct drm_device *dev,
588
			       struct drm_i915_private *dev_priv,
976
			       struct drm_i915_private *dev_priv,
589
			       u32 gt_iir)
977
			       u32 gt_iir)
590
{
978
{
591
 
979
 
Line 592... Line 980...
592
	if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
980
	if (gt_iir &
593
		      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
981
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
594
		notify_ring(dev, &dev_priv->ring[RCS]);
982
		notify_ring(dev, &dev_priv->ring[RCS]);
595
	if (gt_iir & GEN6_BSD_USER_INTERRUPT)
983
	if (gt_iir & GT_BSD_USER_INTERRUPT)
596
		notify_ring(dev, &dev_priv->ring[VCS]);
984
		notify_ring(dev, &dev_priv->ring[VCS]);
597
	if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
985
	if (gt_iir & GT_BLT_USER_INTERRUPT)
Line 598... Line 986...
598
		notify_ring(dev, &dev_priv->ring[BCS]);
986
		notify_ring(dev, &dev_priv->ring[BCS]);
599
 
987
 
600
	if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
988
	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
Line 601... Line -...
601
		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
-
 
602
		      GT_RENDER_CS_ERROR_INTERRUPT)) {
-
 
603
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
-
 
604
		i915_handle_error(dev, false);
-
 
605
	}
-
 
606
 
-
 
607
//	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
-
 
608
//		ivybridge_handle_parity_error(dev);
-
 
609
}
-
 
610
 
-
 
611
static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
-
 
612
				u32 pm_iir)
-
 
613
{
-
 
614
	unsigned long flags;
-
 
615
 
-
 
616
	/*
-
 
617
	 * IIR bits should never already be set because IMR should
-
 
618
	 * prevent an interrupt from being shown in IIR. The warning
-
 
619
	 * displays a case where we've unsafely cleared
-
 
620
	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
-
 
621
	 * type is not a problem, it displays a problem in the logic.
-
 
622
	 *
-
 
623
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
-
 
624
	 */
-
 
625
 
989
		      GT_BSD_CS_ERROR_INTERRUPT |
626
	spin_lock_irqsave(&dev_priv->rps.lock, flags);
990
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
Line 627... Line 991...
627
	dev_priv->rps.pm_iir |= pm_iir;
991
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
628
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
992
//       i915_handle_error(dev, false);
629
	POSTING_READ(GEN6_PMIMR);
993
	}
630
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
994
 
631
 
995
//	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
632
//   queue_work(dev_priv->wq, &dev_priv->rps.work);
-
 
633
}
996
//		ivybridge_handle_parity_error(dev);
634
 
997
}
Line 635... Line 998...
635
#define HPD_STORM_DETECT_PERIOD 1000
998
 
-
 
999
#define HPD_STORM_DETECT_PERIOD 1000
Line -... Line 1000...
-
 
1000
#define HPD_STORM_THRESHOLD 5
636
#define HPD_STORM_THRESHOLD 5
1001
 
Line -... Line 1002...
-
 
1002
static inline void intel_hpd_irq_handler(struct drm_device *dev,
-
 
1003
					    u32 hotplug_trigger,
-
 
1004
					    const u32 *hpd)
-
 
1005
{
637
 
1006
	drm_i915_private_t *dev_priv = dev->dev_private;
638
static inline bool hotplug_irq_storm_detect(struct drm_device *dev,
1007
	int i;
639
					    u32 hotplug_trigger,
1008
	bool storm_detected = false;
Line -... Line 1009...
-
 
1009
 
640
					    const u32 *hpd)
1010
	if (!hotplug_trigger)
641
{
1011
		return;
642
	drm_i915_private_t *dev_priv = dev->dev_private;
1012
 
643
	unsigned long irqflags;
1013
	spin_lock(&dev_priv->irq_lock);
644
	int i;
1014
	for (i = 1; i < HPD_NUM_PINS; i++) {
Line 664... Line 1034...
664
//       } else {
1034
//       } else {
665
			dev_priv->hpd_stats[i].hpd_cnt++;
1035
			dev_priv->hpd_stats[i].hpd_cnt++;
666
//       }
1036
//       }
667
	}
1037
	}
Line -... Line 1038...
-
 
1038
 
-
 
1039
	if (storm_detected)
668
 
1040
		dev_priv->display.hpd_irq_setup(dev);
-
 
1041
	spin_unlock(&dev_priv->irq_lock);
Line 669... Line -...
669
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-
 
670
 
1042
 
Line 671... Line 1043...
671
	return ret;
1043
 
672
}
1044
}
673
 
1045
 
Line 683... Line 1055...
683
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
1055
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
Line 684... Line 1056...
684
 
1056
 
685
	wake_up_all(&dev_priv->gmbus_wait_queue);
1057
	wake_up_all(&dev_priv->gmbus_wait_queue);
Line -... Line 1058...
-
 
1058
}
-
 
1059
 
-
 
1060
/* The RPS events need forcewake, so we add them to a work queue and mask their
-
 
1061
 * IMR bits until the work is done. Other interrupts can be processed without
-
 
1062
 * the work queue. */
-
 
1063
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
-
 
1064
{
-
 
1065
	if (pm_iir & GEN6_PM_RPS_EVENTS) {
-
 
1066
		spin_lock(&dev_priv->irq_lock);
-
 
1067
		dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
-
 
1068
		snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS);
-
 
1069
		spin_unlock(&dev_priv->irq_lock);
-
 
1070
 
-
 
1071
		queue_work(dev_priv->wq, &dev_priv->rps.work);
-
 
1072
	}
-
 
1073
 
-
 
1074
	if (HAS_VEBOX(dev_priv->dev)) {
-
 
1075
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
-
 
1076
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
-
 
1077
 
-
 
1078
		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
-
 
1079
			DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
-
 
1080
//           i915_handle_error(dev_priv->dev, false);
-
 
1081
		}
-
 
1082
	}
686
}
1083
}
687
 
1084
 
688
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1085
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
689
{
1086
{
690
	struct drm_device *dev = (struct drm_device *) arg;
1087
	struct drm_device *dev = (struct drm_device *) arg;
Line 743... Line 1140...
743
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1140
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
744
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1141
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
Line 745... Line 1142...
745
 
1142
 
746
			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
1143
			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
747
					 hotplug_status);
-
 
-
 
1144
					 hotplug_status);
748
			if (hotplug_trigger) {
1145
 
749
				if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
-
 
750
					i915_hpd_irq_setup(dev);
-
 
751
				queue_work(dev_priv->wq,
-
 
752
					   &dev_priv->hotplug_work);
1146
			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
753
			}
1147
 
754
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1148
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
755
			I915_READ(PORT_HOTPLUG_STAT);
1149
			I915_READ(PORT_HOTPLUG_STAT);
Line 756... Line 1150...
756
		}
1150
		}
Line 774... Line 1168...
774
{
1168
{
775
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1169
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
776
	int pipe;
1170
	int pipe;
777
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1171
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
Line 778... Line -...
778
 
-
 
779
	if (hotplug_trigger) {
1172
 
780
		if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_ibx))
-
 
781
			ibx_hpd_irq_setup(dev);
-
 
782
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
1173
	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
783
	}
1174
 
784
	if (pch_iir & SDE_AUDIO_POWER_MASK)
-
 
785
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
1175
	if (pch_iir & SDE_AUDIO_POWER_MASK) {
786
				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
1176
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
-
 
1177
			       SDE_AUDIO_POWER_SHIFT);
-
 
1178
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
-
 
1179
				 port_name(port));
Line 787... Line 1180...
787
				 SDE_AUDIO_POWER_SHIFT);
1180
	}
788
 
1181
 
Line 789... Line 1182...
789
	if (pch_iir & SDE_AUX_MASK)
1182
	if (pch_iir & SDE_AUX_MASK)
Line 811... Line 1204...
811
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1204
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
Line 812... Line 1205...
812
 
1205
 
813
	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1206
	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
Line 814... Line -...
814
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
-
 
815
 
-
 
816
	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1207
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
-
 
1208
 
-
 
1209
	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
-
 
1210
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
-
 
1211
							  false))
-
 
1212
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
-
 
1213
 
-
 
1214
	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
-
 
1215
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
-
 
1216
							  false))
-
 
1217
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
-
 
1218
}
-
 
1219
 
-
 
1220
static void ivb_err_int_handler(struct drm_device *dev)
-
 
1221
{
-
 
1222
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
1223
	u32 err_int = I915_READ(GEN7_ERR_INT);
-
 
1224
 
-
 
1225
	if (err_int & ERR_INT_POISON)
-
 
1226
		DRM_ERROR("Poison interrupt\n");
-
 
1227
 
-
 
1228
	if (err_int & ERR_INT_FIFO_UNDERRUN_A)
-
 
1229
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
-
 
1230
			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
-
 
1231
 
-
 
1232
	if (err_int & ERR_INT_FIFO_UNDERRUN_B)
-
 
1233
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
-
 
1234
			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
-
 
1235
 
-
 
1236
	if (err_int & ERR_INT_FIFO_UNDERRUN_C)
-
 
1237
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
-
 
1238
			DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");
-
 
1239
 
-
 
1240
	I915_WRITE(GEN7_ERR_INT, err_int);
-
 
1241
}
-
 
1242
 
-
 
1243
static void cpt_serr_int_handler(struct drm_device *dev)
-
 
1244
{
-
 
1245
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
1246
	u32 serr_int = I915_READ(SERR_INT);
-
 
1247
 
-
 
1248
	if (serr_int & SERR_INT_POISON)
-
 
1249
		DRM_ERROR("PCH poison interrupt\n");
-
 
1250
 
-
 
1251
	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
817
		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
1252
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
-
 
1253
							  false))
-
 
1254
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
-
 
1255
 
-
 
1256
	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
-
 
1257
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
-
 
1258
							  false))
-
 
1259
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
-
 
1260
 
-
 
1261
	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
-
 
1262
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
-
 
1263
							  false))
-
 
1264
			DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");
818
	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1265
 
Line 819... Line 1266...
819
		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
1266
	I915_WRITE(SERR_INT, serr_int);
820
}
1267
}
821
 
1268
 
822
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1269
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
823
{
1270
{
Line 824... Line -...
824
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
 
825
	int pipe;
1271
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
826
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
-
 
827
 
-
 
828
	if (hotplug_trigger) {
1272
	int pipe;
829
		if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_cpt))
1273
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
830
			ibx_hpd_irq_setup(dev);
-
 
831
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
1274
 
832
	}
1275
	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
-
 
1276
 
-
 
1277
	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
-
 
1278
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
Line 833... Line 1279...
833
	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
1279
			       SDE_AUDIO_POWER_SHIFT_CPT);
834
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
1280
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
Line 835... Line 1281...
835
				 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1281
				 port_name(port));
Line 850... Line 1296...
850
	if (pch_iir & SDE_FDI_MASK_CPT)
1296
	if (pch_iir & SDE_FDI_MASK_CPT)
851
		for_each_pipe(pipe)
1297
		for_each_pipe(pipe)
852
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1298
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
853
					 pipe_name(pipe),
1299
					 pipe_name(pipe),
854
					 I915_READ(FDI_RX_IIR(pipe)));
1300
					 I915_READ(FDI_RX_IIR(pipe)));
855
}
-
 
856
 
-
 
857
static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
-
 
858
{
-
 
859
	struct drm_device *dev = (struct drm_device *) arg;
-
 
860
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
 
861
	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0;
-
 
862
	irqreturn_t ret = IRQ_NONE;
-
 
863
	int i;
-
 
864
 
-
 
865
	atomic_inc(&dev_priv->irq_received);
-
 
866
 
-
 
867
	/* disable master interrupt before clearing iir  */
-
 
868
	de_ier = I915_READ(DEIER);
-
 
869
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
-
 
870
 
-
 
871
	/* Disable south interrupts. We'll only write to SDEIIR once, so further
-
 
872
	 * interrupts will will be stored on its back queue, and then we'll be
-
 
873
	 * able to process them after we restore SDEIER (as soon as we restore
-
 
874
	 * it, we'll get an interrupt if SDEIIR still has something to process
-
 
875
	 * due to its back queue). */
-
 
876
	if (!HAS_PCH_NOP(dev)) {
-
 
877
	sde_ier = I915_READ(SDEIER);
-
 
878
	I915_WRITE(SDEIER, 0);
-
 
879
	POSTING_READ(SDEIER);
-
 
880
	}
-
 
881
 
-
 
882
	gt_iir = I915_READ(GTIIR);
-
 
883
	if (gt_iir) {
-
 
884
		snb_gt_irq_handler(dev, dev_priv, gt_iir);
-
 
885
		I915_WRITE(GTIIR, gt_iir);
-
 
886
		ret = IRQ_HANDLED;
-
 
887
	}
-
 
888
 
-
 
889
	de_iir = I915_READ(DEIIR);
-
 
890
	if (de_iir) {
-
 
891
		if (de_iir & DE_AUX_CHANNEL_A_IVB)
-
 
892
			dp_aux_irq_handler(dev);
-
 
893
#if 0
-
 
894
		if (de_iir & DE_GSE_IVB)
-
 
895
			intel_opregion_gse_intr(dev);
-
 
896
 
-
 
897
		for (i = 0; i < 3; i++) {
-
 
898
			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
-
 
899
				drm_handle_vblank(dev, i);
-
 
900
			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
-
 
901
				intel_prepare_page_flip(dev, i);
-
 
902
				intel_finish_page_flip_plane(dev, i);
-
 
903
			}
-
 
904
		}
-
 
905
#endif
-
 
906
		/* check event from PCH */
-
 
907
		if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
-
 
908
			u32 pch_iir = I915_READ(SDEIIR);
-
 
Line 909... Line 1301...
909
 
1301
 
910
			cpt_irq_handler(dev, pch_iir);
-
 
911
 
-
 
912
			/* clear PCH hotplug event before clear CPU irq */
1302
	if (pch_iir & SDE_ERROR_CPT)
913
			I915_WRITE(SDEIIR, pch_iir);
1303
		cpt_serr_int_handler(dev);
Line 914... Line -...
914
		}
-
 
915
 
-
 
916
		I915_WRITE(DEIIR, de_iir);
-
 
917
		ret = IRQ_HANDLED;
-
 
918
	}
-
 
919
 
-
 
920
	pm_iir = I915_READ(GEN6_PMIIR);
-
 
921
	if (pm_iir) {
-
 
922
//		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
-
 
923
//			gen6_queue_rps_work(dev_priv, pm_iir);
-
 
924
		I915_WRITE(GEN6_PMIIR, pm_iir);
-
 
925
		ret = IRQ_HANDLED;
-
 
926
	}
-
 
927
 
-
 
928
	I915_WRITE(DEIER, de_ier);
-
 
929
	POSTING_READ(DEIER);
-
 
930
	if (!HAS_PCH_NOP(dev)) {
-
 
931
	I915_WRITE(SDEIER, sde_ier);
-
 
932
	POSTING_READ(SDEIER);
-
 
933
	}
-
 
934
 
-
 
935
	return ret;
-
 
936
}
1304
	}
937
 
-
 
938
static void ilk_gt_irq_handler(struct drm_device *dev,
-
 
939
			       struct drm_i915_private *dev_priv,
-
 
940
			       u32 gt_iir)
-
 
941
{
-
 
942
	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
-
 
943
		notify_ring(dev, &dev_priv->ring[RCS]);
-
 
944
	if (gt_iir & GT_BSD_USER_INTERRUPT)
-
 
945
		notify_ring(dev, &dev_priv->ring[VCS]);
-
 
946
}
-
 
947
 
1305
 
948
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
-
 
949
{
1306
static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
950
	struct drm_device *dev = (struct drm_device *) arg;
-
 
951
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
 
952
    int ret = IRQ_NONE;
-
 
953
	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
-
 
954
 
-
 
955
    atomic_inc(&dev_priv->irq_received);
-
 
956
 
-
 
957
    /* disable master interrupt before clearing iir  */
-
 
958
    de_ier = I915_READ(DEIER);
-
 
959
    I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
-
 
960
    POSTING_READ(DEIER);
-
 
961
 
-
 
962
	/* Disable south interrupts. We'll only write to SDEIIR once, so further
-
 
963
	 * interrupts will will be stored on its back queue, and then we'll be
-
 
964
	 * able to process them after we restore SDEIER (as soon as we restore
-
 
965
	 * it, we'll get an interrupt if SDEIIR still has something to process
-
 
966
	 * due to its back queue). */
-
 
967
	sde_ier = I915_READ(SDEIER);
-
 
968
	I915_WRITE(SDEIER, 0);
-
 
969
	POSTING_READ(SDEIER);
-
 
970
 
-
 
971
    de_iir = I915_READ(DEIIR);
-
 
972
    gt_iir = I915_READ(GTIIR);
-
 
973
    pm_iir = I915_READ(GEN6_PMIIR);
-
 
974
 
-
 
975
	if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
-
 
976
        goto done;
-
 
977
 
-
 
978
    ret = IRQ_HANDLED;
-
 
979
 
-
 
980
	if (IS_GEN5(dev))
-
 
981
		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
-
 
Line 982... Line 1307...
982
	else
1307
{
983
		snb_gt_irq_handler(dev, dev_priv, gt_iir);
1308
	struct drm_i915_private *dev_priv = dev->dev_private;
Line 984... Line 1309...
984
 
1309
 
985
	if (de_iir & DE_AUX_CHANNEL_A)
1310
	if (de_iir & DE_AUX_CHANNEL_A)
986
		dp_aux_irq_handler(dev);
1311
		dp_aux_irq_handler(dev);
Line 987... Line 1312...
987
 
1312
 
988
#if 0
1313
#if 0
Line 989... Line 1314...
989
	if (de_iir & DE_GSE)
1314
	if (de_iir & DE_GSE)
990
		intel_opregion_gse_intr(dev);
1315
		intel_opregion_asle_intr(dev);
Line -... Line 1316...
-
 
1316
 
-
 
1317
	if (de_iir & DE_PIPEA_VBLANK)
-
 
1318
		drm_handle_vblank(dev, 0);
-
 
1319
 
-
 
1320
	if (de_iir & DE_PIPEB_VBLANK)
-
 
1321
		drm_handle_vblank(dev, 1);
-
 
1322
 
-
 
1323
	if (de_iir & DE_POISON)
-
 
1324
		DRM_ERROR("Poison interrupt\n");
-
 
1325
#endif
-
 
1326
 
-
 
1327
	if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
991
 
1328
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
992
	if (de_iir & DE_PIPEA_VBLANK)
1329
			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
993
		drm_handle_vblank(dev, 0);
1330
 
994
 
1331
	if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
Line 1016... Line 1353...
1016
			ibx_irq_handler(dev, pch_iir);
1353
			ibx_irq_handler(dev, pch_iir);
Line 1017... Line 1354...
1017
 
1354
 
1018
		/* should clear PCH hotplug event before clear CPU irq */
1355
		/* should clear PCH hotplug event before clear CPU irq */
1019
		I915_WRITE(SDEIIR, pch_iir);
1356
		I915_WRITE(SDEIIR, pch_iir);
1020
	}
-
 
1021
#if 0
-
 
1022
	if (IS_GEN5(dev) &&  de_iir & DE_PCU_EVENT)
-
 
1023
		ironlake_handle_rps_change(dev);
-
 
1024
 
-
 
1025
	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
-
 
1026
		gen6_queue_rps_work(dev_priv, pm_iir);
-
 
1027
#endif
-
 
1028
    I915_WRITE(GTIIR, gt_iir);
-
 
1029
    I915_WRITE(DEIIR, de_iir);
-
 
1030
    I915_WRITE(GEN6_PMIIR, pm_iir);
-
 
1031
 
-
 
1032
done:
-
 
1033
    I915_WRITE(DEIER, de_ier);
-
 
1034
    POSTING_READ(DEIER);
-
 
1035
	I915_WRITE(SDEIER, sde_ier);
-
 
Line -... Line 1357...
-
 
1357
	}
1036
	POSTING_READ(SDEIER);
1358
 
1037
 
1359
	if (IS_GEN5(dev) &&  de_iir & DE_PCU_EVENT)
Line 1038... Line -...
1038
    return ret;
-
 
1039
}
-
 
1040
 
-
 
1041
 
-
 
1042
 
1360
		ironlake_rps_change_irq_handler(dev);
1043
 
-
 
1044
/* NB: please notice the memset */
1361
}
1045
static void i915_get_extra_instdone(struct drm_device *dev,
1362
 
1046
				    uint32_t *instdone)
-
 
1047
{
-
 
1048
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
1049
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
-
 
1050
 
-
 
1051
	switch(INTEL_INFO(dev)->gen) {
-
 
1052
	case 2:
-
 
1053
	case 3:
-
 
1054
		instdone[0] = I915_READ(INSTDONE);
-
 
1055
		break;
-
 
1056
	case 4:
-
 
1057
	case 5:
-
 
1058
	case 6:
-
 
1059
		instdone[0] = I915_READ(INSTDONE_I965);
-
 
1060
		instdone[1] = I915_READ(INSTDONE1);
-
 
1061
		break;
-
 
1062
	default:
-
 
1063
		WARN_ONCE(1, "Unsupported platform\n");
-
 
1064
	case 7:
-
 
1065
		instdone[0] = I915_READ(GEN7_INSTDONE_1);
-
 
1066
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
-
 
1067
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
-
 
1068
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
-
 
1069
		break;
-
 
1070
	}
-
 
1071
}
-
 
1072
 
-
 
1073
#ifdef CONFIG_DEBUG_FS
-
 
1074
static struct drm_i915_error_object *
-
 
1075
i915_error_object_create_sized(struct drm_i915_private *dev_priv,
-
 
1076
			       struct drm_i915_gem_object *src,
-
 
1077
			       const int num_pages)
1363
static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
1078
{
-
 
1079
	struct drm_i915_error_object *dst;
-
 
1080
	int i;
-
 
1081
	u32 reloc_offset;
-
 
Line 1082... Line -...
1082
 
-
 
1083
	if (src == NULL || src->pages == NULL)
1364
{
1084
		return NULL;
-
 
1085
 
-
 
1086
	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
-
 
1087
	if (dst == NULL)
1365
	struct drm_i915_private *dev_priv = dev->dev_private;
1088
		return NULL;
-
 
1089
 
-
 
Line 1090... Line 1366...
1090
	reloc_offset = src->gtt_offset;
1366
	int i;
1091
	for (i = 0; i < num_pages; i++) {
-
 
1092
		unsigned long flags;
-
 
1093
		void *d;
-
 
1094
 
1367
 
1095
		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
-
 
1096
		if (d == NULL)
-
 
1097
			goto unwind;
-
 
1098
 
-
 
1099
		local_irq_save(flags);
-
 
1100
		if (reloc_offset < dev_priv->gtt.mappable_end &&
-
 
1101
		    src->has_global_gtt_mapping) {
-
 
1102
			void __iomem *s;
-
 
1103
 
-
 
1104
			/* Simply ignore tiling or any overlapping fence.
-
 
1105
			 * It's part of the error state, and this hopefully
-
 
1106
			 * captures what the GPU read.
-
 
1107
			 */
-
 
1108
 
-
 
1109
			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
-
 
1110
						     reloc_offset);
-
 
1111
			memcpy_fromio(d, s, PAGE_SIZE);
-
 
1112
			io_mapping_unmap_atomic(s);
-
 
1113
		} else if (src->stolen) {
-
 
1114
			unsigned long offset;
-
 
1115
 
-
 
1116
			offset = dev_priv->mm.stolen_base;
-
 
1117
			offset += src->stolen->start;
-
 
1118
			offset += i << PAGE_SHIFT;
-
 
1119
 
-
 
1120
			memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
-
 
1121
		} else {
-
 
1122
			struct page *page;
-
 
1123
			void *s;
-
 
1124
 
-
 
1125
			page = i915_gem_object_get_page(src, i);
-
 
1126
 
-
 
1127
			drm_clflush_pages(&page, 1);
-
 
1128
 
-
 
1129
			s = kmap_atomic(page);
-
 
1130
			memcpy(d, s, PAGE_SIZE);
-
 
1131
			kunmap_atomic(s);
-
 
1132
 
-
 
1133
			drm_clflush_pages(&page, 1);
-
 
1134
		}
-
 
1135
		local_irq_restore(flags);
-
 
1136
 
-
 
1137
		dst->pages[i] = d;
-
 
1138
 
-
 
1139
		reloc_offset += PAGE_SIZE;
-
 
Line -... Line 1368...
-
 
1368
//	if (de_iir & DE_ERR_INT_IVB)
-
 
1369
//		ivb_err_int_handler(dev);
1140
	}
1370
 
1141
	dst->page_count = num_pages;
1371
	if (de_iir & DE_AUX_CHANNEL_A_IVB)
-
 
1372
		dp_aux_irq_handler(dev);
1142
	dst->gtt_offset = src->gtt_offset;
1373
 
-
 
1374
	if (de_iir & DE_GSE_IVB)
1143
 
1375
		intel_opregion_asle_intr(dev);
1144
	return dst;
1376
#if 0
1145
 
1377
	for (i = 0; i < 3; i++) {
1146
unwind:
-
 
1147
	while (i--)
-
 
1148
		kfree(dst->pages[i]);
-
 
1149
	kfree(dst);
-
 
1150
	return NULL;
-
 
1151
}
-
 
1152
#define i915_error_object_create(dev_priv, src) \
-
 
1153
	i915_error_object_create_sized((dev_priv), (src), \
-
 
1154
				       (src)->base.size>>PAGE_SHIFT)
-
 
1155
 
-
 
1156
static void
-
 
1157
i915_error_object_free(struct drm_i915_error_object *obj)
-
 
1158
{
-
 
1159
	int page;
-
 
1160
 
-
 
1161
	if (obj == NULL)
-
 
1162
		return;
1378
		if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
-
 
1379
			drm_handle_vblank(dev, i);
Line 1163... Line -...
1163
 
-
 
1164
	for (page = 0; page < obj->page_count; page++)
-
 
1165
		kfree(obj->pages[page]);
-
 
1166
 
-
 
1167
	kfree(obj);
-
 
1168
}
-
 
1169
 
-
 
1170
void
-
 
1171
i915_error_state_free(struct kref *error_ref)
-
 
1172
{
-
 
1173
	struct drm_i915_error_state *error = container_of(error_ref,
-
 
1174
							  typeof(*error), ref);
-
 
1175
	int i;
-
 
1176
 
-
 
1177
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
-
 
1178
		i915_error_object_free(error->ring[i].batchbuffer);
-
 
1179
		i915_error_object_free(error->ring[i].ringbuffer);
-
 
1180
		kfree(error->ring[i].requests);
-
 
1181
	}
-
 
1182
 
-
 
1183
	kfree(error->active_bo);
-
 
1184
	kfree(error->overlay);
-
 
1185
	kfree(error);
-
 
1186
}
-
 
1187
static void capture_bo(struct drm_i915_error_buffer *err,
-
 
1188
		       struct drm_i915_gem_object *obj)
-
 
1189
{
-
 
1190
	err->size = obj->base.size;
-
 
1191
	err->name = obj->base.name;
-
 
1192
	err->rseqno = obj->last_read_seqno;
1380
		if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
1193
	err->wseqno = obj->last_write_seqno;
-
 
1194
	err->gtt_offset = obj->gtt_offset;
-
 
1195
	err->read_domains = obj->base.read_domains;
-
 
1196
	err->write_domain = obj->base.write_domain;
-
 
1197
	err->fence_reg = obj->fence_reg;
-
 
1198
	err->pinned = 0;
1381
			intel_prepare_page_flip(dev, i);
1199
	if (obj->pin_count > 0)
-
 
1200
		err->pinned = 1;
1382
			intel_finish_page_flip_plane(dev, i);
1201
	if (obj->user_pin_count > 0)
-
 
Line 1202... Line -...
1202
		err->pinned = -1;
-
 
1203
	err->tiling = obj->tiling_mode;
-
 
1204
	err->dirty = obj->dirty;
-
 
1205
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
1383
		}
1206
	err->ring = obj->ring ? obj->ring->id : -1;
-
 
Line 1207... Line 1384...
1207
	err->cache_level = obj->cache_level;
1384
	}
1208
}
1385
#endif
1209
 
-
 
1210
static u32 capture_active_bo(struct drm_i915_error_buffer *err,
-
 
1211
			     int count, struct list_head *head)
1386
 
1212
{
-
 
1213
	struct drm_i915_gem_object *obj;
-
 
1214
	int i = 0;
1387
	/* check event from PCH */
Line 1215... Line 1388...
1215
 
1388
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
1216
	list_for_each_entry(obj, head, mm_list) {
-
 
1217
		capture_bo(err++, obj);
1389
		u32 pch_iir = I915_READ(SDEIIR);
1218
		if (++i == count)
1390
 
1219
			break;
-
 
1220
	}
-
 
1221
 
1391
		cpt_irq_handler(dev, pch_iir);
1222
	return i;
1392
 
1223
}
-
 
1224
 
-
 
1225
static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
1393
		/* clear PCH hotplug event before clear CPU irq */
1226
			     int count, struct list_head *head)
1394
		I915_WRITE(SDEIIR, pch_iir);
1227
{
-
 
1228
	struct drm_i915_gem_object *obj;
-
 
Line 1229... Line 1395...
1229
	int i = 0;
1395
}
1230
 
-
 
Line 1231... Line 1396...
1231
	list_for_each_entry(obj, head, gtt_list) {
1396
}
1232
		if (obj->pin_count == 0)
1397
 
1233
			continue;
-
 
1234
 
1398
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1235
		capture_bo(err++, obj);
-
 
Line 1236... Line -...
1236
		if (++i == count)
-
 
1237
			break;
-
 
1238
	}
-
 
1239
 
-
 
1240
	return i;
1399
{
1241
}
-
 
1242
 
-
 
1243
static void i915_gem_record_fences(struct drm_device *dev,
-
 
1244
				   struct drm_i915_error_state *error)
-
 
1245
{
1400
	struct drm_device *dev = (struct drm_device *) arg;
1246
	struct drm_i915_private *dev_priv = dev->dev_private;
1401
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1247
	int i;
-
 
1248
 
-
 
1249
	/* Fences */
-
 
1250
	switch (INTEL_INFO(dev)->gen) {
1402
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
1251
	case 7:
-
 
1252
	case 6:
-
 
1253
		for (i = 0; i < dev_priv->num_fence_regs; i++)
-
 
1254
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
-
 
1255
		break;
-
 
Line -... Line 1403...
-
 
1403
	irqreturn_t ret = IRQ_NONE;
-
 
1404
	bool err_int_reenable = false;
-
 
1405
 
-
 
1406
	atomic_inc(&dev_priv->irq_received);
1256
	case 5:
1407
 
-
 
1408
	/* We get interrupts on unclaimed registers, so check for this before we
-
 
1409
	 * do any I915_{READ,WRITE}. */
-
 
1410
	intel_uncore_check_errors(dev);
1257
	case 4:
1411
 
1258
		for (i = 0; i < 16; i++)
-
 
1259
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
1412
	/* disable master interrupt before clearing iir  */
Line 1260... Line 1413...
1260
		break;
1413
	de_ier = I915_READ(DEIER);
1261
	case 3:
1414
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
1262
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
-
 
1263
			for (i = 0; i < 8; i++)
-
 
1264
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
-
 
1265
	case 2:
1415
	POSTING_READ(DEIER);
1266
		for (i = 0; i < 8; i++)
-
 
1267
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
1416
 
1268
		break;
-
 
1269
 
-
 
1270
	default:
1417
	/* Disable south interrupts. We'll only write to SDEIIR once, so further
1271
		BUG();
1418
	 * interrupts will be stored on its back queue, and then we'll be
1272
	}
-
 
1273
}
-
 
1274
 
-
 
1275
static struct drm_i915_error_object *
-
 
1276
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
1419
	 * able to process them after we restore SDEIER (as soon as we restore
1277
			     struct intel_ring_buffer *ring)
-
 
1278
{
1420
	 * it, we'll get an interrupt if SDEIIR still has something to process
1279
	struct drm_i915_gem_object *obj;
1421
	 * due to its back queue). */
1280
	u32 seqno;
1422
	if (!HAS_PCH_NOP(dev)) {
Line 1281... Line -...
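Note: the comment above explains why the new handler brackets its work: the DE master enable and SDEIER are cleared up front so nothing new can be signalled while the IIR registers are drained, and both are restored at the end so anything that queued up on the PCH side refires immediately. A compact model of that bracket, with toy register indices and rd()/wr() helpers standing in for I915_READ()/I915_WRITE():

#include <stdint.h>

static uint32_t fake_regs[2];
enum { DEIER, SDEIER };                       /* placeholder indices, not MMIO offsets */
#define MASTER_IRQ_CONTROL (1u << 31)

static uint32_t rd(unsigned r)             { return fake_regs[r]; }
static void     wr(unsigned r, uint32_t v) { fake_regs[r] = v; }

static void irq_handler_bracket(void (*handle_pending_iir)(void))
{
	uint32_t de_ier  = rd(DEIER);
	uint32_t sde_ier = rd(SDEIER);

	/* quiesce: no new CPU or PCH interrupt can be raised behind us */
	wr(DEIER, de_ier & ~MASTER_IRQ_CONTROL);
	wr(SDEIER, 0);
	(void)rd(SDEIER);                      /* posting read */

	handle_pending_iir();                  /* read and clear the IIR bits */

	/* restore: a bit still pending in SDEIIR refires immediately */
	wr(DEIER, de_ier);
	wr(SDEIER, sde_ier);
	(void)rd(SDEIER);
}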
1281
 
-
 
1282
	if (!ring->get_seqno)
-
 
1283
		return NULL;
1423
		sde_ier = I915_READ(SDEIER);
1284
 
1424
		I915_WRITE(SDEIER, 0);
1285
	if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
-
 
1286
		u32 acthd = I915_READ(ACTHD);
1425
		POSTING_READ(SDEIER);
1287
 
-
 
1288
		if (WARN_ON(ring->id != RCS))
-
 
1289
			return NULL;
1426
	}
1290
 
1427
 
1291
		obj = ring->private;
-
 
1292
		if (acthd >= obj->gtt_offset &&
1428
	/* On Haswell, also mask ERR_INT because we don't want to risk
1293
		    acthd < obj->gtt_offset + obj->base.size)
1429
	 * generating "unclaimed register" interrupts from inside the interrupt
1294
			return i915_error_object_create(dev_priv, obj);
-
 
1295
	}
1430
	 * handler. */
1296
 
1431
	if (IS_HASWELL(dev)) {
Line -... Line 1432...
-
 
1432
		spin_lock(&dev_priv->irq_lock);
-
 
1433
		err_int_reenable = ~dev_priv->irq_mask & DE_ERR_INT_IVB;
-
 
1434
		if (err_int_reenable)
-
 
1435
			ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
-
 
1436
		spin_unlock(&dev_priv->irq_lock);
-
 
1437
}
-
 
1438
 
1297
	seqno = ring->get_seqno(ring, false);
1439
	gt_iir = I915_READ(GTIIR);
1298
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
1440
	if (gt_iir) {
Line 1299... Line -...
1299
		if (obj->ring != ring)
-
 
1300
			continue;
-
 
1301
 
-
 
1302
		if (i915_seqno_passed(seqno, obj->last_read_seqno))
-
 
1303
			continue;
-
 
1304
 
-
 
1305
		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
1441
		if (INTEL_INFO(dev)->gen >= 6)
1306
			continue;
-
 
1307
 
1442
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
1308
		/* We need to copy these to an anonymous buffer as the simplest
1443
		else
1309
		 * method to avoid being overwritten by userspace.
-
 
1310
		 */
1444
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
1311
		return i915_error_object_create(dev_priv, obj);
1445
		I915_WRITE(GTIIR, gt_iir);
1312
	}
-
 
1313
 
1446
		ret = IRQ_HANDLED;
1314
	return NULL;
1447
}
1315
}
-
 
1316
 
-
 
1317
static void i915_record_ring_state(struct drm_device *dev,
-
 
1318
				   struct drm_i915_error_state *error,
-
 
1319
				   struct intel_ring_buffer *ring)
-
 
1320
{
-
 
1321
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
1322
 
-
 
1323
	if (INTEL_INFO(dev)->gen >= 6) {
-
 
1324
		error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
-
 
1325
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
-
 
1326
		error->semaphore_mboxes[ring->id][0]
-
 
1327
			= I915_READ(RING_SYNC_0(ring->mmio_base));
-
 
1328
		error->semaphore_mboxes[ring->id][1]
-
 
1329
			= I915_READ(RING_SYNC_1(ring->mmio_base));
1448
 
Line 1330... Line -...
1330
		error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
-
 
1331
		error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
-
 
1332
	}
1449
	de_iir = I915_READ(DEIIR);
1333
 
-
 
1334
	if (INTEL_INFO(dev)->gen >= 4) {
1450
	if (de_iir) {
1335
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
1451
		if (INTEL_INFO(dev)->gen >= 7)
1336
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
1452
			ivb_display_irq_handler(dev, de_iir);
1337
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
-
 
1338
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
1453
		else
1339
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
-
 
1340
		if (ring->id == RCS)
1454
			ilk_display_irq_handler(dev, de_iir);
Line 1341... Line -...
1341
			error->bbaddr = I915_READ64(BB_ADDR);
-
 
1342
	} else {
-
 
1343
		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
-
 
1344
		error->ipeir[ring->id] = I915_READ(IPEIR);
1455
		I915_WRITE(DEIIR, de_iir);
1345
		error->ipehr[ring->id] = I915_READ(IPEHR);
-
 
1346
		error->instdone[ring->id] = I915_READ(INSTDONE);
-
 
1347
	}
1456
		ret = IRQ_HANDLED;
1348
 
-
 
1349
	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
-
 
1350
	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
1457
	}
1351
	error->seqno[ring->id] = ring->get_seqno(ring, false);
-
 
1352
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
-
 
1353
	error->head[ring->id] = I915_READ_HEAD(ring);
-
 
1354
	error->tail[ring->id] = I915_READ_TAIL(ring);
-
 
1355
	error->ctl[ring->id] = I915_READ_CTL(ring);
1458
 
1356
 
1459
	if (INTEL_INFO(dev)->gen >= 6) {
1357
	error->cpu_ring_head[ring->id] = ring->head;
-
 
1358
	error->cpu_ring_tail[ring->id] = ring->tail;
1460
		u32 pm_iir = I915_READ(GEN6_PMIIR);
-
 
1461
		if (pm_iir) {
-
 
1462
			gen6_rps_irq_handler(dev_priv, pm_iir);
1359
}
1463
			I915_WRITE(GEN6_PMIIR, pm_iir);
Line 1360... Line 1464...
1360
 
1464
			ret = IRQ_HANDLED;
1361
 
1465
	}
1362
static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
1466
}
1363
					   struct drm_i915_error_state *error,
-
 
1364
					   struct drm_i915_error_ring *ering)
1467
 
1365
{
-
 
1366
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
1468
	if (err_int_reenable) {
1367
	struct drm_i915_gem_object *obj;
-
 
1368
 
-
 
1369
	/* Currently render ring is the only HW context user */
-
 
1370
	if (ring->id != RCS || !error->ccid)
-
 
1371
		return;
-
 
1372
 
-
 
1373
	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
-
 
1374
		if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
-
 
1375
			ering->ctx = i915_error_object_create_sized(dev_priv,
-
 
1376
								    obj, 1);
-
 
Line -... Line 1469...
-
 
1469
		spin_lock(&dev_priv->irq_lock);
-
 
1470
		if (ivb_can_enable_err_int(dev))
-
 
1471
			ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
-
 
1472
		spin_unlock(&dev_priv->irq_lock);
1377
		}
1473
	}
-
 
1474
 
Line 1378... Line 1475...
1378
	}
1475
	I915_WRITE(DEIER, de_ier);
1379
}
1476
	POSTING_READ(DEIER);
1380
 
1477
	if (!HAS_PCH_NOP(dev)) {
Line 1381... Line -...
1381
static void i915_gem_record_rings(struct drm_device *dev,
-
 
1382
				  struct drm_i915_error_state *error)
-
 
1383
{
-
 
1384
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
1385
	struct intel_ring_buffer *ring;
-
 
1386
	struct drm_i915_gem_request *request;
-
 
1387
	int i, count;
-
 
1388
 
-
 
Line 1389... Line 1478...
1389
	for_each_ring(ring, dev_priv, i) {
1478
		I915_WRITE(SDEIER, sde_ier);
1390
		i915_record_ring_state(dev, error, ring);
1479
		POSTING_READ(SDEIER);
1391
 
1480
	}
1392
		error->ring[i].batchbuffer =
1481
 
1393
			i915_error_first_batchbuffer(dev_priv, ring);
-
 
1394
 
1482
	return ret;
1395
		error->ring[i].ringbuffer =
1483
}
1396
			i915_error_object_create(dev_priv, ring->obj);
-
 
1397
 
-
 
1398
 
-
 
1399
		i915_gem_record_active_context(ring, error, &error->ring[i]);
1484
 
Line -... Line 1485...
-
 
1485
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
1400
 
1486
			       bool reset_completed)
1401
		count = 0;
1487
{
1402
		list_for_each_entry(request, &ring->request_list, list)
1488
	struct intel_ring_buffer *ring;
1403
			count++;
1489
	int i;
1404
 
1490
 
1405
		error->ring[i].num_requests = count;
-
 
1406
		error->ring[i].requests =
-
 
1407
			kmalloc(count*sizeof(struct drm_i915_error_request),
1491
	/*
1408
				GFP_ATOMIC);
1492
	 * Notify all waiters for GPU completion events that reset state has
1409
		if (error->ring[i].requests == NULL) {
1493
	 * been changed, and that they need to restart their wait after
1410
			error->ring[i].num_requests = 0;
1494
	 * checking for potential errors (and bail out to drop locks if there is
1411
			continue;
1495
	 * a gpu reset pending so that i915_error_work_func can acquire them).
1412
		}
-
 
1413
 
-
 
1414
		count = 0;
-
 
1415
		list_for_each_entry(request, &ring->request_list, list) {
1496
	 */
1416
			struct drm_i915_error_request *erq;
-
 
1417
 
1497
 
1418
			erq = &error->ring[i].requests[count++];
-
 
1419
			erq->seqno = request->seqno;
-
 
1420
			erq->jiffies = request->emitted_jiffies;
-
 
1421
			erq->tail = request->tail;
-
 
1422
		}
-
 
1423
	}
-
 
1424
}
-
 
1425
 
1498
	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
1426
/**
-
 
1427
 * i915_capture_error_state - capture an error record for later analysis
-
 
1428
 * @dev: drm device
-
 
1429
 *
-
 
1430
 * Should be called when an error is detected (either a hang or an error
-
 
1431
 * interrupt) to capture error state from the time of the error.  Fills
-
 
1432
 * out a structure which becomes available in debugfs for user level tools
-
 
1433
 * to pick up.
-
 
1434
 */
-
 
1435
static void i915_capture_error_state(struct drm_device *dev)
-
 
1436
{
-
 
1437
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
1438
	struct drm_i915_gem_object *obj;
-
 
1439
	struct drm_i915_error_state *error;
-
 
1440
	unsigned long flags;
-
 
1441
	int i, pipe;
-
 
1442
 
-
 
1443
	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
-
 
1444
	error = dev_priv->gpu_error.first_error;
-
 
1445
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
-
 
1446
	if (error)
-
 
1447
		return;
-
 
1448
 
-
 
1449
	/* Account for pipe specific data like PIPE*STAT */
-
 
1450
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
1499
	for_each_ring(ring, dev_priv, i)
1451
	if (!error) {
-
 
1452
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
-
 
1453
		return;
1500
		wake_up_all(&ring->irq_queue);
1454
	}
-
 
1455
 
-
 
1456
	DRM_INFO("capturing error event; look for more information in "
-
 
1457
		 "/sys/kernel/debug/dri/%d/i915_error_state\n",
-
 
1458
		 dev->primary->index);
-
 
1459
 
-
 
1460
	kref_init(&error->ref);
-
 
1461
	error->eir = I915_READ(EIR);
1501
 
1462
	error->pgtbl_er = I915_READ(PGTBL_ER);
-
 
1463
	if (HAS_HW_CONTEXTS(dev))
-
 
1464
	error->ccid = I915_READ(CCID);
-
 
1465
 
-
 
1466
	if (HAS_PCH_SPLIT(dev))
-
 
1467
		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
-
 
1468
	else if (IS_VALLEYVIEW(dev))
-
 
1469
		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
1502
 
1470
	else if (IS_GEN2(dev))
-
 
1471
		error->ier = I915_READ16(IER);
-
 
1472
	else
-
 
1473
		error->ier = I915_READ(IER);
-
 
1474
 
-
 
1475
	if (INTEL_INFO(dev)->gen >= 6)
-
 
1476
		error->derrmr = I915_READ(DERRMR);
-
 
1477
 
-
 
1478
	if (IS_VALLEYVIEW(dev))
-
 
1479
		error->forcewake = I915_READ(FORCEWAKE_VLV);
-
 
1480
	else if (INTEL_INFO(dev)->gen >= 7)
1503
	/*
1481
		error->forcewake = I915_READ(FORCEWAKE_MT);
-
 
1482
	else if (INTEL_INFO(dev)->gen == 6)
-
 
1483
		error->forcewake = I915_READ(FORCEWAKE);
-
 
1484
 
-
 
1485
	if (!HAS_PCH_SPLIT(dev))
-
 
1486
	for_each_pipe(pipe)
-
 
1487
		error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
-
 
Line 1488... Line -...
1488
 
-
 
1489
	if (INTEL_INFO(dev)->gen >= 6) {
-
 
1490
		error->error = I915_READ(ERROR_GEN6);
-
 
1491
		error->done_reg = I915_READ(DONE_REG);
1504
	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
1492
	}
-
 
1493
 
-
 
1494
	if (INTEL_INFO(dev)->gen == 7)
-
 
1495
		error->err_int = I915_READ(GEN7_ERR_INT);
-
 
1496
 
-
 
Line -... Line 1505...
-
 
1505
	 * reset state is cleared.
-
 
1506
	 */
-
 
1507
	if (reset_completed)
1497
	i915_get_extra_instdone(dev, error->extra_instdone);
1508
		wake_up_all(&dev_priv->gpu_error.reset_queue);
-
 
1509
}
-
 
1510
 
-
 
1511
#if 0
-
 
1512
/**
1498
 
1513
 * i915_error_work_func - do process context error handling work
-
 
1514
 * @work: work struct
-
 
1515
 *
1499
	i915_gem_record_fences(dev, error);
1516
 * Fire an error uevent so userspace can see that a hang or error
1500
	i915_gem_record_rings(dev, error);
1517
 * was detected.
1501
 
1518
 */
Line -... Line 1519...
-
 
1519
static void i915_error_work_func(struct work_struct *work)
1502
	/* Record buffers on the active and pinned lists. */
1520
{
1503
	error->active_bo = NULL;
1521
	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
1504
	error->pinned_bo = NULL;
1522
						    work);
1505
 
1523
	drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
-
 
1524
						    gpu_error);
1506
	i = 0;
1525
	struct drm_device *dev = dev_priv->dev;
Line 1507... Line 1526...
1507
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
1526
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
Line -... Line 1527...
-
 
1527
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
-
 
1528
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
-
 
1529
	int ret;
-
 
1530
 
-
 
1531
	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
-
 
1532
 
-
 
1533
	/*
1508
		i++;
1534
	 * Note that there's only one work item which does gpu resets, so we
-
 
1535
	 * need not worry about concurrent gpu resets potentially incrementing
-
 
1536
	 * error->reset_counter twice. We only need to take care of another
-
 
1537
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
-
 
1538
	 * quick check for that is good enough: schedule_work ensures the
1509
	error->active_bo_count = i;
1539
	 * correct ordering between hang detection and this work item, and since
Line 1510... Line 1540...
1510
	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
1540
	 * the reset in-progress bit is only ever set by code outside of this
1511
		if (obj->pin_count)
-
 
1512
			i++;
1541
	 * work we don't need to worry about any other races.
1513
	error->pinned_bo_count = i - error->active_bo_count;
1542
	 */
-
 
1543
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
1514
 
1544
		DRM_DEBUG_DRIVER("resetting chip\n");
1515
	error->active_bo = NULL;
-
 
Line -... Line 1545...
-
 
1545
		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
-
 
1546
				   reset_event);
-
 
1547
 
1516
	error->pinned_bo = NULL;
1548
		/*
1517
	if (i) {
1549
		 * All state reset _must_ be completed before we update the
1518
		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
1550
		 * reset counter, for otherwise waiters might miss the reset
1519
					   GFP_ATOMIC);
-
 
1520
		if (error->active_bo)
-
 
1521
			error->pinned_bo =
-
 
1522
				error->active_bo + error->active_bo_count;
-
 
1523
	}
-
 
1524
 
-
 
1525
	if (error->active_bo)
-
 
1526
		error->active_bo_count =
-
 
1527
			capture_active_bo(error->active_bo,
-
 
1528
					  error->active_bo_count,
-
 
1529
					  &dev_priv->mm.active_list);
-
 
1530
 
-
 
1531
	if (error->pinned_bo)
-
 
1532
		error->pinned_bo_count =
-
 
1533
			capture_pinned_bo(error->pinned_bo,
1551
		 * pending state and not properly drop locks, resulting in
1534
					  error->pinned_bo_count,
-
 
1535
					  &dev_priv->mm.bound_list);
-
 
1536
 
-
 
Line 1537... Line 1552...
1537
	do_gettimeofday(&error->time);
1552
		 * deadlocks with the reset work.
1538
 
1553
		 */
1539
	error->overlay = intel_overlay_capture_error_state(dev);
1554
		ret = i915_reset(dev);
1540
	error->display = intel_display_capture_error_state(dev);
1555
 
Line 1671... Line 1686...
1671
 * of a ring dump etc.).
1686
 * of a ring dump etc.).
1672
 */
1687
 */
1673
void i915_handle_error(struct drm_device *dev, bool wedged)
1688
void i915_handle_error(struct drm_device *dev, bool wedged)
1674
{
1689
{
1675
	struct drm_i915_private *dev_priv = dev->dev_private;
1690
	struct drm_i915_private *dev_priv = dev->dev_private;
1676
	struct intel_ring_buffer *ring;
-
 
1677
	int i;
-
 
Line 1678... Line 1691...
1678
 
1691
 
1679
	i915_capture_error_state(dev);
1692
	i915_capture_error_state(dev);
Line 1680... Line 1693...
1680
	i915_report_and_clear_eir(dev);
1693
	i915_report_and_clear_eir(dev);
1681
 
1694
 
1682
	if (wedged) {
1695
	if (wedged) {
Line 1683... Line 1696...
1683
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
1696
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
1684
				&dev_priv->gpu_error.reset_counter);
1697
				&dev_priv->gpu_error.reset_counter);
1685
 
1698
 
-
 
1699
		/*
-
 
1700
		 * Wakeup waiting processes so that the reset work function
-
 
1701
		 * i915_error_work_func doesn't deadlock trying to grab various
-
 
1702
		 * locks. By bumping the reset counter first, the woken
-
 
1703
		 * processes will see a reset in progress and back off,
-
 
1704
		 * releasing their locks and then wait for the reset completion.
-
 
1705
		 * We must do this for _all_ gpu waiters that might hold locks
-
 
1706
		 * that the reset work needs to acquire.
-
 
1707
		 *
1686
		/*
1708
		 * Note: The wake_up serves as the required memory barrier to
1687
		 * Wakeup waiting processes so that the reset work item
1709
		 * ensure that the waiters see the updated value of the reset
1688
		 * doesn't deadlock trying to grab various locks.
-
 
1689
		 */
1710
		 * counter atomic_t.
Line -... Line 1711...
-
 
1711
		 */
-
 
1712
		i915_error_wake_up(dev_priv, false);
-
 
1713
	}
-
 
1714
 
-
 
1715
	/*
-
 
1716
	 * Our reset work can grab modeset locks (since it needs to reset the
1690
		for_each_ring(ring, dev_priv, i)
1717
	 * state of outstanding pageflips). Hence it must not be run on our own
1691
			wake_up_all(&ring->irq_queue);
1718
	 * dev_priv->wq work queue, because otherwise the flush_work in the pageflip
Line 1692... Line -...
1692
	}
-
 
1693
 
-
 
1694
//	queue_work(dev_priv->wq, &dev_priv->error_work);
-
 
1695
}
1719
	 * code will deadlock.
1696
 
1720
	 */
1697
#if 0
1721
	schedule_work(&dev_priv->gpu_error.work);
1698
 
1722
}
1699
 
1723
 
Line 1725... Line 1749...
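Note: i915_handle_error() above depends on ordering: the reset-in-progress flag is published first, every GPU waiter is woken so it can notice the flag and drop its locks, and only then is the reset handed to process context (on the system workqueue rather than dev_priv->wq, to avoid the pageflip flush_work deadlock the comment describes). Its skeleton, with stub stand-ins for the wake-up and work-queue calls:

#include <stdatomic.h>

#define RESET_IN_PROGRESS 0x1u

struct gpu_error { atomic_uint reset_counter; };

static void wake_all_waiters(void)  { /* wake_up_all() on every ring + reset queue */ }
static void queue_reset_work(void)  { /* schedule_work() on the system workqueue   */ }

static void handle_gpu_hang(struct gpu_error *e, int wedged)
{
	/* error-state capture and EIR reporting happen first (not shown) */

	if (wedged) {
		/* 1. publish the reset-pending flag ...                       */
		atomic_fetch_or(&e->reset_counter, RESET_IN_PROGRESS);
		/* 2. ... then wake every waiter: each one sees the flag,
		 *    backs off and drops the locks the reset work will need   */
		wake_all_waiters();
	}

	/* 3. only now hand the actual reset to process context */
	queue_reset_work();
}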
1725
	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
1749
	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
1726
	obj = work->pending_flip_obj;
1750
	obj = work->pending_flip_obj;
1727
	if (INTEL_INFO(dev)->gen >= 4) {
1751
	if (INTEL_INFO(dev)->gen >= 4) {
1728
		int dspsurf = DSPSURF(intel_crtc->plane);
1752
		int dspsurf = DSPSURF(intel_crtc->plane);
1729
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
1753
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
1730
					obj->gtt_offset;
1754
					i915_gem_obj_ggtt_offset(obj);
1731
	} else {
1755
	} else {
1732
		int dspaddr = DSPADDR(intel_crtc->plane);
1756
		int dspaddr = DSPADDR(intel_crtc->plane);
1733
		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
1757
		stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
1734
							crtc->y * crtc->fb->pitches[0] +
1758
							crtc->y * crtc->fb->pitches[0] +
1735
							crtc->x * crtc->fb->bits_per_pixel/8);
1759
							crtc->x * crtc->fb->bits_per_pixel/8);
1736
	}
1760
	}
Line 1737... Line 1761...
1737
 
1761
 
Line 1774... Line 1798...
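Note: the flip-stall check above decides whether a flip interrupt was missed by comparing what the plane is scanning out now with the GGTT address of the buffer queued for the flip: gen4+ compares the DSPSURF base, older parts recompute the DSPADDR linear offset from the CRTC x/y, stride and pixel size. A simplified model of that comparison (field names and the page mask are illustrative):

#include <stdint.h>

struct flip_check {
	int gen;              /* hardware generation                      */
	uint32_t dspsurf;     /* DSPSURF readback (gen4+)                 */
	uint32_t dspaddr;     /* DSPADDR readback (gen2/3)                */
	uint32_t obj_ggtt;    /* GGTT offset of the buffer queued to flip */
	uint32_t x, y;        /* CRTC panning offsets                     */
	uint32_t pitch, cpp;  /* fb stride in bytes, bytes per pixel      */
};

/* nonzero when scanout already shows the new buffer, i.e. the flip
 * happened but its interrupt was presumably missed                    */
static int flip_stall_detected(const struct flip_check *c)
{
	if (c->gen >= 4)
		/* page-aligned base compare, roughly what I915_HI_DISPBASE isolates */
		return (c->dspsurf & ~0xfffu) == (c->obj_ggtt & ~0xfffu);

	return c->dspaddr == c->obj_ggtt + c->y * c->pitch + c->x * c->cpp;
}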
1774
 
1798
 
1775
static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
1799
static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
1776
{
1800
{
1777
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1801
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
 
1802
	unsigned long irqflags;
-
 
1803
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
Line 1778... Line 1804...
1778
	unsigned long irqflags;
1804
						     DE_PIPE_VBLANK_ILK(pipe);
1779
 
1805
 
Line 1780... Line 1806...
1780
	if (!i915_pipe_enabled(dev, pipe))
1806
	if (!i915_pipe_enabled(dev, pipe))
1781
		return -EINVAL;
-
 
1782
 
-
 
1783
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
 
1784
	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
-
 
1785
				    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
-
 
1786
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-
 
1787
 
-
 
1788
	return 0;
-
 
1789
}
-
 
1790
 
-
 
1791
static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
-
 
1792
{
-
 
1793
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
 
1794
	unsigned long irqflags;
-
 
1795
 
-
 
1796
	if (!i915_pipe_enabled(dev, pipe))
-
 
1797
		return -EINVAL;
1807
		return -EINVAL;
1798
 
-
 
1799
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1808
 
Line 1800... Line 1809...
1800
	ironlake_enable_display_irq(dev_priv,
1809
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1801
				    DE_PIPEA_VBLANK_IVB << (5 * pipe));
1810
	ironlake_enable_display_irq(dev_priv, bit);
Line 1847... Line 1856...
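Note: ironlake_enable_vblank() above now selects the per-pipe vblank bit by generation, DE_PIPE_VBLANK_IVB(pipe) on gen7+ and DE_PIPE_VBLANK_ILK(pipe) before, and toggles it in DEIMR under the irq spinlock; the disable path further down does the same. The selection step, sketched with placeholder bit layouts (the real DE_* values differ):

#include <stdint.h>

/* placeholder bit layouts - illustrative only */
#define DE_PIPE_VBLANK_IVB(pipe)  (1u << (5 * (pipe)))
#define DE_PIPEA_VBLANK           (1u << 7)
#define DE_PIPEB_VBLANK           (1u << 15)
#define DE_PIPE_VBLANK_ILK(pipe)  ((pipe) == 0 ? DE_PIPEA_VBLANK : DE_PIPEB_VBLANK)

/* one helper instead of separate ILK and IVB enable/disable callbacks */
static uint32_t pipe_vblank_bit(int gen, int pipe)
{
	return gen >= 7 ? DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK_ILK(pipe);
}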
1847
 
1856
 
1848
static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
1857
static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
1849
{
1858
{
1850
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1859
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
 
1860
	unsigned long irqflags;
-
 
1861
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
Line 1851... Line 1862...
1851
	unsigned long irqflags;
1862
						     DE_PIPE_VBLANK_ILK(pipe);
1852
 
-
 
1853
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
 
1854
	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
-
 
1855
				     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
-
 
1856
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-
 
1857
}
-
 
1858
 
-
 
1859
static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
-
 
1860
{
-
 
1861
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
 
1862
	unsigned long irqflags;
-
 
1863
 
1863
 
1864
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
 
1865
	ironlake_disable_display_irq(dev_priv,
1864
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1866
				     DE_PIPEA_VBLANK_IVB << (pipe * 5));
1865
	ironlake_disable_display_irq(dev_priv, bit);
Line 1867... Line 1866...
1867
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1866
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1868
}
1867
}
Line 1889... Line 1888...
1889
ring_last_seqno(struct intel_ring_buffer *ring)
1888
ring_last_seqno(struct intel_ring_buffer *ring)
1890
{
1889
{
1891
	return list_entry(ring->request_list.prev,
1890
	return list_entry(ring->request_list.prev,
1892
			  struct drm_i915_gem_request, list)->seqno;
1891
			  struct drm_i915_gem_request, list)->seqno;
1893
}
1892
}
-
 
1893
 
-
 
1894
static bool
-
 
1895
ring_idle(struct intel_ring_buffer *ring, u32 seqno)
-
 
1896
{
-
 
1897
	return (list_empty(&ring->request_list) ||
-
 
1898
		i915_seqno_passed(seqno, ring_last_seqno(ring)));
-
 
1899
}
-
 
1900
 
-
 
1901
static struct intel_ring_buffer *
-
 
1902
semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
-
 
1903
{
-
 
1904
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-
 
1905
	u32 cmd, ipehr, acthd, acthd_min;
-
 
1906
 
-
 
1907
	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
-
 
1908
	if ((ipehr & ~(0x3 << 16)) !=
-
 
1909
	    (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
1894
/* drm_dma.h hooks
1910
		return NULL;
-
 
1911
 
-
 
1912
	/* ACTHD is likely pointing to the dword after the actual command,
-
 
1913
	 * so scan backwards until we find the MBOX.
1895
*/
1914
	 */
-
 
1915
	acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
-
 
1916
	acthd_min = max((int)acthd - 3 * 4, 0);
-
 
1917
	do {
-
 
1918
		cmd = ioread32(ring->virtual_start + acthd);
-
 
1919
		if (cmd == ipehr)
-
 
1920
			break;
-
 
1921
 
-
 
1922
		acthd -= 4;
-
 
1923
		if (acthd < acthd_min)
-
 
1924
			return NULL;
-
 
1925
	} while (1);
-
 
1926
 
-
 
1927
	*seqno = ioread32(ring->virtual_start+acthd+4)+1;
-
 
1928
	return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
-
 
1929
}
-
 
1930
 
1896
static void ironlake_irq_preinstall(struct drm_device *dev)
1931
static int semaphore_passed(struct intel_ring_buffer *ring)
1897
{
1932
{
1898
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1933
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-
 
1934
	struct intel_ring_buffer *signaller;
-
 
1935
	u32 seqno, ctl;
Line 1899... Line 1936...
1899
 
1936
 
Line 1900... Line 1937...
1900
    atomic_set(&dev_priv->irq_received, 0);
1937
	ring->hangcheck.deadlock = true;
-
 
1938
 
-
 
1939
	signaller = semaphore_waits_for(ring, &seqno);
Line 1901... Line 1940...
1901
 
1940
	if (signaller == NULL || signaller->hangcheck.deadlock)
-
 
1941
		return -1;
-
 
1942
 
-
 
1943
	/* cursory check for an unkickable deadlock */
Line 1902... Line 1944...
1902
    I915_WRITE(HWSTAM, 0xeffe);
1944
	ctl = I915_READ_CTL(signaller);
1903
 
-
 
1904
    /* XXX hotplug from PCH */
-
 
-
 
1945
	if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
Line -... Line 1946...
-
 
1946
		return -1;
-
 
1947
 
-
 
1948
	return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
-
 
1949
}
-
 
1950
 
-
 
1951
static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
-
 
1952
{
-
 
1953
	struct intel_ring_buffer *ring;
-
 
1954
	int i;
-
 
1955
 
-
 
1956
	for_each_ring(ring, dev_priv, i)
-
 
1957
		ring->hangcheck.deadlock = false;
-
 
1958
}
-
 
1959
 
-
 
1960
static enum intel_ring_hangcheck_action
-
 
1961
ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
-
 
1962
{
-
 
1963
	struct drm_device *dev = ring->dev;
-
 
1964
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
1965
	u32 tmp;
-
 
1966
 
-
 
1967
	if (ring->hangcheck.acthd != acthd)
-
 
1968
		return HANGCHECK_ACTIVE;
-
 
1969
 
-
 
1970
	if (IS_GEN2(dev))
-
 
1971
		return HANGCHECK_HUNG;
-
 
1972
 
-
 
1973
	/* Is the chip hanging on a WAIT_FOR_EVENT?
-
 
1974
	 * If so we can simply poke the RB_WAIT bit
-
 
1975
	 * and break the hang. This should work on
1905
 
1976
	 * all but the second generation chipsets.
1906
    I915_WRITE(DEIMR, 0xffffffff);
1977
	 */
-
 
1978
	tmp = I915_READ_CTL(ring);
-
 
1979
	if (tmp & RING_WAIT) {
-
 
1980
		DRM_ERROR("Kicking stuck wait on %s\n",
-
 
1981
			  ring->name);
-
 
1982
		I915_WRITE_CTL(ring, tmp);
-
 
1983
		return HANGCHECK_KICK;
-
 
1984
	}
-
 
1985
 
-
 
1986
	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
-
 
1987
		switch (semaphore_passed(ring)) {
1907
    I915_WRITE(DEIER, 0x0);
1988
		default:
-
 
1989
			return HANGCHECK_HUNG;
-
 
1990
		case 1:
-
 
1991
			DRM_ERROR("Kicking stuck semaphore on %s\n",
-
 
1992
				  ring->name);
-
 
1993
			I915_WRITE_CTL(ring, tmp);
-
 
1994
			return HANGCHECK_KICK;
-
 
1995
		case 0:
-
 
1996
			return HANGCHECK_WAIT;
-
 
1997
		}
-
 
1998
	}
-
 
1999
 
-
 
2000
	return HANGCHECK_HUNG;
-
 
2001
}
-
 
2002
 
-
 
2003
/**
-
 
2004
 * This is called when the chip hasn't reported back with completed
-
 
2005
 * batchbuffers in a long time. We keep track per ring seqno progress and
-
 
2006
 * if there are no progress, hangcheck score for that ring is increased.
-
 
2007
 * Further, acthd is inspected to see if the ring is stuck. On stuck case
-
 
2008
 * we kick the ring. If we see no progress on three subsequent calls
-
 
2009
 * we assume chip is wedged and try to fix it by resetting the chip.
-
 
2010
 */
-
 
2011
static void i915_hangcheck_elapsed(unsigned long data)
-
 
2012
{
-
 
2013
	struct drm_device *dev = (struct drm_device *)data;
-
 
2014
	drm_i915_private_t *dev_priv = dev->dev_private;
-
 
2015
	struct intel_ring_buffer *ring;
-
 
2016
	int i;
-
 
2017
	int busy_count = 0, rings_hung = 0;
-
 
2018
	bool stuck[I915_NUM_RINGS] = { 0 };
-
 
2019
#define BUSY 1
-
 
2020
#define KICK 5
-
 
2021
#define HUNG 20
-
 
2022
#define FIRE 30
-
 
2023
 
-
 
2024
	if (!i915_enable_hangcheck)
-
 
2025
		return;
-
 
2026
 
-
 
2027
	for_each_ring(ring, dev_priv, i) {
-
 
2028
		u32 seqno, acthd;
-
 
2029
		bool busy = true;
-
 
2030
 
-
 
2031
		semaphore_clear_deadlocks(dev_priv);
-
 
2032
 
-
 
2033
		seqno = ring->get_seqno(ring, false);
-
 
2034
		acthd = intel_ring_get_active_head(ring);
-
 
2035
 
-
 
2036
		if (ring->hangcheck.seqno == seqno) {
-
 
2037
			if (ring_idle(ring, seqno)) {
-
 
2038
//               if (waitqueue_active(&ring->irq_queue)) {
-
 
2039
					/* Issue a wake-up to catch stuck h/w. */
-
 
2040
//                   DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
-
 
2041
//                         ring->name);
-
 
2042
//                   wake_up_all(&ring->irq_queue);
-
 
2043
//                   ring->hangcheck.score += HUNG;
-
 
2044
//               } else
-
 
2045
					busy = false;
-
 
2046
			} else {
-
 
2047
				/* We always increment the hangcheck score
-
 
2048
				 * if the ring is busy and still processing
-
 
2049
				 * the same request, so that no single request
-
 
2050
				 * can run indefinitely (such as a chain of
-
 
2051
				 * batches). The only time we do not increment
-
 
2052
				 * the hangcheck score on this ring, if this
-
 
2053
				 * ring is in a legitimate wait for another
-
 
2054
				 * ring. In that case the waiting ring is a
-
 
2055
				 * victim and we want to be sure we catch the
-
 
2056
				 * right culprit. Then every time we do kick
-
 
2057
				 * the ring, add a small increment to the
-
 
2058
				 * score so that we can catch a batch that is
-
 
2059
				 * being repeatedly kicked and so responsible
-
 
2060
				 * for stalling the machine.
-
 
2061
				 */
-
 
2062
				ring->hangcheck.action = ring_stuck(ring,
1908
    POSTING_READ(DEIER);
2063
								    acthd);
-
 
2064
 
-
 
2065
				switch (ring->hangcheck.action) {
-
 
2066
				case HANGCHECK_WAIT:
-
 
2067
					break;
-
 
2068
				case HANGCHECK_ACTIVE:
-
 
2069
					ring->hangcheck.score += BUSY;
-
 
2070
					break;
-
 
2071
				case HANGCHECK_KICK:
-
 
2072
					ring->hangcheck.score += KICK;
-
 
2073
					break;
-
 
2074
				case HANGCHECK_HUNG:
-
 
2075
					ring->hangcheck.score += HUNG;
-
 
2076
					stuck[i] = true;
-
 
2077
					break;
-
 
2078
				}
-
 
2079
			}
-
 
2080
		} else {
-
 
2081
			/* Gradually reduce the count so that we catch DoS
-
 
2082
			 * attempts across multiple batches.
-
 
2083
			 */
-
 
2084
			if (ring->hangcheck.score > 0)
-
 
2085
				ring->hangcheck.score--;
-
 
2086
		}
-
 
2087
 
-
 
2088
		ring->hangcheck.seqno = seqno;
-
 
2089
		ring->hangcheck.acthd = acthd;
-
 
2090
		busy_count += busy;
-
 
2091
	}
-
 
2092
 
-
 
2093
	for_each_ring(ring, dev_priv, i) {
-
 
2094
		if (ring->hangcheck.score > FIRE) {
-
 
2095
			DRM_INFO("%s on %s\n",
-
 
2096
				  stuck[i] ? "stuck" : "no progress",
-
 
2097
				  ring->name);
-
 
2098
			rings_hung++;
-
 
2099
		}
-
 
2100
	}
-
 
2101
 
-
 
2102
//   if (rings_hung)
-
 
2103
//       return i915_handle_error(dev, true);
-
 
2104
 
Line 1909... Line 2105...
1909
 
2105
}
1910
    /* and GT */
2106
 
Line 1911... Line 2107...
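Note: the hangcheck logic above keeps a per-ring score: a ring still busy on the same seqno gains BUSY each tick, a kicked wait or semaphore adds KICK, a ring judged stuck adds HUNG, anything past FIRE is reported, and rings that do make progress decay back toward zero. A toy version of that accounting, reusing the same thresholds (the ring/seqno types are stand-ins):

enum hang_action { HC_WAIT, HC_ACTIVE, HC_KICK, HC_HUNG };

#define BUSY 1
#define KICK 5
#define HUNG 20
#define FIRE 30

struct ring_hc {
	unsigned last_seqno;
	int score;
};

/* returns nonzero once the ring has accumulated enough evidence of a hang */
static int hangcheck_tick(struct ring_hc *hc, unsigned seqno, int busy,
			  enum hang_action action)
{
	if (hc->last_seqno == seqno && busy) {
		switch (action) {
		case HC_WAIT:                      break;
		case HC_ACTIVE: hc->score += BUSY; break;
		case HC_KICK:   hc->score += KICK; break;
		case HC_HUNG:   hc->score += HUNG; break;
		}
	} else if (hc->score > 0) {
		/* progress: decay so isolated stalls do not accumulate */
		hc->score--;
	}

	hc->last_seqno = seqno;
	return hc->score > FIRE;
}

The small KICK increment is what lets a batch that is repeatedly kicked still be caught eventually, as the long comment in the function explains.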
1911
    I915_WRITE(GTIMR, 0xffffffff);
2107
static void ibx_irq_preinstall(struct drm_device *dev)
Line 1925... Line 2121...
1925
	 */
2121
	 */
1926
	I915_WRITE(SDEIER, 0xffffffff);
2122
	I915_WRITE(SDEIER, 0xffffffff);
1927
    POSTING_READ(SDEIER);
2123
	POSTING_READ(SDEIER);
1928
}
2124
}
Line -... Line 2125...
-
 
2125
 
-
 
2126
static void gen5_gt_irq_preinstall(struct drm_device *dev)
-
 
2127
{
-
 
2128
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
2129
 
-
 
2130
    /* and GT */
-
 
2131
    I915_WRITE(GTIMR, 0xffffffff);
-
 
2132
    I915_WRITE(GTIER, 0x0);
-
 
2133
    POSTING_READ(GTIER);
-
 
2134
 
-
 
2135
	if (INTEL_INFO(dev)->gen >= 6) {
-
 
2136
		/* and PM */
-
 
2137
		I915_WRITE(GEN6_PMIMR, 0xffffffff);
-
 
2138
		I915_WRITE(GEN6_PMIER, 0x0);
-
 
2139
		POSTING_READ(GEN6_PMIER);
-
 
2140
}
-
 
2141
}
-
 
2142
 
-
 
2143
/* drm_dma.h hooks
-
 
2144
*/
-
 
2145
static void ironlake_irq_preinstall(struct drm_device *dev)
-
 
2146
{
-
 
2147
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
 
2148
 
-
 
2149
	atomic_set(&dev_priv->irq_received, 0);
-
 
2150
 
-
 
2151
	I915_WRITE(HWSTAM, 0xeffe);
-
 
2152
 
-
 
2153
	I915_WRITE(DEIMR, 0xffffffff);
-
 
2154
	I915_WRITE(DEIER, 0x0);
-
 
2155
	POSTING_READ(DEIER);
-
 
2156
 
-
 
2157
	gen5_gt_irq_preinstall(dev);
-
 
2158
 
-
 
2159
	ibx_irq_preinstall(dev);
-
 
2160
}
1929
 
2161
 
1930
static void valleyview_irq_preinstall(struct drm_device *dev)
2162
static void valleyview_irq_preinstall(struct drm_device *dev)
1931
{
2163
{
1932
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2164
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
Line 1941... Line 2173...
1941
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
2173
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
Line 1942... Line 2174...
1942
 
2174
 
1943
	/* and GT */
2175
	/* and GT */
1944
	I915_WRITE(GTIIR, I915_READ(GTIIR));
2176
	I915_WRITE(GTIIR, I915_READ(GTIIR));
-
 
2177
	I915_WRITE(GTIIR, I915_READ(GTIIR));
1945
	I915_WRITE(GTIIR, I915_READ(GTIIR));
2178
 
1946
	I915_WRITE(GTIMR, 0xffffffff);
-
 
1947
	I915_WRITE(GTIER, 0x0);
-
 
Line 1948... Line 2179...
1948
	POSTING_READ(GTIER);
2179
	gen5_gt_irq_preinstall(dev);
Line 1949... Line 2180...
1949
 
2180
 
1950
	I915_WRITE(DPINVGTT, 0xff);
2181
	I915_WRITE(DPINVGTT, 0xff);
Line 1962... Line 2193...
1962
static void ibx_hpd_irq_setup(struct drm_device *dev)
2193
static void ibx_hpd_irq_setup(struct drm_device *dev)
1963
{
2194
{
1964
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2195
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1965
	struct drm_mode_config *mode_config = &dev->mode_config;
2196
	struct drm_mode_config *mode_config = &dev->mode_config;
1966
	struct intel_encoder *intel_encoder;
2197
	struct intel_encoder *intel_encoder;
1967
	u32 mask = ~I915_READ(SDEIMR);
2198
	u32 hotplug_irqs, hotplug, enabled_irqs = 0;
1968
	u32 hotplug;
-
 
Line 1969... Line 2199...
1969
 
2199
 
1970
	if (HAS_PCH_IBX(dev)) {
2200
	if (HAS_PCH_IBX(dev)) {
1971
		mask &= ~SDE_HOTPLUG_MASK;
2201
		hotplug_irqs = SDE_HOTPLUG_MASK;
1972
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2202
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
1973
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2203
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
1974
				mask |= hpd_ibx[intel_encoder->hpd_pin];
2204
				enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
1975
	} else {
2205
	} else {
1976
		mask &= ~SDE_HOTPLUG_MASK_CPT;
2206
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
1977
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2207
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
1978
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2208
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
1979
				mask |= hpd_cpt[intel_encoder->hpd_pin];
2209
				enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
Line 1980... Line 2210...
1980
	}
2210
	}
Line 1981... Line 2211...
1981
 
2211
 
1982
	I915_WRITE(SDEIMR, ~mask);
2212
	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
1983
 
2213
 
1984
	/*
2214
	/*
Line 1998... Line 2228...
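Note: ibx_hpd_irq_setup() above no longer writes SDEIMR directly; it walks the encoder list, collects the hotplug bit of every pin marked HPD_ENABLED into enabled_irqs, and passes that plus the platform's full hotplug mask to ibx_display_interrupt_update(). A stripped-down model of the accumulation step (pin names and bit values here are illustrative):

#include <stdint.h>
#include <stddef.h>

enum hpd_pin   { HPD_CRT, HPD_PORT_B, HPD_PORT_C, HPD_PORT_D, HPD_NUM_PINS };
enum hpd_state { HPD_DISABLED, HPD_ENABLED };

/* toy per-pin status bits; the driver uses the hpd_ibx[]/hpd_cpt[] tables */
static const uint32_t hpd_bit[HPD_NUM_PINS] = {
	[HPD_CRT]    = 1u << 0, [HPD_PORT_B] = 1u << 1,
	[HPD_PORT_C] = 1u << 2, [HPD_PORT_D] = 1u << 3,
};

struct encoder { enum hpd_pin pin; };

static uint32_t collect_enabled_hpd(const struct encoder *enc, size_t n,
				    const enum hpd_state *pin_state)
{
	uint32_t enabled = 0;

	for (size_t i = 0; i < n; i++)
		if (pin_state[enc[i].pin] == HPD_ENABLED)
			enabled |= hpd_bit[enc[i].pin];

	return enabled;	/* unmask only these bits; keep the rest masked */
}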
1998
static void ibx_irq_postinstall(struct drm_device *dev)
2228
static void ibx_irq_postinstall(struct drm_device *dev)
1999
{
2229
{
2000
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2230
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2001
	u32 mask;
2231
	u32 mask;
Line 2002... Line -...
2002
 
-
 
2003
	if (HAS_PCH_IBX(dev))
-
 
2004
		mask = SDE_GMBUS | SDE_AUX_MASK;
-
 
2005
	else
-
 
2006
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
-
 
2007
 
2232
 
2008
	if (HAS_PCH_NOP(dev))
2233
	if (HAS_PCH_NOP(dev))
Line -... Line 2234...
-
 
2234
		return;
-
 
2235
 
-
 
2236
	if (HAS_PCH_IBX(dev)) {
-
 
2237
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
-
 
2238
		       SDE_TRANSA_FIFO_UNDER | SDE_POISON;
-
 
2239
	} else {
-
 
2240
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;
-
 
2241
 
-
 
2242
		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
2009
		return;
2243
	}
2010
 
2244
 
2011
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2245
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
Line 2012... Line 2246...
2012
	I915_WRITE(SDEIMR, ~mask);
2246
	I915_WRITE(SDEIMR, ~mask);
2013
}
2247
}
2014
 
2248
 
2015
static int ironlake_irq_postinstall(struct drm_device *dev)
-
 
2016
{
-
 
2017
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
 
2018
    /* enable kind of interrupts always enabled */
-
 
2019
    u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
2249
static void gen5_gt_irq_postinstall(struct drm_device *dev)
2020
			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
-
 
2021
			   DE_AUX_CHANNEL_A;
-
 
Line 2022... Line -...
2022
    u32 render_irqs;
-
 
2023
 
-
 
2024
    dev_priv->irq_mask = ~display_mask;
-
 
2025
 
-
 
2026
    /* should always can generate irq */
2250
{
Line 2027... Line 2251...
2027
    I915_WRITE(DEIIR, I915_READ(DEIIR));
2251
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
2252
	u32 pm_irqs, gt_irqs;
-
 
2253
 
-
 
2254
	pm_irqs = gt_irqs = 0;
-
 
2255
 
-
 
2256
	dev_priv->gt_irq_mask = ~0;
-
 
2257
	if (HAS_L3_GPU_CACHE(dev)) {
-
 
2258
		/* L3 parity interrupt is always unmasked. */
-
 
2259
		dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-
 
2260
		gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-
 
2261
	}
-
 
2262
 
-
 
2263
	gt_irqs |= GT_RENDER_USER_INTERRUPT;
-
 
2264
	if (IS_GEN5(dev)) {
Line 2028... Line 2265...
2028
    I915_WRITE(DEIMR, dev_priv->irq_mask);
2265
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
2029
    I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
2266
			   ILK_BSD_USER_INTERRUPT;
2030
    POSTING_READ(DEIER);
-
 
2031
 
-
 
2032
	dev_priv->gt_irq_mask = ~0;
-
 
2033
 
-
 
2034
    I915_WRITE(GTIIR, I915_READ(GTIIR));
-
 
2035
    I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-
 
2036
 
-
 
2037
    if (IS_GEN6(dev))
-
 
2038
        render_irqs =
-
 
2039
            GT_USER_INTERRUPT |
-
 
2040
			GEN6_BSD_USER_INTERRUPT |
-
 
2041
			GEN6_BLITTER_USER_INTERRUPT;
2267
	} else {
2042
    else
2268
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
Line -... Line 2269...
-
 
2269
	}
2043
        render_irqs =
2270
 
Line 2044... Line 2271...
2044
            GT_USER_INTERRUPT |
2271
	I915_WRITE(GTIIR, I915_READ(GTIIR));
2045
            GT_PIPE_NOTIFY |
-
 
2046
            GT_BSD_USER_INTERRUPT;
2272
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2047
    I915_WRITE(GTIER, render_irqs);
-
 
2048
    POSTING_READ(GTIER);
-
 
2049
 
-
 
Line -... Line 2273...
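Note: gen5_gt_irq_postinstall() above follows the usual bring-up order for one interrupt block: acknowledge whatever is still latched in the identity register, program the mask, then the enable register, then do a posting read; gen6+ repeats the same steps for the PM block (GEN6_PMIIR/PMIMR/PMIER). The order in a generic sketch, with toy rd()/wr() helpers in place of the MMIO accessors:

#include <stdint.h>

static uint32_t regs[3];
enum { IIR, IMR, IER };                        /* identity, mask, enable */
static uint32_t rd(unsigned r)             { return regs[r]; }
static void     wr(unsigned r, uint32_t v) { regs[r] = v; }

static void irq_block_postinstall(uint32_t wanted)
{
	wr(IIR, rd(IIR));   /* 1. ack anything still latched from before      */
	wr(IMR, ~wanted);   /* 2. unmask only the sources we intend to handle */
	wr(IER, wanted);    /* 3. then let them assert the interrupt line     */
	(void)rd(IER);      /* posting read, as the code above does           */
}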
-
 
2273
	I915_WRITE(GTIER, gt_irqs);
-
 
2274
    POSTING_READ(GTIER);
-
 
2275
 
-
 
2276
	if (INTEL_INFO(dev)->gen >= 6) {
-
 
2277
		pm_irqs |= GEN6_PM_RPS_EVENTS;
2050
	ibx_irq_postinstall(dev);
2278
 
2051
 
2279
		if (HAS_VEBOX(dev))
Line 2052... Line 2280...
2052
    if (IS_IRONLAKE_M(dev)) {
2280
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
2053
        /* Clear & enable PCU event interrupts */
2281
 
-
 
2282
		dev_priv->pm_irq_mask = 0xffffffff;
2054
        I915_WRITE(DEIIR, DE_PCU_EVENT);
2283
		I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
2055
        I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
2284
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
-
 
2285
		I915_WRITE(GEN6_PMIER, pm_irqs);
2056
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
2286
		POSTING_READ(GEN6_PMIER);
2057
    }
2287
    }
2058
 
2288
}
2059
    return 0;
2289
 
2060
}
2290
static int ironlake_irq_postinstall(struct drm_device *dev)
2061
 
2291
{
-
 
2292
	unsigned long irqflags;
-
 
2293
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
 
2294
	u32 display_mask, extra_mask;
-
 
2295
 
2062
static int ivybridge_irq_postinstall(struct drm_device *dev)
2296
	if (INTEL_INFO(dev)->gen >= 7) {
-
 
2297
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
-
 
2298
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
-
 
2299
		DE_PLANEB_FLIP_DONE_IVB |
-
 
2300
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
-
 
2301
				DE_ERR_INT_IVB);
-
 
2302
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
Line 2063... Line 2303...
2063
{
2303
			      DE_PIPEA_VBLANK_IVB);
Line 2064... Line 2304...
2064
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2304
 
2065
	/* enable kind of interrupts always enabled */
2305
		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
2066
	u32 display_mask =
2306
	} else {
2067
		DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
2307
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
2068
		DE_PLANEC_FLIP_DONE_IVB |
-
 
2069
		DE_PLANEB_FLIP_DONE_IVB |
-
 
2070
		DE_PLANEA_FLIP_DONE_IVB |
-
 
2071
		DE_AUX_CHANNEL_A_IVB;
-
 
2072
	u32 render_irqs;
2308
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
Line 2073... Line -...
2073
 
-
 
2074
	dev_priv->irq_mask = ~display_mask;
-
 
2075
 
-
 
2076
	/* should always can generate irq */
-
 
2077
	I915_WRITE(DEIIR, I915_READ(DEIIR));
-
 
2078
	I915_WRITE(DEIMR, dev_priv->irq_mask);
-
 
2079
	I915_WRITE(DEIER,
-
 
2080
		   display_mask |
2309
				DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
2081
		   DE_PIPEC_VBLANK_IVB |
-
 
Line 2082... Line 2310...
2082
		   DE_PIPEB_VBLANK_IVB |
2310
				DE_PIPEA_FIFO_UNDERRUN | DE_POISON);
Line -... Line 2311...
-
 
2311
		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
-
 
2312
	}
-
 
2313
 
-
 
2314
	dev_priv->irq_mask = ~display_mask;
-
 
2315
 
-
 
2316
	/* should always can generate irq */
-
 
2317
	I915_WRITE(DEIIR, I915_READ(DEIIR));
-
 
2318
	I915_WRITE(DEIMR, dev_priv->irq_mask);
-
 
2319
	I915_WRITE(DEIER, display_mask | extra_mask);
-
 
2320
	POSTING_READ(DEIER);
-
 
2321
 
2083
		   DE_PIPEA_VBLANK_IVB);
2322
	gen5_gt_irq_postinstall(dev);
2084
	POSTING_READ(DEIER);
2323
 
Line 2085... Line 2324...
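Note: in ironlake_irq_postinstall() above, display_mask is both unmasked (irq_mask = ~display_mask, written to DEIMR) and enabled in DEIER, while extra_mask, the vblank bits plus DE_PCU_EVENT on Ironlake-M, is only set in DEIER; those sources presumably stay masked until a runtime path such as ironlake_enable_vblank() clears their DEIMR bit. One way to express that split (names are illustrative):

#include <stdint.h>

struct de_irq_setup {
	uint32_t always_on;   /* unmasked in DEIMR and enabled in DEIER */
	uint32_t on_demand;   /* enabled in DEIER, left masked in DEIMR */
};

/* derive the DEIMR / DEIER values implied by the split above */
static void de_irq_program(const struct de_irq_setup *s,
			   uint32_t *deimr, uint32_t *deier)
{
	*deimr = ~s->always_on;                 /* mask everything else        */
	*deier = s->always_on | s->on_demand;   /* keep on-demand sources armed,
						 * so clearing their DEIMR bit
						 * later is enough to use them */
}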
2085
 
2324
	ibx_irq_postinstall(dev);
2086
	dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
2325
 
2087
 
2326
	if (IS_IRONLAKE_M(dev)) {
2088
	I915_WRITE(GTIIR, I915_READ(GTIIR));
2327
		/* Enable PCU event interrupts
2089
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2328
		 *
2090
 
2329
		 * spinlocking not required here for correctness since interrupt
2091
	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
-
 
Line 2092... Line 2330...
2092
		GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
2330
		 * setup is guaranteed to run in single-threaded context. But we
2093
	I915_WRITE(GTIER, render_irqs);
2331
		 * need it to make the assert_spin_locked happy. */
2094
	POSTING_READ(GTIER);
2332
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2095
 
2333
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
Line 2118... Line 2356...
2118
	 */
2356
	 */
2119
	dev_priv->irq_mask = (~enable_mask) |
2357
	dev_priv->irq_mask = (~enable_mask) |
2120
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2358
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2121
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2359
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
Line 2122... Line -...
2122
 
-
 
2123
	/* Hack for broken MSIs on VLV */
-
 
2124
//   pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
-
 
2125
//   pci_read_config_word(dev->pdev, 0x98, &msid);
-
 
2126
//   msid &= 0xff; /* mask out delivery bits */
-
 
2127
//   msid |= (1<<14);
-
 
2128
//   pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
-
 
2129
 
2360
 
2130
	I915_WRITE(PORT_HOTPLUG_EN, 0);
2361
	I915_WRITE(PORT_HOTPLUG_EN, 0);
Line 2131... Line 2362...
2131
	POSTING_READ(PORT_HOTPLUG_EN);
2362
	POSTING_READ(PORT_HOTPLUG_EN);
2132
 
2363
 
2133
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
2364
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
2134
	I915_WRITE(VLV_IER, enable_mask);
2365
	I915_WRITE(VLV_IER, enable_mask);
2135
	I915_WRITE(VLV_IIR, 0xffffffff);
2366
	I915_WRITE(VLV_IIR, 0xffffffff);
2136
	I915_WRITE(PIPESTAT(0), 0xffff);
2367
	I915_WRITE(PIPESTAT(0), 0xffff);
Line -... Line 2368...
-
 
2368
	I915_WRITE(PIPESTAT(1), 0xffff);
-
 
2369
	POSTING_READ(VLV_IER);
-
 
2370
 
2137
	I915_WRITE(PIPESTAT(1), 0xffff);
2371
	/* Interrupt setup is already guaranteed to be single-threaded, this is
2138
	POSTING_READ(VLV_IER);
2372
	 * just to make the assert_spin_locked check happy. */
2139
 
2373
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
 
2374
	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
Line 2140... Line 2375...
2140
	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
2375
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2141
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2376
	i915_enable_pipestat(dev_priv, 1, pipestat_enable);
Line 2142... Line -...
2142
	i915_enable_pipestat(dev_priv, 1, pipestat_enable);
-
 
2143
 
-
 
2144
	I915_WRITE(VLV_IIR, 0xffffffff);
-
 
2145
	I915_WRITE(VLV_IIR, 0xffffffff);
-
 
2146
 
-
 
2147
	I915_WRITE(GTIIR, I915_READ(GTIIR));
2377
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2148
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-
 
Line 2149... Line 2378...
2149
 
2378
 
2150
	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
2379
	I915_WRITE(VLV_IIR, 0xffffffff);
2151
		GEN6_BLITTER_USER_INTERRUPT;
2380
	I915_WRITE(VLV_IIR, 0xffffffff);
2152
	I915_WRITE(GTIER, render_irqs);
2381
 
Line 2195... Line 2424...
2195
	I915_WRITE(HWSTAM, 0xffffffff);
2424
	I915_WRITE(HWSTAM, 0xffffffff);
Line 2196... Line 2425...
2196
 
2425
 
2197
	I915_WRITE(DEIMR, 0xffffffff);
2426
	I915_WRITE(DEIMR, 0xffffffff);
2198
	I915_WRITE(DEIER, 0x0);
2427
	I915_WRITE(DEIER, 0x0);
-
 
2428
	I915_WRITE(DEIIR, I915_READ(DEIIR));
-
 
2429
	if (IS_GEN7(dev))
Line 2199... Line 2430...
2199
	I915_WRITE(DEIIR, I915_READ(DEIIR));
2430
		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
2200
 
2431
 
2201
	I915_WRITE(GTIMR, 0xffffffff);
2432
	I915_WRITE(GTIMR, 0xffffffff);
Line 2206... Line 2437...
2206
		return;
2437
		return;
Line 2207... Line 2438...
2207
 
2438
 
2208
	I915_WRITE(SDEIMR, 0xffffffff);
2439
	I915_WRITE(SDEIMR, 0xffffffff);
2209
	I915_WRITE(SDEIER, 0x0);
2440
	I915_WRITE(SDEIER, 0x0);
-
 
2441
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
-
 
2442
	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
2210
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2443
		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
Line 2211... Line 2444...
2211
}
2444
}
Line 2212... Line 2445...
2212
 
2445
 
Line 2288... Line 2521...
2288
	struct drm_device *dev = (struct drm_device *) arg;
2521
	struct drm_device *dev = (struct drm_device *) arg;
2289
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2522
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2290
	u16 iir, new_iir;
2523
	u16 iir, new_iir;
2291
	u32 pipe_stats[2];
2524
	u32 pipe_stats[2];
2292
	unsigned long irqflags;
2525
	unsigned long irqflags;
2293
	int irq_received;
-
 
2294
	int pipe;
2526
	int pipe;
2295
	u16 flip_mask =
2527
	u16 flip_mask =
2296
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2528
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2297
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2529
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
Line 2307... Line 2539...
2307
		 * have been cleared after the pipestat interrupt was received.
2539
		 * have been cleared after the pipestat interrupt was received.
2308
		 * It doesn't set the bit in iir again, but it still produces
2540
		 * It doesn't set the bit in iir again, but it still produces
2309
		 * interrupts (for non-MSI).
2541
		 * interrupts (for non-MSI).
2310
		 */
2542
		 */
2311
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2543
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2312
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2544
//       if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2313
			i915_handle_error(dev, false);
2545
//           i915_handle_error(dev, false);
Line 2314... Line 2546...
2314
 
2546
 
2315
		for_each_pipe(pipe) {
2547
		for_each_pipe(pipe) {
2316
			int reg = PIPESTAT(pipe);
2548
			int reg = PIPESTAT(pipe);
Line 2322... Line 2554...
2322
			if (pipe_stats[pipe] & 0x8000ffff) {
2554
			if (pipe_stats[pipe] & 0x8000ffff) {
2323
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2555
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2324
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
2556
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
2325
							 pipe_name(pipe));
2557
							 pipe_name(pipe));
2326
				I915_WRITE(reg, pipe_stats[pipe]);
2558
				I915_WRITE(reg, pipe_stats[pipe]);
2327
				irq_received = 1;
-
 
2328
			}
2559
			}
2329
		}
2560
		}
2330
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2561
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Line 2331... Line 2562...
2331
 
2562
 
Line 2483... Line 2714...
2483
		 * have been cleared after the pipestat interrupt was received.
2714
		 * have been cleared after the pipestat interrupt was received.
2484
		 * It doesn't set the bit in iir again, but it still produces
2715
		 * It doesn't set the bit in iir again, but it still produces
2485
		 * interrupts (for non-MSI).
2716
		 * interrupts (for non-MSI).
2486
		 */
2717
		 */
2487
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2718
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2488
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2719
//       if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2489
			i915_handle_error(dev, false);
2720
//           i915_handle_error(dev, false);
Line 2490... Line 2721...
2490
 
2721
 
2491
		for_each_pipe(pipe) {
2722
		for_each_pipe(pipe) {
2492
			int reg = PIPESTAT(pipe);
2723
			int reg = PIPESTAT(pipe);
Line 2512... Line 2743...
2512
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2743
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2513
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
2744
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
Line 2514... Line 2745...
2514
 
2745
 
2515
			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2746
			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2516
				  hotplug_status);
-
 
-
 
2747
				  hotplug_status);
2517
			if (hotplug_trigger) {
2748
 
2518
				if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
-
 
2519
					i915_hpd_irq_setup(dev);
-
 
2520
				queue_work(dev_priv->wq,
-
 
2521
					   &dev_priv->hotplug_work);
2749
			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
2522
			}
2750
 
2523
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2751
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2524
			POSTING_READ(PORT_HOTPLUG_STAT);
2752
			POSTING_READ(PORT_HOTPLUG_STAT);
Line 2525... Line 2753...
2525
		}
2753
		}
Line 2613... Line 2841...
2613
static int i965_irq_postinstall(struct drm_device *dev)
2841
static int i965_irq_postinstall(struct drm_device *dev)
2614
{
2842
{
2615
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2843
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2616
	u32 enable_mask;
2844
	u32 enable_mask;
2617
	u32 error_mask;
2845
	u32 error_mask;
-
 
2846
	unsigned long irqflags;
Line 2618... Line 2847...
2618
 
2847
 
2619
	/* Unmask the interrupts that we always want on. */
2848
	/* Unmask the interrupts that we always want on. */
2620
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
2849
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
2621
			       I915_DISPLAY_PORT_INTERRUPT |
2850
			       I915_DISPLAY_PORT_INTERRUPT |
Line 2631... Line 2860...
2631
	enable_mask |= I915_USER_INTERRUPT;
2860
	enable_mask |= I915_USER_INTERRUPT;
Line 2632... Line 2861...
2632
 
2861
 
2633
	if (IS_G4X(dev))
2862
	if (IS_G4X(dev))
Line -... Line 2863...
-
 
2863
		enable_mask |= I915_BSD_USER_INTERRUPT;
-
 
2864
 
-
 
2865
	/* Interrupt setup is already guaranteed to be single-threaded, this is
2634
		enable_mask |= I915_BSD_USER_INTERRUPT;
2866
	 * just to make the assert_spin_locked check happy. */
-
 
2867
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Line 2635... Line 2868...
2635
 
2868
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2636
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2869
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2637
 
2870
 
2638
	/*
2871
	/*
Line 2667... Line 2900...
2667
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2900
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2668
	struct drm_mode_config *mode_config = &dev->mode_config;
2901
	struct drm_mode_config *mode_config = &dev->mode_config;
2669
	struct intel_encoder *intel_encoder;
2902
	struct intel_encoder *intel_encoder;
2670
	u32 hotplug_en;
2903
	u32 hotplug_en;
Line -... Line 2904...
-
 
2904
 
-
 
2905
	assert_spin_locked(&dev_priv->irq_lock);
2671
 
2906
 
2672
	if (I915_HAS_HOTPLUG(dev)) {
2907
	if (I915_HAS_HOTPLUG(dev)) {
2673
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
2908
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
2674
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
2909
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
2675
	/* Note HDMI and DP share hotplug bits */
2910
	/* Note HDMI and DP share hotplug bits */
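The comment above introduces the per-encoder loop (collapsed in this view) that rebuilds PORT_HOTPLUG_EN: each encoder whose HPD pin is still enabled contributes one enable bit. A hypothetical, self-contained sketch of that accumulation, with invented pin names and bit values rather than the driver's real tables:

#include <stdint.h>
#include <stdio.h>

/* Invented pin numbering and per-pin enable bits; the driver keeps similar
 * tables indexed by the encoder's hpd_pin. */
enum hpd_pin { HPD_NONE, HPD_PORT_B, HPD_PORT_C, HPD_PORT_D, HPD_NUM_PINS };

static const uint32_t hpd_enable_bits[HPD_NUM_PINS] = {
	[HPD_PORT_B] = 1u << 4,
	[HPD_PORT_C] = 1u << 5,
	[HPD_PORT_D] = 1u << 6,
};

struct encoder { enum hpd_pin hpd_pin; int hpd_enabled; };

int main(void)
{
	struct encoder encoders[] = {
		{ HPD_PORT_B, 1 },   /* hotplug wanted */
		{ HPD_PORT_D, 0 },   /* storm-disabled, keep masked */
	};
	uint32_t hotplug_en = 0;

	for (unsigned i = 0; i < sizeof(encoders) / sizeof(encoders[0]); i++)
		if (encoders[i].hpd_enabled)
			hotplug_en |= hpd_enable_bits[encoders[i].hpd_pin];

	printf("hotplug_en = 0x%08x\n", hotplug_en);
	return 0;
}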
Line 2717... Line 2952...
2717
		 * have been cleared after the pipestat interrupt was received.
2952
		 * have been cleared after the pipestat interrupt was received.
2718
		 * It doesn't set the bit in iir again, but it still produces
2953
		 * It doesn't set the bit in iir again, but it still produces
2719
		 * interrupts (for non-MSI).
2954
		 * interrupts (for non-MSI).
2720
		 */
2955
		 */
2721
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2956
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2722
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2957
//       if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2723
			i915_handle_error(dev, false);
2958
//           i915_handle_error(dev, false);
Line 2724... Line 2959...
2724
 
2959
 
2725
		for_each_pipe(pipe) {
2960
		for_each_pipe(pipe) {
2726
			int reg = PIPESTAT(pipe);
2961
			int reg = PIPESTAT(pipe);
Line 2747... Line 2982...
2747
		/* Consume port.  Then clear IIR or we'll miss events */
2982
		/* Consume port.  Then clear IIR or we'll miss events */
2748
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
2983
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
2749
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2984
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2750
			u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
2985
			u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
2751
								  HOTPLUG_INT_STATUS_G4X :
2986
								  HOTPLUG_INT_STATUS_G4X :
2752
								  HOTPLUG_INT_STATUS_I965);
2987
								  HOTPLUG_INT_STATUS_I915);
Line 2753... Line 2988...
2753
 
2988
 
2754
			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2989
			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2755
				  hotplug_status);
-
 
-
 
2990
				  hotplug_status);
2756
			if (hotplug_trigger) {
2991
 
2757
				if (hotplug_irq_storm_detect(dev, hotplug_trigger,
2992
			intel_hpd_irq_handler(dev, hotplug_trigger,
2758
							    IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i965))
-
 
2759
					i915_hpd_irq_setup(dev);
-
 
2760
				queue_work(dev_priv->wq,
-
 
2761
					   &dev_priv->hotplug_work);
2993
					      IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915);
2762
			}
2994
 
2763
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2995
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2764
			I915_READ(PORT_HOTPLUG_STAT);
2996
			I915_READ(PORT_HOTPLUG_STAT);
Line 2765... Line 2997...
2765
		}
2997
		}
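The i965 handler picks its trigger mask at run time, because G4X parts latch hotplug status in a different bit layout than earlier chips. A small sketch of that selection with made-up mask values (the real constants live in the register headers):

#include <stdint.h>
#include <stdio.h>

/* Invented values; the driver chooses the G4X or pre-G4X layout with IS_G4X(dev). */
#define FAKE_HOTPLUG_STATUS_G4X  0x3fu
#define FAKE_HOTPLUG_STATUS_I915 0x1fu

static uint32_t hotplug_trigger(uint32_t hotplug_status, int is_g4x)
{
	return hotplug_status & (is_g4x ? FAKE_HOTPLUG_STATUS_G4X
					: FAKE_HOTPLUG_STATUS_I915);
}

int main(void)
{
	printf("g4x trigger:     0x%08x\n", hotplug_trigger(0x2a, 1));
	printf("pre-g4x trigger: 0x%08x\n", hotplug_trigger(0x2a, 0));
	return 0;
}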
Line 2841... Line 3073...
2841
	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
3073
	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
Line 2842... Line 3074...
2842
 
3074
 
Line -... Line 3075...
-
 
3075
//	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
Line 2843... Line 3076...
2843
//	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
3076
 
2844
 
3077
 
2845
 
3078
//	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
2846
 
3079
 
2847
	if (IS_VALLEYVIEW(dev)) {
3080
	if (IS_VALLEYVIEW(dev)) {
2848
		dev->driver->irq_handler = valleyview_irq_handler;
-
 
2849
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
-
 
2850
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
-
 
2851
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
-
 
2852
	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
-
 
2853
		/* Share pre & uninstall handlers with ILK/SNB */
-
 
2854
		dev->driver->irq_handler = ivybridge_irq_handler;
3081
		dev->driver->irq_handler = valleyview_irq_handler;
2855
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
3082
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
2856
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
3083
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
2857
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
3084
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
2858
	} else if (HAS_PCH_SPLIT(dev)) {
3085
	} else if (HAS_PCH_SPLIT(dev)) {
Line 2879... Line 3106...
2879
void intel_hpd_init(struct drm_device *dev)
3106
void intel_hpd_init(struct drm_device *dev)
2880
{
3107
{
2881
	struct drm_i915_private *dev_priv = dev->dev_private;
3108
	struct drm_i915_private *dev_priv = dev->dev_private;
2882
	struct drm_mode_config *mode_config = &dev->mode_config;
3109
	struct drm_mode_config *mode_config = &dev->mode_config;
2883
	struct drm_connector *connector;
3110
	struct drm_connector *connector;
-
 
3111
	unsigned long irqflags;
2884
	int i;
3112
	int i;
Line 2885... Line 3113...
2885
 
3113
 
2886
	for (i = 1; i < HPD_NUM_PINS; i++) {
3114
	for (i = 1; i < HPD_NUM_PINS; i++) {
2887
		dev_priv->hpd_stats[i].hpd_cnt = 0;
3115
		dev_priv->hpd_stats[i].hpd_cnt = 0;
Line 2891... Line 3119...
2891
		struct intel_connector *intel_connector = to_intel_connector(connector);
3119
		struct intel_connector *intel_connector = to_intel_connector(connector);
2892
		connector->polled = intel_connector->polled;
3120
		connector->polled = intel_connector->polled;
2893
		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
3121
		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
2894
			connector->polled = DRM_CONNECTOR_POLL_HPD;
3122
			connector->polled = DRM_CONNECTOR_POLL_HPD;
2895
	}
3123
	}
-
 
3124
 
-
 
3125
	/* Interrupt setup is already guaranteed to be single-threaded, this is
-
 
3126
	 * just to make the assert_spin_locked checks happy. */
-
 
3127
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2896
	if (dev_priv->display.hpd_irq_setup)
3128
	if (dev_priv->display.hpd_irq_setup)
2897
		dev_priv->display.hpd_irq_setup(dev);
3129
		dev_priv->display.hpd_irq_setup(dev);
-
 
3130
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2898
}
3131
}
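intel_hpd_init() above resets the per-pin storm statistics before re-arming the hotplug interrupts under the irq lock. A reduced, standalone model of that reset, with the hpd_mark states patterned after the driver's enum and everything else invented:

#include <string.h>
#include <stdio.h>

/* Reduced model of the per-pin storm bookkeeping that intel_hpd_init() clears. */
enum hpd_state { HPD_DISABLED, HPD_MARK_DISABLED, HPD_ENABLED };
enum { HPD_NUM_PINS = 6 };

struct hpd_stat { int hpd_cnt; enum hpd_state hpd_mark; };

int main(void)
{
	struct hpd_stat stats[HPD_NUM_PINS];
	int i;

	/* Forget any earlier storm: zero the counters and re-arm every pin. */
	memset(stats, 0, sizeof(stats));
	for (i = 1; i < HPD_NUM_PINS; i++)
		stats[i].hpd_mark = HPD_ENABLED;

	for (i = 1; i < HPD_NUM_PINS; i++)
		printf("pin %d: cnt=%d mark=%d\n", i, stats[i].hpd_cnt, stats[i].hpd_mark);
	return 0;
}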
Line 2899... Line -...
2899
 
-
 
-
 
3132
 
2900
 
3133
/* Disable interrupts so we can allow Package C8+. */
2901
irqreturn_t intel_irq_handler(struct drm_device *dev)
3134
void hsw_pc8_disable_interrupts(struct drm_device *dev)
-
 
3135
{
-
 
3136
	struct drm_i915_private *dev_priv = dev->dev_private;
Line 2902... Line 3137...
2902
{
3137
	unsigned long irqflags;
Line -... Line 3138...
-
 
3138
 
-
 
3139
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
 
3140
 
-
 
3141
	dev_priv->pc8.regsave.deimr = I915_READ(DEIMR);
-
 
3142
	dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR);
-
 
3143
	dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR);
-
 
3144
	dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
2903
 
3145
	dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
-
 
3146
 
-
 
3147
	ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB);
Line -... Line 3148...
-
 
3148
	ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT);
-
 
3149
	ilk_disable_gt_irq(dev_priv, 0xffffffff);
2904
//    printf("i915 irq\n");
3150
	snb_disable_pm_irq(dev_priv, 0xffffffff);
2905
 
3151
 
Line -... Line 3152...
-
 
3152
	dev_priv->pc8.irqs_disabled = true;
2906
//    printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ;
3153
 
2907
 
3154
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2908
    return dev->driver->irq_handler(0, dev);
3155
}
2909
}
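hsw_pc8_disable_interrupts() captures the current interrupt-mask registers into pc8.regsave and then masks them so the hardware can enter Package C8. A toy model of that save-then-mask step, using two fake registers in place of DEIMR and friends:

#include <stdint.h>
#include <stdio.h>

/* Fake interrupt-mask registers; the driver saves DEIMR, SDEIMR, GTIMR,
 * GTIER and GEN6_PMIMR the same way before masking them. */
static uint32_t deimr = 0x00c0ffee, gtimr = 0x12345678;

struct regsave   { uint32_t deimr, gtimr; };
struct pc8_state { struct regsave regsave; int irqs_disabled; };

static void pc8_disable_interrupts(struct pc8_state *pc8)
{
	/* Remember what was unmasked ... */
	pc8->regsave.deimr = deimr;
	pc8->regsave.gtimr = gtimr;

	/* ... then mask everything in this toy model; the real code keeps
	 * DE_PCH_EVENT_IVB and the CPT hotplug bits live. */
	deimr = 0xffffffff;
	gtimr = 0xffffffff;

	pc8->irqs_disabled = 1;
}

int main(void)
{
	struct pc8_state pc8 = { 0 };

	pc8_disable_interrupts(&pc8);
	printf("saved DEIMR 0x%08x, GTIMR 0x%08x\n",
	       pc8.regsave.deimr, pc8.regsave.gtimr);
	return 0;
}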
3156
 
2910
 
3157
/* Restore interrupts so we can recover from Package C8+. */
Line 2911... Line 3158...
2911
int drm_irq_install(struct drm_device *dev)
3158
void hsw_pc8_restore_interrupts(struct drm_device *dev)
Line -... Line 3159...
-
 
3159
{
-
 
3160
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
3161
	unsigned long irqflags;
-
 
3162
	uint32_t val, expected;
-
 
3163
 
-
 
3164
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
 
3165
 
2912
{
3166
	val = I915_READ(DEIMR);
-
 
3167
	expected = ~DE_PCH_EVENT_IVB;
-
 
3168
	WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected);
-
 
3169
 
-
 
3170
	val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT;
-
 
3171
	expected = ~SDE_HOTPLUG_MASK_CPT;
-
 
3172
	WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n",
-
 
3173
	     val, expected);
-
 
3174
 
-
 
3175
	val = I915_READ(GTIMR);
-
 
3176
	expected = 0xffffffff;
-
 
3177
	WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected);
-
 
3178
 
-
 
3179
	val = I915_READ(GEN6_PMIMR);
-
 
3180
	expected = 0xffffffff;
-
 
3181
	WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val,
-
 
3182
	     expected);
-
 
3183
 
-
 
3184
	dev_priv->pc8.irqs_disabled = false;
-
 
3185
 
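The restore path first WARNs if any mask register no longer holds the value the disable path left behind, and only then writes the saved values back. A standalone sketch of that check-then-restore idea, with a printf standing in for WARN and invented register contents:

#include <stdint.h>
#include <stdio.h>

static uint32_t deimr_reg = 0xffffffff;   /* what the disable path left behind */

/* Userspace stand-in for WARN(): report the mismatch, but keep going. */
static void warn_if(int cond, const char *name, uint32_t val, uint32_t exp)
{
	if (cond)
		fprintf(stderr, "WARN: %s is 0x%08x, not 0x%08x\n", name, val, exp);
}

int main(void)
{
	uint32_t saved_deimr = 0x00c0ffee;    /* value captured before C8 entry */
	uint32_t expected    = 0xffffffff;    /* in this toy model everything was masked */

	/* Sanity-check that nobody touched the register while we were in C8 ... */
	warn_if(deimr_reg != expected, "DEIMR", deimr_reg, expected);

	/* ... then put the saved mask back. */
	deimr_reg = saved_deimr;
	printf("DEIMR restored to 0x%08x\n", deimr_reg);
	return 0;
}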
Line 2913... Line -...
2913
    unsigned long sh_flags = 0;
-
 
2914
    int irq_line;
-
 
2915
    int ret = 0;
3186
	ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
2916
 
-
 
2917
    char *irqname;
-
 
2918
 
-
 
2919
    mutex_lock(&dev->struct_mutex);
-
 
2920
 
-
 
2921
    /* Driver must have been initialized */
-
 
2922
    if (!dev->dev_private) {
3187
	ibx_enable_display_interrupt(dev_priv,
2923
            mutex_unlock(&dev->struct_mutex);
-
 
2924
            return -EINVAL;
-
 
2925
    }
-
 
2926
 
-
 
Line 2927... Line -...
2927
    if (dev->irq_enabled) {
-
 
Line 2928... Line -...
2928
            mutex_unlock(&dev->struct_mutex);
-
 
2929
            return -EBUSY;
-
 
2930
    }
-
 
2931
    dev->irq_enabled = 1;
-
 
2932
    mutex_unlock(&dev->struct_mutex);
3188
				     ~dev_priv->pc8.regsave.sdeimr &
2933
 
3189
				     ~SDE_HOTPLUG_MASK_CPT);
2934
    irq_line   = drm_dev_to_irq(dev);
-
 
2935
 
-
 
2936
    DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
-
 
Line 2937... Line 3190...
2937
 
3190
	ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
2938
    /* Before installing handler */
-
 
2939
    if (dev->driver->irq_preinstall)
-
 
Line 2940... Line 3191...
2940
            dev->driver->irq_preinstall(dev);
3191
	snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
2941
 
-
 
2942
    ret = AttachIntHandler(irq_line, intel_irq_handler, (u32)dev);
-
 
Line 2943... Line 3192...
2943
 
3192
	I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);
2944
    /* After installing handler */
3193