/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) ": " fmt

#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"


#define pr_err(fmt, ...) \
        printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)


#define DRM_WAKEUP( queue ) wake_up( queue )
#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )

#define MAX_NOPID ((u32)~0)


/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static inline void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
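		/* enable bits live in 31:16 and status bits in 15:0, so
		 * OR-ing in (mask >> 16) also acks any already-pending event */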
		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
		POSTING_READ(reg);
	}
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] &= ~mask;
		I915_WRITE(reg, dev_priv->pipestat[pipe]);
		POSTING_READ(reg);
	}
}

#if 0
/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	/* FIXME: opregion/asle for VLV */
	if (IS_VALLEYVIEW(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, DE_GSE);
	else {
		i915_enable_pipestat(dev_priv, 1,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);
		if (INTEL_INFO(dev)->gen >= 4)
			i915_enable_pipestat(dev_priv, 0,
					     PIPE_LEGACY_BLC_EVENT_ENABLE);
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
#endif

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				"pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
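	/* frame counter is 24 bits: 16 high bits from PIPEFRAME, 8 low bits from PIPEFRAMEPIXEL */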
	return (high1 << 8) | low;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->hot_plug)
			encoder->hot_plug(encoder);

	mutex_unlock(&mode_config->mutex);

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}

static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
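	/* hangcheck re-arm is not wired up in this port: */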
//   if (i915_enable_hangcheck) {
//       dev_priv->hangcheck_count = 0;
//       mod_timer(&dev_priv->hangcheck_timer,
//             jiffies +
//             msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
//   }
}

#if 0
static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir, pm_imr;
	u8 new_delay;

	spin_lock_irq(&dev_priv->rps.lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps.lock);

	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
		new_delay = dev_priv->rps.cur_delay + 1;
	else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (!(new_delay > dev_priv->rps.max_delay ||
	      new_delay < dev_priv->rps.min_delay)) {
		gen6_set_rps(dev_priv->dev, new_delay);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely that the same row will go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;
	unsigned long flags;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = "L3_PARITY_ERROR=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
}

static void ivybridge_handle_parity_error(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long flags;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

#endif

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
//    printf("%s\n", __FUNCTION__);

	if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
		      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GEN6_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

//	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
//		ivybridge_handle_parity_error(dev);
}

static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
				u32 pm_iir)
{
	unsigned long flags;

	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * displays a case where we've unsafely cleared
	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it displays a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

//   queue_work(dev_priv->wq, &dev_priv->rps.work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
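	/* DP AUX completions share the GMBUS wait queue, so wake the same waiters */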

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
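		/* keep looping until all three IIR registers read back clear */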
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
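			/* 0x8000ffff = the FIFO underrun bit (31) plus every status bit (15:0) */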
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

#if 0
		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}
#endif

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

//        if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
//            gen6_queue_rps_work(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (pch_iir & SDE_HOTPLUG_MASK)
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);

	if (pch_iir & SDE_AUDIO_POWER_MASK)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
				 SDE_AUDIO_POWER_SHIFT);

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (pch_iir & SDE_HOTPLUG_MASK_CPT)
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
				 SDE_AUDIO_POWER_SHIFT_CPT);

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));
}

static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
	irqreturn_t ret = IRQ_NONE;
	int i;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	sde_ier = I915_READ(SDEIER);
	I915_WRITE(SDEIER, 0);
	POSTING_READ(SDEIER);

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		snb_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (de_iir & DE_AUX_CHANNEL_A_IVB)
			dp_aux_irq_handler(dev);
#if 0
		if (de_iir & DE_GSE_IVB)
			intel_opregion_gse_intr(dev);

		for (i = 0; i < 3; i++) {
			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
				drm_handle_vblank(dev, i);
			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
				intel_prepare_page_flip(dev, i);
				intel_finish_page_flip_plane(dev, i);
			}
		}
#endif
		/* check event from PCH */
		if (de_iir & DE_PCH_EVENT_IVB) {
			u32 pch_iir = I915_READ(SDEIIR);

			cpt_irq_handler(dev, pch_iir);

			/* clear PCH hotplug event before clearing CPU irq */
			I915_WRITE(SDEIIR, pch_iir);
		}

		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	pm_iir = I915_READ(GEN6_PMIIR);
	if (pm_iir) {
//		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
//			gen6_queue_rps_work(dev_priv, pm_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		ret = IRQ_HANDLED;
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	I915_WRITE(SDEIER, sde_ier);
	POSTING_READ(SDEIER);

	return ret;
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	sde_ier = I915_READ(SDEIER);
	I915_WRITE(SDEIER, 0);
	POSTING_READ(SDEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pm_iir = I915_READ(GEN6_PMIIR);

	if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
		goto done;

	ret = IRQ_HANDLED;

	if (IS_GEN5(dev))
		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	else
		snb_gt_irq_handler(dev, dev_priv, gt_iir);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

#if 0
	if (de_iir & DE_GSE)
		intel_opregion_gse_intr(dev);

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}
#endif

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clearing CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
#if 0
	if (IS_GEN5(dev) &&  de_iir & DE_PCU_EVENT)
		ironlake_handle_rps_change(dev);

	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);
#endif
	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);

done:
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	I915_WRITE(SDEIER, sde_ier);
	POSTING_READ(SDEIER);

	return ret;
}

/* NB: please notice the memset */
static void i915_get_extra_instdone(struct drm_device *dev,
				    uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	switch(INTEL_INFO(dev)->gen) {
	case 2:
	case 3:
		instdone[0] = I915_READ(INSTDONE);
		break;
	case 4:
	case 5:
	case 6:
		instdone[0] = I915_READ(INSTDONE_I965);
		instdone[1] = I915_READ(INSTDONE1);
		break;
	default:
		WARN_ONCE(1, "Unsupported platform\n");
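		/* deliberate fall-through: assume unknown newer gens use the gen7 layout */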
	case 7:
		instdone[0] = I915_READ(GEN7_INSTDONE_1);
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
		break;
	}
}

#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src)
{
	struct drm_i915_error_object *dst;
	int i, count;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	count = src->base.size / PAGE_SIZE;

	dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = src->gtt_offset;
	for (i = 0; i < count; i++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
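		/* copy the page through the GTT aperture, stolen memory, or a
		 * CPU kmap, whichever is usable for this object */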
		if (reloc_offset < dev_priv->gtt.mappable_end &&
		    src->has_global_gtt_mapping) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */

			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else if (src->stolen) {
			unsigned long offset;

			offset = dev_priv->mm.stolen_base;
			offset += src->stolen->start;
			offset += i << PAGE_SHIFT;

			memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = count;
	dst->gtt_offset = src->gtt_offset;

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}

static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

void
i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		kfree(error->ring[i].requests);
	}

	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error);
}

static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)
{
	err->size = obj->base.size;
	err->name = obj->base.name;
	err->rseqno = obj->last_read_seqno;
	err->wseqno = obj->last_write_seqno;
	err->gtt_offset = obj->gtt_offset;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (obj->pin_count > 0)
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, mm_list) {
		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, gtt_list) {
		if (obj->pin_count == 0)
			continue;

		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
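		/* fall through: gen3 shares the first 8 fence registers with gen2 */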
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;

	default:
		BUG();
	}
}

static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	u32 seqno;

	if (!ring->get_seqno)
		return NULL;

	if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
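		/* with the broken CS TLB workaround, batches run from a
		 * scratch bo kept in ring->private; capture that instead */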
		u32 acthd = I915_READ(ACTHD);

		if (WARN_ON(ring->id != RCS))
			return NULL;

		obj = ring->private;
		if (acthd >= obj->gtt_offset &&
		    acthd < obj->gtt_offset + obj->base.size)
			return i915_error_object_create(dev_priv, obj);
	}

	seqno = ring->get_seqno(ring, false);
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->ring != ring)
			continue;

		if (i915_seqno_passed(seqno, obj->last_read_seqno))
			continue;

		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
			continue;

		/* We need to copy these to an anonymous buffer as the simplest
		 * method to avoid being overwritten by userspace.
		 */
		return i915_error_object_create(dev_priv, obj);
	}

	return NULL;
}

static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
		error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
		error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		if (ring->id == RCS)
			error->bbaddr = I915_READ64(BB_ADDR);
	} else {
		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring, false);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);
	error->ctl[ring->id] = I915_READ_CTL(ring);

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;
}

static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *request;
	int i, count;

	for_each_ring(ring, dev_priv, i) {
		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
			i915_error_object_create(dev_priv, ring->obj);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kmalloc(count*sizeof(struct drm_i915_error_request),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error.  Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int i, pipe;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
	if (error)
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	DRM_INFO("capturing error event; look for more information in "
		 "/sys/kernel/debug/dri/%d/i915_error_state\n",
		 dev->primary->index);

	kref_init(&error->ref);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	error->ccid = I915_READ(CCID);

	if (HAS_PCH_SPLIT(dev))
		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
	else if (IS_VALLEYVIEW(dev))
		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
	else if (IS_GEN2(dev))
		error->ier = I915_READ16(IER);
	else
		error->ier = I915_READ(IER);

	if (INTEL_INFO(dev)->gen >= 6)
		error->derrmr = I915_READ(DERRMR);

	if (IS_VALLEYVIEW(dev))
		error->forcewake = I915_READ(FORCEWAKE_VLV);
	else if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ(FORCEWAKE_MT);
	else if (INTEL_INFO(dev)->gen == 6)
		error->forcewake = I915_READ(FORCEWAKE);

	for_each_pipe(pipe)
		error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	if (INTEL_INFO(dev)->gen == 7)
		error->err_int = I915_READ(GEN7_ERR_INT);

	i915_get_extra_instdone(dev, error->extra_instdone);

	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	/* Record buffers on the active and pinned lists. */
	error->active_bo = NULL;
	error->pinned_bo = NULL;
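	/* count the BOs first; a single allocation below is split between the two arrays */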

	i = 0;
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
		i++;
	error->active_bo_count = i;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
		if (obj->pin_count)
			i++;
	error->pinned_bo_count = i - error->active_bo_count;

	error->active_bo = NULL;
	error->pinned_bo = NULL;
	if (i) {
		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
					   GFP_ATOMIC);
		if (error->active_bo)
			error->pinned_bo =
				error->active_bo + error->active_bo_count;
	}

	if (error->active_bo)
		error->active_bo_count =
			capture_active_bo(error->active_bo,
					  error->active_bo_count,
					  &dev_priv->mm.active_list);

	if (error->pinned_bo)
		error->pinned_bo_count =
			capture_pinned_bo(error->pinned_bo,
					  error->pinned_bo_count,
					  &dev_priv->mm.bound_list);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	if (dev_priv->gpu_error.first_error == NULL) {
		dev_priv->gpu_error.first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		i915_error_state_free(&error->ref);
}

void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}
#else
#define i915_capture_error_state(x)
#endif

static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset work item
		 * doesn't deadlock trying to grab various locks.
		 */
		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);
	}

//	queue_work(dev_priv->wq, &dev_priv->error_work);
}

#if 0

static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL ||
	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
	    !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
					obj->gtt_offset;
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}

#endif

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);

	/* maintain vblank delivery even in deep C-states */
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
				    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv,
				    DE_PIPEA_VBLANK_IVB << (5 * pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));

	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_ENABLE |
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
				     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv,
				     DE_PIPEA_VBLANK_IVB << (pipe * 5));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
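	/* request_list is ordered oldest first, so the newest request sits at .prev */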
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}

/* drm_dma.h hooks
*/
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	POSTING_READ(SDEIER);
}

static void valleyview_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/* and GT */
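	/* written twice on purpose: IIR is double-buffered and may hold a second queued event */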
1628
	I915_WRITE(GTIIR, I915_READ(GTIIR));
1629
	I915_WRITE(GTIIR, I915_READ(GTIIR));
1630
	I915_WRITE(GTIMR, 0xffffffff);
1631
	I915_WRITE(GTIER, 0x0);
1632
	POSTING_READ(GTIER);
1633
 
1634
	I915_WRITE(DPINVGTT, 0xff);
1635
 
1636
	I915_WRITE(PORT_HOTPLUG_EN, 0);
1637
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1638
	for_each_pipe(pipe)
1639
		I915_WRITE(PIPESTAT(pipe), 0xffff);
1640
	I915_WRITE(VLV_IIR, 0xffffffff);
1641
	I915_WRITE(VLV_IMR, 0xffffffff);
1642
	I915_WRITE(VLV_IER, 0x0);
1643
	POSTING_READ(VLV_IER);
1644
}
1645
 
2351 Serge 1646
/*
1647
 * Enable digital hotplug on the PCH, and configure the DP short pulse
1648
 * duration to 2ms (which is the minimum in the Display Port spec)
1649
 *
1650
 * This register is the same on all known PCH chips.
1651
 */
1652
 
3480 Serge 1653
static void ibx_enable_hotplug(struct drm_device *dev)
2351 Serge 1654
{
1655
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1656
	u32	hotplug;
1657
 
1658
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
1659
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
1660
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
1661
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
1662
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
1663
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
1664
}
1665
 
3480 Serge 1666
static void ibx_irq_postinstall(struct drm_device *dev)
1667
{
1668
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1669
	u32 mask;
1670
 
1671
	if (HAS_PCH_IBX(dev))
1672
		mask = SDE_HOTPLUG_MASK |
1673
		       SDE_GMBUS |
1674
		       SDE_AUX_MASK;
1675
	else
1676
		mask = SDE_HOTPLUG_MASK_CPT |
1677
		       SDE_GMBUS_CPT |
1678
		       SDE_AUX_MASK_CPT;
1679
 
1680
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1681
	I915_WRITE(SDEIMR, ~mask);
1682
	I915_WRITE(SDEIER, mask);
1683
	POSTING_READ(SDEIER);
1684
 
1685
	ibx_enable_hotplug(dev);
1686
}
1687
 
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* The kinds of display interrupts that are always left enabled. */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
			   DE_AUX_CHANNEL_A;
	u32 render_irqs;

	dev_priv->irq_mask = ~display_mask;

	/* These should always be able to generate an irq. */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~0;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	if (IS_GEN6(dev))
		render_irqs =
			GT_USER_INTERRUPT |
			GEN6_BSD_USER_INTERRUPT |
			GEN6_BLITTER_USER_INTERRUPT;
	else
		render_irqs =
			GT_USER_INTERRUPT |
			GT_PIPE_NOTIFY |
			GT_BSD_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Clear & enable PCU event interrupts */
		I915_WRITE(DEIIR, DE_PCU_EVENT);
		I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
	}

	return 0;
}

static int ivybridge_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* The kinds of display interrupts that are always left enabled. */
	u32 display_mask =
		DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
		DE_PLANEC_FLIP_DONE_IVB |
		DE_PLANEB_FLIP_DONE_IVB |
		DE_PLANEA_FLIP_DONE_IVB |
		DE_AUX_CHANNEL_A_IVB;
	u32 render_irqs;

	dev_priv->irq_mask = ~display_mask;

	/* These should always be able to generate an irq. */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER,
		   display_mask |
		   DE_PIPEC_VBLANK_IVB |
		   DE_PIPEB_VBLANK_IVB |
		   DE_PIPEA_VBLANK_IVB);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
		GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	ibx_irq_postinstall(dev);

	return 0;
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
	u32 render_irqs;
	u16 msid;

	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	/*
	 * Leave vblank interrupts masked initially; enable/disable will
	 * toggle them based on usage.
	 */
	dev_priv->irq_mask = (~enable_mask) |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	/* Hack for broken MSIs on VLV (disabled in this port) */
//   pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
//   pci_read_config_word(dev->pdev, 0x98, &msid);
//   msid &= 0xff; /* mask out delivery bits */
//   msid |= (1<<14);
//   pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, enable_mask);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(PIPESTAT(0), 0xffff);
	I915_WRITE(PIPESTAT(1), 0xffff);
	POSTING_READ(VLV_IER);

	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
	i915_enable_pipestat(dev_priv, 1, pipestat_enable);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
		GEN6_BLITTER_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}
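
/*
 * Editorial sketch (unused, not from the original file): the comment in
 * valleyview_irq_postinstall() says vblank interrupts start out masked and
 * are toggled on demand. The enable side looks roughly like this, assuming
 * the PIPE_START_VBLANK_INTERRUPT_ENABLE bit from i915_reg.h:
 */
static void __attribute__((unused))
vlv_vblank_unmask_sketch(drm_i915_private_t *dev_priv, int pipe)
{
	unsigned long irqflags;
	u32 imr;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr); /* unmask delivery for this pipe */
	i915_enable_pipestat(dev_priv, pipe, PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}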
 
static void valleyview_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);

	/* Note HDMI and DP share bits */
	if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
		hotplug_en |= PORTB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
		hotplug_en |= PORTC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
		hotplug_en |= PORTD_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
		hotplug_en |= SDVOC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
		hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
		hotplug_en |= CRT_HOTPLUG_INT_EN;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
	}

	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
}

#if 0

static void i8xx_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	return 0;
}

static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	unsigned long irqflags;
	int irq_received;
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on the pipestat interrupt bit in iir, as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    drm_handle_vblank(dev, 0)) {
			if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
				intel_prepare_page_flip(dev, 0);
				intel_finish_page_flip(dev, 0);
				flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
			}
		}

		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    drm_handle_vblank(dev, 1)) {
			if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
				intel_prepare_page_flip(dev, 1);
				intel_finish_page_flip(dev, 1);
				flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
			}
		}

		iir = new_iir;
	}

	return IRQ_HANDLED;
}

static void i8xx_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

#endif

static void i915_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

//	intel_opregion_enable_asle(dev);

	return 0;
}
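
/*
 * Editorial sketch (unused): i915_irq_postinstall() above programs the same
 * event into two registers, and its hotplug branch spells out why: a source
 * raises an irq only if it is enabled in IER *and* unmasked in IMR. Turning
 * one event on after the fact therefore touches both registers:
 */
static void __attribute__((unused))
i915_enable_one_event_sketch(drm_i915_private_t *dev_priv, u32 bit)
{
	dev_priv->irq_mask &= ~bit;            /* unmask in IMR... */
	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, I915_READ(IER) | bit); /* ...and enable in IER */
	POSTING_READ(IER);
}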
 
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug_en;

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);

		if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
			hotplug_en |= PORTB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
			hotplug_en |= PORTC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
			hotplug_en |= PORTD_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
			hotplug_en |= CRT_HOTPLUG_INT_EN;
			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
		}

		/* Ignore TV since it's buggy */

		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	u32 flip[2] = {
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
	};
	int pipe, ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on the pipestat interrupt bit in iir, as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port events. Then clear IIR, or we'll miss events. */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
				  hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			POSTING_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (IS_MOBILE(dev))
				plane = !plane;
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS /* &&
			    drm_handle_vblank(dev, pipe) */) {
				if (iir & flip[plane]) {
//					intel_prepare_page_flip(dev, plane);
//					intel_finish_page_flip(dev, pipe);
					flip_mask &= ~flip[plane];
				}
			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

//		if (blc_event || (iir & I915_ASLE_INTERRUPT))
//			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
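
/*
 * Editorial sketch (unused): the long MSI comment in i915_irq_handler()
 * above explains why the handler loops on IIR instead of acking it once.
 * Stripped of the pipe and hotplug bookkeeping, the skeleton is:
 */
static irqreturn_t __attribute__((unused))
iir_loop_skeleton_sketch(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir = I915_READ(IIR);
	irqreturn_t ret = IRQ_NONE;

	while (iir) {
		u32 new_iir;

		I915_WRITE(IIR, iir);     /* ack the bits we are about to handle */
		new_iir = I915_READ(IIR); /* flush; catch bits that landed meanwhile */

		/* ... dispatch the events recorded in iir here ... */

		ret = IRQ_HANDLED;
		iir = new_iir;            /* repeat until the hardware is quiet */
	}
	return ret;
}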
 
static void i915_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);

	/*
	 * Enable some error detection. Note that the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

//	intel_opregion_enable_asle(dev);

	return 0;
}

static void i965_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug_en;

	/* Note HDMI and DP share hotplug bits */
	hotplug_en = 0;
	if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
		hotplug_en |= PORTB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
		hotplug_en |= PORTC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
		hotplug_en |= PORTD_HOTPLUG_INT_EN;
	if (IS_G4X(dev)) {
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	} else {
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	}
	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
		hotplug_en |= CRT_HOTPLUG_INT_EN;

		/* Programming the CRT detection parameters tends
		   to generate a spurious hotplug event about three
		   seconds later.  So just do it once.
		   */
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
	}

	/* Ignore TV since it's buggy */

	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}
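
/*
 * Editorial note (unused sketch): the hpd_irq_setup functions above only
 * mirror dev_priv->hotplug_supported_mask into PORT_HOTPLUG_EN; the mask
 * itself is populated by encoder init code elsewhere in the driver,
 * roughly like this hypothetical example for an HDMI/DP encoder on port B:
 */
static void __attribute__((unused))
hpd_mask_origin_sketch(drm_i915_private_t *dev_priv)
{
	/* the encoder advertises which hotplug line it sits on */
	dev_priv->hotplug_supported_mask |= PORTB_HOTPLUG_INT_STATUS;
}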
 
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	for (;;) {
		bool blc_event = false;

		irq_received = iir != 0;

		/* Can't rely on the pipestat interrupt bit in iir, as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port events. Then clear IIR, or we'll miss events. */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
				  hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

//		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
//			intel_prepare_page_flip(dev, 0);

//		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
//			intel_prepare_page_flip(dev, 1);

		for_each_pipe(pipe) {
//			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
//			    drm_handle_vblank(dev, pipe)) {
//				i915_pageflip_stall_check(dev, pipe);
//				intel_finish_page_flip(dev, pipe);
//			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

//		if (blc_event || (iir & I915_ASLE_INTERRUPT))
//			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);

//	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev_priv->display.hpd_irq_setup = valleyview_hpd_irq_setup;
	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
		/* Share pre & uninstall handlers with ILK/SNB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			/* gen2 (i8xx) support is compiled out above, so no
			 * handlers are installed here. */
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i965_hpd_irq_setup;
		}
	}
}

void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
}

irqreturn_t intel_irq_handler(struct drm_device *dev)
{
//    printf("i915 irq\n");
//    printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ;

    return dev->driver->irq_handler(0, dev);
}

int drm_irq_install(struct drm_device *dev)
{
    int irq_line;
    int ret = 0;

    mutex_lock(&dev->struct_mutex);

    /* Driver must have been initialized */
    if (!dev->dev_private) {
            mutex_unlock(&dev->struct_mutex);
            return -EINVAL;
    }

    if (dev->irq_enabled) {
            mutex_unlock(&dev->struct_mutex);
            return -EBUSY;
    }
    dev->irq_enabled = 1;
    mutex_unlock(&dev->struct_mutex);

    irq_line = drm_dev_to_irq(dev);

    DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));

    /* Before installing handler */
    if (dev->driver->irq_preinstall)
            dev->driver->irq_preinstall(dev);

    ret = AttachIntHandler(irq_line, intel_irq_handler, (u32)dev);

    /* After installing handler */
    if (dev->driver->irq_postinstall)
            ret = dev->driver->irq_postinstall(dev);

    if (ret < 0)
            DRM_ERROR("%s: postinstall failed, ret = %d\n", __FUNCTION__, ret);

    /* Clear the Interrupt Disable bit (bit 10) of the PCI command register
     * so the device is allowed to assert legacy INTx interrupts. */
    u16_t cmd = PciRead16(dev->pdev->busnr, dev->pdev->devfn, 4);
    cmd &= ~(1 << 10);
    PciWrite16(dev->pdev->busnr, dev->pdev->devfn, 4, cmd);

    return ret;
}
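
/*
 * Editorial sketch (unused, hypothetical caller): the expected ordering of
 * the entry points defined above during driver load. The exact call site
 * lives elsewhere in the port; this only illustrates the sequence.
 */
static int __attribute__((unused))
irq_bringup_order_sketch(struct drm_device *dev)
{
    int ret;

    intel_irq_init(dev);          /* select per-chipset irq callbacks */
    ret = drm_irq_install(dev);   /* preinstall -> attach -> postinstall */
    intel_hpd_init(dev);          /* program hotplug detection registers */
    return ret;
}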