/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) ": " fmt
 
/* The web viewer stripped the angle-bracket include targets on the next
 * four lines; the upstream i915_irq.c header set is assumed here. */
#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"


#define pr_err(fmt, ...) \
        printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)

#define DRM_IRQ_ARGS            void *arg
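
/* KolibriOS port note (assumed from context): this file is built without the
 * Linux DRM core, so the struct below is a minimal stand-in for drm_driver
 * that carries just the IRQ entry points the port wires up by hand. */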

static struct drm_driver {
    irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
    void (*irq_preinstall) (struct drm_device *dev);
    int (*irq_postinstall) (struct drm_device *dev);
} drm_driver;

static struct drm_driver *driver = &drm_driver;

#define DRM_WAKEUP( queue ) wake_up( queue )
#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )

#define MAX_NOPID ((u32)~0)

/**
 * Interrupts that are always left unmasked.
 *
 * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
 * we leave them always unmasked in IMR and then control enabling them through
 * PIPESTAT alone.
 */
#define I915_INTERRUPT_ENABLE_FIX			\
	(I915_ASLE_INTERRUPT |				\
	 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |		\
	 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |		\
	 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |	\
	 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |	\
	 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)

/** Interrupts that we mask and unmask at runtime. */
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)

#define I915_PIPE_VBLANK_STATUS	(PIPE_START_VBLANK_INTERRUPT_STATUS |\
				 PIPE_VBLANK_INTERRUPT_STATUS)

#define I915_PIPE_VBLANK_ENABLE	(PIPE_START_VBLANK_INTERRUPT_ENABLE |\
				 PIPE_VBLANK_INTERRUPT_ENABLE)

#define DRM_I915_VBLANK_PIPE_ALL	(DRM_I915_VBLANK_PIPE_A | \
					 DRM_I915_VBLANK_PIPE_B)

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
    if ((dev_priv->irq_mask & mask) != 0) {
        dev_priv->irq_mask &= ~mask;
        I915_WRITE(DEIMR, dev_priv->irq_mask);
        POSTING_READ(DEIMR);
    }
}

static inline void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
    if ((dev_priv->irq_mask & mask) != mask) {
        dev_priv->irq_mask |= mask;
        I915_WRITE(DEIMR, dev_priv->irq_mask);
        POSTING_READ(DEIMR);
    }
}

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
		POSTING_READ(reg);
	}
}
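
/* PIPESTAT keeps enable bits in the high 16 bits and the matching status
 * bits 16 below them; status bits are write-1-to-clear, which is why the
 * enable path above also writes (mask >> 16). */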

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] &= ~mask;
		I915_WRITE(reg, dev_priv->pipestat[pipe]);
		POSTING_READ(reg);
	}
}

#if 0
/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	/* FIXME: opregion/asle for VLV */
	if (IS_VALLEYVIEW(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, DE_GSE);
	else {
		i915_enable_pipestat(dev_priv, 1,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);
		if (INTEL_INFO(dev)->gen >= 4)
			i915_enable_pipestat(dev_priv, 0,
					     PIPE_LEGACY_BLC_EVENT_ENABLE);
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
#endif

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
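/* The frame counter is split across two registers: PIPEFRAME carries the
 * high 16 bits and PIPEFRAMEPIXEL the low 8 bits (next to the pixel count),
 * so the halves are re-read until stable and stitched into a 24-bit value. */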
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				"pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}


static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
//   if (i915_enable_hangcheck) {
//       dev_priv->hangcheck_count = 0;
//       mod_timer(&dev_priv->hangcheck_timer,
//             jiffies +
//             msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
//   }
}

#if 0
static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir, pm_imr;
	u8 new_delay;

	spin_lock_irq(&dev_priv->rps.lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps.lock);

	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->dev->struct_mutex);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
		new_delay = dev_priv->rps.cur_delay + 1;
	else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (!(new_delay > dev_priv->rps.max_delay ||
	      new_delay < dev_priv->rps.min_delay)) {
		gen6_set_rps(dev_priv->dev, new_delay);
	}

	mutex_unlock(&dev_priv->dev->struct_mutex);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since it is
 * statistically likely that the same row will go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    parity_error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;
	unsigned long flags;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = "L3_PARITY_ERROR=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
}

static void ivybridge_handle_parity_error(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long flags;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	queue_work(dev_priv->wq, &dev_priv->parity_error_work);
}

#endif
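
/* GT interrupt dispatch for SNB+: user interrupts wake the matching ring's
 * waiters, while command-streamer error bits escalate to i915_handle_error(). */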

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
		      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GEN6_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

//	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
//		ivybridge_handle_parity_error(dev);
}

static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
				u32 pm_iir)
{
	unsigned long flags;

	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * displays a case where we've unsafely cleared
	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it displays a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}
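
/* The VLV top-level handler below keeps draining GTIIR, GEN6_PMIIR and
 * VLV_IIR until all three read back zero; PIPESTAT bits are acked under
 * irq_lock before the corresponding IIR bits are written back. */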

static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];
	bool blc_event;

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

#if 0
		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}
#endif

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
//			if (hotplug_status & dev_priv->hotplug_supported_mask)
//				queue_work(dev_priv->wq,
//					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

//		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
//			gen6_queue_rps_work(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (pch_iir & SDE_AUDIO_POWER_MASK)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
				 SDE_AUDIO_POWER_SHIFT);

	if (pch_iir & SDE_GMBUS)
		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
				 SDE_AUDIO_POWER_SHIFT_CPT);

	if (pch_iir & SDE_AUX_MASK_CPT)
		DRM_DEBUG_DRIVER("AUX channel interrupt\n");

	if (pch_iir & SDE_GMBUS_CPT)
		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));
}
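
/* In the Ironlake/IvyBridge handlers the master interrupt is disabled in
 * DEIER while the individual IIR registers are drained, then restored at
 * the end; SDEIIR has to be acked before DEIIR, since the PCH event bit in
 * DEIIR would otherwise re-latch. */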

static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	int i;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		snb_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
#if 0
		if (de_iir & DE_GSE_IVB)
			intel_opregion_gse_intr(dev);

		for (i = 0; i < 3; i++) {
			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
				drm_handle_vblank(dev, i);
			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
				intel_prepare_page_flip(dev, i);
				intel_finish_page_flip_plane(dev, i);
			}
		}
#endif
		/* check event from PCH */
		if (de_iir & DE_PCH_EVENT_IVB) {
			u32 pch_iir = I915_READ(SDEIIR);

//			if (pch_iir & SDE_HOTPLUG_MASK_CPT)
//				queue_work(dev_priv->wq, &dev_priv->hotplug_work);
			cpt_irq_handler(dev, pch_iir);

			/* clear the PCH hotplug event before clearing the CPU irq */
			I915_WRITE(SDEIIR, pch_iir);
		}

		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	pm_iir = I915_READ(GEN6_PMIIR);
	if (pm_iir) {
//		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
//			gen6_queue_rps_work(dev_priv, pm_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		ret = IRQ_HANDLED;
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);

	return ret;
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    int ret = IRQ_NONE;
    u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
    u32 hotplug_mask;

    atomic_inc(&dev_priv->irq_received);

    /* disable master interrupt before clearing iir  */
    de_ier = I915_READ(DEIER);
    I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
    POSTING_READ(DEIER);

    de_iir = I915_READ(DEIIR);
    gt_iir = I915_READ(GTIIR);
    pch_iir = I915_READ(SDEIIR);
    pm_iir = I915_READ(GEN6_PMIIR);

    if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
        (!IS_GEN6(dev) || pm_iir == 0))
        goto done;

    if (HAS_PCH_CPT(dev))
        hotplug_mask = SDE_HOTPLUG_MASK_CPT;
    else
        hotplug_mask = SDE_HOTPLUG_MASK;

    ret = IRQ_HANDLED;

	if (IS_GEN5(dev))
		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	else
		snb_gt_irq_handler(dev, dev_priv, gt_iir);
#if 0
	if (de_iir & DE_GSE)
		intel_opregion_gse_intr(dev);

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}
#endif

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
//		if (pch_iir & hotplug_mask)
//			queue_work(dev_priv->wq, &dev_priv->hotplug_work);
		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);
	}
#if 0
	if (IS_GEN5(dev) &&  de_iir & DE_PCU_EVENT)
		ironlake_handle_rps_change(dev);

	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);
#endif
    /* the PCH hotplug event must be cleared before the CPU irq */
    I915_WRITE(SDEIIR, pch_iir);
    I915_WRITE(GTIIR, gt_iir);
    I915_WRITE(DEIIR, de_iir);
    I915_WRITE(GEN6_PMIIR, pm_iir);

done:
    I915_WRITE(DEIER, de_ier);
    POSTING_READ(DEIER);

    return ret;
}




/* NB: please notice the memset */
static void i915_get_extra_instdone(struct drm_device *dev,
				    uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	switch(INTEL_INFO(dev)->gen) {
	case 2:
	case 3:
		instdone[0] = I915_READ(INSTDONE);
		break;
	case 4:
	case 5:
	case 6:
		instdone[0] = I915_READ(INSTDONE_I965);
		instdone[1] = I915_READ(INSTDONE1);
		break;
	default:
        WARN(1, "Unsupported platform\n");
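		/* deliberate fall-through: unknown gens still read the gen7 registers */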
	case 7:
		instdone[0] = I915_READ(GEN7_INSTDONE_1);
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
		break;
	}
}

#ifdef CONFIG_DEBUG_FS
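/* Error-state capture: each page of a source object is snapshotted either
 * through the GTT aperture (memcpy_fromio under an atomic mapping) or via a
 * clflushed CPU mapping, so the dump reflects what the GPU actually read. */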
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src)
{
	struct drm_i915_error_object *dst;
	int i, count;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	count = src->base.size / PAGE_SIZE;

	dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = src->gtt_offset;
	for (i = 0; i < count; i++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
		    src->has_global_gtt_mapping) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */

			s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = count;
	dst->gtt_offset = src->gtt_offset;

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}

static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

void
i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		kfree(error->ring[i].requests);
	}

	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error);
}
static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)
{
	err->size = obj->base.size;
	err->name = obj->base.name;
	err->rseqno = obj->last_read_seqno;
	err->wseqno = obj->last_write_seqno;
	err->gtt_offset = obj->gtt_offset;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (obj->pin_count > 0)
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, mm_list) {
		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, gtt_list) {
		if (obj->pin_count == 0)
			continue;

		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
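		/* deliberate fall-through: fences 0-7 live at the 830 offsets */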
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;

	}
}

static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	u32 seqno;

	if (!ring->get_seqno)
		return NULL;

	seqno = ring->get_seqno(ring, false);
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->ring != ring)
			continue;

		if (i915_seqno_passed(seqno, obj->last_read_seqno))
			continue;

		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
			continue;

		/* We need to copy these to an anonymous buffer as the simplest
		 * method to avoid being overwritten by userspace.
		 */
		return i915_error_object_create(dev_priv, obj);
	}

	return NULL;
}

static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		if (ring->id == RCS)
			error->bbaddr = I915_READ64(BB_ADDR);
	} else {
		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring, false);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;
}

static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *request;
	int i, count;

	for_each_ring(ring, dev_priv, i) {
		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
			i915_error_object_create(dev_priv, ring->obj);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kmalloc(count*sizeof(struct drm_i915_error_request),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error.  Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int i, pipe;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	error = dev_priv->first_error;
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
	if (error)
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
		 dev->primary->index);

	kref_init(&error->ref);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	error->ccid = I915_READ(CCID);

	if (HAS_PCH_SPLIT(dev))
		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
	else if (IS_VALLEYVIEW(dev))
		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
	else if (IS_GEN2(dev))
		error->ier = I915_READ16(IER);
	else
		error->ier = I915_READ(IER);

	for_each_pipe(pipe)
		error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	if (INTEL_INFO(dev)->gen == 7)
		error->err_int = I915_READ(GEN7_ERR_INT);

	i915_get_extra_instdone(dev, error->extra_instdone);

	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	/* Record buffers on the active and pinned lists. */
	error->active_bo = NULL;
	error->pinned_bo = NULL;

	i = 0;
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
		i++;
	error->active_bo_count = i;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
		if (obj->pin_count)
			i++;
	error->pinned_bo_count = i - error->active_bo_count;

	error->active_bo = NULL;
	error->pinned_bo = NULL;
	if (i) {
		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
					   GFP_ATOMIC);
		if (error->active_bo)
			error->pinned_bo =
				error->active_bo + error->active_bo_count;
	}

	if (error->active_bo)
		error->active_bo_count =
			capture_active_bo(error->active_bo,
					  error->active_bo_count,
					  &dev_priv->mm.active_list);

	if (error->pinned_bo)
		error->pinned_bo_count =
			capture_pinned_bo(error->pinned_bo,
					  error->pinned_bo_count,
					  &dev_priv->mm.bound_list);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	if (dev_priv->first_error == NULL) {
		dev_priv->first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	if (error)
		i915_error_state_free(&error->ref);
}
 
void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	error = dev_priv->first_error;
	dev_priv->first_error = NULL;
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}
#else
#define i915_capture_error_state(x)
#endif

static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
//		INIT_COMPLETION(dev_priv->error_completion);
		atomic_set(&dev_priv->mm.wedged, 1);

		/*
		 * Wakeup waiting processes so they don't hang
		 */
		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);
	}

//	queue_work(dev_priv->wq, &dev_priv->error_work);
}

#if 0


static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL || work->pending || !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
					obj->gtt_offset;
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}

#endif

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);

	/* maintain vblank delivery even in deep C-states */
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
				    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv,
				    DE_PIPEA_VBLANK_IVB << (5 * pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
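
/* VLV routes vblank through two levels: the pipe's bit must be unmasked in
 * VLV_IMR and the start-of-vblank status armed in PIPESTAT. */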
1423
 
1424
static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
1425
{
1426
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1427
	unsigned long irqflags;
1428
	u32 imr;
1429
 
1430
	if (!i915_pipe_enabled(dev, pipe))
1431
		return -EINVAL;
1432
 
1433
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1434
	imr = I915_READ(VLV_IMR);
1435
	if (pipe == 0)
1436
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1437
	else
1438
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1439
	I915_WRITE(VLV_IMR, imr);
1440
	i915_enable_pipestat(dev_priv, pipe,
1441
			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
1442
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1443
 
1444
	return 0;
1445
}
1446
 
1447
/* Called from drm generic code, passed 'crtc' which
1448
 * we use as a pipe index
1449
 */
1450
static void i915_disable_vblank(struct drm_device *dev, int pipe)
1451
{
1452
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1453
	unsigned long irqflags;
1454
 
1455
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1456
	if (dev_priv->info->gen == 3)
1457
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
1458
 
1459
	i915_disable_pipestat(dev_priv, pipe,
1460
			      PIPE_VBLANK_INTERRUPT_ENABLE |
1461
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
1462
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1463
}
1464
 
1465
static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
1466
{
1467
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1468
	unsigned long irqflags;
1469
 
1470
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1471
	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
1472
				     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
1473
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1474
}
1475
 
1476
static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
1477
{
1478
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1479
	unsigned long irqflags;
1480
 
1481
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1482
	ironlake_disable_display_irq(dev_priv,
1483
				     DE_PIPEA_VBLANK_IVB << (pipe * 5));
1484
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1485
}
1486
 
1487
static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
1488
{
1489
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1490
	unsigned long irqflags;
1491
	u32 imr;
1492
 
1493
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1494
	i915_disable_pipestat(dev_priv, pipe,
1495
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
1496
	imr = I915_READ(VLV_IMR);
1497
	if (pipe == 0)
1498
		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1499
	else
1500
		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1501
	I915_WRITE(VLV_IMR, imr);
1502
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1503
}
1504
 
1505
static u32
1506
ring_last_seqno(struct intel_ring_buffer *ring)
1507
{
1508
	return list_entry(ring->request_list.prev,
1509
			  struct drm_i915_gem_request, list)->seqno;
1510
}
2351 Serge 1511
/* drm_dma.h hooks
1512
*/
1513
static void ironlake_irq_preinstall(struct drm_device *dev)
1514
{
1515
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1516
 
1517
    atomic_set(&dev_priv->irq_received, 0);
1518
 
1519
    I915_WRITE(HWSTAM, 0xeffe);
1520
 
1521
    /* XXX hotplug from PCH */
1522
 
1523
    I915_WRITE(DEIMR, 0xffffffff);
1524
    I915_WRITE(DEIER, 0x0);
1525
    POSTING_READ(DEIER);
1526
 
1527
    /* and GT */
1528
    I915_WRITE(GTIMR, 0xffffffff);
1529
    I915_WRITE(GTIER, 0x0);
1530
    POSTING_READ(GTIER);
1531
 
1532
    /* south display irq */
1533
    I915_WRITE(SDEIMR, 0xffffffff);
1534
    I915_WRITE(SDEIER, 0x0);
1535
    POSTING_READ(SDEIER);
1536
}
1537
 
3031 serge 1538
static void valleyview_irq_preinstall(struct drm_device *dev)
1539
{
1540
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1541
	int pipe;
1542
 
1543
	atomic_set(&dev_priv->irq_received, 0);
1544
 
1545
	/* VLV magic */
1546
	I915_WRITE(VLV_IMR, 0);
1547
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
1548
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
1549
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
1550
 
1551
	/* and GT */
1552
	I915_WRITE(GTIIR, I915_READ(GTIIR));
1553
	I915_WRITE(GTIIR, I915_READ(GTIIR));
1554
	I915_WRITE(GTIMR, 0xffffffff);
1555
	I915_WRITE(GTIER, 0x0);
1556
	POSTING_READ(GTIER);
1557
 
1558
	I915_WRITE(DPINVGTT, 0xff);
1559
 
1560
	I915_WRITE(PORT_HOTPLUG_EN, 0);
1561
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1562
	for_each_pipe(pipe)
1563
		I915_WRITE(PIPESTAT(pipe), 0xffff);
1564
	I915_WRITE(VLV_IIR, 0xffffffff);
1565
	I915_WRITE(VLV_IMR, 0xffffffff);
1566
	I915_WRITE(VLV_IER, 0x0);
1567
	POSTING_READ(VLV_IER);
1568
}
1569
 
2351 Serge 1570
/*
1571
 * Enable digital hotplug on the PCH, and configure the DP short pulse
1572
 * duration to 2ms (which is the minimum in the Display Port spec)
1573
 *
1574
 * This register is the same on all known PCH chips.
1575
 */
1576
 
1577
static void ironlake_enable_pch_hotplug(struct drm_device *dev)
1578
{
1579
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1580
	u32	hotplug;
1581
 
1582
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
1583
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
1584
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
1585
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
1586
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
1587
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
1588
}
1589
 
1590
static int ironlake_irq_postinstall(struct drm_device *dev)
1591
{
1592
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1593
    /* enable kind of interrupts always enabled */
1594
    u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
1595
               DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
1596
    u32 render_irqs;
1597
    u32 hotplug_mask;
1598
 
1599
    dev_priv->irq_mask = ~display_mask;
1600
 
1601
    /* should always can generate irq */
1602
    I915_WRITE(DEIIR, I915_READ(DEIIR));
1603
    I915_WRITE(DEIMR, dev_priv->irq_mask);
1604
    I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
1605
    POSTING_READ(DEIER);
1606
 
1607
	dev_priv->gt_irq_mask = ~0;
1608
 
1609
    I915_WRITE(GTIIR, I915_READ(GTIIR));
1610
    I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1611
 
1612
    if (IS_GEN6(dev))
1613
        render_irqs =
1614
            GT_USER_INTERRUPT |
3031 serge 1615
			GEN6_BSD_USER_INTERRUPT |
1616
			GEN6_BLITTER_USER_INTERRUPT;
2351 Serge 1617
    else
1618
        render_irqs =
1619
            GT_USER_INTERRUPT |
1620
            GT_PIPE_NOTIFY |
1621
            GT_BSD_USER_INTERRUPT;
1622
    I915_WRITE(GTIER, render_irqs);
1623
    POSTING_READ(GTIER);
1624
 
1625
    if (HAS_PCH_CPT(dev)) {
1626
        hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
1627
                SDE_PORTB_HOTPLUG_CPT |
1628
                SDE_PORTC_HOTPLUG_CPT |
1629
                SDE_PORTD_HOTPLUG_CPT);
1630
    } else {
1631
        hotplug_mask = (SDE_CRT_HOTPLUG |
1632
                SDE_PORTB_HOTPLUG |
1633
                SDE_PORTC_HOTPLUG |
1634
                SDE_PORTD_HOTPLUG |
1635
                SDE_AUX_MASK);
1636
    }
1637
 
1638
    dev_priv->pch_irq_mask = ~hotplug_mask;
1639
 
1640
    I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1641
    I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
1642
    I915_WRITE(SDEIER, hotplug_mask);
1643
    POSTING_READ(SDEIER);
1644
 
3031 serge 1645
//    ironlake_enable_pch_hotplug(dev);
2351 Serge 1646
 
1647
    if (IS_IRONLAKE_M(dev)) {
1648
        /* Clear & enable PCU event interrupts */
1649
        I915_WRITE(DEIIR, DE_PCU_EVENT);
1650
        I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
1651
        ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
1652
    }
1653
 
1654
    return 0;
1655
}
1656
 
3031 serge 1657
static int ivybridge_irq_postinstall(struct drm_device *dev)
1658
{
1659
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1660
	/* enable kind of interrupts always enabled */
1661
	u32 display_mask =
1662
		DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
1663
		DE_PLANEC_FLIP_DONE_IVB |
1664
		DE_PLANEB_FLIP_DONE_IVB |
1665
		DE_PLANEA_FLIP_DONE_IVB;
1666
	u32 render_irqs;
1667
	u32 hotplug_mask;
2351 Serge 1668
 
3031 serge 1669
	dev_priv->irq_mask = ~display_mask;
1670
 
1671
	/* should always can generate irq */
1672
	I915_WRITE(DEIIR, I915_READ(DEIIR));
1673
	I915_WRITE(DEIMR, dev_priv->irq_mask);
1674
	I915_WRITE(DEIER,
1675
		   display_mask |
1676
		   DE_PIPEC_VBLANK_IVB |
1677
		   DE_PIPEB_VBLANK_IVB |
1678
		   DE_PIPEA_VBLANK_IVB);
1679
	POSTING_READ(DEIER);
1680
 
1681
	dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
1682
 
1683
	I915_WRITE(GTIIR, I915_READ(GTIIR));
1684
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1685
 
1686
	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
1687
		GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
1688
	I915_WRITE(GTIER, render_irqs);
1689
	POSTING_READ(GTIER);
1690
 
1691
	hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
1692
			SDE_PORTB_HOTPLUG_CPT |
1693
			SDE_PORTC_HOTPLUG_CPT |
1694
			SDE_PORTD_HOTPLUG_CPT);
1695
	dev_priv->pch_irq_mask = ~hotplug_mask;
1696
 
1697
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1698
	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
1699
	I915_WRITE(SDEIER, hotplug_mask);
1700
	POSTING_READ(SDEIER);
1701
 
1702
//	ironlake_enable_pch_hotplug(dev);
1703
 
1704
	return 0;
1705
}
1706
 
1707
static int valleyview_irq_postinstall(struct drm_device *dev)
1708
{
1709
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1710
	u32 enable_mask;
1711
	u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
1712
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
1713
	u16 msid;
1714
 
1715
	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
1716
	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
1717
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
1718
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
1719
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1720
 
1721
	/*
1722
	 *Leave vblank interrupts masked initially.  enable/disable will
1723
	 * toggle them based on usage.
1724
	 */
1725
	dev_priv->irq_mask = (~enable_mask) |
1726
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
1727
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1728
 
1729
	dev_priv->pipestat[0] = 0;
1730
	dev_priv->pipestat[1] = 0;
1731
 
1732
	/* Hack for broken MSIs on VLV */
1733
	pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
1734
	pci_read_config_word(dev->pdev, 0x98, &msid);
1735
	msid &= 0xff; /* mask out delivery bits */
1736
	msid |= (1<<14);
1737
	pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
1738
 
1739
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
1740
	I915_WRITE(VLV_IER, enable_mask);
1741
	I915_WRITE(VLV_IIR, 0xffffffff);
1742
	I915_WRITE(PIPESTAT(0), 0xffff);
1743
	I915_WRITE(PIPESTAT(1), 0xffff);
1744
	POSTING_READ(VLV_IER);
1745
 
1746
	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
1747
	i915_enable_pipestat(dev_priv, 1, pipestat_enable);
1748
 
1749
	I915_WRITE(VLV_IIR, 0xffffffff);
1750
	I915_WRITE(VLV_IIR, 0xffffffff);
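	/* VLV_IIR above (and GTIIR below) are cleared twice, mirroring the
	 * upstream Linux code; presumably this catches status bits that
	 * re-latch between the two writes. */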
1751
 
1752
	dev_priv->gt_irq_mask = ~0;
1753
 
1754
	I915_WRITE(GTIIR, I915_READ(GTIIR));
1755
	I915_WRITE(GTIIR, I915_READ(GTIIR));
1756
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1757
	I915_WRITE(GTIER, GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
1758
		   GT_GEN6_BLT_CS_ERROR_INTERRUPT |
1759
		   GT_GEN6_BLT_USER_INTERRUPT |
1760
		   GT_GEN6_BSD_USER_INTERRUPT |
1761
		   GT_GEN6_BSD_CS_ERROR_INTERRUPT |
1762
		   GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
1763
		   GT_PIPE_NOTIFY |
1764
		   GT_RENDER_CS_ERROR_INTERRUPT |
1765
		   GT_SYNC_STATUS |
1766
		   GT_USER_INTERRUPT);
1767
	POSTING_READ(GTIER);
1768
 
1769
	/* ack & enable invalid PTE error interrupts */
1770
#if 0 /* FIXME: add support to irq handler for checking these bits */
1771
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
1772
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
1773
#endif
1774
 
1775
	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1776
#if 0 /* FIXME: check register definitions; some have moved */
1777
	/* Note HDMI and DP share bits */
1778
	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
1779
		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
1780
	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
1781
		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
1782
	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
1783
		hotplug_en |= HDMID_HOTPLUG_INT_EN;
1784
	if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
1785
		hotplug_en |= SDVOC_HOTPLUG_INT_EN;
1786
	if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
1787
		hotplug_en |= SDVOB_HOTPLUG_INT_EN;
1788
	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
1789
		hotplug_en |= CRT_HOTPLUG_INT_EN;
1790
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
1791
	}
1792
#endif
1793
 
1794
	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
1795
 
1796
	return 0;
1797
}
1798
 
1799
 
1800
static void valleyview_irq_uninstall(struct drm_device *dev)
1801
{
1802
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1803
	int pipe;
1804
 
1805
	if (!dev_priv)
1806
		return;
1807
 
1808
	for_each_pipe(pipe)
1809
		I915_WRITE(PIPESTAT(pipe), 0xffff);
1810
 
1811
	I915_WRITE(HWSTAM, 0xffffffff);
1812
	I915_WRITE(PORT_HOTPLUG_EN, 0);
1813
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1814
	for_each_pipe(pipe)
1815
		I915_WRITE(PIPESTAT(pipe), 0xffff);
1816
	I915_WRITE(VLV_IIR, 0xffffffff);
1817
	I915_WRITE(VLV_IMR, 0xffffffff);
1818
	I915_WRITE(VLV_IER, 0x0);
1819
	POSTING_READ(VLV_IER);
1820
}
1821
 
1822
static void ironlake_irq_uninstall(struct drm_device *dev)
1823
{
1824
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1825
 
1826
	if (!dev_priv)
1827
		return;
1828
 
1829
	I915_WRITE(HWSTAM, 0xffffffff);
1830
 
1831
	I915_WRITE(DEIMR, 0xffffffff);
1832
	I915_WRITE(DEIER, 0x0);
1833
	I915_WRITE(DEIIR, I915_READ(DEIIR));
1834
 
1835
	I915_WRITE(GTIMR, 0xffffffff);
1836
	I915_WRITE(GTIER, 0x0);
1837
	I915_WRITE(GTIIR, I915_READ(GTIIR));
1838
 
1839
	I915_WRITE(SDEIMR, 0xffffffff);
1840
	I915_WRITE(SDEIER, 0x0);
1841
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1842
}
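
/*
 * Both uninstall paths above run the postinstall idiom in reverse for each
 * block: mask everything (xxIMR = 0xffffffff), disable delivery (xxIER = 0),
 * then ack whatever is still latched by writing xxIIR back to itself.
 */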
1843
 
1844
#if 0
1845
 
1846
static void i8xx_irq_preinstall(struct drm_device * dev)
1847
{
1848
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1849
	int pipe;
1850
 
1851
	atomic_set(&dev_priv->irq_received, 0);
1852
 
1853
	for_each_pipe(pipe)
1854
		I915_WRITE(PIPESTAT(pipe), 0);
1855
	I915_WRITE16(IMR, 0xffff);
1856
	I915_WRITE16(IER, 0x0);
1857
	POSTING_READ16(IER);
1858
}
1859
 
1860
static int i8xx_irq_postinstall(struct drm_device *dev)
1861
{
1862
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1863
 
1864
	dev_priv->pipestat[0] = 0;
1865
	dev_priv->pipestat[1] = 0;
1866
 
1867
	I915_WRITE16(EMR,
1868
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
1869
 
1870
	/* Unmask the interrupts that we always want on. */
1871
	dev_priv->irq_mask =
1872
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
1873
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
1874
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
1875
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
1876
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
1877
	I915_WRITE16(IMR, dev_priv->irq_mask);
1878
 
1879
	I915_WRITE16(IER,
1880
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
1881
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
1882
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
1883
		     I915_USER_INTERRUPT);
1884
	POSTING_READ16(IER);
1885
 
1886
	return 0;
1887
}
1888
 
1889
 
1890
static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
1891
{
1892
	struct drm_device *dev = (struct drm_device *) arg;
1893
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1894
	u16 iir, new_iir;
1895
	u32 pipe_stats[2];
1896
	unsigned long irqflags;
1897
	int irq_received;
1898
	int pipe;
1899
	u16 flip_mask =
1900
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
1901
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
1902
 
1903
	atomic_inc(&dev_priv->irq_received);
1904
 
1905
	iir = I915_READ16(IIR);
1906
	if (iir == 0)
1907
		return IRQ_NONE;
1908
 
1909
	while (iir & ~flip_mask) {
1910
		/* Can't rely on pipestat interrupt bit in iir as it might
1911
		 * have been cleared after the pipestat interrupt was received.
1912
		 * It doesn't set the bit in iir again, but it still produces
1913
		 * interrupts (for non-MSI).
1914
		 */
1915
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1916
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
1917
			i915_handle_error(dev, false);
1918
 
1919
		for_each_pipe(pipe) {
1920
			int reg = PIPESTAT(pipe);
1921
			pipe_stats[pipe] = I915_READ(reg);
1922
 
1923
			/*
1924
			 * Clear the PIPE*STAT regs before the IIR
1925
			 */
1926
			if (pipe_stats[pipe] & 0x8000ffff) {
1927
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1928
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
1929
							 pipe_name(pipe));
1930
				I915_WRITE(reg, pipe_stats[pipe]);
1931
				irq_received = 1;
1932
			}
1933
		}
1934
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1935
 
1936
		I915_WRITE16(IIR, iir & ~flip_mask);
1937
		new_iir = I915_READ16(IIR); /* Flush posted writes */
1938
 
1939
		i915_update_dri1_breadcrumb(dev);
1940
 
1941
		if (iir & I915_USER_INTERRUPT)
1942
			notify_ring(dev, &dev_priv->ring[RCS]);
1943
 
1944
		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
1945
		    drm_handle_vblank(dev, 0)) {
1946
			if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
1947
				intel_prepare_page_flip(dev, 0);
1948
				intel_finish_page_flip(dev, 0);
1949
				flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
1950
			}
1951
		}
1952
 
1953
		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
1954
		    drm_handle_vblank(dev, 1)) {
1955
			if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
1956
				intel_prepare_page_flip(dev, 1);
1957
				intel_finish_page_flip(dev, 1);
1958
				flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
1959
			}
1960
		}
1961
 
1962
		iir = new_iir;
1963
	}
1964
 
1965
	return IRQ_HANDLED;
1966
}
1967
 
1968
static void i8xx_irq_uninstall(struct drm_device * dev)
1969
{
1970
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1971
	int pipe;
1972
 
1973
	for_each_pipe(pipe) {
1974
		/* Clear enable bits; then clear status bits */
1975
		I915_WRITE(PIPESTAT(pipe), 0);
1976
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
1977
	}
1978
	I915_WRITE16(IMR, 0xffff);
1979
	I915_WRITE16(IER, 0x0);
1980
	I915_WRITE16(IIR, I915_READ16(IIR));
1981
}
1982
 
1983
#endif
1984
 
1985
static void i915_irq_preinstall(struct drm_device * dev)
1986
{
1987
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1988
	int pipe;
1989
 
1990
	atomic_set(&dev_priv->irq_received, 0);
1991
 
1992
	if (I915_HAS_HOTPLUG(dev)) {
1993
		I915_WRITE(PORT_HOTPLUG_EN, 0);
1994
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1995
	}
1996
 
1997
	I915_WRITE16(HWSTAM, 0xeffe);
1998
	for_each_pipe(pipe)
1999
		I915_WRITE(PIPESTAT(pipe), 0);
2000
	I915_WRITE(IMR, 0xffffffff);
2001
	I915_WRITE(IER, 0x0);
2002
	POSTING_READ(IER);
2003
}
2004
 
2005
static int i915_irq_postinstall(struct drm_device *dev)
2006
{
2007
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2008
	u32 enable_mask;
2009
 
2010
	dev_priv->pipestat[0] = 0;
2011
	dev_priv->pipestat[1] = 0;
2012
 
2013
	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2014
 
2015
	/* Unmask the interrupts that we always want on. */
2016
	dev_priv->irq_mask =
2017
		~(I915_ASLE_INTERRUPT |
2018
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2019
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2020
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2021
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2022
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2023
 
2024
	enable_mask =
2025
		I915_ASLE_INTERRUPT |
2026
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2027
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2028
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2029
		I915_USER_INTERRUPT;
2030
#if 0
2031
	if (I915_HAS_HOTPLUG(dev)) {
2032
		/* Enable in IER... */
2033
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
2034
		/* and unmask in IMR */
2035
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
2036
	}
2037
#endif
2038
 
2039
	I915_WRITE(IMR, dev_priv->irq_mask);
2040
	I915_WRITE(IER, enable_mask);
2041
	POSTING_READ(IER);
2042
 
2043
	if (I915_HAS_HOTPLUG(dev)) {
2044
		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
2045
#if 0
2046
		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
2047
			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
2048
		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
2049
			hotplug_en |= HDMIC_HOTPLUG_INT_EN;
2050
		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
2051
			hotplug_en |= HDMID_HOTPLUG_INT_EN;
2052
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
2053
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2054
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
2055
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2056
		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
2057
			hotplug_en |= CRT_HOTPLUG_INT_EN;
2058
			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2059
		}
2060
#endif
2061
		/* Ignore TV since it's buggy */
2062
 
2063
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2064
	}
2065
 
2066
//	intel_opregion_enable_asle(dev);
2067
 
2068
	return 0;
2069
}
2070
 
2071
static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
2072
{
2073
	struct drm_device *dev = (struct drm_device *) arg;
2074
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2075
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
2076
	unsigned long irqflags;
2077
	u32 flip_mask =
2078
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2079
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2080
	u32 flip[2] = {
2081
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
2082
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
2083
	};
2084
	int pipe, ret = IRQ_NONE;
2085
 
2086
	atomic_inc(&dev_priv->irq_received);
2087
 
2088
	iir = I915_READ(IIR);
2089
	do {
2090
		bool irq_received = (iir & ~flip_mask) != 0;
2091
		bool blc_event = false;
2092
 
2093
		/* Can't rely on pipestat interrupt bit in iir as it might
2094
		 * have been cleared after the pipestat interrupt was received.
2095
		 * It doesn't set the bit in iir again, but it still produces
2096
		 * interrupts (for non-MSI).
2097
		 */
2098
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2099
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2100
			i915_handle_error(dev, false);
2101
 
2102
		for_each_pipe(pipe) {
2103
			int reg = PIPESTAT(pipe);
2104
			pipe_stats[pipe] = I915_READ(reg);
2105
 
2106
			/* Clear the PIPE*STAT regs before the IIR */
2107
			if (pipe_stats[pipe] & 0x8000ffff) {
2108
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2109
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
2110
							 pipe_name(pipe));
2111
				I915_WRITE(reg, pipe_stats[pipe]);
2112
				irq_received = true;
2113
			}
2114
		}
2115
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2116
 
2117
		if (!irq_received)
2118
			break;
2119
 
2120
		/* Consume port.  Then clear IIR or we'll miss events */
2121
		if ((I915_HAS_HOTPLUG(dev)) &&
2122
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
2123
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2124
 
2125
			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2126
				  hotplug_status);
2127
//			if (hotplug_status & dev_priv->hotplug_supported_mask)
2128
//				queue_work(dev_priv->wq,
2129
//					   &dev_priv->hotplug_work);
2130
 
2131
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2132
			POSTING_READ(PORT_HOTPLUG_STAT);
2133
		}
2134
 
2135
		I915_WRITE(IIR, iir & ~flip_mask);
2136
		new_iir = I915_READ(IIR); /* Flush posted writes */
2137
 
2138
		if (iir & I915_USER_INTERRUPT)
2139
			notify_ring(dev, &dev_priv->ring[RCS]);
2140
 
2141
		for_each_pipe(pipe) {
2142
			int plane = pipe;
2143
			if (IS_MOBILE(dev))
2144
				plane = !plane;
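				/* mobile gen3 parts swap the plane<->pipe
				 * wiring, hence the flip above */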
3051 serge 2145
            if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS /* &&
2146
                drm_handle_vblank(dev, pipe) */) {
3031 serge 2147
				if (iir & flip[plane]) {
2148
//					intel_prepare_page_flip(dev, plane);
2149
//					intel_finish_page_flip(dev, pipe);
2150
					flip_mask &= ~flip[plane];
2151
				}
2152
			}
2153
 
2154
			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2155
				blc_event = true;
2156
		}
2157
 
2158
//		if (blc_event || (iir & I915_ASLE_INTERRUPT))
2159
//			intel_opregion_asle_intr(dev);
2160
 
2161
		/* With MSI, interrupts are only generated when iir
2162
		 * transitions from zero to nonzero.  If another bit got
2163
		 * set while we were handling the existing iir bits, then
2164
		 * we would never get another interrupt.
2165
		 *
2166
		 * This is fine on non-MSI as well, as if we hit this path
2167
		 * we avoid exiting the interrupt handler only to generate
2168
		 * another one.
2169
		 *
2170
		 * Note that for MSI this could cause a stray interrupt report
2171
		 * if an interrupt landed in the time between writing IIR and
2172
		 * the posting read.  This should be rare enough to never
2173
		 * trigger the 99% of 100,000 interrupts test for disabling
2174
		 * stray interrupts.
2175
		 */
2176
		ret = IRQ_HANDLED;
2177
		iir = new_iir;
2178
	} while (iir & ~flip_mask);
2179
 
2180
	i915_update_dri1_breadcrumb(dev);
2181
 
2182
	return ret;
2183
}
2184
 
2185
static void i915_irq_uninstall(struct drm_device * dev)
2186
{
2187
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2188
	int pipe;
2189
 
2190
	if (I915_HAS_HOTPLUG(dev)) {
2191
		I915_WRITE(PORT_HOTPLUG_EN, 0);
2192
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2193
	}
2194
 
2195
	I915_WRITE16(HWSTAM, 0xffff);
2196
	for_each_pipe(pipe) {
2197
		/* Clear enable bits; then clear status bits */
2198
		I915_WRITE(PIPESTAT(pipe), 0);
2199
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2200
	}
2201
	I915_WRITE(IMR, 0xffffffff);
2202
	I915_WRITE(IER, 0x0);
2203
 
2204
	I915_WRITE(IIR, I915_READ(IIR));
2205
}
2206
 
2207
static void i965_irq_preinstall(struct drm_device * dev)
2208
{
2209
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2210
	int pipe;
2211
 
2212
	atomic_set(&dev_priv->irq_received, 0);
2213
 
2214
	I915_WRITE(PORT_HOTPLUG_EN, 0);
2215
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2216
 
2217
	I915_WRITE(HWSTAM, 0xeffe);
2218
	for_each_pipe(pipe)
2219
		I915_WRITE(PIPESTAT(pipe), 0);
2220
	I915_WRITE(IMR, 0xffffffff);
2221
	I915_WRITE(IER, 0x0);
2222
	POSTING_READ(IER);
2223
}
2224
 
2225
static int i965_irq_postinstall(struct drm_device *dev)
2226
{
2227
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2228
	u32 hotplug_en;
2229
	u32 enable_mask;
2230
	u32 error_mask;
2231
 
2232
	/* Unmask the interrupts that we always want on. */
2233
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
2234
			       I915_DISPLAY_PORT_INTERRUPT |
2235
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2236
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2237
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2238
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2239
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2240
 
2241
	enable_mask = ~dev_priv->irq_mask;
2242
	enable_mask |= I915_USER_INTERRUPT;
2243
 
2244
	if (IS_G4X(dev))
2245
		enable_mask |= I915_BSD_USER_INTERRUPT;
2246
 
2247
	dev_priv->pipestat[0] = 0;
2248
	dev_priv->pipestat[1] = 0;
2249
 
2250
	/*
2251
	 * Enable some error detection, note the instruction error mask
2252
	 * bit is reserved, so we leave it masked.
2253
	 */
2254
	if (IS_G4X(dev)) {
2255
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
2256
			       GM45_ERROR_MEM_PRIV |
2257
			       GM45_ERROR_CP_PRIV |
2258
			       I915_ERROR_MEMORY_REFRESH);
2259
	} else {
2260
		error_mask = ~(I915_ERROR_PAGE_TABLE |
2261
			       I915_ERROR_MEMORY_REFRESH);
2262
	}
2263
	I915_WRITE(EMR, error_mask);
2264
 
2265
	I915_WRITE(IMR, dev_priv->irq_mask);
2266
	I915_WRITE(IER, enable_mask);
2267
	POSTING_READ(IER);
2268
 
2269
	/* Note HDMI and DP share hotplug bits */
2270
	hotplug_en = 0;
2271
#if 0
2272
	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
2273
		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
2274
	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
2275
		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
2276
	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
2277
		hotplug_en |= HDMID_HOTPLUG_INT_EN;
2278
	if (IS_G4X(dev)) {
2279
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
2280
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2281
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
2282
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2283
	} else {
2284
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
2285
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2286
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
2287
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2288
	}
2289
	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
2290
		hotplug_en |= CRT_HOTPLUG_INT_EN;
2291
 
2292
		/* Programming the CRT detection parameters tends
2293
		   to generate a spurious hotplug event about three
2294
		   seconds later.  So just do it once.
2295
		   */
2296
		if (IS_G4X(dev))
2297
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
2298
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2299
	}
2300
#endif
2301
	/* Ignore TV since it's buggy */
2302
 
2303
	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2304
 
2305
//	intel_opregion_enable_asle(dev);
2306
 
2307
	return 0;
2308
}
2309
 
2310
static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
2311
{
2312
	struct drm_device *dev = (struct drm_device *) arg;
2313
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2314
	u32 iir, new_iir;
2315
	u32 pipe_stats[I915_MAX_PIPES];
2316
	unsigned long irqflags;
2317
	int irq_received;
2318
	int ret = IRQ_NONE, pipe;
2319
 
2320
	atomic_inc(&dev_priv->irq_received);
2321
 
2322
	iir = I915_READ(IIR);
2323
 
2324
	for (;;) {
2325
		bool blc_event = false;
2326
 
2327
		irq_received = iir != 0;
2328
 
2329
		/* Can't rely on pipestat interrupt bit in iir as it might
2330
		 * have been cleared after the pipestat interrupt was received.
2331
		 * It doesn't set the bit in iir again, but it still produces
2332
		 * interrupts (for non-MSI).
2333
		 */
2334
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2335
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2336
			i915_handle_error(dev, false);
2337
 
2338
		for_each_pipe(pipe) {
2339
			int reg = PIPESTAT(pipe);
2340
			pipe_stats[pipe] = I915_READ(reg);
2341
 
2342
			/*
2343
			 * Clear the PIPE*STAT regs before the IIR
2344
			 */
2345
			if (pipe_stats[pipe] & 0x8000ffff) {
2346
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2347
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
2348
							 pipe_name(pipe));
2349
				I915_WRITE(reg, pipe_stats[pipe]);
2350
				irq_received = 1;
2351
			}
2352
		}
2353
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2354
 
2355
		if (!irq_received)
2356
			break;
2357
 
2358
		ret = IRQ_HANDLED;
2359
 
2360
		/* Consume port.  Then clear IIR or we'll miss events */
2361
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
2362
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2363
 
2364
			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2365
				  hotplug_status);
2366
//			if (hotplug_status & dev_priv->hotplug_supported_mask)
2367
//				queue_work(dev_priv->wq,
2368
//					   &dev_priv->hotplug_work);
2369
 
2370
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2371
			I915_READ(PORT_HOTPLUG_STAT);
2372
		}
2373
 
2374
		I915_WRITE(IIR, iir);
2375
		new_iir = I915_READ(IIR); /* Flush posted writes */
2376
 
2377
		if (iir & I915_USER_INTERRUPT)
2378
			notify_ring(dev, &dev_priv->ring[RCS]);
2379
		if (iir & I915_BSD_USER_INTERRUPT)
2380
			notify_ring(dev, &dev_priv->ring[VCS]);
2381
 
2382
//		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
2383
//			intel_prepare_page_flip(dev, 0);
2384
 
2385
//		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
2386
//			intel_prepare_page_flip(dev, 1);
2387
 
2388
		for_each_pipe(pipe) {
3051 serge 2389
//           if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
2390
//               drm_handle_vblank(dev, pipe)) {
3031 serge 2391
//				i915_pageflip_stall_check(dev, pipe);
2392
//				intel_finish_page_flip(dev, pipe);
3051 serge 2393
//           }
3031 serge 2394
 
2395
			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2396
				blc_event = true;
2397
		}
2398
 
2399
 
2400
//		if (blc_event || (iir & I915_ASLE_INTERRUPT))
2401
//			intel_opregion_asle_intr(dev);
2402
 
2403
		/* With MSI, interrupts are only generated when iir
2404
		 * transitions from zero to nonzero.  If another bit got
2405
		 * set while we were handling the existing iir bits, then
2406
		 * we would never get another interrupt.
2407
		 *
2408
		 * This is fine on non-MSI as well, as if we hit this path
2409
		 * we avoid exiting the interrupt handler only to generate
2410
		 * another one.
2411
		 *
2412
		 * Note that for MSI this could cause a stray interrupt report
2413
		 * if an interrupt landed in the time between writing IIR and
2414
		 * the posting read.  This should be rare enough to never
2415
		 * trigger the 99% of 100,000 interrupts test for disabling
2416
		 * stray interrupts.
2417
		 */
2418
		iir = new_iir;
2419
	}
2420
 
2421
	i915_update_dri1_breadcrumb(dev);
2422
 
2423
	return ret;
2424
}
2425
 
2426
static void i965_irq_uninstall(struct drm_device * dev)
2427
{
2428
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2429
	int pipe;
2430
 
2431
	if (!dev_priv)
2432
		return;
2433
 
2434
	I915_WRITE(PORT_HOTPLUG_EN, 0);
2435
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2436
 
2437
	I915_WRITE(HWSTAM, 0xffffffff);
2438
	for_each_pipe(pipe)
2439
		I915_WRITE(PIPESTAT(pipe), 0);
2440
	I915_WRITE(IMR, 0xffffffff);
2441
	I915_WRITE(IER, 0x0);
2442
 
2443
	for_each_pipe(pipe)
2444
		I915_WRITE(PIPESTAT(pipe),
2445
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
2446
	I915_WRITE(IIR, I915_READ(IIR));
2447
}
2448
 
2351 Serge 2449
void intel_irq_init(struct drm_device *dev)
2450
{
3031 serge 2451
	struct drm_i915_private *dev_priv = dev->dev_private;
2452
 
2453
	if (IS_VALLEYVIEW(dev)) {
3051 serge 2454
        driver->irq_handler = valleyview_irq_handler;
2455
        driver->irq_preinstall = valleyview_irq_preinstall;
2456
        driver->irq_postinstall = valleyview_irq_postinstall;
3031 serge 2457
	} else if (IS_IVYBRIDGE(dev)) {
2351 Serge 2458
		/* Share pre & uninstall handlers with ILK/SNB */
3051 serge 2459
        driver->irq_handler = ivybridge_irq_handler;
2460
        driver->irq_preinstall = ironlake_irq_preinstall;
2461
        driver->irq_postinstall = ivybridge_irq_postinstall;
3031 serge 2462
	} else if (IS_HASWELL(dev)) {
2463
		/* Share interrupt handling with IVB */
3051 serge 2464
        driver->irq_handler = ivybridge_irq_handler;
2465
        driver->irq_preinstall = ironlake_irq_preinstall;
2466
        driver->irq_postinstall = ivybridge_irq_postinstall;
2351 Serge 2467
	} else if (HAS_PCH_SPLIT(dev)) {
3051 serge 2468
        driver->irq_handler = ironlake_irq_handler;
2469
        driver->irq_preinstall = ironlake_irq_preinstall;
2470
        driver->irq_postinstall = ironlake_irq_postinstall;
2351 Serge 2471
	} else {
3031 serge 2472
		if (INTEL_INFO(dev)->gen == 2) {
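			/* gen2 (i8xx) handlers are compiled out above
			 * (#if 0), so no hooks are installed here */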
2473
		} else if (INTEL_INFO(dev)->gen == 3) {
3051 serge 2474
            driver->irq_handler = i915_irq_handler;
2475
            driver->irq_preinstall = i915_irq_preinstall;
2476
            driver->irq_postinstall = i915_irq_postinstall;
3031 serge 2477
		} else {
3051 serge 2478
            driver->irq_handler = i965_irq_handler;
2479
            driver->irq_preinstall = i965_irq_preinstall;
2480
            driver->irq_postinstall = i965_irq_postinstall;
3031 serge 2481
		}
2351 Serge 2482
	}
2483
}
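
/*
 * A minimal bring-up sketch (assuming the i915 load path calls these in this
 * order): intel_irq_init() fills the static vtable above, then
 * drm_irq_install() below runs the selected hooks:
 *
 *	intel_irq_init(dev);
 *	if (drm_irq_install(dev) != 0)
 *		DRM_ERROR("failed to install irq handler\n");
 */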
2484
 
2485
 
2486
int drm_irq_install(struct drm_device *dev)
2487
{
2351 Serge 2489
    int irq_line;
2490
    int ret = 0;
2491
 
2351 Serge 2494
    mutex_lock(&dev->struct_mutex);
2495
 
2496
    /* Driver must have been initialized */
2497
    if (!dev->dev_private) {
2498
        mutex_unlock(&dev->struct_mutex);
2499
        return -EINVAL;
2500
    }
2501
 
2502
    if (dev->irq_enabled) {
2503
        mutex_unlock(&dev->struct_mutex);
2504
        return -EBUSY;
2505
    }
2506
    dev->irq_enabled = 1;
2507
    mutex_unlock(&dev->struct_mutex);
2508
 
2509
    irq_line   = drm_dev_to_irq(dev);
2510
 
2511
    DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
2512
 
3051 serge 2513
    /* Before installing handler */
2514
    if (driver->irq_preinstall)
2515
            driver->irq_preinstall(dev);
2351 Serge 2516
 
3051 serge 2517
    ret = AttachIntHandler(irq_line, driver->irq_handler, (u32)dev);
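    /* AttachIntHandler() is the KolibriOS kernel service used above in place
     * of request_irq(): it wires irq_line to driver->irq_handler, with dev
     * passed back as the handler argument. */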
2351 Serge 2518
 
3051 serge 2519
    /* After installing handler */
2520
    if (driver->irq_postinstall)
2521
            ret = driver->irq_postinstall(dev);
2351 Serge 2522
 
3051 serge 2523
    if (ret < 0) {
2524
            DRM_ERROR("%s: irq postinstall failed\n", __FUNCTION__);
2525
    }
2351 Serge 2526
 
2527
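    /* Offset 4 in PCI config space is the Command register; bit 10 is its
     * "Interrupt Disable" bit, so clearing it (re-)enables legacy INTx
     * delivery for the device. */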
    u16_t cmd = PciRead16(dev->pdev->busnr, dev->pdev->devfn, 4);
2528
    cmd &= ~(1<<10);
2529
    PciWrite16(dev->pdev->busnr, dev->pdev->devfn, 4, cmd);
2530
 
2531
    return ret;
2532
}
2533