Subversion Repositories Kolibri OS

/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) ": " fmt

/* The HTML export dropped the header names inside the angle brackets;
 * upstream i915_irq.c of this vintage pulls in <linux/slab.h>,
 * <drm/drmP.h> and <drm/i915_drm.h>, which is the assumed set here. */
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"


#define pr_err(fmt, ...) \
        printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)


#define DRM_WAKEUP( queue ) wake_up( queue )
#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )

#define MAX_NOPID ((u32)~0)

/**
 * Interrupts that are always left unmasked.
 *
 * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
 * we leave them always unmasked in IMR and then control enabling them through
 * PIPESTAT alone.
 */
#define I915_INTERRUPT_ENABLE_FIX			\
	(I915_ASLE_INTERRUPT |				\
	 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |		\
	 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |		\
	 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |	\
	 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |	\
	 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)

/** Interrupts that we mask and unmask at runtime. */
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)

#define I915_PIPE_VBLANK_STATUS	(PIPE_START_VBLANK_INTERRUPT_STATUS |\
				 PIPE_VBLANK_INTERRUPT_STATUS)

#define I915_PIPE_VBLANK_ENABLE	(PIPE_START_VBLANK_INTERRUPT_ENABLE |\
				 PIPE_VBLANK_INTERRUPT_ENABLE)

#define DRM_I915_VBLANK_PIPE_ALL	(DRM_I915_VBLANK_PIPE_A | \
					 DRM_I915_VBLANK_PIPE_B)

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
    if ((dev_priv->irq_mask & mask) != 0) {
        dev_priv->irq_mask &= ~mask;
        I915_WRITE(DEIMR, dev_priv->irq_mask);
        POSTING_READ(DEIMR);
    }
}

static inline void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
    if ((dev_priv->irq_mask & mask) != mask) {
        dev_priv->irq_mask |= mask;
        I915_WRITE(DEIMR, dev_priv->irq_mask);
        POSTING_READ(DEIMR);
    }
}
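
/*
 * DEIMR is the Ironlake display engine interrupt mask: a set bit blocks
 * that event from reaching DEIIR, so "enable" clears bits and "disable"
 * sets them.  The POSTING_READ() flushes the posted MMIO write so the new
 * mask is in effect before the caller drops its lock.
 *
 * Hypothetical usage sketch (the helper below is illustrative only, not
 * part of the driver); callers hold dev_priv->irq_lock, exactly as the
 * vblank routines later in this file do:
 */
#if 0
static void example_unmask_pipe_a_vblank(drm_i915_private_t *dev_priv)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, DE_PIPEA_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
#endif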

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
		POSTING_READ(reg);
	}
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] &= ~mask;
		I915_WRITE(reg, dev_priv->pipestat[pipe]);
		POSTING_READ(reg);
	}
}
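
/*
 * The PIPESTAT registers keep the enable bits in the high 16 bits and the
 * matching status bits in the low 16, and a status bit is acked by writing
 * 1 to it.  That is why the enable path writes pipestat | (mask >> 16): it
 * arms the event and simultaneously clears any status that was already
 * pending, so we don't take an interrupt for a stale event.
 */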

#if 0
/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	/* FIXME: opregion/asle for VLV */
	if (IS_VALLEYVIEW(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, DE_GSE);
	else {
		i915_enable_pipestat(dev_priv, 1,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);
		if (INTEL_INFO(dev)->gen >= 4)
			i915_enable_pipestat(dev_priv, 0,
					     PIPE_LEGACY_BLC_EVENT_ENABLE);
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
#endif

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				"pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}
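
/*
 * The do/while above is the usual torn-read guard for a counter split
 * across two registers: if the high half changed between the two reads,
 * the low half may belong to either frame, so we retry until both reads
 * of the high half agree.  The combined (high << 8) | low value is the
 * hardware frame counter that the DRM vblank core expects.
 */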

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}


static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
//   if (i915_enable_hangcheck) {
//       dev_priv->hangcheck_count = 0;
//       mod_timer(&dev_priv->hangcheck_timer,
//             jiffies +
//             msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
//   }
}
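
/*
 * Upstream notify_ring() also re-arms the GPU hangcheck timer here (the
 * commented-out block); the port presumably leaves it out because the
 * timer/workqueue plumbing it relies on isn't wired up yet.
 */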

#if 0
static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir, pm_imr;
	u8 new_delay;

	spin_lock_irq(&dev_priv->rps.lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps.lock);

	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
		new_delay = dev_priv->rps.cur_delay + 1;
	else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (!(new_delay > dev_priv->rps.max_delay ||
	      new_delay < dev_priv->rps.min_delay)) {
		gen6_set_rps(dev_priv->dev, new_delay);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;
	unsigned long flags;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = "L3_PARITY_ERROR=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
}

static void ivybridge_handle_parity_error(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long flags;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

#endif

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
    printf("%s\n", __FUNCTION__);

	if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
		      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GEN6_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

//	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
//		ivybridge_handle_parity_error(dev);
}
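
/*
 * GT IIR bits map one-to-one onto rings: render user interrupts wake the
 * RCS waiters, BSD wakes VCS, and the blitter wakes BCS.  Command stream
 * errors from any ring funnel into i915_handle_error() with wedged=false,
 * i.e. they are logged and captured but don't declare the GPU hung.
 */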

static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
				u32 pm_iir)
{
	unsigned long flags;

	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * displays a case where we've unsafely cleared
	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it displays a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

//   queue_work(dev_priv->wq, &dev_priv->rps.work);
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];
	bool blc_event;

    printf("%s\n", __FUNCTION__);

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

#if 0
		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}
#endif

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
//			if (hotplug_status & dev_priv->hotplug_supported_mask)
//				queue_work(dev_priv->wq,
//					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

        if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
            gen6_queue_rps_work(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}
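
/*
 * Note the handler loops until VLV_IIR, GTIIR and GEN6_PMIIR all read
 * back zero: new events can latch while earlier ones are being serviced,
 * and returning with any IIR bit still set would leave the interrupt
 * line asserted with no fresh edge to re-trigger the handler.
 */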

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

    printf("%s\n", __FUNCTION__);

	if (pch_iir & SDE_AUDIO_POWER_MASK)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
				 SDE_AUDIO_POWER_SHIFT);

	if (pch_iir & SDE_GMBUS)
		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
				 SDE_AUDIO_POWER_SHIFT_CPT);

	if (pch_iir & SDE_AUX_MASK_CPT)
		DRM_DEBUG_DRIVER("AUX channel interrupt\n");

	if (pch_iir & SDE_GMBUS_CPT)
		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));
}

static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	int i;

    printf("%s\n", __FUNCTION__);

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		snb_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
#if 0
		if (de_iir & DE_GSE_IVB)
			intel_opregion_gse_intr(dev);

		for (i = 0; i < 3; i++) {
			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
				drm_handle_vblank(dev, i);
			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
				intel_prepare_page_flip(dev, i);
				intel_finish_page_flip_plane(dev, i);
			}
		}
#endif
		/* check event from PCH */
		if (de_iir & DE_PCH_EVENT_IVB) {
			u32 pch_iir = I915_READ(SDEIIR);

//			if (pch_iir & SDE_HOTPLUG_MASK_CPT)
//				queue_work(dev_priv->wq, &dev_priv->hotplug_work);
			cpt_irq_handler(dev, pch_iir);

			/* clear PCH hotplug event before clearing CPU irq */
			I915_WRITE(SDEIIR, pch_iir);
		}

		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	pm_iir = I915_READ(GEN6_PMIIR);
	if (pm_iir) {
//		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
//			gen6_queue_rps_work(dev_priv, pm_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		ret = IRQ_HANDLED;
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);

	return ret;
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    int ret = IRQ_NONE;
    u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;

    printf("%s\n", __FUNCTION__);

    atomic_inc(&dev_priv->irq_received);

    /* disable master interrupt before clearing iir  */
    de_ier = I915_READ(DEIER);
    I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
    POSTING_READ(DEIER);

    de_iir = I915_READ(DEIIR);
    gt_iir = I915_READ(GTIIR);
    pch_iir = I915_READ(SDEIIR);
    pm_iir = I915_READ(GEN6_PMIIR);

    if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
        (!IS_GEN6(dev) || pm_iir == 0))
        goto done;

    ret = IRQ_HANDLED;

	if (IS_GEN5(dev))
		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	else
		snb_gt_irq_handler(dev, dev_priv, gt_iir);
#if 0
	if (de_iir & DE_GSE)
		intel_opregion_gse_intr(dev);

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}
#endif

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
//		if (pch_iir & hotplug_mask)
//			queue_work(dev_priv->wq, &dev_priv->hotplug_work);
		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);
	}
#if 0
	if (IS_GEN5(dev) &&  de_iir & DE_PCU_EVENT)
		ironlake_handle_rps_change(dev);

	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);
#endif
    /* should clear PCH hotplug event before clearing CPU irq */
    I915_WRITE(SDEIIR, pch_iir);
    I915_WRITE(GTIIR, gt_iir);
    I915_WRITE(DEIIR, de_iir);
    I915_WRITE(GEN6_PMIIR, pm_iir);

done:
    I915_WRITE(DEIER, de_ier);
    POSTING_READ(DEIER);

    return ret;
}
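
/*
 * Both PCH-split handlers above bracket their work with the
 * DE_MASTER_IRQ_CONTROL bit: clearing it gates all display interrupts
 * while the IIR registers are snapshotted and acked, and restoring DEIER
 * at the end re-asserts the line if anything new arrived in between, so
 * no event is lost to the read/clear window.
 */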


/* NB: please notice the memset */
static void i915_get_extra_instdone(struct drm_device *dev,
				    uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	switch(INTEL_INFO(dev)->gen) {
	case 2:
	case 3:
		instdone[0] = I915_READ(INSTDONE);
		break;
	case 4:
	case 5:
	case 6:
		instdone[0] = I915_READ(INSTDONE_I965);
		instdone[1] = I915_READ(INSTDONE1);
		break;
	default:
        WARN(1, "Unsupported platform\n");
	case 7:
		instdone[0] = I915_READ(GEN7_INSTDONE_1);
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
		break;
	}
}
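
/*
 * The memset the "NB" above points at matters because older generations
 * fill only one or two INSTDONE slots: the error-state code prints all
 * I915_NUM_INSTDONE_REG entries, so the unused ones must be
 * deterministically zero rather than stack garbage.
 */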

#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src)
{
	struct drm_i915_error_object *dst;
	int i, count;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	count = src->base.size / PAGE_SIZE;

	dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = src->gtt_offset;
	for (i = 0; i < count; i++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
		    src->has_global_gtt_mapping) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */

			s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = count;
	dst->gtt_offset = src->gtt_offset;

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}
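
/*
 * Two copy paths above: when the object still has a global GTT mapping we
 * snapshot through the GTT aperture with an atomic WC mapping, capturing
 * the bytes as the GPU saw them; otherwise we clflush and copy the backing
 * pages through the CPU.  GFP_ATOMIC throughout, since this can run from
 * interrupt context.
 */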

static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

void
i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		kfree(error->ring[i].requests);
	}

	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error);
}
static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)
{
	err->size = obj->base.size;
	err->name = obj->base.name;
	err->rseqno = obj->last_read_seqno;
	err->wseqno = obj->last_write_seqno;
	err->gtt_offset = obj->gtt_offset;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (obj->pin_count > 0)
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, mm_list) {
		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, gtt_list) {
		if (obj->pin_count == 0)
			continue;

		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;

	}
}
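
/*
 * The fall-through from case 3 into case 2 looks like a bug but is
 * intentional: 945G/945GM/G33 have sixteen fence registers, the upper
 * eight at FENCE_REG_945_8, while the lower eight live at the gen2
 * FENCE_REG_830_0 offsets shared with the older parts.
 */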

static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	u32 seqno;

	if (!ring->get_seqno)
		return NULL;

	seqno = ring->get_seqno(ring, false);
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->ring != ring)
			continue;

		if (i915_seqno_passed(seqno, obj->last_read_seqno))
			continue;

		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
			continue;

		/* We need to copy these to an anonymous buffer as the simplest
		 * method to avoid being overwritten by userspace.
		 */
		return i915_error_object_create(dev_priv, obj);
	}

	return NULL;
}

static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
		error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
		error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		if (ring->id == RCS)
			error->bbaddr = I915_READ64(BB_ADDR);
	} else {
		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring, false);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);
	error->ctl[ring->id] = I915_READ_CTL(ring);

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;
}

static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *request;
	int i, count;

	for_each_ring(ring, dev_priv, i) {
		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
			i915_error_object_create(dev_priv, ring->obj);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kmalloc(count*sizeof(struct drm_i915_error_request),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error.  Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int i, pipe;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	error = dev_priv->first_error;
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
	if (error)
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
		 dev->primary->index);

	kref_init(&error->ref);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	error->ccid = I915_READ(CCID);

	if (HAS_PCH_SPLIT(dev))
		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
	else if (IS_VALLEYVIEW(dev))
		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
	else if (IS_GEN2(dev))
		error->ier = I915_READ16(IER);
	else
		error->ier = I915_READ(IER);

	if (INTEL_INFO(dev)->gen >= 6)
		error->derrmr = I915_READ(DERRMR);

	if (IS_VALLEYVIEW(dev))
		error->forcewake = I915_READ(FORCEWAKE_VLV);
	else if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ(FORCEWAKE_MT);
	else if (INTEL_INFO(dev)->gen == 6)
		error->forcewake = I915_READ(FORCEWAKE);

	for_each_pipe(pipe)
		error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	if (INTEL_INFO(dev)->gen == 7)
		error->err_int = I915_READ(GEN7_ERR_INT);

	i915_get_extra_instdone(dev, error->extra_instdone);

	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	/* Record buffers on the active and pinned lists. */
	error->active_bo = NULL;
	error->pinned_bo = NULL;

	i = 0;
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
		i++;
	error->active_bo_count = i;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
		if (obj->pin_count)
			i++;
	error->pinned_bo_count = i - error->active_bo_count;

	error->active_bo = NULL;
	error->pinned_bo = NULL;
	if (i) {
		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
					   GFP_ATOMIC);
		if (error->active_bo)
			error->pinned_bo =
				error->active_bo + error->active_bo_count;
	}

	if (error->active_bo)
		error->active_bo_count =
			capture_active_bo(error->active_bo,
					  error->active_bo_count,
					  &dev_priv->mm.active_list);

	if (error->pinned_bo)
		error->pinned_bo_count =
			capture_pinned_bo(error->pinned_bo,
					  error->pinned_bo_count,
					  &dev_priv->mm.bound_list);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	if (dev_priv->first_error == NULL) {
		dev_priv->first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	if (error)
		i915_error_state_free(&error->ref);
}
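
/*
 * Only a single error state is retained: if another thread installed
 * first_error while this capture was running, the local copy is freed
 * on the spot.  The stored record then persists until the reference is
 * dropped, via i915_destroy_error_state() below.
 */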

void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	error = dev_priv->first_error;
	dev_priv->first_error = NULL;
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}
#else
#define i915_capture_error_state(x)
#endif

static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
//		INIT_COMPLETION(dev_priv->error_completion);
		atomic_set(&dev_priv->mm.wedged, 1);

		/*
		 * Wakeup waiting processes so they don't hang
		 */
		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);
	}

//	queue_work(dev_priv->wq, &dev_priv->error_work);
}
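
/*
 * Setting mm.wedged and waking every ring's irq_queue lets blocked
 * waiters re-check the wedged flag and bail out with an error instead of
 * sleeping forever.  The upstream driver then queues reset work here;
 * that path is still stubbed out in this port.
 */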

#if 0


static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL ||
	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
	    !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
					obj->gtt_offset;
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}

#endif

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);

	/* maintain vblank delivery even in deep C-states */
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
				    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv,
				    DE_PIPEA_VBLANK_IVB << (5 * pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));

	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_ENABLE |
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
				     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv,
				     DE_PIPEA_VBLANK_IVB << (pipe * 5));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}
/* drm_dma.h hooks */
static void ironlake_irq_preinstall(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

    atomic_set(&dev_priv->irq_received, 0);

    I915_WRITE(HWSTAM, 0xeffe);

    /* XXX hotplug from PCH */

    I915_WRITE(DEIMR, 0xffffffff);
    I915_WRITE(DEIER, 0x0);
    POSTING_READ(DEIER);

    /* and GT */
    I915_WRITE(GTIMR, 0xffffffff);
    I915_WRITE(GTIER, 0x0);
    POSTING_READ(GTIER);

    /* south display irq */
    I915_WRITE(SDEIMR, 0xffffffff);
    I915_WRITE(SDEIER, 0x0);
    POSTING_READ(SDEIER);
}
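
/*
 * preinstall runs before the interrupt line is hooked up: HWSTAM plus the
 * DE/GT/SDE mask and enable registers are programmed so that nothing can
 * fire until postinstall installs the real masks.
 */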

static void valleyview_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/* and GT */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	I915_WRITE(DPINVGTT, 0xff);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}


/*
 * Enable digital hotplug on the PCH, and configure the DP short pulse
 * duration to 2ms (which is the minimum in the Display Port spec)
 *
 * This register is the same on all known PCH chips.
 */

static void ironlake_enable_pch_hotplug(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32	hotplug;

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    /* enable the kind of interrupts that are always enabled */
    u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
               DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
    u32 render_irqs;
    u32 hotplug_mask;

    dev_priv->irq_mask = ~display_mask;

    /* should always be able to generate irqs */
    I915_WRITE(DEIIR, I915_READ(DEIIR));
    I915_WRITE(DEIMR, dev_priv->irq_mask);
    I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
    POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~0;

    I915_WRITE(GTIIR, I915_READ(GTIIR));
    I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

    if (IS_GEN6(dev))
        render_irqs =
            GT_USER_INTERRUPT |
			GEN6_BSD_USER_INTERRUPT |
			GEN6_BLITTER_USER_INTERRUPT;
    else
        render_irqs =
            GT_USER_INTERRUPT |
            GT_PIPE_NOTIFY |
            GT_BSD_USER_INTERRUPT;
    I915_WRITE(GTIER, render_irqs);
    POSTING_READ(GTIER);

    if (HAS_PCH_CPT(dev)) {
        hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
                SDE_PORTB_HOTPLUG_CPT |
                SDE_PORTC_HOTPLUG_CPT |
                SDE_PORTD_HOTPLUG_CPT);
    } else {
        hotplug_mask = (SDE_CRT_HOTPLUG |
                SDE_PORTB_HOTPLUG |
                SDE_PORTC_HOTPLUG |
                SDE_PORTD_HOTPLUG |
                SDE_AUX_MASK);
    }

    dev_priv->pch_irq_mask = ~hotplug_mask;

    I915_WRITE(SDEIIR, I915_READ(SDEIIR));
    I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
    I915_WRITE(SDEIER, hotplug_mask);
    POSTING_READ(SDEIER);

//    ironlake_enable_pch_hotplug(dev);

    if (IS_IRONLAKE_M(dev)) {
        /* Clear & enable PCU event interrupts */
        I915_WRITE(DEIIR, DE_PCU_EVENT);
        I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
//        ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
    }

    return 0;
}
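
/*
 * The bring-up order repeated for each block (DE, GT, SDE) is: ack any
 * stale IIR bits by writing them back, program IMR, then unmask in IER.
 * Enabling before acking could fire an immediate spurious interrupt from
 * leftover status, and a POSTING_READ after each block flushes the writes.
 */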
1667
 
3031 serge 1668
static int ivybridge_irq_postinstall(struct drm_device *dev)
1669
{
1670
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1671
	/* enable kind of interrupts always enabled */
1672
	u32 display_mask =
1673
		DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
1674
		DE_PLANEC_FLIP_DONE_IVB |
1675
		DE_PLANEB_FLIP_DONE_IVB |
1676
		DE_PLANEA_FLIP_DONE_IVB;
1677
	u32 render_irqs;
1678
	u32 hotplug_mask;
2351 Serge 1679
 
3031 serge 1680
	dev_priv->irq_mask = ~display_mask;
1681
 
1682
	/* should always can generate irq */
1683
	I915_WRITE(DEIIR, I915_READ(DEIIR));
1684
	I915_WRITE(DEIMR, dev_priv->irq_mask);
1685
	I915_WRITE(DEIER,
1686
		   display_mask |
1687
		   DE_PIPEC_VBLANK_IVB |
1688
		   DE_PIPEB_VBLANK_IVB |
1689
		   DE_PIPEA_VBLANK_IVB);
1690
	POSTING_READ(DEIER);
1691
 
1692
	dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
1693
 
1694
	I915_WRITE(GTIIR, I915_READ(GTIIR));
1695
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1696
 
1697
	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
1698
		GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
1699
	I915_WRITE(GTIER, render_irqs);
1700
	POSTING_READ(GTIER);
1701
 
1702
	hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
1703
			SDE_PORTB_HOTPLUG_CPT |
1704
			SDE_PORTC_HOTPLUG_CPT |
1705
			SDE_PORTD_HOTPLUG_CPT);
1706
	dev_priv->pch_irq_mask = ~hotplug_mask;
1707
 
1708
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1709
	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
1710
	I915_WRITE(SDEIER, hotplug_mask);
1711
	POSTING_READ(SDEIER);
1712
 
1713
//	ironlake_enable_pch_hotplug(dev);
1714
 
1715
	return 0;
1716
}
1717
 
1718
static int valleyview_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
	u32 render_irqs;
//	u16 msid;	/* only needed by the disabled MSI hack below */

	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	/*
	 * Leave vblank interrupts masked initially; enable/disable will
	 * toggle them based on usage.
	 */
	dev_priv->irq_mask = (~enable_mask) |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	/* Hack for broken MSIs on VLV */
//   pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
//   pci_read_config_word(dev->pdev, 0x98, &msid);
//   msid &= 0xff; /* mask out delivery bits */
//   msid |= (1<<14);
//   pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);

	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, enable_mask);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(PIPESTAT(0), 0xffff);
	I915_WRITE(PIPESTAT(1), 0xffff);
	POSTING_READ(VLV_IER);

	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
	i915_enable_pipestat(dev_priv, 1, pipestat_enable);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
		GEN6_BLITTER_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
#if 0 /* FIXME: check register definitions; some have moved */
	/* Note HDMI and DP share bits */
	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMID_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
		hotplug_en |= SDVOC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
		hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
		hotplug_en |= CRT_HOTPLUG_INT_EN;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
	}
#endif

	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);

	return 0;
}

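/*
 * Valleyview gates everything behind its own master enable: none of
 * the banks programmed above can raise an interrupt until the
 * MASTER_INTERRUPT_ENABLE bit is written to VLV_MASTER_IER, which is
 * why that write only happens once IIR/IMR/IER are in a known state.
 */
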
static void valleyview_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
}

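/*
 * The whole gen2 (i8xx) family of handlers below is compiled out in
 * this port: intel_irq_init() at the bottom of the file deliberately
 * leaves the irq vtable empty when INTEL_INFO(dev)->gen == 2, so none
 * of them would ever be installed.
 */
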
#if 0

static void i8xx_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	return 0;
}

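/*
 * PIPESTAT layout, for the 0x8000ffff masks used in the handlers: on
 * these parts bit 31 (FIFO underrun) and bits 15:0 are
 * write-one-to-clear status bits, while the remaining high bits are
 * the corresponding interrupt enables.  Writing a register's own
 * value back therefore acks every pending event without disturbing
 * the enables.
 */
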
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	unsigned long irqflags;
	int irq_received;
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    drm_handle_vblank(dev, 0)) {
			if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
				intel_prepare_page_flip(dev, 0);
				intel_finish_page_flip(dev, 0);
				flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
			}
		}

		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    drm_handle_vblank(dev, 1)) {
			if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
				intel_prepare_page_flip(dev, 1);
				intel_finish_page_flip(dev, 1);
				flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
			}
		}

		iir = new_iir;
	}

	return IRQ_HANDLED;
}

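/*
 * Gen2 hardware exposes IIR/IMR/IER (and EMR) as 16-bit registers,
 * hence the I915_READ16/I915_WRITE16 accessors throughout the i8xx
 * paths where every later generation uses full 32-bit accesses.
 */
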
static void i8xx_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

#endif

static void i915_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;
#if 0
	if (I915_HAS_HOTPLUG(dev)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}
#endif

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	if (I915_HAS_HOTPLUG(dev)) {
		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
#if 0
		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMID_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
			hotplug_en |= CRT_HOTPLUG_INT_EN;
			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
		}
#endif
		/* Ignore TV since it's buggy */

		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}

//	intel_opregion_enable_asle(dev);

	return 0;
}

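/*
 * In the handler below, flip-pending bits are deliberately excluded
 * from both the loop condition and the IIR write-back: they stay
 * asserted in IIR until the flip actually completes, so treating them
 * as new work would spin the loop forever.  flip_mask only shrinks
 * once the corresponding flip has been observed on a vblank.
 */
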
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	u32 flip[2] = {
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
	};
	int pipe, ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
				  hotplug_status);
//			if (hotplug_status & dev_priv->hotplug_supported_mask)
//				queue_work(dev_priv->wq,
//					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			POSTING_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (IS_MOBILE(dev))
				plane = !plane;
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS /* &&
			    drm_handle_vblank(dev, pipe) */) {
				if (iir & flip[plane]) {
//					intel_prepare_page_flip(dev, plane);
//					intel_finish_page_flip(dev, pipe);
					flip_mask &= ~flip[plane];
				}
			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

//		if (blc_event || (iir & I915_ASLE_INTERRUPT))
//			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

static void i915_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug_en;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	/* Note HDMI and DP share hotplug bits */
	hotplug_en = 0;
#if 0
	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMID_HOTPLUG_INT_EN;
	if (IS_G4X(dev)) {
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	} else {
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	}
	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
		hotplug_en |= CRT_HOTPLUG_INT_EN;

		/* Programming the CRT detection parameters tends
		   to generate a spurious hotplug event about three
		   seconds later.  So just do it once.
		   */
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
	}
#endif
	/* Ignore TV since it's buggy */

	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);

//	intel_opregion_enable_asle(dev);

	return 0;
}

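/*
 * G4X parts add a second ring (the BSD video engine), which is why
 * i965_irq_postinstall above widens enable_mask with
 * I915_BSD_USER_INTERRUPT and the handler below signals ring[VCS] in
 * addition to ring[RCS].
 */
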
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	for (;;) {
		bool blc_event = false;

		irq_received = iir != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
				  hotplug_status);
//			if (hotplug_status & dev_priv->hotplug_supported_mask)
//				queue_work(dev_priv->wq,
//					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

//		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
//			intel_prepare_page_flip(dev, 0);

//		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
//			intel_prepare_page_flip(dev, 1);

		for_each_pipe(pipe) {
//			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
//			    drm_handle_vblank(dev, pipe)) {
//				i915_pageflip_stall_check(dev, pipe);
//				intel_finish_page_flip(dev, pipe);
//			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

//		if (blc_event || (iir & I915_ASLE_INTERRUPT))
//			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

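/*
 * How the pieces below fit together in this port (sketch only, error
 * handling elided; both functions are defined in this file):
 *
 *	intel_irq_init(dev);	// pick the per-generation vtable entries
 *	drm_irq_install(dev);	// preinstall -> AttachIntHandler -> postinstall
 */
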
void intel_irq_init(struct drm_device *dev)
{
	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
	} else if (IS_IVYBRIDGE(dev)) {
		/* Share pre & uninstall handlers with ILK/SNB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
	} else if (IS_HASWELL(dev)) {
		/* Share interrupt handling with IVB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			/* gen2 handlers are compiled out in this port */
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
	}

	printf("device %p driver %p handler %p\n",
	       dev, dev->driver, dev->driver->irq_handler);
}

/* KolibriOS-side trampoline: it receives only the registered device
 * pointer and fans out to the per-generation handler with a dummy irq
 * number (none of the handlers in this file use it). */
irqreturn_t intel_irq_handler(struct drm_device *dev)
{
    printf("i915 irq\n");

//    printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler);

    return dev->driver->irq_handler(0, dev);
}

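/*
 * Unlike Linux request_irq(), KolibriOS' AttachIntHandler() appears to
 * pass only the registered argument to its callback, so
 * drm_irq_install() below registers intel_irq_handler() above with the
 * drm_device pointer as that argument.
 */
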
int drm_irq_install(struct drm_device *dev)
{
    int irq_line;
    int ret = 0;

    mutex_lock(&dev->struct_mutex);

    /* Driver must have been initialized */
    if (!dev->dev_private) {
            mutex_unlock(&dev->struct_mutex);
            return -EINVAL;
    }

    if (dev->irq_enabled) {
            mutex_unlock(&dev->struct_mutex);
            return -EBUSY;
    }
    dev->irq_enabled = 1;
    mutex_unlock(&dev->struct_mutex);

    irq_line = drm_dev_to_irq(dev);

    DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));

    /* Before installing handler */
    if (dev->driver->irq_preinstall)
            dev->driver->irq_preinstall(dev);

    ret = AttachIntHandler(irq_line, intel_irq_handler, (u32)dev);

    /* After installing handler */
    if (dev->driver->irq_postinstall)
            ret = dev->driver->irq_postinstall(dev);

    if (ret < 0) {
            DRM_ERROR("%s: irq postinstall failed\n", __FUNCTION__);
    }

    /*
     * PCI command register (config offset 4): bit 10 is the Interrupt
     * Disable bit.  Clearing it (re)enables legacy INTx delivery,
     * which this port relies on instead of MSI.
     */
    u16_t cmd = PciRead16(dev->pdev->busnr, dev->pdev->devfn, 4);
    cmd &= ~(1 << 10);
    PciWrite16(dev->pdev->busnr, dev->pdev->devfn, 4, cmd);

    return ret;
}