Subversion Repositories KolibriOS

/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"


#define pr_err(fmt, ...) \
        printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)

#define DRM_IRQ_ARGS            void *arg

#define DRM_WAKEUP( queue ) wake_up( queue )
#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )
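
/* NB: DRM_IRQ_ARGS, DRM_WAKEUP and DRM_INIT_WAITQUEUE are local shims for the
 * KolibriOS port; in upstream Linux the equivalent definitions come from the
 * DRM OS abstraction layer (drm_os_linux.h).
 */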

#define MAX_NOPID ((u32)~0)

/**
 * Interrupts that are always left unmasked.
 *
 * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
 * we leave them always unmasked in IMR and then control enabling them through
 * PIPESTAT alone.
 */
#define I915_INTERRUPT_ENABLE_FIX                       \
    (I915_ASLE_INTERRUPT |                              \
     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |              \
     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |              \
     I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |      \
     I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |      \
     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)

/** Interrupts that we mask and unmask at runtime. */
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)

#define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\
                 PIPE_VBLANK_INTERRUPT_STATUS)

#define I915_PIPE_VBLANK_ENABLE (PIPE_START_VBLANK_INTERRUPT_ENABLE |\
                 PIPE_VBLANK_INTERRUPT_ENABLE)

#define DRM_I915_VBLANK_PIPE_ALL    (DRM_I915_VBLANK_PIPE_A | \
                     DRM_I915_VBLANK_PIPE_B)

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
    if ((dev_priv->irq_mask & mask) != 0) {
        dev_priv->irq_mask &= ~mask;
        I915_WRITE(DEIMR, dev_priv->irq_mask);
        POSTING_READ(DEIMR);
    }
}

static inline void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
    if ((dev_priv->irq_mask & mask) != mask) {
        dev_priv->irq_mask |= mask;
        I915_WRITE(DEIMR, dev_priv->irq_mask);
        POSTING_READ(DEIMR);
    }
}
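
/* PIPESTAT registers pair each enable bit in the high 16 bits with a status
 * bit 16 positions below it, which is why the enable path below also writes
 * (mask >> 16): it acks any stale status for the events being enabled.
 */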
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
    if ((dev_priv->pipestat[pipe] & mask) != mask) {
        u32 reg = PIPESTAT(pipe);

        dev_priv->pipestat[pipe] |= mask;
        /* Enable the interrupt, clear any pending status */
        I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
        POSTING_READ(reg);
    }
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
    if ((dev_priv->pipestat[pipe] & mask) != 0) {
        u32 reg = PIPESTAT(pipe);

        dev_priv->pipestat[pipe] &= ~mask;
        I915_WRITE(reg, dev_priv->pipestat[pipe]);
        POSTING_READ(reg);
    }
}

#if 0
/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    unsigned long irqflags;

    /* FIXME: opregion/asle for VLV */
    if (IS_VALLEYVIEW(dev))
        return;

    spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

    if (HAS_PCH_SPLIT(dev))
        ironlake_enable_display_irq(dev_priv, DE_GSE);
    else {
        i915_enable_pipestat(dev_priv, 1,
                     PIPE_LEGACY_BLC_EVENT_ENABLE);
        if (INTEL_INFO(dev)->gen >= 4)
            i915_enable_pipestat(dev_priv, 0,
                         PIPE_LEGACY_BLC_EVENT_ENABLE);
    }

    spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
#endif

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    unsigned long high_frame;
    unsigned long low_frame;
    u32 high1, high2, low;

    if (!i915_pipe_enabled(dev, pipe)) {
        DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
                "pipe %c\n", pipe_name(pipe));
        return 0;
    }

    high_frame = PIPEFRAME(pipe);
    low_frame = PIPEFRAMEPIXEL(pipe);

    /*
     * High & low register fields aren't synchronized, so make sure
     * we get a low value that's stable across two reads of the high
     * register.
     */
    do {
        high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
        low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
        high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
    } while (high1 != high2);

    high1 >>= PIPE_FRAME_HIGH_SHIFT;
    low >>= PIPE_FRAME_LOW_SHIFT;
    return (high1 << 8) | low;
}
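
/* The two reads of the high register bracket the low read: if the counter
 * rolled over in between, high1 != high2 and we retry, so the 8-bit low field
 * is always paired with high bits sampled from the same frame.
 */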

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    int reg = PIPE_FRMCOUNT_GM45(pipe);

    if (!i915_pipe_enabled(dev, pipe)) {
        DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
                 "pipe %c\n", pipe_name(pipe));
        return 0;
    }

    return I915_READ(reg);
}


static void notify_ring(struct drm_device *dev,
            struct intel_ring_buffer *ring)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    if (ring->obj == NULL)
        return;

    trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

    wake_up_all(&ring->irq_queue);
//   if (i915_enable_hangcheck) {
//       dev_priv->hangcheck_count = 0;
//       mod_timer(&dev_priv->hangcheck_timer,
//             jiffies +
//             msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
//   }
}
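
/* NB: the hangcheck rearm above and the RPS/L3-parity work handlers below are
 * disabled in this port; they depend on kernel timers, workqueues and uevents
 * that are not wired up here.
 */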

#if 0
static void gen6_pm_rps_work(struct work_struct *work)
{
    drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                            rps.work);
    u32 pm_iir, pm_imr;
    u8 new_delay;

    spin_lock_irq(&dev_priv->rps.lock);
    pm_iir = dev_priv->rps.pm_iir;
    dev_priv->rps.pm_iir = 0;
    pm_imr = I915_READ(GEN6_PMIMR);
    I915_WRITE(GEN6_PMIMR, 0);
    spin_unlock_irq(&dev_priv->rps.lock);

    if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
        return;

    mutex_lock(&dev_priv->dev->struct_mutex);

    if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
        new_delay = dev_priv->rps.cur_delay + 1;
    else
        new_delay = dev_priv->rps.cur_delay - 1;

    /* sysfs frequency interfaces may have snuck in while servicing the
     * interrupt
     */
    if (!(new_delay > dev_priv->rps.max_delay ||
          new_delay < dev_priv->rps.min_delay)) {
        gen6_set_rps(dev_priv->dev, new_delay);
    }

    mutex_unlock(&dev_priv->dev->struct_mutex);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
    drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                            parity_error_work);
    u32 error_status, row, bank, subbank;
    char *parity_event[5];
    uint32_t misccpctl;
    unsigned long flags;

    /* We must turn off DOP level clock gating to access the L3 registers.
     * In order to prevent a get/put style interface, acquire struct mutex
     * any time we access those registers.
     */
    mutex_lock(&dev_priv->dev->struct_mutex);

    misccpctl = I915_READ(GEN7_MISCCPCTL);
    I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
    POSTING_READ(GEN7_MISCCPCTL);

    error_status = I915_READ(GEN7_L3CDERRST1);
    row = GEN7_PARITY_ERROR_ROW(error_status);
    bank = GEN7_PARITY_ERROR_BANK(error_status);
    subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

    I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
                    GEN7_L3CDERRST1_ENABLE);
    POSTING_READ(GEN7_L3CDERRST1);

    I915_WRITE(GEN7_MISCCPCTL, misccpctl);

    spin_lock_irqsave(&dev_priv->irq_lock, flags);
    dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
    I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
    spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

    mutex_unlock(&dev_priv->dev->struct_mutex);

    parity_event[0] = "L3_PARITY_ERROR=1";
    parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
    parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
    parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
    parity_event[4] = NULL;

    kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
               KOBJ_CHANGE, parity_event);

    DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
          row, bank, subbank);

    kfree(parity_event[3]);
    kfree(parity_event[2]);
    kfree(parity_event[1]);
}

static void ivybridge_handle_parity_error(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    unsigned long flags;

    if (!HAS_L3_GPU_CACHE(dev))
        return;

    spin_lock_irqsave(&dev_priv->irq_lock, flags);
    dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
    I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
    spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

    queue_work(dev_priv->wq, &dev_priv->parity_error_work);
}

#endif

static void snb_gt_irq_handler(struct drm_device *dev,
                   struct drm_i915_private *dev_priv,
                   u32 gt_iir)
{

    if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
              GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
        notify_ring(dev, &dev_priv->ring[RCS]);
    if (gt_iir & GEN6_BSD_USER_INTERRUPT)
        notify_ring(dev, &dev_priv->ring[VCS]);
    if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
        notify_ring(dev, &dev_priv->ring[BCS]);

    if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
              GT_GEN6_BSD_CS_ERROR_INTERRUPT |
              GT_RENDER_CS_ERROR_INTERRUPT)) {
        DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
        i915_handle_error(dev, false);
    }

//	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
//		ivybridge_handle_parity_error(dev);
}

static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
                u32 pm_iir)
{
    unsigned long flags;

    /*
     * IIR bits should never already be set because IMR should
     * prevent an interrupt from being shown in IIR. The warning
     * displays a case where we've unsafely cleared
     * dev_priv->rps.pm_iir. Although missing an interrupt of the same
     * type is not a problem, it displays a problem in the logic.
     *
     * The mask bit in IMR is cleared by dev_priv->rps.work.
     */

    spin_lock_irqsave(&dev_priv->rps.lock, flags);
    dev_priv->rps.pm_iir |= pm_iir;
    I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
    POSTING_READ(GEN6_PMIMR);
    spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

    queue_work(dev_priv->wq, &dev_priv->rps.work);
}
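
/* Handshake with gen6_pm_rps_work (above, compiled out in this port): the IRQ
 * path masks the PM event in PMIMR and stashes the IIR bits in rps.pm_iir;
 * the work handler later consumes them and writes PMIMR back to 0 to
 * re-enable the interrupt.
 */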

static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
{
    struct drm_device *dev = (struct drm_device *) arg;
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    u32 iir, gt_iir, pm_iir;
    irqreturn_t ret = IRQ_NONE;
    unsigned long irqflags;
    int pipe;
    u32 pipe_stats[I915_MAX_PIPES];
    bool blc_event;

    atomic_inc(&dev_priv->irq_received);

    while (true) {
        iir = I915_READ(VLV_IIR);
        gt_iir = I915_READ(GTIIR);
        pm_iir = I915_READ(GEN6_PMIIR);

        if (gt_iir == 0 && pm_iir == 0 && iir == 0)
            goto out;

        ret = IRQ_HANDLED;

        snb_gt_irq_handler(dev, dev_priv, gt_iir);

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        for_each_pipe(pipe) {
            int reg = PIPESTAT(pipe);
            pipe_stats[pipe] = I915_READ(reg);

            /*
             * Clear the PIPE*STAT regs before the IIR
             */
            if (pipe_stats[pipe] & 0x8000ffff) {
                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
                    DRM_DEBUG_DRIVER("pipe %c underrun\n",
                             pipe_name(pipe));
                I915_WRITE(reg, pipe_stats[pipe]);
            }
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

#if 0
        for_each_pipe(pipe) {
            if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
                drm_handle_vblank(dev, pipe);

            if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
                intel_prepare_page_flip(dev, pipe);
                intel_finish_page_flip(dev, pipe);
            }
        }
#endif

        /* Consume port.  Then clear IIR or we'll miss events */
        if (iir & I915_DISPLAY_PORT_INTERRUPT) {
            u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

            DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
                     hotplug_status);
//			if (hotplug_status & dev_priv->hotplug_supported_mask)
//				queue_work(dev_priv->wq,
//					   &dev_priv->hotplug_work);

            I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
            I915_READ(PORT_HOTPLUG_STAT);
        }

        for_each_pipe(pipe)
            if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
                blc_event = true;

//		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
//			gen6_queue_rps_work(dev_priv, pm_iir);

        I915_WRITE(GTIIR, gt_iir);
        I915_WRITE(GEN6_PMIIR, pm_iir);
        I915_WRITE(VLV_IIR, iir);
    }

out:
    return ret;
}
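
/* All three IIR banks read here (GT, PM, display) are write-one-to-clear:
 * writing back the value just read acknowledges exactly the events handled in
 * this pass, and the loop repeats until no new bits have latched.
 */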

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    int pipe;

    if (pch_iir & SDE_AUDIO_POWER_MASK)
        DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
                 (pch_iir & SDE_AUDIO_POWER_MASK) >>
                 SDE_AUDIO_POWER_SHIFT);

    if (pch_iir & SDE_GMBUS)
        DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");

    if (pch_iir & SDE_AUDIO_HDCP_MASK)
        DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

    if (pch_iir & SDE_AUDIO_TRANS_MASK)
        DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

    if (pch_iir & SDE_POISON)
        DRM_ERROR("PCH poison interrupt\n");

    if (pch_iir & SDE_FDI_MASK)
        for_each_pipe(pipe)
            DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
                     pipe_name(pipe),
                     I915_READ(FDI_RX_IIR(pipe)));

    if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
        DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

    if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
        DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

    if (pch_iir & SDE_TRANSB_FIFO_UNDER)
        DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
    if (pch_iir & SDE_TRANSA_FIFO_UNDER)
        DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    int pipe;

    if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
        DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
                 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
                 SDE_AUDIO_POWER_SHIFT_CPT);

    if (pch_iir & SDE_AUX_MASK_CPT)
        DRM_DEBUG_DRIVER("AUX channel interrupt\n");

    if (pch_iir & SDE_GMBUS_CPT)
        DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");

    if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
        DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

    if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
        DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

    if (pch_iir & SDE_FDI_MASK_CPT)
        for_each_pipe(pipe)
            DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
                     pipe_name(pipe),
                     I915_READ(FDI_RX_IIR(pipe)));
}

static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
{
    struct drm_device *dev = (struct drm_device *) arg;
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    u32 de_iir, gt_iir, de_ier, pm_iir;
    irqreturn_t ret = IRQ_NONE;
    int i;

    atomic_inc(&dev_priv->irq_received);

    /* disable master interrupt before clearing iir  */
    de_ier = I915_READ(DEIER);
    I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

    gt_iir = I915_READ(GTIIR);
    if (gt_iir) {
        snb_gt_irq_handler(dev, dev_priv, gt_iir);
        I915_WRITE(GTIIR, gt_iir);
        ret = IRQ_HANDLED;
    }

    de_iir = I915_READ(DEIIR);
    if (de_iir) {
#if 0
        if (de_iir & DE_GSE_IVB)
            intel_opregion_gse_intr(dev);

        for (i = 0; i < 3; i++) {
            if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
                drm_handle_vblank(dev, i);
            if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
                intel_prepare_page_flip(dev, i);
                intel_finish_page_flip_plane(dev, i);
            }
        }
#endif
        /* check event from PCH */
        if (de_iir & DE_PCH_EVENT_IVB) {
            u32 pch_iir = I915_READ(SDEIIR);

//			if (pch_iir & SDE_HOTPLUG_MASK_CPT)
//				queue_work(dev_priv->wq, &dev_priv->hotplug_work);
            cpt_irq_handler(dev, pch_iir);

            /* clear PCH hotplug event before clearing CPU irq */
            I915_WRITE(SDEIIR, pch_iir);
        }

        I915_WRITE(DEIIR, de_iir);
        ret = IRQ_HANDLED;
    }

    pm_iir = I915_READ(GEN6_PMIIR);
    if (pm_iir) {
//		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
//			gen6_queue_rps_work(dev_priv, pm_iir);
        I915_WRITE(GEN6_PMIIR, pm_iir);
        ret = IRQ_HANDLED;
    }

    I915_WRITE(DEIER, de_ier);
    POSTING_READ(DEIER);

    return ret;
}
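
/* Clearing DE_MASTER_IRQ_CONTROL in DEIER at the top gates all interrupts
 * while the IIR banks are drained; events that latch mid-handler re-assert
 * the line once the master bit is restored at the end.
 */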

static void ilk_gt_irq_handler(struct drm_device *dev,
                   struct drm_i915_private *dev_priv,
                   u32 gt_iir)
{
    if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
        notify_ring(dev, &dev_priv->ring[RCS]);
    if (gt_iir & GT_BSD_USER_INTERRUPT)
        notify_ring(dev, &dev_priv->ring[VCS]);
}

static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
{
    struct drm_device *dev = (struct drm_device *) arg;
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    int ret = IRQ_NONE;
    u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
    u32 hotplug_mask;

    atomic_inc(&dev_priv->irq_received);

    /* disable master interrupt before clearing iir  */
    de_ier = I915_READ(DEIER);
    I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
    POSTING_READ(DEIER);

    de_iir = I915_READ(DEIIR);
    gt_iir = I915_READ(GTIIR);
    pch_iir = I915_READ(SDEIIR);
    pm_iir = I915_READ(GEN6_PMIIR);

    if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
        (!IS_GEN6(dev) || pm_iir == 0))
        goto done;

    if (HAS_PCH_CPT(dev))
        hotplug_mask = SDE_HOTPLUG_MASK_CPT;
    else
        hotplug_mask = SDE_HOTPLUG_MASK;

    ret = IRQ_HANDLED;

    if (IS_GEN5(dev))
        ilk_gt_irq_handler(dev, dev_priv, gt_iir);
    else
        snb_gt_irq_handler(dev, dev_priv, gt_iir);
#if 0
    if (de_iir & DE_GSE)
        intel_opregion_gse_intr(dev);

    if (de_iir & DE_PIPEA_VBLANK)
        drm_handle_vblank(dev, 0);

    if (de_iir & DE_PIPEB_VBLANK)
        drm_handle_vblank(dev, 1);

    if (de_iir & DE_PLANEA_FLIP_DONE) {
        intel_prepare_page_flip(dev, 0);
        intel_finish_page_flip_plane(dev, 0);
    }

    if (de_iir & DE_PLANEB_FLIP_DONE) {
        intel_prepare_page_flip(dev, 1);
        intel_finish_page_flip_plane(dev, 1);
    }
#endif

    /* check event from PCH */
    if (de_iir & DE_PCH_EVENT) {
//		if (pch_iir & hotplug_mask)
//			queue_work(dev_priv->wq, &dev_priv->hotplug_work);
        if (HAS_PCH_CPT(dev))
            cpt_irq_handler(dev, pch_iir);
        else
            ibx_irq_handler(dev, pch_iir);
    }
#if 0
    if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
        ironlake_handle_rps_change(dev);

    if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
        gen6_queue_rps_work(dev_priv, pm_iir);
#endif
    /* should clear PCH hotplug event before clearing CPU irq */
    I915_WRITE(SDEIIR, pch_iir);
    I915_WRITE(GTIIR, gt_iir);
    I915_WRITE(DEIIR, de_iir);
    I915_WRITE(GEN6_PMIIR, pm_iir);

done:
    I915_WRITE(DEIER, de_ier);
    POSTING_READ(DEIER);

    return ret;
}


/* NB: please notice the memset */
static void i915_get_extra_instdone(struct drm_device *dev,
                    uint32_t *instdone)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

    switch(INTEL_INFO(dev)->gen) {
    case 2:
    case 3:
        instdone[0] = I915_READ(INSTDONE);
        break;
    case 4:
    case 5:
    case 6:
        instdone[0] = I915_READ(INSTDONE_I965);
        instdone[1] = I915_READ(INSTDONE1);
        break;
    default:
        WARN(1, "Unsupported platform\n");
    case 7:
        instdone[0] = I915_READ(GEN7_INSTDONE_1);
        instdone[1] = I915_READ(GEN7_SC_INSTDONE);
        instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
        instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
        break;
    }
}
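
/* NB: the default: label above intentionally falls through into case 7, so an
 * unrecognised future generation still gets the gen7 INSTDONE registers
 * captured after the WARN fires.
 */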

#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
             struct drm_i915_gem_object *src)
{
    struct drm_i915_error_object *dst;
    int i, count;
    u32 reloc_offset;

    if (src == NULL || src->pages == NULL)
        return NULL;

    count = src->base.size / PAGE_SIZE;

    dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC);
    if (dst == NULL)
        return NULL;

    reloc_offset = src->gtt_offset;
    for (i = 0; i < count; i++) {
        unsigned long flags;
        void *d;

        d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
        if (d == NULL)
            goto unwind;

        local_irq_save(flags);
        if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
            src->has_global_gtt_mapping) {
            void __iomem *s;

            /* Simply ignore tiling or any overlapping fence.
             * It's part of the error state, and this hopefully
             * captures what the GPU read.
             */

            s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
                             reloc_offset);
            memcpy_fromio(d, s, PAGE_SIZE);
            io_mapping_unmap_atomic(s);
        } else {
            struct page *page;
            void *s;

            page = i915_gem_object_get_page(src, i);

            drm_clflush_pages(&page, 1);

            s = kmap_atomic(page);
            memcpy(d, s, PAGE_SIZE);
            kunmap_atomic(s);

            drm_clflush_pages(&page, 1);
        }
        local_irq_restore(flags);

        dst->pages[i] = d;

        reloc_offset += PAGE_SIZE;
    }
    dst->page_count = count;
    dst->gtt_offset = src->gtt_offset;

    return dst;

unwind:
    while (i--)
        kfree(dst->pages[i]);
    kfree(dst);
    return NULL;
}

static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
    int page;

    if (obj == NULL)
        return;

    for (page = 0; page < obj->page_count; page++)
        kfree(obj->pages[page]);

    kfree(obj);
}

void
i915_error_state_free(struct kref *error_ref)
{
    struct drm_i915_error_state *error = container_of(error_ref,
                              typeof(*error), ref);
    int i;

    for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
        i915_error_object_free(error->ring[i].batchbuffer);
        i915_error_object_free(error->ring[i].ringbuffer);
        kfree(error->ring[i].requests);
    }

    kfree(error->active_bo);
    kfree(error->overlay);
    kfree(error);
}
static void capture_bo(struct drm_i915_error_buffer *err,
               struct drm_i915_gem_object *obj)
{
    err->size = obj->base.size;
    err->name = obj->base.name;
    err->rseqno = obj->last_read_seqno;
    err->wseqno = obj->last_write_seqno;
    err->gtt_offset = obj->gtt_offset;
    err->read_domains = obj->base.read_domains;
    err->write_domain = obj->base.write_domain;
    err->fence_reg = obj->fence_reg;
    err->pinned = 0;
    if (obj->pin_count > 0)
        err->pinned = 1;
    if (obj->user_pin_count > 0)
        err->pinned = -1;
    err->tiling = obj->tiling_mode;
    err->dirty = obj->dirty;
    err->purgeable = obj->madv != I915_MADV_WILLNEED;
    err->ring = obj->ring ? obj->ring->id : -1;
    err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
                 int count, struct list_head *head)
{
    struct drm_i915_gem_object *obj;
    int i = 0;

    list_for_each_entry(obj, head, mm_list) {
        capture_bo(err++, obj);
        if (++i == count)
            break;
    }

    return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
                 int count, struct list_head *head)
{
    struct drm_i915_gem_object *obj;
    int i = 0;

    list_for_each_entry(obj, head, gtt_list) {
        if (obj->pin_count == 0)
            continue;

        capture_bo(err++, obj);
        if (++i == count)
            break;
    }

    return i;
}

static void i915_gem_record_fences(struct drm_device *dev,
                   struct drm_i915_error_state *error)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    int i;

    /* Fences */
    switch (INTEL_INFO(dev)->gen) {
    case 7:
    case 6:
        for (i = 0; i < 16; i++)
            error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
        break;
    case 5:
    case 4:
        for (i = 0; i < 16; i++)
            error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
        break;
    case 3:
        if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
            for (i = 0; i < 8; i++)
                error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
    case 2:
        for (i = 0; i < 8; i++)
            error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
        break;

    }
}

static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
                 struct intel_ring_buffer *ring)
{
    struct drm_i915_gem_object *obj;
    u32 seqno;

    if (!ring->get_seqno)
        return NULL;

    seqno = ring->get_seqno(ring, false);
    list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
        if (obj->ring != ring)
            continue;

        if (i915_seqno_passed(seqno, obj->last_read_seqno))
            continue;

        if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
            continue;

        /* We need to copy these to an anonymous buffer as the simplest
         * method to avoid being overwritten by userspace.
         */
        return i915_error_object_create(dev_priv, obj);
    }

    return NULL;
}

static void i915_record_ring_state(struct drm_device *dev,
                   struct drm_i915_error_state *error,
                   struct intel_ring_buffer *ring)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    if (INTEL_INFO(dev)->gen >= 6) {
        error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
        error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
        error->semaphore_mboxes[ring->id][0]
            = I915_READ(RING_SYNC_0(ring->mmio_base));
        error->semaphore_mboxes[ring->id][1]
            = I915_READ(RING_SYNC_1(ring->mmio_base));
    }

    if (INTEL_INFO(dev)->gen >= 4) {
        error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
        error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
        error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
        error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
        error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
        if (ring->id == RCS)
            error->bbaddr = I915_READ64(BB_ADDR);
    } else {
        error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
        error->ipeir[ring->id] = I915_READ(IPEIR);
        error->ipehr[ring->id] = I915_READ(IPEHR);
        error->instdone[ring->id] = I915_READ(INSTDONE);
    }

    error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
    error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
    error->seqno[ring->id] = ring->get_seqno(ring, false);
    error->acthd[ring->id] = intel_ring_get_active_head(ring);
    error->head[ring->id] = I915_READ_HEAD(ring);
    error->tail[ring->id] = I915_READ_TAIL(ring);

    error->cpu_ring_head[ring->id] = ring->head;
    error->cpu_ring_tail[ring->id] = ring->tail;
}

static void i915_gem_record_rings(struct drm_device *dev,
                  struct drm_i915_error_state *error)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_ring_buffer *ring;
    struct drm_i915_gem_request *request;
    int i, count;

    for_each_ring(ring, dev_priv, i) {
        i915_record_ring_state(dev, error, ring);

        error->ring[i].batchbuffer =
            i915_error_first_batchbuffer(dev_priv, ring);

        error->ring[i].ringbuffer =
            i915_error_object_create(dev_priv, ring->obj);

        count = 0;
        list_for_each_entry(request, &ring->request_list, list)
            count++;

        error->ring[i].num_requests = count;
        error->ring[i].requests =
            kmalloc(count*sizeof(struct drm_i915_error_request),
                GFP_ATOMIC);
        if (error->ring[i].requests == NULL) {
            error->ring[i].num_requests = 0;
            continue;
        }

        count = 0;
        list_for_each_entry(request, &ring->request_list, list) {
            struct drm_i915_error_request *erq;

            erq = &error->ring[i].requests[count++];
            erq->seqno = request->seqno;
            erq->jiffies = request->emitted_jiffies;
            erq->tail = request->tail;
        }
    }
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error.  Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct drm_i915_gem_object *obj;
    struct drm_i915_error_state *error;
    unsigned long flags;
    int i, pipe;

    spin_lock_irqsave(&dev_priv->error_lock, flags);
    error = dev_priv->first_error;
    spin_unlock_irqrestore(&dev_priv->error_lock, flags);
    if (error)
        return;

    /* Account for pipe specific data like PIPE*STAT */
    error = kzalloc(sizeof(*error), GFP_ATOMIC);
    if (!error) {
        DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
        return;
    }

    DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
         dev->primary->index);

    kref_init(&error->ref);
    error->eir = I915_READ(EIR);
    error->pgtbl_er = I915_READ(PGTBL_ER);
    error->ccid = I915_READ(CCID);

    if (HAS_PCH_SPLIT(dev))
        error->ier = I915_READ(DEIER) | I915_READ(GTIER);
    else if (IS_VALLEYVIEW(dev))
        error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
    else if (IS_GEN2(dev))
        error->ier = I915_READ16(IER);
    else
        error->ier = I915_READ(IER);

    for_each_pipe(pipe)
        error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

    if (INTEL_INFO(dev)->gen >= 6) {
        error->error = I915_READ(ERROR_GEN6);
        error->done_reg = I915_READ(DONE_REG);
    }

    if (INTEL_INFO(dev)->gen == 7)
        error->err_int = I915_READ(GEN7_ERR_INT);

    i915_get_extra_instdone(dev, error->extra_instdone);

    i915_gem_record_fences(dev, error);
    i915_gem_record_rings(dev, error);

    /* Record buffers on the active and pinned lists. */
    error->active_bo = NULL;
    error->pinned_bo = NULL;

    i = 0;
    list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
        i++;
    error->active_bo_count = i;
    list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
        if (obj->pin_count)
            i++;
    error->pinned_bo_count = i - error->active_bo_count;

    error->active_bo = NULL;
    error->pinned_bo = NULL;
    if (i) {
        error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
                       GFP_ATOMIC);
        if (error->active_bo)
            error->pinned_bo =
                error->active_bo + error->active_bo_count;
    }

    if (error->active_bo)
        error->active_bo_count =
            capture_active_bo(error->active_bo,
                      error->active_bo_count,
                      &dev_priv->mm.active_list);

    if (error->pinned_bo)
        error->pinned_bo_count =
            capture_pinned_bo(error->pinned_bo,
                      error->pinned_bo_count,
                      &dev_priv->mm.bound_list);

    do_gettimeofday(&error->time);

    error->overlay = intel_overlay_capture_error_state(dev);
    error->display = intel_display_capture_error_state(dev);

    spin_lock_irqsave(&dev_priv->error_lock, flags);
    if (dev_priv->first_error == NULL) {
        dev_priv->first_error = error;
        error = NULL;
    }
    spin_unlock_irqrestore(&dev_priv->error_lock, flags);

    if (error)
        i915_error_state_free(&error->ref);
}
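
/* Only the first error state is kept: if another capture raced us, the
 * first_error check under error_lock fails and we free our own snapshot, so
 * userspace always sees the state from the original failure.
 */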

void i915_destroy_error_state(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct drm_i915_error_state *error;
    unsigned long flags;

    spin_lock_irqsave(&dev_priv->error_lock, flags);
    error = dev_priv->first_error;
    dev_priv->first_error = NULL;
    spin_unlock_irqrestore(&dev_priv->error_lock, flags);

    if (error)
        kref_put(&error->ref, i915_error_state_free);
}
#else
#define i915_capture_error_state(x)
#endif

static void i915_report_and_clear_eir(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    uint32_t instdone[I915_NUM_INSTDONE_REG];
    u32 eir = I915_READ(EIR);
    int pipe, i;

    if (!eir)
        return;

    pr_err("render error detected, EIR: 0x%08x\n", eir);

    i915_get_extra_instdone(dev, instdone);

    if (IS_G4X(dev)) {
        if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
            u32 ipeir = I915_READ(IPEIR_I965);

            pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
            pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
            for (i = 0; i < ARRAY_SIZE(instdone); i++)
                pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
            pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
            pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
            I915_WRITE(IPEIR_I965, ipeir);
            POSTING_READ(IPEIR_I965);
        }
        if (eir & GM45_ERROR_PAGE_TABLE) {
            u32 pgtbl_err = I915_READ(PGTBL_ER);
            pr_err("page table error\n");
            pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
            I915_WRITE(PGTBL_ER, pgtbl_err);
            POSTING_READ(PGTBL_ER);
        }
    }

    if (!IS_GEN2(dev)) {
        if (eir & I915_ERROR_PAGE_TABLE) {
            u32 pgtbl_err = I915_READ(PGTBL_ER);
            pr_err("page table error\n");
            pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
            I915_WRITE(PGTBL_ER, pgtbl_err);
            POSTING_READ(PGTBL_ER);
        }
    }

    if (eir & I915_ERROR_MEMORY_REFRESH) {
        pr_err("memory refresh error:\n");
        for_each_pipe(pipe)
            pr_err("pipe %c stat: 0x%08x\n",
                   pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
        /* pipestat has already been acked */
    }
    if (eir & I915_ERROR_INSTRUCTION) {
        pr_err("instruction error\n");
        pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
        for (i = 0; i < ARRAY_SIZE(instdone); i++)
            pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
        if (INTEL_INFO(dev)->gen < 4) {
            u32 ipeir = I915_READ(IPEIR);

            pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
            pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
            pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
            I915_WRITE(IPEIR, ipeir);
            POSTING_READ(IPEIR);
        } else {
            u32 ipeir = I915_READ(IPEIR_I965);

            pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
            pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
            pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
            pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
            I915_WRITE(IPEIR_I965, ipeir);
            POSTING_READ(IPEIR_I965);
        }
    }

    I915_WRITE(EIR, eir);
    POSTING_READ(EIR);
    eir = I915_READ(EIR);
    if (eir) {
        /*
         * some errors might have become stuck,
         * mask them.
         */
        DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
        I915_WRITE(EMR, I915_READ(EMR) | eir);
        I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
    }
}

/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_ring_buffer *ring;
    int i;

    i915_capture_error_state(dev);
    i915_report_and_clear_eir(dev);

    if (wedged) {
//		INIT_COMPLETION(dev_priv->error_completion);
        atomic_set(&dev_priv->mm.wedged, 1);

        /*
         * Wakeup waiting processes so they don't hang
         */
        for_each_ring(ring, dev_priv, i)
            wake_up_all(&ring->irq_queue);
    }

//	queue_work(dev_priv->wq, &dev_priv->error_work);
}
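
/* Setting mm.wedged before waking the irq_queue waiters matters: anyone
 * sleeping in a wait-for-seqno path re-checks the wedged flag on wakeup and
 * backs out with an error instead of sleeping forever on a dead GPU.
 */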

#if 0


static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    struct drm_i915_gem_object *obj;
    struct intel_unpin_work *work;
    unsigned long flags;
    bool stall_detected;

    /* Ignore early vblank irqs */
    if (intel_crtc == NULL)
        return;

    spin_lock_irqsave(&dev->event_lock, flags);
    work = intel_crtc->unpin_work;

    if (work == NULL || work->pending || !work->enable_stall_check) {
        /* Either the pending flip IRQ arrived, or we're too early. Don't check */
        spin_unlock_irqrestore(&dev->event_lock, flags);
        return;
    }

    /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
    obj = work->pending_flip_obj;
    if (INTEL_INFO(dev)->gen >= 4) {
        int dspsurf = DSPSURF(intel_crtc->plane);
        stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
                    obj->gtt_offset;
    } else {
        int dspaddr = DSPADDR(intel_crtc->plane);
        stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
                            crtc->y * crtc->fb->pitches[0] +
                            crtc->x * crtc->fb->bits_per_pixel/8);
    }

    spin_unlock_irqrestore(&dev->event_lock, flags);

    if (stall_detected) {
        DRM_DEBUG_DRIVER("Pageflip stall detected\n");
        intel_prepare_page_flip(dev, intel_crtc->plane);
    }
}

#endif

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    unsigned long irqflags;

    if (!i915_pipe_enabled(dev, pipe))
        return -EINVAL;

    spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
    if (INTEL_INFO(dev)->gen >= 4)
        i915_enable_pipestat(dev_priv, pipe,
                     PIPE_START_VBLANK_INTERRUPT_ENABLE);
    else
        i915_enable_pipestat(dev_priv, pipe,
                     PIPE_VBLANK_INTERRUPT_ENABLE);

    /* maintain vblank delivery even in deep C-states */
    if (dev_priv->info->gen == 3)
        I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
    spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

    return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    unsigned long irqflags;

    if (!i915_pipe_enabled(dev, pipe))
        return -EINVAL;

    spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
    ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
                    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
    spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

    return 0;
}

static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    unsigned long irqflags;

    if (!i915_pipe_enabled(dev, pipe))
        return -EINVAL;

    spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
    ironlake_enable_display_irq(dev_priv,
                    DE_PIPEA_VBLANK_IVB << (5 * pipe));
    spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

    return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    unsigned long irqflags;
    u32 imr;

    if (!i915_pipe_enabled(dev, pipe))
        return -EINVAL;

    spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
    imr = I915_READ(VLV_IMR);
    if (pipe == 0)
        imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
    else
        imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
    I915_WRITE(VLV_IMR, imr);
    i915_enable_pipestat(dev_priv, pipe,
                 PIPE_START_VBLANK_INTERRUPT_ENABLE);
    spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

    return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    unsigned long irqflags;

    spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
    if (dev_priv->info->gen == 3)
        I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));

    i915_disable_pipestat(dev_priv, pipe,
                  PIPE_VBLANK_INTERRUPT_ENABLE |
                  PIPE_START_VBLANK_INTERRUPT_ENABLE);
    spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    unsigned long irqflags;

    spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
    ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
                     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
    spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    unsigned long irqflags;

    spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
    ironlake_disable_display_irq(dev_priv,
                     DE_PIPEA_VBLANK_IVB << (pipe * 5));
    spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    unsigned long irqflags;
    u32 imr;

    spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
    i915_disable_pipestat(dev_priv, pipe,
                  PIPE_START_VBLANK_INTERRUPT_ENABLE);
    imr = I915_READ(VLV_IMR);
    if (pipe == 0)
        imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
    else
        imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
    I915_WRITE(VLV_IMR, imr);
    spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
    return list_entry(ring->request_list.prev,
              struct drm_i915_gem_request, list)->seqno;
}

/* drm_dma.h hooks
*/
static void ironlake_irq_preinstall(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

    atomic_set(&dev_priv->irq_received, 0);

    I915_WRITE(HWSTAM, 0xeffe);

    /* XXX hotplug from PCH */

    I915_WRITE(DEIMR, 0xffffffff);
    I915_WRITE(DEIER, 0x0);
    POSTING_READ(DEIER);

    /* and GT */
    I915_WRITE(GTIMR, 0xffffffff);
    I915_WRITE(GTIER, 0x0);
    POSTING_READ(GTIER);

    /* south display irq */
    I915_WRITE(SDEIMR, 0xffffffff);
    I915_WRITE(SDEIER, 0x0);
    POSTING_READ(SDEIER);
}

static void valleyview_irq_preinstall(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    int pipe;

    atomic_set(&dev_priv->irq_received, 0);

    /* VLV magic */
    I915_WRITE(VLV_IMR, 0);
    I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
    I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
    I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

    /* and GT */
    I915_WRITE(GTIIR, I915_READ(GTIIR));
    I915_WRITE(GTIIR, I915_READ(GTIIR));
    I915_WRITE(GTIMR, 0xffffffff);
    I915_WRITE(GTIER, 0x0);
    POSTING_READ(GTIER);

    I915_WRITE(DPINVGTT, 0xff);

    I915_WRITE(PORT_HOTPLUG_EN, 0);
    I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
    for_each_pipe(pipe)
        I915_WRITE(PIPESTAT(pipe), 0xffff);
    I915_WRITE(VLV_IIR, 0xffffffff);
    I915_WRITE(VLV_IMR, 0xffffffff);
    I915_WRITE(VLV_IER, 0x0);
    POSTING_READ(VLV_IER);
}

/*
 * Enable digital hotplug on the PCH, and configure the DP short pulse
 * duration to 2ms (which is the minimum in the Display Port spec)
 *
 * This register is the same on all known PCH chips.
 */

static void ironlake_enable_pch_hotplug(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    u32 hotplug;

    hotplug = I915_READ(PCH_PORT_HOTPLUG);
    hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
    hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
    hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
    hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
    I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    /* interrupts that are always left enabled */
    u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
               DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
    u32 render_irqs;
    u32 hotplug_mask;

    dev_priv->irq_mask = ~display_mask;

    /* these should always be able to generate an irq */
    I915_WRITE(DEIIR, I915_READ(DEIIR));
    I915_WRITE(DEIMR, dev_priv->irq_mask);
    I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
    POSTING_READ(DEIER);

    dev_priv->gt_irq_mask = ~0;

    I915_WRITE(GTIIR, I915_READ(GTIIR));
    I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

    if (IS_GEN6(dev))
        render_irqs =
            GT_USER_INTERRUPT |
            GEN6_BSD_USER_INTERRUPT |
            GEN6_BLITTER_USER_INTERRUPT;
    else
        render_irqs =
            GT_USER_INTERRUPT |
            GT_PIPE_NOTIFY |
            GT_BSD_USER_INTERRUPT;
    I915_WRITE(GTIER, render_irqs);
    POSTING_READ(GTIER);

    if (HAS_PCH_CPT(dev)) {
        hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
                SDE_PORTB_HOTPLUG_CPT |
                SDE_PORTC_HOTPLUG_CPT |
                SDE_PORTD_HOTPLUG_CPT);
    } else {
        hotplug_mask = (SDE_CRT_HOTPLUG |
                SDE_PORTB_HOTPLUG |
                SDE_PORTC_HOTPLUG |
                SDE_PORTD_HOTPLUG |
                SDE_AUX_MASK);
    }

    dev_priv->pch_irq_mask = ~hotplug_mask;

    I915_WRITE(SDEIIR, I915_READ(SDEIIR));
    I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
    I915_WRITE(SDEIER, hotplug_mask);
    POSTING_READ(SDEIER);

//    ironlake_enable_pch_hotplug(dev);

    if (IS_IRONLAKE_M(dev)) {
        /* Clear & enable PCU event interrupts */
        I915_WRITE(DEIIR, DE_PCU_EVENT);
        I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
        ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
    }

    return 0;
}
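
/* Postinstall ordering is deliberate: each IIR is cleared by writing back its
 * current contents before the corresponding IMR/IER are opened up, so no
 * stale event fires the moment the mask is lifted.
 */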

static int ivybridge_irq_postinstall(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    /* interrupts that are always left enabled */
    u32 display_mask =
        DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
        DE_PLANEC_FLIP_DONE_IVB |
        DE_PLANEB_FLIP_DONE_IVB |
        DE_PLANEA_FLIP_DONE_IVB;
    u32 render_irqs;
    u32 hotplug_mask;

    dev_priv->irq_mask = ~display_mask;

    /* these should always be able to generate an irq */
    I915_WRITE(DEIIR, I915_READ(DEIIR));
    I915_WRITE(DEIMR, dev_priv->irq_mask);
    I915_WRITE(DEIER,
           display_mask |
           DE_PIPEC_VBLANK_IVB |
           DE_PIPEB_VBLANK_IVB |
           DE_PIPEA_VBLANK_IVB);
    POSTING_READ(DEIER);

    dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;

    I915_WRITE(GTIIR, I915_READ(GTIIR));
    I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

    render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
        GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
    I915_WRITE(GTIER, render_irqs);
    POSTING_READ(GTIER);

    hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
            SDE_PORTB_HOTPLUG_CPT |
            SDE_PORTC_HOTPLUG_CPT |
            SDE_PORTD_HOTPLUG_CPT);
    dev_priv->pch_irq_mask = ~hotplug_mask;

    I915_WRITE(SDEIIR, I915_READ(SDEIIR));
    I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
    I915_WRITE(SDEIER, hotplug_mask);
    POSTING_READ(SDEIER);

//	ironlake_enable_pch_hotplug(dev);

    return 0;
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
	u16 msid;

	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	/*
	 * Leave vblank interrupts masked initially.  enable/disable will
	 * toggle them based on usage.
	 */
	dev_priv->irq_mask = (~enable_mask) |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	/* Hack for broken MSIs on VLV.  (0x94/0x98 appear to be the MSI
	 * address/data words of the device's MSI capability; 0xfee00000 is
	 * the architectural x86 MSI address window.) */
	pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
	pci_read_config_word(dev->pdev, 0x98, &msid);
	msid &= 0xff; /* mask out delivery bits */
	msid |= (1<<14);
	pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);

	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, enable_mask);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(PIPESTAT(0), 0xffff);
	I915_WRITE(PIPESTAT(1), 0xffff);
	POSTING_READ(VLV_IER);

	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
	i915_enable_pipestat(dev_priv, 1, pipestat_enable);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

	dev_priv->gt_irq_mask = ~0;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	I915_WRITE(GTIER, GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
		   GT_GEN6_BLT_CS_ERROR_INTERRUPT |
		   GT_GEN6_BLT_USER_INTERRUPT |
		   GT_GEN6_BSD_USER_INTERRUPT |
		   GT_GEN6_BSD_CS_ERROR_INTERRUPT |
		   GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
		   GT_PIPE_NOTIFY |
		   GT_RENDER_CS_ERROR_INTERRUPT |
		   GT_SYNC_STATUS |
		   GT_USER_INTERRUPT);
	POSTING_READ(GTIER);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
#if 0 /* FIXME: check register definitions; some have moved */
	/* Note HDMI and DP share bits */
	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMID_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
		hotplug_en |= SDVOC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
		hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
		hotplug_en |= CRT_HOTPLUG_INT_EN;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
	}
#endif

	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);

	return 0;
}
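
/*
 * Sketch of the on-demand vblank path the comment above refers to
 * (illustrative only; assumes the enable_vblank/i915_enable_pipestat
 * helpers presumably defined earlier in this file and referenced by
 * intel_irq_init() below):
 *
 *	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 *	i915_enable_pipestat(dev_priv, pipe,
 *			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
 *	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 */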


static void valleyview_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
}

#if 0

static void i8xx_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	return 0;
}


static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	unsigned long irqflags;
	int irq_received;
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    drm_handle_vblank(dev, 0)) {
			if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
				intel_prepare_page_flip(dev, 0);
				intel_finish_page_flip(dev, 0);
				flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
			}
		}

		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    drm_handle_vblank(dev, 1)) {
			if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
				intel_prepare_page_flip(dev, 1);
				intel_finish_page_flip(dev, 1);
				flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
			}
		}

		iir = new_iir;
	}

	return IRQ_HANDLED;
}
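
/*
 * Why IIR is acked with (iir & ~flip_mask) above: the flip-pending bits
 * are deliberately left latched in IIR until the vblank that completes
 * the flip has been processed; only then is the bit dropped from
 * flip_mask and cleared on the next pass.  (Reader's summary of the
 * code above, not authoritative hardware documentation.)
 */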

static void i8xx_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

#endif
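
/*
 * The i8xx routines above are compiled out (#if 0) in this port; the
 * #if 0 hook table in intel_irq_init() below is the only place that
 * would reference them.
 */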

static void i915_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;
#if 0
	if (I915_HAS_HOTPLUG(dev)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}
#endif

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	if (I915_HAS_HOTPLUG(dev)) {
		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
#if 0
		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMID_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
			hotplug_en |= CRT_HOTPLUG_INT_EN;
			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
		}
#endif
		/* Ignore TV since it's buggy */

		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}

//	intel_opregion_enable_asle(dev);

	return 0;
}

static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	u32 flip[2] = {
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
	};
	int pipe, ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
				  hotplug_status);
//			if (hotplug_status & dev_priv->hotplug_supported_mask)
//				queue_work(dev_priv->wq,
//					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			POSTING_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (IS_MOBILE(dev))
				plane = !plane;
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    drm_handle_vblank(dev, pipe)) {
				if (iir & flip[plane]) {
//					intel_prepare_page_flip(dev, plane);
//					intel_finish_page_flip(dev, pipe);
					flip_mask &= ~flip[plane];
				}
			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

//		if (blc_event || (iir & I915_ASLE_INTERRUPT))
//			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
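
/*
 * The do/while above is the "drain IIR" pattern the MSI comment
 * describes.  Skeleton form, for reference (illustrative only):
 *
 *	iir = I915_READ(IIR);
 *	do {
 *		I915_WRITE(IIR, iir);		// ack what we saw
 *		new_iir = I915_READ(IIR);	// flush and catch new bits
 *		... handle the bits in iir ...
 *		iir = new_iir;
 *	} while (iir);
 *
 * Acking before handling guarantees IIR passes through zero, so MSI
 * edges are generated for any source that fires mid-handler.
 */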

static void i915_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug_en;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	/* Note HDMI and DP share hotplug bits */
	hotplug_en = 0;
#if 0
	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMID_HOTPLUG_INT_EN;
	if (IS_G4X(dev)) {
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	} else {
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	}
	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
		hotplug_en |= CRT_HOTPLUG_INT_EN;

		/* Programming the CRT detection parameters tends
		   to generate a spurious hotplug event about three
		   seconds later.  So just do it once.
		   */
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
	}
#endif
	/* Ignore TV since it's buggy */

	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);

//	intel_opregion_enable_asle(dev);

	return 0;
}
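
/*
 * EMR semantics, as used above: a bit set in EMR masks that error
 * source, so writing ~(...) unmasks only the listed faults (page table
 * and memory refresh, plus the GM45 privilege errors on G4X).  Judging
 * by the handlers in this file, those faults then surface through the
 * I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT check into
 * i915_handle_error().  (Reading of this code, not a hardware claim.)
 */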

static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	for (;;) {
		bool blc_event = false;

		irq_received = iir != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
				  hotplug_status);
//			if (hotplug_status & dev_priv->hotplug_supported_mask)
//				queue_work(dev_priv->wq,
//					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

//		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
//			intel_prepare_page_flip(dev, 0);

//		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
//			intel_prepare_page_flip(dev, 1);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    drm_handle_vblank(dev, pipe)) {
//				i915_pageflip_stall_check(dev, pipe);
//				intel_finish_page_flip(dev, pipe);
			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

//		if (blc_event || (iir & I915_ASLE_INTERRUPT))
//			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
#if 0
//	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
//	INIT_WORK(&dev_priv->error_work, i915_error_work_func);
//	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
//	INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work);

	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

//	if (drm_core_check_feature(dev, DRIVER_MODESET))
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
//	else
//		dev->driver->get_vblank_timestamp = NULL;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
	} else if (IS_IVYBRIDGE(dev)) {
		/* Share pre & uninstall handlers with ILK/SNB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ivybridge_enable_vblank;
		dev->driver->disable_vblank = ivybridge_disable_vblank;
	} else if (IS_HASWELL(dev)) {
		/* Share interrupts handling with IVB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ivybridge_enable_vblank;
		dev->driver->disable_vblank = ivybridge_disable_vblank;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
#endif
}
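
/*
 * Note for this port: the whole hook table above is compiled out, so
 * intel_irq_init() is effectively a no-op here.  The Ironlake paths are
 * wired up by hand in drm_irq_install() below instead.
 */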


static struct drm_device *irq_device;

void irq_handler_kms(void)
{
//    printf("%s\n",__FUNCTION__);
    ironlake_irq_handler(irq_device);
}

int drm_irq_install(struct drm_device *dev)
{
    int irq_line;
    int ret = 0;

    ENTER();

    mutex_lock(&dev->struct_mutex);

    /* Driver must have been initialized */
    if (!dev->dev_private) {
        mutex_unlock(&dev->struct_mutex);
        return -EINVAL;
    }

    if (dev->irq_enabled) {
        mutex_unlock(&dev->struct_mutex);
        return -EBUSY;
    }
    dev->irq_enabled = 1;
    mutex_unlock(&dev->struct_mutex);

    irq_device = dev;
    irq_line   = drm_dev_to_irq(dev);

    DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));

    ironlake_irq_preinstall(dev);

    /* A zero return from AttachIntHandler() is treated as failure here */
    ret = AttachIntHandler(irq_line, irq_handler_kms, 2);
    if (ret == 0) {
        mutex_lock(&dev->struct_mutex);
        dev->irq_enabled = 0;
        mutex_unlock(&dev->struct_mutex);
        return -ENODEV;     /* don't report success on a failed attach */
    }

    ret = ironlake_irq_postinstall(dev);

//    if (ret < 0) {
//        mutex_lock(&dev->struct_mutex);
//        dev->irq_enabled = 0;
//        mutex_unlock(&dev->struct_mutex);
//        free_irq(drm_dev_to_irq(dev), dev);
//    }

    /* Clear the Interrupt Disable bit (bit 10) of the PCI command
     * register (config offset 4) so legacy INTx delivery is enabled. */
    u16_t cmd = PciRead16(dev->pdev->busnr, dev->pdev->devfn, 4);

    cmd &= ~(1 << 10);

    PciWrite16(dev->pdev->busnr, dev->pdev->devfn, 4, cmd);

    dbgprintf("PCI_CMD: %04x\n", cmd);

    DRM_INFO("i915: irq initialized.\n");
    LEAVE();
    return ret;
}
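
/*
 * Illustrative call sequence from a driver init path (a sketch; the
 * actual call site lives outside this file):
 *
 *	if (drm_irq_install(dev))
 *		DRM_ERROR("failed to install IRQ handler\n");
 */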