/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) ": " fmt

/* The bracketed header names were lost in the page export; the three
 * angle-bracket includes below are a best-guess reconstruction of this
 * file's usual dependencies. */
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"


#define pr_err(fmt, ...) \
        printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)


#define DRM_WAKEUP( queue ) wake_up( queue )
#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )

#define MAX_NOPID ((u32)~0)

/**
 * Interrupts that are always left unmasked.
 *
 * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
 * we leave them always unmasked in IMR and then control enabling them through
 * PIPESTAT alone.
 */
#define I915_INTERRUPT_ENABLE_FIX			\
	(I915_ASLE_INTERRUPT |				\
	 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |		\
	 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |		\
	 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |	\
	 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |	\
	 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)

/** Interrupts that we mask and unmask at runtime. */
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)

#define I915_PIPE_VBLANK_STATUS	(PIPE_START_VBLANK_INTERRUPT_STATUS |\
				 PIPE_VBLANK_INTERRUPT_STATUS)

#define I915_PIPE_VBLANK_ENABLE	(PIPE_START_VBLANK_INTERRUPT_ENABLE |\
				 PIPE_VBLANK_INTERRUPT_ENABLE)

#define DRM_I915_VBLANK_PIPE_ALL	(DRM_I915_VBLANK_PIPE_A | \
					 DRM_I915_VBLANK_PIPE_B)
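
/* DEIMR is shadowed in dev_priv->irq_mask: the helpers below read-modify-
 * write the cached copy and only touch the hardware register when a bit
 * actually changes; the POSTING_READ() flushes the posted MMIO write so
 * the new mask is in effect before the caller continues.
 */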

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
    if ((dev_priv->irq_mask & mask) != 0) {
        dev_priv->irq_mask &= ~mask;
        I915_WRITE(DEIMR, dev_priv->irq_mask);
        POSTING_READ(DEIMR);
    }
}

static inline void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
    if ((dev_priv->irq_mask & mask) != mask) {
        dev_priv->irq_mask |= mask;
        I915_WRITE(DEIMR, dev_priv->irq_mask);
        POSTING_READ(DEIMR);
    }
}

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
		POSTING_READ(reg);
	}
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] &= ~mask;
		I915_WRITE(reg, dev_priv->pipestat[pipe]);
		POSTING_READ(reg);
	}
}
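
/* PIPESTAT keeps each interrupt's enable bit in the high 16 bits and its
 * write-1-to-clear status bit exactly 16 bits below it, which is what the
 * "mask >> 16" in i915_enable_pipestat() relies on.  For example:
 *
 *     u32 enable = PIPE_VBLANK_INTERRUPT_ENABLE;          // bit 17
 *     u32 status = enable >> 16;                          // bit 1
 *     I915_WRITE(reg, dev_priv->pipestat[pipe] | status); // arm + ack
 */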

#if 0
/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	/* FIXME: opregion/asle for VLV */
	if (IS_VALLEYVIEW(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, DE_GSE);
	else {
		i915_enable_pipestat(dev_priv, 1,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);
		if (INTEL_INFO(dev)->gen >= 4)
			i915_enable_pipestat(dev_priv, 0,
					     PIPE_LEGACY_BLC_EVENT_ENABLE);
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
#endif

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				"pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}
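
/* The hardware splits the frame counter across two registers: the high
 * bits live in PIPEFRAME and the low 8 bits in PIPEFRAMEPIXEL, hence the
 * stable-read loop above and the (high1 << 8) | low recombination.
 */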

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
//   if (i915_enable_hangcheck) {
//       dev_priv->hangcheck_count = 0;
//       mod_timer(&dev_priv->hangcheck_timer,
//             jiffies +
//             msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
//   }
}

#if 0
static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir, pm_imr;
	u8 new_delay;

	spin_lock_irq(&dev_priv->rps.lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps.lock);

	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
		new_delay = dev_priv->rps.cur_delay + 1;
	else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (!(new_delay > dev_priv->rps.max_delay ||
	      new_delay < dev_priv->rps.min_delay)) {
		gen6_set_rps(dev_priv->dev, new_delay);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurs.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;
	unsigned long flags;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = "L3_PARITY_ERROR=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
}

static void ivybridge_handle_parity_error(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long flags;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

#endif

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
//    printf("%s\n", __FUNCTION__);

	if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
		      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GEN6_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

//	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
//		ivybridge_handle_parity_error(dev);
}
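
/* Each GT IIR bit maps directly onto a ring: render user interrupts wake
 * RCS, BSD wakes VCS and the blitter wakes BCS; command-stream errors from
 * any engine funnel into i915_handle_error() instead.
 */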

static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
				u32 pm_iir)
{
	unsigned long flags;

	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * catches a case where we have unsafely cleared
	 * dev_priv->rps.pm_iir. Missing an interrupt of the same type
	 * is not itself a problem, but it indicates a bug in the logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

//   queue_work(dev_priv->wq, &dev_priv->rps.work);
}
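
/* With the work queue call disabled in this port, the bit set in
 * GEN6_PMIMR keeps further RPS interrupts masked until dev_priv->rps.work
 * would normally clear it; the latched IIR bits survive in
 * dev_priv->rps.pm_iir in the meantime.
 */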

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];
	bool blc_event;

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

#if 0
		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}
#endif

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
//			if (hotplug_status & dev_priv->hotplug_supported_mask)
//				queue_work(dev_priv->wq,
//					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}
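
/* The handler above loops until VLV_IIR, GTIIR and GEN6_PMIIR all read
 * zero, and it acks the PIPE*STAT bits before writing IIR: pipe events
 * reach IIR edge-triggered, so clearing IIR first could drop an event
 * still pending in PIPESTAT.
 */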

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

    printf("%s\n", __FUNCTION__);

	if (pch_iir & SDE_AUDIO_POWER_MASK)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
				 SDE_AUDIO_POWER_SHIFT);

	if (pch_iir & SDE_GMBUS)
		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
				 SDE_AUDIO_POWER_SHIFT_CPT);

	if (pch_iir & SDE_AUX_MASK_CPT)
		DRM_DEBUG_DRIVER("AUX channel interrupt\n");

	if (pch_iir & SDE_GMBUS_CPT)
		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));
}

static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	int i;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		snb_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
#if 0
		if (de_iir & DE_GSE_IVB)
			intel_opregion_gse_intr(dev);

		for (i = 0; i < 3; i++) {
			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
				drm_handle_vblank(dev, i);
			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
				intel_prepare_page_flip(dev, i);
				intel_finish_page_flip_plane(dev, i);
			}
		}
#endif
		/* check event from PCH */
		if (de_iir & DE_PCH_EVENT_IVB) {
			u32 pch_iir = I915_READ(SDEIIR);

//			if (pch_iir & SDE_HOTPLUG_MASK_CPT)
//				queue_work(dev_priv->wq, &dev_priv->hotplug_work);
			cpt_irq_handler(dev, pch_iir);

			/* clear the PCH hotplug event before clearing the CPU IIR */
			I915_WRITE(SDEIIR, pch_iir);
		}

		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	pm_iir = I915_READ(GEN6_PMIIR);
	if (pm_iir) {
//		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
//			gen6_queue_rps_work(dev_priv, pm_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		ret = IRQ_HANDLED;
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);

	return ret;
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    int ret = IRQ_NONE;
    u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;

    atomic_inc(&dev_priv->irq_received);

    /* disable master interrupt before clearing iir */
    de_ier = I915_READ(DEIER);
    I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
    POSTING_READ(DEIER);

    de_iir = I915_READ(DEIIR);
    gt_iir = I915_READ(GTIIR);
    pch_iir = I915_READ(SDEIIR);
    pm_iir = I915_READ(GEN6_PMIIR);

    if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
        (!IS_GEN6(dev) || pm_iir == 0))
        goto done;

    ret = IRQ_HANDLED;

	if (IS_GEN5(dev))
		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	else
		snb_gt_irq_handler(dev, dev_priv, gt_iir);
#if 0
	if (de_iir & DE_GSE)
		intel_opregion_gse_intr(dev);

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}
#endif

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
//		if (pch_iir & hotplug_mask)
//			queue_work(dev_priv->wq, &dev_priv->hotplug_work);
		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);
	}
#if 0
	if (IS_GEN5(dev) &&  de_iir & DE_PCU_EVENT)
		ironlake_handle_rps_change(dev);

	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);
#endif
    /* clear the PCH hotplug event before clearing the CPU IIR */
    I915_WRITE(SDEIIR, pch_iir);
    I915_WRITE(GTIIR, gt_iir);
    I915_WRITE(DEIIR, de_iir);
    I915_WRITE(GEN6_PMIIR, pm_iir);

done:
    I915_WRITE(DEIER, de_ier);
    POSTING_READ(DEIER);

    return ret;
}
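
/* Both ironlake_irq_handler() and ivybridge_irq_handler() bracket their
 * IIR processing by clearing DE_MASTER_IRQ_CONTROL in DEIER, so no new
 * display interrupt can be signalled while the sticky IIR bits are being
 * acknowledged; DEIER is restored on the way out.
 */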


/* NB: please notice the memset */
static void i915_get_extra_instdone(struct drm_device *dev,
				    uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	switch(INTEL_INFO(dev)->gen) {
	case 2:
	case 3:
		instdone[0] = I915_READ(INSTDONE);
		break;
	case 4:
	case 5:
	case 6:
		instdone[0] = I915_READ(INSTDONE_I965);
		instdone[1] = I915_READ(INSTDONE1);
		break;
	default:
        WARN(1, "Unsupported platform\n");
	case 7:
		instdone[0] = I915_READ(GEN7_INSTDONE_1);
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
		break;
	}
}
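
/* The missing break after the WARN() above is deliberate: an unknown
 * (presumably newer) platform falls through to the gen7 INSTDONE layout
 * as the best available guess.
 */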

#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src)
{
	struct drm_i915_error_object *dst;
	int i, count;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	count = src->base.size / PAGE_SIZE;

	dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = src->gtt_offset;
	for (i = 0; i < count; i++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
		    src->has_global_gtt_mapping) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */

			s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = count;
	dst->gtt_offset = src->gtt_offset;

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}

static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

void
i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		kfree(error->ring[i].requests);
	}

	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error);
}

static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)
{
	err->size = obj->base.size;
	err->name = obj->base.name;
	err->rseqno = obj->last_read_seqno;
	err->wseqno = obj->last_write_seqno;
	err->gtt_offset = obj->gtt_offset;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (obj->pin_count > 0)
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, mm_list) {
		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, gtt_list) {
		if (obj->pin_count == 0)
			continue;

		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;

	}
}
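
/* The fall-through from case 3 into case 2 above is deliberate: 945-class
 * and G33 parts have eight extra fence registers (FENCE_REG_945_8) on top
 * of the eight gen2-style ones read in case 2.
 */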

static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	u32 seqno;

	if (!ring->get_seqno)
		return NULL;

	seqno = ring->get_seqno(ring, false);
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->ring != ring)
			continue;

		if (i915_seqno_passed(seqno, obj->last_read_seqno))
			continue;

		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
			continue;

		/* We need to copy these to an anonymous buffer as the simplest
		 * method to avoid being overwritten by userspace.
		 */
		return i915_error_object_create(dev_priv, obj);
	}

	return NULL;
}

static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
		error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
		error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		if (ring->id == RCS)
			error->bbaddr = I915_READ64(BB_ADDR);
	} else {
		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring, false);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);
	error->ctl[ring->id] = I915_READ_CTL(ring);

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;
}

static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *request;
	int i, count;

	for_each_ring(ring, dev_priv, i) {
		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
			i915_error_object_create(dev_priv, ring->obj);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kmalloc(count*sizeof(struct drm_i915_error_request),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error.  Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int i, pipe;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	error = dev_priv->first_error;
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
	if (error)
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
		 dev->primary->index);

	kref_init(&error->ref);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	error->ccid = I915_READ(CCID);

	if (HAS_PCH_SPLIT(dev))
		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
	else if (IS_VALLEYVIEW(dev))
		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
	else if (IS_GEN2(dev))
		error->ier = I915_READ16(IER);
	else
		error->ier = I915_READ(IER);

	if (INTEL_INFO(dev)->gen >= 6)
		error->derrmr = I915_READ(DERRMR);

	if (IS_VALLEYVIEW(dev))
		error->forcewake = I915_READ(FORCEWAKE_VLV);
	else if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ(FORCEWAKE_MT);
	else if (INTEL_INFO(dev)->gen == 6)
		error->forcewake = I915_READ(FORCEWAKE);

	for_each_pipe(pipe)
		error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	if (INTEL_INFO(dev)->gen == 7)
		error->err_int = I915_READ(GEN7_ERR_INT);

	i915_get_extra_instdone(dev, error->extra_instdone);

	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	/* Record buffers on the active and pinned lists. */
	error->active_bo = NULL;
	error->pinned_bo = NULL;

	i = 0;
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
		i++;
	error->active_bo_count = i;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
		if (obj->pin_count)
			i++;
	error->pinned_bo_count = i - error->active_bo_count;

	error->active_bo = NULL;
	error->pinned_bo = NULL;
	if (i) {
		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
					   GFP_ATOMIC);
		if (error->active_bo)
			error->pinned_bo =
				error->active_bo + error->active_bo_count;
	}

	if (error->active_bo)
		error->active_bo_count =
			capture_active_bo(error->active_bo,
					  error->active_bo_count,
					  &dev_priv->mm.active_list);

	if (error->pinned_bo)
		error->pinned_bo_count =
			capture_pinned_bo(error->pinned_bo,
					  error->pinned_bo_count,
					  &dev_priv->mm.bound_list);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	if (dev_priv->first_error == NULL) {
		dev_priv->first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	if (error)
		i915_error_state_free(&error->ref);
}
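
/* Only one error state is ever kept: dev_priv->first_error is a single
 * slot filled under error_lock, and a capture that loses the race frees
 * its own record through the kref instead of overwriting the first one.
 */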

void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	error = dev_priv->first_error;
	dev_priv->first_error = NULL;
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}
#else
#define i915_capture_error_state(x)
#endif

static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
//		INIT_COMPLETION(dev_priv->error_completion);
		atomic_set(&dev_priv->mm.wedged, 1);

		/*
		 * Wake up waiting processes so they don't hang
		 */
		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);
	}

//	queue_work(dev_priv->wq, &dev_priv->error_work);
}

#if 0


static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL ||
	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
	    !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
					obj->gtt_offset;
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}

#endif

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);

	/* maintain vblank delivery even in deep C-states */
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
				    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv,
				    DE_PIPEA_VBLANK_IVB << (5 * pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));

	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_ENABLE |
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
				     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv,
				     DE_PIPEA_VBLANK_IVB << (pipe * 5));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}

/* drm_dma.h hooks */
static void ironlake_irq_preinstall(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

    atomic_set(&dev_priv->irq_received, 0);

    I915_WRITE(HWSTAM, 0xeffe);

    /* XXX hotplug from PCH */

    I915_WRITE(DEIMR, 0xffffffff);
    I915_WRITE(DEIER, 0x0);
    POSTING_READ(DEIER);

    /* and GT */
    I915_WRITE(GTIMR, 0xffffffff);
    I915_WRITE(GTIER, 0x0);
    POSTING_READ(GTIER);

    /* south display irq */
    I915_WRITE(SDEIMR, 0xffffffff);
    I915_WRITE(SDEIER, 0x0);
    POSTING_READ(SDEIER);
}

static void valleyview_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/* and GT */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	I915_WRITE(DPINVGTT, 0xff);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

/*
 * Enable digital hotplug on the PCH, and configure the DP short pulse
 * duration to 2ms (which is the minimum in the Display Port spec)
 *
 * This register is the same on all known PCH chips.
 */

static void ironlake_enable_pch_hotplug(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32	hotplug;

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    /* the kinds of interrupts that are always enabled */
    u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
               DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
    u32 render_irqs;
    u32 hotplug_mask;

    dev_priv->irq_mask = ~display_mask;

    /* these should always be able to generate an irq */
    I915_WRITE(DEIIR, I915_READ(DEIIR));
    I915_WRITE(DEIMR, dev_priv->irq_mask);
    I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
    POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~0;

    I915_WRITE(GTIIR, I915_READ(GTIIR));
    I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

    if (IS_GEN6(dev))
        render_irqs =
            GT_USER_INTERRUPT |
			GEN6_BSD_USER_INTERRUPT |
			GEN6_BLITTER_USER_INTERRUPT;
    else
        render_irqs =
            GT_USER_INTERRUPT |
            GT_PIPE_NOTIFY |
            GT_BSD_USER_INTERRUPT;
    I915_WRITE(GTIER, render_irqs);
    POSTING_READ(GTIER);

    if (HAS_PCH_CPT(dev)) {
        hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
                SDE_PORTB_HOTPLUG_CPT |
                SDE_PORTC_HOTPLUG_CPT |
                SDE_PORTD_HOTPLUG_CPT);
    } else {
        hotplug_mask = (SDE_CRT_HOTPLUG |
                SDE_PORTB_HOTPLUG |
                SDE_PORTC_HOTPLUG |
                SDE_PORTD_HOTPLUG |
                SDE_AUX_MASK);
    }

    dev_priv->pch_irq_mask = ~hotplug_mask;

    I915_WRITE(SDEIIR, I915_READ(SDEIIR));
    I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
    I915_WRITE(SDEIER, hotplug_mask);
    POSTING_READ(SDEIER);

//    ironlake_enable_pch_hotplug(dev);

    if (IS_IRONLAKE_M(dev)) {
        /* Clear & enable PCU event interrupts */
        I915_WRITE(DEIIR, DE_PCU_EVENT);
        I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
//        ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
    }

    return 0;
}
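
/* All the postinstall routines follow the same sequence: ack stale bits
 * by writing IIR back to itself, load the cached mask into IMR, enable
 * the wanted sources in IER, then POSTING_READ() to flush the setup.
 */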

static int ivybridge_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* the kinds of interrupts that are always enabled */
	u32 display_mask =
		DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
		DE_PLANEC_FLIP_DONE_IVB |
		DE_PLANEB_FLIP_DONE_IVB |
		DE_PLANEA_FLIP_DONE_IVB;
	u32 render_irqs;
	u32 hotplug_mask;

	dev_priv->irq_mask = ~display_mask;

	/* these should always be able to generate an irq */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER,
		   display_mask |
		   DE_PIPEC_VBLANK_IVB |
		   DE_PIPEB_VBLANK_IVB |
		   DE_PIPEA_VBLANK_IVB);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
		GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
			SDE_PORTB_HOTPLUG_CPT |
			SDE_PORTC_HOTPLUG_CPT |
			SDE_PORTD_HOTPLUG_CPT);
	dev_priv->pch_irq_mask = ~hotplug_mask;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
	I915_WRITE(SDEIER, hotplug_mask);
	POSTING_READ(SDEIER);

//	ironlake_enable_pch_hotplug(dev);

	return 0;
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
	u32 render_irqs;
	u16 msid;

	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	/*
	 * Leave vblank interrupts masked initially.  enable/disable will
	 * toggle them based on usage.
	 */
	dev_priv->irq_mask = (~enable_mask) |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	/* Hack for broken MSIs on VLV */
//   pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
//   pci_read_config_word(dev->pdev, 0x98, &msid);
//   msid &= 0xff; /* mask out delivery bits */
//   msid |= (1<<14);
//   pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);

	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, enable_mask);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(PIPESTAT(0), 0xffff);
	I915_WRITE(PIPESTAT(1), 0xffff);
	POSTING_READ(VLV_IER);

	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
	i915_enable_pipestat(dev_priv, 1, pipestat_enable);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
		GEN6_BLITTER_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
#if 0 /* FIXME: check register definitions; some have moved */
	/* Note HDMI and DP share bits */
	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMID_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
		hotplug_en |= SDVOC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
		hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
		hotplug_en |= CRT_HOTPLUG_INT_EN;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
	}
#endif

	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);

	return 0;
}
1795
 
1796
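/* Editor's note: the config-space writes disabled above are the upstream
 * workaround for broken MSI on VLV; offsets 0x94 and 0x98 appear to be
 * the device's MSI address and data registers, pointed at the local-APIC
 * range 0xfee00000. This port attaches a legacy line interrupt instead
 * (see drm_irq_install below), so the hack stays disabled.
 */
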
static void valleyview_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
}

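/* Editor's note: teardown mirrors bring-up in reverse: mask everything
 * (IMR = ~0), disable the bank (IER = 0), then ack whatever is still
 * latched in IIR so a later re-install starts from a clean slate.
 */
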
#if 0

static void i8xx_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	return 0;
}

static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	unsigned long irqflags;
	int irq_received;
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    drm_handle_vblank(dev, 0)) {
			if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
				intel_prepare_page_flip(dev, 0);
				intel_finish_page_flip(dev, 0);
				flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
			}
		}

		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    drm_handle_vblank(dev, 1)) {
			if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
				intel_prepare_page_flip(dev, 1);
				intel_finish_page_flip(dev, 1);
				flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
			}
		}

		iir = new_iir;
	}

	return IRQ_HANDLED;
}

static void i8xx_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

#endif

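/* Editor's note: the handlers in this file read and ack PIPESTAT *before*
 * writing IIR because pipe events are edge-triggered from PIPESTAT into
 * IIR (see I915_INTERRUPT_ENABLE_FIX at the top of this file); acking IIR
 * first would lose the edge and could stall further pipe-event interrupts.
 */
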
static void i915_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;
#if 0
	if (I915_HAS_HOTPLUG(dev)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}
#endif

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	if (I915_HAS_HOTPLUG(dev)) {
		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
#if 0
		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMID_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
			hotplug_en |= CRT_HOTPLUG_INT_EN;
			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
		}
#endif
		/* Ignore TV since it's buggy */

		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}

//	intel_opregion_enable_asle(dev);

	return 0;
}

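/* Editor's note: the EMR write above leaves only page-table and
 * memory-refresh errors unmasked, so (presumably via the latched EIR
 * bits) those faults raise the error interrupt that the handler below
 * hands to i915_handle_error().
 */
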
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	u32 flip[2] = {
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
	};
	int pipe, ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
				  hotplug_status);
//			if (hotplug_status & dev_priv->hotplug_supported_mask)
//				queue_work(dev_priv->wq,
//					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			POSTING_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (IS_MOBILE(dev))
				plane = !plane;
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS /* &&
			    drm_handle_vblank(dev, pipe) */) {
				if (iir & flip[plane]) {
//					intel_prepare_page_flip(dev, plane);
//					intel_finish_page_flip(dev, pipe);
					flip_mask &= ~flip[plane];
				}
			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

//		if (blc_event || (iir & I915_ASLE_INTERRUPT))
//			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

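/* Editor's note: the handler above neither acks the flip-pending bits in
 * IIR nor lets them keep the loop alive (both sides use ~flip_mask); a
 * plane's bit is only dropped from flip_mask once a vblank shows the
 * pending flip has gone through.
 */
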
static void i915_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

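/* Editor's note: the preinstall hooks run before the interrupt line is
 * attached (see drm_irq_install below); they mask and ack everything so
 * that a stale status bit cannot fire the handler in mid-setup.
 */
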
static int i965_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug_en;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	/* Note HDMI and DP share hotplug bits */
	hotplug_en = 0;
#if 0
	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMID_HOTPLUG_INT_EN;
	if (IS_G4X(dev)) {
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	} else {
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	}
	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
		hotplug_en |= CRT_HOTPLUG_INT_EN;

		/* Programming the CRT detection parameters tends
		   to generate a spurious hotplug event about three
		   seconds later.  So just do it once.
		   */
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
	}
#endif
	/* Ignore TV since it's buggy */

	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);

//	intel_opregion_enable_asle(dev);

	return 0;
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	for (;;) {
		bool blc_event = false;

		irq_received = iir != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
				  hotplug_status);
//			if (hotplug_status & dev_priv->hotplug_supported_mask)
//				queue_work(dev_priv->wq,
//					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

//		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
//			intel_prepare_page_flip(dev, 0);

//		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
//			intel_prepare_page_flip(dev, 1);

		for_each_pipe(pipe) {
//			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
//			    drm_handle_vblank(dev, pipe)) {
//				i915_pageflip_stall_check(dev, pipe);
//				intel_finish_page_flip(dev, pipe);
//			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

//		if (blc_event || (iir & I915_ASLE_INTERRUPT))
//			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
	} else if (IS_IVYBRIDGE(dev)) {
		/* Share pre & uninstall handlers with ILK/SNB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
	} else if (IS_HASWELL(dev)) {
		/* Share interrupt handling with IVB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			/* i8xx entry points are compiled out above; hooks stay unset */
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
	}

    printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler);
}

irqreturn_t intel_irq_handler(struct drm_device *dev)
{
//    printf("i915 irq\n");

    /* Adapt the KolibriOS interrupt callback to the Linux-style handler
     * signature; the irq argument is unused by the handlers, so pass 0. */
    return dev->driver->irq_handler(0, dev);
}

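/* Editor's note: on gen2 hardware intel_irq_init() above leaves the hooks
 * unset (the i8xx entry points are compiled out), so this shim would jump
 * through a NULL irq_handler; gen2 is effectively unsupported by this port.
 */
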
int drm_irq_install(struct drm_device *dev)
{
    unsigned long sh_flags = 0;     /* unused in this port */
    int irq_line;
    int ret = 0;

    char *irqname;                  /* unused in this port */

    mutex_lock(&dev->struct_mutex);

    /* Driver must have been initialized */
    if (!dev->dev_private) {
            mutex_unlock(&dev->struct_mutex);
            return -EINVAL;
    }

    if (dev->irq_enabled) {
            mutex_unlock(&dev->struct_mutex);
            return -EBUSY;
    }
    dev->irq_enabled = 1;
    mutex_unlock(&dev->struct_mutex);

    irq_line = drm_dev_to_irq(dev);

    DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));

    /* Before installing handler */
    if (dev->driver->irq_preinstall)
            dev->driver->irq_preinstall(dev);

    ret = AttachIntHandler(irq_line, intel_irq_handler, (u32)dev);

    /* After installing handler */
    if (dev->driver->irq_postinstall)
            ret = dev->driver->irq_postinstall(dev);

    if (ret < 0) {
            DRM_ERROR("%s: postinstall failed\n", __FUNCTION__);
    }

    /* Clear the INTx Disable bit (bit 10) of the PCI command register so
     * legacy line interrupts are delivered to the CPU. */
    u16_t cmd = PciRead16(dev->pdev->busnr, dev->pdev->devfn, 4);
    cmd &= ~(1<<10);
    PciWrite16(dev->pdev->busnr, dev->pdev->devfn, 4, cmd);

    return ret;
}
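
/* Editor's sketch: how the pieces above fit together on this port. A
 * hypothetical bring-up sequence, under the assumption that the real
 * entry point elsewhere in the driver does the equivalent; the function
 * name is illustrative only.
 */
#if 0
static int example_irq_bringup(struct drm_device *dev)
{
    int ret;

    intel_irq_init(dev);        /* pick handlers for this GPU generation */
    ret = drm_irq_install(dev); /* preinstall -> attach -> postinstall   */
    if (ret < 0)
        return ret;

    return 0;
}
#endif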
2539