/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) ": " fmt

/* The original angle-bracket include targets were lost in extraction;
 * the headers below are a plausible reconstruction for this era of the
 * driver, not confirmed against the repository. */
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"


#define pr_err(fmt, ...) \
        printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)


#define DRM_WAKEUP( queue ) wake_up( queue )
#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )

#define MAX_NOPID ((u32)~0)


/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
    if ((dev_priv->irq_mask & mask) != 0) {
        dev_priv->irq_mask &= ~mask;
        I915_WRITE(DEIMR, dev_priv->irq_mask);
        POSTING_READ(DEIMR);
    }
}

static inline void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
    if ((dev_priv->irq_mask & mask) != mask) {
        dev_priv->irq_mask |= mask;
        I915_WRITE(DEIMR, dev_priv->irq_mask);
        POSTING_READ(DEIMR);
    }
}
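
/* PIPESTAT keeps the enable bits in its high half and the
 * write-1-to-clear status bits in its low half, so writing the enable
 * mask together with (mask >> 16) also clears any pending status for
 * the events being enabled.
 */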
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
		POSTING_READ(reg);
	}
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] &= ~mask;
		I915_WRITE(reg, dev_priv->pipestat[pipe]);
		POSTING_READ(reg);
	}
}

#if 0
/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	/* FIXME: opregion/asle for VLV */
	if (IS_VALLEYVIEW(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, DE_GSE);
	else {
		i915_enable_pipestat(dev_priv, 1,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);
		if (INTEL_INFO(dev)->gen >= 4)
			i915_enable_pipestat(dev_priv, 0,
					     PIPE_LEGACY_BLC_EVENT_ENABLE);
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
#endif

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				"pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}
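
/* g4x and later have a dedicated hardware frame counter register, so
 * the two-part high/low read dance above is not needed.
 */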
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->hot_plug)
			encoder->hot_plug(encoder);

	mutex_unlock(&mode_config->mutex);

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}
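
/* A ring reported a completed request: trace the new seqno and wake
 * anyone sleeping on the ring's irq_queue. The hangcheck timer re-arm
 * is stubbed out in this port.
 */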
static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
//   if (i915_enable_hangcheck) {
//       dev_priv->hangcheck_count = 0;
//       mod_timer(&dev_priv->hangcheck_timer,
//             jiffies +
//             msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
//   }
}

#if 0
static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir, pm_imr;
	u8 new_delay;

	spin_lock_irq(&dev_priv->rps.lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps.lock);

	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
		new_delay = dev_priv->rps.cur_delay + 1;
	else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (!(new_delay > dev_priv->rps.max_delay ||
	      new_delay < dev_priv->rps.min_delay)) {
		gen6_set_rps(dev_priv->dev, new_delay);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;
	unsigned long flags;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = "L3_PARITY_ERROR=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
}

static void ivybridge_handle_parity_error(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long flags;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

#endif
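
/* Fan the GT interrupt bits out to the render, video and blitter
 * rings, and raise the error handler on command-stream errors.
 */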
static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
//    printf("%s\n", __FUNCTION__);

	if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
		      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GEN6_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

//	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
//		ivybridge_handle_parity_error(dev);
}

static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
				u32 pm_iir)
{
	unsigned long flags;

	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * displays a case where we've unsafely cleared
	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it displays a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

//   queue_work(dev_priv->wq, &dev_priv->rps.work);
}
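
/* GMBUS and DP AUX transfers sleep on gmbus_wait_queue, so both
 * interrupt sources only need to wake the waiters.
 */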
static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}
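
/* Top-level IRQ handler for ValleyView: keep looping until VLV_IIR,
 * GTIIR and GEN6_PMIIR all read back zero, handling GT, pipe-status,
 * hotplug and GMBUS events on each pass, and clear the IIR registers
 * last so edge-triggered events are not lost.
 */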
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

#if 0
		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}
#endif

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

//        if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
//            gen6_queue_rps_work(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}
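
/* Decode south-display (PCH) interrupts on Ibex Peak: hotplug kicks
 * the hotplug work queue, AUX and GMBUS wake their waiters, the rest
 * is only logged.
 */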
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (pch_iir & SDE_HOTPLUG_MASK)
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);

	if (pch_iir & SDE_AUDIO_POWER_MASK)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
				 SDE_AUDIO_POWER_SHIFT);

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}
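
/* CougarPoint/PantherPoint variant of the PCH interrupt decode. */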
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (pch_iir & SDE_HOTPLUG_MASK_CPT)
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
				 SDE_AUDIO_POWER_SHIFT_CPT);

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));
}
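
/* Display-engine IRQ handler for Ivy Bridge. South (SDEIER) interrupts
 * are masked for the duration so SDEIIR is written exactly once per
 * pass and nothing queued behind it is lost.
 */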
static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
	irqreturn_t ret = IRQ_NONE;
	int i;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	sde_ier = I915_READ(SDEIER);
	I915_WRITE(SDEIER, 0);
	POSTING_READ(SDEIER);

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		snb_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (de_iir & DE_AUX_CHANNEL_A_IVB)
			dp_aux_irq_handler(dev);
#if 0
		if (de_iir & DE_GSE_IVB)
			intel_opregion_gse_intr(dev);

		for (i = 0; i < 3; i++) {
			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
				drm_handle_vblank(dev, i);
			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
				intel_prepare_page_flip(dev, i);
				intel_finish_page_flip_plane(dev, i);
			}
		}
#endif
		/* check event from PCH */
		if (de_iir & DE_PCH_EVENT) {
			u32 pch_iir = I915_READ(SDEIIR);

			cpt_irq_handler(dev, pch_iir);

			/* clear PCH hotplug event before clear CPU irq */
			I915_WRITE(SDEIIR, pch_iir);
		}

		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	pm_iir = I915_READ(GEN6_PMIIR);
	if (pm_iir) {
//		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
//			gen6_queue_rps_work(dev_priv, pm_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		ret = IRQ_HANDLED;
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	I915_WRITE(SDEIER, sde_ier);
	POSTING_READ(SDEIER);

	return ret;
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}
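
/* Shared top-level handler for Ironlake and Sandy Bridge, using the
 * same master-disable and SDEIER save/restore sequence as the Ivy
 * Bridge path above.
 */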
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	sde_ier = I915_READ(SDEIER);
	I915_WRITE(SDEIER, 0);
	POSTING_READ(SDEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pm_iir = I915_READ(GEN6_PMIIR);

	if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
		goto done;

	ret = IRQ_HANDLED;

	if (IS_GEN5(dev))
		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	else
		snb_gt_irq_handler(dev, dev_priv, gt_iir);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

#if 0
	if (de_iir & DE_GSE)
		intel_opregion_gse_intr(dev);

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}
#endif

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
#if 0
	if (IS_GEN5(dev) &&  de_iir & DE_PCU_EVENT)
		ironlake_handle_rps_change(dev);

	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);
#endif
	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);

done:
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	I915_WRITE(SDEIER, sde_ier);
	POSTING_READ(SDEIER);

	return ret;
}


/* NB: please notice the memset */
static void i915_get_extra_instdone(struct drm_device *dev,
				    uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	switch(INTEL_INFO(dev)->gen) {
	case 2:
	case 3:
		instdone[0] = I915_READ(INSTDONE);
		break;
	case 4:
	case 5:
	case 6:
		instdone[0] = I915_READ(INSTDONE_I965);
		instdone[1] = I915_READ(INSTDONE1);
		break;
	default:
		WARN_ONCE(1, "Unsupported platform\n");
	case 7:
		instdone[0] = I915_READ(GEN7_INSTDONE_1);
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
		break;
	}
}

#ifdef CONFIG_DEBUG_FS
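/* Snapshot an object's pages into kernel memory for the error state:
 * read through the GTT when the object has a global GTT mapping, via
 * a direct mapping for stolen memory, or through a CPU copy of each
 * page otherwise.
 */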
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src)
{
	struct drm_i915_error_object *dst;
	int i, count;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	count = src->base.size / PAGE_SIZE;

	dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = src->gtt_offset;
	for (i = 0; i < count; i++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (reloc_offset < dev_priv->gtt.mappable_end &&
		    src->has_global_gtt_mapping) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */

			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else if (src->stolen) {
			unsigned long offset;

			offset = dev_priv->mm.stolen_base;
			offset += src->stolen->start;
			offset += i << PAGE_SHIFT;

			memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = count;
	dst->gtt_offset = src->gtt_offset;

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}

static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

void
i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		kfree(error->ring[i].requests);
	}

	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error);
}

static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)
{
	err->size = obj->base.size;
	err->name = obj->base.name;
	err->rseqno = obj->last_read_seqno;
	err->wseqno = obj->last_write_seqno;
	err->gtt_offset = obj->gtt_offset;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (obj->pin_count > 0)
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}
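
/* Record up to 'count' buffers from a bo list into the error state;
 * the pinned variant skips objects with a zero pin_count.
 */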

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, mm_list) {
		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, gtt_list) {
		if (obj->pin_count == 0)
			continue;

		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}
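
/*
 * Dump the fence registers; the layout is generation specific: gen4+
 * fences are 64-bit, gen2/3 use 32-bit registers, and 945G/945GM/G33
 * carry eight extra fences. Note that case 3 falls through to case 2
 * on purpose to pick up the common low eight registers.
 */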
static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;

	default:
		BUG();
	}
}
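
/*
 * Best-effort guess at the batch that hung. On parts with the broken CS
 * TLB workaround the batch executes from a scratch object, so ACTHD is
 * checked against that first; otherwise we take the first still-active
 * command buffer on this ring whose seqno has not been retired.
 */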
static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	u32 seqno;

	if (!ring->get_seqno)
		return NULL;

	if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
		u32 acthd = I915_READ(ACTHD);

		if (WARN_ON(ring->id != RCS))
			return NULL;

		obj = ring->private;
		if (acthd >= obj->gtt_offset &&
		    acthd < obj->gtt_offset + obj->base.size)
			return i915_error_object_create(dev_priv, obj);
	}

	seqno = ring->get_seqno(ring, false);
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->ring != ring)
			continue;

		if (i915_seqno_passed(seqno, obj->last_read_seqno))
			continue;

		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
			continue;

		/* We need to copy these to an anonymous buffer as the simplest
		 * method to avoid being overwritten by userspace.
		 */
		return i915_error_object_create(dev_priv, obj);
	}

	return NULL;
}
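
/*
 * Capture the per-ring MMIO state at hang time: IPEIR/IPEHR, INSTDONE,
 * ACTHD, head/tail/control, and on gen6+ the semaphore mailboxes and
 * fault register as well.
 */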
static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
		error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
		error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		if (ring->id == RCS)
			error->bbaddr = I915_READ64(BB_ADDR);
	} else {
		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring, false);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);
	error->ctl[ring->id] = I915_READ_CTL(ring);

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;
}
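
/*
 * For each ring: record the register state, copy out the suspect batch
 * and the ringbuffer contents, and list the outstanding requests. The
 * allocation is GFP_ATOMIC since this may run from interrupt context.
 */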
static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *request;
	int i, count;

	for_each_ring(ring, dev_priv, i) {
		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
			i915_error_object_create(dev_priv, ring->obj);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kmalloc(count*sizeof(struct drm_i915_error_request),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error.  Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int i, pipe;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
	if (error)
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	DRM_INFO("capturing error event; look for more information in "
		 "/sys/kernel/debug/dri/%d/i915_error_state\n",
		 dev->primary->index);

	kref_init(&error->ref);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	error->ccid = I915_READ(CCID);

	if (HAS_PCH_SPLIT(dev))
		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
	else if (IS_VALLEYVIEW(dev))
		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
	else if (IS_GEN2(dev))
		error->ier = I915_READ16(IER);
	else
		error->ier = I915_READ(IER);

	if (INTEL_INFO(dev)->gen >= 6)
		error->derrmr = I915_READ(DERRMR);

	if (IS_VALLEYVIEW(dev))
		error->forcewake = I915_READ(FORCEWAKE_VLV);
	else if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ(FORCEWAKE_MT);
	else if (INTEL_INFO(dev)->gen == 6)
		error->forcewake = I915_READ(FORCEWAKE);

	for_each_pipe(pipe)
		error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	if (INTEL_INFO(dev)->gen == 7)
		error->err_int = I915_READ(GEN7_ERR_INT);

	i915_get_extra_instdone(dev, error->extra_instdone);

	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	/* Record buffers on the active and pinned lists. */
	error->active_bo = NULL;
	error->pinned_bo = NULL;

	i = 0;
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
		i++;
	error->active_bo_count = i;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
		if (obj->pin_count)
			i++;
	error->pinned_bo_count = i - error->active_bo_count;

	error->active_bo = NULL;
	error->pinned_bo = NULL;
	if (i) {
		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
					   GFP_ATOMIC);
		if (error->active_bo)
			error->pinned_bo =
				error->active_bo + error->active_bo_count;
	}

	if (error->active_bo)
		error->active_bo_count =
			capture_active_bo(error->active_bo,
					  error->active_bo_count,
					  &dev_priv->mm.active_list);

	if (error->pinned_bo)
		error->pinned_bo_count =
			capture_pinned_bo(error->pinned_bo,
					  error->pinned_bo_count,
					  &dev_priv->mm.bound_list);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	if (dev_priv->gpu_error.first_error == NULL) {
		dev_priv->gpu_error.first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		i915_error_state_free(&error->ref);
}
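
/* Detach the stored error state, if any, and drop our reference to it. */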
void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}
#else
#define i915_capture_error_state(x)
#endif
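
/*
 * Decode EIR into the kernel log and write the handled bits back to
 * clear them; anything still set afterwards is a stuck error bit and
 * gets masked off in EMR instead.
 */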
static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset work item
		 * doesn't deadlock trying to grab various locks.
		 */
		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);
	}

//	queue_work(dev_priv->wq, &dev_priv->error_work);
}
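
/*
 * Compiled out in this port: detects a missed pageflip interrupt by
 * checking whether the scanout base already points at the flip target,
 * and completes the flip by hand if so.
 */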
#if 0

static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL ||
	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
	    !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
					obj->gtt_offset;
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}

#endif
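
/*
 * vblank enable/disable hooks. Pre-PCH parts toggle PIPESTAT enables,
 * Ironlake/Ivybridge use the display engine IMR, and Valleyview needs
 * both VLV_IMR and PIPESTAT. Gen3 additionally keeps the AGP busy bit
 * clear while vblanks are wanted so delivery survives deep C-states.
 */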
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);

	/* maintain vblank delivery even in deep C-states */
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
1397
static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
1468
static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
1398
{
1469
{
1399
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1470
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1400
	unsigned long irqflags;
1471
	unsigned long irqflags;
1401
 
1472
 
1402
	if (!i915_pipe_enabled(dev, pipe))
1473
	if (!i915_pipe_enabled(dev, pipe))
1403
		return -EINVAL;
1474
		return -EINVAL;
1404
 
1475
 
1405
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1476
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1406
	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1477
	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1407
				    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
1478
				    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
1408
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1479
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1409
 
1480
 
1410
	return 0;
1481
	return 0;
1411
}
1482
}
1412
 
1483
 
1413
static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
1484
static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
1414
{
1485
{
1415
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1486
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1416
	unsigned long irqflags;
1487
	unsigned long irqflags;
1417
 
1488
 
1418
	if (!i915_pipe_enabled(dev, pipe))
1489
	if (!i915_pipe_enabled(dev, pipe))
1419
		return -EINVAL;
1490
		return -EINVAL;
1420
 
1491
 
1421
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1492
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1422
	ironlake_enable_display_irq(dev_priv,
1493
	ironlake_enable_display_irq(dev_priv,
1423
				    DE_PIPEA_VBLANK_IVB << (5 * pipe));
1494
				    DE_PIPEA_VBLANK_IVB << (5 * pipe));
1424
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1495
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1425
 
1496
 
1426
	return 0;
1497
	return 0;
1427
}
1498
}
1428
 
1499
 
1429
static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
1500
static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
1430
{
1501
{
1431
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1502
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1432
	unsigned long irqflags;
1503
	unsigned long irqflags;
1433
	u32 imr;
1504
	u32 imr;
1434
 
1505
 
1435
	if (!i915_pipe_enabled(dev, pipe))
1506
	if (!i915_pipe_enabled(dev, pipe))
1436
		return -EINVAL;
1507
		return -EINVAL;
1437
 
1508
 
1438
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1509
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1439
	imr = I915_READ(VLV_IMR);
1510
	imr = I915_READ(VLV_IMR);
1440
	if (pipe == 0)
1511
	if (pipe == 0)
1441
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1512
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1442
	else
1513
	else
1443
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1514
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1444
	I915_WRITE(VLV_IMR, imr);
1515
	I915_WRITE(VLV_IMR, imr);
1445
	i915_enable_pipestat(dev_priv, pipe,
1516
	i915_enable_pipestat(dev_priv, pipe,
1446
			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
1517
			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
1447
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1518
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1448
 
1519
 
1449
	return 0;
1520
	return 0;
1450
}
1521
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));

	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_ENABLE |
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
				     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv,
				     DE_PIPEA_VBLANK_IVB << (pipe * 5));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
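
/* Seqno of the most recently emitted request on this ring (list tail). */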
static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}

/* drm_dma.h hooks
*/
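
/*
 * Preinstall: mask and disable every interrupt source (display engine,
 * GT and the south/PCH block) before the handler is installed.
 */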
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	POSTING_READ(SDEIER);
}

static void valleyview_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/* and GT */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	I915_WRITE(DPINVGTT, 0xff);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

/*
 * Enable digital hotplug on the PCH, and configure the DP short pulse
 * duration to 2ms (which is the minimum in the Display Port spec)
 *
 * This register is the same on all known PCH chips.
 */
static void ibx_enable_hotplug(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32	hotplug;

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
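
/*
 * Unmask hotplug, GMBUS and AUX interrupts on the south display engine;
 * IBX and CPT/PPT use different bit layouts for the same events.
 */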
static void ibx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 mask;

	if (HAS_PCH_IBX(dev))
		mask = SDE_HOTPLUG_MASK |
		       SDE_GMBUS |
		       SDE_AUX_MASK;
	else
		mask = SDE_HOTPLUG_MASK_CPT |
		       SDE_GMBUS_CPT |
		       SDE_AUX_MASK_CPT;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, ~mask);
	I915_WRITE(SDEIER, mask);
	POSTING_READ(SDEIER);

	ibx_enable_hotplug(dev);
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable the kinds of interrupts that are always enabled */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
			   DE_AUX_CHANNEL_A;
	u32 render_irqs;

	dev_priv->irq_mask = ~display_mask;

	/* should always be able to generate irqs */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~0;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	if (IS_GEN6(dev))
		render_irqs =
			GT_USER_INTERRUPT |
			GEN6_BSD_USER_INTERRUPT |
			GEN6_BLITTER_USER_INTERRUPT;
	else
		render_irqs =
			GT_USER_INTERRUPT |
			GT_PIPE_NOTIFY |
			GT_BSD_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Clear & enable PCU event interrupts */
		I915_WRITE(DEIIR, DE_PCU_EVENT);
		I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
	}

	return 0;
}
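
/*
 * Ivybridge variant of the Ironlake flow, using the IVB interrupt bits
 * and additionally unmasking the GT L3 parity error interrupt.
 */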
static int ivybridge_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable the kinds of interrupts that are always enabled */
	u32 display_mask =
		DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
		DE_PLANEC_FLIP_DONE_IVB |
		DE_PLANEB_FLIP_DONE_IVB |
		DE_PLANEA_FLIP_DONE_IVB |
		DE_AUX_CHANNEL_A_IVB;
	u32 render_irqs;

	dev_priv->irq_mask = ~display_mask;

	/* should always be able to generate irqs */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER,
		   display_mask |
		   DE_PIPEC_VBLANK_IVB |
		   DE_PIPEB_VBLANK_IVB |
		   DE_PIPEA_VBLANK_IVB);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
		GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	ibx_irq_postinstall(dev);

	return 0;
}
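
/*
 * Valleyview gates display interrupts behind VLV_IER/VLV_IMR plus the
 * per-pipe PIPESTAT enables; vblank bits start out masked here and are
 * toggled by the enable/disable hooks above.
 */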

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
	u32 render_irqs;
	u16 msid;

	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	/*
	 * Leave vblank interrupts masked initially; enable/disable will
	 * toggle them based on usage.
	 */
	dev_priv->irq_mask = (~enable_mask) |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	/* Hack for broken MSIs on VLV */
//   pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
//   pci_read_config_word(dev->pdev, 0x98, &msid);
//   msid &= 0xff; /* mask out delivery bits */
//   msid |= (1<<14);
//   pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, enable_mask);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(PIPESTAT(0), 0xffff);
	I915_WRITE(PIPESTAT(1), 0xffff);
	POSTING_READ(VLV_IER);

	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
	i915_enable_pipestat(dev_priv, 1, pipestat_enable);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
		GEN6_BLITTER_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}
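
/*
 * VLV_MASTER_IER is written last on purpose: MASTER_INTERRUPT_ENABLE
 * gates every other interrupt source on Valleyview, so the per-source
 * setup above must be complete before the master switch is thrown.
 */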

static void valleyview_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);

	/* Note HDMI and DP share bits */
	if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
		hotplug_en |= PORTB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
		hotplug_en |= PORTC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
		hotplug_en |= PORTD_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
		hotplug_en |= SDVOC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
		hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
		hotplug_en |= CRT_HOTPLUG_INT_EN;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
	}

	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}
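
/*
 * Hotplug-detect enables are deliberately left out of postinstall and
 * programmed later through the dev_priv->display.hpd_irq_setup hook
 * (see intel_hpd_init() near the end of this file), presumably once
 * output probing has filled dev_priv->hotplug_supported_mask.
 */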

static void valleyview_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
}

#if 0

static void i8xx_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	return 0;
}

static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	unsigned long irqflags;
	int irq_received;
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    drm_handle_vblank(dev, 0)) {
			if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
				intel_prepare_page_flip(dev, 0);
				intel_finish_page_flip(dev, 0);
				flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
			}
		}

		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    drm_handle_vblank(dev, 1)) {
			if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
				intel_prepare_page_flip(dev, 1);
				intel_finish_page_flip(dev, 1);
				flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
			}
		}

		iir = new_iir;
	}

	return IRQ_HANDLED;
}
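
/*
 * flip_mask keeps the handler loop from spinning on flip-pending bits:
 * IIR is acked with (iir & ~flip_mask), so a pending flip stays visible
 * until the vblank that retires it, at which point the bit is dropped
 * from the mask and acked on the next pass.
 */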

static void i8xx_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

#endif

static void i915_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
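
/*
 * The preinstall hooks quiesce the chip before a handler is attached:
 * every source is masked, IER is zeroed and the sticky status
 * registers are acked, so no stale event can fire the moment the IRQ
 * line is connected by drm_irq_install().
 */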

static int i915_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

//	intel_opregion_enable_asle(dev);

	return 0;
}

static void i915_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug_en;

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);

		if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
			hotplug_en |= PORTB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
			hotplug_en |= PORTC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
			hotplug_en |= PORTD_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
			hotplug_en |= CRT_HOTPLUG_INT_EN;
			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
		}

		/* Ignore TV since it's buggy */

		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}
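
/*
 * A PIPESTAT register packs enable bits in its upper half and the
 * matching status bits in its lower half (bit 31, FIFO underrun, is a
 * status exception, which is why the handlers ack with the 0x8000ffff
 * mask).  Writing a status bit back to the register clears it.
 */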

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	u32 flip[2] = {
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
	};
	int pipe, ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
				  hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			POSTING_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (IS_MOBILE(dev))
				plane = !plane;
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS /* &&
			    drm_handle_vblank(dev, pipe) */) {
				if (iir & flip[plane]) {
//					intel_prepare_page_flip(dev, plane);
//					intel_finish_page_flip(dev, pipe);
					flip_mask &= ~flip[plane];
				}
			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

//		if (blc_event || (iir & I915_ASLE_INTERRUPT))
//			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
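
/*
 * The plane = !plane swap above reflects mobile gen3 parts, where the
 * mapping of display planes to pipes is reversed, so a flip-pending
 * bit in IIR has to be matched against the other pipe's vblank.
 */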

static void i915_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

//	intel_opregion_enable_asle(dev);

	return 0;
}
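
/*
 * PIPE_GMBUS_EVENT_ENABLE routes GMBUS (i2c) completion events through
 * the pipe A status register; the matching PIPE_GMBUS_INTERRUPT_STATUS
 * check in i965_irq_handler() below hands them to gmbus_irq_handler().
 */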

static void i965_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug_en;

	/* Note HDMI and DP share hotplug bits */
	hotplug_en = 0;
	if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
		hotplug_en |= PORTB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
		hotplug_en |= PORTC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
		hotplug_en |= PORTD_HOTPLUG_INT_EN;
	if (IS_G4X(dev)) {
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	} else {
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	}
	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
		hotplug_en |= CRT_HOTPLUG_INT_EN;

		/* Programming the CRT detection parameters tends
		   to generate a spurious hotplug event about three
		   seconds later.  So just do it once.
		   */
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
	}

	/* Ignore TV since it's buggy */

	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	for (;;) {
		bool blc_event = false;

		irq_received = iir != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
				  hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

//		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
//			intel_prepare_page_flip(dev, 0);

//		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
//			intel_prepare_page_flip(dev, 1);

		for_each_pipe(pipe) {
//           if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
//               drm_handle_vblank(dev, pipe)) {
//				i915_pageflip_stall_check(dev, pipe);
//				intel_finish_page_flip(dev, pipe);
//           }

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

//		if (blc_event || (iir & I915_ASLE_INTERRUPT))
//			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
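
/*
 * Unlike the gen3 handler, i965_irq_handler() also services the BSD
 * (video) ring: I915_BSD_USER_INTERRUPT is unmasked in postinstall on
 * G4X, and completions are signalled to dev_priv->ring[VCS].
 */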

static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);

//	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev_priv->display.hpd_irq_setup = valleyview_hpd_irq_setup;
	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
		/* Share pre & uninstall handlers with ILK/SNB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i965_hpd_irq_setup;
		}
	}
}
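
/*
 * The per-generation hooks are dispatched through the drm_driver
 * vtable: drm_irq_install() below runs irq_preinstall, attaches
 * intel_irq_handler() to the interrupt line, then runs
 * irq_postinstall.
 */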

void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
}
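
/*
 * intel_hpd_init() is expected to run after output probing, once
 * dev_priv->hotplug_supported_mask says which ports exist, so the
 * hpd_irq_setup hook only arms detection for real connectors.
 */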

irqreturn_t intel_irq_handler(struct drm_device *dev)
{

//    printf("i915 irq\n");

//    printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ;

    return dev->driver->irq_handler(0, dev);
}

int drm_irq_install(struct drm_device *dev)
{
    unsigned long sh_flags = 0;
    int irq_line;
    int ret = 0;

    char *irqname;

    mutex_lock(&dev->struct_mutex);

    /* Driver must have been initialized */
    if (!dev->dev_private) {
            mutex_unlock(&dev->struct_mutex);
            return -EINVAL;
    }

    if (dev->irq_enabled) {
            mutex_unlock(&dev->struct_mutex);
            return -EBUSY;
    }
    dev->irq_enabled = 1;
    mutex_unlock(&dev->struct_mutex);

    irq_line   = drm_dev_to_irq(dev);

    DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));

    /* Before installing handler */
    if (dev->driver->irq_preinstall)
            dev->driver->irq_preinstall(dev);

    ret = AttachIntHandler(irq_line, intel_irq_handler, (u32)dev);

    /* After installing handler */
    if (dev->driver->irq_postinstall)
            ret = dev->driver->irq_postinstall(dev);

    if (ret < 0) {
            DRM_ERROR(__FUNCTION__);
    }

    u16_t cmd = PciRead16(dev->pdev->busnr, dev->pdev->devfn, 4);
    cmd &= ~(1<<10);
    PciWrite16(dev->pdev->busnr, dev->pdev->devfn, 4, cmd);

    return ret;
}
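
/*
 * Bit 10 of the PCI command register (offset 4) is the INTx
 * "Interrupt Disable" flag; clearing it above re-enables legacy
 * line-based interrupts, which this port relies on since the MSI
 * setup is commented out.
 */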