Subversion Repositories: KolibriOS


i915_irq.c — Rev 3746 → Rev 4104 (Rev 4104 text shown)
1
/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
1
/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2
 */
2
 */
3
/*
3
/*
4
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
4
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5
 * All Rights Reserved.
5
 * All Rights Reserved.
6
 *
6
 *
7
 * Permission is hereby granted, free of charge, to any person obtaining a
7
 * Permission is hereby granted, free of charge, to any person obtaining a
8
 * copy of this software and associated documentation files (the
8
 * copy of this software and associated documentation files (the
9
 * "Software"), to deal in the Software without restriction, including
9
 * "Software"), to deal in the Software without restriction, including
10
 * without limitation the rights to use, copy, modify, merge, publish,
10
 * without limitation the rights to use, copy, modify, merge, publish,
11
 * distribute, sub license, and/or sell copies of the Software, and to
11
 * distribute, sub license, and/or sell copies of the Software, and to
12
 * permit persons to whom the Software is furnished to do so, subject to
12
 * permit persons to whom the Software is furnished to do so, subject to
13
 * the following conditions:
13
 * the following conditions:
14
 *
14
 *
15
 * The above copyright notice and this permission notice (including the
15
 * The above copyright notice and this permission notice (including the
16
 * next paragraph) shall be included in all copies or substantial portions
16
 * next paragraph) shall be included in all copies or substantial portions
17
 * of the Software.
17
 * of the Software.
18
 *
18
 *
19
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
 *
26
 *
27
 */
27
 */
28
 
28
 
29
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
29
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
 
30
 
31
#include 
31
#include 
32
#include 
32
#include 
33
#include 
33
#include 
34
#include "i915_drv.h"
34
#include "i915_drv.h"
35
#include "i915_trace.h"
35
#include "i915_trace.h"
36
#include "intel_drv.h"
36
#include "intel_drv.h"
-
 
37
 
-
 
38
#define assert_spin_locked(a)
37
 
39
 
38
static const u32 hpd_ibx[] = {
40
static const u32 hpd_ibx[] = {
39
	[HPD_CRT] = SDE_CRT_HOTPLUG,
41
	[HPD_CRT] = SDE_CRT_HOTPLUG,
40
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
42
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
41
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
43
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
42
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
44
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
43
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
45
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
44
};
46
};
45
 
47
 
46
static const u32 hpd_cpt[] = {
48
static const u32 hpd_cpt[] = {
47
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
49
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
48
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
50
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
49
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
51
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
50
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
52
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
51
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
53
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
52
};
54
};
53
 
55
 
54
static const u32 hpd_mask_i915[] = {
56
static const u32 hpd_mask_i915[] = {
55
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
57
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
56
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
58
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
57
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
59
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
58
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
60
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
59
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
61
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
60
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
62
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
61
};
63
};
62
 
64
 
63
static const u32 hpd_status_gen4[] = {
65
static const u32 hpd_status_gen4[] = {
64
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
66
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
65
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
67
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
66
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
68
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
67
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
69
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
68
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
70
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
69
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
71
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
70
};
72
};
71
 
-
 
72
static const u32 hpd_status_i965[] = {
-
 
73
	 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
-
 
74
	 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I965,
-
 
75
	 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I965,
-
 
76
	 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
-
 
77
	 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
-
 
78
	 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
-
 
79
};
-
 
80
 
73
 
81
static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
74
static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
82
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
75
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
83
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
76
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
84
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
77
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
85
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
78
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
86
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
79
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
87
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
80
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
88
};
81
};
89
 
-
 
90
static void ibx_hpd_irq_setup(struct drm_device *dev);
-
 
91
static void i915_hpd_irq_setup(struct drm_device *dev);
82
 
92
 
83
 
93
#define pr_err(fmt, ...) \
84
#define pr_err(fmt, ...) \
94
        printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
85
        printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
95
 
86
 
96
 
87
 
97
#define DRM_WAKEUP( queue ) wake_up( queue )
88
#define DRM_WAKEUP( queue ) wake_up( queue )
98
#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )
89
#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )
99
 
90
 
100
#define MAX_NOPID ((u32)~0)
91
#define MAX_NOPID ((u32)~0)
101
 
92
 
102
 
93
 
103
 
94
 
104
/* For display hotplug interrupt */
95
/* For display hotplug interrupt */
105
static void
96
static void
106
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
97
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
107
{
98
{
-
 
99
	assert_spin_locked(&dev_priv->irq_lock);
-
 
100
 
-
 
101
	if (dev_priv->pc8.irqs_disabled) {
-
 
102
		WARN(1, "IRQs disabled\n");
-
 
103
		dev_priv->pc8.regsave.deimr &= ~mask;
-
 
104
		return;
-
 
105
	}
-
 
106
 
108
    if ((dev_priv->irq_mask & mask) != 0) {
107
    if ((dev_priv->irq_mask & mask) != 0) {
109
        dev_priv->irq_mask &= ~mask;
108
        dev_priv->irq_mask &= ~mask;
110
        I915_WRITE(DEIMR, dev_priv->irq_mask);
109
        I915_WRITE(DEIMR, dev_priv->irq_mask);
111
        POSTING_READ(DEIMR);
110
        POSTING_READ(DEIMR);
112
    }
111
    }
113
}
112
}
114
 
113
 
115
static void
114
static void
116
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
115
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
117
{
116
{
-
 
117
	assert_spin_locked(&dev_priv->irq_lock);
-
 
118
 
-
 
119
	if (dev_priv->pc8.irqs_disabled) {
-
 
120
		WARN(1, "IRQs disabled\n");
-
 
121
		dev_priv->pc8.regsave.deimr |= mask;
-
 
122
		return;
-
 
123
	}
-
 
124
 
118
    if ((dev_priv->irq_mask & mask) != mask) {
125
    if ((dev_priv->irq_mask & mask) != mask) {
119
        dev_priv->irq_mask |= mask;
126
        dev_priv->irq_mask |= mask;
120
        I915_WRITE(DEIMR, dev_priv->irq_mask);
127
        I915_WRITE(DEIMR, dev_priv->irq_mask);
121
        POSTING_READ(DEIMR);
128
        POSTING_READ(DEIMR);
122
    }
129
    }
123
}
130
}
-
 
131
 
-
 
132
/**
-
 
133
 * ilk_update_gt_irq - update GTIMR
-
 
134
 * @dev_priv: driver private
-
 
135
 * @interrupt_mask: mask of interrupt bits to update
-
 
136
 * @enabled_irq_mask: mask of interrupt bits to enable
-
 
137
 */
-
 
138
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
-
 
139
			      uint32_t interrupt_mask,
-
 
140
			      uint32_t enabled_irq_mask)
-
 
141
{
-
 
142
	assert_spin_locked(&dev_priv->irq_lock);
-
 
143
 
-
 
144
	if (dev_priv->pc8.irqs_disabled) {
-
 
145
		WARN(1, "IRQs disabled\n");
-
 
146
		dev_priv->pc8.regsave.gtimr &= ~interrupt_mask;
-
 
147
		dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask &
-
 
148
						interrupt_mask);
-
 
149
		return;
-
 
150
	}
-
 
151
 
-
 
152
	dev_priv->gt_irq_mask &= ~interrupt_mask;
-
 
153
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
-
 
154
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-
 
155
	POSTING_READ(GTIMR);
-
 
156
}
-
 
157
 
-
 
158
void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
-
 
159
{
-
 
160
	ilk_update_gt_irq(dev_priv, mask, mask);
-
 
161
}
-
 
162
 
-
 
163
void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
-
 
164
{
-
 
165
	ilk_update_gt_irq(dev_priv, mask, 0);
-
 
166
}
-
 
167
 
-
 
168
/**
-
 
169
  * snb_update_pm_irq - update GEN6_PMIMR
-
 
170
  * @dev_priv: driver private
-
 
171
  * @interrupt_mask: mask of interrupt bits to update
-
 
172
  * @enabled_irq_mask: mask of interrupt bits to enable
-
 
173
  */
-
 
174
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
-
 
175
			      uint32_t interrupt_mask,
-
 
176
			      uint32_t enabled_irq_mask)
-
 
177
{
-
 
178
	uint32_t new_val;
-
 
179
 
-
 
180
	assert_spin_locked(&dev_priv->irq_lock);
-
 
181
 
-
 
182
	if (dev_priv->pc8.irqs_disabled) {
-
 
183
		WARN(1, "IRQs disabled\n");
-
 
184
		dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask;
-
 
185
		dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask &
-
 
186
						     interrupt_mask);
-
 
187
		return;
-
 
188
	}
-
 
189
 
-
 
190
	new_val = dev_priv->pm_irq_mask;
-
 
191
	new_val &= ~interrupt_mask;
-
 
192
	new_val |= (~enabled_irq_mask & interrupt_mask);
-
 
193
 
-
 
194
	if (new_val != dev_priv->pm_irq_mask) {
-
 
195
		dev_priv->pm_irq_mask = new_val;
-
 
196
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
-
 
197
		POSTING_READ(GEN6_PMIMR);
-
 
198
	}
-
 
199
}
-
 
200
 
-
 
201
void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
-
 
202
{
-
 
203
	snb_update_pm_irq(dev_priv, mask, mask);
-
 
204
}
-
 
205
 
-
 
206
void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
-
 
207
{
-
 
208
	snb_update_pm_irq(dev_priv, mask, 0);
-
 
209
}
-
 
210
 
-
 
211
static bool ivb_can_enable_err_int(struct drm_device *dev)
-
 
212
{
-
 
213
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
214
	struct intel_crtc *crtc;
-
 
215
	enum pipe pipe;
-
 
216
 
-
 
217
	assert_spin_locked(&dev_priv->irq_lock);
-
 
218
 
-
 
219
	for_each_pipe(pipe) {
-
 
220
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
-
 
221
 
-
 
222
		if (crtc->cpu_fifo_underrun_disabled)
-
 
223
			return false;
-
 
224
	}
-
 
225
 
-
 
226
	return true;
-
 
227
}
-
 
228
 
-
 
229
static bool cpt_can_enable_serr_int(struct drm_device *dev)
-
 
230
{
-
 
231
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
232
	enum pipe pipe;
-
 
233
	struct intel_crtc *crtc;
-
 
234
 
-
 
235
	assert_spin_locked(&dev_priv->irq_lock);
-
 
236
 
-
 
237
	for_each_pipe(pipe) {
-
 
238
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
-
 
239
 
-
 
240
		if (crtc->pch_fifo_underrun_disabled)
-
 
241
			return false;
-
 
242
	}
-
 
243
 
-
 
244
	return true;
-
 
245
}
-
 
246
 
-
 
247
static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
-
 
248
						 enum pipe pipe, bool enable)
-
 
249
{
-
 
250
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
251
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
-
 
252
					  DE_PIPEB_FIFO_UNDERRUN;
-
 
253
 
-
 
254
	if (enable)
-
 
255
		ironlake_enable_display_irq(dev_priv, bit);
-
 
256
	else
-
 
257
		ironlake_disable_display_irq(dev_priv, bit);
-
 
258
}
-
 
259
 
-
 
260
static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
-
 
261
						  enum pipe pipe, bool enable)
-
 
262
{
-
 
263
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
264
	if (enable) {
-
 
265
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
-
 
266
 
-
 
267
		if (!ivb_can_enable_err_int(dev))
-
 
268
			return;
-
 
269
 
-
 
270
		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
-
 
271
	} else {
-
 
272
		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);
-
 
273
 
-
 
274
		/* Change the state _after_ we've read out the current one. */
-
 
275
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
-
 
276
 
-
 
277
		if (!was_enabled &&
-
 
278
		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
-
 
279
			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
-
 
280
				      pipe_name(pipe));
-
 
281
	}
-
 
282
}
-
 
283
}
-
 
284
 
-
 
285
/**
-
 
286
 * ibx_display_interrupt_update - update SDEIMR
-
 
287
 * @dev_priv: driver private
-
 
288
 * @interrupt_mask: mask of interrupt bits to update
-
 
289
 * @enabled_irq_mask: mask of interrupt bits to enable
-
 
290
 */
-
 
291
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
-
 
292
					 uint32_t interrupt_mask,
-
 
293
					 uint32_t enabled_irq_mask)
-
 
294
{
-
 
295
	uint32_t sdeimr = I915_READ(SDEIMR);
-
 
296
	sdeimr &= ~interrupt_mask;
-
 
297
	sdeimr |= (~enabled_irq_mask & interrupt_mask);
-
 
298
 
-
 
299
	assert_spin_locked(&dev_priv->irq_lock);
-
 
300
 
-
 
301
	if (dev_priv->pc8.irqs_disabled &&
-
 
302
	    (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
-
 
303
		WARN(1, "IRQs disabled\n");
-
 
304
		dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask;
-
 
305
		dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask &
-
 
306
						 interrupt_mask);
-
 
307
		return;
-
 
308
	}
-
 
309
 
-
 
310
	I915_WRITE(SDEIMR, sdeimr);
-
 
311
	POSTING_READ(SDEIMR);
-
 
312
}
-
 
313
#define ibx_enable_display_interrupt(dev_priv, bits) \
-
 
314
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
-
 
315
#define ibx_disable_display_interrupt(dev_priv, bits) \
-
 
316
	ibx_display_interrupt_update((dev_priv), (bits), 0)
-
 
317
 
-
 
318
static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
-
 
319
					    enum transcoder pch_transcoder,
-
 
320
					    bool enable)
-
 
321
{
-
 
322
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
323
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
-
 
324
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
-
 
325
 
-
 
326
	if (enable)
-
 
327
		ibx_enable_display_interrupt(dev_priv, bit);
-
 
328
	else
-
 
329
		ibx_disable_display_interrupt(dev_priv, bit);
-
 
330
}
-
 
331
 
-
 
332
static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
-
 
333
					    enum transcoder pch_transcoder,
-
 
334
					    bool enable)
-
 
335
{
-
 
336
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
337
 
-
 
338
	if (enable) {
-
 
339
		I915_WRITE(SERR_INT,
-
 
340
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
-
 
341
 
-
 
342
		if (!cpt_can_enable_serr_int(dev))
-
 
343
			return;
-
 
344
 
-
 
345
		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
-
 
346
	} else {
-
 
347
		uint32_t tmp = I915_READ(SERR_INT);
-
 
348
		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);
-
 
349
 
-
 
350
		/* Change the state _after_ we've read out the current one. */
-
 
351
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
-
 
352
 
-
 
353
		if (!was_enabled &&
-
 
354
		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
-
 
355
			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
-
 
356
				      transcoder_name(pch_transcoder));
-
 
357
		}
-
 
358
	}
-
 
359
}
-
 
360
 
-
 
361
/**
-
 
362
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
-
 
363
 * @dev: drm device
-
 
364
 * @pipe: pipe
-
 
365
 * @enable: true if we want to report FIFO underrun errors, false otherwise
-
 
366
 *
-
 
367
 * This function makes us disable or enable CPU fifo underruns for a specific
-
 
368
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
-
 
369
 * reporting for one pipe may also disable all the other CPU error interruts for
-
 
370
 * the other pipes, due to the fact that there's just one interrupt mask/enable
-
 
371
 * bit for all the pipes.
-
 
372
 *
-
 
373
 * Returns the previous state of underrun reporting.
-
 
374
 */
-
 
375
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
-
 
376
					   enum pipe pipe, bool enable)
-
 
377
{
-
 
378
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
379
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
-
 
380
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
 
381
	unsigned long flags;
-
 
382
	bool ret;
-
 
383
 
-
 
384
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-
 
385
 
-
 
386
	ret = !intel_crtc->cpu_fifo_underrun_disabled;
-
 
387
 
-
 
388
	if (enable == ret)
-
 
389
		goto done;
-
 
390
 
-
 
391
	intel_crtc->cpu_fifo_underrun_disabled = !enable;
-
 
392
 
-
 
393
	if (IS_GEN5(dev) || IS_GEN6(dev))
-
 
394
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
-
 
395
	else if (IS_GEN7(dev))
-
 
396
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
-
 
397
 
-
 
398
done:
-
 
399
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
 
400
	return ret;
-
 
401
}
-
 
402
 
-
 
403
/**
-
 
404
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
-
 
405
 * @dev: drm device
-
 
406
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
-
 
407
 * @enable: true if we want to report FIFO underrun errors, false otherwise
-
 
408
 *
-
 
409
 * This function makes us disable or enable PCH fifo underruns for a specific
-
 
410
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
-
 
411
 * underrun reporting for one transcoder may also disable all the other PCH
-
 
412
 * error interruts for the other transcoders, due to the fact that there's just
-
 
413
 * one interrupt mask/enable bit for all the transcoders.
-
 
414
 *
-
 
415
 * Returns the previous state of underrun reporting.
-
 
416
 */
-
 
417
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
-
 
418
					   enum transcoder pch_transcoder,
-
 
419
					   bool enable)
-
 
420
{
-
 
421
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
422
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
-
 
423
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
 
424
	unsigned long flags;
-
 
425
	bool ret;
-
 
426
 
-
 
427
	/*
-
 
428
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
-
 
429
	 * has only one pch transcoder A that all pipes can use. To avoid racy
-
 
430
	 * pch transcoder -> pipe lookups from interrupt code simply store the
-
 
431
	 * underrun statistics in crtc A. Since we never expose this anywhere
-
 
432
	 * nor use it outside of the fifo underrun code here using the "wrong"
-
 
433
	 * crtc on LPT won't cause issues.
-
 
434
	 */
-
 
435
 
-
 
436
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-
 
437
 
-
 
438
	ret = !intel_crtc->pch_fifo_underrun_disabled;
-
 
439
 
-
 
440
	if (enable == ret)
-
 
441
		goto done;
-
 
442
 
-
 
443
	intel_crtc->pch_fifo_underrun_disabled = !enable;
-
 
444
 
-
 
445
	if (HAS_PCH_IBX(dev))
-
 
446
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
-
 
447
	else
-
 
448
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
-
 
449
 
-
 
450
done:
-
 
451
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
 
452
	return ret;
-
 
453
}
-
 
454
 
124
 
455
 
125
void
456
void
126
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
457
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
127
{
458
{
128
		u32 reg = PIPESTAT(pipe);
459
		u32 reg = PIPESTAT(pipe);
129
	u32 pipestat = I915_READ(reg) & 0x7fff0000;
460
	u32 pipestat = I915_READ(reg) & 0x7fff0000;
-
 
461
 
-
 
462
	assert_spin_locked(&dev_priv->irq_lock);
130
 
463
 
131
	if ((pipestat & mask) == mask)
464
	if ((pipestat & mask) == mask)
132
		return;
465
		return;
133
 
466
 
134
		/* Enable the interrupt, clear any pending status */
467
		/* Enable the interrupt, clear any pending status */
135
	pipestat |= mask | (mask >> 16);
468
	pipestat |= mask | (mask >> 16);
136
	I915_WRITE(reg, pipestat);
469
	I915_WRITE(reg, pipestat);
137
		POSTING_READ(reg);
470
		POSTING_READ(reg);
138
}
471
}
139
 
472
 
140
void
473
void
141
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
474
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
142
{
475
{
143
		u32 reg = PIPESTAT(pipe);
476
		u32 reg = PIPESTAT(pipe);
144
	u32 pipestat = I915_READ(reg) & 0x7fff0000;
477
	u32 pipestat = I915_READ(reg) & 0x7fff0000;
-
 
478
 
-
 
479
	assert_spin_locked(&dev_priv->irq_lock);
145
 
480
 
146
	if ((pipestat & mask) == 0)
481
	if ((pipestat & mask) == 0)
147
		return;
482
		return;
148
 
483
 
149
	pipestat &= ~mask;
484
	pipestat &= ~mask;
150
	I915_WRITE(reg, pipestat);
485
	I915_WRITE(reg, pipestat);
151
		POSTING_READ(reg);
486
		POSTING_READ(reg);
152
}
487
}
153
 
488
 
154
#if 0
489
#if 0
155
/**
490
/**
156
 * intel_enable_asle - enable ASLE interrupt for OpRegion
491
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
157
 */
492
 */
158
void intel_enable_asle(struct drm_device *dev)
493
static void i915_enable_asle_pipestat(struct drm_device *dev)
159
{
494
{
160
	drm_i915_private_t *dev_priv = dev->dev_private;
495
	drm_i915_private_t *dev_priv = dev->dev_private;
161
	unsigned long irqflags;
496
	unsigned long irqflags;
162
 
497
 
163
	/* FIXME: opregion/asle for VLV */
-
 
164
	if (IS_VALLEYVIEW(dev))
498
	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
165
		return;
499
		return;
166
 
500
 
167
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
501
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
168
 
-
 
169
	if (HAS_PCH_SPLIT(dev))
-
 
170
		ironlake_enable_display_irq(dev_priv, DE_GSE);
-
 
171
	else {
-
 
172
		i915_enable_pipestat(dev_priv, 1,
502
 
173
				     PIPE_LEGACY_BLC_EVENT_ENABLE);
503
	i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
174
		if (INTEL_INFO(dev)->gen >= 4)
-
 
175
			i915_enable_pipestat(dev_priv, 0,
504
		if (INTEL_INFO(dev)->gen >= 4)
176
					     PIPE_LEGACY_BLC_EVENT_ENABLE);
-
 
177
	}
505
		i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);
178
 
506
 
179
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
507
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
180
}
508
}
181
#endif
509
#endif
182
 
510
 
183
/**
511
/**
184
 * i915_pipe_enabled - check if a pipe is enabled
512
 * i915_pipe_enabled - check if a pipe is enabled
185
 * @dev: DRM device
513
 * @dev: DRM device
186
 * @pipe: pipe to check
514
 * @pipe: pipe to check
187
 *
515
 *
188
 * Reading certain registers when the pipe is disabled can hang the chip.
516
 * Reading certain registers when the pipe is disabled can hang the chip.
189
 * Use this routine to make sure the PLL is running and the pipe is active
517
 * Use this routine to make sure the PLL is running and the pipe is active
190
 * before reading such registers if unsure.
518
 * before reading such registers if unsure.
191
 */
519
 */
192
static int
520
static int
193
i915_pipe_enabled(struct drm_device *dev, int pipe)
521
i915_pipe_enabled(struct drm_device *dev, int pipe)
194
{
522
{
195
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
523
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
196
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
-
 
197
								      pipe);
-
 
-
 
524
 
-
 
525
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-
 
526
		/* Locking is horribly broken here, but whatever. */
-
 
527
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
-
 
528
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
 
529
 
-
 
530
		return intel_crtc->active;
198
 
531
	} else {
-
 
532
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
199
	return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
533
	}
200
}
534
}
201
 
535
 
202
/* Called from drm generic code, passed a 'crtc', which
536
/* Called from drm generic code, passed a 'crtc', which
203
 * we use as a pipe index
537
 * we use as a pipe index
204
 */
538
 */
205
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
539
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
206
{
540
{
207
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
541
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
208
	unsigned long high_frame;
542
	unsigned long high_frame;
209
	unsigned long low_frame;
543
	unsigned long low_frame;
210
	u32 high1, high2, low;
544
	u32 high1, high2, low;
211
 
545
 
212
	if (!i915_pipe_enabled(dev, pipe)) {
546
	if (!i915_pipe_enabled(dev, pipe)) {
213
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
547
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
214
				"pipe %c\n", pipe_name(pipe));
548
				"pipe %c\n", pipe_name(pipe));
215
		return 0;
549
		return 0;
216
	}
550
	}
217
 
551
 
218
	high_frame = PIPEFRAME(pipe);
552
	high_frame = PIPEFRAME(pipe);
219
	low_frame = PIPEFRAMEPIXEL(pipe);
553
	low_frame = PIPEFRAMEPIXEL(pipe);
220
 
554
 
221
	/*
555
	/*
222
	 * High & low register fields aren't synchronized, so make sure
556
	 * High & low register fields aren't synchronized, so make sure
223
	 * we get a low value that's stable across two reads of the high
557
	 * we get a low value that's stable across two reads of the high
224
	 * register.
558
	 * register.
225
	 */
559
	 */
226
	do {
560
	do {
227
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
561
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
228
		low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
562
		low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
229
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
563
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
230
	} while (high1 != high2);
564
	} while (high1 != high2);
231
 
565
 
232
	high1 >>= PIPE_FRAME_HIGH_SHIFT;
566
	high1 >>= PIPE_FRAME_HIGH_SHIFT;
233
	low >>= PIPE_FRAME_LOW_SHIFT;
567
	low >>= PIPE_FRAME_LOW_SHIFT;
234
	return (high1 << 8) | low;
568
	return (high1 << 8) | low;
235
}
569
}
236
 
570
 
237
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
571
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
238
{
572
{
239
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
573
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
240
	int reg = PIPE_FRMCOUNT_GM45(pipe);
574
	int reg = PIPE_FRMCOUNT_GM45(pipe);
241
 
575
 
242
	if (!i915_pipe_enabled(dev, pipe)) {
576
	if (!i915_pipe_enabled(dev, pipe)) {
243
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
577
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
244
				 "pipe %c\n", pipe_name(pipe));
578
				 "pipe %c\n", pipe_name(pipe));
245
		return 0;
579
		return 0;
246
	}
580
	}
247
 
581
 
248
	return I915_READ(reg);
582
	return I915_READ(reg);
249
}
583
}
250
 
584
 
251
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
585
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
252
			     int *vpos, int *hpos)
586
			     int *vpos, int *hpos)
253
{
587
{
254
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
588
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
255
	u32 vbl = 0, position = 0;
589
	u32 vbl = 0, position = 0;
256
	int vbl_start, vbl_end, htotal, vtotal;
590
	int vbl_start, vbl_end, htotal, vtotal;
257
	bool in_vbl = true;
591
	bool in_vbl = true;
258
	int ret = 0;
592
	int ret = 0;
259
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
593
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
260
								      pipe);
594
								      pipe);
261
 
595
 
262
	if (!i915_pipe_enabled(dev, pipe)) {
596
	if (!i915_pipe_enabled(dev, pipe)) {
263
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
597
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
264
				 "pipe %c\n", pipe_name(pipe));
598
				 "pipe %c\n", pipe_name(pipe));
265
		return 0;
599
		return 0;
266
	}
600
	}
267
 
601
 
268
	/* Get vtotal. */
602
	/* Get vtotal. */
269
	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
603
	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
270
 
604
 
271
	if (INTEL_INFO(dev)->gen >= 4) {
605
	if (INTEL_INFO(dev)->gen >= 4) {
272
		/* No obvious pixelcount register. Only query vertical
606
		/* No obvious pixelcount register. Only query vertical
273
		 * scanout position from Display scan line register.
607
		 * scanout position from Display scan line register.
274
		 */
608
		 */
275
		position = I915_READ(PIPEDSL(pipe));
609
		position = I915_READ(PIPEDSL(pipe));
276
 
610
 
277
		/* Decode into vertical scanout position. Don't have
611
		/* Decode into vertical scanout position. Don't have
278
		 * horizontal scanout position.
612
		 * horizontal scanout position.
279
		 */
613
		 */
280
		*vpos = position & 0x1fff;
614
		*vpos = position & 0x1fff;
281
		*hpos = 0;
615
		*hpos = 0;
282
	} else {
616
	} else {
283
		/* Have access to pixelcount since start of frame.
617
		/* Have access to pixelcount since start of frame.
284
		 * We can split this into vertical and horizontal
618
		 * We can split this into vertical and horizontal
285
		 * scanout position.
619
		 * scanout position.
286
		 */
620
		 */
287
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
621
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
288
 
622
 
289
		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
623
		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
290
		*vpos = position / htotal;
624
		*vpos = position / htotal;
291
		*hpos = position - (*vpos * htotal);
625
		*hpos = position - (*vpos * htotal);
292
	}
626
	}
293
 
627
 
294
	/* Query vblank area. */
628
	/* Query vblank area. */
295
	vbl = I915_READ(VBLANK(cpu_transcoder));
629
	vbl = I915_READ(VBLANK(cpu_transcoder));
296
 
630
 
297
	/* Test position against vblank region. */
631
	/* Test position against vblank region. */
298
	vbl_start = vbl & 0x1fff;
632
	vbl_start = vbl & 0x1fff;
299
	vbl_end = (vbl >> 16) & 0x1fff;
633
	vbl_end = (vbl >> 16) & 0x1fff;
300
 
634
 
301
	if ((*vpos < vbl_start) || (*vpos > vbl_end))
635
	if ((*vpos < vbl_start) || (*vpos > vbl_end))
302
		in_vbl = false;
636
		in_vbl = false;
303
 
637
 
304
	/* Inside "upper part" of vblank area? Apply corrective offset: */
638
	/* Inside "upper part" of vblank area? Apply corrective offset: */
305
	if (in_vbl && (*vpos >= vbl_start))
639
	if (in_vbl && (*vpos >= vbl_start))
306
		*vpos = *vpos - vtotal;
640
		*vpos = *vpos - vtotal;
307
 
641
 
308
	/* Readouts valid? */
642
	/* Readouts valid? */
309
	if (vbl > 0)
643
	if (vbl > 0)
310
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
644
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
311
 
645
 
312
	/* In vblank? */
646
	/* In vblank? */
313
	if (in_vbl)
647
	if (in_vbl)
314
		ret |= DRM_SCANOUTPOS_INVBL;
648
		ret |= DRM_SCANOUTPOS_INVBL;
315
 
649
 
316
	return ret;
650
	return ret;
317
}
651
}
318
 
652
 
319
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
653
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
320
			      int *max_error,
654
			      int *max_error,
321
			      struct timeval *vblank_time,
655
			      struct timeval *vblank_time,
322
			      unsigned flags)
656
			      unsigned flags)
323
{
657
{
324
	struct drm_crtc *crtc;
658
	struct drm_crtc *crtc;
325
 
659
 
326
	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
660
	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
327
		DRM_ERROR("Invalid crtc %d\n", pipe);
661
		DRM_ERROR("Invalid crtc %d\n", pipe);
328
		return -EINVAL;
662
		return -EINVAL;
329
	}
663
	}
330
 
664
 
331
	/* Get drm_crtc to timestamp: */
665
	/* Get drm_crtc to timestamp: */
332
	crtc = intel_get_crtc_for_pipe(dev, pipe);
666
	crtc = intel_get_crtc_for_pipe(dev, pipe);
333
	if (crtc == NULL) {
667
	if (crtc == NULL) {
334
		DRM_ERROR("Invalid crtc %d\n", pipe);
668
		DRM_ERROR("Invalid crtc %d\n", pipe);
335
		return -EINVAL;
669
		return -EINVAL;
336
	}
670
	}
337
 
671
 
338
	if (!crtc->enabled) {
672
	if (!crtc->enabled) {
339
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
673
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
340
		return -EBUSY;
674
		return -EBUSY;
341
	}
675
	}
342
 
676
 
343
	/* Helper routine in DRM core does all the work: */
677
	/* Helper routine in DRM core does all the work: */
344
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
678
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
345
						     vblank_time, flags,
679
						     vblank_time, flags,
346
						     crtc);
680
						     crtc);
347
}
681
}
-
 
682
 
-
 
683
static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
-
 
684
{
-
 
685
	enum drm_connector_status old_status;
-
 
686
 
-
 
687
	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
-
 
688
	old_status = connector->status;
-
 
689
 
-
 
690
	connector->status = connector->funcs->detect(connector, false);
-
 
691
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
-
 
692
		      connector->base.id,
-
 
693
		      drm_get_connector_name(connector),
-
 
694
		      old_status, connector->status);
-
 
695
	return (old_status != connector->status);
-
 
696
}
348
 
697
 
349
/*
698
/*
350
 * Handle hotplug events outside the interrupt handler proper.
699
 * Handle hotplug events outside the interrupt handler proper.
351
 */
700
 */
352
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
701
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
353
 
702
 
354
static void i915_hotplug_work_func(struct work_struct *work)
703
static void i915_hotplug_work_func(struct work_struct *work)
355
{
704
{
356
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
705
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
357
						    hotplug_work);
706
						    hotplug_work);
358
	struct drm_device *dev = dev_priv->dev;
707
	struct drm_device *dev = dev_priv->dev;
359
	struct drm_mode_config *mode_config = &dev->mode_config;
708
	struct drm_mode_config *mode_config = &dev->mode_config;
360
	struct intel_connector *intel_connector;
709
	struct intel_connector *intel_connector;
361
	struct intel_encoder *intel_encoder;
710
	struct intel_encoder *intel_encoder;
362
	struct drm_connector *connector;
711
	struct drm_connector *connector;
363
	unsigned long irqflags;
712
	unsigned long irqflags;
364
	bool hpd_disabled = false;
713
	bool hpd_disabled = false;
-
 
714
	bool changed = false;
-
 
715
	u32 hpd_event_bits;
365
 
716
 
366
	/* HPD irq before everything is fully set up. */
717
	/* HPD irq before everything is fully set up. */
367
	if (!dev_priv->enable_hotplug_processing)
718
	if (!dev_priv->enable_hotplug_processing)
368
		return;
719
		return;
369
 
720
 
370
	mutex_lock(&mode_config->mutex);
721
	mutex_lock(&mode_config->mutex);
371
	DRM_DEBUG_KMS("running encoder hotplug functions\n");
722
	DRM_DEBUG_KMS("running encoder hotplug functions\n");
372
 
723
 
373
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
724
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
 
725
 
-
 
726
	hpd_event_bits = dev_priv->hpd_event_bits;
-
 
727
	dev_priv->hpd_event_bits = 0;
374
	list_for_each_entry(connector, &mode_config->connector_list, head) {
728
	list_for_each_entry(connector, &mode_config->connector_list, head) {
375
		intel_connector = to_intel_connector(connector);
729
		intel_connector = to_intel_connector(connector);
376
		intel_encoder = intel_connector->encoder;
730
		intel_encoder = intel_connector->encoder;
377
		if (intel_encoder->hpd_pin > HPD_NONE &&
731
		if (intel_encoder->hpd_pin > HPD_NONE &&
378
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
732
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
379
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
733
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
380
			DRM_INFO("HPD interrupt storm detected on connector %s: "
734
			DRM_INFO("HPD interrupt storm detected on connector %s: "
381
				 "switching from hotplug detection to polling\n",
735
				 "switching from hotplug detection to polling\n",
382
				drm_get_connector_name(connector));
736
				drm_get_connector_name(connector));
383
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
737
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
384
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
738
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
385
				| DRM_CONNECTOR_POLL_DISCONNECT;
739
				| DRM_CONNECTOR_POLL_DISCONNECT;
386
			hpd_disabled = true;
740
			hpd_disabled = true;
387
		}
741
		}
-
 
742
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
-
 
743
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
-
 
744
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
-
 
745
		}
388
	}
746
	}
389
	 /* if there were no outputs to poll, poll was disabled,
747
	 /* if there were no outputs to poll, poll was disabled,
390
	  * therefore make sure it's enabled when disabling HPD on
748
	  * therefore make sure it's enabled when disabling HPD on
391
	  * some connectors */
749
	  * some connectors */
392
	if (hpd_disabled) {
750
	if (hpd_disabled) {
393
		drm_kms_helper_poll_enable(dev);
751
		drm_kms_helper_poll_enable(dev);
394
//       mod_timer(&dev_priv->hotplug_reenable_timer,
752
//       mod_timer(&dev_priv->hotplug_reenable_timer,
395
//             jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
753
//             jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
396
	}
754
	}
397
 
755
 
398
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
756
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
399
 
757
 
-
 
758
	list_for_each_entry(connector, &mode_config->connector_list, head) {
-
 
759
		intel_connector = to_intel_connector(connector);
-
 
760
		intel_encoder = intel_connector->encoder;
400
	list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
761
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
401
		if (intel_encoder->hot_plug)
762
		if (intel_encoder->hot_plug)
-
 
763
			intel_encoder->hot_plug(intel_encoder);
-
 
764
			if (intel_hpd_irq_event(dev, connector))
-
 
765
				changed = true;
402
			intel_encoder->hot_plug(intel_encoder);
766
		}
403
 
767
	}
404
	mutex_unlock(&mode_config->mutex);
768
	mutex_unlock(&mode_config->mutex);
405
 
769
 
406
	/* Just fire off a uevent and let userspace tell us what to do */
770
	if (changed)
407
	drm_helper_hpd_irq_event(dev);
771
		drm_kms_helper_hotplug_event(dev);
408
}
772
}
409
 
773
 
410
static void ironlake_handle_rps_change(struct drm_device *dev)
774
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
411
{
775
{
412
	drm_i915_private_t *dev_priv = dev->dev_private;
776
	drm_i915_private_t *dev_priv = dev->dev_private;
413
	u32 busy_up, busy_down, max_avg, min_avg;
777
	u32 busy_up, busy_down, max_avg, min_avg;
414
	u8 new_delay;
778
	u8 new_delay;
415
	unsigned long flags;
-
 
416
 
779
 
417
	spin_lock_irqsave(&mchdev_lock, flags);
780
	spin_lock(&mchdev_lock);
418
 
781
 
419
	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
782
	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
420
 
783
 
421
	new_delay = dev_priv->ips.cur_delay;
784
	new_delay = dev_priv->ips.cur_delay;
422
 
785
 
423
	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
786
	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
424
	busy_up = I915_READ(RCPREVBSYTUPAVG);
787
	busy_up = I915_READ(RCPREVBSYTUPAVG);
425
	busy_down = I915_READ(RCPREVBSYTDNAVG);
788
	busy_down = I915_READ(RCPREVBSYTDNAVG);
426
	max_avg = I915_READ(RCBMAXAVG);
789
	max_avg = I915_READ(RCBMAXAVG);
427
	min_avg = I915_READ(RCBMINAVG);
790
	min_avg = I915_READ(RCBMINAVG);
428
 
791
 
429
	/* Handle RCS change request from hw */
792
	/* Handle RCS change request from hw */
430
	if (busy_up > max_avg) {
793
	if (busy_up > max_avg) {
431
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
794
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
432
			new_delay = dev_priv->ips.cur_delay - 1;
795
			new_delay = dev_priv->ips.cur_delay - 1;
433
		if (new_delay < dev_priv->ips.max_delay)
796
		if (new_delay < dev_priv->ips.max_delay)
434
			new_delay = dev_priv->ips.max_delay;
797
			new_delay = dev_priv->ips.max_delay;
435
	} else if (busy_down < min_avg) {
798
	} else if (busy_down < min_avg) {
436
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
799
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
437
			new_delay = dev_priv->ips.cur_delay + 1;
800
			new_delay = dev_priv->ips.cur_delay + 1;
438
		if (new_delay > dev_priv->ips.min_delay)
801
		if (new_delay > dev_priv->ips.min_delay)
439
			new_delay = dev_priv->ips.min_delay;
802
			new_delay = dev_priv->ips.min_delay;
440
	}
803
	}
441
 
804
 
442
	if (ironlake_set_drps(dev, new_delay))
805
	if (ironlake_set_drps(dev, new_delay))
443
		dev_priv->ips.cur_delay = new_delay;
806
		dev_priv->ips.cur_delay = new_delay;
444
 
807
 
445
	spin_unlock_irqrestore(&mchdev_lock, flags);
808
	spin_unlock(&mchdev_lock);
446
 
809
 
447
	return;
810
	return;
448
}
811
}
449
 
812
 
450
static void notify_ring(struct drm_device *dev,
813
static void notify_ring(struct drm_device *dev,
451
			struct intel_ring_buffer *ring)
814
			struct intel_ring_buffer *ring)
452
{
815
{
453
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
454
 
-
 
455
	if (ring->obj == NULL)
816
	if (ring->obj == NULL)
456
		return;
817
		return;
457
 
818
 
458
	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
819
	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
459
 
820
 
460
	wake_up_all(&ring->irq_queue);
821
	wake_up_all(&ring->irq_queue);
461
//   if (i915_enable_hangcheck) {
-
 
462
//       dev_priv->hangcheck_count = 0;
-
 
463
//       mod_timer(&dev_priv->hangcheck_timer,
-
 
464
//             jiffies +
-
 
465
//             msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
-
 
466
//   }
-
 
467
}
822
}
468
 
823
 
469
#if 0
824
#if 0
470
static void gen6_pm_rps_work(struct work_struct *work)
825
static void gen6_pm_rps_work(struct work_struct *work)
471
{
826
{
472
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
827
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
473
						    rps.work);
828
						    rps.work);
474
	u32 pm_iir, pm_imr;
829
	u32 pm_iir;
475
	u8 new_delay;
830
	u8 new_delay;
476
 
831
 
477
	spin_lock_irq(&dev_priv->rps.lock);
832
	spin_lock_irq(&dev_priv->irq_lock);
478
	pm_iir = dev_priv->rps.pm_iir;
833
	pm_iir = dev_priv->rps.pm_iir;
479
	dev_priv->rps.pm_iir = 0;
834
	dev_priv->rps.pm_iir = 0;
480
	pm_imr = I915_READ(GEN6_PMIMR);
835
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
481
	I915_WRITE(GEN6_PMIMR, 0);
836
	snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
-
 
837
	spin_unlock_irq(&dev_priv->irq_lock);
-
 
838
 
-
 
839
	/* Make sure we didn't queue anything we're not going to process. */
482
	spin_unlock_irq(&dev_priv->rps.lock);
840
	WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);
483
 
841
 
484
	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
842
	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
485
		return;
843
		return;
486
 
844
 
487
	mutex_lock(&dev_priv->rps.hw_lock);
845
	mutex_lock(&dev_priv->rps.hw_lock);
488
 
846
 
489
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
847
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
-
 
848
		new_delay = dev_priv->rps.cur_delay + 1;
-
 
849
 
-
 
850
		/*
-
 
851
		 * For better performance, jump directly
-
 
852
		 * to RPe if we're below it.
-
 
853
		 */
-
 
854
		if (IS_VALLEYVIEW(dev_priv->dev) &&
-
 
855
		    dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
490
		new_delay = dev_priv->rps.cur_delay + 1;
856
			new_delay = dev_priv->rps.rpe_delay;
491
	else
857
	} else
492
		new_delay = dev_priv->rps.cur_delay - 1;
858
		new_delay = dev_priv->rps.cur_delay - 1;
493
 
859
 
494
	/* sysfs frequency interfaces may have snuck in while servicing the
860
	/* sysfs frequency interfaces may have snuck in while servicing the
495
	 * interrupt
861
	 * interrupt
496
	 */
862
	 */
497
	if (!(new_delay > dev_priv->rps.max_delay ||
863
	if (new_delay >= dev_priv->rps.min_delay &&
498
	      new_delay < dev_priv->rps.min_delay)) {
864
	    new_delay <= dev_priv->rps.max_delay) {
-
 
865
		if (IS_VALLEYVIEW(dev_priv->dev))
-
 
866
			valleyview_set_rps(dev_priv->dev, new_delay);
-
 
867
		else
499
		gen6_set_rps(dev_priv->dev, new_delay);
868
		gen6_set_rps(dev_priv->dev, new_delay);
500
	}
869
	}
-
 
870
 
-
 
871
	if (IS_VALLEYVIEW(dev_priv->dev)) {
-
 
872
		/*
-
 
873
		 * On VLV, when we enter RC6 we may not be at the minimum
-
 
874
		 * voltage level, so arm a timer to check.  It should only
-
 
875
		 * fire when there's activity or once after we've entered
-
 
876
		 * RC6, and then won't be re-armed until the next RPS interrupt.
-
 
877
		 */
-
 
878
		mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
-
 
879
				 msecs_to_jiffies(100));
-
 
880
	}
501
 
881
 
502
	mutex_unlock(&dev_priv->rps.hw_lock);
882
	mutex_unlock(&dev_priv->rps.hw_lock);
503
}
883
}
504
 
884
 
505
 
885
 
506
/**
886
/**
507
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
887
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
508
 * occurred.
888
 * occurred.
509
 * @work: workqueue struct
889
 * @work: workqueue struct
510
 *
890
 *
511
 * Doesn't actually do anything except notify userspace. As a consequence of
891
 * Doesn't actually do anything except notify userspace. As a consequence of
512
 * this event, userspace should try to remap the bad rows since statistically
892
 * this event, userspace should try to remap the bad rows since statistically
513
 * it is likely the same row is more likely to go bad again.
893
 * it is likely the same row is more likely to go bad again.
514
 */
894
 */
515
static void ivybridge_parity_work(struct work_struct *work)
895
static void ivybridge_parity_work(struct work_struct *work)
516
{
896
{
517
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
897
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
518
						    l3_parity.error_work);
898
						    l3_parity.error_work);
519
	u32 error_status, row, bank, subbank;
899
	u32 error_status, row, bank, subbank;
520
	char *parity_event[5];
900
	char *parity_event[5];
521
	uint32_t misccpctl;
901
	uint32_t misccpctl;
522
	unsigned long flags;
902
	unsigned long flags;
523
 
903
 
524
	/* We must turn off DOP level clock gating to access the L3 registers.
904
	/* We must turn off DOP level clock gating to access the L3 registers.
525
	 * In order to prevent a get/put style interface, acquire struct mutex
905
	 * In order to prevent a get/put style interface, acquire struct mutex
526
	 * any time we access those registers.
906
	 * any time we access those registers.
527
	 */
907
	 */
528
	mutex_lock(&dev_priv->dev->struct_mutex);
908
	mutex_lock(&dev_priv->dev->struct_mutex);
529
 
909
 
530
	misccpctl = I915_READ(GEN7_MISCCPCTL);
910
	misccpctl = I915_READ(GEN7_MISCCPCTL);
531
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
911
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
532
	POSTING_READ(GEN7_MISCCPCTL);
912
	POSTING_READ(GEN7_MISCCPCTL);
533
 
913
 
534
	error_status = I915_READ(GEN7_L3CDERRST1);
914
	error_status = I915_READ(GEN7_L3CDERRST1);
535
	row = GEN7_PARITY_ERROR_ROW(error_status);
915
	row = GEN7_PARITY_ERROR_ROW(error_status);
536
	bank = GEN7_PARITY_ERROR_BANK(error_status);
916
	bank = GEN7_PARITY_ERROR_BANK(error_status);
537
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
917
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
538
 
918
 
539
	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
919
	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
540
				    GEN7_L3CDERRST1_ENABLE);
920
				    GEN7_L3CDERRST1_ENABLE);
541
	POSTING_READ(GEN7_L3CDERRST1);
921
	POSTING_READ(GEN7_L3CDERRST1);
542
 
922
 
543
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
923
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
544
 
924
 
545
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
925
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
546
	dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
926
	ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
547
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-
 
548
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
927
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
549
 
928
 
550
	mutex_unlock(&dev_priv->dev->struct_mutex);
929
	mutex_unlock(&dev_priv->dev->struct_mutex);
551
 
930
 
552
	parity_event[0] = "L3_PARITY_ERROR=1";
931
	parity_event[0] = I915_L3_PARITY_UEVENT "=1";
553
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
932
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
554
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
933
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
555
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
934
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
556
	parity_event[4] = NULL;
935
	parity_event[4] = NULL;
557
 
936
 
558
	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
937
	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
559
			   KOBJ_CHANGE, parity_event);
938
			   KOBJ_CHANGE, parity_event);
560
 
939
 
561
	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
940
	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
562
		  row, bank, subbank);
941
		  row, bank, subbank);
563
 
942
 
564
	kfree(parity_event[3]);
943
	kfree(parity_event[3]);
565
	kfree(parity_event[2]);
944
	kfree(parity_event[2]);
566
	kfree(parity_event[1]);
945
	kfree(parity_event[1]);
567
}
946
}
568
 
947
 
569
static void ivybridge_handle_parity_error(struct drm_device *dev)
948
static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
570
{
949
{
571
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
 
572
	unsigned long flags;
950
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
573
 
951
 
574
	if (!HAS_L3_GPU_CACHE(dev))
952
	if (!HAS_L3_GPU_CACHE(dev))
575
		return;
953
		return;
576
 
954
 
577
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
955
	spin_lock(&dev_priv->irq_lock);
578
	dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
-
 
579
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
956
	ilk_disable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
580
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
957
	spin_unlock(&dev_priv->irq_lock);
581
 
958
 
582
	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
959
	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
583
}
960
}
584
 
961
 
585
#endif
962
#endif
-
 
963
 
-
 
964
static void ilk_gt_irq_handler(struct drm_device *dev,
-
 
965
			       struct drm_i915_private *dev_priv,
-
 
966
			       u32 gt_iir)
-
 
967
{
-
 
968
	if (gt_iir &
-
 
969
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
-
 
970
		notify_ring(dev, &dev_priv->ring[RCS]);
-
 
971
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
-
 
972
		notify_ring(dev, &dev_priv->ring[VCS]);
-
 
973
}
586
 
974
 
587
static void snb_gt_irq_handler(struct drm_device *dev,
975
static void snb_gt_irq_handler(struct drm_device *dev,
588
			       struct drm_i915_private *dev_priv,
976
			       struct drm_i915_private *dev_priv,
589
			       u32 gt_iir)
977
			       u32 gt_iir)
590
{
978
{
591
 
979
 
592
	if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
980
	if (gt_iir &
593
		      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
981
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
594
		notify_ring(dev, &dev_priv->ring[RCS]);
982
		notify_ring(dev, &dev_priv->ring[RCS]);
595
	if (gt_iir & GEN6_BSD_USER_INTERRUPT)
983
	if (gt_iir & GT_BSD_USER_INTERRUPT)
596
		notify_ring(dev, &dev_priv->ring[VCS]);
984
		notify_ring(dev, &dev_priv->ring[VCS]);
597
	if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
985
	if (gt_iir & GT_BLT_USER_INTERRUPT)
598
		notify_ring(dev, &dev_priv->ring[BCS]);
986
		notify_ring(dev, &dev_priv->ring[BCS]);
599
 
987
 
600
	if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
988
	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
601
		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
989
		      GT_BSD_CS_ERROR_INTERRUPT |
602
		      GT_RENDER_CS_ERROR_INTERRUPT)) {
990
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
603
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
991
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
604
		i915_handle_error(dev, false);
992
//       i915_handle_error(dev, false);
605
	}
993
	}
606
 
994
 
607
//	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
995
//	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
608
//		ivybridge_handle_parity_error(dev);
996
//		ivybridge_handle_parity_error(dev);
609
}
997
}
610
 
-
 
611
static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
-
 
612
				u32 pm_iir)
-
 
613
{
-
 
614
	unsigned long flags;
-
 
615
 
-
 
616
	/*
-
 
617
	 * IIR bits should never already be set because IMR should
-
 
618
	 * prevent an interrupt from being shown in IIR. The warning
-
 
619
	 * displays a case where we've unsafely cleared
-
 
620
	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
-
 
621
	 * type is not a problem, it displays a problem in the logic.
-
 
622
	 *
-
 
623
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
-
 
624
	 */
-
 
625
 
-
 
626
	spin_lock_irqsave(&dev_priv->rps.lock, flags);
-
 
627
	dev_priv->rps.pm_iir |= pm_iir;
-
 
628
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
-
 
629
	POSTING_READ(GEN6_PMIMR);
-
 
630
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
-
 
631
 
-
 
632
//   queue_work(dev_priv->wq, &dev_priv->rps.work);
-
 
633
}
-
 
634
 
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static inline void intel_hpd_irq_handler(struct drm_device *dev,
					    u32 hotplug_trigger,
					    const u32 *hpd)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	bool storm_detected = false;

	if (!hotplug_trigger)
		return;

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {

		WARN(((hpd[i] & hotplug_trigger) &&
		      dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED),
		     "Received HPD interrupt although disabled\n");

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		dev_priv->hpd_event_bits |= (1 << i);
//        if (!time_in_range(GetTimerTicks(), dev_priv->hpd_stats[i].hpd_last_jiffies,
//                  dev_priv->hpd_stats[i].hpd_last_jiffies
//                  + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
//            dev_priv->hpd_stats[i].hpd_last_jiffies = GetTimerTicks;
//            dev_priv->hpd_stats[i].hpd_cnt = 0;
//       } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
//           dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
//           DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
//           ret = true;
//       } else {
			dev_priv->hpd_stats[i].hpd_cnt++;
//       }
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

}
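
/*
 * Illustrative sketch only, not part of this driver: if the jiffies-based
 * storm detection commented out above were enabled, the per-pin logic would
 * look roughly like the helper below. The helper name is hypothetical; the
 * fields and constants mirror the surrounding code.
 */
#if 0
static bool intel_hpd_pin_storm_sketch(struct drm_i915_private *dev_priv, int i)
{
	unsigned long last = dev_priv->hpd_stats[i].hpd_last_jiffies;

	if (!time_in_range(jiffies, last,
			   last + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
		/* Outside the detection window: restart the count. */
		dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		return false;
	}

	if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
		/* Too many interrupts inside the window: mark the pin disabled. */
		dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
		DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
		return true;
	}

	dev_priv->hpd_stats[i].hpd_cnt++;
	return false;
}
#endif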

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}
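
/*
 * Illustrative sketch only (the real waiter lives in the GMBUS/DP AUX code,
 * not in this file): both handlers above simply wake gmbus_wait_queue, and a
 * transfer waits for the controller to go idle roughly like this. Assumes the
 * usual GMBUS2/GMBUS_ACTIVE register semantics; the helper name is made up.
 */
#if 0
static int gmbus_wait_idle_sketch(struct drm_i915_private *dev_priv)
{
	int ret;

	/* Sleep until the IRQ handler wakes us, or give up after 10 ms. */
	ret = wait_event_timeout(dev_priv->gmbus_wait_queue,
				 (I915_READ(GMBUS2) & GMBUS_ACTIVE) == 0,
				 msecs_to_jiffies(10));
	return ret ? 0 : -ETIMEDOUT;
}
#endif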

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & GEN6_PM_RPS_EVENTS) {
		spin_lock(&dev_priv->irq_lock);
		dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
		snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS);
		spin_unlock(&dev_priv->irq_lock);

		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
//           i915_handle_error(dev_priv->dev, false);
		}
	}
}
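
/*
 * Illustrative sketch only, not from this file: the rps.work item queued
 * above would typically drain the accumulated PM IIR bits under irq_lock,
 * unmask them again, and only then touch the frequency registers (which need
 * forcewake). The sketch's function name is made up; snb_enable_pm_irq
 * mirrors the disable call used above, and the PM constants are assumptions
 * taken from i915_reg.h.
 */
#if 0
static void gen6_pm_rps_work_sketch(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	u32 pm_iir;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);	/* re-unmask */
	spin_unlock_irq(&dev_priv->irq_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
		;	/* step the GPU frequency up */
	else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD)
		;	/* step the GPU frequency down */
}
#endif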

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

#if 0
		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}
#endif

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

//        if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
//            gen6_queue_rps_work(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
}

static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_A)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_B)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_C)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
			DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}

static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

#if 0
	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");
#endif

	if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");

	if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");

#if 0
	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}
#endif

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) &&  de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev);
}

static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

//	if (de_iir & DE_ERR_INT_IVB)
//		ivb_err_int_handler(dev);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);
#if 0
	for (i = 0; i < 3; i++) {
		if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
			drm_handle_vblank(dev, i);
		if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
			intel_prepare_page_flip(dev, i);
			intel_finish_page_flip_plane(dev, i);
		}
	}
#endif

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;
	bool err_int_reenable = false;

	atomic_inc(&dev_priv->irq_received);

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	intel_uncore_check_errors(dev);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* On Haswell, also mask ERR_INT because we don't want to risk
	 * generating "unclaimed register" interrupts from inside the interrupt
	 * handler. */
	if (IS_HASWELL(dev)) {
		spin_lock(&dev_priv->irq_lock);
		err_int_reenable = ~dev_priv->irq_mask & DE_ERR_INT_IVB;
		if (err_int_reenable)
			ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
		spin_unlock(&dev_priv->irq_lock);
	}

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		if (INTEL_INFO(dev)->gen >= 6)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (INTEL_INFO(dev)->gen >= 7)
			ivb_display_irq_handler(dev, de_iir);
		else
			ilk_display_irq_handler(dev, de_iir);
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			gen6_rps_irq_handler(dev_priv, pm_iir);
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
		}
	}

	if (err_int_reenable) {
		spin_lock(&dev_priv->irq_lock);
		if (ivb_can_enable_err_int(dev))
			ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
		spin_unlock(&dev_priv->irq_lock);
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return ret;
}

static void i915_error_wake_up(struct drm_i915_private *dev_priv,
			       bool reset_completed)
{
	struct intel_ring_buffer *ring;
	int i;

	/*
	 * Notify all waiters for GPU completion events that reset state has
	 * been changed, and that they need to restart their wait after
	 * checking for potential errors (and bail out to drop locks if there is
	 * a gpu reset pending so that i915_error_work_func can acquire them).
	 */

	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
	for_each_ring(ring, dev_priv, i)
		wake_up_all(&ring->irq_queue);

	/*
	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
	 * reset state is cleared.
	 */
	if (reset_completed)
		wake_up_all(&dev_priv->gpu_error.reset_queue);
}

#if 0
/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
						    work);
	drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
						    gpu_error);
	struct drm_device *dev = dev_priv->dev;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	int ret;

	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
				   reset_event);

		/*
		 * All state reset _must_ be completed before we update the
		 * reset counter, for otherwise waiters might miss the reset
		 * pending state and not properly drop locks, resulting in
		 * deadlocks with the reset work.
		 */
		ret = i915_reset(dev);

		intel_display_handle_reset(dev);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before
			 * the counter increment.
			 */
			smp_mb__before_atomic_inc();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

			kobject_uevent_env(&dev->primary->kdev.kobj,
					   KOBJ_CHANGE, reset_done_event);
		} else {
			atomic_set(&error->reset_counter, I915_WEDGED);
		}

		/*
		 * Note: The wake_up also serves as a memory barrier so that
		 * waiters see the update value of the reset counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, true);
	}
}

static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

1663
/**
1678
/**
1664
 * i915_handle_error - handle an error interrupt
1679
 * i915_handle_error - handle an error interrupt
1665
 * @dev: drm device
1680
 * @dev: drm device
1666
 *
1681
 *
1667
 * Do some basic checking of register state at error interrupt time and
1682
 * Do some basic checking of register state at error interrupt time and
1668
 * dump it to the syslog.  Also call i915_capture_error_state() to make
1683
 * dump it to the syslog.  Also call i915_capture_error_state() to make
1669
 * sure we get a record and make it available in debugfs.  Fire a uevent
1684
 * sure we get a record and make it available in debugfs.  Fire a uevent
1670
 * so userspace knows something bad happened (should trigger collection
1685
 * so userspace knows something bad happened (should trigger collection
1671
 * of a ring dump etc.).
1686
 * of a ring dump etc.).
1672
 */
1687
 */
1673
void i915_handle_error(struct drm_device *dev, bool wedged)
1688
void i915_handle_error(struct drm_device *dev, bool wedged)
1674
{
1689
{
1675
	struct drm_i915_private *dev_priv = dev->dev_private;
1690
	struct drm_i915_private *dev_priv = dev->dev_private;
1676
	struct intel_ring_buffer *ring;
-
 
1677
	int i;
-
 
1678
 
1691
 
1679
	i915_capture_error_state(dev);
1692
	i915_capture_error_state(dev);
1680
	i915_report_and_clear_eir(dev);
1693
	i915_report_and_clear_eir(dev);
1681
 
1694
 
1682
	if (wedged) {
1695
	if (wedged) {
1683
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
1696
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
1684
				&dev_priv->gpu_error.reset_counter);
1697
				&dev_priv->gpu_error.reset_counter);
1685
 
1698
 
1686
		/*
1699
		/*
1687
		 * Wake up waiting processes so that the reset work item
1700
		 * Wake up waiting processes so that the reset work function
1688
		 * doesn't deadlock trying to grab various locks.
1701
		 * i915_error_work_func doesn't deadlock trying to grab various
-
 
1702
		 * locks. By bumping the reset counter first, the woken
-
 
1703
		 * processes will see a reset in progress and back off,
-
 
1704
		 * releasing their locks and then wait for the reset completion.
-
 
1705
		 * We must do this for _all_ gpu waiters that might hold locks
-
 
1706
		 * that the reset work needs to acquire.
-
 
1707
		 *
-
 
1708
		 * Note: The wake_up serves as the required memory barrier to
-
 
1709
		 * ensure that the waiters see the updated value of the reset
-
 
1710
		 * counter atomic_t.
1689
		 */
1711
		 */
1690
		for_each_ring(ring, dev_priv, i)
1712
		i915_error_wake_up(dev_priv, false);
1691
			wake_up_all(&ring->irq_queue);
-
 
1692
	}
1713
	}
-
 
1714
 
-
 
1715
	/*
-
 
1716
	 * Our reset work can grab modeset locks (since it needs to reset the
-
 
1717
	 * state of outstanding pageflips). Hence it must not be run on our own
-
 
1718
	 * dev_priv->wq work queue because otherwise the flush_work in the pageflip
-
 
1719
	 * code will deadlock.
1693
 
1720
	 */
1694
//	queue_work(dev_priv->wq, &dev_priv->error_work);
1721
	schedule_work(&dev_priv->gpu_error.work);
1695
}
-
 
1696
 
-
 
1697
#if 0
-
 
1698
 
1722
}
1699
 
1723
 
1700
static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
1724
static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
1701
{
1725
{
1702
	drm_i915_private_t *dev_priv = dev->dev_private;
1726
	drm_i915_private_t *dev_priv = dev->dev_private;
1703
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1727
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1704
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1728
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1705
	struct drm_i915_gem_object *obj;
1729
	struct drm_i915_gem_object *obj;
1706
	struct intel_unpin_work *work;
1730
	struct intel_unpin_work *work;
1707
	unsigned long flags;
1731
	unsigned long flags;
1708
	bool stall_detected;
1732
	bool stall_detected;
1709
 
1733
 
1710
	/* Ignore early vblank irqs */
1734
	/* Ignore early vblank irqs */
1711
	if (intel_crtc == NULL)
1735
	if (intel_crtc == NULL)
1712
		return;
1736
		return;
1713
 
1737
 
1714
	spin_lock_irqsave(&dev->event_lock, flags);
1738
	spin_lock_irqsave(&dev->event_lock, flags);
1715
	work = intel_crtc->unpin_work;
1739
	work = intel_crtc->unpin_work;
1716
 
1740
 
1717
	if (work == NULL ||
1741
	if (work == NULL ||
1718
	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
1742
	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
1719
	    !work->enable_stall_check) {
1743
	    !work->enable_stall_check) {
1720
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
1744
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
1721
		spin_unlock_irqrestore(&dev->event_lock, flags);
1745
		spin_unlock_irqrestore(&dev->event_lock, flags);
1722
		return;
1746
		return;
1723
	}
1747
	}
1724
 
1748
 
1725
	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
1749
	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
1726
	obj = work->pending_flip_obj;
1750
	obj = work->pending_flip_obj;
1727
	if (INTEL_INFO(dev)->gen >= 4) {
1751
	if (INTEL_INFO(dev)->gen >= 4) {
1728
		int dspsurf = DSPSURF(intel_crtc->plane);
1752
		int dspsurf = DSPSURF(intel_crtc->plane);
1729
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
1753
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
1730
					obj->gtt_offset;
1754
					i915_gem_obj_ggtt_offset(obj);
1731
	} else {
1755
	} else {
1732
		int dspaddr = DSPADDR(intel_crtc->plane);
1756
		int dspaddr = DSPADDR(intel_crtc->plane);
1733
		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
1757
		stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
1734
							crtc->y * crtc->fb->pitches[0] +
1758
							crtc->y * crtc->fb->pitches[0] +
1735
							crtc->x * crtc->fb->bits_per_pixel/8);
1759
							crtc->x * crtc->fb->bits_per_pixel/8);
1736
	}
1760
	}
1737
 
1761
 
1738
	spin_unlock_irqrestore(&dev->event_lock, flags);
1762
	spin_unlock_irqrestore(&dev->event_lock, flags);
1739
 
1763
 
1740
	if (stall_detected) {
1764
	if (stall_detected) {
1741
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
1765
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
1742
		intel_prepare_page_flip(dev, intel_crtc->plane);
1766
		intel_prepare_page_flip(dev, intel_crtc->plane);
1743
	}
1767
	}
1744
}
1768
}
1745
 
1769
 
1746
#endif
1770
#endif
1747
 
1771
 
1748
/* Called from drm generic code, passed 'crtc' which
1772
/* Called from drm generic code, passed 'crtc' which
1749
 * we use as a pipe index
1773
 * we use as a pipe index
1750
 */
1774
 */
1751
static int i915_enable_vblank(struct drm_device *dev, int pipe)
1775
static int i915_enable_vblank(struct drm_device *dev, int pipe)
1752
{
1776
{
1753
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1777
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1754
	unsigned long irqflags;
1778
	unsigned long irqflags;
1755
 
1779
 
1756
	if (!i915_pipe_enabled(dev, pipe))
1780
	if (!i915_pipe_enabled(dev, pipe))
1757
		return -EINVAL;
1781
		return -EINVAL;
1758
 
1782
 
1759
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1783
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1760
	if (INTEL_INFO(dev)->gen >= 4)
1784
	if (INTEL_INFO(dev)->gen >= 4)
1761
		i915_enable_pipestat(dev_priv, pipe,
1785
		i915_enable_pipestat(dev_priv, pipe,
1762
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
1786
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
1763
	else
1787
	else
1764
		i915_enable_pipestat(dev_priv, pipe,
1788
		i915_enable_pipestat(dev_priv, pipe,
1765
				     PIPE_VBLANK_INTERRUPT_ENABLE);
1789
				     PIPE_VBLANK_INTERRUPT_ENABLE);
1766
 
1790
 
1767
	/* maintain vblank delivery even in deep C-states */
1791
	/* maintain vblank delivery even in deep C-states */
1768
	if (dev_priv->info->gen == 3)
1792
	if (dev_priv->info->gen == 3)
1769
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
1793
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
1770
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1794
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1771
 
1795
 
1772
	return 0;
1796
	return 0;
1773
}
1797
}
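These enable/disable callbacks only do anything once the DRM core knows about them. As a rough, hedged sketch of how the per-platform vblank hooks are typically wired up at init time (the helper name below is illustrative, and the selection shown is an assumption about the irq init path rather than a quote of this file):

/* Sketch: install the per-platform vblank hooks so drm_vblank_get()/put()
 * in the DRM core end up calling the functions in this hunk. */
static void intel_vblank_hooks_sketch(struct drm_device *dev)
{
	if (IS_VALLEYVIEW(dev)) {
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
	} else {
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}

The core calls enable_vblank() when the first client starts waiting on a pipe's vblank and disable_vblank() when the last reference is dropped, which is why these functions only toggle the interrupt source and keep no state of their own.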
1774
 
1798
 
1775
static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
1799
static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
1776
{
1800
{
1777
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1801
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1778
	unsigned long irqflags;
1802
	unsigned long irqflags;
-
 
1803
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
-
 
1804
						     DE_PIPE_VBLANK_ILK(pipe);
1779
 
1805
 
1780
	if (!i915_pipe_enabled(dev, pipe))
1806
	if (!i915_pipe_enabled(dev, pipe))
1781
		return -EINVAL;
1807
		return -EINVAL;
1782
 
1808
 
1783
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1809
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1784
	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
-
 
1785
				    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
-
 
1786
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-
 
1787
 
-
 
1788
	return 0;
-
 
1789
}
-
 
1790
 
-
 
1791
static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
-
 
1792
{
-
 
1793
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
 
1794
	unsigned long irqflags;
-
 
1795
 
-
 
1796
	if (!i915_pipe_enabled(dev, pipe))
-
 
1797
		return -EINVAL;
-
 
1798
 
-
 
1799
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
 
1800
	ironlake_enable_display_irq(dev_priv,
1810
	ironlake_enable_display_irq(dev_priv, bit);
1801
				    DE_PIPEA_VBLANK_IVB << (5 * pipe));
-
 
1802
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1811
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1803
 
1812
 
1804
	return 0;
1813
	return 0;
1805
}
1814
}
1806
 
1815
 
1807
static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
1816
static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
1808
{
1817
{
1809
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1818
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1810
	unsigned long irqflags;
1819
	unsigned long irqflags;
1811
	u32 imr;
1820
	u32 imr;
1812
 
1821
 
1813
	if (!i915_pipe_enabled(dev, pipe))
1822
	if (!i915_pipe_enabled(dev, pipe))
1814
		return -EINVAL;
1823
		return -EINVAL;
1815
 
1824
 
1816
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1825
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1817
	imr = I915_READ(VLV_IMR);
1826
	imr = I915_READ(VLV_IMR);
1818
	if (pipe == 0)
1827
	if (pipe == 0)
1819
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1828
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1820
	else
1829
	else
1821
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1830
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1822
	I915_WRITE(VLV_IMR, imr);
1831
	I915_WRITE(VLV_IMR, imr);
1823
	i915_enable_pipestat(dev_priv, pipe,
1832
	i915_enable_pipestat(dev_priv, pipe,
1824
			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
1833
			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
1825
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1834
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1826
 
1835
 
1827
	return 0;
1836
	return 0;
1828
}
1837
}
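Both the gen2-4 and Valleyview paths above lean on i915_enable_pipestat()/i915_disable_pipestat(), which are defined earlier in the file and not visible in this hunk. For orientation, the enable side is conceptually a locked read-modify-write of the PIPESTAT register; the sketch below is an approximation (the shadow array name and the exact write are assumptions, not a quote of this file):

/* Approximate shape of the pipestat enable helper: set the enable bit
 * (high half of PIPESTAT) and ack any stale status bit (low half).
 * Assumes dev_priv->irq_lock is already held by the caller. */
static void enable_pipestat_sketch(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);

	if ((dev_priv->pipestat[pipe] & mask) == mask)
		return;		/* already enabled */

	dev_priv->pipestat[pipe] |= mask;
	/* Enable the interrupt, clear any pending status */
	I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
	POSTING_READ(reg);
}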
1829
 
1838
 
1830
/* Called from drm generic code, passed 'crtc' which
1839
/* Called from drm generic code, passed 'crtc' which
1831
 * we use as a pipe index
1840
 * we use as a pipe index
1832
 */
1841
 */
1833
static void i915_disable_vblank(struct drm_device *dev, int pipe)
1842
static void i915_disable_vblank(struct drm_device *dev, int pipe)
1834
{
1843
{
1835
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1844
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1836
	unsigned long irqflags;
1845
	unsigned long irqflags;
1837
 
1846
 
1838
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1847
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1839
	if (dev_priv->info->gen == 3)
1848
	if (dev_priv->info->gen == 3)
1840
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
1849
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
1841
 
1850
 
1842
	i915_disable_pipestat(dev_priv, pipe,
1851
	i915_disable_pipestat(dev_priv, pipe,
1843
			      PIPE_VBLANK_INTERRUPT_ENABLE |
1852
			      PIPE_VBLANK_INTERRUPT_ENABLE |
1844
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
1853
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
1845
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1854
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1846
}
1855
}
1847
 
1856
 
1848
static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
1857
static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
1849
{
1858
{
1850
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1859
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1851
	unsigned long irqflags;
1860
	unsigned long irqflags;
-
 
1861
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
-
 
1862
						     DE_PIPE_VBLANK_ILK(pipe);
1852
 
1863
 
1853
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
 
1854
	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
-
 
1855
				     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
-
 
1856
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-
 
1857
}
-
 
1858
 
-
 
1859
static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
-
 
1860
{
-
 
1861
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
 
1862
	unsigned long irqflags;
-
 
1863
 
-
 
1864
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1864
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1865
	ironlake_disable_display_irq(dev_priv,
-
 
1866
				     DE_PIPEA_VBLANK_IVB << (pipe * 5));
1865
	ironlake_disable_display_irq(dev_priv, bit);
1867
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1866
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1868
}
1867
}
1869
 
1868
 
1870
static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
1869
static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
1871
{
1870
{
1872
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1871
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1873
	unsigned long irqflags;
1872
	unsigned long irqflags;
1874
	u32 imr;
1873
	u32 imr;
1875
 
1874
 
1876
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1875
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1877
	i915_disable_pipestat(dev_priv, pipe,
1876
	i915_disable_pipestat(dev_priv, pipe,
1878
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
1877
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
1879
	imr = I915_READ(VLV_IMR);
1878
	imr = I915_READ(VLV_IMR);
1880
	if (pipe == 0)
1879
	if (pipe == 0)
1881
		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1880
		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1882
	else
1881
	else
1883
		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1882
		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1884
	I915_WRITE(VLV_IMR, imr);
1883
	I915_WRITE(VLV_IMR, imr);
1885
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1884
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1886
}
1885
}
1887
 
1886
 
1888
static u32
1887
static u32
1889
ring_last_seqno(struct intel_ring_buffer *ring)
1888
ring_last_seqno(struct intel_ring_buffer *ring)
1890
{
1889
{
1891
	return list_entry(ring->request_list.prev,
1890
	return list_entry(ring->request_list.prev,
1892
			  struct drm_i915_gem_request, list)->seqno;
1891
			  struct drm_i915_gem_request, list)->seqno;
1893
}
1892
}
-
 
1893
 
-
 
1894
static bool
-
 
1895
ring_idle(struct intel_ring_buffer *ring, u32 seqno)
-
 
1896
{
-
 
1897
	return (list_empty(&ring->request_list) ||
-
 
1898
		i915_seqno_passed(seqno, ring_last_seqno(ring)));
-
 
1899
}
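ring_idle() declares a ring idle when its last queued request's seqno has been reached. The helper it relies on, i915_seqno_passed() (defined in i915_drv.h), is not shown here; the important property is that the comparison stays correct across 32-bit seqno wrap-around, which is conventionally done with signed subtraction, roughly:

/* Wrap-safe "has seq1 reached or passed seq2?" test (sketch of the
 * helper used above; signed subtraction keeps the comparison correct
 * after the 32-bit counter wraps). */
static inline bool seqno_passed_sketch(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}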
-
 
1900
 
-
 
1901
static struct intel_ring_buffer *
-
 
1902
semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
-
 
1903
{
-
 
1904
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-
 
1905
	u32 cmd, ipehr, acthd, acthd_min;
-
 
1906
 
-
 
1907
	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
-
 
1908
	if ((ipehr & ~(0x3 << 16)) !=
-
 
1909
	    (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
1894
/* drm_dma.h hooks
1910
		return NULL;
-
 
1911
 
-
 
1912
	/* ACTHD is likely pointing to the dword after the actual command,
-
 
1913
	 * so scan backwards until we find the MBOX.
1895
*/
1914
	 */
-
 
1915
	acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
-
 
1916
	acthd_min = max((int)acthd - 3 * 4, 0);
-
 
1917
	do {
-
 
1918
		cmd = ioread32(ring->virtual_start + acthd);
-
 
1919
		if (cmd == ipehr)
-
 
1920
			break;
-
 
1921
 
-
 
1922
		acthd -= 4;
-
 
1923
		if (acthd < acthd_min)
-
 
1924
			return NULL;
-
 
1925
	} while (1);
-
 
1926
 
-
 
1927
	*seqno = ioread32(ring->virtual_start+acthd+4)+1;
-
 
1928
	return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
-
 
1929
}
-
 
1930
 
1896
static void ironlake_irq_preinstall(struct drm_device *dev)
1931
static int semaphore_passed(struct intel_ring_buffer *ring)
1897
{
1932
{
1898
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1933
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-
 
1934
	struct intel_ring_buffer *signaller;
-
 
1935
	u32 seqno, ctl;
1899
 
1936
 
1900
    atomic_set(&dev_priv->irq_received, 0);
1937
	ring->hangcheck.deadlock = true;
-
 
1938
 
-
 
1939
	signaller = semaphore_waits_for(ring, &seqno);
1901
 
1940
	if (signaller == NULL || signaller->hangcheck.deadlock)
-
 
1941
		return -1;
-
 
1942
 
-
 
1943
	/* cursory check for an unkickable deadlock */
1902
    I915_WRITE(HWSTAM, 0xeffe);
1944
	ctl = I915_READ_CTL(signaller);
1903
 
-
 
1904
    /* XXX hotplug from PCH */
-
 
-
 
1945
	if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
-
 
1946
		return -1;
-
 
1947
 
-
 
1948
	return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
-
 
1949
}
-
 
1950
 
-
 
1951
static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
-
 
1952
{
-
 
1953
	struct intel_ring_buffer *ring;
-
 
1954
	int i;
-
 
1955
 
-
 
1956
	for_each_ring(ring, dev_priv, i)
-
 
1957
		ring->hangcheck.deadlock = false;
-
 
1958
}
-
 
1959
 
-
 
1960
static enum intel_ring_hangcheck_action
-
 
1961
ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
-
 
1962
{
-
 
1963
	struct drm_device *dev = ring->dev;
-
 
1964
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
1965
	u32 tmp;
-
 
1966
 
-
 
1967
	if (ring->hangcheck.acthd != acthd)
-
 
1968
		return HANGCHECK_ACTIVE;
-
 
1969
 
-
 
1970
	if (IS_GEN2(dev))
-
 
1971
		return HANGCHECK_HUNG;
-
 
1972
 
-
 
1973
	/* Is the chip hanging on a WAIT_FOR_EVENT?
-
 
1974
	 * If so we can simply poke the RB_WAIT bit
-
 
1975
	 * and break the hang. This should work on
1905
 
1976
	 * all but the second generation chipsets.
1906
    I915_WRITE(DEIMR, 0xffffffff);
1977
	 */
-
 
1978
	tmp = I915_READ_CTL(ring);
-
 
1979
	if (tmp & RING_WAIT) {
-
 
1980
		DRM_ERROR("Kicking stuck wait on %s\n",
-
 
1981
			  ring->name);
-
 
1982
		I915_WRITE_CTL(ring, tmp);
-
 
1983
		return HANGCHECK_KICK;
-
 
1984
	}
-
 
1985
 
-
 
1986
	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
-
 
1987
		switch (semaphore_passed(ring)) {
1907
    I915_WRITE(DEIER, 0x0);
1988
		default:
-
 
1989
			return HANGCHECK_HUNG;
-
 
1990
		case 1:
-
 
1991
			DRM_ERROR("Kicking stuck semaphore on %s\n",
-
 
1992
				  ring->name);
-
 
1993
			I915_WRITE_CTL(ring, tmp);
-
 
1994
			return HANGCHECK_KICK;
-
 
1995
		case 0:
-
 
1996
			return HANGCHECK_WAIT;
-
 
1997
		}
-
 
1998
	}
-
 
1999
 
-
 
2000
	return HANGCHECK_HUNG;
-
 
2001
}
-
 
2002
 
-
 
2003
/**
-
 
2004
 * This is called when the chip hasn't reported back with completed
-
 
2005
 * batchbuffers in a long time. We keep track of per-ring seqno progress and
-
 
2006
 * if there is no progress, the hangcheck score for that ring is increased.
-
 
2007
 * Further, acthd is inspected to see if the ring is stuck. In the stuck case
-
 
2008
 * we kick the ring. If we see no progress on three subsequent calls
-
 
2009
 * we assume the chip is wedged and try to fix it by resetting the chip.
-
 
2010
 */
-
 
2011
static void i915_hangcheck_elapsed(unsigned long data)
-
 
2012
{
-
 
2013
	struct drm_device *dev = (struct drm_device *)data;
-
 
2014
	drm_i915_private_t *dev_priv = dev->dev_private;
-
 
2015
	struct intel_ring_buffer *ring;
-
 
2016
	int i;
-
 
2017
	int busy_count = 0, rings_hung = 0;
-
 
2018
	bool stuck[I915_NUM_RINGS] = { 0 };
-
 
2019
#define BUSY 1
-
 
2020
#define KICK 5
-
 
2021
#define HUNG 20
-
 
2022
#define FIRE 30
-
 
2023
 
-
 
2024
	if (!i915_enable_hangcheck)
-
 
2025
		return;
-
 
2026
 
-
 
2027
	for_each_ring(ring, dev_priv, i) {
-
 
2028
		u32 seqno, acthd;
-
 
2029
		bool busy = true;
-
 
2030
 
-
 
2031
		semaphore_clear_deadlocks(dev_priv);
-
 
2032
 
-
 
2033
		seqno = ring->get_seqno(ring, false);
-
 
2034
		acthd = intel_ring_get_active_head(ring);
-
 
2035
 
-
 
2036
		if (ring->hangcheck.seqno == seqno) {
-
 
2037
			if (ring_idle(ring, seqno)) {
-
 
2038
//               if (waitqueue_active(&ring->irq_queue)) {
-
 
2039
					/* Issue a wake-up to catch stuck h/w. */
-
 
2040
//                   DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
-
 
2041
//                         ring->name);
-
 
2042
//                   wake_up_all(&ring->irq_queue);
-
 
2043
//                   ring->hangcheck.score += HUNG;
-
 
2044
//               } else
-
 
2045
					busy = false;
-
 
2046
			} else {
-
 
2047
				/* We always increment the hangcheck score
-
 
2048
				 * if the ring is busy and still processing
-
 
2049
				 * the same request, so that no single request
-
 
2050
				 * can run indefinitely (such as a chain of
-
 
2051
				 * batches). The only time we do not increment
-
 
2052
				 * the hangcheck score on this ring is if this
-
 
2053
				 * ring is in a legitimate wait for another
-
 
2054
				 * ring. In that case the waiting ring is a
-
 
2055
				 * victim and we want to be sure we catch the
-
 
2056
				 * right culprit. Then every time we do kick
-
 
2057
				 * the ring, add a small increment to the
-
 
2058
				 * score so that we can catch a batch that is
-
 
2059
				 * being repeatedly kicked and so responsible
-
 
2060
				 * for stalling the machine.
-
 
2061
				 */
-
 
2062
				ring->hangcheck.action = ring_stuck(ring,
1908
    POSTING_READ(DEIER);
2063
								    acthd);
-
 
2064
 
-
 
2065
				switch (ring->hangcheck.action) {
-
 
2066
				case HANGCHECK_WAIT:
-
 
2067
					break;
-
 
2068
				case HANGCHECK_ACTIVE:
-
 
2069
					ring->hangcheck.score += BUSY;
-
 
2070
					break;
-
 
2071
				case HANGCHECK_KICK:
-
 
2072
					ring->hangcheck.score += KICK;
-
 
2073
					break;
-
 
2074
				case HANGCHECK_HUNG:
-
 
2075
					ring->hangcheck.score += HUNG;
-
 
2076
					stuck[i] = true;
-
 
2077
					break;
-
 
2078
				}
-
 
2079
			}
-
 
2080
		} else {
-
 
2081
			/* Gradually reduce the count so that we catch DoS
-
 
2082
			 * attempts across multiple batches.
-
 
2083
			 */
-
 
2084
			if (ring->hangcheck.score > 0)
-
 
2085
				ring->hangcheck.score--;
-
 
2086
		}
-
 
2087
 
-
 
2088
		ring->hangcheck.seqno = seqno;
-
 
2089
		ring->hangcheck.acthd = acthd;
-
 
2090
		busy_count += busy;
-
 
2091
	}
-
 
2092
 
-
 
2093
	for_each_ring(ring, dev_priv, i) {
-
 
2094
		if (ring->hangcheck.score > FIRE) {
-
 
2095
			DRM_INFO("%s on %s\n",
-
 
2096
				  stuck[i] ? "stuck" : "no progress",
-
 
2097
				  ring->name);
-
 
2098
			rings_hung++;
-
 
2099
		}
-
 
2100
	}
-
 
2101
 
-
 
2102
//   if (rings_hung)
-
 
2103
//       return i915_handle_error(dev, true);
-
 
2104
 
1909
 
2105
}
1910
    /* and GT */
2106
 
1911
    I915_WRITE(GTIMR, 0xffffffff);
2107
static void ibx_irq_preinstall(struct drm_device *dev)
1912
    I915_WRITE(GTIER, 0x0);
2108
{
1913
    POSTING_READ(GTIER);
2109
	struct drm_i915_private *dev_priv = dev->dev_private;
1914
 
2110
 
1915
	if (HAS_PCH_NOP(dev))
2111
	if (HAS_PCH_NOP(dev))
1916
		return;
2112
		return;
1917
 
2113
 
1918
    /* south display irq */
2114
	/* south display irq */
1919
    I915_WRITE(SDEIMR, 0xffffffff);
2115
	I915_WRITE(SDEIMR, 0xffffffff);
1920
	/*
2116
	/*
1921
	 * SDEIER is also touched by the interrupt handler to work around missed
2117
	 * SDEIER is also touched by the interrupt handler to work around missed
1922
	 * PCH interrupts. Hence we can't update it after the interrupt handler
2118
	 * PCH interrupts. Hence we can't update it after the interrupt handler
1923
	 * is enabled - instead we unconditionally enable all PCH interrupt
2119
	 * is enabled - instead we unconditionally enable all PCH interrupt
1924
	 * sources here, but then only unmask them as needed with SDEIMR.
2120
	 * sources here, but then only unmask them as needed with SDEIMR.
1925
	 */
2121
	 */
1926
	I915_WRITE(SDEIER, 0xffffffff);
2122
	I915_WRITE(SDEIER, 0xffffffff);
1927
    POSTING_READ(SDEIER);
2123
	POSTING_READ(SDEIER);
1928
}
2124
}
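The comment above is one half of a two-sided workaround; the other half sits in the CPU interrupt handler, which is expected to park SDEIER for the duration of the handler and restore it on exit, so that a PCH interrupt arriving in that window re-asserts the CPU interrupt instead of being lost. A hedged sketch of that handler-side pattern (not a verbatim excerpt of the handler in this file):

	/* In the irq handler: mask the south bridge for the duration... */
	u32 sde_ier = I915_READ(SDEIER);
	I915_WRITE(SDEIER, 0);
	POSTING_READ(SDEIER);

	/* ... read and ack DEIIR, GTIIR and SDEIIR here ... */

	/* ... then restore it; anything that queued up re-fires the IRQ. */
	I915_WRITE(SDEIER, sde_ier);
	POSTING_READ(SDEIER);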
-
 
2125
 
-
 
2126
static void gen5_gt_irq_preinstall(struct drm_device *dev)
-
 
2127
{
-
 
2128
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
2129
 
-
 
2130
    /* and GT */
-
 
2131
    I915_WRITE(GTIMR, 0xffffffff);
-
 
2132
    I915_WRITE(GTIER, 0x0);
-
 
2133
    POSTING_READ(GTIER);
-
 
2134
 
-
 
2135
	if (INTEL_INFO(dev)->gen >= 6) {
-
 
2136
		/* and PM */
-
 
2137
		I915_WRITE(GEN6_PMIMR, 0xffffffff);
-
 
2138
		I915_WRITE(GEN6_PMIER, 0x0);
-
 
2139
		POSTING_READ(GEN6_PMIER);
-
 
2140
}
-
 
2141
}
-
 
2142
 
-
 
2143
/* drm_dma.h hooks
-
 
2144
*/
-
 
2145
static void ironlake_irq_preinstall(struct drm_device *dev)
-
 
2146
{
-
 
2147
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
 
2148
 
-
 
2149
	atomic_set(&dev_priv->irq_received, 0);
-
 
2150
 
-
 
2151
	I915_WRITE(HWSTAM, 0xeffe);
-
 
2152
 
-
 
2153
	I915_WRITE(DEIMR, 0xffffffff);
-
 
2154
	I915_WRITE(DEIER, 0x0);
-
 
2155
	POSTING_READ(DEIER);
-
 
2156
 
-
 
2157
	gen5_gt_irq_preinstall(dev);
-
 
2158
 
-
 
2159
	ibx_irq_preinstall(dev);
-
 
2160
}
1929
 
2161
 
1930
static void valleyview_irq_preinstall(struct drm_device *dev)
2162
static void valleyview_irq_preinstall(struct drm_device *dev)
1931
{
2163
{
1932
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2164
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1933
	int pipe;
2165
	int pipe;
1934
 
2166
 
1935
	atomic_set(&dev_priv->irq_received, 0);
2167
	atomic_set(&dev_priv->irq_received, 0);
1936
 
2168
 
1937
	/* VLV magic */
2169
	/* VLV magic */
1938
	I915_WRITE(VLV_IMR, 0);
2170
	I915_WRITE(VLV_IMR, 0);
1939
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
2171
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
1940
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
2172
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
1941
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
2173
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
1942
 
2174
 
1943
	/* and GT */
2175
	/* and GT */
1944
	I915_WRITE(GTIIR, I915_READ(GTIIR));
2176
	I915_WRITE(GTIIR, I915_READ(GTIIR));
1945
	I915_WRITE(GTIIR, I915_READ(GTIIR));
2177
	I915_WRITE(GTIIR, I915_READ(GTIIR));
-
 
2178
 
1946
	I915_WRITE(GTIMR, 0xffffffff);
2179
	gen5_gt_irq_preinstall(dev);
1947
	I915_WRITE(GTIER, 0x0);
-
 
1948
	POSTING_READ(GTIER);
-
 
1949
 
2180
 
1950
	I915_WRITE(DPINVGTT, 0xff);
2181
	I915_WRITE(DPINVGTT, 0xff);
1951
 
2182
 
1952
	I915_WRITE(PORT_HOTPLUG_EN, 0);
2183
	I915_WRITE(PORT_HOTPLUG_EN, 0);
1953
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2184
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1954
	for_each_pipe(pipe)
2185
	for_each_pipe(pipe)
1955
		I915_WRITE(PIPESTAT(pipe), 0xffff);
2186
		I915_WRITE(PIPESTAT(pipe), 0xffff);
1956
	I915_WRITE(VLV_IIR, 0xffffffff);
2187
	I915_WRITE(VLV_IIR, 0xffffffff);
1957
	I915_WRITE(VLV_IMR, 0xffffffff);
2188
	I915_WRITE(VLV_IMR, 0xffffffff);
1958
	I915_WRITE(VLV_IER, 0x0);
2189
	I915_WRITE(VLV_IER, 0x0);
1959
	POSTING_READ(VLV_IER);
2190
	POSTING_READ(VLV_IER);
1960
}
2191
}
1961
 
2192
 
1962
static void ibx_hpd_irq_setup(struct drm_device *dev)
2193
static void ibx_hpd_irq_setup(struct drm_device *dev)
1963
{
2194
{
1964
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2195
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1965
	struct drm_mode_config *mode_config = &dev->mode_config;
2196
	struct drm_mode_config *mode_config = &dev->mode_config;
1966
	struct intel_encoder *intel_encoder;
2197
	struct intel_encoder *intel_encoder;
1967
	u32 mask = ~I915_READ(SDEIMR);
2198
	u32 hotplug_irqs, hotplug, enabled_irqs = 0;
1968
	u32 hotplug;
-
 
1969
 
2199
 
1970
	if (HAS_PCH_IBX(dev)) {
2200
	if (HAS_PCH_IBX(dev)) {
1971
		mask &= ~SDE_HOTPLUG_MASK;
2201
		hotplug_irqs = SDE_HOTPLUG_MASK;
1972
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2202
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
1973
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2203
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
1974
				mask |= hpd_ibx[intel_encoder->hpd_pin];
2204
				enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
1975
	} else {
2205
	} else {
1976
		mask &= ~SDE_HOTPLUG_MASK_CPT;
2206
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
1977
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2207
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
1978
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2208
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
1979
				mask |= hpd_cpt[intel_encoder->hpd_pin];
2209
				enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
1980
	}
2210
	}
1981
 
2211
 
1982
	I915_WRITE(SDEIMR, ~mask);
2212
	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
1983
 
2213
 
1984
	/*
2214
	/*
1985
 * Enable digital hotplug on the PCH, and configure the DP short pulse
2215
 * Enable digital hotplug on the PCH, and configure the DP short pulse
1986
 * duration to 2ms (which is the minimum in the Display Port spec)
2216
 * duration to 2ms (which is the minimum in the Display Port spec)
1987
 *
2217
 *
1988
 * This register is the same on all known PCH chips.
2218
 * This register is the same on all known PCH chips.
1989
 */
2219
 */
1990
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
2220
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
1991
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
2221
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
1992
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
2222
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
1993
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
2223
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
1994
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
2224
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
1995
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
2225
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
1996
}
2226
}
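The new revision routes the SDEIMR update through ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs), which is defined elsewhere in the file. Conceptually it owns only the bits in hotplug_irqs and, within that set, unmasks exactly enabled_irqs; a hedged sketch of that computation (a set bit in SDEIMR masks the source, and the helper name below is illustrative):

/* Sketch: touch only the bits named in interrupt_mask, leave the rest
 * of SDEIMR alone. Assumes dev_priv->irq_lock is held. */
static void sdeimr_update_sketch(struct drm_i915_private *dev_priv,
				 u32 interrupt_mask, u32 enabled_irq_mask)
{
	u32 sdeimr = I915_READ(SDEIMR);

	sdeimr &= ~interrupt_mask;			/* unmask everything we own... */
	sdeimr |= (~enabled_irq_mask & interrupt_mask);	/* ...then re-mask the disabled ones */

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}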
1997
 
2227
 
1998
static void ibx_irq_postinstall(struct drm_device *dev)
2228
static void ibx_irq_postinstall(struct drm_device *dev)
1999
{
2229
{
2000
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2230
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2001
	u32 mask;
2231
	u32 mask;
2002
 
-
 
2003
	if (HAS_PCH_IBX(dev))
-
 
2004
		mask = SDE_GMBUS | SDE_AUX_MASK;
-
 
2005
	else
-
 
2006
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
-
 
2007
 
2232
 
2008
	if (HAS_PCH_NOP(dev))
2233
	if (HAS_PCH_NOP(dev))
2009
		return;
2234
		return;
-
 
2235
 
-
 
2236
	if (HAS_PCH_IBX(dev)) {
-
 
2237
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
-
 
2238
		       SDE_TRANSA_FIFO_UNDER | SDE_POISON;
-
 
2239
	} else {
-
 
2240
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;
-
 
2241
 
-
 
2242
		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
-
 
2243
	}
2010
 
2244
 
2011
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2245
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2012
	I915_WRITE(SDEIMR, ~mask);
2246
	I915_WRITE(SDEIMR, ~mask);
2013
}
2247
}
2014
 
2248
 
2015
static int ironlake_irq_postinstall(struct drm_device *dev)
2249
static void gen5_gt_irq_postinstall(struct drm_device *dev)
2016
{
2250
{
2017
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
 
2018
    /* enable the kinds of interrupts that are always enabled */
-
 
2019
    u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
-
 
2020
			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
-
 
2021
			   DE_AUX_CHANNEL_A;
2251
	struct drm_i915_private *dev_priv = dev->dev_private;
2022
    u32 render_irqs;
-
 
2023
 
-
 
2024
    dev_priv->irq_mask = ~display_mask;
-
 
2025
 
-
 
2026
    /* should always be able to generate an irq */
-
 
2027
    I915_WRITE(DEIIR, I915_READ(DEIIR));
-
 
2028
    I915_WRITE(DEIMR, dev_priv->irq_mask);
2252
	u32 pm_irqs, gt_irqs;
2029
    I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
2253
 
-
 
2254
	pm_irqs = gt_irqs = 0;
-
 
2255
 
-
 
2256
	dev_priv->gt_irq_mask = ~0;
-
 
2257
	if (HAS_L3_GPU_CACHE(dev)) {
-
 
2258
		/* L3 parity interrupt is always unmasked. */
-
 
2259
		dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-
 
2260
		gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-
 
2261
	}
-
 
2262
 
-
 
2263
	gt_irqs |= GT_RENDER_USER_INTERRUPT;
-
 
2264
	if (IS_GEN5(dev)) {
-
 
2265
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
-
 
2266
			   ILK_BSD_USER_INTERRUPT;
2030
    POSTING_READ(DEIER);
2267
	} else {
2031
 
2268
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
2032
	dev_priv->gt_irq_mask = ~0;
2269
	}
2033
 
2270
 
2034
    I915_WRITE(GTIIR, I915_READ(GTIIR));
2271
	I915_WRITE(GTIIR, I915_READ(GTIIR));
2035
    I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2272
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2036
 
-
 
2037
    if (IS_GEN6(dev))
-
 
2038
        render_irqs =
-
 
2039
            GT_USER_INTERRUPT |
-
 
2040
			GEN6_BSD_USER_INTERRUPT |
-
 
2041
			GEN6_BLITTER_USER_INTERRUPT;
-
 
2042
    else
-
 
2043
        render_irqs =
-
 
2044
            GT_USER_INTERRUPT |
-
 
2045
            GT_PIPE_NOTIFY |
-
 
2046
            GT_BSD_USER_INTERRUPT;
-
 
2047
    I915_WRITE(GTIER, render_irqs);
2273
	I915_WRITE(GTIER, gt_irqs);
2048
    POSTING_READ(GTIER);
2274
    POSTING_READ(GTIER);
-
 
2275
 
2049
 
2276
	if (INTEL_INFO(dev)->gen >= 6) {
2050
	ibx_irq_postinstall(dev);
2277
		pm_irqs |= GEN6_PM_RPS_EVENTS;
2051
 
-
 
2052
    if (IS_IRONLAKE_M(dev)) {
2278
 
2053
        /* Clear & enable PCU event interrupts */
-
 
2054
        I915_WRITE(DEIIR, DE_PCU_EVENT);
-
 
2055
        I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
-
 
-
 
2279
		if (HAS_VEBOX(dev))
-
 
2280
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
-
 
2281
 
-
 
2282
		dev_priv->pm_irq_mask = 0xffffffff;
-
 
2283
		I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
2056
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
2284
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
2057
    }
2285
		I915_WRITE(GEN6_PMIER, pm_irqs);
2058
 
2286
		POSTING_READ(GEN6_PMIER);
2059
    return 0;
2287
    }
-
 
2288
}
2060
}
2289
 
2061
 
2290
static int ironlake_irq_postinstall(struct drm_device *dev)
-
 
2291
{
2062
static int ivybridge_irq_postinstall(struct drm_device *dev)
2292
	unsigned long irqflags;
2063
{
2293
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2064
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2294
	u32 display_mask, extra_mask;
2065
	/* enable the kinds of interrupts that are always enabled */
2295
 
2066
	u32 display_mask =
2296
	if (INTEL_INFO(dev)->gen >= 7) {
2067
		DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
2297
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
-
 
2298
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
-
 
2299
		DE_PLANEB_FLIP_DONE_IVB |
-
 
2300
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
-
 
2301
				DE_ERR_INT_IVB);
2068
		DE_PLANEC_FLIP_DONE_IVB |
2302
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
-
 
2303
			      DE_PIPEA_VBLANK_IVB);
-
 
2304
 
-
 
2305
		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
-
 
2306
	} else {
-
 
2307
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
-
 
2308
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
2069
		DE_PLANEB_FLIP_DONE_IVB |
2309
				DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
2070
		DE_PLANEA_FLIP_DONE_IVB |
2310
				DE_PIPEA_FIFO_UNDERRUN | DE_POISON);
2071
		DE_AUX_CHANNEL_A_IVB;
2311
		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
2072
	u32 render_irqs;
2312
	}
2073
 
2313
 
2074
	dev_priv->irq_mask = ~display_mask;
2314
	dev_priv->irq_mask = ~display_mask;
2075
 
2315
 
2076
	/* should always be able to generate an irq */
2316
	/* should always be able to generate an irq */
2077
	I915_WRITE(DEIIR, I915_READ(DEIIR));
2317
	I915_WRITE(DEIIR, I915_READ(DEIIR));
2078
	I915_WRITE(DEIMR, dev_priv->irq_mask);
2318
	I915_WRITE(DEIMR, dev_priv->irq_mask);
2079
	I915_WRITE(DEIER,
2319
	I915_WRITE(DEIER, display_mask | extra_mask);
2080
		   display_mask |
-
 
2081
		   DE_PIPEC_VBLANK_IVB |
-
 
2082
		   DE_PIPEB_VBLANK_IVB |
-
 
2083
		   DE_PIPEA_VBLANK_IVB);
-
 
2084
	POSTING_READ(DEIER);
2320
	POSTING_READ(DEIER);
2085
 
-
 
2086
	dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
-
 
2087
 
-
 
2088
	I915_WRITE(GTIIR, I915_READ(GTIIR));
-
 
2089
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-
 
2090
 
-
 
2091
	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
-
 
2092
		GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
2321
 
2093
	I915_WRITE(GTIER, render_irqs);
-
 
2094
	POSTING_READ(GTIER);
2322
	gen5_gt_irq_postinstall(dev);
2095
 
2323
 
2096
	ibx_irq_postinstall(dev);
2324
	ibx_irq_postinstall(dev);
-
 
2325
 
-
 
2326
	if (IS_IRONLAKE_M(dev)) {
-
 
2327
		/* Enable PCU event interrupts
-
 
2328
		 *
-
 
2329
		 * spinlocking not required here for correctness since interrupt
-
 
2330
		 * setup is guaranteed to run in single-threaded context. But we
-
 
2331
		 * need it to make the assert_spin_locked happy. */
-
 
2332
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
 
2333
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
-
 
2334
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-
 
2335
	}
2097
 
2336
 
2098
	return 0;
2337
	return 0;
2099
}
2338
}
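The spinlock taken around ironlake_enable_display_irq() in the IS_IRONLAKE_M() branch exists only because that helper asserts the irq_lock is held before it touches DEIMR. The helper itself is defined earlier in the file; as a hedged sketch of its usual shape (field and register names as used throughout this file, but the exact body is an assumption):

/* Sketch of the DEIMR unmask helper referred to above. Clearing a bit
 * in DEIMR unmasks the corresponding display-engine interrupt. */
static void enable_display_irq_sketch(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}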
2100
 
2339
 
2101
static int valleyview_irq_postinstall(struct drm_device *dev)
2340
static int valleyview_irq_postinstall(struct drm_device *dev)
2102
{
2341
{
2103
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2342
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2104
	u32 enable_mask;
2343
	u32 enable_mask;
2105
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
2344
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
2106
	u32 render_irqs;
2345
	unsigned long irqflags;
2107
	u16 msid;
-
 
2108
 
2346
 
2109
	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
2347
	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
2110
	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2348
	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2111
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2349
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2112
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2350
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2113
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2351
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2114
 
2352
 
2115
	/*
2353
	/*
2116
	 * Leave vblank interrupts masked initially.  enable/disable will
2354
	 * Leave vblank interrupts masked initially.  enable/disable will
2117
	 * toggle them based on usage.
2355
	 * toggle them based on usage.
2118
	 */
2356
	 */
2119
	dev_priv->irq_mask = (~enable_mask) |
2357
	dev_priv->irq_mask = (~enable_mask) |
2120
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2358
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2121
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2359
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2122
 
-
 
2123
	/* Hack for broken MSIs on VLV */
-
 
2124
//   pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
-
 
2125
//   pci_read_config_word(dev->pdev, 0x98, &msid);
-
 
2126
//   msid &= 0xff; /* mask out delivery bits */
-
 
2127
//   msid |= (1<<14);
-
 
2128
//   pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
-
 
2129
 
2360
 
2130
	I915_WRITE(PORT_HOTPLUG_EN, 0);
2361
	I915_WRITE(PORT_HOTPLUG_EN, 0);
2131
	POSTING_READ(PORT_HOTPLUG_EN);
2362
	POSTING_READ(PORT_HOTPLUG_EN);
2132
 
2363
 
2133
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
2364
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
2134
	I915_WRITE(VLV_IER, enable_mask);
2365
	I915_WRITE(VLV_IER, enable_mask);
2135
	I915_WRITE(VLV_IIR, 0xffffffff);
2366
	I915_WRITE(VLV_IIR, 0xffffffff);
2136
	I915_WRITE(PIPESTAT(0), 0xffff);
2367
	I915_WRITE(PIPESTAT(0), 0xffff);
2137
	I915_WRITE(PIPESTAT(1), 0xffff);
2368
	I915_WRITE(PIPESTAT(1), 0xffff);
2138
	POSTING_READ(VLV_IER);
2369
	POSTING_READ(VLV_IER);
-
 
2370
 
-
 
2371
	/* Interrupt setup is already guaranteed to be single-threaded, this is
-
 
2372
	 * just to make the assert_spin_locked check happy. */
2139
 
2373
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2140
	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
2374
	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
2141
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2375
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2142
	i915_enable_pipestat(dev_priv, 1, pipestat_enable);
2376
	i915_enable_pipestat(dev_priv, 1, pipestat_enable);
-
 
2377
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2143
 
2378
 
2144
	I915_WRITE(VLV_IIR, 0xffffffff);
2379
	I915_WRITE(VLV_IIR, 0xffffffff);
2145
	I915_WRITE(VLV_IIR, 0xffffffff);
2380
	I915_WRITE(VLV_IIR, 0xffffffff);
2146
 
-
 
2147
	I915_WRITE(GTIIR, I915_READ(GTIIR));
-
 
2148
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-
 
2149
 
-
 
2150
	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
-
 
2151
		GEN6_BLITTER_USER_INTERRUPT;
2381
 
2152
	I915_WRITE(GTIER, render_irqs);
-
 
2153
	POSTING_READ(GTIER);
2382
	gen5_gt_irq_postinstall(dev);
2154
 
2383
 
2155
	/* ack & enable invalid PTE error interrupts */
2384
	/* ack & enable invalid PTE error interrupts */
2156
#if 0 /* FIXME: add support to irq handler for checking these bits */
2385
#if 0 /* FIXME: add support to irq handler for checking these bits */
2157
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
2386
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
2158
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
2387
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
2159
#endif
2388
#endif
2160
 
2389
 
2161
	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
2390
	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
2162
 
2391
 
2163
	return 0;
2392
	return 0;
2164
}
2393
}
2165
 
2394
 
2166
static void valleyview_irq_uninstall(struct drm_device *dev)
2395
static void valleyview_irq_uninstall(struct drm_device *dev)
2167
{
2396
{
2168
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2397
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2169
	int pipe;
2398
	int pipe;
2170
 
2399
 
2171
	if (!dev_priv)
2400
	if (!dev_priv)
2172
		return;
2401
		return;
2173
 
2402
 
2174
	for_each_pipe(pipe)
2403
	for_each_pipe(pipe)
2175
		I915_WRITE(PIPESTAT(pipe), 0xffff);
2404
		I915_WRITE(PIPESTAT(pipe), 0xffff);
2176
 
2405
 
2177
	I915_WRITE(HWSTAM, 0xffffffff);
2406
	I915_WRITE(HWSTAM, 0xffffffff);
2178
	I915_WRITE(PORT_HOTPLUG_EN, 0);
2407
	I915_WRITE(PORT_HOTPLUG_EN, 0);
2179
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2408
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2180
	for_each_pipe(pipe)
2409
	for_each_pipe(pipe)
2181
		I915_WRITE(PIPESTAT(pipe), 0xffff);
2410
		I915_WRITE(PIPESTAT(pipe), 0xffff);
2182
	I915_WRITE(VLV_IIR, 0xffffffff);
2411
	I915_WRITE(VLV_IIR, 0xffffffff);
2183
	I915_WRITE(VLV_IMR, 0xffffffff);
2412
	I915_WRITE(VLV_IMR, 0xffffffff);
2184
	I915_WRITE(VLV_IER, 0x0);
2413
	I915_WRITE(VLV_IER, 0x0);
2185
	POSTING_READ(VLV_IER);
2414
	POSTING_READ(VLV_IER);
2186
}
2415
}
2187
 
2416
 
2188
static void ironlake_irq_uninstall(struct drm_device *dev)
2417
static void ironlake_irq_uninstall(struct drm_device *dev)
2189
{
2418
{
2190
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2419
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2191
 
2420
 
2192
	if (!dev_priv)
2421
	if (!dev_priv)
2193
		return;
2422
		return;
2194
 
2423
 
2195
	I915_WRITE(HWSTAM, 0xffffffff);
2424
	I915_WRITE(HWSTAM, 0xffffffff);
2196
 
2425
 
2197
	I915_WRITE(DEIMR, 0xffffffff);
2426
	I915_WRITE(DEIMR, 0xffffffff);
2198
	I915_WRITE(DEIER, 0x0);
2427
	I915_WRITE(DEIER, 0x0);
2199
	I915_WRITE(DEIIR, I915_READ(DEIIR));
2428
	I915_WRITE(DEIIR, I915_READ(DEIIR));
-
 
2429
	if (IS_GEN7(dev))
-
 
2430
		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
2200
 
2431
 
2201
	I915_WRITE(GTIMR, 0xffffffff);
2432
	I915_WRITE(GTIMR, 0xffffffff);
2202
	I915_WRITE(GTIER, 0x0);
2433
	I915_WRITE(GTIER, 0x0);
2203
	I915_WRITE(GTIIR, I915_READ(GTIIR));
2434
	I915_WRITE(GTIIR, I915_READ(GTIIR));
2204
 
2435
 
2205
	if (HAS_PCH_NOP(dev))
2436
	if (HAS_PCH_NOP(dev))
2206
		return;
2437
		return;
2207
 
2438
 
2208
	I915_WRITE(SDEIMR, 0xffffffff);
2439
	I915_WRITE(SDEIMR, 0xffffffff);
2209
	I915_WRITE(SDEIER, 0x0);
2440
	I915_WRITE(SDEIER, 0x0);
2210
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2441
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
-
 
2442
	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
-
 
2443
		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
2211
}
2444
}
2212
 
2445
 
2213
#if 0
2446
#if 0
2214
 
2447
 
2215
static void i8xx_irq_preinstall(struct drm_device * dev)
2448
static void i8xx_irq_preinstall(struct drm_device * dev)
2216
{
2449
{
2217
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2450
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2218
	int pipe;
2451
	int pipe;
2219
 
2452
 
2220
	atomic_set(&dev_priv->irq_received, 0);
2453
	atomic_set(&dev_priv->irq_received, 0);
2221
 
2454
 
2222
	for_each_pipe(pipe)
2455
	for_each_pipe(pipe)
2223
		I915_WRITE(PIPESTAT(pipe), 0);
2456
		I915_WRITE(PIPESTAT(pipe), 0);
2224
	I915_WRITE16(IMR, 0xffff);
2457
	I915_WRITE16(IMR, 0xffff);
2225
	I915_WRITE16(IER, 0x0);
2458
	I915_WRITE16(IER, 0x0);
2226
	POSTING_READ16(IER);
2459
	POSTING_READ16(IER);
2227
}
2460
}
2228
 
2461
 
2229
static int i8xx_irq_postinstall(struct drm_device *dev)
2462
static int i8xx_irq_postinstall(struct drm_device *dev)
2230
{
2463
{
2231
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2464
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2232
 
2465
 
2233
	I915_WRITE16(EMR,
2466
	I915_WRITE16(EMR,
2234
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2467
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2235
 
2468
 
2236
	/* Unmask the interrupts that we always want on. */
2469
	/* Unmask the interrupts that we always want on. */
2237
	dev_priv->irq_mask =
2470
	dev_priv->irq_mask =
2238
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2471
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2239
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2472
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2240
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2473
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2241
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2474
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2242
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2475
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2243
	I915_WRITE16(IMR, dev_priv->irq_mask);
2476
	I915_WRITE16(IMR, dev_priv->irq_mask);
2244
 
2477
 
2245
	I915_WRITE16(IER,
2478
	I915_WRITE16(IER,
2246
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2479
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2247
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2480
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2248
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2481
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2249
		     I915_USER_INTERRUPT);
2482
		     I915_USER_INTERRUPT);
2250
	POSTING_READ16(IER);
2483
	POSTING_READ16(IER);
2251
 
2484
 
2252
	return 0;
2485
	return 0;
2253
}
2486
}
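i8xx_irq_postinstall() programs three registers whose roles are easy to conflate: roughly, EMR selects which error sources may be reported through EIR (and the master error interrupt), IMR masks which events get latched into IIR, and IER decides which latched events actually assert the CPU interrupt. A hedged illustration of that layering with placeholder masks (wanted_errors/wanted_events are illustrative names, not identifiers from this driver):

	/* Placeholder masks, purely illustrative. */
	u16 wanted_errors = I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH;
	u16 wanted_events = I915_USER_INTERRUPT;

	I915_WRITE16(EMR, ~wanted_errors);	/* which errors may reach EIR */
	I915_WRITE16(IMR, ~wanted_events);	/* which events are latched into IIR */
	I915_WRITE16(IER, wanted_events);	/* which latched events raise the IRQ */
	POSTING_READ16(IER);			/* flush before interrupts can fire */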
2254
 
2487
 
2255
/*
2488
/*
2256
 * Returns true when a page flip has completed.
2489
 * Returns true when a page flip has completed.
2257
 */
2490
 */
2258
static bool i8xx_handle_vblank(struct drm_device *dev,
2491
static bool i8xx_handle_vblank(struct drm_device *dev,
2259
			       int pipe, u16 iir)
2492
			       int pipe, u16 iir)
2260
{
2493
{
2261
	drm_i915_private_t *dev_priv = dev->dev_private;
2494
	drm_i915_private_t *dev_priv = dev->dev_private;
2262
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
2495
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
2263
 
2496
 
2264
//   if (!drm_handle_vblank(dev, pipe))
2497
//   if (!drm_handle_vblank(dev, pipe))
2265
       return false;
2498
       return false;
2266
 
2499
 
2267
	if ((iir & flip_pending) == 0)
2500
	if ((iir & flip_pending) == 0)
2268
		return false;
2501
		return false;
2269
 
2502
 
2270
//   intel_prepare_page_flip(dev, pipe);
2503
//   intel_prepare_page_flip(dev, pipe);
2271
 
2504
 
2272
	/* We detect FlipDone by looking for the change in PendingFlip from '1'
2505
	/* We detect FlipDone by looking for the change in PendingFlip from '1'
2273
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
2506
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
2274
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
2507
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
2275
	 * the flip is completed (no longer pending). Since this doesn't raise
2508
	 * the flip is completed (no longer pending). Since this doesn't raise
2276
	 * an interrupt per se, we watch for the change at vblank.
2509
	 * an interrupt per se, we watch for the change at vblank.
2277
	 */
2510
	 */
2278
	if (I915_READ16(ISR) & flip_pending)
2511
	if (I915_READ16(ISR) & flip_pending)
2279
		return false;
2512
		return false;
2280
 
2513
 
2281
	intel_finish_page_flip(dev, pipe);
2514
	intel_finish_page_flip(dev, pipe);
2282
 
2515
 
2283
	return true;
2516
	return true;
2284
}
2517
}
2285
 
2518
 
2286
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
2519
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
2287
{
2520
{
2288
	struct drm_device *dev = (struct drm_device *) arg;
2521
	struct drm_device *dev = (struct drm_device *) arg;
2289
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2522
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2290
	u16 iir, new_iir;
2523
	u16 iir, new_iir;
2291
	u32 pipe_stats[2];
2524
	u32 pipe_stats[2];
2292
	unsigned long irqflags;
2525
	unsigned long irqflags;
2293
	int irq_received;
-
 
2294
	int pipe;
2526
	int pipe;
2295
	u16 flip_mask =
2527
	u16 flip_mask =
2296
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2528
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2297
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2529
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2298
 
2530
 
2299
	atomic_inc(&dev_priv->irq_received);
2531
	atomic_inc(&dev_priv->irq_received);
2300
 
2532
 
2301
	iir = I915_READ16(IIR);
2533
	iir = I915_READ16(IIR);
2302
	if (iir == 0)
2534
	if (iir == 0)
2303
		return IRQ_NONE;
2535
		return IRQ_NONE;
2304
 
2536
 
2305
	while (iir & ~flip_mask) {
2537
	while (iir & ~flip_mask) {
2306
		/* Can't rely on pipestat interrupt bit in iir as it might
2538
		/* Can't rely on pipestat interrupt bit in iir as it might
2307
		 * have been cleared after the pipestat interrupt was received.
2539
		 * have been cleared after the pipestat interrupt was received.
2308
		 * It doesn't set the bit in iir again, but it still produces
2540
		 * It doesn't set the bit in iir again, but it still produces
2309
		 * interrupts (for non-MSI).
2541
		 * interrupts (for non-MSI).
2310
		 */
2542
		 */
2311
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2543
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2312
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2544
//       if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2313
			i915_handle_error(dev, false);
2545
//           i915_handle_error(dev, false);
2314
 
2546
 
2315
		for_each_pipe(pipe) {
2547
		for_each_pipe(pipe) {
2316
			int reg = PIPESTAT(pipe);
2548
			int reg = PIPESTAT(pipe);
2317
			pipe_stats[pipe] = I915_READ(reg);
2549
			pipe_stats[pipe] = I915_READ(reg);
2318
 
2550
 
2319
			/*
2551
			/*
2320
			 * Clear the PIPE*STAT regs before the IIR
2552
			 * Clear the PIPE*STAT regs before the IIR
2321
			 */
2553
			 */
2322
			if (pipe_stats[pipe] & 0x8000ffff) {
2554
			if (pipe_stats[pipe] & 0x8000ffff) {
2323
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2555
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2324
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
2556
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
2325
							 pipe_name(pipe));
2557
							 pipe_name(pipe));
2326
				I915_WRITE(reg, pipe_stats[pipe]);
2558
				I915_WRITE(reg, pipe_stats[pipe]);
2327
				irq_received = 1;
-
 
2328
			}
2559
			}
2329
		}
2560
		}
2330
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2561
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2331
 
2562
 
2332
		I915_WRITE16(IIR, iir & ~flip_mask);
2563
		I915_WRITE16(IIR, iir & ~flip_mask);
2333
		new_iir = I915_READ16(IIR); /* Flush posted writes */
2564
		new_iir = I915_READ16(IIR); /* Flush posted writes */
2334
 
2565
 
2335
		i915_update_dri1_breadcrumb(dev);
2566
		i915_update_dri1_breadcrumb(dev);
2336
 
2567
 
2337
		if (iir & I915_USER_INTERRUPT)
2568
		if (iir & I915_USER_INTERRUPT)
2338
			notify_ring(dev, &dev_priv->ring[RCS]);
2569
			notify_ring(dev, &dev_priv->ring[RCS]);
2339
 
2570
 
2340
		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
2571
		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
2341
		    i8xx_handle_vblank(dev, 0, iir))
2572
		    i8xx_handle_vblank(dev, 0, iir))
2342
			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
2573
			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
2343
 
2574
 
2344
		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
2575
		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
2345
		    i8xx_handle_vblank(dev, 1, iir))
2576
		    i8xx_handle_vblank(dev, 1, iir))
2346
			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);
2577
			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);
2347
 
2578
 
2348
		iir = new_iir;
2579
		iir = new_iir;
2349
	}
2580
	}
2350
 
2581
 
2351
	return IRQ_HANDLED;
2582
	return IRQ_HANDLED;
2352
}
2583
}
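The while (iir & ~flip_mask) loop in the handler above is the usual i915 ack-and-rescan pattern: snapshot IIR, handle and ack exactly the bits seen, then read IIR back both to flush the posted write and to pick up events that arrived while the handler was running; the handler returns only once nothing new shows up. A condensed, hedged skeleton of that idea (not a copy of the handler above):

	u16 iir = I915_READ16(IIR);

	while (iir) {
		/* ... decode iir, read PIPESTAT, notify rings ... */

		I915_WRITE16(IIR, iir);		/* ack what was handled */
		iir = I915_READ16(IIR);		/* flush + catch anything new */
	}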
2353
 
2584
 
2354
static void i8xx_irq_uninstall(struct drm_device * dev)
2585
static void i8xx_irq_uninstall(struct drm_device * dev)
2355
{
2586
{
2356
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2587
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2357
	int pipe;
2588
	int pipe;
2358
 
2589
 
2359
	for_each_pipe(pipe) {
2590
	for_each_pipe(pipe) {
2360
		/* Clear enable bits; then clear status bits */
2591
		/* Clear enable bits; then clear status bits */
2361
		I915_WRITE(PIPESTAT(pipe), 0);
2592
		I915_WRITE(PIPESTAT(pipe), 0);
2362
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2593
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2363
	}
2594
	}
2364
	I915_WRITE16(IMR, 0xffff);
2595
	I915_WRITE16(IMR, 0xffff);
2365
	I915_WRITE16(IER, 0x0);
2596
	I915_WRITE16(IER, 0x0);
2366
	I915_WRITE16(IIR, I915_READ16(IIR));
2597
	I915_WRITE16(IIR, I915_READ16(IIR));
2367
}
2598
}
2368
 
2599
 
2369
#endif
2600
#endif
2370
 
2601
 
2371
static void i915_irq_preinstall(struct drm_device * dev)
2602
static void i915_irq_preinstall(struct drm_device * dev)
2372
{
2603
{
2373
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2604
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2374
	int pipe;
2605
	int pipe;
2375
 
2606
 
2376
	atomic_set(&dev_priv->irq_received, 0);
2607
	atomic_set(&dev_priv->irq_received, 0);
2377
 
2608
 
2378
	if (I915_HAS_HOTPLUG(dev)) {
2609
	if (I915_HAS_HOTPLUG(dev)) {
2379
		I915_WRITE(PORT_HOTPLUG_EN, 0);
2610
		I915_WRITE(PORT_HOTPLUG_EN, 0);
2380
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2611
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2381
	}
2612
	}
2382
 
2613
 
2383
	I915_WRITE16(HWSTAM, 0xeffe);
2614
	I915_WRITE16(HWSTAM, 0xeffe);
2384
	for_each_pipe(pipe)
2615
	for_each_pipe(pipe)
2385
		I915_WRITE(PIPESTAT(pipe), 0);
2616
		I915_WRITE(PIPESTAT(pipe), 0);
2386
	I915_WRITE(IMR, 0xffffffff);
2617
	I915_WRITE(IMR, 0xffffffff);
2387
	I915_WRITE(IER, 0x0);
2618
	I915_WRITE(IER, 0x0);
2388
	POSTING_READ(IER);
2619
	POSTING_READ(IER);
2389
}
2620
}
2390
 
2621
 
2391
static int i915_irq_postinstall(struct drm_device *dev)
2622
static int i915_irq_postinstall(struct drm_device *dev)
2392
{
2623
{
2393
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2624
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2394
	u32 enable_mask;
2625
	u32 enable_mask;
2395
 
2626
 
2396
	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2627
	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2397
 
2628
 
2398
	/* Unmask the interrupts that we always want on. */
2629
	/* Unmask the interrupts that we always want on. */
2399
	dev_priv->irq_mask =
2630
	dev_priv->irq_mask =
2400
		~(I915_ASLE_INTERRUPT |
2631
		~(I915_ASLE_INTERRUPT |
2401
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2632
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2402
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2633
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2403
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2634
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2404
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2635
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2405
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2636
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2406
 
2637
 
2407
	enable_mask =
2638
	enable_mask =
2408
		I915_ASLE_INTERRUPT |
2639
		I915_ASLE_INTERRUPT |
2409
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2640
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2410
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2641
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2411
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2642
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2412
		I915_USER_INTERRUPT;
2643
		I915_USER_INTERRUPT;
2413
 
2644
 
2414
	if (I915_HAS_HOTPLUG(dev)) {
2645
	if (I915_HAS_HOTPLUG(dev)) {
2415
		I915_WRITE(PORT_HOTPLUG_EN, 0);
2646
		I915_WRITE(PORT_HOTPLUG_EN, 0);
2416
		POSTING_READ(PORT_HOTPLUG_EN);
2647
		POSTING_READ(PORT_HOTPLUG_EN);
2417
 
2648
 
2418
		/* Enable in IER... */
2649
		/* Enable in IER... */
2419
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
2650
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
2420
		/* and unmask in IMR */
2651
		/* and unmask in IMR */
2421
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
2652
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
2422
	}
2653
	}
2423
 
2654
 
2424
	I915_WRITE(IMR, dev_priv->irq_mask);
2655
	I915_WRITE(IMR, dev_priv->irq_mask);
2425
	I915_WRITE(IER, enable_mask);
2656
	I915_WRITE(IER, enable_mask);
2426
	POSTING_READ(IER);
2657
	POSTING_READ(IER);
2427
 
2658
 
2428
//	intel_opregion_enable_asle(dev);
2659
//	intel_opregion_enable_asle(dev);
2429
 
2660
 
2430
	return 0;
2661
	return 0;
2431
}
2662
}
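
/* Sketch (illustrative only): i915_irq_postinstall keeps two views of the same
 * set -- dev_priv->irq_mask holds the bits to mask in IMR, enable_mask holds
 * the bits to enable in IER.  The stand-alone model below shows how such a
 * mask/enable pair gates delivery, under the simplifying assumption (for the
 * example only) that a source reaches the CPU when it is enabled and unmasked;
 * the bit values are invented. */
#include <stdint.h>
#include <stdio.h>

#define USER_IRQ   (1u << 1)                 /* illustrative bit positions only */
#define PIPE_EVENT (1u << 6)
#define STRAY_BIT  (1u << 9)

static int delivered(uint32_t status, uint32_t ier, uint32_t imr)
{
	return (status & ier & ~imr) != 0;   /* enabled in IER and unmasked in IMR */
}

int main(void)
{
	uint32_t imr = ~(USER_IRQ | PIPE_EVENT);   /* mask everything else */
	uint32_t ier = USER_IRQ | PIPE_EVENT;      /* enable only what we handle */

	printf("user irq:  %d\n", delivered(USER_IRQ, ier, imr));
	printf("stray bit: %d\n", delivered(STRAY_BIT, ier, imr));
	return 0;
}
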
2432
 
2663
 
2433
/*
2664
/*
2434
 * Returns true when a page flip has completed.
2665
 * Returns true when a page flip has completed.
2435
 */
2666
 */
2436
static bool i915_handle_vblank(struct drm_device *dev,
2667
static bool i915_handle_vblank(struct drm_device *dev,
2437
			       int plane, int pipe, u32 iir)
2668
			       int plane, int pipe, u32 iir)
2438
{
2669
{
2439
	drm_i915_private_t *dev_priv = dev->dev_private;
2670
	drm_i915_private_t *dev_priv = dev->dev_private;
2440
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
2671
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
2441
 
2672
 
2442
//   if (!drm_handle_vblank(dev, pipe))
2673
//   if (!drm_handle_vblank(dev, pipe))
2443
		return false;
2674
		return false;
2444
 
2675
 
2445
	if ((iir & flip_pending) == 0)
2676
	if ((iir & flip_pending) == 0)
2446
		return false;
2677
		return false;
2447
 
2678
 
2448
//   intel_prepare_page_flip(dev, plane);
2679
//   intel_prepare_page_flip(dev, plane);
2449
 
2680
 
2450
	/* We detect FlipDone by looking for the change in PendingFlip from '1'
2681
	/* We detect FlipDone by looking for the change in PendingFlip from '1'
2451
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
2682
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
2452
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
2683
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
2453
	 * the flip is completed (no longer pending). Since this doesn't raise
2684
	 * the flip is completed (no longer pending). Since this doesn't raise
2454
	 * an interrupt per se, we watch for the change at vblank.
2685
	 * an interrupt per se, we watch for the change at vblank.
2455
	 */
2686
	 */
2456
	if (I915_READ(ISR) & flip_pending)
2687
	if (I915_READ(ISR) & flip_pending)
2457
		return false;
2688
		return false;
2458
 
2689
 
2459
	intel_finish_page_flip(dev, pipe);
2690
	intel_finish_page_flip(dev, pipe);
2460
 
2691
 
2461
	return true;
2692
	return true;
2462
}
2693
}
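
/* Sketch (illustrative only): the comment above describes FlipDone as a state
 * change rather than an interrupt -- the flip-pending bit was set in the IIR
 * snapshot but has dropped out of ISR by the time of the vblank.  The
 * stand-alone model below applies that rule to plain integers; the bit layout
 * and the flip_done() helper are invented for the example. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FLIP_PENDING(plane) (1u << (8 + (plane)))   /* made-up bit positions */

static bool flip_done(uint32_t iir_snapshot, uint32_t isr_now, int plane)
{
	uint32_t bit = FLIP_PENDING(plane);

	if ((iir_snapshot & bit) == 0)       /* never pending in this pass */
		return false;
	if (isr_now & bit)                   /* still pending: flip not finished */
		return false;
	return true;                         /* pending then, clear now: done */
}

int main(void)
{
	printf("%d\n", flip_done(FLIP_PENDING(0), 0, 0));                 /* 1 */
	printf("%d\n", flip_done(FLIP_PENDING(0), FLIP_PENDING(0), 0));   /* 0 */
	return 0;
}
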
2463
 
2694
 
2464
static irqreturn_t i915_irq_handler(int irq, void *arg)
2695
static irqreturn_t i915_irq_handler(int irq, void *arg)
2465
{
2696
{
2466
	struct drm_device *dev = (struct drm_device *) arg;
2697
	struct drm_device *dev = (struct drm_device *) arg;
2467
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2698
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2468
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
2699
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
2469
	unsigned long irqflags;
2700
	unsigned long irqflags;
2470
	u32 flip_mask =
2701
	u32 flip_mask =
2471
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2702
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2472
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2703
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2473
	int pipe, ret = IRQ_NONE;
2704
	int pipe, ret = IRQ_NONE;
2474
 
2705
 
2475
	atomic_inc(&dev_priv->irq_received);
2706
	atomic_inc(&dev_priv->irq_received);
2476
 
2707
 
2477
	iir = I915_READ(IIR);
2708
	iir = I915_READ(IIR);
2478
	do {
2709
	do {
2479
		bool irq_received = (iir & ~flip_mask) != 0;
2710
		bool irq_received = (iir & ~flip_mask) != 0;
2480
		bool blc_event = false;
2711
		bool blc_event = false;
2481
 
2712
 
2482
		/* Can't rely on pipestat interrupt bit in iir as it might
2713
		/* Can't rely on pipestat interrupt bit in iir as it might
2483
		 * have been cleared after the pipestat interrupt was received.
2714
		 * have been cleared after the pipestat interrupt was received.
2484
		 * It doesn't set the bit in iir again, but it still produces
2715
		 * It doesn't set the bit in iir again, but it still produces
2485
		 * interrupts (for non-MSI).
2716
		 * interrupts (for non-MSI).
2486
		 */
2717
		 */
2487
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2718
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2488
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2719
//       if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2489
			i915_handle_error(dev, false);
2720
//           i915_handle_error(dev, false);
2490
 
2721
 
2491
		for_each_pipe(pipe) {
2722
		for_each_pipe(pipe) {
2492
			int reg = PIPESTAT(pipe);
2723
			int reg = PIPESTAT(pipe);
2493
			pipe_stats[pipe] = I915_READ(reg);
2724
			pipe_stats[pipe] = I915_READ(reg);
2494
 
2725
 
2495
			/* Clear the PIPE*STAT regs before the IIR */
2726
			/* Clear the PIPE*STAT regs before the IIR */
2496
			if (pipe_stats[pipe] & 0x8000ffff) {
2727
			if (pipe_stats[pipe] & 0x8000ffff) {
2497
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2728
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2498
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
2729
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
2499
							 pipe_name(pipe));
2730
							 pipe_name(pipe));
2500
				I915_WRITE(reg, pipe_stats[pipe]);
2731
				I915_WRITE(reg, pipe_stats[pipe]);
2501
				irq_received = true;
2732
				irq_received = true;
2502
			}
2733
			}
2503
		}
2734
		}
2504
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2735
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2505
 
2736
 
2506
		if (!irq_received)
2737
		if (!irq_received)
2507
			break;
2738
			break;
2508
 
2739
 
2509
		/* Consume port.  Then clear IIR or we'll miss events */
2740
		/* Consume port.  Then clear IIR or we'll miss events */
2510
		if ((I915_HAS_HOTPLUG(dev)) &&
2741
		if ((I915_HAS_HOTPLUG(dev)) &&
2511
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
2742
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
2512
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2743
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2513
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
2744
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
2514
 
2745
 
2515
			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2746
			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2516
				  hotplug_status);
2747
				  hotplug_status);
2517
			if (hotplug_trigger) {
-
 
-
 
2748
 
2518
				if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
2749
			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
2519
					i915_hpd_irq_setup(dev);
-
 
2520
				queue_work(dev_priv->wq,
-
 
2521
					   &dev_priv->hotplug_work);
-
 
2522
			}
2750
 
2523
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2751
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2524
			POSTING_READ(PORT_HOTPLUG_STAT);
2752
			POSTING_READ(PORT_HOTPLUG_STAT);
2525
		}
2753
		}
2526
 
2754
 
2527
		I915_WRITE(IIR, iir & ~flip_mask);
2755
		I915_WRITE(IIR, iir & ~flip_mask);
2528
		new_iir = I915_READ(IIR); /* Flush posted writes */
2756
		new_iir = I915_READ(IIR); /* Flush posted writes */
2529
 
2757
 
2530
		if (iir & I915_USER_INTERRUPT)
2758
		if (iir & I915_USER_INTERRUPT)
2531
			notify_ring(dev, &dev_priv->ring[RCS]);
2759
			notify_ring(dev, &dev_priv->ring[RCS]);
2532
 
2760
 
2533
		for_each_pipe(pipe) {
2761
		for_each_pipe(pipe) {
2534
			int plane = pipe;
2762
			int plane = pipe;
2535
			if (IS_MOBILE(dev))
2763
			if (IS_MOBILE(dev))
2536
				plane = !plane;
2764
				plane = !plane;
2537
 
2765
 
2538
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
2766
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
2539
			    i915_handle_vblank(dev, plane, pipe, iir))
2767
			    i915_handle_vblank(dev, plane, pipe, iir))
2540
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
2768
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
2541
 
2769
 
2542
			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2770
			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2543
				blc_event = true;
2771
				blc_event = true;
2544
		}
2772
		}
2545
 
2773
 
2546
//		if (blc_event || (iir & I915_ASLE_INTERRUPT))
2774
//		if (blc_event || (iir & I915_ASLE_INTERRUPT))
2547
//			intel_opregion_asle_intr(dev);
2775
//			intel_opregion_asle_intr(dev);
2548
 
2776
 
2549
		/* With MSI, interrupts are only generated when iir
2777
		/* With MSI, interrupts are only generated when iir
2550
		 * transitions from zero to nonzero.  If another bit got
2778
		 * transitions from zero to nonzero.  If another bit got
2551
		 * set while we were handling the existing iir bits, then
2779
		 * set while we were handling the existing iir bits, then
2552
		 * we would never get another interrupt.
2780
		 * we would never get another interrupt.
2553
		 *
2781
		 *
2554
		 * This is fine on non-MSI as well, as if we hit this path
2782
		 * This is fine on non-MSI as well, as if we hit this path
2555
		 * we avoid exiting the interrupt handler only to generate
2783
		 * we avoid exiting the interrupt handler only to generate
2556
		 * another one.
2784
		 * another one.
2557
		 *
2785
		 *
2558
		 * Note that for MSI this could cause a stray interrupt report
2786
		 * Note that for MSI this could cause a stray interrupt report
2559
		 * if an interrupt landed in the time between writing IIR and
2787
		 * if an interrupt landed in the time between writing IIR and
2560
		 * the posting read.  This should be rare enough to never
2788
		 * the posting read.  This should be rare enough to never
2561
		 * trigger the 99% of 100,000 interrupts test for disabling
2789
		 * trigger the 99% of 100,000 interrupts test for disabling
2562
		 * stray interrupts.
2790
		 * stray interrupts.
2563
		 */
2791
		 */
2564
		ret = IRQ_HANDLED;
2792
		ret = IRQ_HANDLED;
2565
		iir = new_iir;
2793
		iir = new_iir;
2566
	} while (iir & ~flip_mask);
2794
	} while (iir & ~flip_mask);
2567
 
2795
 
2568
	i915_update_dri1_breadcrumb(dev);
2796
	i915_update_dri1_breadcrumb(dev);
2569
 
2797
 
2570
	return ret;
2798
	return ret;
2571
}
2799
}
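
/* Sketch (illustrative only): the long MSI comment above is why the handler
 * loops on IIR instead of returning after one pass -- the ack is written
 * first, IIR is re-read, and the loop only exits once nothing outside
 * flip_mask is left, so a later 0 -> nonzero edge can raise MSI again.  The
 * stand-alone model below uses an invented fake_iir register. */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_iir = 0x5;              /* two sources pending at entry */

static uint32_t read_iir(void)               { return fake_iir; }
static void     ack_iir(uint32_t bits)       { fake_iir &= ~bits; }

int main(void)
{
	uint32_t iir = read_iir();
	int passes = 0;

	do {
		ack_iir(iir);                   /* clear what this pass will handle */
		uint32_t new_iir = read_iir();  /* pick up bits that landed meanwhile */
		passes++;
		iir = new_iir;
	} while (iir);

	printf("drained in %d pass(es)\n", passes);
	return 0;
}
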
2572
 
2800
 
2573
static void i915_irq_uninstall(struct drm_device * dev)
2801
static void i915_irq_uninstall(struct drm_device * dev)
2574
{
2802
{
2575
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2803
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2576
	int pipe;
2804
	int pipe;
2577
 
2805
 
2578
	if (I915_HAS_HOTPLUG(dev)) {
2806
	if (I915_HAS_HOTPLUG(dev)) {
2579
		I915_WRITE(PORT_HOTPLUG_EN, 0);
2807
		I915_WRITE(PORT_HOTPLUG_EN, 0);
2580
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2808
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2581
	}
2809
	}
2582
 
2810
 
2583
	I915_WRITE16(HWSTAM, 0xffff);
2811
	I915_WRITE16(HWSTAM, 0xffff);
2584
	for_each_pipe(pipe) {
2812
	for_each_pipe(pipe) {
2585
		/* Clear enable bits; then clear status bits */
2813
		/* Clear enable bits; then clear status bits */
2586
		I915_WRITE(PIPESTAT(pipe), 0);
2814
		I915_WRITE(PIPESTAT(pipe), 0);
2587
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2815
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2588
	}
2816
	}
2589
	I915_WRITE(IMR, 0xffffffff);
2817
	I915_WRITE(IMR, 0xffffffff);
2590
	I915_WRITE(IER, 0x0);
2818
	I915_WRITE(IER, 0x0);
2591
 
2819
 
2592
	I915_WRITE(IIR, I915_READ(IIR));
2820
	I915_WRITE(IIR, I915_READ(IIR));
2593
}
2821
}
2594
 
2822
 
2595
static void i965_irq_preinstall(struct drm_device * dev)
2823
static void i965_irq_preinstall(struct drm_device * dev)
2596
{
2824
{
2597
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2825
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2598
	int pipe;
2826
	int pipe;
2599
 
2827
 
2600
	atomic_set(&dev_priv->irq_received, 0);
2828
	atomic_set(&dev_priv->irq_received, 0);
2601
 
2829
 
2602
	I915_WRITE(PORT_HOTPLUG_EN, 0);
2830
	I915_WRITE(PORT_HOTPLUG_EN, 0);
2603
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2831
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2604
 
2832
 
2605
	I915_WRITE(HWSTAM, 0xeffe);
2833
	I915_WRITE(HWSTAM, 0xeffe);
2606
	for_each_pipe(pipe)
2834
	for_each_pipe(pipe)
2607
		I915_WRITE(PIPESTAT(pipe), 0);
2835
		I915_WRITE(PIPESTAT(pipe), 0);
2608
	I915_WRITE(IMR, 0xffffffff);
2836
	I915_WRITE(IMR, 0xffffffff);
2609
	I915_WRITE(IER, 0x0);
2837
	I915_WRITE(IER, 0x0);
2610
	POSTING_READ(IER);
2838
	POSTING_READ(IER);
2611
}
2839
}
2612
 
2840
 
2613
static int i965_irq_postinstall(struct drm_device *dev)
2841
static int i965_irq_postinstall(struct drm_device *dev)
2614
{
2842
{
2615
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2843
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2616
	u32 enable_mask;
2844
	u32 enable_mask;
2617
	u32 error_mask;
2845
	u32 error_mask;
-
 
2846
	unsigned long irqflags;
2618
 
2847
 
2619
	/* Unmask the interrupts that we always want on. */
2848
	/* Unmask the interrupts that we always want on. */
2620
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
2849
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
2621
			       I915_DISPLAY_PORT_INTERRUPT |
2850
			       I915_DISPLAY_PORT_INTERRUPT |
2622
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2851
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2623
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2852
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2624
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2853
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2625
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2854
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2626
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2855
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2627
 
2856
 
2628
	enable_mask = ~dev_priv->irq_mask;
2857
	enable_mask = ~dev_priv->irq_mask;
2629
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2858
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2630
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
2859
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
2631
	enable_mask |= I915_USER_INTERRUPT;
2860
	enable_mask |= I915_USER_INTERRUPT;
2632
 
2861
 
2633
	if (IS_G4X(dev))
2862
	if (IS_G4X(dev))
2634
		enable_mask |= I915_BSD_USER_INTERRUPT;
2863
		enable_mask |= I915_BSD_USER_INTERRUPT;
-
 
2864
 
-
 
2865
	/* Interrupt setup is already guaranteed to be single-threaded, this is
-
 
2866
	 * just to make the assert_spin_locked check happy. */
2635
 
2867
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
 
2868
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2636
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2869
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2637
 
2870
 
2638
	/*
2871
	/*
2639
	 * Enable some error detection, note the instruction error mask
2872
	 * Enable some error detection, note the instruction error mask
2640
	 * bit is reserved, so we leave it masked.
2873
	 * bit is reserved, so we leave it masked.
2641
	 */
2874
	 */
2642
	if (IS_G4X(dev)) {
2875
	if (IS_G4X(dev)) {
2643
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
2876
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
2644
			       GM45_ERROR_MEM_PRIV |
2877
			       GM45_ERROR_MEM_PRIV |
2645
			       GM45_ERROR_CP_PRIV |
2878
			       GM45_ERROR_CP_PRIV |
2646
			       I915_ERROR_MEMORY_REFRESH);
2879
			       I915_ERROR_MEMORY_REFRESH);
2647
	} else {
2880
	} else {
2648
		error_mask = ~(I915_ERROR_PAGE_TABLE |
2881
		error_mask = ~(I915_ERROR_PAGE_TABLE |
2649
			       I915_ERROR_MEMORY_REFRESH);
2882
			       I915_ERROR_MEMORY_REFRESH);
2650
	}
2883
	}
2651
	I915_WRITE(EMR, error_mask);
2884
	I915_WRITE(EMR, error_mask);
2652
 
2885
 
2653
	I915_WRITE(IMR, dev_priv->irq_mask);
2886
	I915_WRITE(IMR, dev_priv->irq_mask);
2654
	I915_WRITE(IER, enable_mask);
2887
	I915_WRITE(IER, enable_mask);
2655
	POSTING_READ(IER);
2888
	POSTING_READ(IER);
2656
 
2889
 
2657
	I915_WRITE(PORT_HOTPLUG_EN, 0);
2890
	I915_WRITE(PORT_HOTPLUG_EN, 0);
2658
	POSTING_READ(PORT_HOTPLUG_EN);
2891
	POSTING_READ(PORT_HOTPLUG_EN);
2659
 
2892
 
2660
//	intel_opregion_enable_asle(dev);
2893
//	intel_opregion_enable_asle(dev);
2661
 
2894
 
2662
	return 0;
2895
	return 0;
2663
}
2896
}
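
/* Sketch (illustrative only): enable_mask above is derived from irq_mask by
 * double negation -- everything unmasked gets enabled, except the flip-pending
 * bits (enabled on demand, per flip) plus the user interrupt.  The stand-alone
 * model below repeats that mask algebra with invented bit values in place of
 * the real register layout. */
#include <stdint.h>
#include <stdio.h>

#define ASLE        (1u << 0)                /* invented values, not the real bits */
#define PORT_HP     (1u << 1)
#define PIPE_A      (1u << 2)
#define PIPE_B      (1u << 3)
#define FLIP_A      (1u << 4)
#define FLIP_B      (1u << 5)
#define PARSER_ERR  (1u << 6)
#define USER        (1u << 7)

int main(void)
{
	uint32_t irq_mask = ~(ASLE | PORT_HP | PIPE_A | PIPE_B |
			      FLIP_A | FLIP_B | PARSER_ERR);

	uint32_t enable_mask = ~irq_mask;            /* start from the unmasked set */
	enable_mask &= ~(FLIP_A | FLIP_B);           /* flips get enabled later, per flip */
	enable_mask |= USER;                         /* user interrupt always enabled */

	printf("irq_mask    = 0x%08x\n", irq_mask);
	printf("enable_mask = 0x%08x\n", enable_mask);
	return 0;
}
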
2664
 
2897
 
2665
static void i915_hpd_irq_setup(struct drm_device *dev)
2898
static void i915_hpd_irq_setup(struct drm_device *dev)
2666
{
2899
{
2667
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2900
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2668
	struct drm_mode_config *mode_config = &dev->mode_config;
2901
	struct drm_mode_config *mode_config = &dev->mode_config;
2669
	struct intel_encoder *intel_encoder;
2902
	struct intel_encoder *intel_encoder;
2670
	u32 hotplug_en;
2903
	u32 hotplug_en;
-
 
2904
 
-
 
2905
	assert_spin_locked(&dev_priv->irq_lock);
2671
 
2906
 
2672
	if (I915_HAS_HOTPLUG(dev)) {
2907
	if (I915_HAS_HOTPLUG(dev)) {
2673
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
2908
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
2674
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
2909
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
2675
	/* Note HDMI and DP share hotplug bits */
2910
	/* Note HDMI and DP share hotplug bits */
2676
		/* enable bits are the same for all generations */
2911
		/* enable bits are the same for all generations */
2677
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2912
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2678
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2913
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2679
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
2914
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
2680
		/* Programming the CRT detection parameters tends
2915
		/* Programming the CRT detection parameters tends
2681
		   to generate a spurious hotplug event about three
2916
		   to generate a spurious hotplug event about three
2682
		   seconds later.  So just do it once.
2917
		   seconds later.  So just do it once.
2683
		   */
2918
		   */
2684
		if (IS_G4X(dev))
2919
		if (IS_G4X(dev))
2685
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
2920
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
2686
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
2921
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
2687
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2922
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2688
 
2923
 
2689
	/* Ignore TV since it's buggy */
2924
	/* Ignore TV since it's buggy */
2690
	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2925
	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2691
	}
2926
	}
2692
}
2927
}
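
/* Sketch (illustrative only): the encoder-list walk above turns per-pin
 * software state into hardware enable bits -- only pins still marked
 * HPD_ENABLED (e.g. not shut off by hotplug-storm handling) contribute to
 * hotplug_en.  The stand-alone model below uses an invented pin-to-bit table. */
#include <stdint.h>
#include <stdio.h>

enum hpd_pin  { HPD_CRT, HPD_PORT_B, HPD_PORT_C, HPD_PIN_COUNT };
enum hpd_mark { HPD_ENABLED, HPD_MARK_DISABLED };

static const uint32_t pin_to_en_bit[HPD_PIN_COUNT] = {   /* made-up bit layout */
	[HPD_CRT]    = 1u << 9,
	[HPD_PORT_B] = 1u << 29,
	[HPD_PORT_C] = 1u << 28,
};

int main(void)
{
	enum hpd_mark mark[HPD_PIN_COUNT] = {
		HPD_ENABLED, HPD_MARK_DISABLED, HPD_ENABLED   /* port B stormed out */
	};
	uint32_t hotplug_en = 0;

	for (int pin = 0; pin < HPD_PIN_COUNT; pin++)
		if (mark[pin] == HPD_ENABLED)
			hotplug_en |= pin_to_en_bit[pin];

	printf("hotplug_en = 0x%08x\n", hotplug_en);
	return 0;
}
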
2693
 
2928
 
2694
static irqreturn_t i965_irq_handler(int irq, void *arg)
2929
static irqreturn_t i965_irq_handler(int irq, void *arg)
2695
{
2930
{
2696
	struct drm_device *dev = (struct drm_device *) arg;
2931
	struct drm_device *dev = (struct drm_device *) arg;
2697
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2932
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2698
	u32 iir, new_iir;
2933
	u32 iir, new_iir;
2699
	u32 pipe_stats[I915_MAX_PIPES];
2934
	u32 pipe_stats[I915_MAX_PIPES];
2700
	unsigned long irqflags;
2935
	unsigned long irqflags;
2701
	int irq_received;
2936
	int irq_received;
2702
	int ret = IRQ_NONE, pipe;
2937
	int ret = IRQ_NONE, pipe;
2703
	u32 flip_mask =
2938
	u32 flip_mask =
2704
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2939
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2705
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2940
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2706
 
2941
 
2707
	atomic_inc(&dev_priv->irq_received);
2942
	atomic_inc(&dev_priv->irq_received);
2708
 
2943
 
2709
	iir = I915_READ(IIR);
2944
	iir = I915_READ(IIR);
2710
 
2945
 
2711
	for (;;) {
2946
	for (;;) {
2712
		bool blc_event = false;
2947
		bool blc_event = false;
2713
 
2948
 
2714
		irq_received = (iir & ~flip_mask) != 0;
2949
		irq_received = (iir & ~flip_mask) != 0;
2715
 
2950
 
2716
		/* Can't rely on pipestat interrupt bit in iir as it might
2951
		/* Can't rely on pipestat interrupt bit in iir as it might
2717
		 * have been cleared after the pipestat interrupt was received.
2952
		 * have been cleared after the pipestat interrupt was received.
2718
		 * It doesn't set the bit in iir again, but it still produces
2953
		 * It doesn't set the bit in iir again, but it still produces
2719
		 * interrupts (for non-MSI).
2954
		 * interrupts (for non-MSI).
2720
		 */
2955
		 */
2721
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2956
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2722
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2957
//       if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2723
			i915_handle_error(dev, false);
2958
//           i915_handle_error(dev, false);
2724
 
2959
 
2725
		for_each_pipe(pipe) {
2960
		for_each_pipe(pipe) {
2726
			int reg = PIPESTAT(pipe);
2961
			int reg = PIPESTAT(pipe);
2727
			pipe_stats[pipe] = I915_READ(reg);
2962
			pipe_stats[pipe] = I915_READ(reg);
2728
 
2963
 
2729
			/*
2964
			/*
2730
			 * Clear the PIPE*STAT regs before the IIR
2965
			 * Clear the PIPE*STAT regs before the IIR
2731
			 */
2966
			 */
2732
			if (pipe_stats[pipe] & 0x8000ffff) {
2967
			if (pipe_stats[pipe] & 0x8000ffff) {
2733
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2968
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2734
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
2969
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
2735
							 pipe_name(pipe));
2970
							 pipe_name(pipe));
2736
				I915_WRITE(reg, pipe_stats[pipe]);
2971
				I915_WRITE(reg, pipe_stats[pipe]);
2737
				irq_received = 1;
2972
				irq_received = 1;
2738
			}
2973
			}
2739
		}
2974
		}
2740
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2975
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2741
 
2976
 
2742
		if (!irq_received)
2977
		if (!irq_received)
2743
			break;
2978
			break;
2744
 
2979
 
2745
		ret = IRQ_HANDLED;
2980
		ret = IRQ_HANDLED;
2746
 
2981
 
2747
		/* Consume port.  Then clear IIR or we'll miss events */
2982
		/* Consume port.  Then clear IIR or we'll miss events */
2748
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
2983
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
2749
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2984
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2750
			u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
2985
			u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
2751
								  HOTPLUG_INT_STATUS_G4X :
2986
								  HOTPLUG_INT_STATUS_G4X :
2752
								  HOTPLUG_INT_STATUS_I965);
2987
								  HOTPLUG_INT_STATUS_I915);
2753
 
2988
 
2754
			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2989
			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2755
				  hotplug_status);
2990
				  hotplug_status);
2756
			if (hotplug_trigger) {
-
 
-
 
2991
 
2757
				if (hotplug_irq_storm_detect(dev, hotplug_trigger,
2992
			intel_hpd_irq_handler(dev, hotplug_trigger,
2758
							    IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i965))
2993
					      IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915);
2759
					i915_hpd_irq_setup(dev);
-
 
2760
				queue_work(dev_priv->wq,
-
 
2761
					   &dev_priv->hotplug_work);
-
 
2762
			}
2994
 
2763
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2995
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2764
			I915_READ(PORT_HOTPLUG_STAT);
2996
			I915_READ(PORT_HOTPLUG_STAT);
2765
		}
2997
		}
2766
 
2998
 
2767
		I915_WRITE(IIR, iir & ~flip_mask);
2999
		I915_WRITE(IIR, iir & ~flip_mask);
2768
		new_iir = I915_READ(IIR); /* Flush posted writes */
3000
		new_iir = I915_READ(IIR); /* Flush posted writes */
2769
 
3001
 
2770
		if (iir & I915_USER_INTERRUPT)
3002
		if (iir & I915_USER_INTERRUPT)
2771
			notify_ring(dev, &dev_priv->ring[RCS]);
3003
			notify_ring(dev, &dev_priv->ring[RCS]);
2772
		if (iir & I915_BSD_USER_INTERRUPT)
3004
		if (iir & I915_BSD_USER_INTERRUPT)
2773
			notify_ring(dev, &dev_priv->ring[VCS]);
3005
			notify_ring(dev, &dev_priv->ring[VCS]);
2774
 
3006
 
2775
		for_each_pipe(pipe) {
3007
		for_each_pipe(pipe) {
2776
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
3008
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
2777
			    i915_handle_vblank(dev, pipe, pipe, iir))
3009
			    i915_handle_vblank(dev, pipe, pipe, iir))
2778
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
3010
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
2779
 
3011
 
2780
			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3012
			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2781
				blc_event = true;
3013
				blc_event = true;
2782
		}
3014
		}
2783
 
3015
 
2784
 
3016
 
2785
//		if (blc_event || (iir & I915_ASLE_INTERRUPT))
3017
//		if (blc_event || (iir & I915_ASLE_INTERRUPT))
2786
//			intel_opregion_asle_intr(dev);
3018
//			intel_opregion_asle_intr(dev);
2787
 
3019
 
2788
		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
3020
		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
2789
			gmbus_irq_handler(dev);
3021
			gmbus_irq_handler(dev);
2790
 
3022
 
2791
		/* With MSI, interrupts are only generated when iir
3023
		/* With MSI, interrupts are only generated when iir
2792
		 * transitions from zero to nonzero.  If another bit got
3024
		 * transitions from zero to nonzero.  If another bit got
2793
		 * set while we were handling the existing iir bits, then
3025
		 * set while we were handling the existing iir bits, then
2794
		 * we would never get another interrupt.
3026
		 * we would never get another interrupt.
2795
		 *
3027
		 *
2796
		 * This is fine on non-MSI as well, as if we hit this path
3028
		 * This is fine on non-MSI as well, as if we hit this path
2797
		 * we avoid exiting the interrupt handler only to generate
3029
		 * we avoid exiting the interrupt handler only to generate
2798
		 * another one.
3030
		 * another one.
2799
		 *
3031
		 *
2800
		 * Note that for MSI this could cause a stray interrupt report
3032
		 * Note that for MSI this could cause a stray interrupt report
2801
		 * if an interrupt landed in the time between writing IIR and
3033
		 * if an interrupt landed in the time between writing IIR and
2802
		 * the posting read.  This should be rare enough to never
3034
		 * the posting read.  This should be rare enough to never
2803
		 * trigger the 99% of 100,000 interrupts test for disabling
3035
		 * trigger the 99% of 100,000 interrupts test for disabling
2804
		 * stray interrupts.
3036
		 * stray interrupts.
2805
		 */
3037
		 */
2806
		iir = new_iir;
3038
		iir = new_iir;
2807
	}
3039
	}
2808
 
3040
 
2809
	i915_update_dri1_breadcrumb(dev);
3041
	i915_update_dri1_breadcrumb(dev);
2810
 
3042
 
2811
	return ret;
3043
	return ret;
2812
}
3044
}
2813
 
3045
 
2814
static void i965_irq_uninstall(struct drm_device * dev)
3046
static void i965_irq_uninstall(struct drm_device * dev)
2815
{
3047
{
2816
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3048
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2817
	int pipe;
3049
	int pipe;
2818
 
3050
 
2819
	if (!dev_priv)
3051
	if (!dev_priv)
2820
		return;
3052
		return;
2821
 
3053
 
2822
	I915_WRITE(PORT_HOTPLUG_EN, 0);
3054
	I915_WRITE(PORT_HOTPLUG_EN, 0);
2823
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3055
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2824
 
3056
 
2825
	I915_WRITE(HWSTAM, 0xffffffff);
3057
	I915_WRITE(HWSTAM, 0xffffffff);
2826
	for_each_pipe(pipe)
3058
	for_each_pipe(pipe)
2827
		I915_WRITE(PIPESTAT(pipe), 0);
3059
		I915_WRITE(PIPESTAT(pipe), 0);
2828
	I915_WRITE(IMR, 0xffffffff);
3060
	I915_WRITE(IMR, 0xffffffff);
2829
	I915_WRITE(IER, 0x0);
3061
	I915_WRITE(IER, 0x0);
2830
 
3062
 
2831
	for_each_pipe(pipe)
3063
	for_each_pipe(pipe)
2832
		I915_WRITE(PIPESTAT(pipe),
3064
		I915_WRITE(PIPESTAT(pipe),
2833
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
3065
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
2834
	I915_WRITE(IIR, I915_READ(IIR));
3066
	I915_WRITE(IIR, I915_READ(IIR));
2835
}
3067
}
2836
 
3068
 
2837
void intel_irq_init(struct drm_device *dev)
3069
void intel_irq_init(struct drm_device *dev)
2838
{
3070
{
2839
	struct drm_i915_private *dev_priv = dev->dev_private;
3071
	struct drm_i915_private *dev_priv = dev->dev_private;
2840
 
3072
 
2841
	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
3073
	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
2842
 
3074
 
2843
//	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
3075
//	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
2844
 
3076
 
-
 
3077
 
2845
 
3078
//	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
2846
 
3079
 
2847
	if (IS_VALLEYVIEW(dev)) {
3080
	if (IS_VALLEYVIEW(dev)) {
2848
		dev->driver->irq_handler = valleyview_irq_handler;
3081
		dev->driver->irq_handler = valleyview_irq_handler;
2849
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
3082
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
2850
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
3083
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
2851
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3084
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
2852
	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
-
 
2853
		/* Share pre & uninstall handlers with ILK/SNB */
-
 
2854
		dev->driver->irq_handler = ivybridge_irq_handler;
-
 
2855
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
-
 
2856
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
-
 
2857
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
-
 
2858
	} else if (HAS_PCH_SPLIT(dev)) {
3085
	} else if (HAS_PCH_SPLIT(dev)) {
2859
		dev->driver->irq_handler = ironlake_irq_handler;
3086
		dev->driver->irq_handler = ironlake_irq_handler;
2860
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
3087
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
2861
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
3088
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
2862
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
3089
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
2863
	} else {
3090
	} else {
2864
		if (INTEL_INFO(dev)->gen == 2) {
3091
		if (INTEL_INFO(dev)->gen == 2) {
2865
		} else if (INTEL_INFO(dev)->gen == 3) {
3092
		} else if (INTEL_INFO(dev)->gen == 3) {
2866
			dev->driver->irq_preinstall = i915_irq_preinstall;
3093
			dev->driver->irq_preinstall = i915_irq_preinstall;
2867
			dev->driver->irq_postinstall = i915_irq_postinstall;
3094
			dev->driver->irq_postinstall = i915_irq_postinstall;
2868
			dev->driver->irq_handler = i915_irq_handler;
3095
			dev->driver->irq_handler = i915_irq_handler;
2869
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3096
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
2870
		} else {
3097
		} else {
2871
			dev->driver->irq_preinstall = i965_irq_preinstall;
3098
			dev->driver->irq_preinstall = i965_irq_preinstall;
2872
			dev->driver->irq_postinstall = i965_irq_postinstall;
3099
			dev->driver->irq_postinstall = i965_irq_postinstall;
2873
			dev->driver->irq_handler = i965_irq_handler;
3100
			dev->driver->irq_handler = i965_irq_handler;
2874
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3101
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
2875
		}
3102
		}
2876
	}
3103
	}
2877
}
3104
}
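
/* Sketch (illustrative only): intel_irq_init is a dispatch-table fill -- one
 * preinstall/postinstall/handler set per hardware family, chosen once at init
 * time and used through function pointers afterwards.  The stand-alone model
 * below shows the same pattern with invented handler names. */
#include <stdio.h>

struct irq_ops {
	void (*preinstall)(void);
	void (*handler)(void);
};

static void gen3_pre(void)     { puts("gen3 preinstall"); }
static void gen3_handle(void)  { puts("gen3 handler"); }
static void gen4_pre(void)     { puts("gen4+ preinstall"); }
static void gen4_handle(void)  { puts("gen4+ handler"); }

static struct irq_ops pick_ops(int gen)
{
	/* Mirrors the if/else ladder above: pick the whole set once, then every
	 * install/uninstall/interrupt goes through these pointers. */
	if (gen == 3)
		return (struct irq_ops){ gen3_pre, gen3_handle };
	return (struct irq_ops){ gen4_pre, gen4_handle };
}

int main(void)
{
	struct irq_ops ops = pick_ops(4);
	ops.preinstall();
	ops.handler();
	return 0;
}
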
2878
 
3105
 
2879
void intel_hpd_init(struct drm_device *dev)
3106
void intel_hpd_init(struct drm_device *dev)
2880
{
3107
{
2881
	struct drm_i915_private *dev_priv = dev->dev_private;
3108
	struct drm_i915_private *dev_priv = dev->dev_private;
2882
	struct drm_mode_config *mode_config = &dev->mode_config;
3109
	struct drm_mode_config *mode_config = &dev->mode_config;
2883
	struct drm_connector *connector;
3110
	struct drm_connector *connector;
-
 
3111
	unsigned long irqflags;
2884
	int i;
3112
	int i;
2885
 
3113
 
2886
	for (i = 1; i < HPD_NUM_PINS; i++) {
3114
	for (i = 1; i < HPD_NUM_PINS; i++) {
2887
		dev_priv->hpd_stats[i].hpd_cnt = 0;
3115
		dev_priv->hpd_stats[i].hpd_cnt = 0;
2888
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3116
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
2889
	}
3117
	}
2890
	list_for_each_entry(connector, &mode_config->connector_list, head) {
3118
	list_for_each_entry(connector, &mode_config->connector_list, head) {
2891
		struct intel_connector *intel_connector = to_intel_connector(connector);
3119
		struct intel_connector *intel_connector = to_intel_connector(connector);
2892
		connector->polled = intel_connector->polled;
3120
		connector->polled = intel_connector->polled;
2893
		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
3121
		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
2894
			connector->polled = DRM_CONNECTOR_POLL_HPD;
3122
			connector->polled = DRM_CONNECTOR_POLL_HPD;
2895
	}
3123
	}
-
 
3124
 
-
 
3125
	/* Interrupt setup is already guaranteed to be single-threaded, this is
-
 
3126
	 * just to make the assert_spin_locked checks happy. */
-
 
3127
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2896
	if (dev_priv->display.hpd_irq_setup)
3128
	if (dev_priv->display.hpd_irq_setup)
2897
		dev_priv->display.hpd_irq_setup(dev);
3129
		dev_priv->display.hpd_irq_setup(dev);
-
 
3130
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2898
}
3131
}
2899
 
-
 
-
 
3132
 
2900
 
3133
/* Disable interrupts so we can allow Package C8+. */
2901
irqreturn_t intel_irq_handler(struct drm_device *dev)
3134
void hsw_pc8_disable_interrupts(struct drm_device *dev)
-
 
3135
{
-
 
3136
	struct drm_i915_private *dev_priv = dev->dev_private;
2902
{
3137
	unsigned long irqflags;
-
 
3138
 
-
 
3139
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
 
3140
 
-
 
3141
	dev_priv->pc8.regsave.deimr = I915_READ(DEIMR);
-
 
3142
	dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR);
-
 
3143
	dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR);
-
 
3144
	dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
2903
 
3145
	dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
-
 
3146
 
-
 
3147
	ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB);
-
 
3148
	ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT);
-
 
3149
	ilk_disable_gt_irq(dev_priv, 0xffffffff);
2904
//    printf("i915 irq\n");
3150
	snb_disable_pm_irq(dev_priv, 0xffffffff);
2905
 
3151
 
-
 
3152
	dev_priv->pc8.irqs_disabled = true;
2906
//    printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ;
3153
 
2907
 
3154
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2908
    return dev->driver->irq_handler(0, dev);
3155
}
2909
}
3156
 
2910
 
3157
/* Restore interrupts so we can recover from Package C8+. */
2911
int drm_irq_install(struct drm_device *dev)
3158
void hsw_pc8_restore_interrupts(struct drm_device *dev)
-
 
3159
{
-
 
3160
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
3161
	unsigned long irqflags;
-
 
3162
	uint32_t val, expected;
-
 
3163
 
-
 
3164
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
 
3165
 
2912
{
3166
	val = I915_READ(DEIMR);
-
 
3167
	expected = ~DE_PCH_EVENT_IVB;
-
 
3168
	WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected);
-
 
3169
 
-
 
3170
	val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT;
-
 
3171
	expected = ~SDE_HOTPLUG_MASK_CPT;
-
 
3172
	WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n",
-
 
3173
	     val, expected);
-
 
3174
 
-
 
3175
	val = I915_READ(GTIMR);
-
 
3176
	expected = 0xffffffff;
-
 
3177
	WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected);
-
 
3178
 
-
 
3179
	val = I915_READ(GEN6_PMIMR);
-
 
3180
	expected = 0xffffffff;
-
 
3181
	WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val,
-
 
3182
	     expected);
-
 
3183
 
-
 
3184
	dev_priv->pc8.irqs_disabled = false;
-
 
3185
 
2913
    unsigned long sh_flags = 0;
-
 
2914
    int irq_line;
-
 
2915
    int ret = 0;
3186
	ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
2916
 
-
 
2917
    char *irqname;
-
 
2918
 
-
 
2919
    mutex_lock(&dev->struct_mutex);
-
 
2920
 
-
 
2921
    /* Driver must have been initialized */
-
 
2922
    if (!dev->dev_private) {
3187
	ibx_enable_display_interrupt(dev_priv,
2923
            mutex_unlock(&dev->struct_mutex);
-
 
2924
            return -EINVAL;
-
 
2925
    }
-
 
2926
 
-
 
2927
    if (dev->irq_enabled) {
-
 
2928
            mutex_unlock(&dev->struct_mutex);
-
 
2929
            return -EBUSY;
-
 
2930
    }
-
 
2931
    dev->irq_enabled = 1;
-
 
2932
    mutex_unlock(&dev->struct_mutex);
3188
				     ~dev_priv->pc8.regsave.sdeimr &
2933
 
3189
				     ~SDE_HOTPLUG_MASK_CPT);
2934
    irq_line   = drm_dev_to_irq(dev);
-
 
2935
 
-
 
2936
    DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
-
 
2937
 
3190
	ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
2938
    /* Before installing handler */
-
 
2939
    if (dev->driver->irq_preinstall)
-
 
2940
            dev->driver->irq_preinstall(dev);
3191
	snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
2941
 
-
 
2942
    ret = AttachIntHandler(irq_line, intel_irq_handler, (u32)dev);
-
 
2943
 
3192
	I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);
2944
    /* After installing handler */
3193
 
2945
    if (dev->driver->irq_postinstall)
3194
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2946
            ret = dev->driver->irq_postinstall(dev);
3195
}
2947
 
3196
 
2948
    if (ret < 0) {
3197
 
2949
            DRM_ERROR(__FUNCTION__);
3198
irqreturn_t intel_irq_handler(struct drm_device *dev)
2950
    }
3199
{
2951
 
3200
 
2952
    u16_t cmd = PciRead16(dev->pdev->busnr, dev->pdev->devfn, 4);
3201
//    printf("i915 irq\n");
2953
    cmd &= ~(1<<10);
3202
 
2954
    PciWrite16(dev->pdev->busnr, dev->pdev->devfn, 4, cmd);
3203
//    printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ;
2955
 
3204
 
2956
    return ret;
3205
    return dev->driver->irq_handler(0, dev);
2957
}
3206
}
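
/* Sketch (illustrative only): the PciRead16/PciWrite16 pair at the end of
 * drm_irq_install touches config offset 4, the PCI Command register, and
 * clears bit 10, the Interrupt Disable bit, so legacy INTx delivery is allowed
 * once the handler is attached.  The stand-alone model below performs the same
 * read-modify-write against an invented in-memory config space. */
#include <stdint.h>
#include <stdio.h>

#define PCI_COMMAND          0x04            /* Command register offset */
#define PCI_COMMAND_INTX_OFF (1u << 10)      /* Interrupt Disable bit */

static uint16_t cfg_space[128] = {           /* fake config space for the sketch */
	[PCI_COMMAND / 2] = 0x0007 | PCI_COMMAND_INTX_OFF
};

static uint16_t cfg_read16(unsigned off)              { return cfg_space[off / 2]; }
static void     cfg_write16(unsigned off, uint16_t v) { cfg_space[off / 2] = v; }

int main(void)
{
	uint16_t cmd = cfg_read16(PCI_COMMAND);
	cmd &= (uint16_t)~PCI_COMMAND_INTX_OFF;   /* re-enable legacy INTx */
	cfg_write16(PCI_COMMAND, cmd);

	printf("PCI command = 0x%04x\n", cfg_read16(PCI_COMMAND));
	return 0;
}
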
2958
>
-
 
2959
>
-
 
2960
//>
-
 
2961
//>
-