Subversion Repositories Kolibri OS

Rev

Rev 6088 | Rev 6131 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 6088 Rev 6103
1
/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
1
/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2
 */
2
 */
3
/*
3
/*
4
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
4
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5
 * All Rights Reserved.
5
 * All Rights Reserved.
6
 *
6
 *
7
 * Permission is hereby granted, free of charge, to any person obtaining a
7
 * Permission is hereby granted, free of charge, to any person obtaining a
8
 * copy of this software and associated documentation files (the
8
 * copy of this software and associated documentation files (the
9
 * "Software"), to deal in the Software without restriction, including
9
 * "Software"), to deal in the Software without restriction, including
10
 * without limitation the rights to use, copy, modify, merge, publish,
10
 * without limitation the rights to use, copy, modify, merge, publish,
11
 * distribute, sub license, and/or sell copies of the Software, and to
11
 * distribute, sub license, and/or sell copies of the Software, and to
12
 * permit persons to whom the Software is furnished to do so, subject to
12
 * permit persons to whom the Software is furnished to do so, subject to
13
 * the following conditions:
13
 * the following conditions:
14
 *
14
 *
15
 * The above copyright notice and this permission notice (including the
15
 * The above copyright notice and this permission notice (including the
16
 * next paragraph) shall be included in all copies or substantial portions
16
 * next paragraph) shall be included in all copies or substantial portions
17
 * of the Software.
17
 * of the Software.
18
 *
18
 *
19
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
 *
26
 *
27
 */
27
 */
28
 
28
 
29
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
29
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 
30
 
30
 
31
#include 
31
#include 
32
#include 
32
#include 
33
#include 
33
#include 
34
#include 
34
#include 
35
#include 
35
#include "i915_drv.h"
36
#include "i915_drv.h"
36
#include "i915_trace.h"
37
#include "i915_trace.h"
37
#include "intel_drv.h"
38
#include "intel_drv.h"
38
 
39
 
39
/**
40
/**
40
 * DOC: interrupt handling
41
 * DOC: interrupt handling
41
 *
42
 *
42
 * These functions provide the basic support for enabling and disabling the
43
 * These functions provide the basic support for enabling and disabling the
43
 * interrupt handling support. There's a lot more functionality in i915_irq.c
44
 * interrupt handling support. There's a lot more functionality in i915_irq.c
44
 * and related files, but that will be described in separate chapters.
45
 * and related files, but that will be described in separate chapters.
45
 */
46
 */
46
 
47
 
47
static const u32 hpd_ilk[HPD_NUM_PINS] = {
48
static const u32 hpd_ilk[HPD_NUM_PINS] = {
48
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
49
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
49
};
50
};
50
 
51
 
51
static const u32 hpd_ivb[HPD_NUM_PINS] = {
52
static const u32 hpd_ivb[HPD_NUM_PINS] = {
52
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
53
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
53
};
54
};
54
 
55
 
55
static const u32 hpd_bdw[HPD_NUM_PINS] = {
56
static const u32 hpd_bdw[HPD_NUM_PINS] = {
56
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
57
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
57
};
58
};
58
 
59
 
59
static const u32 hpd_ibx[HPD_NUM_PINS] = {
60
static const u32 hpd_ibx[HPD_NUM_PINS] = {
60
	[HPD_CRT] = SDE_CRT_HOTPLUG,
61
	[HPD_CRT] = SDE_CRT_HOTPLUG,
61
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
62
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
62
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
63
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
63
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
64
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
64
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
65
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
65
};
66
};
66
 
67
 
67
static const u32 hpd_cpt[HPD_NUM_PINS] = {
68
static const u32 hpd_cpt[HPD_NUM_PINS] = {
68
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
69
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
69
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
70
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
70
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
71
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
71
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
72
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
72
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
73
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
73
};
74
};
74
 
75
 
75
static const u32 hpd_spt[HPD_NUM_PINS] = {
76
static const u32 hpd_spt[HPD_NUM_PINS] = {
76
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
77
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
77
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
78
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
78
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
79
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
79
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
80
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
80
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
81
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
81
};
82
};
82
 
83
 
83
static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
84
static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
84
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
85
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
85
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
86
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
86
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
87
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
87
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
88
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
88
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
89
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
89
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
90
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
90
};
91
};
91
 
92
 
92
static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
93
static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
93
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
94
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
94
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
95
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
95
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
96
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
96
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
97
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
97
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
98
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
98
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
99
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
99
};
100
};
100
 
101
 
101
static const u32 hpd_status_i915[HPD_NUM_PINS] = {
102
static const u32 hpd_status_i915[HPD_NUM_PINS] = {
102
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
103
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
103
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
104
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
104
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
105
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
105
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
106
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
106
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
107
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
107
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
108
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
108
};
109
};
109
 
110
 
110
/* BXT hpd list */
111
/* BXT hpd list */
111
static const u32 hpd_bxt[HPD_NUM_PINS] = {
112
static const u32 hpd_bxt[HPD_NUM_PINS] = {
112
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
113
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
113
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
114
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
114
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
115
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
115
};
116
};
116
 
117
 
117
/* IIR can theoretically queue up two events. Be paranoid. */
118
/* IIR can theoretically queue up two events. Be paranoid. */
118
#define GEN8_IRQ_RESET_NDX(type, which) do { \
119
#define GEN8_IRQ_RESET_NDX(type, which) do { \
119
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
120
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
120
	POSTING_READ(GEN8_##type##_IMR(which)); \
121
	POSTING_READ(GEN8_##type##_IMR(which)); \
121
	I915_WRITE(GEN8_##type##_IER(which), 0); \
122
	I915_WRITE(GEN8_##type##_IER(which), 0); \
122
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
123
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
123
	POSTING_READ(GEN8_##type##_IIR(which)); \
124
	POSTING_READ(GEN8_##type##_IIR(which)); \
124
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
125
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
125
	POSTING_READ(GEN8_##type##_IIR(which)); \
126
	POSTING_READ(GEN8_##type##_IIR(which)); \
126
} while (0)
127
} while (0)
127
 
128
 
128
#define GEN5_IRQ_RESET(type) do { \
129
#define GEN5_IRQ_RESET(type) do { \
129
	I915_WRITE(type##IMR, 0xffffffff); \
130
	I915_WRITE(type##IMR, 0xffffffff); \
130
	POSTING_READ(type##IMR); \
131
	POSTING_READ(type##IMR); \
131
	I915_WRITE(type##IER, 0); \
132
	I915_WRITE(type##IER, 0); \
132
	I915_WRITE(type##IIR, 0xffffffff); \
133
	I915_WRITE(type##IIR, 0xffffffff); \
133
	POSTING_READ(type##IIR); \
134
	POSTING_READ(type##IIR); \
134
	I915_WRITE(type##IIR, 0xffffffff); \
135
	I915_WRITE(type##IIR, 0xffffffff); \
135
	POSTING_READ(type##IIR); \
136
	POSTING_READ(type##IIR); \
136
} while (0)
137
} while (0)
137
 
138
 
138
/*
139
/*
139
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
140
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
140
 */
141
 */
141
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, u32 reg)
142
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, u32 reg)
142
{
143
{
143
	u32 val = I915_READ(reg);
144
	u32 val = I915_READ(reg);
144
 
145
 
145
	if (val == 0)
146
	if (val == 0)
146
		return;
147
		return;
147
 
148
 
148
	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
149
	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
149
	     reg, val);
150
	     reg, val);
150
	I915_WRITE(reg, 0xffffffff);
151
	I915_WRITE(reg, 0xffffffff);
151
	POSTING_READ(reg);
152
	POSTING_READ(reg);
152
	I915_WRITE(reg, 0xffffffff);
153
	I915_WRITE(reg, 0xffffffff);
153
	POSTING_READ(reg);
154
	POSTING_READ(reg);
154
}
155
}
155
 
156
 
156
#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
157
#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
157
	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
158
	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
158
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
159
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
159
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
160
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
160
	POSTING_READ(GEN8_##type##_IMR(which)); \
161
	POSTING_READ(GEN8_##type##_IMR(which)); \
161
} while (0)
162
} while (0)
162
 
163
 
163
#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
164
#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
164
	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
165
	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
165
	I915_WRITE(type##IER, (ier_val)); \
166
	I915_WRITE(type##IER, (ier_val)); \
166
	I915_WRITE(type##IMR, (imr_val)); \
167
	I915_WRITE(type##IMR, (imr_val)); \
167
	POSTING_READ(type##IMR); \
168
	POSTING_READ(type##IMR); \
168
} while (0)
169
} while (0)
169
 
170
 
170
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
171
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
171
 
172
 
172
/* For display hotplug interrupt */
173
/* For display hotplug interrupt */
173
static inline void
174
static inline void
174
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
175
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
175
				     uint32_t mask,
176
				     uint32_t mask,
176
				     uint32_t bits)
177
				     uint32_t bits)
177
{
178
{
178
	uint32_t val;
179
	uint32_t val;
179
 
180
 
180
	assert_spin_locked(&dev_priv->irq_lock);
181
	assert_spin_locked(&dev_priv->irq_lock);
181
	WARN_ON(bits & ~mask);
182
	WARN_ON(bits & ~mask);
182
 
183
 
183
	val = I915_READ(PORT_HOTPLUG_EN);
184
	val = I915_READ(PORT_HOTPLUG_EN);
184
	val &= ~mask;
185
	val &= ~mask;
185
	val |= bits;
186
	val |= bits;
186
	I915_WRITE(PORT_HOTPLUG_EN, val);
187
	I915_WRITE(PORT_HOTPLUG_EN, val);
187
}
188
}
188
 
189
 
189
/**
190
/**
190
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
191
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
191
 * @dev_priv: driver private
192
 * @dev_priv: driver private
192
 * @mask: bits to update
193
 * @mask: bits to update
193
 * @bits: bits to enable
194
 * @bits: bits to enable
194
 * NOTE: the HPD enable bits are modified both inside and outside
195
 * NOTE: the HPD enable bits are modified both inside and outside
195
 * of an interrupt context. To avoid that read-modify-write cycles
196
 * of an interrupt context. To avoid that read-modify-write cycles
196
 * interfer, these bits are protected by a spinlock. Since this
197
 * interfer, these bits are protected by a spinlock. Since this
197
 * function is usually not called from a context where the lock is
198
 * function is usually not called from a context where the lock is
198
 * held already, this function acquires the lock itself. A non-locking
199
 * held already, this function acquires the lock itself. A non-locking
199
 * version is also available.
200
 * version is also available.
200
 */
201
 */
201
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
202
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
202
				   uint32_t mask,
203
				   uint32_t mask,
203
				   uint32_t bits)
204
				   uint32_t bits)
204
{
205
{
205
	spin_lock_irq(&dev_priv->irq_lock);
206
	spin_lock_irq(&dev_priv->irq_lock);
206
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
207
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
207
	spin_unlock_irq(&dev_priv->irq_lock);
208
	spin_unlock_irq(&dev_priv->irq_lock);
208
}
209
}
209
 
210
 
210
/**
211
/**
211
 * ilk_update_display_irq - update DEIMR
212
 * ilk_update_display_irq - update DEIMR
212
 * @dev_priv: driver private
213
 * @dev_priv: driver private
213
 * @interrupt_mask: mask of interrupt bits to update
214
 * @interrupt_mask: mask of interrupt bits to update
214
 * @enabled_irq_mask: mask of interrupt bits to enable
215
 * @enabled_irq_mask: mask of interrupt bits to enable
215
 */
216
 */
216
static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
217
static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
217
				   uint32_t interrupt_mask,
218
				   uint32_t interrupt_mask,
218
				   uint32_t enabled_irq_mask)
219
				   uint32_t enabled_irq_mask)
219
{
220
{
220
	uint32_t new_val;
221
	uint32_t new_val;
221
 
222
 
222
	assert_spin_locked(&dev_priv->irq_lock);
223
	assert_spin_locked(&dev_priv->irq_lock);
223
 
224
 
224
	WARN_ON(enabled_irq_mask & ~interrupt_mask);
225
	WARN_ON(enabled_irq_mask & ~interrupt_mask);
225
 
226
 
226
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
227
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
227
		return;
228
		return;
228
 
229
 
229
	new_val = dev_priv->irq_mask;
230
	new_val = dev_priv->irq_mask;
230
	new_val &= ~interrupt_mask;
231
	new_val &= ~interrupt_mask;
231
	new_val |= (~enabled_irq_mask & interrupt_mask);
232
	new_val |= (~enabled_irq_mask & interrupt_mask);
232
 
233
 
233
	if (new_val != dev_priv->irq_mask) {
234
	if (new_val != dev_priv->irq_mask) {
234
		dev_priv->irq_mask = new_val;
235
		dev_priv->irq_mask = new_val;
235
		I915_WRITE(DEIMR, dev_priv->irq_mask);
236
		I915_WRITE(DEIMR, dev_priv->irq_mask);
236
		POSTING_READ(DEIMR);
237
		POSTING_READ(DEIMR);
237
	}
238
	}
238
}
239
}
239
 
240
 
240
void
241
void
241
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
242
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
242
{
243
{
243
	ilk_update_display_irq(dev_priv, mask, mask);
244
	ilk_update_display_irq(dev_priv, mask, mask);
244
}
245
}
245
 
246
 
246
void
247
void
247
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
248
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
248
{
249
{
249
	ilk_update_display_irq(dev_priv, mask, 0);
250
	ilk_update_display_irq(dev_priv, mask, 0);
250
}
251
}
251
 
252
 
252
/**
253
/**
253
 * ilk_update_gt_irq - update GTIMR
254
 * ilk_update_gt_irq - update GTIMR
254
 * @dev_priv: driver private
255
 * @dev_priv: driver private
255
 * @interrupt_mask: mask of interrupt bits to update
256
 * @interrupt_mask: mask of interrupt bits to update
256
 * @enabled_irq_mask: mask of interrupt bits to enable
257
 * @enabled_irq_mask: mask of interrupt bits to enable
257
 */
258
 */
258
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
259
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
259
			      uint32_t interrupt_mask,
260
			      uint32_t interrupt_mask,
260
			      uint32_t enabled_irq_mask)
261
			      uint32_t enabled_irq_mask)
261
{
262
{
262
	assert_spin_locked(&dev_priv->irq_lock);
263
	assert_spin_locked(&dev_priv->irq_lock);
263
 
264
 
264
	WARN_ON(enabled_irq_mask & ~interrupt_mask);
265
	WARN_ON(enabled_irq_mask & ~interrupt_mask);
265
 
266
 
266
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
267
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
267
		return;
268
		return;
268
 
269
 
269
	dev_priv->gt_irq_mask &= ~interrupt_mask;
270
	dev_priv->gt_irq_mask &= ~interrupt_mask;
270
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
271
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
271
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
272
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
272
	POSTING_READ(GTIMR);
273
	POSTING_READ(GTIMR);
273
}
274
}
274
 
275
 
275
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
276
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
276
{
277
{
277
	ilk_update_gt_irq(dev_priv, mask, mask);
278
	ilk_update_gt_irq(dev_priv, mask, mask);
278
}
279
}
279
 
280
 
280
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
281
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
281
{
282
{
282
	ilk_update_gt_irq(dev_priv, mask, 0);
283
	ilk_update_gt_irq(dev_priv, mask, 0);
283
}
284
}
284
 
285
 
285
static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
286
static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
286
{
287
{
287
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
288
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
288
}
289
}
289
 
290
 
290
static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
291
static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
291
{
292
{
292
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
293
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
293
}
294
}
294
 
295
 
295
static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
296
static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
296
{
297
{
297
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
298
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
298
}
299
}
299
 
300
 
300
/**
301
/**
301
  * snb_update_pm_irq - update GEN6_PMIMR
302
  * snb_update_pm_irq - update GEN6_PMIMR
302
  * @dev_priv: driver private
303
  * @dev_priv: driver private
303
  * @interrupt_mask: mask of interrupt bits to update
304
  * @interrupt_mask: mask of interrupt bits to update
304
  * @enabled_irq_mask: mask of interrupt bits to enable
305
  * @enabled_irq_mask: mask of interrupt bits to enable
305
  */
306
  */
306
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
307
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
307
			      uint32_t interrupt_mask,
308
			      uint32_t interrupt_mask,
308
			      uint32_t enabled_irq_mask)
309
			      uint32_t enabled_irq_mask)
309
{
310
{
310
	uint32_t new_val;
311
	uint32_t new_val;
311
 
312
 
312
	WARN_ON(enabled_irq_mask & ~interrupt_mask);
313
	WARN_ON(enabled_irq_mask & ~interrupt_mask);
313
 
314
 
314
	assert_spin_locked(&dev_priv->irq_lock);
315
	assert_spin_locked(&dev_priv->irq_lock);
315
 
316
 
316
	new_val = dev_priv->pm_irq_mask;
317
	new_val = dev_priv->pm_irq_mask;
317
	new_val &= ~interrupt_mask;
318
	new_val &= ~interrupt_mask;
318
	new_val |= (~enabled_irq_mask & interrupt_mask);
319
	new_val |= (~enabled_irq_mask & interrupt_mask);
319
 
320
 
320
	if (new_val != dev_priv->pm_irq_mask) {
321
	if (new_val != dev_priv->pm_irq_mask) {
321
		dev_priv->pm_irq_mask = new_val;
322
		dev_priv->pm_irq_mask = new_val;
322
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
323
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
323
		POSTING_READ(gen6_pm_imr(dev_priv));
324
		POSTING_READ(gen6_pm_imr(dev_priv));
324
	}
325
	}
325
}
326
}
326
 
327
 
327
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
328
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
328
{
329
{
329
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
330
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
330
		return;
331
		return;
331
 
332
 
332
	snb_update_pm_irq(dev_priv, mask, mask);
333
	snb_update_pm_irq(dev_priv, mask, mask);
333
}
334
}
334
 
335
 
335
static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
336
static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
336
				  uint32_t mask)
337
				  uint32_t mask)
337
{
338
{
338
	snb_update_pm_irq(dev_priv, mask, 0);
339
	snb_update_pm_irq(dev_priv, mask, 0);
339
}
340
}
340
 
341
 
341
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
342
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
342
{
343
{
343
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
344
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
344
		return;
345
		return;
345
 
346
 
346
	__gen6_disable_pm_irq(dev_priv, mask);
347
	__gen6_disable_pm_irq(dev_priv, mask);
347
}
348
}
348
 
349
 
349
void gen6_reset_rps_interrupts(struct drm_device *dev)
350
void gen6_reset_rps_interrupts(struct drm_device *dev)
350
{
351
{
351
	struct drm_i915_private *dev_priv = dev->dev_private;
352
	struct drm_i915_private *dev_priv = dev->dev_private;
352
	uint32_t reg = gen6_pm_iir(dev_priv);
353
	uint32_t reg = gen6_pm_iir(dev_priv);
353
 
354
 
354
	spin_lock_irq(&dev_priv->irq_lock);
355
	spin_lock_irq(&dev_priv->irq_lock);
355
	I915_WRITE(reg, dev_priv->pm_rps_events);
356
	I915_WRITE(reg, dev_priv->pm_rps_events);
356
	I915_WRITE(reg, dev_priv->pm_rps_events);
357
	I915_WRITE(reg, dev_priv->pm_rps_events);
357
	POSTING_READ(reg);
358
	POSTING_READ(reg);
358
	dev_priv->rps.pm_iir = 0;
359
	dev_priv->rps.pm_iir = 0;
359
	spin_unlock_irq(&dev_priv->irq_lock);
360
	spin_unlock_irq(&dev_priv->irq_lock);
360
}
361
}
361
 
362
 
362
void gen6_enable_rps_interrupts(struct drm_device *dev)
363
void gen6_enable_rps_interrupts(struct drm_device *dev)
363
{
364
{
364
	struct drm_i915_private *dev_priv = dev->dev_private;
365
	struct drm_i915_private *dev_priv = dev->dev_private;
365
 
366
 
366
	spin_lock_irq(&dev_priv->irq_lock);
367
	spin_lock_irq(&dev_priv->irq_lock);
367
 
368
 
368
	WARN_ON(dev_priv->rps.pm_iir);
369
	WARN_ON(dev_priv->rps.pm_iir);
369
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
370
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
370
	dev_priv->rps.interrupts_enabled = true;
371
	dev_priv->rps.interrupts_enabled = true;
371
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
372
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
372
				dev_priv->pm_rps_events);
373
				dev_priv->pm_rps_events);
373
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
374
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
374
 
375
 
375
	spin_unlock_irq(&dev_priv->irq_lock);
376
	spin_unlock_irq(&dev_priv->irq_lock);
376
}
377
}
377
 
378
 
378
u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
379
u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
379
{
380
{
380
	/*
381
	/*
381
	 * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer
382
	 * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer
382
	 * if GEN6_PM_UP_EI_EXPIRED is masked.
383
	 * if GEN6_PM_UP_EI_EXPIRED is masked.
383
	 *
384
	 *
384
	 * TODO: verify if this can be reproduced on VLV,CHV.
385
	 * TODO: verify if this can be reproduced on VLV,CHV.
385
	 */
386
	 */
386
	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
387
	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
387
		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
388
		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
388
 
389
 
389
	if (INTEL_INFO(dev_priv)->gen >= 8)
390
	if (INTEL_INFO(dev_priv)->gen >= 8)
390
		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
391
		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
391
 
392
 
392
	return mask;
393
	return mask;
393
}
394
}
394
 
395
 
395
void gen6_disable_rps_interrupts(struct drm_device *dev)
396
void gen6_disable_rps_interrupts(struct drm_device *dev)
396
{
397
{
397
	struct drm_i915_private *dev_priv = dev->dev_private;
398
	struct drm_i915_private *dev_priv = dev->dev_private;
398
 
399
 
399
	spin_lock_irq(&dev_priv->irq_lock);
400
	spin_lock_irq(&dev_priv->irq_lock);
400
	dev_priv->rps.interrupts_enabled = false;
401
	dev_priv->rps.interrupts_enabled = false;
401
	spin_unlock_irq(&dev_priv->irq_lock);
402
	spin_unlock_irq(&dev_priv->irq_lock);
402
 
403
 
403
	cancel_work_sync(&dev_priv->rps.work);
404
	cancel_work_sync(&dev_priv->rps.work);
404
 
405
 
405
	spin_lock_irq(&dev_priv->irq_lock);
406
	spin_lock_irq(&dev_priv->irq_lock);
406
 
407
 
407
	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
408
	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
408
 
409
 
409
	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
410
	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
410
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
411
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
411
				~dev_priv->pm_rps_events);
412
				~dev_priv->pm_rps_events);
412
 
413
 
413
	spin_unlock_irq(&dev_priv->irq_lock);
414
	spin_unlock_irq(&dev_priv->irq_lock);
414
 
415
 
415
}
416
}
416
 
417
 
417
/**
418
/**
418
  * bdw_update_port_irq - update DE port interrupt
419
  * bdw_update_port_irq - update DE port interrupt
419
  * @dev_priv: driver private
420
  * @dev_priv: driver private
420
  * @interrupt_mask: mask of interrupt bits to update
421
  * @interrupt_mask: mask of interrupt bits to update
421
  * @enabled_irq_mask: mask of interrupt bits to enable
422
  * @enabled_irq_mask: mask of interrupt bits to enable
422
  */
423
  */
423
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
424
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
424
				uint32_t interrupt_mask,
425
				uint32_t interrupt_mask,
425
				uint32_t enabled_irq_mask)
426
				uint32_t enabled_irq_mask)
426
{
427
{
427
	uint32_t new_val;
428
	uint32_t new_val;
428
	uint32_t old_val;
429
	uint32_t old_val;
429
 
430
 
430
	assert_spin_locked(&dev_priv->irq_lock);
431
	assert_spin_locked(&dev_priv->irq_lock);
431
 
432
 
432
	WARN_ON(enabled_irq_mask & ~interrupt_mask);
433
	WARN_ON(enabled_irq_mask & ~interrupt_mask);
433
 
434
 
434
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
435
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
435
		return;
436
		return;
436
 
437
 
437
	old_val = I915_READ(GEN8_DE_PORT_IMR);
438
	old_val = I915_READ(GEN8_DE_PORT_IMR);
438
 
439
 
439
	new_val = old_val;
440
	new_val = old_val;
440
	new_val &= ~interrupt_mask;
441
	new_val &= ~interrupt_mask;
441
	new_val |= (~enabled_irq_mask & interrupt_mask);
442
	new_val |= (~enabled_irq_mask & interrupt_mask);
442
 
443
 
443
	if (new_val != old_val) {
444
	if (new_val != old_val) {
444
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
445
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
445
		POSTING_READ(GEN8_DE_PORT_IMR);
446
		POSTING_READ(GEN8_DE_PORT_IMR);
446
	}
447
	}
447
}
448
}
448
 
449
 
449
/**
450
/**
450
 * ibx_display_interrupt_update - update SDEIMR
451
 * ibx_display_interrupt_update - update SDEIMR
451
 * @dev_priv: driver private
452
 * @dev_priv: driver private
452
 * @interrupt_mask: mask of interrupt bits to update
453
 * @interrupt_mask: mask of interrupt bits to update
453
 * @enabled_irq_mask: mask of interrupt bits to enable
454
 * @enabled_irq_mask: mask of interrupt bits to enable
454
 */
455
 */
455
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
456
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
456
				  uint32_t interrupt_mask,
457
				  uint32_t interrupt_mask,
457
				  uint32_t enabled_irq_mask)
458
				  uint32_t enabled_irq_mask)
458
{
459
{
459
	uint32_t sdeimr = I915_READ(SDEIMR);
460
	uint32_t sdeimr = I915_READ(SDEIMR);
460
	sdeimr &= ~interrupt_mask;
461
	sdeimr &= ~interrupt_mask;
461
	sdeimr |= (~enabled_irq_mask & interrupt_mask);
462
	sdeimr |= (~enabled_irq_mask & interrupt_mask);
462
 
463
 
463
	WARN_ON(enabled_irq_mask & ~interrupt_mask);
464
	WARN_ON(enabled_irq_mask & ~interrupt_mask);
464
 
465
 
465
	assert_spin_locked(&dev_priv->irq_lock);
466
	assert_spin_locked(&dev_priv->irq_lock);
466
 
467
 
467
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
468
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
468
		return;
469
		return;
469
 
470
 
470
	I915_WRITE(SDEIMR, sdeimr);
471
	I915_WRITE(SDEIMR, sdeimr);
471
	POSTING_READ(SDEIMR);
472
	POSTING_READ(SDEIMR);
472
}
473
}
473
 
474
 
474
static void
475
static void
475
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
476
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
476
		       u32 enable_mask, u32 status_mask)
477
		       u32 enable_mask, u32 status_mask)
477
{
478
{
478
	u32 reg = PIPESTAT(pipe);
479
	u32 reg = PIPESTAT(pipe);
479
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
480
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
480
 
481
 
481
	assert_spin_locked(&dev_priv->irq_lock);
482
	assert_spin_locked(&dev_priv->irq_lock);
482
	WARN_ON(!intel_irqs_enabled(dev_priv));
483
	WARN_ON(!intel_irqs_enabled(dev_priv));
483
 
484
 
484
	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
485
	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
485
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
486
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
486
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
487
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
487
		      pipe_name(pipe), enable_mask, status_mask))
488
		      pipe_name(pipe), enable_mask, status_mask))
488
		return;
489
		return;
489
 
490
 
490
	if ((pipestat & enable_mask) == enable_mask)
491
	if ((pipestat & enable_mask) == enable_mask)
491
		return;
492
		return;
492
 
493
 
493
	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
494
	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
494
 
495
 
495
	/* Enable the interrupt, clear any pending status */
496
	/* Enable the interrupt, clear any pending status */
496
	pipestat |= enable_mask | status_mask;
497
	pipestat |= enable_mask | status_mask;
497
	I915_WRITE(reg, pipestat);
498
	I915_WRITE(reg, pipestat);
498
	POSTING_READ(reg);
499
	POSTING_READ(reg);
499
}
500
}
500
 
501
 
501
static void
502
static void
502
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
503
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
503
		        u32 enable_mask, u32 status_mask)
504
		        u32 enable_mask, u32 status_mask)
504
{
505
{
505
	u32 reg = PIPESTAT(pipe);
506
	u32 reg = PIPESTAT(pipe);
506
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
507
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
507
 
508
 
508
	assert_spin_locked(&dev_priv->irq_lock);
509
	assert_spin_locked(&dev_priv->irq_lock);
509
	WARN_ON(!intel_irqs_enabled(dev_priv));
510
	WARN_ON(!intel_irqs_enabled(dev_priv));
510
 
511
 
511
	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
512
	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
512
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
513
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
513
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
514
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
514
		      pipe_name(pipe), enable_mask, status_mask))
515
		      pipe_name(pipe), enable_mask, status_mask))
515
		return;
516
		return;
516
 
517
 
517
	if ((pipestat & enable_mask) == 0)
518
	if ((pipestat & enable_mask) == 0)
518
		return;
519
		return;
519
 
520
 
520
	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
521
	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
521
 
522
 
522
	pipestat &= ~enable_mask;
523
	pipestat &= ~enable_mask;
523
	I915_WRITE(reg, pipestat);
524
	I915_WRITE(reg, pipestat);
524
	POSTING_READ(reg);
525
	POSTING_READ(reg);
525
}
526
}
526
 
527
 
527
/*
 * Derive the PIPESTAT enable mask from a status mask on VLV, where the
 * enable bits are not a plain (status << 16) for the sprite flip-done
 * and FIFO underrun events.
 */
static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}
 
555
 
555
void
556
void
556
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
557
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
557
		     u32 status_mask)
558
		     u32 status_mask)
558
{
559
{
559
	u32 enable_mask;
560
	u32 enable_mask;
560
 
561
 
561
	if (IS_VALLEYVIEW(dev_priv->dev))
562
	if (IS_VALLEYVIEW(dev_priv->dev))
562
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
563
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
563
							   status_mask);
564
							   status_mask);
564
	else
565
	else
565
		enable_mask = status_mask << 16;
566
		enable_mask = status_mask << 16;
566
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
567
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
567
}
568
}
568
 
569
 
569
void
570
void
570
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
571
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
571
		      u32 status_mask)
572
		      u32 status_mask)
572
{
573
{
573
	u32 enable_mask;
574
	u32 enable_mask;
574
 
575
 
575
	if (IS_VALLEYVIEW(dev_priv->dev))
576
	if (IS_VALLEYVIEW(dev_priv->dev))
576
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
577
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
577
							   status_mask);
578
							   status_mask);
578
	else
579
	else
579
		enable_mask = status_mask << 16;
580
		enable_mask = status_mask << 16;
580
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
581
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
581
}
582
}
582
 
583
 
583
/**
584
/**
584
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
585
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
585
 * @dev: drm device
586
 * @dev: drm device
586
 */
587
 */
587
static void i915_enable_asle_pipestat(struct drm_device *dev)
588
static void i915_enable_asle_pipestat(struct drm_device *dev)
588
{
589
{
589
	struct drm_i915_private *dev_priv = dev->dev_private;
590
	struct drm_i915_private *dev_priv = dev->dev_private;
590
 
591
 
591
	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
592
	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
592
		return;
593
		return;
593
 
594
 
594
	spin_lock_irq(&dev_priv->irq_lock);
595
	spin_lock_irq(&dev_priv->irq_lock);
595
 
596
 
596
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
597
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
597
	if (INTEL_INFO(dev)->gen >= 4)
598
	if (INTEL_INFO(dev)->gen >= 4)
598
		i915_enable_pipestat(dev_priv, PIPE_A,
599
		i915_enable_pipestat(dev_priv, PIPE_A,
599
				     PIPE_LEGACY_BLC_EVENT_STATUS);
600
				     PIPE_LEGACY_BLC_EVENT_STATUS);
600
 
601
 
601
	spin_unlock_irq(&dev_priv->irq_lock);
602
	spin_unlock_irq(&dev_priv->irq_lock);
602
}
603
}
603
 
604
 
604
/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */
 
654
 
654
/*
 * vblank counter callback for gen2: the hardware has no frame counter,
 * so always report 0 and let the DRM core emulate the count in software.
 */
static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}
 
660
 
660
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
664
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
664
{
665
{
665
	struct drm_i915_private *dev_priv = dev->dev_private;
666
	struct drm_i915_private *dev_priv = dev->dev_private;
666
	unsigned long high_frame;
667
	unsigned long high_frame;
667
	unsigned long low_frame;
668
	unsigned long low_frame;
668
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
669
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
669
	struct intel_crtc *intel_crtc =
670
	struct intel_crtc *intel_crtc =
670
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
671
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
671
	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
672
	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
672
 
673
 
673
	htotal = mode->crtc_htotal;
674
	htotal = mode->crtc_htotal;
674
	hsync_start = mode->crtc_hsync_start;
675
	hsync_start = mode->crtc_hsync_start;
675
	vbl_start = mode->crtc_vblank_start;
676
	vbl_start = mode->crtc_vblank_start;
676
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
677
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
677
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
678
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
678
 
679
 
679
	/* Convert to pixel count */
680
	/* Convert to pixel count */
680
	vbl_start *= htotal;
681
	vbl_start *= htotal;
681
 
682
 
682
	/* Start of vblank event occurs at start of hsync */
683
	/* Start of vblank event occurs at start of hsync */
683
	vbl_start -= htotal - hsync_start;
684
	vbl_start -= htotal - hsync_start;
684
 
685
 
685
	high_frame = PIPEFRAME(pipe);
686
	high_frame = PIPEFRAME(pipe);
686
	low_frame = PIPEFRAMEPIXEL(pipe);
687
	low_frame = PIPEFRAMEPIXEL(pipe);
687
 
688
 
688
	/*
689
	/*
689
	 * High & low register fields aren't synchronized, so make sure
690
	 * High & low register fields aren't synchronized, so make sure
690
	 * we get a low value that's stable across two reads of the high
691
	 * we get a low value that's stable across two reads of the high
691
	 * register.
692
	 * register.
692
	 */
693
	 */
693
	do {
694
	do {
694
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
695
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
695
		low   = I915_READ(low_frame);
696
		low   = I915_READ(low_frame);
696
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
697
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
697
	} while (high1 != high2);
698
	} while (high1 != high2);
698
 
699
 
699
	high1 >>= PIPE_FRAME_HIGH_SHIFT;
700
	high1 >>= PIPE_FRAME_HIGH_SHIFT;
700
	pixel = low & PIPE_PIXEL_MASK;
701
	pixel = low & PIPE_PIXEL_MASK;
701
	low >>= PIPE_FRAME_LOW_SHIFT;
702
	low >>= PIPE_FRAME_LOW_SHIFT;
702
 
703
 
703
	/*
704
	/*
704
	 * The frame counter increments at beginning of active.
705
	 * The frame counter increments at beginning of active.
705
	 * Cook up a vblank counter by also checking the pixel
706
	 * Cook up a vblank counter by also checking the pixel
706
	 * counter against vblank start.
707
	 * counter against vblank start.
707
	 */
708
	 */
708
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
709
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
709
}
710
}
710
 
711
 
711
static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
712
static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
712
{
713
{
713
	struct drm_i915_private *dev_priv = dev->dev_private;
714
	struct drm_i915_private *dev_priv = dev->dev_private;
714
 
715
 
715
	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
716
	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
716
}
717
}
717
 
718
 
718
/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
 
721
 
721
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
722
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
722
{
723
{
723
	struct drm_device *dev = crtc->base.dev;
724
	struct drm_device *dev = crtc->base.dev;
724
	struct drm_i915_private *dev_priv = dev->dev_private;
725
	struct drm_i915_private *dev_priv = dev->dev_private;
725
	const struct drm_display_mode *mode = &crtc->base.hwmode;
726
	const struct drm_display_mode *mode = &crtc->base.hwmode;
726
	enum pipe pipe = crtc->pipe;
727
	enum pipe pipe = crtc->pipe;
727
	int position, vtotal;
728
	int position, vtotal;
728
 
729
 
729
	vtotal = mode->crtc_vtotal;
730
	vtotal = mode->crtc_vtotal;
730
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
731
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
731
		vtotal /= 2;
732
		vtotal /= 2;
732
 
733
 
733
	if (IS_GEN2(dev))
734
	if (IS_GEN2(dev))
734
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
735
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
735
	else
736
	else
736
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
737
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
737
 
738
 
738
	/*
739
	/*
739
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
740
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
740
	 * read it just before the start of vblank.  So try it again
741
	 * read it just before the start of vblank.  So try it again
741
	 * so we don't accidentally end up spanning a vblank frame
742
	 * so we don't accidentally end up spanning a vblank frame
742
	 * increment, causing the pipe_update_end() code to squak at us.
743
	 * increment, causing the pipe_update_end() code to squak at us.
743
	 *
744
	 *
744
	 * The nature of this problem means we can't simply check the ISR
745
	 * The nature of this problem means we can't simply check the ISR
745
	 * bit and return the vblank start value; nor can we use the scanline
746
	 * bit and return the vblank start value; nor can we use the scanline
746
	 * debug register in the transcoder as it appears to have the same
747
	 * debug register in the transcoder as it appears to have the same
747
	 * problem.  We may need to extend this to include other platforms,
748
	 * problem.  We may need to extend this to include other platforms,
748
	 * but so far testing only shows the problem on HSW.
749
	 * but so far testing only shows the problem on HSW.
749
	 */
750
	 */
750
	if (HAS_DDI(dev) && !position) {
751
	if (HAS_DDI(dev) && !position) {
751
		int i, temp;
752
		int i, temp;
752
 
753
 
753
		for (i = 0; i < 100; i++) {
754
		for (i = 0; i < 100; i++) {
754
			udelay(1);
755
			udelay(1);
755
			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
756
			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
756
				DSL_LINEMASK_GEN3;
757
				DSL_LINEMASK_GEN3;
757
			if (temp != position) {
758
			if (temp != position) {
758
				position = temp;
759
				position = temp;
759
				break;
760
				break;
760
			}
761
			}
761
		}
762
		}
762
	}
763
	}
763
 
764
 
764
	/*
765
	/*
765
	 * See update_scanline_offset() for the details on the
766
	 * See update_scanline_offset() for the details on the
766
	 * scanline_offset adjustment.
767
	 * scanline_offset adjustment.
767
	 */
768
	 */
768
	return (position + crtc->scanline_offset) % vtotal;
769
	return (position + crtc->scanline_offset) % vtotal;
769
}
770
}
770
 
771
 
771
static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
772
static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
772
				    unsigned int flags, int *vpos, int *hpos,
773
				    unsigned int flags, int *vpos, int *hpos,
773
				    ktime_t *stime, ktime_t *etime,
774
				    ktime_t *stime, ktime_t *etime,
774
				    const struct drm_display_mode *mode)
775
				    const struct drm_display_mode *mode)
775
{
776
{
776
	struct drm_i915_private *dev_priv = dev->dev_private;
777
	struct drm_i915_private *dev_priv = dev->dev_private;
777
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
778
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
778
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
779
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
779
	int position;
780
	int position;
780
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
781
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
781
	bool in_vbl = true;
782
	bool in_vbl = true;
782
	int ret = 0;
783
	int ret = 0;
783
	unsigned long irqflags;
784
	unsigned long irqflags;
784
 
785
 
785
	if (WARN_ON(!mode->crtc_clock)) {
786
	if (WARN_ON(!mode->crtc_clock)) {
786
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
787
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
787
				 "pipe %c\n", pipe_name(pipe));
788
				 "pipe %c\n", pipe_name(pipe));
788
		return 0;
789
		return 0;
789
	}
790
	}
790
 
791
 
791
	htotal = mode->crtc_htotal;
792
	htotal = mode->crtc_htotal;
792
	hsync_start = mode->crtc_hsync_start;
793
	hsync_start = mode->crtc_hsync_start;
793
	vtotal = mode->crtc_vtotal;
794
	vtotal = mode->crtc_vtotal;
794
	vbl_start = mode->crtc_vblank_start;
795
	vbl_start = mode->crtc_vblank_start;
795
	vbl_end = mode->crtc_vblank_end;
796
	vbl_end = mode->crtc_vblank_end;
796
 
797
 
797
	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
798
	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
798
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
799
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
799
		vbl_end /= 2;
800
		vbl_end /= 2;
800
		vtotal /= 2;
801
		vtotal /= 2;
801
	}
802
	}
802
 
803
 
803
	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
804
	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
804
 
805
 
805
	/*
806
	/*
806
	 * Lock uncore.lock, as we will do multiple timing critical raw
807
	 * Lock uncore.lock, as we will do multiple timing critical raw
807
	 * register reads, potentially with preemption disabled, so the
808
	 * register reads, potentially with preemption disabled, so the
808
	 * following code must not block on uncore.lock.
809
	 * following code must not block on uncore.lock.
809
	 */
810
	 */
810
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
811
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
811
 
812
 
812
	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
813
	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
813
 
814
 
814
	/* Get optional system timestamp before query. */
815
	/* Get optional system timestamp before query. */
815
	if (stime)
816
	if (stime)
816
		*stime = ktime_get();
817
		*stime = ktime_get();
817
 
818
 
818
	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
819
	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
819
		/* No obvious pixelcount register. Only query vertical
820
		/* No obvious pixelcount register. Only query vertical
820
		 * scanout position from Display scan line register.
821
		 * scanout position from Display scan line register.
821
		 */
822
		 */
822
		position = __intel_get_crtc_scanline(intel_crtc);
823
		position = __intel_get_crtc_scanline(intel_crtc);
823
	} else {
824
	} else {
824
		/* Have access to pixelcount since start of frame.
825
		/* Have access to pixelcount since start of frame.
825
		 * We can split this into vertical and horizontal
826
		 * We can split this into vertical and horizontal
826
		 * scanout position.
827
		 * scanout position.
827
		 */
828
		 */
828
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
829
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
829
 
830
 
830
		/* convert to pixel counts */
831
		/* convert to pixel counts */
831
		vbl_start *= htotal;
832
		vbl_start *= htotal;
832
		vbl_end *= htotal;
833
		vbl_end *= htotal;
833
		vtotal *= htotal;
834
		vtotal *= htotal;
834
 
835
 
835
		/*
836
		/*
836
		 * In interlaced modes, the pixel counter counts all pixels,
837
		 * In interlaced modes, the pixel counter counts all pixels,
837
		 * so one field will have htotal more pixels. In order to avoid
838
		 * so one field will have htotal more pixels. In order to avoid
838
		 * the reported position from jumping backwards when the pixel
839
		 * the reported position from jumping backwards when the pixel
839
		 * counter is beyond the length of the shorter field, just
840
		 * counter is beyond the length of the shorter field, just
840
		 * clamp the position the length of the shorter field. This
841
		 * clamp the position the length of the shorter field. This
841
		 * matches how the scanline counter based position works since
842
		 * matches how the scanline counter based position works since
842
		 * the scanline counter doesn't count the two half lines.
843
		 * the scanline counter doesn't count the two half lines.
843
		 */
844
		 */
844
		if (position >= vtotal)
845
		if (position >= vtotal)
845
			position = vtotal - 1;
846
			position = vtotal - 1;
846
 
847
 
847
		/*
848
		/*
848
		 * Start of vblank interrupt is triggered at start of hsync,
849
		 * Start of vblank interrupt is triggered at start of hsync,
849
		 * just prior to the first active line of vblank. However we
850
		 * just prior to the first active line of vblank. However we
850
		 * consider lines to start at the leading edge of horizontal
851
		 * consider lines to start at the leading edge of horizontal
851
		 * active. So, should we get here before we've crossed into
852
		 * active. So, should we get here before we've crossed into
852
		 * the horizontal active of the first line in vblank, we would
853
		 * the horizontal active of the first line in vblank, we would
853
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
854
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
854
		 * always add htotal-hsync_start to the current pixel position.
855
		 * always add htotal-hsync_start to the current pixel position.
855
		 */
856
		 */
856
		position = (position + htotal - hsync_start) % vtotal;
857
		position = (position + htotal - hsync_start) % vtotal;
857
	}
858
	}
858
 
859
 
859
	/* Get optional system timestamp after query. */
860
	/* Get optional system timestamp after query. */
860
	if (etime)
861
	if (etime)
861
		*etime = ktime_get();
862
		*etime = ktime_get();
862
 
863
 
863
	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
864
	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
864
 
865
 
865
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
866
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
866
 
867
 
867
	in_vbl = position >= vbl_start && position < vbl_end;
868
	in_vbl = position >= vbl_start && position < vbl_end;
868
 
869
 
869
	/*
870
	/*
870
	 * While in vblank, position will be negative
871
	 * While in vblank, position will be negative
871
	 * counting up towards 0 at vbl_end. And outside
872
	 * counting up towards 0 at vbl_end. And outside
872
	 * vblank, position will be positive counting
873
	 * vblank, position will be positive counting
873
	 * up since vbl_end.
874
	 * up since vbl_end.
874
	 */
875
	 */
875
	if (position >= vbl_start)
876
	if (position >= vbl_start)
876
		position -= vbl_end;
877
		position -= vbl_end;
877
	else
878
	else
878
		position += vtotal - vbl_end;
879
		position += vtotal - vbl_end;
879
 
880
 
880
	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
881
	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
881
		*vpos = position;
882
		*vpos = position;
882
		*hpos = 0;
883
		*hpos = 0;
883
	} else {
884
	} else {
884
		*vpos = position / htotal;
885
		*vpos = position / htotal;
885
		*hpos = position - (*vpos * htotal);
886
		*hpos = position - (*vpos * htotal);
886
	}
887
	}
887
 
888
 
888
	/* In vblank? */
889
	/* In vblank? */
889
	if (in_vbl)
890
	if (in_vbl)
890
		ret |= DRM_SCANOUTPOS_IN_VBLANK;
891
		ret |= DRM_SCANOUTPOS_IN_VBLANK;
891
 
892
 
892
	return ret;
893
	return ret;
893
}
894
}
894
 
895
 
895
int intel_get_crtc_scanline(struct intel_crtc *crtc)
896
int intel_get_crtc_scanline(struct intel_crtc *crtc)
896
{
897
{
897
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
898
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
898
	unsigned long irqflags;
899
	unsigned long irqflags;
899
	int position;
900
	int position;
900
 
901
 
901
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
902
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
902
	position = __intel_get_crtc_scanline(crtc);
903
	position = __intel_get_crtc_scanline(crtc);
903
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
904
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
904
 
905
 
905
	return position;
906
	return position;
906
}
907
}
907
 
908
 
908
static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
909
static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
909
			      int *max_error,
910
			      int *max_error,
910
			      struct timeval *vblank_time,
911
			      struct timeval *vblank_time,
911
			      unsigned flags)
912
			      unsigned flags)
912
{
913
{
913
	struct drm_crtc *crtc;
914
	struct drm_crtc *crtc;
914
 
915
 
915
	if (pipe >= INTEL_INFO(dev)->num_pipes) {
916
	if (pipe >= INTEL_INFO(dev)->num_pipes) {
916
		DRM_ERROR("Invalid crtc %u\n", pipe);
917
		DRM_ERROR("Invalid crtc %u\n", pipe);
917
		return -EINVAL;
918
		return -EINVAL;
918
	}
919
	}
919
 
920
 
920
	/* Get drm_crtc to timestamp: */
921
	/* Get drm_crtc to timestamp: */
921
	crtc = intel_get_crtc_for_pipe(dev, pipe);
922
	crtc = intel_get_crtc_for_pipe(dev, pipe);
922
	if (crtc == NULL) {
923
	if (crtc == NULL) {
923
		DRM_ERROR("Invalid crtc %u\n", pipe);
924
		DRM_ERROR("Invalid crtc %u\n", pipe);
924
		return -EINVAL;
925
		return -EINVAL;
925
	}
926
	}
926
 
927
 
927
	if (!crtc->hwmode.crtc_clock) {
928
	if (!crtc->hwmode.crtc_clock) {
928
		DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
929
		DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
929
		return -EBUSY;
930
		return -EBUSY;
930
	}
931
	}
931
 
932
 
932
	/* Helper routine in DRM core does all the work: */
933
	/* Helper routine in DRM core does all the work: */
933
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
934
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
934
						     vblank_time, flags,
935
						     vblank_time, flags,
935
						     &crtc->hwmode);
936
						     &crtc->hwmode);
936
}
937
}
937
 
938
 
938
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
939
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
939
{
940
{
940
	struct drm_i915_private *dev_priv = dev->dev_private;
941
	struct drm_i915_private *dev_priv = dev->dev_private;
941
	u32 busy_up, busy_down, max_avg, min_avg;
942
	u32 busy_up, busy_down, max_avg, min_avg;
942
	u8 new_delay;
943
	u8 new_delay;
943
 
944
 
944
	spin_lock(&mchdev_lock);
945
	spin_lock(&mchdev_lock);
945
 
946
 
946
	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
947
	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
947
 
948
 
948
	new_delay = dev_priv->ips.cur_delay;
949
	new_delay = dev_priv->ips.cur_delay;
949
 
950
 
950
	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
951
	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
951
	busy_up = I915_READ(RCPREVBSYTUPAVG);
952
	busy_up = I915_READ(RCPREVBSYTUPAVG);
952
	busy_down = I915_READ(RCPREVBSYTDNAVG);
953
	busy_down = I915_READ(RCPREVBSYTDNAVG);
953
	max_avg = I915_READ(RCBMAXAVG);
954
	max_avg = I915_READ(RCBMAXAVG);
954
	min_avg = I915_READ(RCBMINAVG);
955
	min_avg = I915_READ(RCBMINAVG);
955
 
956
 
956
	/* Handle RCS change request from hw */
957
	/* Handle RCS change request from hw */
957
	if (busy_up > max_avg) {
958
	if (busy_up > max_avg) {
958
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
959
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
959
			new_delay = dev_priv->ips.cur_delay - 1;
960
			new_delay = dev_priv->ips.cur_delay - 1;
960
		if (new_delay < dev_priv->ips.max_delay)
961
		if (new_delay < dev_priv->ips.max_delay)
961
			new_delay = dev_priv->ips.max_delay;
962
			new_delay = dev_priv->ips.max_delay;
962
	} else if (busy_down < min_avg) {
963
	} else if (busy_down < min_avg) {
963
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
964
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
964
			new_delay = dev_priv->ips.cur_delay + 1;
965
			new_delay = dev_priv->ips.cur_delay + 1;
965
		if (new_delay > dev_priv->ips.min_delay)
966
		if (new_delay > dev_priv->ips.min_delay)
966
			new_delay = dev_priv->ips.min_delay;
967
			new_delay = dev_priv->ips.min_delay;
967
	}
968
	}
968
 
969
 
969
	if (ironlake_set_drps(dev, new_delay))
970
	if (ironlake_set_drps(dev, new_delay))
970
		dev_priv->ips.cur_delay = new_delay;
971
		dev_priv->ips.cur_delay = new_delay;
971
 
972
 
972
	spin_unlock(&mchdev_lock);
973
	spin_unlock(&mchdev_lock);
973
 
974
 
974
	return;
975
	return;
975
}
976
}
976
 
977
 
977
static void notify_ring(struct intel_engine_cs *ring)
978
static void notify_ring(struct intel_engine_cs *ring)
978
{
979
{
979
	if (!intel_ring_initialized(ring))
980
	if (!intel_ring_initialized(ring))
980
		return;
981
		return;
981
 
982
 
982
	trace_i915_gem_request_notify(ring);
983
	trace_i915_gem_request_notify(ring);
983
 
984
 
984
	wake_up_all(&ring->irq_queue);
985
	wake_up_all(&ring->irq_queue);
985
}
986
}
986
 
987
 
987
static void vlv_c0_read(struct drm_i915_private *dev_priv,
988
static void vlv_c0_read(struct drm_i915_private *dev_priv,
988
			struct intel_rps_ei *ei)
989
			struct intel_rps_ei *ei)
989
{
990
{
990
	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
991
	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
991
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
992
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
992
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
993
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
993
}
994
}
994
 
995
 
995
static bool vlv_c0_above(struct drm_i915_private *dev_priv,
996
static bool vlv_c0_above(struct drm_i915_private *dev_priv,
996
			 const struct intel_rps_ei *old,
997
			 const struct intel_rps_ei *old,
997
			 const struct intel_rps_ei *now,
998
			 const struct intel_rps_ei *now,
998
			 int threshold)
999
			 int threshold)
999
{
1000
{
1000
	u64 time, c0;
1001
	u64 time, c0;
1001
	unsigned int mul = 100;
1002
	unsigned int mul = 100;
1002
 
1003
 
1003
	if (old->cz_clock == 0)
1004
	if (old->cz_clock == 0)
1004
		return false;
1005
		return false;
1005
 
1006
 
1006
	if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
1007
	if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
1007
		mul <<= 8;
1008
		mul <<= 8;
1008
 
1009
 
1009
	time = now->cz_clock - old->cz_clock;
1010
	time = now->cz_clock - old->cz_clock;
1010
	time *= threshold * dev_priv->czclk_freq;
1011
	time *= threshold * dev_priv->czclk_freq;
1011
 
1012
 
1012
	/* Workload can be split between render + media, e.g. SwapBuffers
1013
	/* Workload can be split between render + media, e.g. SwapBuffers
1013
	 * being blitted in X after being rendered in mesa. To account for
1014
	 * being blitted in X after being rendered in mesa. To account for
1014
	 * this we need to combine both engines into our activity counter.
1015
	 * this we need to combine both engines into our activity counter.
1015
	 */
1016
	 */
1016
	c0 = now->render_c0 - old->render_c0;
1017
	c0 = now->render_c0 - old->render_c0;
1017
	c0 += now->media_c0 - old->media_c0;
1018
	c0 += now->media_c0 - old->media_c0;
1018
	c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
1019
	c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
1019
 
1020
 
1020
	return c0 >= time;
1021
	return c0 >= time;
1021
}
1022
}
1022
 
1023
 
1023
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
1024
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
1024
{
1025
{
1025
	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
1026
	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
1026
	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
1027
	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
1027
}
1028
}
1028
 
1029
 
1029
static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
1030
static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
1030
{
1031
{
1031
	struct intel_rps_ei now;
1032
	struct intel_rps_ei now;
1032
	u32 events = 0;
1033
	u32 events = 0;
1033
 
1034
 
1034
	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
1035
	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
1035
		return 0;
1036
		return 0;
1036
 
1037
 
1037
	vlv_c0_read(dev_priv, &now);
1038
	vlv_c0_read(dev_priv, &now);
1038
	if (now.cz_clock == 0)
1039
	if (now.cz_clock == 0)
1039
		return 0;
1040
		return 0;
1040
 
1041
 
1041
	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
1042
	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
1042
		if (!vlv_c0_above(dev_priv,
1043
		if (!vlv_c0_above(dev_priv,
1043
				  &dev_priv->rps.down_ei, &now,
1044
				  &dev_priv->rps.down_ei, &now,
1044
				  dev_priv->rps.down_threshold))
1045
				  dev_priv->rps.down_threshold))
1045
			events |= GEN6_PM_RP_DOWN_THRESHOLD;
1046
			events |= GEN6_PM_RP_DOWN_THRESHOLD;
1046
		dev_priv->rps.down_ei = now;
1047
		dev_priv->rps.down_ei = now;
1047
	}
1048
	}
1048
 
1049
 
1049
	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
1050
	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
1050
		if (vlv_c0_above(dev_priv,
1051
		if (vlv_c0_above(dev_priv,
1051
				 &dev_priv->rps.up_ei, &now,
1052
				 &dev_priv->rps.up_ei, &now,
1052
				 dev_priv->rps.up_threshold))
1053
				 dev_priv->rps.up_threshold))
1053
			events |= GEN6_PM_RP_UP_THRESHOLD;
1054
			events |= GEN6_PM_RP_UP_THRESHOLD;
1054
		dev_priv->rps.up_ei = now;
1055
		dev_priv->rps.up_ei = now;
1055
	}
1056
	}
1056
 
1057
 
1057
	return events;
1058
	return events;
1058
}
1059
}
1059
 
1060
 
1060
static bool any_waiters(struct drm_i915_private *dev_priv)
1061
static bool any_waiters(struct drm_i915_private *dev_priv)
1061
{
1062
{
1062
	struct intel_engine_cs *ring;
1063
	struct intel_engine_cs *ring;
1063
	int i;
1064
	int i;
1064
 
1065
 
1065
	for_each_ring(ring, dev_priv, i)
1066
	for_each_ring(ring, dev_priv, i)
1066
		if (ring->irq_refcount)
1067
		if (ring->irq_refcount)
1067
			return true;
1068
			return true;
1068
 
1069
 
1069
	return false;
1070
	return false;
1070
}
1071
}
1071
 
1072
 
1072
static void gen6_pm_rps_work(struct work_struct *work)
1073
static void gen6_pm_rps_work(struct work_struct *work)
1073
{
1074
{
1074
	struct drm_i915_private *dev_priv =
1075
	struct drm_i915_private *dev_priv =
1075
		container_of(work, struct drm_i915_private, rps.work);
1076
		container_of(work, struct drm_i915_private, rps.work);
1076
	bool client_boost;
1077
	bool client_boost;
1077
	int new_delay, adj, min, max;
1078
	int new_delay, adj, min, max;
1078
	u32 pm_iir;
1079
	u32 pm_iir;
1079
 
1080
 
1080
	spin_lock_irq(&dev_priv->irq_lock);
1081
	spin_lock_irq(&dev_priv->irq_lock);
1081
	/* Speed up work cancelation during disabling rps interrupts. */
1082
	/* Speed up work cancelation during disabling rps interrupts. */
1082
	if (!dev_priv->rps.interrupts_enabled) {
1083
	if (!dev_priv->rps.interrupts_enabled) {
1083
		spin_unlock_irq(&dev_priv->irq_lock);
1084
		spin_unlock_irq(&dev_priv->irq_lock);
1084
		return;
1085
		return;
1085
	}
1086
	}
1086
	pm_iir = dev_priv->rps.pm_iir;
1087
	pm_iir = dev_priv->rps.pm_iir;
1087
	dev_priv->rps.pm_iir = 0;
1088
	dev_priv->rps.pm_iir = 0;
1088
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
1089
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
1089
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
1090
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
1090
	client_boost = dev_priv->rps.client_boost;
1091
	client_boost = dev_priv->rps.client_boost;
1091
	dev_priv->rps.client_boost = false;
1092
	dev_priv->rps.client_boost = false;
1092
	spin_unlock_irq(&dev_priv->irq_lock);
1093
	spin_unlock_irq(&dev_priv->irq_lock);
1093
 
1094
 
1094
	/* Make sure we didn't queue anything we're not going to process. */
1095
	/* Make sure we didn't queue anything we're not going to process. */
1095
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
1096
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
1096
 
1097
 
1097
	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
1098
	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
1098
		return;
1099
		return;
1099
 
1100
 
1100
	mutex_lock(&dev_priv->rps.hw_lock);
1101
	mutex_lock(&dev_priv->rps.hw_lock);
1101
 
1102
 
1102
	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
1103
	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
1103
 
1104
 
1104
	adj = dev_priv->rps.last_adj;
1105
	adj = dev_priv->rps.last_adj;
1105
	new_delay = dev_priv->rps.cur_freq;
1106
	new_delay = dev_priv->rps.cur_freq;
1106
	min = dev_priv->rps.min_freq_softlimit;
1107
	min = dev_priv->rps.min_freq_softlimit;
1107
	max = dev_priv->rps.max_freq_softlimit;
1108
	max = dev_priv->rps.max_freq_softlimit;
1108
 
1109
 
1109
	if (client_boost) {
1110
	if (client_boost) {
1110
		new_delay = dev_priv->rps.max_freq_softlimit;
1111
		new_delay = dev_priv->rps.max_freq_softlimit;
1111
		adj = 0;
1112
		adj = 0;
1112
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1113
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1113
		if (adj > 0)
1114
		if (adj > 0)
1114
			adj *= 2;
1115
			adj *= 2;
1115
		else /* CHV needs even encode values */
1116
		else /* CHV needs even encode values */
1116
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
1117
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
1117
		/*
1118
		/*
1118
		 * For better performance, jump directly
1119
		 * For better performance, jump directly
1119
		 * to RPe if we're below it.
1120
		 * to RPe if we're below it.
1120
		 */
1121
		 */
1121
		if (new_delay < dev_priv->rps.efficient_freq - adj) {
1122
		if (new_delay < dev_priv->rps.efficient_freq - adj) {
1122
			new_delay = dev_priv->rps.efficient_freq;
1123
			new_delay = dev_priv->rps.efficient_freq;
1123
			adj = 0;
1124
			adj = 0;
1124
		}
1125
		}
1125
	} else if (any_waiters(dev_priv)) {
1126
	} else if (any_waiters(dev_priv)) {
1126
		adj = 0;
1127
		adj = 0;
1127
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1128
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1128
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
1129
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
1129
			new_delay = dev_priv->rps.efficient_freq;
1130
			new_delay = dev_priv->rps.efficient_freq;
1130
		else
1131
		else
1131
			new_delay = dev_priv->rps.min_freq_softlimit;
1132
			new_delay = dev_priv->rps.min_freq_softlimit;
1132
		adj = 0;
1133
		adj = 0;
1133
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1134
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1134
		if (adj < 0)
1135
		if (adj < 0)
1135
			adj *= 2;
1136
			adj *= 2;
1136
		else /* CHV needs even encode values */
1137
		else /* CHV needs even encode values */
1137
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
1138
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
1138
	} else { /* unknown event */
1139
	} else { /* unknown event */
1139
		adj = 0;
1140
		adj = 0;
1140
	}
1141
	}
1141
 
1142
 
1142
	dev_priv->rps.last_adj = adj;
1143
	dev_priv->rps.last_adj = adj;
1143
 
1144
 
1144
	/* sysfs frequency interfaces may have snuck in while servicing the
1145
	/* sysfs frequency interfaces may have snuck in while servicing the
1145
	 * interrupt
1146
	 * interrupt
1146
	 */
1147
	 */
1147
	new_delay += adj;
1148
	new_delay += adj;
1148
	new_delay = clamp_t(int, new_delay, min, max);
1149
	new_delay = clamp_t(int, new_delay, min, max);
1149
 
1150
 
1150
	intel_set_rps(dev_priv->dev, new_delay);
1151
	intel_set_rps(dev_priv->dev, new_delay);
1151
 
1152
 
1152
	mutex_unlock(&dev_priv->rps.hw_lock);
1153
	mutex_unlock(&dev_priv->rps.hw_lock);
1153
}
1154
}
1154
 
1155
 
1155
 
1156
 
1156
/**
1157
/**
1157
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
1158
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
1158
 * occurred.
1159
 * occurred.
1159
 * @work: workqueue struct
1160
 * @work: workqueue struct
1160
 *
1161
 *
1161
 * Doesn't actually do anything except notify userspace. As a consequence of
1162
 * Doesn't actually do anything except notify userspace. As a consequence of
1162
 * this event, userspace should try to remap the bad rows since statistically
1163
 * this event, userspace should try to remap the bad rows since statistically
1163
 * it is likely the same row is more likely to go bad again.
1164
 * it is likely the same row is more likely to go bad again.
1164
 */
1165
 */
1165
static void ivybridge_parity_work(struct work_struct *work)
1166
static void ivybridge_parity_work(struct work_struct *work)
1166
{
1167
{
1167
	struct drm_i915_private *dev_priv =
1168
	struct drm_i915_private *dev_priv =
1168
		container_of(work, struct drm_i915_private, l3_parity.error_work);
1169
		container_of(work, struct drm_i915_private, l3_parity.error_work);
1169
	u32 error_status, row, bank, subbank;
1170
	u32 error_status, row, bank, subbank;
1170
	char *parity_event[6];
1171
	char *parity_event[6];
1171
	uint32_t misccpctl;
1172
	uint32_t misccpctl;
1172
	uint8_t slice = 0;
1173
	uint8_t slice = 0;
1173
 
1174
 
1174
	/* We must turn off DOP level clock gating to access the L3 registers.
1175
	/* We must turn off DOP level clock gating to access the L3 registers.
1175
	 * In order to prevent a get/put style interface, acquire struct mutex
1176
	 * In order to prevent a get/put style interface, acquire struct mutex
1176
	 * any time we access those registers.
1177
	 * any time we access those registers.
1177
	 */
1178
	 */
1178
	mutex_lock(&dev_priv->dev->struct_mutex);
1179
	mutex_lock(&dev_priv->dev->struct_mutex);
1179
 
1180
 
1180
	/* If we've screwed up tracking, just let the interrupt fire again */
1181
	/* If we've screwed up tracking, just let the interrupt fire again */
1181
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
1182
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
1182
		goto out;
1183
		goto out;
1183
 
1184
 
1184
	misccpctl = I915_READ(GEN7_MISCCPCTL);
1185
	misccpctl = I915_READ(GEN7_MISCCPCTL);
1185
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1186
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1186
	POSTING_READ(GEN7_MISCCPCTL);
1187
	POSTING_READ(GEN7_MISCCPCTL);
1187
 
1188
 
1188
	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1189
	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1189
		u32 reg;
1190
		u32 reg;
1190
 
1191
 
1191
		slice--;
1192
		slice--;
1192
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
1193
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
1193
			break;
1194
			break;
1194
 
1195
 
1195
		dev_priv->l3_parity.which_slice &= ~(1<
1196
		dev_priv->l3_parity.which_slice &= ~(1<
1196
 
1197
 
1197
		reg = GEN7_L3CDERRST1 + (slice * 0x200);
1198
		reg = GEN7_L3CDERRST1 + (slice * 0x200);
1198
 
1199
 
1199
		error_status = I915_READ(reg);
1200
		error_status = I915_READ(reg);
1200
		row = GEN7_PARITY_ERROR_ROW(error_status);
1201
		row = GEN7_PARITY_ERROR_ROW(error_status);
1201
		bank = GEN7_PARITY_ERROR_BANK(error_status);
1202
		bank = GEN7_PARITY_ERROR_BANK(error_status);
1202
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1203
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1203
 
1204
 
1204
		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1205
		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1205
		POSTING_READ(reg);
1206
		POSTING_READ(reg);
1206
 
1207
 
1207
		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1208
		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1208
			  slice, row, bank, subbank);
1209
			  slice, row, bank, subbank);
1209
 
1210
 
1210
	}
1211
	}
1211
 
1212
 
1212
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1213
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1213
 
1214
 
1214
out:
1215
out:
1215
	WARN_ON(dev_priv->l3_parity.which_slice);
1216
	WARN_ON(dev_priv->l3_parity.which_slice);
1216
	spin_lock_irq(&dev_priv->irq_lock);
1217
	spin_lock_irq(&dev_priv->irq_lock);
1217
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
1218
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
1218
	spin_unlock_irq(&dev_priv->irq_lock);
1219
	spin_unlock_irq(&dev_priv->irq_lock);
1219
 
1220
 
1220
	mutex_unlock(&dev_priv->dev->struct_mutex);
1221
	mutex_unlock(&dev_priv->dev->struct_mutex);
1221
}
1222
}
1222
 
1223
 
1223
static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
1224
static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
1224
{
1225
{
1225
	struct drm_i915_private *dev_priv = dev->dev_private;
1226
	struct drm_i915_private *dev_priv = dev->dev_private;
1226
 
1227
 
1227
	if (!HAS_L3_DPF(dev))
1228
	if (!HAS_L3_DPF(dev))
1228
		return;
1229
		return;
1229
 
1230
 
1230
	spin_lock(&dev_priv->irq_lock);
1231
	spin_lock(&dev_priv->irq_lock);
1231
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
1232
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
1232
	spin_unlock(&dev_priv->irq_lock);
1233
	spin_unlock(&dev_priv->irq_lock);
1233
 
1234
 
1234
	iir &= GT_PARITY_ERROR(dev);
1235
	iir &= GT_PARITY_ERROR(dev);
1235
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1236
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1236
		dev_priv->l3_parity.which_slice |= 1 << 1;
1237
		dev_priv->l3_parity.which_slice |= 1 << 1;
1237
 
1238
 
1238
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1239
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1239
		dev_priv->l3_parity.which_slice |= 1 << 0;
1240
		dev_priv->l3_parity.which_slice |= 1 << 0;
1240
 
1241
 
1241
	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1242
	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1242
}
1243
}
1243
 
1244
 
1244
static void ilk_gt_irq_handler(struct drm_device *dev,
1245
static void ilk_gt_irq_handler(struct drm_device *dev,
1245
			       struct drm_i915_private *dev_priv,
1246
			       struct drm_i915_private *dev_priv,
1246
			       u32 gt_iir)
1247
			       u32 gt_iir)
1247
{
1248
{
1248
	if (gt_iir &
1249
	if (gt_iir &
1249
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1250
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1250
		notify_ring(&dev_priv->ring[RCS]);
1251
		notify_ring(&dev_priv->ring[RCS]);
1251
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
1252
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
1252
		notify_ring(&dev_priv->ring[VCS]);
1253
		notify_ring(&dev_priv->ring[VCS]);
1253
}
1254
}
1254
 
1255
 
1255
static void snb_gt_irq_handler(struct drm_device *dev,
1256
static void snb_gt_irq_handler(struct drm_device *dev,
1256
			       struct drm_i915_private *dev_priv,
1257
			       struct drm_i915_private *dev_priv,
1257
			       u32 gt_iir)
1258
			       u32 gt_iir)
1258
{
1259
{
1259
 
1260
 
1260
	if (gt_iir &
1261
	if (gt_iir &
1261
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1262
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1262
		notify_ring(&dev_priv->ring[RCS]);
1263
		notify_ring(&dev_priv->ring[RCS]);
1263
	if (gt_iir & GT_BSD_USER_INTERRUPT)
1264
	if (gt_iir & GT_BSD_USER_INTERRUPT)
1264
		notify_ring(&dev_priv->ring[VCS]);
1265
		notify_ring(&dev_priv->ring[VCS]);
1265
	if (gt_iir & GT_BLT_USER_INTERRUPT)
1266
	if (gt_iir & GT_BLT_USER_INTERRUPT)
1266
		notify_ring(&dev_priv->ring[BCS]);
1267
		notify_ring(&dev_priv->ring[BCS]);
1267
 
1268
 
1268
	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1269
	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1269
		      GT_BSD_CS_ERROR_INTERRUPT |
1270
		      GT_BSD_CS_ERROR_INTERRUPT |
1270
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1271
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1271
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1272
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1272
 
1273
 
1273
	if (gt_iir & GT_PARITY_ERROR(dev))
1274
	if (gt_iir & GT_PARITY_ERROR(dev))
1274
		ivybridge_parity_error_irq_handler(dev, gt_iir);
1275
		ivybridge_parity_error_irq_handler(dev, gt_iir);
1275
}
1276
}
1276
 
1277
 
1277
static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
1278
static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
1278
				       u32 master_ctl)
1279
				       u32 master_ctl)
1279
{
1280
{
1280
	irqreturn_t ret = IRQ_NONE;
1281
	irqreturn_t ret = IRQ_NONE;
1281
 
1282
 
1282
	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1283
	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1283
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(0));
1284
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(0));
1284
		if (tmp) {
1285
		if (tmp) {
1285
			I915_WRITE_FW(GEN8_GT_IIR(0), tmp);
1286
			I915_WRITE_FW(GEN8_GT_IIR(0), tmp);
1286
			ret = IRQ_HANDLED;
1287
			ret = IRQ_HANDLED;
1287
 
1288
 
1288
			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
1289
			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
1289
				intel_lrc_irq_handler(&dev_priv->ring[RCS]);
1290
				intel_lrc_irq_handler(&dev_priv->ring[RCS]);
1290
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
1291
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
1291
				notify_ring(&dev_priv->ring[RCS]);
1292
				notify_ring(&dev_priv->ring[RCS]);
1292
 
1293
 
1293
			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
1294
			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
1294
				intel_lrc_irq_handler(&dev_priv->ring[BCS]);
1295
				intel_lrc_irq_handler(&dev_priv->ring[BCS]);
1295
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
1296
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
1296
				notify_ring(&dev_priv->ring[BCS]);
1297
				notify_ring(&dev_priv->ring[BCS]);
1297
		} else
1298
		} else
1298
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
1299
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
1299
	}
1300
	}
1300
 
1301
 
1301
	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1302
	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1302
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(1));
1303
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(1));
1303
		if (tmp) {
1304
		if (tmp) {
1304
			I915_WRITE_FW(GEN8_GT_IIR(1), tmp);
1305
			I915_WRITE_FW(GEN8_GT_IIR(1), tmp);
1305
			ret = IRQ_HANDLED;
1306
			ret = IRQ_HANDLED;
1306
 
1307
 
1307
			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
1308
			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
1308
				intel_lrc_irq_handler(&dev_priv->ring[VCS]);
1309
				intel_lrc_irq_handler(&dev_priv->ring[VCS]);
1309
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
1310
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
1310
				notify_ring(&dev_priv->ring[VCS]);
1311
				notify_ring(&dev_priv->ring[VCS]);
1311
 
1312
 
1312
			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
1313
			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
1313
				intel_lrc_irq_handler(&dev_priv->ring[VCS2]);
1314
				intel_lrc_irq_handler(&dev_priv->ring[VCS2]);
1314
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
1315
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
1315
				notify_ring(&dev_priv->ring[VCS2]);
1316
				notify_ring(&dev_priv->ring[VCS2]);
1316
		} else
1317
		} else
1317
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
1318
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
1318
	}
1319
	}
1319
 
1320
 
1320
	if (master_ctl & GEN8_GT_VECS_IRQ) {
1321
	if (master_ctl & GEN8_GT_VECS_IRQ) {
1321
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(3));
1322
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(3));
1322
		if (tmp) {
1323
		if (tmp) {
1323
			I915_WRITE_FW(GEN8_GT_IIR(3), tmp);
1324
			I915_WRITE_FW(GEN8_GT_IIR(3), tmp);
1324
			ret = IRQ_HANDLED;
1325
			ret = IRQ_HANDLED;
1325
 
1326
 
1326
			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
1327
			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
1327
				intel_lrc_irq_handler(&dev_priv->ring[VECS]);
1328
				intel_lrc_irq_handler(&dev_priv->ring[VECS]);
1328
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
1329
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
1329
				notify_ring(&dev_priv->ring[VECS]);
1330
				notify_ring(&dev_priv->ring[VECS]);
1330
		} else
1331
		} else
1331
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
1332
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
1332
	}
1333
	}
1333
 
1334
 
1334
	if (master_ctl & GEN8_GT_PM_IRQ) {
1335
	if (master_ctl & GEN8_GT_PM_IRQ) {
1335
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(2));
1336
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(2));
1336
		if (tmp & dev_priv->pm_rps_events) {
1337
		if (tmp & dev_priv->pm_rps_events) {
1337
			I915_WRITE_FW(GEN8_GT_IIR(2),
1338
			I915_WRITE_FW(GEN8_GT_IIR(2),
1338
				      tmp & dev_priv->pm_rps_events);
1339
				      tmp & dev_priv->pm_rps_events);
1339
			ret = IRQ_HANDLED;
1340
			ret = IRQ_HANDLED;
1340
			gen6_rps_irq_handler(dev_priv, tmp);
1341
			gen6_rps_irq_handler(dev_priv, tmp);
1341
		} else
1342
		} else
1342
			DRM_ERROR("The master control interrupt lied (PM)!\n");
1343
			DRM_ERROR("The master control interrupt lied (PM)!\n");
1343
	}
1344
	}
1344
 
1345
 
1345
	return ret;
1346
	return ret;
1346
}
1347
}
1347
 
1348
 
1348
static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
1349
static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
1349
{
1350
{
1350
	switch (port) {
1351
	switch (port) {
1351
	case PORT_A:
1352
	case PORT_A:
1352
		return val & PORTA_HOTPLUG_LONG_DETECT;
1353
		return val & PORTA_HOTPLUG_LONG_DETECT;
1353
	case PORT_B:
1354
	case PORT_B:
1354
		return val & PORTB_HOTPLUG_LONG_DETECT;
1355
		return val & PORTB_HOTPLUG_LONG_DETECT;
1355
	case PORT_C:
1356
	case PORT_C:
1356
		return val & PORTC_HOTPLUG_LONG_DETECT;
1357
		return val & PORTC_HOTPLUG_LONG_DETECT;
1357
	default:
1358
	default:
1358
		return false;
1359
		return false;
1359
	}
1360
	}
1360
}
1361
}
1361
 
1362
 
1362
static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
1363
static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
1363
{
1364
{
1364
	switch (port) {
1365
	switch (port) {
1365
	case PORT_E:
1366
	case PORT_E:
1366
		return val & PORTE_HOTPLUG_LONG_DETECT;
1367
		return val & PORTE_HOTPLUG_LONG_DETECT;
1367
	default:
1368
	default:
1368
		return false;
1369
		return false;
1369
	}
1370
	}
1370
}
1371
}
1371
 
1372
 
1372
static bool spt_port_hotplug_long_detect(enum port port, u32 val)
1373
static bool spt_port_hotplug_long_detect(enum port port, u32 val)
1373
{
1374
{
1374
	switch (port) {
1375
	switch (port) {
1375
	case PORT_A:
1376
	case PORT_A:
1376
		return val & PORTA_HOTPLUG_LONG_DETECT;
1377
		return val & PORTA_HOTPLUG_LONG_DETECT;
1377
	case PORT_B:
1378
	case PORT_B:
1378
		return val & PORTB_HOTPLUG_LONG_DETECT;
1379
		return val & PORTB_HOTPLUG_LONG_DETECT;
1379
	case PORT_C:
1380
	case PORT_C:
1380
		return val & PORTC_HOTPLUG_LONG_DETECT;
1381
		return val & PORTC_HOTPLUG_LONG_DETECT;
1381
	case PORT_D:
1382
	case PORT_D:
1382
		return val & PORTD_HOTPLUG_LONG_DETECT;
1383
		return val & PORTD_HOTPLUG_LONG_DETECT;
1383
	default:
1384
	default:
1384
		return false;
1385
		return false;
1385
	}
1386
	}
1386
}
1387
}
1387
 
1388
 
1388
static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
1389
static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
1389
{
1390
{
1390
	switch (port) {
1391
	switch (port) {
1391
	case PORT_A:
1392
	case PORT_A:
1392
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1393
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1393
	default:
1394
	default:
1394
		return false;
1395
		return false;
1395
	}
1396
	}
1396
}
1397
}
1397
 
1398
 
1398
static bool pch_port_hotplug_long_detect(enum port port, u32 val)
1399
static bool pch_port_hotplug_long_detect(enum port port, u32 val)
1399
{
1400
{
1400
	switch (port) {
1401
	switch (port) {
1401
	case PORT_B:
1402
	case PORT_B:
1402
		return val & PORTB_HOTPLUG_LONG_DETECT;
1403
		return val & PORTB_HOTPLUG_LONG_DETECT;
1403
	case PORT_C:
1404
	case PORT_C:
1404
		return val & PORTC_HOTPLUG_LONG_DETECT;
1405
		return val & PORTC_HOTPLUG_LONG_DETECT;
1405
	case PORT_D:
1406
	case PORT_D:
1406
		return val & PORTD_HOTPLUG_LONG_DETECT;
1407
		return val & PORTD_HOTPLUG_LONG_DETECT;
1407
	default:
1408
	default:
1408
		return false;
1409
		return false;
1409
	}
1410
	}
1410
}
1411
}
1411
 
1412
 
1412
static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
1413
static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
1413
{
1414
{
1414
	switch (port) {
1415
	switch (port) {
1415
	case PORT_B:
1416
	case PORT_B:
1416
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1417
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1417
	case PORT_C:
1418
	case PORT_C:
1418
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1419
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1419
	case PORT_D:
1420
	case PORT_D:
1420
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1421
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1421
	default:
1422
	default:
1422
		return false;
1423
		return false;
1423
	}
1424
	}
1424
}
1425
}
1425
 
1426
 
1426
/*
1427
/*
1427
 * Get a bit mask of pins that have triggered, and which ones may be long.
1428
 * Get a bit mask of pins that have triggered, and which ones may be long.
1428
 * This can be called multiple times with the same masks to accumulate
1429
 * This can be called multiple times with the same masks to accumulate
1429
 * hotplug detection results from several registers.
1430
 * hotplug detection results from several registers.
1430
 *
1431
 *
1431
 * Note that the caller is expected to zero out the masks initially.
1432
 * Note that the caller is expected to zero out the masks initially.
1432
 */
1433
 */
1433
/*
 * Translate a hardware hotplug trigger bitmask into HPD pin masks.
 *
 * @pin_mask:          out - accumulates BIT(pin) for each triggered HPD pin
 * @long_mask:         out - accumulates BIT(pin) for pins with a long pulse
 * @hotplug_trigger:   triggered-port bits from the hotplug status register
 * @dig_hotplug_reg:   raw digital hotplug register, decoded by the callback
 * @hpd:               per-pin table mapping HPD pins to trigger bits
 * @long_pulse_detect: platform callback deciding short vs long pulse
 *
 * Can be called multiple times with the same masks to accumulate results
 * from several registers; the caller is expected to zero the masks first.
 */
static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
			     u32 hotplug_trigger, u32 dig_hotplug_reg,
			     const u32 hpd[HPD_NUM_PINS],
			     bool long_pulse_detect(enum port port, u32 val))
{
	enum port port;
	int i;

	for_each_hpd_pin(i) {
		if ((hpd[i] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(i);

		/* NOTE(review): the pin->port lookup below is disabled in this
		 * port, so 'port' is passed to long_pulse_detect() while still
		 * uninitialized -- undefined behavior, and *long_mask is
		 * therefore unreliable.  Restore intel_hpd_pin_to_port() (or at
		 * least initialize 'port') to fix. */
//       if (!intel_hpd_pin_to_port(i, &port))
//			continue;

		if (long_pulse_detect(port, dig_hotplug_reg))
			*long_mask |= BIT(i);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask);

}
1458
 
1459
 
1459
static void gmbus_irq_handler(struct drm_device *dev)
1460
static void gmbus_irq_handler(struct drm_device *dev)
1460
{
1461
{
1461
	struct drm_i915_private *dev_priv = dev->dev_private;
1462
	struct drm_i915_private *dev_priv = dev->dev_private;
1462
 
1463
 
1463
	wake_up_all(&dev_priv->gmbus_wait_queue);
1464
	wake_up_all(&dev_priv->gmbus_wait_queue);
1464
}
1465
}
1465
 
1466
 
1466
static void dp_aux_irq_handler(struct drm_device *dev)
1467
static void dp_aux_irq_handler(struct drm_device *dev)
1467
{
1468
{
1468
	struct drm_i915_private *dev_priv = dev->dev_private;
1469
	struct drm_i915_private *dev_priv = dev->dev_private;
1469
 
1470
 
1470
	wake_up_all(&dev_priv->gmbus_wait_queue);
1471
	wake_up_all(&dev_priv->gmbus_wait_queue);
1471
}
1472
}
1472
 
1473
 
1473
#if defined(CONFIG_DEBUG_FS)
/*
 * Push one set of pipe CRC results into the pipe's circular CRC buffer
 * and wake any debugfs reader waiting for fresh entries.
 *
 * Runs from interrupt context (hence the plain spin_lock).  CRCs that
 * arrive after the buffer has been torn down are dropped as spurious,
 * and samples are dropped (not overwritten) when the reader lags.
 */
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	/* CRC interrupt with no buffer allocated: collection was stopped. */
	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_DEBUG_KMS("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	/* Tag the sample with the frame counter so userspace can correlate. */
	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	/* INTEL_PIPE_CRC_ENTRIES_NR is a power of two, so mask to wrap. */
	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
/* Without debugfs there is no CRC consumer; compile to a no-op. */
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif
1524
 
1525
 
1525
 
1526
 
1526
static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1527
static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1527
{
1528
{
1528
	struct drm_i915_private *dev_priv = dev->dev_private;
1529
	struct drm_i915_private *dev_priv = dev->dev_private;
1529
 
1530
 
1530
	display_pipe_crc_irq_handler(dev, pipe,
1531
	display_pipe_crc_irq_handler(dev, pipe,
1531
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1532
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1532
				     0, 0, 0, 0);
1533
				     0, 0, 0, 0);
1533
}
1534
}
1534
 
1535
 
1535
/*
 * IVB CRC interrupt: read all five CRC result registers for the pipe and
 * hand them to the common buffering code.  The reads are kept as call
 * arguments to match the original; their relative order is unspecified.
 */
static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}
1546
 
1547
 
1547
/*
 * Pre-ILK / g4x CRC interrupt: gather the per-channel CRC results and the
 * generation-dependent extra result registers, then buffer them.
 *
 * RES1 exists only on gen3+, RES2 only on gen5+ and G4X; older hardware
 * reports 0 for the missing registers.
 */
static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}
1568
 
1569
 
1569
/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		/* Mask the RPS bits before queuing; the work re-enables them. */
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			/* Accumulate under irq_lock; the work consumes pm_iir. */
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			queue_work(dev_priv->wq, &dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	/* On gen8+ the VEBOX bits below live elsewhere; nothing more to do. */
	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}
1595
 
1596
 
1596
/*
 * Forward a pipe vblank event to the DRM core.  Returns true when the
 * core accepted it (i.e. vblank handling is enabled for this pipe).
 */
static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
	return drm_handle_vblank(dev, pipe);
}
1603
 
1604
 
1604
/*
 * Handle per-pipe PIPESTAT events for VLV/CHV.
 *
 * Phase 1 (under irq_lock): for each pipe, compute which status bits we
 * actually care about, latch them into pipe_stats[], and ack them in the
 * PIPE*STAT register before the caller clears the IIR.
 * Phase 2 (lock dropped): dispatch vblank, CRC, underrun and GMBUS events.
 * Page-flip handling is disabled in this port (commented-out calls).
 */
static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	spin_lock(&dev_priv->irq_lock);
	for_each_pipe(dev_priv, pipe) {
		int reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filterered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		/* Only honour the enabled status bits when IIR flagged the pipe. */
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev, pipe))
            /*intel_check_page_flip(dev, pipe)*/;

		/* Page flipping is stubbed out in this port. */
		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
//           intel_prepare_page_flip(dev, pipe);
//           intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	/* GMBUS status is only reported on pipe A's PIPESTAT. */
	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}
1676
 
1677
 
1677
/*
 * Service a gmch-era (i915/g4x/vlv) hotplug interrupt: ack the hotplug
 * status register, decode which ports triggered and whether the pulses
 * were long, and dispatch DP AUX completions (g4x/vlv only).
 * Delivery to the hotplug work is disabled in this port.
 */
static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
	u32 pin_mask = 0, long_mask = 0;

	if (!hotplug_status)
		return;

	I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
	/*
	 * Make sure hotplug status is cleared before we clear IIR, or else we
	 * may miss hotplug events.
	 */
	POSTING_READ(PORT_HOTPLUG_STAT);

	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		if (hotplug_trigger) {
			/* The status register doubles as the "dig" register here. */
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_g4x,
					   i9xx_port_hotplug_long_detect);

//           intel_hpd_irq_handler(dev, pin_mask, long_mask);
		}

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_i915,
					   i9xx_port_hotplug_long_detect);
//           intel_hpd_irq_handler(dev, pin_mask, long_mask);
		}
	}
}
1717
 
1718
 
1718
/*
 * Top-level VLV interrupt handler.  Loops until a pass finds no pending
 * GT, PM or display interrupts; each pass acks the IIR registers first
 * (hotplug before VLV_IIR, see comment below) and then dispatches.
 *
 * Returns IRQ_HANDLED if any pass found work, IRQ_NONE otherwise.
 */
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	while (true) {
		/* Find, clear, then process each source of interrupt */

		gt_iir = I915_READ(GTIIR);
		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);

		pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		iir = I915_READ(VLV_IIR);
		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		if (gt_iir)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);
		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);
	}

out:
	return ret;
}
1764
 
1765
 
1765
/*
 * Top-level CHV interrupt handler.  Per pass: sample the gen8 master IRQ
 * control and VLV_IIR, bail when both are idle, otherwise disable the
 * master interrupt, ack+dispatch the sources, and re-enable it (with a
 * posting read to flush the write) before the next pass.
 */
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl, iir;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	for (;;) {
		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/* Disable the master interrupt while we service this pass. */
		I915_WRITE(GEN8_MASTER_IRQ, 0);

		/* Find, clear, then process each source of interrupt */

		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		gen8_gt_irq_handler(dev_priv, master_ctl);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);

		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);
	}

	return ret;
}
1807
 
1808
 
1808
static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
1809
static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
1809
				const u32 hpd[HPD_NUM_PINS])
1810
				const u32 hpd[HPD_NUM_PINS])
1810
{
1811
{
1811
	struct drm_i915_private *dev_priv = to_i915(dev);
1812
	struct drm_i915_private *dev_priv = to_i915(dev);
1812
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1813
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1813
 
1814
 
1814
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1815
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1815
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1816
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1816
 
1817
 
1817
	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1818
	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1818
			   dig_hotplug_reg, hpd,
1819
			   dig_hotplug_reg, hpd,
1819
			   pch_port_hotplug_long_detect);
1820
			   pch_port_hotplug_long_detect);
1820
 
1821
 
1821
//   intel_hpd_irq_handler(dev, pin_mask, long_mask);
1822
//   intel_hpd_irq_handler(dev, pin_mask, long_mask);
1822
}
1823
}
1823
 
1824
 
1824
/*
 * Dispatch an IBX south-display (PCH) interrupt: hotplug, audio power,
 * DP AUX, GMBUS, HDCP/transcoder audio, poison, FDI, transcoder CRC and
 * FIFO underrun events.  Informational sources only emit debug logging.
 *
 * Fix: the audio-power message used "%d" with port_name(), which prints
 * the character's ASCII code; use "%c" like the CPT handler does.
 */
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	if (hotplug_trigger)
		ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
}
1873
}
1873
 
1874
 
1874
/*
 * Handle the gen7 error interrupt register: poison, per-pipe FIFO
 * underruns and per-pipe CRC-done events, then ack what was read by
 * writing the value back.
 */
static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			/* IVB reads five CRC result registers, HSW+ only one. */
			if (IS_IVYBRIDGE(dev))
				ivb_pipe_crc_irq_handler(dev, pipe);
			else
				hsw_pipe_crc_irq_handler(dev, pipe);
		}
	}

	/* Write back the handled bits to ack them. */
	I915_WRITE(GEN7_ERR_INT, err_int);
}
1897
 
1898
 
1898
/*
 * Handle the CPT south error interrupt register: PCH poison and
 * per-transcoder FIFO underruns, then ack by writing the value back.
 */
static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);

	/* Write back the handled bits to ack them. */
	I915_WRITE(SERR_INT, serr_int);
}
1917
 
1918
 
1918
/*
 * Dispatch a CPT south-display (PCH) interrupt: hotplug, audio power,
 * DP AUX, GMBUS, content-protection audio, FDI and error events.
 * Informational sources only emit debug logging; errors are forwarded
 * to cpt_serr_int_handler().
 */
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	if (hotplug_trigger)
		ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}
1955
 
1956
 
1956
static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
1957
static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
1957
{
1958
{
1958
	struct drm_i915_private *dev_priv = dev->dev_private;
1959
	struct drm_i915_private *dev_priv = dev->dev_private;
1959
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
1960
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
1960
		~SDE_PORTE_HOTPLUG_SPT;
1961
		~SDE_PORTE_HOTPLUG_SPT;
1961
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
1962
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
1962
	u32 pin_mask = 0, long_mask = 0;
1963
	u32 pin_mask = 0, long_mask = 0;
1963
 
1964
 
1964
	if (hotplug_trigger) {
1965
	if (hotplug_trigger) {
1965
		u32 dig_hotplug_reg;
1966
		u32 dig_hotplug_reg;
1966
 
1967
 
1967
		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1968
		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1968
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1969
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1969
 
1970
 
1970
		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1971
		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1971
				   dig_hotplug_reg, hpd_spt,
1972
				   dig_hotplug_reg, hpd_spt,
1972
				   spt_port_hotplug_long_detect);
1973
				   spt_port_hotplug_long_detect);
1973
	}
1974
	}
1974
 
1975
 
1975
	if (hotplug2_trigger) {
1976
	if (hotplug2_trigger) {
1976
		u32 dig_hotplug_reg;
1977
		u32 dig_hotplug_reg;
1977
 
1978
 
1978
		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
1979
		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
1979
		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
1980
		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
1980
 
1981
 
1981
		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
1982
		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
1982
				   dig_hotplug_reg, hpd_spt,
1983
				   dig_hotplug_reg, hpd_spt,
1983
				   spt_port_hotplug2_long_detect);
1984
				   spt_port_hotplug2_long_detect);
1984
	}
1985
	}
1985
 
1986
 
1986
	if (pch_iir & SDE_GMBUS_CPT)
1987
	if (pch_iir & SDE_GMBUS_CPT)
1987
		gmbus_irq_handler(dev);
1988
		gmbus_irq_handler(dev);
1988
}
1989
}
1989
 
1990
 
1990
static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
1991
static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
1991
				const u32 hpd[HPD_NUM_PINS])
1992
				const u32 hpd[HPD_NUM_PINS])
1992
{
1993
{
1993
	struct drm_i915_private *dev_priv = to_i915(dev);
1994
	struct drm_i915_private *dev_priv = to_i915(dev);
1994
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1995
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1995
 
1996
 
1996
	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
1997
	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
1997
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
1998
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
1998
 
1999
 
1999
	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2000
	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2000
			   dig_hotplug_reg, hpd,
2001
			   dig_hotplug_reg, hpd,
2001
			   ilk_port_hotplug_long_detect);
2002
			   ilk_port_hotplug_long_detect);
2002
 
2003
 
2003
}
2004
}
2004
 
2005
 
2005
static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2006
static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2006
{
2007
{
2007
	struct drm_i915_private *dev_priv = dev->dev_private;
2008
	struct drm_i915_private *dev_priv = dev->dev_private;
2008
	enum pipe pipe;
2009
	enum pipe pipe;
2009
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2010
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2010
 
2011
 
2011
	if (hotplug_trigger)
2012
	if (hotplug_trigger)
2012
		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);
2013
		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);
2013
 
2014
 
2014
	if (de_iir & DE_AUX_CHANNEL_A)
2015
	if (de_iir & DE_AUX_CHANNEL_A)
2015
		dp_aux_irq_handler(dev);
2016
		dp_aux_irq_handler(dev);
2016
 
2017
 
2017
	if (de_iir & DE_GSE)
2018
	if (de_iir & DE_GSE)
2018
		intel_opregion_asle_intr(dev);
2019
		intel_opregion_asle_intr(dev);
2019
 
2020
 
2020
	if (de_iir & DE_POISON)
2021
	if (de_iir & DE_POISON)
2021
		DRM_ERROR("Poison interrupt\n");
2022
		DRM_ERROR("Poison interrupt\n");
2022
 
2023
 
2023
	for_each_pipe(dev_priv, pipe) {
2024
	for_each_pipe(dev_priv, pipe) {
2024
		if (de_iir & DE_PIPE_VBLANK(pipe) &&
2025
		if (de_iir & DE_PIPE_VBLANK(pipe) &&
2025
		    intel_pipe_handle_vblank(dev, pipe))
2026
		    intel_pipe_handle_vblank(dev, pipe))
2026
            /*intel_check_page_flip(dev, pipe)*/;
2027
            /*intel_check_page_flip(dev, pipe)*/;
2027
 
2028
 
2028
		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2029
		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2029
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2030
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2030
 
2031
 
2031
		if (de_iir & DE_PIPE_CRC_DONE(pipe))
2032
		if (de_iir & DE_PIPE_CRC_DONE(pipe))
2032
			i9xx_pipe_crc_irq_handler(dev, pipe);
2033
			i9xx_pipe_crc_irq_handler(dev, pipe);
2033
 
2034
 
2034
		/* plane/pipes map 1:1 on ilk+ */
2035
		/* plane/pipes map 1:1 on ilk+ */
2035
		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2036
		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2036
//			intel_prepare_page_flip(dev, pipe);
2037
//			intel_prepare_page_flip(dev, pipe);
2037
//			intel_finish_page_flip_plane(dev, pipe);
2038
//			intel_finish_page_flip_plane(dev, pipe);
2038
		}
2039
		}
2039
	}
2040
	}
2040
 
2041
 
2041
	/* check event from PCH */
2042
	/* check event from PCH */
2042
	if (de_iir & DE_PCH_EVENT) {
2043
	if (de_iir & DE_PCH_EVENT) {
2043
		u32 pch_iir = I915_READ(SDEIIR);
2044
		u32 pch_iir = I915_READ(SDEIIR);
2044
 
2045
 
2045
		if (HAS_PCH_CPT(dev))
2046
		if (HAS_PCH_CPT(dev))
2046
			cpt_irq_handler(dev, pch_iir);
2047
			cpt_irq_handler(dev, pch_iir);
2047
		else
2048
		else
2048
			ibx_irq_handler(dev, pch_iir);
2049
			ibx_irq_handler(dev, pch_iir);
2049
 
2050
 
2050
		/* should clear PCH hotplug event before clear CPU irq */
2051
		/* should clear PCH hotplug event before clear CPU irq */
2051
		I915_WRITE(SDEIIR, pch_iir);
2052
		I915_WRITE(SDEIIR, pch_iir);
2052
	}
2053
	}
2053
 
2054
 
2054
	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
2055
	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
2055
		ironlake_rps_change_irq_handler(dev);
2056
		ironlake_rps_change_irq_handler(dev);
2056
}
2057
}
2057
 
2058
 
2058
static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2059
static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2059
{
2060
{
2060
	struct drm_i915_private *dev_priv = dev->dev_private;
2061
	struct drm_i915_private *dev_priv = dev->dev_private;
2061
	enum pipe pipe;
2062
	enum pipe pipe;
2062
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2063
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2063
 
2064
 
2064
	if (hotplug_trigger)
2065
	if (hotplug_trigger)
2065
		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb);
2066
		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb);
2066
 
2067
 
2067
	if (de_iir & DE_ERR_INT_IVB)
2068
	if (de_iir & DE_ERR_INT_IVB)
2068
		ivb_err_int_handler(dev);
2069
		ivb_err_int_handler(dev);
2069
 
2070
 
2070
	if (de_iir & DE_AUX_CHANNEL_A_IVB)
2071
	if (de_iir & DE_AUX_CHANNEL_A_IVB)
2071
		dp_aux_irq_handler(dev);
2072
		dp_aux_irq_handler(dev);
2072
 
2073
 
2073
	if (de_iir & DE_GSE_IVB)
2074
	if (de_iir & DE_GSE_IVB)
2074
		intel_opregion_asle_intr(dev);
2075
		intel_opregion_asle_intr(dev);
2075
 
2076
 
2076
	for_each_pipe(dev_priv, pipe) {
2077
	for_each_pipe(dev_priv, pipe) {
2077
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2078
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2078
		    intel_pipe_handle_vblank(dev, pipe))
2079
		    intel_pipe_handle_vblank(dev, pipe))
2079
            /*intel_check_page_flip(dev, pipe)*/;
2080
            /*intel_check_page_flip(dev, pipe)*/;
2080
 
2081
 
2081
		/* plane/pipes map 1:1 on ilk+ */
2082
		/* plane/pipes map 1:1 on ilk+ */
2082
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2083
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2083
//			intel_prepare_page_flip(dev, pipe);
2084
//			intel_prepare_page_flip(dev, pipe);
2084
//			intel_finish_page_flip_plane(dev, pipe);
2085
//			intel_finish_page_flip_plane(dev, pipe);
2085
		}
2086
		}
2086
	}
2087
	}
2087
 
2088
 
2088
	/* check event from PCH */
2089
	/* check event from PCH */
2089
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2090
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2090
		u32 pch_iir = I915_READ(SDEIIR);
2091
		u32 pch_iir = I915_READ(SDEIIR);
2091
 
2092
 
2092
		cpt_irq_handler(dev, pch_iir);
2093
		cpt_irq_handler(dev, pch_iir);
2093
 
2094
 
2094
		/* clear PCH hotplug event before clear CPU irq */
2095
		/* clear PCH hotplug event before clear CPU irq */
2095
		I915_WRITE(SDEIIR, pch_iir);
2096
		I915_WRITE(SDEIIR, pch_iir);
2096
	}
2097
	}
2097
}
2098
}
2098
 
2099
 
2099
/*
2100
/*
2100
 * To handle irqs with the minimum potential races with fresh interrupts, we:
2101
 * To handle irqs with the minimum potential races with fresh interrupts, we:
2101
 * 1 - Disable Master Interrupt Control.
2102
 * 1 - Disable Master Interrupt Control.
2102
 * 2 - Find the source(s) of the interrupt.
2103
 * 2 - Find the source(s) of the interrupt.
2103
 * 3 - Clear the Interrupt Identity bits (IIR).
2104
 * 3 - Clear the Interrupt Identity bits (IIR).
2104
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2105
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2105
 * 5 - Re-enable Master Interrupt Control.
2106
 * 5 - Re-enable Master Interrupt Control.
2106
 */
2107
 */
2107
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2108
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2108
{
2109
{
2109
	struct drm_device *dev = arg;
2110
	struct drm_device *dev = arg;
2110
	struct drm_i915_private *dev_priv = dev->dev_private;
2111
	struct drm_i915_private *dev_priv = dev->dev_private;
2111
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2112
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2112
	irqreturn_t ret = IRQ_NONE;
2113
	irqreturn_t ret = IRQ_NONE;
2113
 
2114
 
2114
	if (!intel_irqs_enabled(dev_priv))
2115
	if (!intel_irqs_enabled(dev_priv))
2115
		return IRQ_NONE;
2116
		return IRQ_NONE;
2116
 
2117
 
2117
	/* We get interrupts on unclaimed registers, so check for this before we
2118
	/* We get interrupts on unclaimed registers, so check for this before we
2118
	 * do any I915_{READ,WRITE}. */
2119
	 * do any I915_{READ,WRITE}. */
2119
	intel_uncore_check_errors(dev);
2120
	intel_uncore_check_errors(dev);
2120
 
2121
 
2121
	/* disable master interrupt before clearing iir  */
2122
	/* disable master interrupt before clearing iir  */
2122
	de_ier = I915_READ(DEIER);
2123
	de_ier = I915_READ(DEIER);
2123
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2124
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2124
	POSTING_READ(DEIER);
2125
	POSTING_READ(DEIER);
2125
 
2126
 
2126
	/* Disable south interrupts. We'll only write to SDEIIR once, so further
2127
	/* Disable south interrupts. We'll only write to SDEIIR once, so further
2127
	 * interrupts will will be stored on its back queue, and then we'll be
2128
	 * interrupts will will be stored on its back queue, and then we'll be
2128
	 * able to process them after we restore SDEIER (as soon as we restore
2129
	 * able to process them after we restore SDEIER (as soon as we restore
2129
	 * it, we'll get an interrupt if SDEIIR still has something to process
2130
	 * it, we'll get an interrupt if SDEIIR still has something to process
2130
	 * due to its back queue). */
2131
	 * due to its back queue). */
2131
	if (!HAS_PCH_NOP(dev)) {
2132
	if (!HAS_PCH_NOP(dev)) {
2132
		sde_ier = I915_READ(SDEIER);
2133
		sde_ier = I915_READ(SDEIER);
2133
		I915_WRITE(SDEIER, 0);
2134
		I915_WRITE(SDEIER, 0);
2134
		POSTING_READ(SDEIER);
2135
		POSTING_READ(SDEIER);
2135
	}
2136
	}
2136
 
2137
 
2137
	/* Find, clear, then process each source of interrupt */
2138
	/* Find, clear, then process each source of interrupt */
2138
 
2139
 
2139
	gt_iir = I915_READ(GTIIR);
2140
	gt_iir = I915_READ(GTIIR);
2140
	if (gt_iir) {
2141
	if (gt_iir) {
2141
		I915_WRITE(GTIIR, gt_iir);
2142
		I915_WRITE(GTIIR, gt_iir);
2142
		ret = IRQ_HANDLED;
2143
		ret = IRQ_HANDLED;
2143
		if (INTEL_INFO(dev)->gen >= 6)
2144
		if (INTEL_INFO(dev)->gen >= 6)
2144
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
2145
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
2145
		else
2146
		else
2146
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
2147
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
2147
	}
2148
	}
2148
 
2149
 
2149
	de_iir = I915_READ(DEIIR);
2150
	de_iir = I915_READ(DEIIR);
2150
	if (de_iir) {
2151
	if (de_iir) {
2151
		I915_WRITE(DEIIR, de_iir);
2152
		I915_WRITE(DEIIR, de_iir);
2152
		ret = IRQ_HANDLED;
2153
		ret = IRQ_HANDLED;
2153
		if (INTEL_INFO(dev)->gen >= 7)
2154
		if (INTEL_INFO(dev)->gen >= 7)
2154
			ivb_display_irq_handler(dev, de_iir);
2155
			ivb_display_irq_handler(dev, de_iir);
2155
		else
2156
		else
2156
			ilk_display_irq_handler(dev, de_iir);
2157
			ilk_display_irq_handler(dev, de_iir);
2157
	}
2158
	}
2158
 
2159
 
2159
	if (INTEL_INFO(dev)->gen >= 6) {
2160
	if (INTEL_INFO(dev)->gen >= 6) {
2160
		u32 pm_iir = I915_READ(GEN6_PMIIR);
2161
		u32 pm_iir = I915_READ(GEN6_PMIIR);
2161
		if (pm_iir) {
2162
		if (pm_iir) {
2162
			I915_WRITE(GEN6_PMIIR, pm_iir);
2163
			I915_WRITE(GEN6_PMIIR, pm_iir);
2163
			ret = IRQ_HANDLED;
2164
			ret = IRQ_HANDLED;
2164
			gen6_rps_irq_handler(dev_priv, pm_iir);
2165
			gen6_rps_irq_handler(dev_priv, pm_iir);
2165
		}
2166
		}
2166
	}
2167
	}
2167
 
2168
 
2168
	I915_WRITE(DEIER, de_ier);
2169
	I915_WRITE(DEIER, de_ier);
2169
	POSTING_READ(DEIER);
2170
	POSTING_READ(DEIER);
2170
	if (!HAS_PCH_NOP(dev)) {
2171
	if (!HAS_PCH_NOP(dev)) {
2171
		I915_WRITE(SDEIER, sde_ier);
2172
		I915_WRITE(SDEIER, sde_ier);
2172
		POSTING_READ(SDEIER);
2173
		POSTING_READ(SDEIER);
2173
	}
2174
	}
2174
 
2175
 
2175
	return ret;
2176
	return ret;
2176
}
2177
}
2177
 
2178
 
2178
static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
2179
static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
2179
				const u32 hpd[HPD_NUM_PINS])
2180
				const u32 hpd[HPD_NUM_PINS])
2180
{
2181
{
2181
	struct drm_i915_private *dev_priv = to_i915(dev);
2182
	struct drm_i915_private *dev_priv = to_i915(dev);
2182
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2183
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2183
 
2184
 
2184
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2185
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2185
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2186
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2186
 
2187
 
2187
	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2188
	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2188
			   dig_hotplug_reg, hpd,
2189
			   dig_hotplug_reg, hpd,
2189
			   bxt_port_hotplug_long_detect);
2190
			   bxt_port_hotplug_long_detect);
2190
 
2191
 
2191
}
2192
}
2192
 
2193
 
2193
static irqreturn_t gen8_irq_handler(int irq, void *arg)
2194
static irqreturn_t gen8_irq_handler(int irq, void *arg)
2194
{
2195
{
2195
	struct drm_device *dev = arg;
2196
	struct drm_device *dev = arg;
2196
	struct drm_i915_private *dev_priv = dev->dev_private;
2197
	struct drm_i915_private *dev_priv = dev->dev_private;
2197
	u32 master_ctl;
2198
	u32 master_ctl;
2198
	irqreturn_t ret = IRQ_NONE;
2199
	irqreturn_t ret = IRQ_NONE;
2199
	uint32_t tmp = 0;
2200
	uint32_t tmp = 0;
2200
	enum pipe pipe;
2201
	enum pipe pipe;
2201
	u32 aux_mask = GEN8_AUX_CHANNEL_A;
2202
	u32 aux_mask = GEN8_AUX_CHANNEL_A;
2202
 
2203
 
2203
	if (!intel_irqs_enabled(dev_priv))
2204
	if (!intel_irqs_enabled(dev_priv))
2204
		return IRQ_NONE;
2205
		return IRQ_NONE;
2205
 
2206
 
2206
	if (INTEL_INFO(dev_priv)->gen >= 9)
2207
	if (INTEL_INFO(dev_priv)->gen >= 9)
2207
		aux_mask |=  GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
2208
		aux_mask |=  GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
2208
			GEN9_AUX_CHANNEL_D;
2209
			GEN9_AUX_CHANNEL_D;
2209
 
2210
 
2210
	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2211
	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2211
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2212
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2212
	if (!master_ctl)
2213
	if (!master_ctl)
2213
		return IRQ_NONE;
2214
		return IRQ_NONE;
2214
 
2215
 
2215
	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2216
	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2216
 
2217
 
2217
	/* Find, clear, then process each source of interrupt */
2218
	/* Find, clear, then process each source of interrupt */
2218
 
2219
 
2219
	ret = gen8_gt_irq_handler(dev_priv, master_ctl);
2220
	ret = gen8_gt_irq_handler(dev_priv, master_ctl);
2220
 
2221
 
2221
	if (master_ctl & GEN8_DE_MISC_IRQ) {
2222
	if (master_ctl & GEN8_DE_MISC_IRQ) {
2222
		tmp = I915_READ(GEN8_DE_MISC_IIR);
2223
		tmp = I915_READ(GEN8_DE_MISC_IIR);
2223
		if (tmp) {
2224
		if (tmp) {
2224
			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2225
			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2225
			ret = IRQ_HANDLED;
2226
			ret = IRQ_HANDLED;
2226
			if (tmp & GEN8_DE_MISC_GSE)
2227
			if (tmp & GEN8_DE_MISC_GSE)
2227
				intel_opregion_asle_intr(dev);
2228
				intel_opregion_asle_intr(dev);
2228
			else
2229
			else
2229
				DRM_ERROR("Unexpected DE Misc interrupt\n");
2230
				DRM_ERROR("Unexpected DE Misc interrupt\n");
2230
		}
2231
		}
2231
		else
2232
		else
2232
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2233
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2233
	}
2234
	}
2234
 
2235
 
2235
	if (master_ctl & GEN8_DE_PORT_IRQ) {
2236
	if (master_ctl & GEN8_DE_PORT_IRQ) {
2236
		tmp = I915_READ(GEN8_DE_PORT_IIR);
2237
		tmp = I915_READ(GEN8_DE_PORT_IIR);
2237
		if (tmp) {
2238
		if (tmp) {
2238
			bool found = false;
2239
			bool found = false;
2239
			u32 hotplug_trigger = 0;
2240
			u32 hotplug_trigger = 0;
2240
 
2241
 
2241
			if (IS_BROXTON(dev_priv))
2242
			if (IS_BROXTON(dev_priv))
2242
				hotplug_trigger = tmp & BXT_DE_PORT_HOTPLUG_MASK;
2243
				hotplug_trigger = tmp & BXT_DE_PORT_HOTPLUG_MASK;
2243
			else if (IS_BROADWELL(dev_priv))
2244
			else if (IS_BROADWELL(dev_priv))
2244
				hotplug_trigger = tmp & GEN8_PORT_DP_A_HOTPLUG;
2245
				hotplug_trigger = tmp & GEN8_PORT_DP_A_HOTPLUG;
2245
 
2246
 
2246
			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2247
			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2247
			ret = IRQ_HANDLED;
2248
			ret = IRQ_HANDLED;
2248
 
2249
 
2249
			if (tmp & aux_mask) {
2250
			if (tmp & aux_mask) {
2250
				dp_aux_irq_handler(dev);
2251
				dp_aux_irq_handler(dev);
2251
				found = true;
2252
				found = true;
2252
			}
2253
			}
2253
 
2254
 
2254
			if (hotplug_trigger) {
2255
			if (hotplug_trigger) {
2255
				if (IS_BROXTON(dev))
2256
				if (IS_BROXTON(dev))
2256
					bxt_hpd_irq_handler(dev, hotplug_trigger, hpd_bxt);
2257
					bxt_hpd_irq_handler(dev, hotplug_trigger, hpd_bxt);
2257
				else
2258
				else
2258
					ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_bdw);
2259
					ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_bdw);
2259
				found = true;
2260
				found = true;
2260
			}
2261
			}
2261
 
2262
 
2262
			if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
2263
			if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
2263
				gmbus_irq_handler(dev);
2264
				gmbus_irq_handler(dev);
2264
				found = true;
2265
				found = true;
2265
			}
2266
			}
2266
 
2267
 
2267
			if (!found)
2268
			if (!found)
2268
				DRM_ERROR("Unexpected DE Port interrupt\n");
2269
				DRM_ERROR("Unexpected DE Port interrupt\n");
2269
		}
2270
		}
2270
		else
2271
		else
2271
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2272
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2272
	}
2273
	}
2273
 
2274
 
2274
	for_each_pipe(dev_priv, pipe) {
2275
	for_each_pipe(dev_priv, pipe) {
2275
		uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
2276
		uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
2276
 
2277
 
2277
		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2278
		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2278
			continue;
2279
			continue;
2279
 
2280
 
2280
		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2281
		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2281
		if (pipe_iir) {
2282
		if (pipe_iir) {
2282
			ret = IRQ_HANDLED;
2283
			ret = IRQ_HANDLED;
2283
			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2284
			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2284
 
2285
 
2285
			if (pipe_iir & GEN8_PIPE_VBLANK &&
2286
			if (pipe_iir & GEN8_PIPE_VBLANK &&
2286
			    intel_pipe_handle_vblank(dev, pipe))
2287
			    intel_pipe_handle_vblank(dev, pipe))
2287
			/*	intel_check_page_flip(dev, pipe)*/;
2288
			/*	intel_check_page_flip(dev, pipe)*/;
2288
 
2289
 
2289
			if (INTEL_INFO(dev_priv)->gen >= 9)
2290
			if (INTEL_INFO(dev_priv)->gen >= 9)
2290
				flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2291
				flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2291
			else
2292
			else
2292
				flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2293
				flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2293
 
2294
 
2294
 
2295
 
2295
			if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2296
			if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2296
				hsw_pipe_crc_irq_handler(dev, pipe);
2297
				hsw_pipe_crc_irq_handler(dev, pipe);
2297
 
2298
 
2298
			if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
2299
			if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
2299
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
2300
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
2300
								    pipe);
2301
								    pipe);
2301
 
2302
 
2302
 
2303
 
2303
			if (INTEL_INFO(dev_priv)->gen >= 9)
2304
			if (INTEL_INFO(dev_priv)->gen >= 9)
2304
				fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2305
				fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2305
			else
2306
			else
2306
				fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2307
				fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2307
 
2308
 
2308
			if (fault_errors)
2309
			if (fault_errors)
2309
				DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
2310
				DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
2310
					  pipe_name(pipe),
2311
					  pipe_name(pipe),
2311
					  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2312
					  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2312
		} else
2313
		} else
2313
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2314
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2314
	}
2315
	}
2315
 
2316
 
2316
	if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
2317
	if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
2317
	    master_ctl & GEN8_DE_PCH_IRQ) {
2318
	    master_ctl & GEN8_DE_PCH_IRQ) {
2318
		/*
2319
		/*
2319
		 * FIXME(BDW): Assume for now that the new interrupt handling
2320
		 * FIXME(BDW): Assume for now that the new interrupt handling
2320
		 * scheme also closed the SDE interrupt handling race we've seen
2321
		 * scheme also closed the SDE interrupt handling race we've seen
2321
		 * on older pch-split platforms. But this needs testing.
2322
		 * on older pch-split platforms. But this needs testing.
2322
		 */
2323
		 */
2323
		u32 pch_iir = I915_READ(SDEIIR);
2324
		u32 pch_iir = I915_READ(SDEIIR);
2324
		if (pch_iir) {
2325
		if (pch_iir) {
2325
			I915_WRITE(SDEIIR, pch_iir);
2326
			I915_WRITE(SDEIIR, pch_iir);
2326
			ret = IRQ_HANDLED;
2327
			ret = IRQ_HANDLED;
2327
 
2328
 
2328
			if (HAS_PCH_SPT(dev_priv))
2329
			if (HAS_PCH_SPT(dev_priv))
2329
				spt_irq_handler(dev, pch_iir);
2330
				spt_irq_handler(dev, pch_iir);
2330
			else
2331
			else
2331
				cpt_irq_handler(dev, pch_iir);
2332
				cpt_irq_handler(dev, pch_iir);
2332
		} else
2333
		} else
2333
			DRM_ERROR("The master control interrupt lied (SDE)!\n");
2334
			DRM_ERROR("The master control interrupt lied (SDE)!\n");
2334
 
2335
 
2335
	}
2336
	}
2336
 
2337
 
2337
	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2338
	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2338
	POSTING_READ_FW(GEN8_MASTER_IRQ);
2339
	POSTING_READ_FW(GEN8_MASTER_IRQ);
2339
 
2340
 
2340
	return ret;
2341
	return ret;
2341
}
2342
}
2342
 
2343
 
2343
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2344
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2344
			       bool reset_completed)
2345
			       bool reset_completed)
2345
{
2346
{
2346
	struct intel_engine_cs *ring;
2347
	struct intel_engine_cs *ring;
2347
	int i;
2348
	int i;
2348
 
2349
 
2349
	/*
2350
	/*
2350
	 * Notify all waiters for GPU completion events that reset state has
2351
	 * Notify all waiters for GPU completion events that reset state has
2351
	 * been changed, and that they need to restart their wait after
2352
	 * been changed, and that they need to restart their wait after
2352
	 * checking for potential errors (and bail out to drop locks if there is
2353
	 * checking for potential errors (and bail out to drop locks if there is
2353
	 * a gpu reset pending so that i915_error_work_func can acquire them).
2354
	 * a gpu reset pending so that i915_error_work_func can acquire them).
2354
	 */
2355
	 */
2355
 
2356
 
2356
	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2357
	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2357
	for_each_ring(ring, dev_priv, i)
2358
	for_each_ring(ring, dev_priv, i)
2358
		wake_up_all(&ring->irq_queue);
2359
		wake_up_all(&ring->irq_queue);
2359
 
2360
 
2360
 
2361
 
2361
	/*
2362
	/*
2362
	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2363
	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2363
	 * reset state is cleared.
2364
	 * reset state is cleared.
2364
	 */
2365
	 */
2365
	if (reset_completed)
2366
	if (reset_completed)
2366
		wake_up_all(&dev_priv->gpu_error.reset_queue);
2367
		wake_up_all(&dev_priv->gpu_error.reset_queue);
2367
}
2368
}
2368
 
2369
 
2369
/**
2370
/**
2370
 * i915_reset_and_wakeup - do process context error handling work
2371
 * i915_reset_and_wakeup - do process context error handling work
2371
 * @dev: drm device
2372
 * @dev: drm device
2372
 *
2373
 *
2373
 * Fire an error uevent so userspace can see that a hang or error
2374
 * Fire an error uevent so userspace can see that a hang or error
2374
 * was detected.
2375
 * was detected.
2375
 */
2376
 */
2376
static void i915_reset_and_wakeup(struct drm_device *dev)
2377
static void i915_reset_and_wakeup(struct drm_device *dev)
2377
{
2378
{
2378
	struct drm_i915_private *dev_priv = to_i915(dev);
2379
	struct drm_i915_private *dev_priv = to_i915(dev);
2379
	struct i915_gpu_error *error = &dev_priv->gpu_error;
2380
	struct i915_gpu_error *error = &dev_priv->gpu_error;
2380
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2381
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2381
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2382
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2382
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2383
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2383
	int ret;
2384
	int ret;
2384
 
2385
 
2385
	/*
2386
	/*
2386
	 * Note that there's only one work item which does gpu resets, so we
2387
	 * Note that there's only one work item which does gpu resets, so we
2387
	 * need not worry about concurrent gpu resets potentially incrementing
2388
	 * need not worry about concurrent gpu resets potentially incrementing
2388
	 * error->reset_counter twice. We only need to take care of another
2389
	 * error->reset_counter twice. We only need to take care of another
2389
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
2390
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
2390
	 * quick check for that is good enough: schedule_work ensures the
2391
	 * quick check for that is good enough: schedule_work ensures the
2391
	 * correct ordering between hang detection and this work item, and since
2392
	 * correct ordering between hang detection and this work item, and since
2392
	 * the reset in-progress bit is only ever set by code outside of this
2393
	 * the reset in-progress bit is only ever set by code outside of this
2393
	 * work we don't need to worry about any other races.
2394
	 * work we don't need to worry about any other races.
2394
	 */
2395
	 */
2395
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2396
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2396
		DRM_DEBUG_DRIVER("resetting chip\n");
2397
		DRM_DEBUG_DRIVER("resetting chip\n");
2397
		intel_runtime_pm_get(dev_priv);
2398
		intel_runtime_pm_get(dev_priv);
2398
 
2399
 
2399
		/*
2400
		/*
2400
		 * All state reset _must_ be completed before we update the
2401
		 * All state reset _must_ be completed before we update the
2401
		 * reset counter, for otherwise waiters might miss the reset
2402
		 * reset counter, for otherwise waiters might miss the reset
2402
		 * pending state and not properly drop locks, resulting in
2403
		 * pending state and not properly drop locks, resulting in
2403
		 * deadlocks with the reset work.
2404
		 * deadlocks with the reset work.
2404
		 */
2405
		 */
2405
//		ret = i915_reset(dev);
2406
//		ret = i915_reset(dev);
2406
 
2407
 
2407
//		intel_finish_reset(dev);
2408
//		intel_finish_reset(dev);
2408
 
2409
 
2409
		intel_runtime_pm_put(dev_priv);
2410
		intel_runtime_pm_put(dev_priv);
2410
 
2411
 
2411
		if (ret == 0) {
2412
		if (ret == 0) {
2412
			/*
2413
			/*
2413
			 * After all the gem state is reset, increment the reset
2414
			 * After all the gem state is reset, increment the reset
2414
			 * counter and wake up everyone waiting for the reset to
2415
			 * counter and wake up everyone waiting for the reset to
2415
			 * complete.
2416
			 * complete.
2416
			 *
2417
			 *
2417
			 * Since unlock operations are a one-sided barrier only,
2418
			 * Since unlock operations are a one-sided barrier only,
2418
			 * we need to insert a barrier here to order any seqno
2419
			 * we need to insert a barrier here to order any seqno
2419
			 * updates before
2420
			 * updates before
2420
			 * the counter increment.
2421
			 * the counter increment.
2421
			 */
2422
			 */
2422
			smp_mb__before_atomic();
2423
			smp_mb__before_atomic();
2423
			atomic_inc(&dev_priv->gpu_error.reset_counter);
2424
			atomic_inc(&dev_priv->gpu_error.reset_counter);
2424
 
2425
 
2425
		} else {
2426
		} else {
2426
			atomic_or(I915_WEDGED, &error->reset_counter);
2427
			atomic_or(I915_WEDGED, &error->reset_counter);
2427
		}
2428
		}
2428
 
2429
 
2429
		/*
2430
		/*
2430
		 * Note: The wake_up also serves as a memory barrier so that
2431
		 * Note: The wake_up also serves as a memory barrier so that
2431
		 * waiters see the update value of the reset counter atomic_t.
2432
		 * waiters see the update value of the reset counter atomic_t.
2432
		 */
2433
		 */
2433
		i915_error_wake_up(dev_priv, true);
2434
		i915_error_wake_up(dev_priv, true);
2434
	}
2435
	}
2435
}
2436
}
2436
 
2437
 
2437
/*
 * Dump the hardware Error Identity Register (EIR) state to the log and
 * clear the reported error bits by writing them back.  Any bits that
 * refuse to clear are masked via EMR so they stop re-asserting the
 * render error interrupt.
 */
static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	/* Nothing to report. */
	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	/* G4X has its own private error bits on top of the common ones. */
	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			/* Write the value back to ack/clear the error. */
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(dev_priv, pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		/* Pre-gen4 and gen4+ keep the IPEIR registers at different offsets. */
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	/* Ack all reported errors, then re-read to see what survived. */
	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}
2528
 
2529
 
2529
/**
 * i915_handle_error - handle a gpu error
 * @dev: drm device
 * @wedged: true if the GPU is considered hung and a reset should be staged
 * @fmt: printf-style format string describing the error (plus varargs)
 *
 * Do some basic checking of register state at error time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged,
		       const char *fmt, ...)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	va_list args;
	char error_msg[80];

	va_start(args, fmt);
	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
	va_end(args);

	/*
	 * NOTE(review): error state capture is stubbed out in this port, so
	 * error_msg is formatted but currently unused beyond this point.
	 */
//	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		/* Mark the reset as in progress before waking anyone up. */
		atomic_or(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset function
		 * i915_reset_and_wakeup doesn't deadlock trying to grab
		 * various locks. By bumping the reset counter first, the woken
		 * processes will see a reset in progress and back off,
		 * releasing their locks and then wait for the reset completion.
		 * We must do this for _all_ gpu waiters that might hold locks
		 * that the reset work needs to acquire.
		 *
		 * Note: The wake_up serves as the required memory barrier to
		 * ensure that the waiters see the updated value of the reset
		 * counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, false);
	}

	i915_reset_and_wakeup(dev);
}
2575
 
2576
 
2576
/* Called from drm generic code, passed 'crtc' which
2577
/* Called from drm generic code, passed 'crtc' which
2577
 * we use as a pipe index
2578
 * we use as a pipe index
2578
 */
2579
 */
2579
static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
2580
static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
2580
{
2581
{
2581
	struct drm_i915_private *dev_priv = dev->dev_private;
2582
	struct drm_i915_private *dev_priv = dev->dev_private;
2582
	unsigned long irqflags;
2583
	unsigned long irqflags;
2583
 
2584
 
2584
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2585
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2585
	if (INTEL_INFO(dev)->gen >= 4)
2586
	if (INTEL_INFO(dev)->gen >= 4)
2586
		i915_enable_pipestat(dev_priv, pipe,
2587
		i915_enable_pipestat(dev_priv, pipe,
2587
				     PIPE_START_VBLANK_INTERRUPT_STATUS);
2588
				     PIPE_START_VBLANK_INTERRUPT_STATUS);
2588
	else
2589
	else
2589
		i915_enable_pipestat(dev_priv, pipe,
2590
		i915_enable_pipestat(dev_priv, pipe,
2590
				     PIPE_VBLANK_INTERRUPT_STATUS);
2591
				     PIPE_VBLANK_INTERRUPT_STATUS);
2591
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2592
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2592
 
2593
 
2593
	return 0;
2594
	return 0;
2594
}
2595
}
2595
 
2596
 
2596
static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
2597
static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
2597
{
2598
{
2598
	struct drm_i915_private *dev_priv = dev->dev_private;
2599
	struct drm_i915_private *dev_priv = dev->dev_private;
2599
	unsigned long irqflags;
2600
	unsigned long irqflags;
2600
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2601
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2601
						     DE_PIPE_VBLANK(pipe);
2602
						     DE_PIPE_VBLANK(pipe);
2602
 
2603
 
2603
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2604
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2604
	ironlake_enable_display_irq(dev_priv, bit);
2605
	ironlake_enable_display_irq(dev_priv, bit);
2605
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2606
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2606
 
2607
 
2607
	return 0;
2608
	return 0;
2608
}
2609
}
2609
 
2610
 
2610
static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
2611
static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
2611
{
2612
{
2612
	struct drm_i915_private *dev_priv = dev->dev_private;
2613
	struct drm_i915_private *dev_priv = dev->dev_private;
2613
	unsigned long irqflags;
2614
	unsigned long irqflags;
2614
 
2615
 
2615
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2616
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2616
	i915_enable_pipestat(dev_priv, pipe,
2617
	i915_enable_pipestat(dev_priv, pipe,
2617
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
2618
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
2618
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2619
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2619
 
2620
 
2620
	return 0;
2621
	return 0;
2621
}
2622
}
2622
 
2623
 
2623
static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
2624
static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
2624
{
2625
{
2625
	struct drm_i915_private *dev_priv = dev->dev_private;
2626
	struct drm_i915_private *dev_priv = dev->dev_private;
2626
	unsigned long irqflags;
2627
	unsigned long irqflags;
2627
 
2628
 
2628
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2629
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2629
	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2630
	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2630
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2631
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2631
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2632
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2632
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2633
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2633
	return 0;
2634
	return 0;
2634
}
2635
}
2635
 
2636
 
2636
/* Called from drm generic code, passed 'crtc' which
2637
/* Called from drm generic code, passed 'crtc' which
2637
 * we use as a pipe index
2638
 * we use as a pipe index
2638
 */
2639
 */
2639
static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
2640
static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
2640
{
2641
{
2641
	struct drm_i915_private *dev_priv = dev->dev_private;
2642
	struct drm_i915_private *dev_priv = dev->dev_private;
2642
	unsigned long irqflags;
2643
	unsigned long irqflags;
2643
 
2644
 
2644
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2645
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2645
	i915_disable_pipestat(dev_priv, pipe,
2646
	i915_disable_pipestat(dev_priv, pipe,
2646
			      PIPE_VBLANK_INTERRUPT_STATUS |
2647
			      PIPE_VBLANK_INTERRUPT_STATUS |
2647
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2648
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2648
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2649
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2649
}
2650
}
2650
 
2651
 
2651
static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
2652
static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
2652
{
2653
{
2653
	struct drm_i915_private *dev_priv = dev->dev_private;
2654
	struct drm_i915_private *dev_priv = dev->dev_private;
2654
	unsigned long irqflags;
2655
	unsigned long irqflags;
2655
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2656
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2656
						     DE_PIPE_VBLANK(pipe);
2657
						     DE_PIPE_VBLANK(pipe);
2657
 
2658
 
2658
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2659
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2659
	ironlake_disable_display_irq(dev_priv, bit);
2660
	ironlake_disable_display_irq(dev_priv, bit);
2660
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2661
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2661
}
2662
}
2662
 
2663
 
2663
static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
2664
static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
2664
{
2665
{
2665
	struct drm_i915_private *dev_priv = dev->dev_private;
2666
	struct drm_i915_private *dev_priv = dev->dev_private;
2666
	unsigned long irqflags;
2667
	unsigned long irqflags;
2667
 
2668
 
2668
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2669
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2669
	i915_disable_pipestat(dev_priv, pipe,
2670
	i915_disable_pipestat(dev_priv, pipe,
2670
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2671
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2671
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2672
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2672
}
2673
}
2673
 
2674
 
2674
static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
2675
static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
2675
{
2676
{
2676
	struct drm_i915_private *dev_priv = dev->dev_private;
2677
	struct drm_i915_private *dev_priv = dev->dev_private;
2677
	unsigned long irqflags;
2678
	unsigned long irqflags;
2678
 
2679
 
2679
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2680
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2680
	dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2681
	dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2681
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2682
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2682
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2683
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2683
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2684
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2684
}
2685
}
2685
 
2686
 
2686
static bool
2687
static bool
2687
ring_idle(struct intel_engine_cs *ring, u32 seqno)
2688
ring_idle(struct intel_engine_cs *ring, u32 seqno)
2688
{
2689
{
2689
	return (list_empty(&ring->request_list) ||
2690
	return (list_empty(&ring->request_list) ||
2690
		i915_seqno_passed(seqno, ring->last_submitted_seqno));
2691
		i915_seqno_passed(seqno, ring->last_submitted_seqno));
2691
}
2692
}
2692
 
2693
 
2693
static bool
2694
static bool
2694
ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2695
ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2695
{
2696
{
2696
	if (INTEL_INFO(dev)->gen >= 8) {
2697
	if (INTEL_INFO(dev)->gen >= 8) {
2697
		return (ipehr >> 23) == 0x1c;
2698
		return (ipehr >> 23) == 0x1c;
2698
	} else {
2699
	} else {
2699
		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2700
		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2700
		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2701
		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2701
				 MI_SEMAPHORE_REGISTER);
2702
				 MI_SEMAPHORE_REGISTER);
2702
	}
2703
	}
2703
}
2704
}
2704
 
2705
 
2705
/*
 * Find the engine that is expected to signal the semaphore 'ring' is
 * waiting on.  On gen8+ the wait is identified by the 64-bit GGTT offset
 * of the semaphore slot; on earlier gens by the sync bits encoded in the
 * IPEHR command dword.  Returns NULL (and logs an error) when no other
 * engine matches.
 */
static struct intel_engine_cs *
semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_engine_cs *signaller;
	int i;

	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
		for_each_ring(signaller, dev_priv, i) {
			/* An engine cannot be waiting on itself. */
			if (ring == signaller)
				continue;

			/* Match against the per-waiter signal slot offset. */
			if (offset == signaller->semaphore.signal_ggtt[ring->id])
				return signaller;
		}
	} else {
		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;

		for_each_ring(signaller, dev_priv, i) {
			if(ring == signaller)
				continue;

			/* Match against the mbox wait encoding for this pair. */
			if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
				return signaller;
		}
	}

	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
		  ring->id, ipehr, offset);

	return NULL;
}
2737
 
2738
 
2738
static struct intel_engine_cs *
2739
static struct intel_engine_cs *
2739
semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2740
semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2740
{
2741
{
2741
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
2742
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
2742
	u32 cmd, ipehr, head;
2743
	u32 cmd, ipehr, head;
2743
	u64 offset = 0;
2744
	u64 offset = 0;
2744
	int i, backwards;
2745
	int i, backwards;
2745
 
2746
 
2746
	/*
2747
	/*
2747
	 * This function does not support execlist mode - any attempt to
2748
	 * This function does not support execlist mode - any attempt to
2748
	 * proceed further into this function will result in a kernel panic
2749
	 * proceed further into this function will result in a kernel panic
2749
	 * when dereferencing ring->buffer, which is not set up in execlist
2750
	 * when dereferencing ring->buffer, which is not set up in execlist
2750
	 * mode.
2751
	 * mode.
2751
	 *
2752
	 *
2752
	 * The correct way of doing it would be to derive the currently
2753
	 * The correct way of doing it would be to derive the currently
2753
	 * executing ring buffer from the current context, which is derived
2754
	 * executing ring buffer from the current context, which is derived
2754
	 * from the currently running request. Unfortunately, to get the
2755
	 * from the currently running request. Unfortunately, to get the
2755
	 * current request we would have to grab the struct_mutex before doing
2756
	 * current request we would have to grab the struct_mutex before doing
2756
	 * anything else, which would be ill-advised since some other thread
2757
	 * anything else, which would be ill-advised since some other thread
2757
	 * might have grabbed it already and managed to hang itself, causing
2758
	 * might have grabbed it already and managed to hang itself, causing
2758
	 * the hang checker to deadlock.
2759
	 * the hang checker to deadlock.
2759
	 *
2760
	 *
2760
	 * Therefore, this function does not support execlist mode in its
2761
	 * Therefore, this function does not support execlist mode in its
2761
	 * current form. Just return NULL and move on.
2762
	 * current form. Just return NULL and move on.
2762
	 */
2763
	 */
2763
	if (ring->buffer == NULL)
2764
	if (ring->buffer == NULL)
2764
		return NULL;
2765
		return NULL;
2765
 
2766
 
2766
	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2767
	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2767
	if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
2768
	if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
2768
		return NULL;
2769
		return NULL;
2769
 
2770
 
2770
	/*
2771
	/*
2771
	 * HEAD is likely pointing to the dword after the actual command,
2772
	 * HEAD is likely pointing to the dword after the actual command,
2772
	 * so scan backwards until we find the MBOX. But limit it to just 3
2773
	 * so scan backwards until we find the MBOX. But limit it to just 3
2773
	 * or 4 dwords depending on the semaphore wait command size.
2774
	 * or 4 dwords depending on the semaphore wait command size.
2774
	 * Note that we don't care about ACTHD here since that might
2775
	 * Note that we don't care about ACTHD here since that might
2775
	 * point at at batch, and semaphores are always emitted into the
2776
	 * point at at batch, and semaphores are always emitted into the
2776
	 * ringbuffer itself.
2777
	 * ringbuffer itself.
2777
	 */
2778
	 */
2778
	head = I915_READ_HEAD(ring) & HEAD_ADDR;
2779
	head = I915_READ_HEAD(ring) & HEAD_ADDR;
2779
	backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
2780
	backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
2780
 
2781
 
2781
	for (i = backwards; i; --i) {
2782
	for (i = backwards; i; --i) {
2782
		/*
2783
		/*
2783
		 * Be paranoid and presume the hw has gone off into the wild -
2784
		 * Be paranoid and presume the hw has gone off into the wild -
2784
		 * our ring is smaller than what the hardware (and hence
2785
		 * our ring is smaller than what the hardware (and hence
2785
		 * HEAD_ADDR) allows. Also handles wrap-around.
2786
		 * HEAD_ADDR) allows. Also handles wrap-around.
2786
		 */
2787
		 */
2787
		head &= ring->buffer->size - 1;
2788
		head &= ring->buffer->size - 1;
2788
 
2789
 
2789
		/* This here seems to blow up */
2790
		/* This here seems to blow up */
2790
		cmd = ioread32(ring->buffer->virtual_start + head);
2791
		cmd = ioread32(ring->buffer->virtual_start + head);
2791
		if (cmd == ipehr)
2792
		if (cmd == ipehr)
2792
			break;
2793
			break;
2793
 
2794
 
2794
		head -= 4;
2795
		head -= 4;
2795
	}
2796
	}
2796
 
2797
 
2797
	if (!i)
2798
	if (!i)
2798
		return NULL;
2799
		return NULL;
2799
 
2800
 
2800
	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2801
	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2801
	if (INTEL_INFO(ring->dev)->gen >= 8) {
2802
	if (INTEL_INFO(ring->dev)->gen >= 8) {
2802
		offset = ioread32(ring->buffer->virtual_start + head + 12);
2803
		offset = ioread32(ring->buffer->virtual_start + head + 12);
2803
		offset <<= 32;
2804
		offset <<= 32;
2804
		offset = ioread32(ring->buffer->virtual_start + head + 8);
2805
		offset = ioread32(ring->buffer->virtual_start + head + 8);
2805
	}
2806
	}
2806
	return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
2807
	return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
2807
}
2808
}
2808
 
2809
 
2809
/*
 * Check whether the semaphore 'ring' is waiting on has already been
 * signalled.  Returns 1 if the wait condition is satisfied (the ring can
 * be kicked), 0 if the signaller is still legitimately busy, and -1 when
 * no signaller can be found or a (possible) cross-ring deadlock is
 * detected.  Recurses through chained semaphore waits, bounded by the
 * per-ring deadlock counter.
 */
static int semaphore_passed(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_engine_cs *signaller;
	u32 seqno;

	/* Counts visits this hangcheck pass; cleared by semaphore_clear_deadlocks(). */
	ring->hangcheck.deadlock++;

	signaller = semaphore_waits_for(ring, &seqno);
	if (signaller == NULL)
		return -1;

	/* Prevent pathological recursion due to driver bugs */
	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
		return -1;

	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
		return 1;

	/* cursory check for an unkickable deadlock */
	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
	    semaphore_passed(signaller) < 0)
		return -1;

	return 0;
}
2835
 
2836
 
2836
static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2837
static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2837
{
2838
{
2838
	struct intel_engine_cs *ring;
2839
	struct intel_engine_cs *ring;
2839
	int i;
2840
	int i;
2840
 
2841
 
2841
	for_each_ring(ring, dev_priv, i)
2842
	for_each_ring(ring, dev_priv, i)
2842
		ring->hangcheck.deadlock = 0;
2843
		ring->hangcheck.deadlock = 0;
2843
}
2844
}
2844
 
2845
 
2845
/*
 * Classify the state of a ring whose seqno has not advanced since the
 * last hangcheck, based on the active head (acthd) and the ring control
 * register.  May kick a stuck WAIT_FOR_EVENT or semaphore wait by
 * rewriting RING_CTL before declaring the ring hung.
 */
static enum intel_ring_hangcheck_action
ring_stuck(struct intel_engine_cs *ring, u64 acthd)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	/* The head is still moving, so the ring is active, not hung. */
	if (acthd != ring->hangcheck.acthd) {
		if (acthd > ring->hangcheck.max_acthd) {
			ring->hangcheck.max_acthd = acthd;
			return HANGCHECK_ACTIVE;
		}

		/* Moving, but never past the previous maximum: likely a loop. */
		return HANGCHECK_ACTIVE_LOOP;
	}

	/* gen2 offers no way to kick a stuck ring; treat it as hung. */
	if (IS_GEN2(dev))
		return HANGCHECK_HUNG;

	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
	tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		i915_handle_error(dev, false,
				  "Kicking stuck wait on %s",
				  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return HANGCHECK_KICK;
	}

	/* gen6+ may instead be blocked on an inter-ring semaphore. */
	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
		switch (semaphore_passed(ring)) {
		default:
			/* -1: no signaller found or deadlock suspected. */
			return HANGCHECK_HUNG;
		case 1:
			/* Semaphore already signalled: kick the waiter. */
			i915_handle_error(dev, false,
					  "Kicking stuck semaphore on %s",
					  ring->name);
			I915_WRITE_CTL(ring, tmp);
			return HANGCHECK_KICK;
		case 0:
			/* Signaller still busy: keep waiting. */
			return HANGCHECK_WAIT;
		}
	}

	return HANGCHECK_HUNG;
}
2895
 
2896
 
2896
/*
2897
/*
2897
 * This is called when the chip hasn't reported back with completed
2898
 * This is called when the chip hasn't reported back with completed
2898
 * batchbuffers in a long time. We keep track per ring seqno progress and
2899
 * batchbuffers in a long time. We keep track per ring seqno progress and
2899
 * if there are no progress, hangcheck score for that ring is increased.
2900
 * if there are no progress, hangcheck score for that ring is increased.
2900
 * Further, acthd is inspected to see if the ring is stuck. On stuck case
2901
 * Further, acthd is inspected to see if the ring is stuck. On stuck case
2901
 * we kick the ring. If we see no progress on three subsequent calls
2902
 * we kick the ring. If we see no progress on three subsequent calls
2902
 * we assume chip is wedged and try to fix it by resetting the chip.
2903
 * we assume chip is wedged and try to fix it by resetting the chip.
2903
 */
2904
 */
2904
static void i915_hangcheck_elapsed(struct work_struct *work)
2905
static void i915_hangcheck_elapsed(struct work_struct *work)
2905
{
2906
{
2906
	struct drm_i915_private *dev_priv =
2907
	struct drm_i915_private *dev_priv =
2907
		container_of(work, typeof(*dev_priv),
2908
		container_of(work, typeof(*dev_priv),
2908
			     gpu_error.hangcheck_work.work);
2909
			     gpu_error.hangcheck_work.work);
2909
	struct drm_device *dev = dev_priv->dev;
2910
	struct drm_device *dev = dev_priv->dev;
2910
	struct intel_engine_cs *ring;
2911
	struct intel_engine_cs *ring;
2911
	int i;
2912
	int i;
2912
	int busy_count = 0, rings_hung = 0;
2913
	int busy_count = 0, rings_hung = 0;
2913
	bool stuck[I915_NUM_RINGS] = { 0 };
2914
	bool stuck[I915_NUM_RINGS] = { 0 };
2914
#define BUSY 1
2915
#define BUSY 1
2915
#define KICK 5
2916
#define KICK 5
2916
#define HUNG 20
2917
#define HUNG 20
2917
 
2918
 
2918
	if (!i915.enable_hangcheck)
2919
	if (!i915.enable_hangcheck)
2919
		return;
2920
		return;
2920
 
2921
 
2921
	for_each_ring(ring, dev_priv, i) {
2922
	for_each_ring(ring, dev_priv, i) {
2922
		u64 acthd;
2923
		u64 acthd;
2923
		u32 seqno;
2924
		u32 seqno;
2924
		bool busy = true;
2925
		bool busy = true;
2925
 
2926
 
2926
		semaphore_clear_deadlocks(dev_priv);
2927
		semaphore_clear_deadlocks(dev_priv);
2927
 
2928
 
2928
		seqno = ring->get_seqno(ring, false);
2929
		seqno = ring->get_seqno(ring, false);
2929
		acthd = intel_ring_get_active_head(ring);
2930
		acthd = intel_ring_get_active_head(ring);
2930
 
2931
 
2931
		if (ring->hangcheck.seqno == seqno) {
2932
		if (ring->hangcheck.seqno == seqno) {
2932
			if (ring_idle(ring, seqno)) {
2933
			if (ring_idle(ring, seqno)) {
2933
				ring->hangcheck.action = HANGCHECK_IDLE;
2934
				ring->hangcheck.action = HANGCHECK_IDLE;
2934
 
2935
 
2935
				if (waitqueue_active(&ring->irq_queue)) {
2936
				if (waitqueue_active(&ring->irq_queue)) {
2936
					/* Issue a wake-up to catch stuck h/w. */
2937
					/* Issue a wake-up to catch stuck h/w. */
2937
					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
2938
					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
2938
						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2939
						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2939
							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2940
							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2940
								  ring->name);
2941
								  ring->name);
2941
						else
2942
						else
2942
							DRM_INFO("Fake missed irq on %s\n",
2943
							DRM_INFO("Fake missed irq on %s\n",
2943
								 ring->name);
2944
								 ring->name);
2944
						wake_up_all(&ring->irq_queue);
2945
						wake_up_all(&ring->irq_queue);
2945
					}
2946
					}
2946
					/* Safeguard against driver failure */
2947
					/* Safeguard against driver failure */
2947
					ring->hangcheck.score += BUSY;
2948
					ring->hangcheck.score += BUSY;
2948
				} else
2949
				} else
2949
					busy = false;
2950
					busy = false;
2950
			} else {
2951
			} else {
2951
				/* We always increment the hangcheck score
2952
				/* We always increment the hangcheck score
2952
				 * if the ring is busy and still processing
2953
				 * if the ring is busy and still processing
2953
				 * the same request, so that no single request
2954
				 * the same request, so that no single request
2954
				 * can run indefinitely (such as a chain of
2955
				 * can run indefinitely (such as a chain of
2955
				 * batches). The only time we do not increment
2956
				 * batches). The only time we do not increment
2956
				 * the hangcheck score on this ring, if this
2957
				 * the hangcheck score on this ring, if this
2957
				 * ring is in a legitimate wait for another
2958
				 * ring is in a legitimate wait for another
2958
				 * ring. In that case the waiting ring is a
2959
				 * ring. In that case the waiting ring is a
2959
				 * victim and we want to be sure we catch the
2960
				 * victim and we want to be sure we catch the
2960
				 * right culprit. Then every time we do kick
2961
				 * right culprit. Then every time we do kick
2961
				 * the ring, add a small increment to the
2962
				 * the ring, add a small increment to the
2962
				 * score so that we can catch a batch that is
2963
				 * score so that we can catch a batch that is
2963
				 * being repeatedly kicked and so responsible
2964
				 * being repeatedly kicked and so responsible
2964
				 * for stalling the machine.
2965
				 * for stalling the machine.
2965
				 */
2966
				 */
2966
				ring->hangcheck.action = ring_stuck(ring,
2967
				ring->hangcheck.action = ring_stuck(ring,
2967
								    acthd);
2968
								    acthd);
2968
 
2969
 
2969
				switch (ring->hangcheck.action) {
2970
				switch (ring->hangcheck.action) {
2970
				case HANGCHECK_IDLE:
2971
				case HANGCHECK_IDLE:
2971
				case HANGCHECK_WAIT:
2972
				case HANGCHECK_WAIT:
2972
				case HANGCHECK_ACTIVE:
2973
				case HANGCHECK_ACTIVE:
2973
					break;
2974
					break;
2974
				case HANGCHECK_ACTIVE_LOOP:
2975
				case HANGCHECK_ACTIVE_LOOP:
2975
					ring->hangcheck.score += BUSY;
2976
					ring->hangcheck.score += BUSY;
2976
					break;
2977
					break;
2977
				case HANGCHECK_KICK:
2978
				case HANGCHECK_KICK:
2978
					ring->hangcheck.score += KICK;
2979
					ring->hangcheck.score += KICK;
2979
					break;
2980
					break;
2980
				case HANGCHECK_HUNG:
2981
				case HANGCHECK_HUNG:
2981
					ring->hangcheck.score += HUNG;
2982
					ring->hangcheck.score += HUNG;
2982
					stuck[i] = true;
2983
					stuck[i] = true;
2983
					break;
2984
					break;
2984
				}
2985
				}
2985
			}
2986
			}
2986
		} else {
2987
		} else {
2987
			ring->hangcheck.action = HANGCHECK_ACTIVE;
2988
			ring->hangcheck.action = HANGCHECK_ACTIVE;
2988
 
2989
 
2989
			/* Gradually reduce the count so that we catch DoS
2990
			/* Gradually reduce the count so that we catch DoS
2990
			 * attempts across multiple batches.
2991
			 * attempts across multiple batches.
2991
			 */
2992
			 */
2992
			if (ring->hangcheck.score > 0)
2993
			if (ring->hangcheck.score > 0)
2993
				ring->hangcheck.score--;
2994
				ring->hangcheck.score--;
2994
 
2995
 
2995
			ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
2996
			ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
2996
		}
2997
		}
2997
 
2998
 
2998
		ring->hangcheck.seqno = seqno;
2999
		ring->hangcheck.seqno = seqno;
2999
		ring->hangcheck.acthd = acthd;
3000
		ring->hangcheck.acthd = acthd;
3000
		busy_count += busy;
3001
		busy_count += busy;
3001
	}
3002
	}
3002
 
3003
 
3003
	for_each_ring(ring, dev_priv, i) {
3004
	for_each_ring(ring, dev_priv, i) {
3004
		if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3005
		if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3005
			DRM_INFO("%s on %s\n",
3006
			DRM_INFO("%s on %s\n",
3006
				 stuck[i] ? "stuck" : "no progress",
3007
				 stuck[i] ? "stuck" : "no progress",
3007
				 ring->name);
3008
				 ring->name);
3008
			rings_hung++;
3009
			rings_hung++;
3009
		}
3010
		}
3010
	}
3011
	}
3011
 
3012
 
3012
//   if (rings_hung)
3013
//   if (rings_hung)
3013
//       return i915_handle_error(dev, true);
3014
//       return i915_handle_error(dev, true);
3014
 
3015
 
3015
}
3016
}
3016
 
3017
 
3017
static void ibx_irq_reset(struct drm_device *dev)
3018
static void ibx_irq_reset(struct drm_device *dev)
3018
{
3019
{
3019
	struct drm_i915_private *dev_priv = dev->dev_private;
3020
	struct drm_i915_private *dev_priv = dev->dev_private;
3020
 
3021
 
3021
	if (HAS_PCH_NOP(dev))
3022
	if (HAS_PCH_NOP(dev))
3022
		return;
3023
		return;
3023
 
3024
 
3024
	GEN5_IRQ_RESET(SDE);
3025
	GEN5_IRQ_RESET(SDE);
3025
 
3026
 
3026
	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3027
	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3027
		I915_WRITE(SERR_INT, 0xffffffff);
3028
		I915_WRITE(SERR_INT, 0xffffffff);
3028
}
3029
}
3029
 
3030
 
3030
/*
3031
/*
3031
 * SDEIER is also touched by the interrupt handler to work around missed PCH
3032
 * SDEIER is also touched by the interrupt handler to work around missed PCH
3032
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3033
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3033
 * instead we unconditionally enable all PCH interrupt sources here, but then
3034
 * instead we unconditionally enable all PCH interrupt sources here, but then
3034
 * only unmask them as needed with SDEIMR.
3035
 * only unmask them as needed with SDEIMR.
3035
 *
3036
 *
3036
 * This function needs to be called before interrupts are enabled.
3037
 * This function needs to be called before interrupts are enabled.
3037
 */
3038
 */
3038
static void ibx_irq_pre_postinstall(struct drm_device *dev)
3039
static void ibx_irq_pre_postinstall(struct drm_device *dev)
3039
{
3040
{
3040
	struct drm_i915_private *dev_priv = dev->dev_private;
3041
	struct drm_i915_private *dev_priv = dev->dev_private;
3041
 
3042
 
3042
	if (HAS_PCH_NOP(dev))
3043
	if (HAS_PCH_NOP(dev))
3043
		return;
3044
		return;
3044
 
3045
 
3045
	WARN_ON(I915_READ(SDEIER) != 0);
3046
	WARN_ON(I915_READ(SDEIER) != 0);
3046
	I915_WRITE(SDEIER, 0xffffffff);
3047
	I915_WRITE(SDEIER, 0xffffffff);
3047
	POSTING_READ(SDEIER);
3048
	POSTING_READ(SDEIER);
3048
}
3049
}
3049
 
3050
 
3050
static void gen5_gt_irq_reset(struct drm_device *dev)
3051
static void gen5_gt_irq_reset(struct drm_device *dev)
3051
{
3052
{
3052
	struct drm_i915_private *dev_priv = dev->dev_private;
3053
	struct drm_i915_private *dev_priv = dev->dev_private;
3053
 
3054
 
3054
	GEN5_IRQ_RESET(GT);
3055
	GEN5_IRQ_RESET(GT);
3055
	if (INTEL_INFO(dev)->gen >= 6)
3056
	if (INTEL_INFO(dev)->gen >= 6)
3056
		GEN5_IRQ_RESET(GEN6_PM);
3057
		GEN5_IRQ_RESET(GEN6_PM);
3057
}
3058
}
3058
 
3059
 
3059
/* drm_dma.h hooks
3060
/* drm_dma.h hooks
3060
*/
3061
*/
3061
static void ironlake_irq_reset(struct drm_device *dev)
3062
static void ironlake_irq_reset(struct drm_device *dev)
3062
{
3063
{
3063
	struct drm_i915_private *dev_priv = dev->dev_private;
3064
	struct drm_i915_private *dev_priv = dev->dev_private;
3064
 
3065
 
3065
	I915_WRITE(HWSTAM, 0xffffffff);
3066
	I915_WRITE(HWSTAM, 0xffffffff);
3066
 
3067
 
3067
	GEN5_IRQ_RESET(DE);
3068
	GEN5_IRQ_RESET(DE);
3068
	if (IS_GEN7(dev))
3069
	if (IS_GEN7(dev))
3069
		I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3070
		I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3070
 
3071
 
3071
	gen5_gt_irq_reset(dev);
3072
	gen5_gt_irq_reset(dev);
3072
 
3073
 
3073
	ibx_irq_reset(dev);
3074
	ibx_irq_reset(dev);
3074
}
3075
}
3075
 
3076
 
3076
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3077
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3077
{
3078
{
3078
	enum pipe pipe;
3079
	enum pipe pipe;
3079
 
3080
 
3080
	i915_hotplug_interrupt_update(dev_priv, 0xFFFFFFFF, 0);
3081
	i915_hotplug_interrupt_update(dev_priv, 0xFFFFFFFF, 0);
3081
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3082
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3082
 
3083
 
3083
	for_each_pipe(dev_priv, pipe)
3084
	for_each_pipe(dev_priv, pipe)
3084
		I915_WRITE(PIPESTAT(pipe), 0xffff);
3085
		I915_WRITE(PIPESTAT(pipe), 0xffff);
3085
 
3086
 
3086
	GEN5_IRQ_RESET(VLV_);
3087
	GEN5_IRQ_RESET(VLV_);
3087
}
3088
}
3088
 
3089
 
3089
static void valleyview_irq_preinstall(struct drm_device *dev)
3090
static void valleyview_irq_preinstall(struct drm_device *dev)
3090
{
3091
{
3091
	struct drm_i915_private *dev_priv = dev->dev_private;
3092
	struct drm_i915_private *dev_priv = dev->dev_private;
3092
 
3093
 
3093
	/* VLV magic */
3094
	/* VLV magic */
3094
	I915_WRITE(VLV_IMR, 0);
3095
	I915_WRITE(VLV_IMR, 0);
3095
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3096
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3096
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3097
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3097
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3098
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3098
 
3099
 
3099
	gen5_gt_irq_reset(dev);
3100
	gen5_gt_irq_reset(dev);
3100
 
3101
 
3101
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3102
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3102
 
3103
 
3103
	vlv_display_irq_reset(dev_priv);
3104
	vlv_display_irq_reset(dev_priv);
3104
}
3105
}
3105
 
3106
 
3106
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3107
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3107
{
3108
{
3108
	GEN8_IRQ_RESET_NDX(GT, 0);
3109
	GEN8_IRQ_RESET_NDX(GT, 0);
3109
	GEN8_IRQ_RESET_NDX(GT, 1);
3110
	GEN8_IRQ_RESET_NDX(GT, 1);
3110
	GEN8_IRQ_RESET_NDX(GT, 2);
3111
	GEN8_IRQ_RESET_NDX(GT, 2);
3111
	GEN8_IRQ_RESET_NDX(GT, 3);
3112
	GEN8_IRQ_RESET_NDX(GT, 3);
3112
}
3113
}
3113
 
3114
 
3114
static void gen8_irq_reset(struct drm_device *dev)
3115
static void gen8_irq_reset(struct drm_device *dev)
3115
{
3116
{
3116
	struct drm_i915_private *dev_priv = dev->dev_private;
3117
	struct drm_i915_private *dev_priv = dev->dev_private;
3117
	int pipe;
3118
	int pipe;
3118
 
3119
 
3119
	I915_WRITE(GEN8_MASTER_IRQ, 0);
3120
	I915_WRITE(GEN8_MASTER_IRQ, 0);
3120
	POSTING_READ(GEN8_MASTER_IRQ);
3121
	POSTING_READ(GEN8_MASTER_IRQ);
3121
 
3122
 
3122
	gen8_gt_irq_reset(dev_priv);
3123
	gen8_gt_irq_reset(dev_priv);
3123
 
3124
 
3124
	for_each_pipe(dev_priv, pipe)
3125
	for_each_pipe(dev_priv, pipe)
3125
		if (intel_display_power_is_enabled(dev_priv,
3126
		if (intel_display_power_is_enabled(dev_priv,
3126
						   POWER_DOMAIN_PIPE(pipe)))
3127
						   POWER_DOMAIN_PIPE(pipe)))
3127
			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3128
			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3128
 
3129
 
3129
	GEN5_IRQ_RESET(GEN8_DE_PORT_);
3130
	GEN5_IRQ_RESET(GEN8_DE_PORT_);
3130
	GEN5_IRQ_RESET(GEN8_DE_MISC_);
3131
	GEN5_IRQ_RESET(GEN8_DE_MISC_);
3131
	GEN5_IRQ_RESET(GEN8_PCU_);
3132
	GEN5_IRQ_RESET(GEN8_PCU_);
3132
 
3133
 
3133
	if (HAS_PCH_SPLIT(dev))
3134
	if (HAS_PCH_SPLIT(dev))
3134
		ibx_irq_reset(dev);
3135
		ibx_irq_reset(dev);
3135
}
3136
}
3136
 
3137
 
3137
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3138
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3138
				     unsigned int pipe_mask)
3139
				     unsigned int pipe_mask)
3139
{
3140
{
3140
	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3141
	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3141
 
3142
 
3142
	spin_lock_irq(&dev_priv->irq_lock);
3143
	spin_lock_irq(&dev_priv->irq_lock);
3143
	if (pipe_mask & 1 << PIPE_A)
3144
	if (pipe_mask & 1 << PIPE_A)
3144
		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
3145
		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
3145
				  dev_priv->de_irq_mask[PIPE_A],
3146
				  dev_priv->de_irq_mask[PIPE_A],
3146
				  ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
3147
				  ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
3147
	if (pipe_mask & 1 << PIPE_B)
3148
	if (pipe_mask & 1 << PIPE_B)
3148
		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
3149
		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
3149
				  dev_priv->de_irq_mask[PIPE_B],
3150
				  dev_priv->de_irq_mask[PIPE_B],
3150
				  ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
3151
				  ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
3151
	if (pipe_mask & 1 << PIPE_C)
3152
	if (pipe_mask & 1 << PIPE_C)
3152
		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
3153
		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
3153
				  dev_priv->de_irq_mask[PIPE_C],
3154
				  dev_priv->de_irq_mask[PIPE_C],
3154
				  ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
3155
				  ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
3155
	spin_unlock_irq(&dev_priv->irq_lock);
3156
	spin_unlock_irq(&dev_priv->irq_lock);
3156
}
3157
}
3157
 
3158
 
3158
static void cherryview_irq_preinstall(struct drm_device *dev)
3159
static void cherryview_irq_preinstall(struct drm_device *dev)
3159
{
3160
{
3160
	struct drm_i915_private *dev_priv = dev->dev_private;
3161
	struct drm_i915_private *dev_priv = dev->dev_private;
3161
 
3162
 
3162
	I915_WRITE(GEN8_MASTER_IRQ, 0);
3163
	I915_WRITE(GEN8_MASTER_IRQ, 0);
3163
	POSTING_READ(GEN8_MASTER_IRQ);
3164
	POSTING_READ(GEN8_MASTER_IRQ);
3164
 
3165
 
3165
	gen8_gt_irq_reset(dev_priv);
3166
	gen8_gt_irq_reset(dev_priv);
3166
 
3167
 
3167
	GEN5_IRQ_RESET(GEN8_PCU_);
3168
	GEN5_IRQ_RESET(GEN8_PCU_);
3168
 
3169
 
3169
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3170
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3170
 
3171
 
3171
	vlv_display_irq_reset(dev_priv);
3172
	vlv_display_irq_reset(dev_priv);
3172
}
3173
}
3173
 
3174
 
3174
static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
3175
static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
3175
				  const u32 hpd[HPD_NUM_PINS])
3176
				  const u32 hpd[HPD_NUM_PINS])
3176
{
3177
{
3177
	struct drm_i915_private *dev_priv = to_i915(dev);
3178
	struct drm_i915_private *dev_priv = to_i915(dev);
3178
	struct intel_encoder *encoder;
3179
	struct intel_encoder *encoder;
3179
	u32 enabled_irqs = 0;
3180
	u32 enabled_irqs = 0;
3180
 
3181
 
3181
	for_each_intel_encoder(dev, encoder)
3182
	for_each_intel_encoder(dev, encoder)
3182
		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3183
		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3183
			enabled_irqs |= hpd[encoder->hpd_pin];
3184
			enabled_irqs |= hpd[encoder->hpd_pin];
3184
 
3185
 
3185
	return enabled_irqs;
3186
	return enabled_irqs;
3186
}
3187
}
3187
 
3188
 
3188
static void ibx_hpd_irq_setup(struct drm_device *dev)
3189
static void ibx_hpd_irq_setup(struct drm_device *dev)
3189
{
3190
{
3190
	struct drm_i915_private *dev_priv = dev->dev_private;
3191
	struct drm_i915_private *dev_priv = dev->dev_private;
3191
	u32 hotplug_irqs, hotplug, enabled_irqs;
3192
	u32 hotplug_irqs, hotplug, enabled_irqs;
3192
 
3193
 
3193
	if (HAS_PCH_IBX(dev)) {
3194
	if (HAS_PCH_IBX(dev)) {
3194
		hotplug_irqs = SDE_HOTPLUG_MASK;
3195
		hotplug_irqs = SDE_HOTPLUG_MASK;
3195
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
3196
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
3196
	} else {
3197
	} else {
3197
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3198
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3198
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
3199
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
3199
	}
3200
	}
3200
 
3201
 
3201
	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3202
	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3202
 
3203
 
3203
	/*
3204
	/*
3204
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
3205
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
3205
	 * duration to 2ms (which is the minimum in the Display Port spec).
3206
	 * duration to 2ms (which is the minimum in the Display Port spec).
3206
	 * The pulse duration bits are reserved on LPT+.
3207
	 * The pulse duration bits are reserved on LPT+.
3207
	 */
3208
	 */
3208
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3209
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3209
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3210
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3210
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3211
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3211
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3212
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3212
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3213
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3213
	/*
3214
	/*
3214
	 * When CPU and PCH are on the same package, port A
3215
	 * When CPU and PCH are on the same package, port A
3215
	 * HPD must be enabled in both north and south.
3216
	 * HPD must be enabled in both north and south.
3216
	 */
3217
	 */
3217
	if (HAS_PCH_LPT_LP(dev))
3218
	if (HAS_PCH_LPT_LP(dev))
3218
		hotplug |= PORTA_HOTPLUG_ENABLE;
3219
		hotplug |= PORTA_HOTPLUG_ENABLE;
3219
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3220
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3220
}
3221
}
3221
 
3222
 
3222
static void spt_hpd_irq_setup(struct drm_device *dev)
3223
static void spt_hpd_irq_setup(struct drm_device *dev)
3223
{
3224
{
3224
	struct drm_i915_private *dev_priv = dev->dev_private;
3225
	struct drm_i915_private *dev_priv = dev->dev_private;
3225
	u32 hotplug_irqs, hotplug, enabled_irqs;
3226
	u32 hotplug_irqs, hotplug, enabled_irqs;
3226
 
3227
 
3227
	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3228
	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3228
	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);
3229
	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);
3229
 
3230
 
3230
	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3231
	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3231
 
3232
 
3232
	/* Enable digital hotplug on the PCH */
3233
	/* Enable digital hotplug on the PCH */
3233
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3234
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3234
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
3235
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
3235
		PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
3236
		PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
3236
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3237
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3237
 
3238
 
3238
	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3239
	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3239
	hotplug |= PORTE_HOTPLUG_ENABLE;
3240
	hotplug |= PORTE_HOTPLUG_ENABLE;
3240
	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3241
	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3241
}
3242
}
3242
 
3243
 
3243
static void ilk_hpd_irq_setup(struct drm_device *dev)
3244
static void ilk_hpd_irq_setup(struct drm_device *dev)
3244
{
3245
{
3245
	struct drm_i915_private *dev_priv = dev->dev_private;
3246
	struct drm_i915_private *dev_priv = dev->dev_private;
3246
	u32 hotplug_irqs, hotplug, enabled_irqs;
3247
	u32 hotplug_irqs, hotplug, enabled_irqs;
3247
 
3248
 
3248
	if (INTEL_INFO(dev)->gen >= 8) {
3249
	if (INTEL_INFO(dev)->gen >= 8) {
3249
		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3250
		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3250
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw);
3251
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw);
3251
 
3252
 
3252
		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3253
		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3253
	} else if (INTEL_INFO(dev)->gen >= 7) {
3254
	} else if (INTEL_INFO(dev)->gen >= 7) {
3254
		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3255
		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3255
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb);
3256
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb);
3256
 
3257
 
3257
		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3258
		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3258
	} else {
3259
	} else {
3259
		hotplug_irqs = DE_DP_A_HOTPLUG;
3260
		hotplug_irqs = DE_DP_A_HOTPLUG;
3260
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk);
3261
		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk);
3261
 
3262
 
3262
		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3263
		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3263
	}
3264
	}
3264
 
3265
 
3265
	/*
3266
	/*
3266
	 * Enable digital hotplug on the CPU, and configure the DP short pulse
3267
	 * Enable digital hotplug on the CPU, and configure the DP short pulse
3267
	 * duration to 2ms (which is the minimum in the Display Port spec)
3268
	 * duration to 2ms (which is the minimum in the Display Port spec)
3268
	 * The pulse duration bits are reserved on HSW+.
3269
	 * The pulse duration bits are reserved on HSW+.
3269
	 */
3270
	 */
3270
	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3271
	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3271
	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3272
	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3272
	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
3273
	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
3273
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3274
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3274
 
3275
 
3275
	ibx_hpd_irq_setup(dev);
3276
	ibx_hpd_irq_setup(dev);
3276
}
3277
}
3277
 
3278
 
3278
static void bxt_hpd_irq_setup(struct drm_device *dev)
3279
static void bxt_hpd_irq_setup(struct drm_device *dev)
3279
{
3280
{
3280
	struct drm_i915_private *dev_priv = dev->dev_private;
3281
	struct drm_i915_private *dev_priv = dev->dev_private;
3281
	u32 hotplug_irqs, hotplug, enabled_irqs;
3282
	u32 hotplug_irqs, hotplug, enabled_irqs;
3282
 
3283
 
3283
	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt);
3284
	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt);
3284
	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3285
	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3285
 
3286
 
3286
	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3287
	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3287
 
3288
 
3288
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3289
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3289
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
3290
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
3290
		PORTA_HOTPLUG_ENABLE;
3291
		PORTA_HOTPLUG_ENABLE;
3291
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3292
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3292
}
3293
}
3293
 
3294
 
3294
static void ibx_irq_postinstall(struct drm_device *dev)
3295
static void ibx_irq_postinstall(struct drm_device *dev)
3295
{
3296
{
3296
	struct drm_i915_private *dev_priv = dev->dev_private;
3297
	struct drm_i915_private *dev_priv = dev->dev_private;
3297
	u32 mask;
3298
	u32 mask;
3298
 
3299
 
3299
	if (HAS_PCH_NOP(dev))
3300
	if (HAS_PCH_NOP(dev))
3300
		return;
3301
		return;
3301
 
3302
 
3302
	if (HAS_PCH_IBX(dev))
3303
	if (HAS_PCH_IBX(dev))
3303
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3304
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3304
	else
3305
	else
3305
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3306
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3306
 
3307
 
3307
	gen5_assert_iir_is_zero(dev_priv, SDEIIR);
3308
	gen5_assert_iir_is_zero(dev_priv, SDEIIR);
3308
	I915_WRITE(SDEIMR, ~mask);
3309
	I915_WRITE(SDEIMR, ~mask);
3309
}
3310
}
3310
 
3311
 
3311
static void gen5_gt_irq_postinstall(struct drm_device *dev)
3312
static void gen5_gt_irq_postinstall(struct drm_device *dev)
3312
{
3313
{
3313
	struct drm_i915_private *dev_priv = dev->dev_private;
3314
	struct drm_i915_private *dev_priv = dev->dev_private;
3314
	u32 pm_irqs, gt_irqs;
3315
	u32 pm_irqs, gt_irqs;
3315
 
3316
 
3316
	pm_irqs = gt_irqs = 0;
3317
	pm_irqs = gt_irqs = 0;
3317
 
3318
 
3318
	dev_priv->gt_irq_mask = ~0;
3319
	dev_priv->gt_irq_mask = ~0;
3319
	if (HAS_L3_DPF(dev)) {
3320
	if (HAS_L3_DPF(dev)) {
3320
		/* L3 parity interrupt is always unmasked. */
3321
		/* L3 parity interrupt is always unmasked. */
3321
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3322
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3322
		gt_irqs |= GT_PARITY_ERROR(dev);
3323
		gt_irqs |= GT_PARITY_ERROR(dev);
3323
	}
3324
	}
3324
 
3325
 
3325
	gt_irqs |= GT_RENDER_USER_INTERRUPT;
3326
	gt_irqs |= GT_RENDER_USER_INTERRUPT;
3326
	if (IS_GEN5(dev)) {
3327
	if (IS_GEN5(dev)) {
3327
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3328
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3328
			   ILK_BSD_USER_INTERRUPT;
3329
			   ILK_BSD_USER_INTERRUPT;
3329
	} else {
3330
	} else {
3330
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3331
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3331
	}
3332
	}
3332
 
3333
 
3333
	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3334
	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3334
 
3335
 
3335
	if (INTEL_INFO(dev)->gen >= 6) {
3336
	if (INTEL_INFO(dev)->gen >= 6) {
3336
		/*
3337
		/*
3337
		 * RPS interrupts will get enabled/disabled on demand when RPS
3338
		 * RPS interrupts will get enabled/disabled on demand when RPS
3338
		 * itself is enabled/disabled.
3339
		 * itself is enabled/disabled.
3339
		 */
3340
		 */
3340
		if (HAS_VEBOX(dev))
3341
		if (HAS_VEBOX(dev))
3341
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3342
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3342
 
3343
 
3343
		dev_priv->pm_irq_mask = 0xffffffff;
3344
		dev_priv->pm_irq_mask = 0xffffffff;
3344
		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3345
		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3345
	}
3346
	}
3346
}
3347
}
3347
 
3348
 
3348
static int ironlake_irq_postinstall(struct drm_device *dev)
3349
static int ironlake_irq_postinstall(struct drm_device *dev)
3349
{
3350
{
3350
	struct drm_i915_private *dev_priv = dev->dev_private;
3351
	struct drm_i915_private *dev_priv = dev->dev_private;
3351
	u32 display_mask, extra_mask;
3352
	u32 display_mask, extra_mask;
3352
 
3353
 
3353
	if (INTEL_INFO(dev)->gen >= 7) {
3354
	if (INTEL_INFO(dev)->gen >= 7) {
3354
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3355
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3355
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3356
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3356
				DE_PLANEB_FLIP_DONE_IVB |
3357
				DE_PLANEB_FLIP_DONE_IVB |
3357
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3358
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3358
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3359
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3359
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3360
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3360
			      DE_DP_A_HOTPLUG_IVB);
3361
			      DE_DP_A_HOTPLUG_IVB);
3361
	} else {
3362
	} else {
3362
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3363
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3363
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3364
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3364
				DE_AUX_CHANNEL_A |
3365
				DE_AUX_CHANNEL_A |
3365
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3366
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3366
				DE_POISON);
3367
				DE_POISON);
3367
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3368
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3368
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3369
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3369
			      DE_DP_A_HOTPLUG);
3370
			      DE_DP_A_HOTPLUG);
3370
	}
3371
	}
3371
 
3372
 
3372
	dev_priv->irq_mask = ~display_mask;
3373
	dev_priv->irq_mask = ~display_mask;
3373
 
3374
 
3374
	I915_WRITE(HWSTAM, 0xeffe);
3375
	I915_WRITE(HWSTAM, 0xeffe);
3375
 
3376
 
3376
	ibx_irq_pre_postinstall(dev);
3377
	ibx_irq_pre_postinstall(dev);
3377
 
3378
 
3378
	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3379
	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3379
 
3380
 
3380
	gen5_gt_irq_postinstall(dev);
3381
	gen5_gt_irq_postinstall(dev);
3381
 
3382
 
3382
	ibx_irq_postinstall(dev);
3383
	ibx_irq_postinstall(dev);
3383
 
3384
 
3384
	if (IS_IRONLAKE_M(dev)) {
3385
	if (IS_IRONLAKE_M(dev)) {
3385
		/* Enable PCU event interrupts
3386
		/* Enable PCU event interrupts
3386
		 *
3387
		 *
3387
		 * spinlocking not required here for correctness since interrupt
3388
		 * spinlocking not required here for correctness since interrupt
3388
		 * setup is guaranteed to run in single-threaded context. But we
3389
		 * setup is guaranteed to run in single-threaded context. But we
3389
		 * need it to make the assert_spin_locked happy. */
3390
		 * need it to make the assert_spin_locked happy. */
3390
		spin_lock_irq(&dev_priv->irq_lock);
3391
		spin_lock_irq(&dev_priv->irq_lock);
3391
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
3392
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
3392
		spin_unlock_irq(&dev_priv->irq_lock);
3393
		spin_unlock_irq(&dev_priv->irq_lock);
3393
	}
3394
	}
3394
 
3395
 
3395
	return 0;
3396
	return 0;
3396
}
3397
}
3397
 
3398
 
3398
static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3399
static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3399
{
3400
{
3400
	u32 pipestat_mask;
3401
	u32 pipestat_mask;
3401
	u32 iir_mask;
3402
	u32 iir_mask;
3402
	enum pipe pipe;
3403
	enum pipe pipe;
3403
 
3404
 
3404
	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3405
	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3405
			PIPE_FIFO_UNDERRUN_STATUS;
3406
			PIPE_FIFO_UNDERRUN_STATUS;
3406
 
3407
 
3407
	for_each_pipe(dev_priv, pipe)
3408
	for_each_pipe(dev_priv, pipe)
3408
		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3409
		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3409
	POSTING_READ(PIPESTAT(PIPE_A));
3410
	POSTING_READ(PIPESTAT(PIPE_A));
3410
 
3411
 
3411
	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3412
	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3412
			PIPE_CRC_DONE_INTERRUPT_STATUS;
3413
			PIPE_CRC_DONE_INTERRUPT_STATUS;
3413
 
3414
 
3414
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3415
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3415
	for_each_pipe(dev_priv, pipe)
3416
	for_each_pipe(dev_priv, pipe)
3416
		      i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3417
		      i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3417
 
3418
 
3418
	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3419
	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3419
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3420
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3420
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3421
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3421
	if (IS_CHERRYVIEW(dev_priv))
3422
	if (IS_CHERRYVIEW(dev_priv))
3422
		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3423
		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3423
	dev_priv->irq_mask &= ~iir_mask;
3424
	dev_priv->irq_mask &= ~iir_mask;
3424
 
3425
 
3425
	I915_WRITE(VLV_IIR, iir_mask);
3426
	I915_WRITE(VLV_IIR, iir_mask);
3426
	I915_WRITE(VLV_IIR, iir_mask);
3427
	I915_WRITE(VLV_IIR, iir_mask);
3427
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3428
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3428
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3429
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3429
	POSTING_READ(VLV_IMR);
3430
	POSTING_READ(VLV_IMR);
3430
}
3431
}
3431
 
3432
 
3432
static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3433
static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3433
{
3434
{
3434
	u32 pipestat_mask;
3435
	u32 pipestat_mask;
3435
	u32 iir_mask;
3436
	u32 iir_mask;
3436
	enum pipe pipe;
3437
	enum pipe pipe;
3437
 
3438
 
3438
	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3439
	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3439
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3440
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3440
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3441
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3441
	if (IS_CHERRYVIEW(dev_priv))
3442
	if (IS_CHERRYVIEW(dev_priv))
3442
		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3443
		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3443
 
3444
 
3444
	dev_priv->irq_mask |= iir_mask;
3445
	dev_priv->irq_mask |= iir_mask;
3445
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3446
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3446
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3447
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3447
	I915_WRITE(VLV_IIR, iir_mask);
3448
	I915_WRITE(VLV_IIR, iir_mask);
3448
	I915_WRITE(VLV_IIR, iir_mask);
3449
	I915_WRITE(VLV_IIR, iir_mask);
3449
	POSTING_READ(VLV_IIR);
3450
	POSTING_READ(VLV_IIR);
3450
 
3451
 
3451
	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3452
	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3452
			PIPE_CRC_DONE_INTERRUPT_STATUS;
3453
			PIPE_CRC_DONE_INTERRUPT_STATUS;
3453
 
3454
 
3454
	i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3455
	i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3455
	for_each_pipe(dev_priv, pipe)
3456
	for_each_pipe(dev_priv, pipe)
3456
		i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
3457
		i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
3457
 
3458
 
3458
	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3459
	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3459
			PIPE_FIFO_UNDERRUN_STATUS;
3460
			PIPE_FIFO_UNDERRUN_STATUS;
3460
 
3461
 
3461
	for_each_pipe(dev_priv, pipe)
3462
	for_each_pipe(dev_priv, pipe)
3462
		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3463
		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3463
	POSTING_READ(PIPESTAT(PIPE_A));
3464
	POSTING_READ(PIPESTAT(PIPE_A));
3464
}
3465
}
3465
 
3466
 
3466
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3467
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3467
{
3468
{
3468
	assert_spin_locked(&dev_priv->irq_lock);
3469
	assert_spin_locked(&dev_priv->irq_lock);
3469
 
3470
 
3470
	if (dev_priv->display_irqs_enabled)
3471
	if (dev_priv->display_irqs_enabled)
3471
		return;
3472
		return;
3472
 
3473
 
3473
	dev_priv->display_irqs_enabled = true;
3474
	dev_priv->display_irqs_enabled = true;
3474
 
3475
 
3475
	if (intel_irqs_enabled(dev_priv))
3476
	if (intel_irqs_enabled(dev_priv))
3476
		valleyview_display_irqs_install(dev_priv);
3477
		valleyview_display_irqs_install(dev_priv);
3477
}
3478
}
3478
 
3479
 
3479
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3480
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3480
{
3481
{
3481
	assert_spin_locked(&dev_priv->irq_lock);
3482
	assert_spin_locked(&dev_priv->irq_lock);
3482
 
3483
 
3483
	if (!dev_priv->display_irqs_enabled)
3484
	if (!dev_priv->display_irqs_enabled)
3484
		return;
3485
		return;
3485
 
3486
 
3486
	dev_priv->display_irqs_enabled = false;
3487
	dev_priv->display_irqs_enabled = false;
3487
 
3488
 
3488
	if (intel_irqs_enabled(dev_priv))
3489
	if (intel_irqs_enabled(dev_priv))
3489
		valleyview_display_irqs_uninstall(dev_priv);
3490
		valleyview_display_irqs_uninstall(dev_priv);
3490
}
3491
}
3491
 
3492
 
3492
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3493
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3493
{
3494
{
3494
	dev_priv->irq_mask = ~0;
3495
	dev_priv->irq_mask = ~0;
3495
 
3496
 
3496
	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3497
	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3497
	POSTING_READ(PORT_HOTPLUG_EN);
3498
	POSTING_READ(PORT_HOTPLUG_EN);
3498
 
3499
 
3499
	I915_WRITE(VLV_IIR, 0xffffffff);
3500
	I915_WRITE(VLV_IIR, 0xffffffff);
3500
	I915_WRITE(VLV_IIR, 0xffffffff);
3501
	I915_WRITE(VLV_IIR, 0xffffffff);
3501
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3502
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3502
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3503
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3503
	POSTING_READ(VLV_IMR);
3504
	POSTING_READ(VLV_IMR);
3504
 
3505
 
3505
	/* Interrupt setup is already guaranteed to be single-threaded, this is
3506
	/* Interrupt setup is already guaranteed to be single-threaded, this is
3506
	 * just to make the assert_spin_locked check happy. */
3507
	 * just to make the assert_spin_locked check happy. */
3507
	spin_lock_irq(&dev_priv->irq_lock);
3508
	spin_lock_irq(&dev_priv->irq_lock);
3508
	if (dev_priv->display_irqs_enabled)
3509
	if (dev_priv->display_irqs_enabled)
3509
		valleyview_display_irqs_install(dev_priv);
3510
		valleyview_display_irqs_install(dev_priv);
3510
	spin_unlock_irq(&dev_priv->irq_lock);
3511
	spin_unlock_irq(&dev_priv->irq_lock);
3511
}
3512
}
3512
 
3513
 
3513
static int valleyview_irq_postinstall(struct drm_device *dev)
3514
static int valleyview_irq_postinstall(struct drm_device *dev)
3514
{
3515
{
3515
	struct drm_i915_private *dev_priv = dev->dev_private;
3516
	struct drm_i915_private *dev_priv = dev->dev_private;
3516
 
3517
 
3517
	vlv_display_irq_postinstall(dev_priv);
3518
	vlv_display_irq_postinstall(dev_priv);
3518
 
3519
 
3519
	gen5_gt_irq_postinstall(dev);
3520
	gen5_gt_irq_postinstall(dev);
3520
 
3521
 
3521
	/* ack & enable invalid PTE error interrupts */
3522
	/* ack & enable invalid PTE error interrupts */
3522
#if 0 /* FIXME: add support to irq handler for checking these bits */
3523
#if 0 /* FIXME: add support to irq handler for checking these bits */
3523
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3524
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3524
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3525
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3525
#endif
3526
#endif
3526
 
3527
 
3527
	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3528
	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3528
 
3529
 
3529
	return 0;
3530
	return 0;
3530
}
3531
}
3531
 
3532
 
3532
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3533
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3533
{
3534
{
3534
	/* These are interrupts we'll toggle with the ring mask register */
3535
	/* These are interrupts we'll toggle with the ring mask register */
3535
	uint32_t gt_interrupts[] = {
3536
	uint32_t gt_interrupts[] = {
3536
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3537
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3537
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3538
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3538
			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3539
			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3539
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3540
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3540
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3541
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3541
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3542
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3542
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3543
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3543
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3544
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3544
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3545
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3545
		0,
3546
		0,
3546
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3547
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3547
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3548
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3548
		};
3549
		};
3549
 
3550
 
3550
	dev_priv->pm_irq_mask = 0xffffffff;
3551
	dev_priv->pm_irq_mask = 0xffffffff;
3551
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3552
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3552
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3553
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3553
	/*
3554
	/*
3554
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
3555
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
3555
	 * is enabled/disabled.
3556
	 * is enabled/disabled.
3556
	 */
3557
	 */
3557
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
3558
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
3558
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3559
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3559
}
3560
}
3560
 
3561
 
3561
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3562
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3562
{
3563
{
3563
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3564
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3564
	uint32_t de_pipe_enables;
3565
	uint32_t de_pipe_enables;
3565
	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
3566
	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
3566
	u32 de_port_enables;
3567
	u32 de_port_enables;
3567
	enum pipe pipe;
3568
	enum pipe pipe;
3568
 
3569
 
3569
	if (INTEL_INFO(dev_priv)->gen >= 9) {
3570
	if (INTEL_INFO(dev_priv)->gen >= 9) {
3570
		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3571
		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3571
				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3572
				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3572
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3573
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3573
				  GEN9_AUX_CHANNEL_D;
3574
				  GEN9_AUX_CHANNEL_D;
3574
		if (IS_BROXTON(dev_priv))
3575
		if (IS_BROXTON(dev_priv))
3575
			de_port_masked |= BXT_DE_PORT_GMBUS;
3576
			de_port_masked |= BXT_DE_PORT_GMBUS;
3576
	} else {
3577
	} else {
3577
		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3578
		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3578
				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3579
				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3579
	}
3580
	}
3580
 
3581
 
3581
	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3582
	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3582
					   GEN8_PIPE_FIFO_UNDERRUN;
3583
					   GEN8_PIPE_FIFO_UNDERRUN;
3583
 
3584
 
3584
	de_port_enables = de_port_masked;
3585
	de_port_enables = de_port_masked;
3585
	if (IS_BROXTON(dev_priv))
3586
	if (IS_BROXTON(dev_priv))
3586
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3587
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3587
	else if (IS_BROADWELL(dev_priv))
3588
	else if (IS_BROADWELL(dev_priv))
3588
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
3589
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
3589
 
3590
 
3590
	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3591
	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3591
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3592
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3592
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3593
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3593
 
3594
 
3594
	for_each_pipe(dev_priv, pipe)
3595
	for_each_pipe(dev_priv, pipe)
3595
		if (intel_display_power_is_enabled(dev_priv,
3596
		if (intel_display_power_is_enabled(dev_priv,
3596
				POWER_DOMAIN_PIPE(pipe)))
3597
				POWER_DOMAIN_PIPE(pipe)))
3597
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3598
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3598
					  dev_priv->de_irq_mask[pipe],
3599
					  dev_priv->de_irq_mask[pipe],
3599
					  de_pipe_enables);
3600
					  de_pipe_enables);
3600
 
3601
 
3601
	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3602
	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3602
}
3603
}
3603
 
3604
 
3604
static int gen8_irq_postinstall(struct drm_device *dev)
3605
static int gen8_irq_postinstall(struct drm_device *dev)
3605
{
3606
{
3606
	struct drm_i915_private *dev_priv = dev->dev_private;
3607
	struct drm_i915_private *dev_priv = dev->dev_private;
3607
 
3608
 
3608
	if (HAS_PCH_SPLIT(dev))
3609
	if (HAS_PCH_SPLIT(dev))
3609
		ibx_irq_pre_postinstall(dev);
3610
		ibx_irq_pre_postinstall(dev);
3610
 
3611
 
3611
	gen8_gt_irq_postinstall(dev_priv);
3612
	gen8_gt_irq_postinstall(dev_priv);
3612
	gen8_de_irq_postinstall(dev_priv);
3613
	gen8_de_irq_postinstall(dev_priv);
3613
 
3614
 
3614
	if (HAS_PCH_SPLIT(dev))
3615
	if (HAS_PCH_SPLIT(dev))
3615
		ibx_irq_postinstall(dev);
3616
		ibx_irq_postinstall(dev);
3616
 
3617
 
3617
	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3618
	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3618
	POSTING_READ(GEN8_MASTER_IRQ);
3619
	POSTING_READ(GEN8_MASTER_IRQ);
3619
 
3620
 
3620
	return 0;
3621
	return 0;
3621
}
3622
}
3622
 
3623
 
3623
static int cherryview_irq_postinstall(struct drm_device *dev)
3624
static int cherryview_irq_postinstall(struct drm_device *dev)
3624
{
3625
{
3625
	struct drm_i915_private *dev_priv = dev->dev_private;
3626
	struct drm_i915_private *dev_priv = dev->dev_private;
3626
 
3627
 
3627
	vlv_display_irq_postinstall(dev_priv);
3628
	vlv_display_irq_postinstall(dev_priv);
3628
 
3629
 
3629
	gen8_gt_irq_postinstall(dev_priv);
3630
	gen8_gt_irq_postinstall(dev_priv);
3630
 
3631
 
3631
	I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3632
	I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3632
	POSTING_READ(GEN8_MASTER_IRQ);
3633
	POSTING_READ(GEN8_MASTER_IRQ);
3633
 
3634
 
3634
	return 0;
3635
	return 0;
3635
}
3636
}
3636
 
3637
 
3637
static void gen8_irq_uninstall(struct drm_device *dev)
3638
static void gen8_irq_uninstall(struct drm_device *dev)
3638
{
3639
{
3639
	struct drm_i915_private *dev_priv = dev->dev_private;
3640
	struct drm_i915_private *dev_priv = dev->dev_private;
3640
 
3641
 
3641
	if (!dev_priv)
3642
	if (!dev_priv)
3642
		return;
3643
		return;
3643
 
3644
 
3644
	gen8_irq_reset(dev);
3645
	gen8_irq_reset(dev);
3645
}
3646
}
3646
 
3647
 
3647
static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
3648
static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
3648
{
3649
{
3649
	/* Interrupt setup is already guaranteed to be single-threaded, this is
3650
	/* Interrupt setup is already guaranteed to be single-threaded, this is
3650
	 * just to make the assert_spin_locked check happy. */
3651
	 * just to make the assert_spin_locked check happy. */
3651
	spin_lock_irq(&dev_priv->irq_lock);
3652
	spin_lock_irq(&dev_priv->irq_lock);
3652
	if (dev_priv->display_irqs_enabled)
3653
	if (dev_priv->display_irqs_enabled)
3653
		valleyview_display_irqs_uninstall(dev_priv);
3654
		valleyview_display_irqs_uninstall(dev_priv);
3654
	spin_unlock_irq(&dev_priv->irq_lock);
3655
	spin_unlock_irq(&dev_priv->irq_lock);
3655
 
3656
 
3656
	vlv_display_irq_reset(dev_priv);
3657
	vlv_display_irq_reset(dev_priv);
3657
 
3658
 
3658
	dev_priv->irq_mask = ~0;
3659
	dev_priv->irq_mask = ~0;
3659
}
3660
}
3660
 
3661
 
3661
static void valleyview_irq_uninstall(struct drm_device *dev)
3662
static void valleyview_irq_uninstall(struct drm_device *dev)
3662
{
3663
{
3663
	struct drm_i915_private *dev_priv = dev->dev_private;
3664
	struct drm_i915_private *dev_priv = dev->dev_private;
3664
 
3665
 
3665
	if (!dev_priv)
3666
	if (!dev_priv)
3666
		return;
3667
		return;
3667
 
3668
 
3668
	I915_WRITE(VLV_MASTER_IER, 0);
3669
	I915_WRITE(VLV_MASTER_IER, 0);
3669
 
3670
 
3670
	gen5_gt_irq_reset(dev);
3671
	gen5_gt_irq_reset(dev);
3671
 
3672
 
3672
	I915_WRITE(HWSTAM, 0xffffffff);
3673
	I915_WRITE(HWSTAM, 0xffffffff);
3673
 
3674
 
3674
	vlv_display_irq_uninstall(dev_priv);
3675
	vlv_display_irq_uninstall(dev_priv);
3675
}
3676
}
3676
 
3677
 
3677
static void cherryview_irq_uninstall(struct drm_device *dev)
3678
static void cherryview_irq_uninstall(struct drm_device *dev)
3678
{
3679
{
3679
	struct drm_i915_private *dev_priv = dev->dev_private;
3680
	struct drm_i915_private *dev_priv = dev->dev_private;
3680
 
3681
 
3681
	if (!dev_priv)
3682
	if (!dev_priv)
3682
		return;
3683
		return;
3683
 
3684
 
3684
	I915_WRITE(GEN8_MASTER_IRQ, 0);
3685
	I915_WRITE(GEN8_MASTER_IRQ, 0);
3685
	POSTING_READ(GEN8_MASTER_IRQ);
3686
	POSTING_READ(GEN8_MASTER_IRQ);
3686
 
3687
 
3687
	gen8_gt_irq_reset(dev_priv);
3688
	gen8_gt_irq_reset(dev_priv);
3688
 
3689
 
3689
	GEN5_IRQ_RESET(GEN8_PCU_);
3690
	GEN5_IRQ_RESET(GEN8_PCU_);
3690
 
3691
 
3691
	vlv_display_irq_uninstall(dev_priv);
3692
	vlv_display_irq_uninstall(dev_priv);
3692
}
3693
}
3693
 
3694
 
3694
static void ironlake_irq_uninstall(struct drm_device *dev)
3695
static void ironlake_irq_uninstall(struct drm_device *dev)
3695
{
3696
{
3696
	struct drm_i915_private *dev_priv = dev->dev_private;
3697
	struct drm_i915_private *dev_priv = dev->dev_private;
3697
 
3698
 
3698
	if (!dev_priv)
3699
	if (!dev_priv)
3699
		return;
3700
		return;
3700
 
3701
 
3701
	ironlake_irq_reset(dev);
3702
	ironlake_irq_reset(dev);
3702
}
3703
}
3703
 
3704
 
3704
#if 0
3705
#if 0
3705
static void i8xx_irq_preinstall(struct drm_device * dev)
3706
static void i8xx_irq_preinstall(struct drm_device * dev)
3706
{
3707
{
3707
	struct drm_i915_private *dev_priv = dev->dev_private;
3708
	struct drm_i915_private *dev_priv = dev->dev_private;
3708
	int pipe;
3709
	int pipe;
3709
 
3710
 
3710
	for_each_pipe(dev_priv, pipe)
3711
	for_each_pipe(dev_priv, pipe)
3711
		I915_WRITE(PIPESTAT(pipe), 0);
3712
		I915_WRITE(PIPESTAT(pipe), 0);
3712
	I915_WRITE16(IMR, 0xffff);
3713
	I915_WRITE16(IMR, 0xffff);
3713
	I915_WRITE16(IER, 0x0);
3714
	I915_WRITE16(IER, 0x0);
3714
	POSTING_READ16(IER);
3715
	POSTING_READ16(IER);
3715
}
3716
}
3716
 
3717
 
3717
static int i8xx_irq_postinstall(struct drm_device *dev)
3718
static int i8xx_irq_postinstall(struct drm_device *dev)
3718
{
3719
{
3719
	struct drm_i915_private *dev_priv = dev->dev_private;
3720
	struct drm_i915_private *dev_priv = dev->dev_private;
3720
 
3721
 
3721
	I915_WRITE16(EMR,
3722
	I915_WRITE16(EMR,
3722
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3723
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3723
 
3724
 
3724
	/* Unmask the interrupts that we always want on. */
3725
	/* Unmask the interrupts that we always want on. */
3725
	dev_priv->irq_mask =
3726
	dev_priv->irq_mask =
3726
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3727
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3727
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3728
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3728
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3729
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3729
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3730
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3730
	I915_WRITE16(IMR, dev_priv->irq_mask);
3731
	I915_WRITE16(IMR, dev_priv->irq_mask);
3731
 
3732
 
3732
	I915_WRITE16(IER,
3733
	I915_WRITE16(IER,
3733
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3734
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3734
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3735
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3735
		     I915_USER_INTERRUPT);
3736
		     I915_USER_INTERRUPT);
3736
	POSTING_READ16(IER);
3737
	POSTING_READ16(IER);
3737
 
3738
 
3738
	/* Interrupt setup is already guaranteed to be single-threaded, this is
3739
	/* Interrupt setup is already guaranteed to be single-threaded, this is
3739
	 * just to make the assert_spin_locked check happy. */
3740
	 * just to make the assert_spin_locked check happy. */
3740
	spin_lock_irq(&dev_priv->irq_lock);
3741
	spin_lock_irq(&dev_priv->irq_lock);
3741
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3742
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3742
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3743
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3743
	spin_unlock_irq(&dev_priv->irq_lock);
3744
	spin_unlock_irq(&dev_priv->irq_lock);
3744
 
3745
 
3745
	return 0;
3746
	return 0;
3746
}
3747
}
3747
 
3748
 
3748
/*
3749
/*
3749
 * Returns true when a page flip has completed.
3750
 * Returns true when a page flip has completed.
3750
 */
3751
 */
3751
static bool i8xx_handle_vblank(struct drm_device *dev,
3752
static bool i8xx_handle_vblank(struct drm_device *dev,
3752
			       int plane, int pipe, u32 iir)
3753
			       int plane, int pipe, u32 iir)
3753
{
3754
{
3754
	struct drm_i915_private *dev_priv = dev->dev_private;
3755
	struct drm_i915_private *dev_priv = dev->dev_private;
3755
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3756
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3756
 
3757
 
3757
	if (!intel_pipe_handle_vblank(dev, pipe))
3758
	if (!intel_pipe_handle_vblank(dev, pipe))
3758
		return false;
3759
		return false;
3759
 
3760
 
3760
	if ((iir & flip_pending) == 0)
3761
	if ((iir & flip_pending) == 0)
3761
		goto check_page_flip;
3762
		goto check_page_flip;
3762
 
3763
 
3763
	/* We detect FlipDone by looking for the change in PendingFlip from '1'
3764
	/* We detect FlipDone by looking for the change in PendingFlip from '1'
3764
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3765
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3765
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3766
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3766
	 * the flip is completed (no longer pending). Since this doesn't raise
3767
	 * the flip is completed (no longer pending). Since this doesn't raise
3767
	 * an interrupt per se, we watch for the change at vblank.
3768
	 * an interrupt per se, we watch for the change at vblank.
3768
	 */
3769
	 */
3769
	if (I915_READ16(ISR) & flip_pending)
3770
	if (I915_READ16(ISR) & flip_pending)
3770
		goto check_page_flip;
3771
		goto check_page_flip;
3771
 
3772
 
3772
//   intel_prepare_page_flip(dev, plane);
3773
//   intel_prepare_page_flip(dev, plane);
3773
//   intel_finish_page_flip(dev, pipe);
3774
//   intel_finish_page_flip(dev, pipe);
3774
	return true;
3775
	return true;
3775
 
3776
 
3776
check_page_flip:
3777
check_page_flip:
3777
//   intel_check_page_flip(dev, pipe);
3778
//   intel_check_page_flip(dev, pipe);
3778
	return false;
3779
	return false;
3779
}
3780
}
3780
 
3781
 
3781
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3782
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3782
{
3783
{
3783
	struct drm_device *dev = arg;
3784
	struct drm_device *dev = arg;
3784
	struct drm_i915_private *dev_priv = dev->dev_private;
3785
	struct drm_i915_private *dev_priv = dev->dev_private;
3785
	u16 iir, new_iir;
3786
	u16 iir, new_iir;
3786
	u32 pipe_stats[2];
3787
	u32 pipe_stats[2];
3787
	int pipe;
3788
	int pipe;
3788
	u16 flip_mask =
3789
	u16 flip_mask =
3789
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3790
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3790
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3791
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3791
 
3792
 
3792
	if (!intel_irqs_enabled(dev_priv))
3793
	if (!intel_irqs_enabled(dev_priv))
3793
		return IRQ_NONE;
3794
		return IRQ_NONE;
3794
 
3795
 
3795
	iir = I915_READ16(IIR);
3796
	iir = I915_READ16(IIR);
3796
	if (iir == 0)
3797
	if (iir == 0)
3797
		return IRQ_NONE;
3798
		return IRQ_NONE;
3798
 
3799
 
3799
	while (iir & ~flip_mask) {
3800
	while (iir & ~flip_mask) {
3800
		/* Can't rely on pipestat interrupt bit in iir as it might
3801
		/* Can't rely on pipestat interrupt bit in iir as it might
3801
		 * have been cleared after the pipestat interrupt was received.
3802
		 * have been cleared after the pipestat interrupt was received.
3802
		 * It doesn't set the bit in iir again, but it still produces
3803
		 * It doesn't set the bit in iir again, but it still produces
3803
		 * interrupts (for non-MSI).
3804
		 * interrupts (for non-MSI).
3804
		 */
3805
		 */
3805
		spin_lock(&dev_priv->irq_lock);
3806
		spin_lock(&dev_priv->irq_lock);
3806
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3807
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3807
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3808
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3808
 
3809
 
3809
		for_each_pipe(dev_priv, pipe) {
3810
		for_each_pipe(dev_priv, pipe) {
3810
			int reg = PIPESTAT(pipe);
3811
			int reg = PIPESTAT(pipe);
3811
			pipe_stats[pipe] = I915_READ(reg);
3812
			pipe_stats[pipe] = I915_READ(reg);
3812
 
3813
 
3813
			/*
3814
			/*
3814
			 * Clear the PIPE*STAT regs before the IIR
3815
			 * Clear the PIPE*STAT regs before the IIR
3815
			 */
3816
			 */
3816
			if (pipe_stats[pipe] & 0x8000ffff)
3817
			if (pipe_stats[pipe] & 0x8000ffff)
3817
				I915_WRITE(reg, pipe_stats[pipe]);
3818
				I915_WRITE(reg, pipe_stats[pipe]);
3818
		}
3819
		}
3819
		spin_unlock(&dev_priv->irq_lock);
3820
		spin_unlock(&dev_priv->irq_lock);
3820
 
3821
 
3821
		I915_WRITE16(IIR, iir & ~flip_mask);
3822
		I915_WRITE16(IIR, iir & ~flip_mask);
3822
		new_iir = I915_READ16(IIR); /* Flush posted writes */
3823
		new_iir = I915_READ16(IIR); /* Flush posted writes */
3823
 
3824
 
3824
		if (iir & I915_USER_INTERRUPT)
3825
		if (iir & I915_USER_INTERRUPT)
3825
			notify_ring(&dev_priv->ring[RCS]);
3826
			notify_ring(&dev_priv->ring[RCS]);
3826
 
3827
 
3827
		for_each_pipe(dev_priv, pipe) {
3828
		for_each_pipe(dev_priv, pipe) {
3828
			int plane = pipe;
3829
			int plane = pipe;
3829
			if (HAS_FBC(dev))
3830
			if (HAS_FBC(dev))
3830
				plane = !plane;
3831
				plane = !plane;
3831
 
3832
 
3832
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3833
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3833
			    i8xx_handle_vblank(dev, plane, pipe, iir))
3834
			    i8xx_handle_vblank(dev, plane, pipe, iir))
3834
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3835
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3835
 
3836
 
3836
			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3837
			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3837
				i9xx_pipe_crc_irq_handler(dev, pipe);
3838
				i9xx_pipe_crc_irq_handler(dev, pipe);
3838
 
3839
 
3839
			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3840
			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3840
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
3841
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
3841
								    pipe);
3842
								    pipe);
3842
		}
3843
		}
3843
 
3844
 
3844
		iir = new_iir;
3845
		iir = new_iir;
3845
	}
3846
	}
3846
 
3847
 
3847
	return IRQ_HANDLED;
3848
	return IRQ_HANDLED;
3848
}
3849
}
3849
 
3850
 
3850
static void i8xx_irq_uninstall(struct drm_device * dev)
3851
static void i8xx_irq_uninstall(struct drm_device * dev)
3851
{
3852
{
3852
	struct drm_i915_private *dev_priv = dev->dev_private;
3853
	struct drm_i915_private *dev_priv = dev->dev_private;
3853
	int pipe;
3854
	int pipe;
3854
 
3855
 
3855
	for_each_pipe(dev_priv, pipe) {
3856
	for_each_pipe(dev_priv, pipe) {
3856
		/* Clear enable bits; then clear status bits */
3857
		/* Clear enable bits; then clear status bits */
3857
		I915_WRITE(PIPESTAT(pipe), 0);
3858
		I915_WRITE(PIPESTAT(pipe), 0);
3858
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3859
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3859
	}
3860
	}
3860
	I915_WRITE16(IMR, 0xffff);
3861
	I915_WRITE16(IMR, 0xffff);
3861
	I915_WRITE16(IER, 0x0);
3862
	I915_WRITE16(IER, 0x0);
3862
	I915_WRITE16(IIR, I915_READ16(IIR));
3863
	I915_WRITE16(IIR, I915_READ16(IIR));
3863
}
3864
}
3864
 
3865
 
3865
#endif
3866
#endif
3866
 
3867
 
3867
static void i915_irq_preinstall(struct drm_device * dev)
3868
static void i915_irq_preinstall(struct drm_device * dev)
3868
{
3869
{
3869
	struct drm_i915_private *dev_priv = dev->dev_private;
3870
	struct drm_i915_private *dev_priv = dev->dev_private;
3870
	int pipe;
3871
	int pipe;
3871
 
3872
 
3872
	if (I915_HAS_HOTPLUG(dev)) {
3873
	if (I915_HAS_HOTPLUG(dev)) {
3873
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3874
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3874
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3875
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3875
	}
3876
	}
3876
 
3877
 
3877
	I915_WRITE16(HWSTAM, 0xeffe);
3878
	I915_WRITE16(HWSTAM, 0xeffe);
3878
	for_each_pipe(dev_priv, pipe)
3879
	for_each_pipe(dev_priv, pipe)
3879
		I915_WRITE(PIPESTAT(pipe), 0);
3880
		I915_WRITE(PIPESTAT(pipe), 0);
3880
	I915_WRITE(IMR, 0xffffffff);
3881
	I915_WRITE(IMR, 0xffffffff);
3881
	I915_WRITE(IER, 0x0);
3882
	I915_WRITE(IER, 0x0);
3882
	POSTING_READ(IER);
3883
	POSTING_READ(IER);
3883
}
3884
}
3884
 
3885
 
3885
static int i915_irq_postinstall(struct drm_device *dev)
3886
static int i915_irq_postinstall(struct drm_device *dev)
3886
{
3887
{
3887
	struct drm_i915_private *dev_priv = dev->dev_private;
3888
	struct drm_i915_private *dev_priv = dev->dev_private;
3888
	u32 enable_mask;
3889
	u32 enable_mask;
3889
 
3890
 
3890
	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3891
	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3891
 
3892
 
3892
	/* Unmask the interrupts that we always want on. */
3893
	/* Unmask the interrupts that we always want on. */
3893
	dev_priv->irq_mask =
3894
	dev_priv->irq_mask =
3894
		~(I915_ASLE_INTERRUPT |
3895
		~(I915_ASLE_INTERRUPT |
3895
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3896
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3896
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3897
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3897
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3898
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3898
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3899
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3899
 
3900
 
3900
	enable_mask =
3901
	enable_mask =
3901
		I915_ASLE_INTERRUPT |
3902
		I915_ASLE_INTERRUPT |
3902
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3903
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3903
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3904
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3904
		I915_USER_INTERRUPT;
3905
		I915_USER_INTERRUPT;
3905
 
3906
 
3906
	if (I915_HAS_HOTPLUG(dev)) {
3907
	if (I915_HAS_HOTPLUG(dev)) {
3907
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3908
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3908
		POSTING_READ(PORT_HOTPLUG_EN);
3909
		POSTING_READ(PORT_HOTPLUG_EN);
3909
 
3910
 
3910
		/* Enable in IER... */
3911
		/* Enable in IER... */
3911
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3912
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3912
		/* and unmask in IMR */
3913
		/* and unmask in IMR */
3913
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3914
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3914
	}
3915
	}
3915
 
3916
 
3916
	I915_WRITE(IMR, dev_priv->irq_mask);
3917
	I915_WRITE(IMR, dev_priv->irq_mask);
3917
	I915_WRITE(IER, enable_mask);
3918
	I915_WRITE(IER, enable_mask);
3918
	POSTING_READ(IER);
3919
	POSTING_READ(IER);
3919
 
3920
 
3920
	i915_enable_asle_pipestat(dev);
3921
	i915_enable_asle_pipestat(dev);
3921
 
3922
 
3922
	/* Interrupt setup is already guaranteed to be single-threaded, this is
3923
	/* Interrupt setup is already guaranteed to be single-threaded, this is
3923
	 * just to make the assert_spin_locked check happy. */
3924
	 * just to make the assert_spin_locked check happy. */
3924
	spin_lock_irq(&dev_priv->irq_lock);
3925
	spin_lock_irq(&dev_priv->irq_lock);
3925
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3926
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3926
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3927
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3927
	spin_unlock_irq(&dev_priv->irq_lock);
3928
	spin_unlock_irq(&dev_priv->irq_lock);
3928
 
3929
 
3929
	return 0;
3930
	return 0;
3930
}
3931
}
3931
 
3932
 
3932
/*
3933
/*
3933
 * Returns true when a page flip has completed.
3934
 * Returns true when a page flip has completed.
3934
 */
3935
 */
3935
static bool i915_handle_vblank(struct drm_device *dev,
3936
static bool i915_handle_vblank(struct drm_device *dev,
3936
			       int plane, int pipe, u32 iir)
3937
			       int plane, int pipe, u32 iir)
3937
{
3938
{
3938
	struct drm_i915_private *dev_priv = dev->dev_private;
3939
	struct drm_i915_private *dev_priv = dev->dev_private;
3939
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3940
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3940
 
3941
 
3941
	if (!intel_pipe_handle_vblank(dev, pipe))
3942
	if (!intel_pipe_handle_vblank(dev, pipe))
3942
		return false;
3943
		return false;
3943
 
3944
 
3944
	if ((iir & flip_pending) == 0)
3945
	if ((iir & flip_pending) == 0)
3945
		goto check_page_flip;
3946
		goto check_page_flip;
3946
 
3947
 
3947
	/* We detect FlipDone by looking for the change in PendingFlip from '1'
3948
	/* We detect FlipDone by looking for the change in PendingFlip from '1'
3948
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3949
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3949
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3950
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3950
	 * the flip is completed (no longer pending). Since this doesn't raise
3951
	 * the flip is completed (no longer pending). Since this doesn't raise
3951
	 * an interrupt per se, we watch for the change at vblank.
3952
	 * an interrupt per se, we watch for the change at vblank.
3952
	 */
3953
	 */
3953
	if (I915_READ(ISR) & flip_pending)
3954
	if (I915_READ(ISR) & flip_pending)
3954
		goto check_page_flip;
3955
		goto check_page_flip;
3955
 
3956
 
3956
	return true;
3957
	return true;
3957
 
3958
 
3958
check_page_flip:
3959
check_page_flip:
3959
	return false;
3960
	return false;
3960
}
3961
}
3961
 
3962
 
3962
static irqreturn_t i915_irq_handler(int irq, void *arg)
3963
static irqreturn_t i915_irq_handler(int irq, void *arg)
3963
{
3964
{
3964
	struct drm_device *dev = arg;
3965
	struct drm_device *dev = arg;
3965
	struct drm_i915_private *dev_priv = dev->dev_private;
3966
	struct drm_i915_private *dev_priv = dev->dev_private;
3966
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
3967
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
3967
	u32 flip_mask =
3968
	u32 flip_mask =
3968
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3969
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3969
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3970
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3970
	int pipe, ret = IRQ_NONE;
3971
	int pipe, ret = IRQ_NONE;
3971
 
3972
 
3972
	if (!intel_irqs_enabled(dev_priv))
3973
	if (!intel_irqs_enabled(dev_priv))
3973
		return IRQ_NONE;
3974
		return IRQ_NONE;
3974
 
3975
 
3975
	iir = I915_READ(IIR);
3976
	iir = I915_READ(IIR);
3976
	do {
3977
	do {
3977
		bool irq_received = (iir & ~flip_mask) != 0;
3978
		bool irq_received = (iir & ~flip_mask) != 0;
3978
		bool blc_event = false;
3979
		bool blc_event = false;
3979
 
3980
 
3980
		/* Can't rely on pipestat interrupt bit in iir as it might
3981
		/* Can't rely on pipestat interrupt bit in iir as it might
3981
		 * have been cleared after the pipestat interrupt was received.
3982
		 * have been cleared after the pipestat interrupt was received.
3982
		 * It doesn't set the bit in iir again, but it still produces
3983
		 * It doesn't set the bit in iir again, but it still produces
3983
		 * interrupts (for non-MSI).
3984
		 * interrupts (for non-MSI).
3984
		 */
3985
		 */
3985
		spin_lock(&dev_priv->irq_lock);
3986
		spin_lock(&dev_priv->irq_lock);
3986
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3987
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3987
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3988
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3988
 
3989
 
3989
		for_each_pipe(dev_priv, pipe) {
3990
		for_each_pipe(dev_priv, pipe) {
3990
			int reg = PIPESTAT(pipe);
3991
			int reg = PIPESTAT(pipe);
3991
			pipe_stats[pipe] = I915_READ(reg);
3992
			pipe_stats[pipe] = I915_READ(reg);
3992
 
3993
 
3993
			/* Clear the PIPE*STAT regs before the IIR */
3994
			/* Clear the PIPE*STAT regs before the IIR */
3994
			if (pipe_stats[pipe] & 0x8000ffff) {
3995
			if (pipe_stats[pipe] & 0x8000ffff) {
3995
				I915_WRITE(reg, pipe_stats[pipe]);
3996
				I915_WRITE(reg, pipe_stats[pipe]);
3996
				irq_received = true;
3997
				irq_received = true;
3997
			}
3998
			}
3998
		}
3999
		}
3999
		spin_unlock(&dev_priv->irq_lock);
4000
		spin_unlock(&dev_priv->irq_lock);
4000
 
4001
 
4001
		if (!irq_received)
4002
		if (!irq_received)
4002
			break;
4003
			break;
4003
 
4004
 
4004
		/* Consume port.  Then clear IIR or we'll miss events */
4005
		/* Consume port.  Then clear IIR or we'll miss events */
4005
		if (I915_HAS_HOTPLUG(dev) &&
4006
		if (I915_HAS_HOTPLUG(dev) &&
4006
		    iir & I915_DISPLAY_PORT_INTERRUPT)
4007
		    iir & I915_DISPLAY_PORT_INTERRUPT)
4007
			i9xx_hpd_irq_handler(dev);
4008
			i9xx_hpd_irq_handler(dev);
4008
 
4009
 
4009
		I915_WRITE(IIR, iir & ~flip_mask);
4010
		I915_WRITE(IIR, iir & ~flip_mask);
4010
		new_iir = I915_READ(IIR); /* Flush posted writes */
4011
		new_iir = I915_READ(IIR); /* Flush posted writes */
4011
 
4012
 
4012
		if (iir & I915_USER_INTERRUPT)
4013
		if (iir & I915_USER_INTERRUPT)
4013
			notify_ring(&dev_priv->ring[RCS]);
4014
			notify_ring(&dev_priv->ring[RCS]);
4014
 
4015
 
4015
		for_each_pipe(dev_priv, pipe) {
4016
		for_each_pipe(dev_priv, pipe) {
4016
			int plane = pipe;
4017
			int plane = pipe;
4017
			if (HAS_FBC(dev))
4018
			if (HAS_FBC(dev))
4018
				plane = !plane;
4019
				plane = !plane;
4019
 
4020
 
4020
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4021
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4021
			    i915_handle_vblank(dev, plane, pipe, iir))
4022
			    i915_handle_vblank(dev, plane, pipe, iir))
4022
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4023
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4023
 
4024
 
4024
			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4025
			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4025
				blc_event = true;
4026
				blc_event = true;
4026
 
4027
 
4027
			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4028
			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4028
				i9xx_pipe_crc_irq_handler(dev, pipe);
4029
				i9xx_pipe_crc_irq_handler(dev, pipe);
4029
 
4030
 
4030
			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4031
			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4031
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
4032
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
4032
								    pipe);
4033
								    pipe);
4033
		}
4034
		}
4034
 
4035
 
4035
		if (blc_event || (iir & I915_ASLE_INTERRUPT))
4036
		if (blc_event || (iir & I915_ASLE_INTERRUPT))
4036
			intel_opregion_asle_intr(dev);
4037
			intel_opregion_asle_intr(dev);
4037
 
4038
 
4038
		/* With MSI, interrupts are only generated when iir
4039
		/* With MSI, interrupts are only generated when iir
4039
		 * transitions from zero to nonzero.  If another bit got
4040
		 * transitions from zero to nonzero.  If another bit got
4040
		 * set while we were handling the existing iir bits, then
4041
		 * set while we were handling the existing iir bits, then
4041
		 * we would never get another interrupt.
4042
		 * we would never get another interrupt.
4042
		 *
4043
		 *
4043
		 * This is fine on non-MSI as well, as if we hit this path
4044
		 * This is fine on non-MSI as well, as if we hit this path
4044
		 * we avoid exiting the interrupt handler only to generate
4045
		 * we avoid exiting the interrupt handler only to generate
4045
		 * another one.
4046
		 * another one.
4046
		 *
4047
		 *
4047
		 * Note that for MSI this could cause a stray interrupt report
4048
		 * Note that for MSI this could cause a stray interrupt report
4048
		 * if an interrupt landed in the time between writing IIR and
4049
		 * if an interrupt landed in the time between writing IIR and
4049
		 * the posting read.  This should be rare enough to never
4050
		 * the posting read.  This should be rare enough to never
4050
		 * trigger the 99% of 100,000 interrupts test for disabling
4051
		 * trigger the 99% of 100,000 interrupts test for disabling
4051
		 * stray interrupts.
4052
		 * stray interrupts.
4052
		 */
4053
		 */
4053
		ret = IRQ_HANDLED;
4054
		ret = IRQ_HANDLED;
4054
		iir = new_iir;
4055
		iir = new_iir;
4055
	} while (iir & ~flip_mask);
4056
	} while (iir & ~flip_mask);
4056
 
4057
 
4057
	return ret;
4058
	return ret;
4058
}
4059
}
4059
 
4060
 
4060
static void i915_irq_uninstall(struct drm_device * dev)
4061
static void i915_irq_uninstall(struct drm_device * dev)
4061
{
4062
{
4062
	struct drm_i915_private *dev_priv = dev->dev_private;
4063
	struct drm_i915_private *dev_priv = dev->dev_private;
4063
	int pipe;
4064
	int pipe;
4064
 
4065
 
4065
	if (I915_HAS_HOTPLUG(dev)) {
4066
	if (I915_HAS_HOTPLUG(dev)) {
4066
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4067
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4067
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4068
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4068
	}
4069
	}
4069
 
4070
 
4070
	I915_WRITE16(HWSTAM, 0xffff);
4071
	I915_WRITE16(HWSTAM, 0xffff);
4071
	for_each_pipe(dev_priv, pipe) {
4072
	for_each_pipe(dev_priv, pipe) {
4072
		/* Clear enable bits; then clear status bits */
4073
		/* Clear enable bits; then clear status bits */
4073
		I915_WRITE(PIPESTAT(pipe), 0);
4074
		I915_WRITE(PIPESTAT(pipe), 0);
4074
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4075
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4075
	}
4076
	}
4076
	I915_WRITE(IMR, 0xffffffff);
4077
	I915_WRITE(IMR, 0xffffffff);
4077
	I915_WRITE(IER, 0x0);
4078
	I915_WRITE(IER, 0x0);
4078
 
4079
 
4079
	I915_WRITE(IIR, I915_READ(IIR));
4080
	I915_WRITE(IIR, I915_READ(IIR));
4080
}
4081
}
4081
 
4082
 
4082
static void i965_irq_preinstall(struct drm_device * dev)
4083
static void i965_irq_preinstall(struct drm_device * dev)
4083
{
4084
{
4084
	struct drm_i915_private *dev_priv = dev->dev_private;
4085
	struct drm_i915_private *dev_priv = dev->dev_private;
4085
	int pipe;
4086
	int pipe;
4086
 
4087
 
4087
	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4088
	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4088
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4089
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4089
 
4090
 
4090
	I915_WRITE(HWSTAM, 0xeffe);
4091
	I915_WRITE(HWSTAM, 0xeffe);
4091
	for_each_pipe(dev_priv, pipe)
4092
	for_each_pipe(dev_priv, pipe)
4092
		I915_WRITE(PIPESTAT(pipe), 0);
4093
		I915_WRITE(PIPESTAT(pipe), 0);
4093
	I915_WRITE(IMR, 0xffffffff);
4094
	I915_WRITE(IMR, 0xffffffff);
4094
	I915_WRITE(IER, 0x0);
4095
	I915_WRITE(IER, 0x0);
4095
	POSTING_READ(IER);
4096
	POSTING_READ(IER);
4096
}
4097
}
4097
 
4098
 
4098
static int i965_irq_postinstall(struct drm_device *dev)
4099
static int i965_irq_postinstall(struct drm_device *dev)
4099
{
4100
{
4100
	struct drm_i915_private *dev_priv = dev->dev_private;
4101
	struct drm_i915_private *dev_priv = dev->dev_private;
4101
	u32 enable_mask;
4102
	u32 enable_mask;
4102
	u32 error_mask;
4103
	u32 error_mask;
4103
 
4104
 
4104
	/* Unmask the interrupts that we always want on. */
4105
	/* Unmask the interrupts that we always want on. */
4105
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4106
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4106
			       I915_DISPLAY_PORT_INTERRUPT |
4107
			       I915_DISPLAY_PORT_INTERRUPT |
4107
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4108
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4108
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4109
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4109
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4110
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4110
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4111
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4111
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4112
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4112
 
4113
 
4113
	enable_mask = ~dev_priv->irq_mask;
4114
	enable_mask = ~dev_priv->irq_mask;
4114
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4115
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4115
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4116
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4116
	enable_mask |= I915_USER_INTERRUPT;
4117
	enable_mask |= I915_USER_INTERRUPT;
4117
 
4118
 
4118
	if (IS_G4X(dev))
4119
	if (IS_G4X(dev))
4119
		enable_mask |= I915_BSD_USER_INTERRUPT;
4120
		enable_mask |= I915_BSD_USER_INTERRUPT;
4120
 
4121
 
4121
	/* Interrupt setup is already guaranteed to be single-threaded, this is
4122
	/* Interrupt setup is already guaranteed to be single-threaded, this is
4122
	 * just to make the assert_spin_locked check happy. */
4123
	 * just to make the assert_spin_locked check happy. */
4123
	spin_lock_irq(&dev_priv->irq_lock);
4124
	spin_lock_irq(&dev_priv->irq_lock);
4124
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4125
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4125
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4126
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4126
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4127
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4127
	spin_unlock_irq(&dev_priv->irq_lock);
4128
	spin_unlock_irq(&dev_priv->irq_lock);
4128
 
4129
 
4129
	/*
4130
	/*
4130
	 * Enable some error detection, note the instruction error mask
4131
	 * Enable some error detection, note the instruction error mask
4131
	 * bit is reserved, so we leave it masked.
4132
	 * bit is reserved, so we leave it masked.
4132
	 */
4133
	 */
4133
	if (IS_G4X(dev)) {
4134
	if (IS_G4X(dev)) {
4134
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
4135
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
4135
			       GM45_ERROR_MEM_PRIV |
4136
			       GM45_ERROR_MEM_PRIV |
4136
			       GM45_ERROR_CP_PRIV |
4137
			       GM45_ERROR_CP_PRIV |
4137
			       I915_ERROR_MEMORY_REFRESH);
4138
			       I915_ERROR_MEMORY_REFRESH);
4138
	} else {
4139
	} else {
4139
		error_mask = ~(I915_ERROR_PAGE_TABLE |
4140
		error_mask = ~(I915_ERROR_PAGE_TABLE |
4140
			       I915_ERROR_MEMORY_REFRESH);
4141
			       I915_ERROR_MEMORY_REFRESH);
4141
	}
4142
	}
4142
	I915_WRITE(EMR, error_mask);
4143
	I915_WRITE(EMR, error_mask);
4143
 
4144
 
4144
	I915_WRITE(IMR, dev_priv->irq_mask);
4145
	I915_WRITE(IMR, dev_priv->irq_mask);
4145
	I915_WRITE(IER, enable_mask);
4146
	I915_WRITE(IER, enable_mask);
4146
	POSTING_READ(IER);
4147
	POSTING_READ(IER);
4147
 
4148
 
4148
	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4149
	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4149
	POSTING_READ(PORT_HOTPLUG_EN);
4150
	POSTING_READ(PORT_HOTPLUG_EN);
4150
 
4151
 
4151
	i915_enable_asle_pipestat(dev);
4152
	i915_enable_asle_pipestat(dev);
4152
 
4153
 
4153
	return 0;
4154
	return 0;
4154
}
4155
}
4155
 
4156
 
4156
static void i915_hpd_irq_setup(struct drm_device *dev)
4157
static void i915_hpd_irq_setup(struct drm_device *dev)
4157
{
4158
{
4158
	struct drm_i915_private *dev_priv = dev->dev_private;
4159
	struct drm_i915_private *dev_priv = dev->dev_private;
4159
	u32 hotplug_en;
4160
	u32 hotplug_en;
4160
 
4161
 
4161
	assert_spin_locked(&dev_priv->irq_lock);
4162
	assert_spin_locked(&dev_priv->irq_lock);
4162
 
4163
 
4163
	/* Note HDMI and DP share hotplug bits */
4164
	/* Note HDMI and DP share hotplug bits */
4164
	/* enable bits are the same for all generations */
4165
	/* enable bits are the same for all generations */
4165
	hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915);
4166
	hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915);
4166
	/* Programming the CRT detection parameters tends
4167
	/* Programming the CRT detection parameters tends
4167
	   to generate a spurious hotplug event about three
4168
	   to generate a spurious hotplug event about three
4168
	   seconds later.  So just do it once.
4169
	   seconds later.  So just do it once.
4169
	*/
4170
	*/
4170
	if (IS_G4X(dev))
4171
	if (IS_G4X(dev))
4171
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4172
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4172
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4173
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4173
 
4174
 
4174
	/* Ignore TV since it's buggy */
4175
	/* Ignore TV since it's buggy */
4175
	i915_hotplug_interrupt_update_locked(dev_priv,
4176
	i915_hotplug_interrupt_update_locked(dev_priv,
4176
					     HOTPLUG_INT_EN_MASK |
4177
					     HOTPLUG_INT_EN_MASK |
4177
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4178
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4178
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4179
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4179
					     hotplug_en);
4180
					     hotplug_en);
4180
}
4181
}
4181
 
4182
 
4182
static irqreturn_t i965_irq_handler(int irq, void *arg)
4183
static irqreturn_t i965_irq_handler(int irq, void *arg)
4183
{
4184
{
4184
	struct drm_device *dev = arg;
4185
	struct drm_device *dev = arg;
4185
	struct drm_i915_private *dev_priv = dev->dev_private;
4186
	struct drm_i915_private *dev_priv = dev->dev_private;
4186
	u32 iir, new_iir;
4187
	u32 iir, new_iir;
4187
	u32 pipe_stats[I915_MAX_PIPES];
4188
	u32 pipe_stats[I915_MAX_PIPES];
4188
	int ret = IRQ_NONE, pipe;
4189
	int ret = IRQ_NONE, pipe;
4189
	u32 flip_mask =
4190
	u32 flip_mask =
4190
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4191
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4191
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4192
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4192
 
4193
 
4193
	if (!intel_irqs_enabled(dev_priv))
4194
	if (!intel_irqs_enabled(dev_priv))
4194
		return IRQ_NONE;
4195
		return IRQ_NONE;
4195
 
4196
 
4196
	iir = I915_READ(IIR);
4197
	iir = I915_READ(IIR);
4197
 
4198
 
4198
	for (;;) {
4199
	for (;;) {
4199
		bool irq_received = (iir & ~flip_mask) != 0;
4200
		bool irq_received = (iir & ~flip_mask) != 0;
4200
		bool blc_event = false;
4201
		bool blc_event = false;
4201
 
4202
 
4202
		/* Can't rely on pipestat interrupt bit in iir as it might
4203
		/* Can't rely on pipestat interrupt bit in iir as it might
4203
		 * have been cleared after the pipestat interrupt was received.
4204
		 * have been cleared after the pipestat interrupt was received.
4204
		 * It doesn't set the bit in iir again, but it still produces
4205
		 * It doesn't set the bit in iir again, but it still produces
4205
		 * interrupts (for non-MSI).
4206
		 * interrupts (for non-MSI).
4206
		 */
4207
		 */
4207
		spin_lock(&dev_priv->irq_lock);
4208
		spin_lock(&dev_priv->irq_lock);
4208
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4209
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4209
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4210
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4210
 
4211
 
4211
		for_each_pipe(dev_priv, pipe) {
4212
		for_each_pipe(dev_priv, pipe) {
4212
			int reg = PIPESTAT(pipe);
4213
			int reg = PIPESTAT(pipe);
4213
			pipe_stats[pipe] = I915_READ(reg);
4214
			pipe_stats[pipe] = I915_READ(reg);
4214
 
4215
 
4215
			/*
4216
			/*
4216
			 * Clear the PIPE*STAT regs before the IIR
4217
			 * Clear the PIPE*STAT regs before the IIR
4217
			 */
4218
			 */
4218
			if (pipe_stats[pipe] & 0x8000ffff) {
4219
			if (pipe_stats[pipe] & 0x8000ffff) {
4219
				I915_WRITE(reg, pipe_stats[pipe]);
4220
				I915_WRITE(reg, pipe_stats[pipe]);
4220
				irq_received = true;
4221
				irq_received = true;
4221
			}
4222
			}
4222
		}
4223
		}
4223
		spin_unlock(&dev_priv->irq_lock);
4224
		spin_unlock(&dev_priv->irq_lock);
4224
 
4225
 
4225
		if (!irq_received)
4226
		if (!irq_received)
4226
			break;
4227
			break;
4227
 
4228
 
4228
		ret = IRQ_HANDLED;
4229
		ret = IRQ_HANDLED;
4229
 
4230
 
4230
		/* Consume port.  Then clear IIR or we'll miss events */
4231
		/* Consume port.  Then clear IIR or we'll miss events */
4231
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
4232
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
4232
			i9xx_hpd_irq_handler(dev);
4233
			i9xx_hpd_irq_handler(dev);
4233
 
4234
 
4234
		I915_WRITE(IIR, iir & ~flip_mask);
4235
		I915_WRITE(IIR, iir & ~flip_mask);
4235
		new_iir = I915_READ(IIR); /* Flush posted writes */
4236
		new_iir = I915_READ(IIR); /* Flush posted writes */
4236
 
4237
 
4237
		if (iir & I915_USER_INTERRUPT)
4238
		if (iir & I915_USER_INTERRUPT)
4238
			notify_ring(&dev_priv->ring[RCS]);
4239
			notify_ring(&dev_priv->ring[RCS]);
4239
		if (iir & I915_BSD_USER_INTERRUPT)
4240
		if (iir & I915_BSD_USER_INTERRUPT)
4240
			notify_ring(&dev_priv->ring[VCS]);
4241
			notify_ring(&dev_priv->ring[VCS]);
4241
 
4242
 
4242
		for_each_pipe(dev_priv, pipe) {
4243
		for_each_pipe(dev_priv, pipe) {
4243
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4244
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4244
			    i915_handle_vblank(dev, pipe, pipe, iir))
4245
			    i915_handle_vblank(dev, pipe, pipe, iir))
4245
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4246
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4246
 
4247
 
4247
			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4248
			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4248
				blc_event = true;
4249
				blc_event = true;
4249
 
4250
 
4250
			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4251
			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4251
				i9xx_pipe_crc_irq_handler(dev, pipe);
4252
				i9xx_pipe_crc_irq_handler(dev, pipe);
4252
 
4253
 
4253
			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4254
			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4254
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4255
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4255
		}
4256
		}
4256
 
4257
 
4257
		if (blc_event || (iir & I915_ASLE_INTERRUPT))
4258
		if (blc_event || (iir & I915_ASLE_INTERRUPT))
4258
			intel_opregion_asle_intr(dev);
4259
			intel_opregion_asle_intr(dev);
4259
 
4260
 
4260
		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4261
		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4261
			gmbus_irq_handler(dev);
4262
			gmbus_irq_handler(dev);
4262
 
4263
 
4263
		/* With MSI, interrupts are only generated when iir
4264
		/* With MSI, interrupts are only generated when iir
4264
		 * transitions from zero to nonzero.  If another bit got
4265
		 * transitions from zero to nonzero.  If another bit got
4265
		 * set while we were handling the existing iir bits, then
4266
		 * set while we were handling the existing iir bits, then
4266
		 * we would never get another interrupt.
4267
		 * we would never get another interrupt.
4267
		 *
4268
		 *
4268
		 * This is fine on non-MSI as well, as if we hit this path
4269
		 * This is fine on non-MSI as well, as if we hit this path
4269
		 * we avoid exiting the interrupt handler only to generate
4270
		 * we avoid exiting the interrupt handler only to generate
4270
		 * another one.
4271
		 * another one.
4271
		 *
4272
		 *
4272
		 * Note that for MSI this could cause a stray interrupt report
4273
		 * Note that for MSI this could cause a stray interrupt report
4273
		 * if an interrupt landed in the time between writing IIR and
4274
		 * if an interrupt landed in the time between writing IIR and
4274
		 * the posting read.  This should be rare enough to never
4275
		 * the posting read.  This should be rare enough to never
4275
		 * trigger the 99% of 100,000 interrupts test for disabling
4276
		 * trigger the 99% of 100,000 interrupts test for disabling
4276
		 * stray interrupts.
4277
		 * stray interrupts.
4277
		 */
4278
		 */
4278
		iir = new_iir;
4279
		iir = new_iir;
4279
	}
4280
	}
4280
 
4281
 
4281
	return ret;
4282
	return ret;
4282
}
4283
}
4283
 
4284
 
4284
static void i965_irq_uninstall(struct drm_device * dev)
4285
static void i965_irq_uninstall(struct drm_device * dev)
4285
{
4286
{
4286
	struct drm_i915_private *dev_priv = dev->dev_private;
4287
	struct drm_i915_private *dev_priv = dev->dev_private;
4287
	int pipe;
4288
	int pipe;
4288
 
4289
 
4289
	if (!dev_priv)
4290
	if (!dev_priv)
4290
		return;
4291
		return;
4291
 
4292
 
4292
	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4293
	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4293
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4294
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4294
 
4295
 
4295
	I915_WRITE(HWSTAM, 0xffffffff);
4296
	I915_WRITE(HWSTAM, 0xffffffff);
4296
	for_each_pipe(dev_priv, pipe)
4297
	for_each_pipe(dev_priv, pipe)
4297
		I915_WRITE(PIPESTAT(pipe), 0);
4298
		I915_WRITE(PIPESTAT(pipe), 0);
4298
	I915_WRITE(IMR, 0xffffffff);
4299
	I915_WRITE(IMR, 0xffffffff);
4299
	I915_WRITE(IER, 0x0);
4300
	I915_WRITE(IER, 0x0);
4300
 
4301
 
4301
	for_each_pipe(dev_priv, pipe)
4302
	for_each_pipe(dev_priv, pipe)
4302
		I915_WRITE(PIPESTAT(pipe),
4303
		I915_WRITE(PIPESTAT(pipe),
4303
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4304
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4304
	I915_WRITE(IIR, I915_READ(IIR));
4305
	I915_WRITE(IIR, I915_READ(IIR));
4305
}
4306
}
4306
 
4307
 
4307
/**
4308
/**
4308
 * intel_irq_init - initializes irq support
4309
 * intel_irq_init - initializes irq support
4309
 * @dev_priv: i915 device instance
4310
 * @dev_priv: i915 device instance
4310
 *
4311
 *
4311
 * This function initializes all the irq support including work items, timers
4312
 * This function initializes all the irq support including work items, timers
4312
 * and all the vtables. It does not setup the interrupt itself though.
4313
 * and all the vtables. It does not setup the interrupt itself though.
4313
 */
4314
 */
4314
void intel_irq_init(struct drm_i915_private *dev_priv)
4315
void intel_irq_init(struct drm_i915_private *dev_priv)
4315
{
4316
{
4316
	struct drm_device *dev = dev_priv->dev;
4317
	struct drm_device *dev = dev_priv->dev;
4317
 
4318
 
4318
//   intel_hpd_init_work(dev_priv);
4319
//   intel_hpd_init_work(dev_priv);
4319
 
4320
 
4320
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4321
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4321
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4322
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4322
 
4323
 
4323
	/* Let's track the enabled rps events */
4324
	/* Let's track the enabled rps events */
4324
	if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
4325
	if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
4325
		/* WaGsvRC0ResidencyMethod:vlv */
4326
		/* WaGsvRC0ResidencyMethod:vlv */
4326
		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
4327
		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
4327
	else
4328
	else
4328
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4329
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4329
 
4330
 
4330
	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
4331
	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
4331
			  i915_hangcheck_elapsed);
4332
			  i915_hangcheck_elapsed);
4332
 
4333
 
4333
 
4334
 
4334
	if (IS_GEN2(dev_priv)) {
4335
	if (IS_GEN2(dev_priv)) {
4335
		dev->max_vblank_count = 0;
4336
		dev->max_vblank_count = 0;
4336
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4337
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4337
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4338
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4338
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4339
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4339
		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4340
		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4340
	} else {
4341
	} else {
4341
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
4342
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
4342
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4343
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4343
	}
4344
	}
4344
 
4345
 
4345
	/*
4346
	/*
4346
	 * Opt out of the vblank disable timer on everything except gen2.
4347
	 * Opt out of the vblank disable timer on everything except gen2.
4347
	 * Gen2 doesn't have a hardware frame counter and so depends on
4348
	 * Gen2 doesn't have a hardware frame counter and so depends on
4348
	 * vblank interrupts to produce sane vblank seuquence numbers.
4349
	 * vblank interrupts to produce sane vblank seuquence numbers.
4349
	 */
4350
	 */
4350
	if (!IS_GEN2(dev_priv))
4351
	if (!IS_GEN2(dev_priv))
4351
		dev->vblank_disable_immediate = true;
4352
		dev->vblank_disable_immediate = true;
4352
 
4353
 
4353
	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4354
	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4354
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4355
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4355
 
4356
 
4356
	if (IS_CHERRYVIEW(dev_priv)) {
4357
	if (IS_CHERRYVIEW(dev_priv)) {
4357
		dev->driver->irq_handler = cherryview_irq_handler;
4358
		dev->driver->irq_handler = cherryview_irq_handler;
4358
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
4359
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
4359
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
4360
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
4360
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
4361
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
4361
		dev->driver->enable_vblank = valleyview_enable_vblank;
4362
		dev->driver->enable_vblank = valleyview_enable_vblank;
4362
		dev->driver->disable_vblank = valleyview_disable_vblank;
4363
		dev->driver->disable_vblank = valleyview_disable_vblank;
4363
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4364
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4364
	} else if (IS_VALLEYVIEW(dev_priv)) {
4365
	} else if (IS_VALLEYVIEW(dev_priv)) {
4365
		dev->driver->irq_handler = valleyview_irq_handler;
4366
		dev->driver->irq_handler = valleyview_irq_handler;
4366
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
4367
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
4367
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
4368
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
4368
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
4369
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
4369
		dev->driver->enable_vblank = valleyview_enable_vblank;
4370
		dev->driver->enable_vblank = valleyview_enable_vblank;
4370
		dev->driver->disable_vblank = valleyview_disable_vblank;
4371
		dev->driver->disable_vblank = valleyview_disable_vblank;
4371
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4372
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4372
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
4373
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
4373
		dev->driver->irq_handler = gen8_irq_handler;
4374
		dev->driver->irq_handler = gen8_irq_handler;
4374
		dev->driver->irq_preinstall = gen8_irq_reset;
4375
		dev->driver->irq_preinstall = gen8_irq_reset;
4375
		dev->driver->irq_postinstall = gen8_irq_postinstall;
4376
		dev->driver->irq_postinstall = gen8_irq_postinstall;
4376
		dev->driver->irq_uninstall = gen8_irq_uninstall;
4377
		dev->driver->irq_uninstall = gen8_irq_uninstall;
4377
		dev->driver->enable_vblank = gen8_enable_vblank;
4378
		dev->driver->enable_vblank = gen8_enable_vblank;
4378
		dev->driver->disable_vblank = gen8_disable_vblank;
4379
		dev->driver->disable_vblank = gen8_disable_vblank;
4379
		if (IS_BROXTON(dev))
4380
		if (IS_BROXTON(dev))
4380
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4381
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4381
		else if (HAS_PCH_SPT(dev))
4382
		else if (HAS_PCH_SPT(dev))
4382
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
4383
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
4383
		else
4384
		else
4384
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4385
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4385
	} else if (HAS_PCH_SPLIT(dev)) {
4386
	} else if (HAS_PCH_SPLIT(dev)) {
4386
		dev->driver->irq_handler = ironlake_irq_handler;
4387
		dev->driver->irq_handler = ironlake_irq_handler;
4387
		dev->driver->irq_preinstall = ironlake_irq_reset;
4388
		dev->driver->irq_preinstall = ironlake_irq_reset;
4388
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
4389
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
4389
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
4390
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
4390
		dev->driver->enable_vblank = ironlake_enable_vblank;
4391
		dev->driver->enable_vblank = ironlake_enable_vblank;
4391
		dev->driver->disable_vblank = ironlake_disable_vblank;
4392
		dev->driver->disable_vblank = ironlake_disable_vblank;
4392
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4393
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4393
	} else {
4394
	} else {
4394
		if (INTEL_INFO(dev_priv)->gen == 2) {
4395
		if (INTEL_INFO(dev_priv)->gen == 2) {
4395
		} else if (INTEL_INFO(dev_priv)->gen == 3) {
4396
		} else if (INTEL_INFO(dev_priv)->gen == 3) {
4396
			dev->driver->irq_preinstall = i915_irq_preinstall;
4397
			dev->driver->irq_preinstall = i915_irq_preinstall;
4397
			dev->driver->irq_postinstall = i915_irq_postinstall;
4398
			dev->driver->irq_postinstall = i915_irq_postinstall;
4398
			dev->driver->irq_uninstall = i915_irq_uninstall;
4399
			dev->driver->irq_uninstall = i915_irq_uninstall;
4399
			dev->driver->irq_handler = i915_irq_handler;
4400
			dev->driver->irq_handler = i915_irq_handler;
4400
		} else {
4401
		} else {
4401
			dev->driver->irq_preinstall = i965_irq_preinstall;
4402
			dev->driver->irq_preinstall = i965_irq_preinstall;
4402
			dev->driver->irq_postinstall = i965_irq_postinstall;
4403
			dev->driver->irq_postinstall = i965_irq_postinstall;
4403
			dev->driver->irq_uninstall = i965_irq_uninstall;
4404
			dev->driver->irq_uninstall = i965_irq_uninstall;
4404
			dev->driver->irq_handler = i965_irq_handler;
4405
			dev->driver->irq_handler = i965_irq_handler;
4405
		}
4406
		}
4406
		if (I915_HAS_HOTPLUG(dev_priv))
4407
		if (I915_HAS_HOTPLUG(dev_priv))
4407
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4408
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4408
		dev->driver->enable_vblank = i915_enable_vblank;
4409
		dev->driver->enable_vblank = i915_enable_vblank;
4409
		dev->driver->disable_vblank = i915_disable_vblank;
4410
		dev->driver->disable_vblank = i915_disable_vblank;
4410
	}
4411
	}
4411
}
4412
}
4412
 
4413
 
4413
/**
4414
/**
4414
 * intel_irq_install - enables the hardware interrupt
4415
 * intel_irq_install - enables the hardware interrupt
4415
 * @dev_priv: i915 device instance
4416
 * @dev_priv: i915 device instance
4416
 *
4417
 *
4417
 * This function enables the hardware interrupt handling, but leaves the hotplug
4418
 * This function enables the hardware interrupt handling, but leaves the hotplug
4418
 * handling still disabled. It is called after intel_irq_init().
4419
 * handling still disabled. It is called after intel_irq_init().
4419
 *
4420
 *
4420
 * In the driver load and resume code we need working interrupts in a few places
4421
 * In the driver load and resume code we need working interrupts in a few places
4421
 * but don't want to deal with the hassle of concurrent probe and hotplug
4422
 * but don't want to deal with the hassle of concurrent probe and hotplug
4422
 * workers. Hence the split into this two-stage approach.
4423
 * workers. Hence the split into this two-stage approach.
4423
 */
4424
 */
4424
int intel_irq_install(struct drm_i915_private *dev_priv)
4425
int intel_irq_install(struct drm_i915_private *dev_priv)
4425
{
4426
{
4426
	/*
4427
	/*
4427
	 * We enable some interrupt sources in our postinstall hooks, so mark
4428
	 * We enable some interrupt sources in our postinstall hooks, so mark
4428
	 * interrupts as enabled _before_ actually enabling them to avoid
4429
	 * interrupts as enabled _before_ actually enabling them to avoid
4429
	 * special cases in our ordering checks.
4430
	 * special cases in our ordering checks.
4430
	 */
4431
	 */
4431
	dev_priv->pm.irqs_enabled = true;
4432
	dev_priv->pm.irqs_enabled = true;
4432
 
4433
 
4433
	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
4434
	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
4434
}
4435
}
4435
 
4436
 
4436
/**
4437
/**
4437
 * intel_irq_uninstall - finilizes all irq handling
4438
 * intel_irq_uninstall - finilizes all irq handling
4438
 * @dev_priv: i915 device instance
4439
 * @dev_priv: i915 device instance
4439
 *
4440
 *
4440
 * This stops interrupt and hotplug handling and unregisters and frees all
4441
 * This stops interrupt and hotplug handling and unregisters and frees all
4441
 * resources acquired in the init functions.
4442
 * resources acquired in the init functions.
4442
 */
4443
 */
4443
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4444
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4444
{
4445
{
4445
//	drm_irq_uninstall(dev_priv->dev);
4446
//	drm_irq_uninstall(dev_priv->dev);
4446
//	intel_hpd_cancel_work(dev_priv);
4447
//	intel_hpd_cancel_work(dev_priv);
4447
	dev_priv->pm.irqs_enabled = false;
4448
	dev_priv->pm.irqs_enabled = false;
4448
}
4449
}
4449
 
4450
 
4450
/**
4451
/**
4451
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4452
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4452
 * @dev_priv: i915 device instance
4453
 * @dev_priv: i915 device instance
4453
 *
4454
 *
4454
 * This function is used to disable interrupts at runtime, both in the runtime
4455
 * This function is used to disable interrupts at runtime, both in the runtime
4455
 * pm and the system suspend/resume code.
4456
 * pm and the system suspend/resume code.
4456
 */
4457
 */
4457
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4458
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4458
{
4459
{
4459
	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
4460
	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
4460
	dev_priv->pm.irqs_enabled = false;
4461
	dev_priv->pm.irqs_enabled = false;
4461
}
4462
}
4462
 
4463
 
4463
/**
4464
/**
4464
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4465
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4465
 * @dev_priv: i915 device instance
4466
 * @dev_priv: i915 device instance
4466
 *
4467
 *
4467
 * This function is used to enable interrupts at runtime, both in the runtime
4468
 * This function is used to enable interrupts at runtime, both in the runtime
4468
 * pm and the system suspend/resume code.
4469
 * pm and the system suspend/resume code.
4469
 */
4470
 */
4470
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4471
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4471
{
4472
{
4472
	dev_priv->pm.irqs_enabled = true;
4473
	dev_priv->pm.irqs_enabled = true;
4473
	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
4474
	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
4474
	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
4475
	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
4475
}
4476
}
4476
 
4477
 
4477
irqreturn_t intel_irq_handler(struct drm_device *dev)
4478
irqreturn_t intel_irq_handler(struct drm_device *dev)
4478
{
4479
{
4479
 
4480
 
4480
//    printf("i915 irq\n");
4481
//    printf("i915 irq\n");
4481
//    printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ;
4482
//    printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ;
4482
 
4483
 
4483
    return dev->driver->irq_handler(0, dev);
4484
    return dev->driver->irq_handler(0, dev);
4484
}
4485
}
4485
 
4486
 
4486
>
4487
>
4487
 
4488
 
4488
>
4489
>
4489
>
4490
>