Subversion Repositories Kolibri OS

Diff of the i915 driver's intel_uncore.c, Rev 5354 → Rev 6084:
@@ -21 +21 @@
  * IN THE SOFTWARE.
  */
 
 #include "i915_drv.h"
 #include "intel_drv.h"
+#include "i915_vgpu.h"
+
+#include 
 
-#define FORCEWAKE_ACK_TIMEOUT_MS 2
+#define FORCEWAKE_ACK_TIMEOUT_MS 50
 
@@ -38 +41 @@
 #define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
 #define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))
 
 #define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
 
+static const char * const forcewake_domain_names[] = {
+	"render",
+	"blitter",
+	"media",
+};
+
+const char *
+intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
+{
+	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);
+
+	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
+		return forcewake_domain_names[id];
+
+	WARN_ON(id);
+
+	return "unknown";
+}
+
 static void
 assert_device_not_suspended(struct drm_i915_private *dev_priv)
 {
 	WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
-	     "Device suspended\n");
+		  "Device suspended\n");
 }
 
-static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
-{
-	/* w/a for a sporadic read returning 0 by waiting for the GT
-	 * thread to wake up.
-	 */
-	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
-				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
-		DRM_ERROR("GT thread status wait timed out\n");
-}
-
-static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
-{
-	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
-	/* something from same cacheline, but !FORCEWAKE */
-	__raw_posting_read(dev_priv, ECOBUS);
-}
-
-static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
-							int fw_engine)
-{
-	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
-			    FORCEWAKE_ACK_TIMEOUT_MS))
-		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
-
-	__raw_i915_write32(dev_priv, FORCEWAKE, 1);
-	/* something from same cacheline, but !FORCEWAKE */
-	__raw_posting_read(dev_priv, ECOBUS);
-
-	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
-			    FORCEWAKE_ACK_TIMEOUT_MS))
-		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
-
-	/* WaRsForcewakeWaitTC0:snb */
-	__gen6_gt_wait_for_thread_c0(dev_priv);
-}
-
-static void __gen7_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
-{
-	__raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
-	/* something from same cacheline, but !FORCEWAKE_MT */
-	__raw_posting_read(dev_priv, ECOBUS);
-}
-
-static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
-							int fw_engine)
-{
-	u32 forcewake_ack;
-
-	if (IS_HASWELL(dev_priv->dev) || IS_BROADWELL(dev_priv->dev))
-		forcewake_ack = FORCEWAKE_ACK_HSW;
-	else
-		forcewake_ack = FORCEWAKE_MT_ACK;
-
-	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
-			    FORCEWAKE_ACK_TIMEOUT_MS))
-		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
+static inline void
+fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
+{
+	WARN_ON(d->reg_set == 0);
+	__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
+}
+
+static inline void
+fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
+{
+//	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
+//	/* something from same cacheline, but !FORCEWAKE */
+//	__raw_posting_read(dev_priv, ECOBUS);
+}
+
+static inline void
+fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
+{
+	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
+			     FORCEWAKE_KERNEL) == 0,
+			    FORCEWAKE_ACK_TIMEOUT_MS))
+		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
+			  intel_uncore_forcewake_domain_to_str(d->id));
+}
+
+static inline void
+fw_domain_get(const struct intel_uncore_forcewake_domain *d)
+{
+	__raw_i915_write32(d->i915, d->reg_set, d->val_set);
+}
+
+static inline void
+fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
+{
+	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
+			     FORCEWAKE_KERNEL),
+			    FORCEWAKE_ACK_TIMEOUT_MS))
+		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
+			  intel_uncore_forcewake_domain_to_str(d->id));
+}
+
+static inline void
+fw_domain_put(const struct intel_uncore_forcewake_domain *d)
+{
+	__raw_i915_write32(d->i915, d->reg_set, d->val_clear);
+}
+
+static inline void
+fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
+{
+	/* something from same cacheline, but not from the set register */
+	if (d->reg_post)
+		__raw_posting_read(d->i915, d->reg_post);
+}
+
+static void
+fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
+{
+	struct intel_uncore_forcewake_domain *d;
+	enum forcewake_domain_id id;
+
+	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
+		fw_domain_wait_ack_clear(d);
+		fw_domain_get(d);
+		fw_domain_wait_ack(d);
+	}
+}
+
+static void
+fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
+{
+	struct intel_uncore_forcewake_domain *d;
+	enum forcewake_domain_id id;
+
+	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
+		fw_domain_put(d);
+		fw_domain_posting_read(d);
+	}
+}
+
+static void
+fw_domains_posting_read(struct drm_i915_private *dev_priv)
+{
+	struct intel_uncore_forcewake_domain *d;
+	enum forcewake_domain_id id;
+
+	/* No need to do for all, just do for first found */
+	for_each_fw_domain(d, dev_priv, id) {
+		fw_domain_posting_read(d);
+		break;
+	}
+}
+
+static void
+fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
+{
+	struct intel_uncore_forcewake_domain *d;
+	enum forcewake_domain_id id;
+
+	if (dev_priv->uncore.fw_domains == 0)
+		return;
+
+	for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
+		fw_domain_reset(d);
+
+	fw_domains_posting_read(dev_priv);
+}
+
+static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
+{
+	/* w/a for a sporadic read returning 0 by waiting for the GT
+	 * thread to wake up.
+	 */
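The rewritten side replaces per-generation get/put routines with small per-domain helpers: a forcewake request is "wait for the ack bit to clear, write the set bit, wait for the ack bit to assert", and fw_domains_get() simply runs that handshake for each domain selected in a bitmask. The following standalone C sketch models only that control flow; the domain struct, the simulated "ack register", and all names are invented for illustration and are not the driver's API:

/* Minimal userspace model of fw_domains_get(): iterate a domain bitmask
 * and run the set/ack handshake per domain. The "hardware" here is a
 * plain variable; the driver uses MMIO accesses with a real timeout.
 */
#include <stdio.h>

#define FORCEWAKE_KERNEL (1 << 0)

enum fw_domain_id { FW_RENDER, FW_BLITTER, FW_MEDIA, FW_DOMAIN_COUNT };

struct fw_domain {
	const char *name;
	unsigned int ack;	/* models the ack register */
};

/* Model: writing the set bit makes the ack bit appear immediately. */
static void fw_domain_get(struct fw_domain *d)
{
	d->ack |= FORCEWAKE_KERNEL;
}

static int fw_domain_ack_set(const struct fw_domain *d)
{
	return d->ack & FORCEWAKE_KERNEL;
}

static void fw_domains_get(struct fw_domain *domains, unsigned int mask)
{
	for (int id = 0; id < FW_DOMAIN_COUNT; id++) {
		if (!(mask & (1 << id)))
			continue;
		/* the driver first waits for the ack bit to clear here */
		fw_domain_get(&domains[id]);
		if (!fw_domain_ack_set(&domains[id]))
			printf("%s: timed out waiting for forcewake ack\n",
			       domains[id].name);
	}
}

int main(void)
{
	struct fw_domain d[FW_DOMAIN_COUNT] = {
		{ "render", 0 }, { "blitter", 0 }, { "media", 0 },
	};

	fw_domains_get(d, (1 << FW_RENDER) | (1 << FW_MEDIA));
	printf("render ack=%u media ack=%u blitter ack=%u\n",
	       d[FW_RENDER].ack, d[FW_MEDIA].ack, d[FW_BLITTER].ack);
	return 0;
}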
@@ -124 +203 @@
 	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
 	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
 		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
 }
 
-static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
-							int fw_engine)
+static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
+				     enum forcewake_domains fw_domains)
 {
-	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
-	/* something from same cacheline, but !FORCEWAKE */
-	__raw_posting_read(dev_priv, ECOBUS);
+	fw_domains_put(dev_priv, fw_domains);
 	gen6_gt_check_fifodbg(dev_priv);
 }
 
-static void __gen7_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
-							int fw_engine)
+static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
 {
-	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
-			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
-	/* something from same cacheline, but !FORCEWAKE_MT */
-	__raw_posting_read(dev_priv, ECOBUS);
+	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);
 
-	if (IS_GEN7(dev_priv->dev))
-	gen6_gt_check_fifodbg(dev_priv);
+	return count & GT_FIFO_FREE_ENTRIES_MASK;
 }
 
 static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
 {
 	int ret = 0;
 
 	/* On VLV, FIFO will be shared by both SW and HW.
 	 * So, we need to read the FREE_ENTRIES everytime */
 	if (IS_VALLEYVIEW(dev_priv->dev))
-		dev_priv->uncore.fifo_count =
-			__raw_i915_read32(dev_priv, GTFIFOCTL) &
-						GT_FIFO_FREE_ENTRIES_MASK;
+		dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);
 
 	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
 		int loop = 500;
-		u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
+		u32 fifo = fifo_free_entries(dev_priv);
+
 		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
 			udelay(10);
-			fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
+			fifo = fifo_free_entries(dev_priv);
 		}
 		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
 			++ret;
 		dev_priv->uncore.fifo_count = fifo;
 	}
 	dev_priv->uncore.fifo_count--;
 
 	return ret;
 }
 
-static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
-{
-	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
-			   _MASKED_BIT_DISABLE(0xffff));
-	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
-			   _MASKED_BIT_DISABLE(0xffff));
-	/* something from same cacheline, but !FORCEWAKE_VLV */
-	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
-}
-
-static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
-						int fw_engine)
-{
-	/* Check for Render Engine */
-	if (FORCEWAKE_RENDER & fw_engine) {
-		if (wait_for_atomic((__raw_i915_read32(dev_priv,
-						FORCEWAKE_ACK_VLV) &
-						FORCEWAKE_KERNEL) == 0,
-			    FORCEWAKE_ACK_TIMEOUT_MS))
-			DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");
-
-	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
-			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
-
-		if (wait_for_atomic((__raw_i915_read32(dev_priv,
-						FORCEWAKE_ACK_VLV) &
-						FORCEWAKE_KERNEL),
-					FORCEWAKE_ACK_TIMEOUT_MS))
-			DRM_ERROR("Timed out: waiting for Render to ack.\n");
-	}
-
-	/* Check for Media Engine */
-	if (FORCEWAKE_MEDIA & fw_engine) {
-		if (wait_for_atomic((__raw_i915_read32(dev_priv,
-						FORCEWAKE_ACK_MEDIA_VLV) &
-						FORCEWAKE_KERNEL) == 0,
-					FORCEWAKE_ACK_TIMEOUT_MS))
-			DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");
-
-	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
-			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
-
-		if (wait_for_atomic((__raw_i915_read32(dev_priv,
-						FORCEWAKE_ACK_MEDIA_VLV) &
-			     FORCEWAKE_KERNEL),
-			    FORCEWAKE_ACK_TIMEOUT_MS))
-			DRM_ERROR("Timed out: waiting for media to ack.\n");
-	}
-}
-
-static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
-					int fw_engine)
-{
-
-	/* Check for Render Engine */
-	if (FORCEWAKE_RENDER & fw_engine)
-	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
-			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
-
-
-	/* Check for Media Engine */
-	if (FORCEWAKE_MEDIA & fw_engine)
-	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
-			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
-
-	/* something from same cacheline, but !FORCEWAKE_VLV */
-	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
-	if (!IS_CHERRYVIEW(dev_priv->dev))
-	gen6_gt_check_fifodbg(dev_priv);
-}
-
-static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
-{
-	unsigned long irqflags;
-
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
-	if (fw_engine & FORCEWAKE_RENDER &&
-	    dev_priv->uncore.fw_rendercount++ != 0)
-		fw_engine &= ~FORCEWAKE_RENDER;
-	if (fw_engine & FORCEWAKE_MEDIA &&
-	    dev_priv->uncore.fw_mediacount++ != 0)
-		fw_engine &= ~FORCEWAKE_MEDIA;
-
-	if (fw_engine)
-		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine);
-
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
-{
-	unsigned long irqflags;
-
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
-	if (fw_engine & FORCEWAKE_RENDER) {
-		WARN_ON(!dev_priv->uncore.fw_rendercount);
-		if (--dev_priv->uncore.fw_rendercount != 0)
-			fw_engine &= ~FORCEWAKE_RENDER;
-	}
-
-	if (fw_engine & FORCEWAKE_MEDIA) {
-		WARN_ON(!dev_priv->uncore.fw_mediacount);
-		if (--dev_priv->uncore.fw_mediacount != 0)
-			fw_engine &= ~FORCEWAKE_MEDIA;
-	}
-
-	if (fw_engine)
-		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw_engine);
-
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void __gen9_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
-{
-	__raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
-			_MASKED_BIT_DISABLE(0xffff));
-
-	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
-			_MASKED_BIT_DISABLE(0xffff));
-
-	__raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
-			_MASKED_BIT_DISABLE(0xffff));
-}
-
-static void
-__gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
-{
-	/* Check for Render Engine */
-	if (FORCEWAKE_RENDER & fw_engine) {
-		if (wait_for_atomic((__raw_i915_read32(dev_priv,
-						FORCEWAKE_ACK_RENDER_GEN9) &
-						FORCEWAKE_KERNEL) == 0,
-					FORCEWAKE_ACK_TIMEOUT_MS))
-			DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");
-
-		__raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
-				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
-
-		if (wait_for_atomic((__raw_i915_read32(dev_priv,
-						FORCEWAKE_ACK_RENDER_GEN9) &
-						FORCEWAKE_KERNEL),
-					FORCEWAKE_ACK_TIMEOUT_MS))
-			DRM_ERROR("Timed out: waiting for Render to ack.\n");
-	}
-
-	/* Check for Media Engine */
-	if (FORCEWAKE_MEDIA & fw_engine) {
-		if (wait_for_atomic((__raw_i915_read32(dev_priv,
-						FORCEWAKE_ACK_MEDIA_GEN9) &
-						FORCEWAKE_KERNEL) == 0,
-					FORCEWAKE_ACK_TIMEOUT_MS))
-			DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");
-
-		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
-				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
-
-		if (wait_for_atomic((__raw_i915_read32(dev_priv,
-						FORCEWAKE_ACK_MEDIA_GEN9) &
-						FORCEWAKE_KERNEL),
-					FORCEWAKE_ACK_TIMEOUT_MS))
-			DRM_ERROR("Timed out: waiting for Media to ack.\n");
-	}
-
-	/* Check for Blitter Engine */
-	if (FORCEWAKE_BLITTER & fw_engine) {
-		if (wait_for_atomic((__raw_i915_read32(dev_priv,
-						FORCEWAKE_ACK_BLITTER_GEN9) &
-						FORCEWAKE_KERNEL) == 0,
-					FORCEWAKE_ACK_TIMEOUT_MS))
-			DRM_ERROR("Timed out: Blitter forcewake old ack to clear.\n");
-
-		__raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
-				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
-
-		if (wait_for_atomic((__raw_i915_read32(dev_priv,
-						FORCEWAKE_ACK_BLITTER_GEN9) &
-						FORCEWAKE_KERNEL),
-					FORCEWAKE_ACK_TIMEOUT_MS))
-			DRM_ERROR("Timed out: waiting for Blitter to ack.\n");
-	}
-}
-
-static void
-__gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
-{
-	/* Check for Render Engine */
-	if (FORCEWAKE_RENDER & fw_engine)
-		__raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
-				_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
-
-	/* Check for Media Engine */
-	if (FORCEWAKE_MEDIA & fw_engine)
-		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
-				_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
-
-	/* Check for Blitter Engine */
-	if (FORCEWAKE_BLITTER & fw_engine)
-		__raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
-				_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
-}
-
-static void
-gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
-{
-	unsigned long irqflags;
-
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
-	if (FORCEWAKE_RENDER & fw_engine) {
-		if (dev_priv->uncore.fw_rendercount++ == 0)
-			dev_priv->uncore.funcs.force_wake_get(dev_priv,
-							FORCEWAKE_RENDER);
-	}
-
-	if (FORCEWAKE_MEDIA & fw_engine) {
-		if (dev_priv->uncore.fw_mediacount++ == 0)
-			dev_priv->uncore.funcs.force_wake_get(dev_priv,
-							FORCEWAKE_MEDIA);
-	}
-
-	if (FORCEWAKE_BLITTER & fw_engine) {
-		if (dev_priv->uncore.fw_blittercount++ == 0)
-			dev_priv->uncore.funcs.force_wake_get(dev_priv,
-							FORCEWAKE_BLITTER);
-	}
-
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void
-gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
-{
-	unsigned long irqflags;
-
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
-	if (FORCEWAKE_RENDER & fw_engine) {
-		WARN_ON(dev_priv->uncore.fw_rendercount == 0);
-		if (--dev_priv->uncore.fw_rendercount == 0)
-			dev_priv->uncore.funcs.force_wake_put(dev_priv,
-							FORCEWAKE_RENDER);
-	}
-
-	if (FORCEWAKE_MEDIA & fw_engine) {
-		WARN_ON(dev_priv->uncore.fw_mediacount == 0);
-		if (--dev_priv->uncore.fw_mediacount == 0)
-			dev_priv->uncore.funcs.force_wake_put(dev_priv,
-							FORCEWAKE_MEDIA);
-	}
-
-	if (FORCEWAKE_BLITTER & fw_engine) {
-		WARN_ON(dev_priv->uncore.fw_blittercount == 0);
-		if (--dev_priv->uncore.fw_blittercount == 0)
-			dev_priv->uncore.funcs.force_wake_put(dev_priv,
-							FORCEWAKE_BLITTER);
-	}
-
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void gen6_force_wake_timer(unsigned long arg)
-{
-	struct drm_i915_private *dev_priv = (void *)arg;
-	unsigned long irqflags;
-
-	assert_device_not_suspended(dev_priv);
-
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-	WARN_ON(!dev_priv->uncore.forcewake_count);
-
-	if (--dev_priv->uncore.forcewake_count == 0)
-		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-
-	intel_runtime_pm_put(dev_priv);
-}
-
-void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long irqflags;
-
-	if (del_timer_sync(&dev_priv->uncore.force_wake_timer))
-		gen6_force_wake_timer((unsigned long)dev_priv);
-
-	/* Hold uncore.lock across reset to prevent any register access
-	 * with forcewake not set correctly
-	 */
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
-	if (IS_VALLEYVIEW(dev))
-		vlv_force_wake_reset(dev_priv);
-	else if (IS_GEN6(dev) || IS_GEN7(dev))
-		__gen6_gt_force_wake_reset(dev_priv);
-
-	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
-		__gen7_gt_force_wake_mt_reset(dev_priv);
-
-	if (IS_GEN9(dev))
-		__gen9_gt_force_wake_mt_reset(dev_priv);
-
-	if (restore) { /* If reset with a user forcewake, try to restore */
-		unsigned fw = 0;
-
-		if (IS_VALLEYVIEW(dev)) {
-			if (dev_priv->uncore.fw_rendercount)
-				fw |= FORCEWAKE_RENDER;
-
-			if (dev_priv->uncore.fw_mediacount)
-				fw |= FORCEWAKE_MEDIA;
-		} else if (IS_GEN9(dev)) {
-			if (dev_priv->uncore.fw_rendercount)
-				fw |= FORCEWAKE_RENDER;
-
-			if (dev_priv->uncore.fw_mediacount)
-				fw |= FORCEWAKE_MEDIA;
-
-			if (dev_priv->uncore.fw_blittercount)
-				fw |= FORCEWAKE_BLITTER;
-		} else {
-			if (dev_priv->uncore.forcewake_count)
-				fw = FORCEWAKE_ALL;
-		}
-
-		if (fw)
-			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
-
-		if (IS_GEN6(dev) || IS_GEN7(dev))
-			dev_priv->uncore.fifo_count =
-				__raw_i915_read32(dev_priv, GTFIFOCTL) &
-				GT_FIFO_FREE_ENTRIES_MASK;
-	}
+static void intel_uncore_fw_release_timer(unsigned long arg)
+{
+	struct intel_uncore_forcewake_domain *domain = (void *)arg;
+	unsigned long irqflags;
+
+	assert_device_not_suspended(domain->i915);
+
+	spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
+	if (WARN_ON(domain->wake_count == 0))
+		domain->wake_count++;
+
+	if (--domain->wake_count == 0)
+		domain->i915->uncore.funcs.force_wake_put(domain->i915,
+							  1 << domain->id);
+
+	spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
+}
+
+void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long irqflags;
+	struct intel_uncore_forcewake_domain *domain;
+	int retry_count = 100;
+	enum forcewake_domain_id id;
+	enum forcewake_domains fw = 0, active_domains;
+
+	/* Hold uncore.lock across reset to prevent any register access
+	 * with forcewake not set correctly. Wait until all pending
+	 * timers are run before holding.
+	 */
+	while (1) {
+		active_domains = 0;
+
+		for_each_fw_domain(domain, dev_priv, id) {
+			if (del_timer_sync(&domain->timer) == 0)
+				continue;
+
+			intel_uncore_fw_release_timer((unsigned long)domain);
+		}
+
+		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+		for_each_fw_domain(domain, dev_priv, id) {
+//           if (timer_pending(&domain->timer))
+//				active_domains |= (1 << id);
+	}
+
+		if (active_domains == 0)
+			break;
+
+		if (--retry_count == 0) {
+			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
+			break;
+		}
+
+		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+        change_task();
+	}
+
+	WARN_ON(active_domains);
+
+	for_each_fw_domain(domain, dev_priv, id)
+		if (domain->wake_count)
+			fw |= 1 << id;
+
+	if (fw)
+		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
+
+	fw_domains_reset(dev_priv, FORCEWAKE_ALL);
+
+	if (restore) { /* If reset with a user forcewake, try to restore */
+		if (fw)
+			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
+
+		if (IS_GEN6(dev) || IS_GEN7(dev))
+			dev_priv->uncore.fifo_count =
+				fifo_free_entries(dev_priv);
+	}
+
+	if (!restore)
+		assert_forcewakes_inactive(dev_priv);
+
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void intel_uncore_ellc_detect(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if ((IS_HASWELL(dev) || IS_BROADWELL(dev) ||
+	     INTEL_INFO(dev)->gen >= 9) &&
+	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) {
+		/* The docs do not explain exactly how the calculation can be
+		 * made. It is somewhat guessable, but for now, it's always
+		 * 128MB.
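Two mechanisms interact in the new code above: each domain keeps a wake_count reference count, and the final put does not drop the hardware wakeup immediately; it re-adds one reference and defers the real release to a timer callback (intel_uncore_fw_release_timer), which this KolibriOS port also invokes directly from the reset path. Below is a minimal userspace C model of that refcount-with-deferred-release pattern; the struct, function names, and the direct call standing in for the timer are all illustrative assumptions, not the driver's API:

/* Model: the last put keeps one reference and hands it to a "timer". */
#include <assert.h>
#include <stdio.h>

struct domain {
	unsigned int wake_count;
	int hw_awake;		/* models the forcewake bit in hardware */
};

static void release_timer(struct domain *d)	/* timer callback */
{
	assert(d->wake_count > 0);
	if (--d->wake_count == 0)
		d->hw_awake = 0;	/* funcs.force_wake_put(...) */
}

static void get(struct domain *d)
{
	if (d->wake_count++ == 0)
		d->hw_awake = 1;	/* funcs.force_wake_get(...) */
}

static void put(struct domain *d)
{
	assert(d->wake_count > 0);
	if (--d->wake_count)
		return;
	d->wake_count++;	/* keep one reference for the timer */
	release_timer(d);	/* the driver arms a timer instead */
}

int main(void)
{
	struct domain d = { 0, 0 };

	get(&d);
	get(&d);
	put(&d);	/* still awake: one reference left */
	printf("awake=%d count=%u\n", d.hw_awake, d.wake_count);
	put(&d);	/* last put: deferred release runs */
	printf("awake=%d count=%u\n", d.hw_awake, d.wake_count);
	return 0;
}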
@@ -549 +382 @@
 {
 	/* BIOS often leaves RC6 enabled, but disable it for hw init */
 	intel_disable_gt_powersave(dev);
 }
 
-/*
- * Generally this is called implicitly by the register read function. However,
- * if some sequence requires the GT to not power down then this function should
- * be called at the beginning of the sequence followed by a call to
- * gen6_gt_force_wake_put() at the end of the sequence.
- */
-void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
+static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
+					 enum forcewake_domains fw_domains)
 {
-	unsigned long irqflags;
+	struct intel_uncore_forcewake_domain *domain;
+	enum forcewake_domain_id id;
 
 	if (!dev_priv->uncore.funcs.force_wake_get)
 		return;
 
-	intel_runtime_pm_get(dev_priv);
+	fw_domains &= dev_priv->uncore.fw_domains;
 
-	/* Redirect to Gen9 specific routine */
-	if (IS_GEN9(dev_priv->dev))
-		return gen9_force_wake_get(dev_priv, fw_engine);
+	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
+		if (domain->wake_count++)
+			fw_domains &= ~(1 << id);
+	}
 
-	/* Redirect to VLV specific routine */
-	if (IS_VALLEYVIEW(dev_priv->dev))
-		return vlv_force_wake_get(dev_priv, fw_engine);
+	if (fw_domains)
+		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+}
 
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-	if (dev_priv->uncore.forcewake_count++ == 0)
-		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+/**
+ * intel_uncore_forcewake_get - grab forcewake domain references
+ * @dev_priv: i915 device instance
+ * @fw_domains: forcewake domains to get reference on
+ *
+ * This function can be used get GT's forcewake domain references.
+ * Normal register access will handle the forcewake domains automatically.
+ * However if some sequence requires the GT to not power down a particular
+ * forcewake domains this function should be called at the beginning of the
+ * sequence. And subsequently the reference should be dropped by symmetric
+ * call to intel_unforce_forcewake_put(). Usually caller wants all the domains
+ * to be kept awake so the @fw_domains would be then FORCEWAKE_ALL.
+ */
+void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
+				enum forcewake_domains fw_domains)
+{
+	unsigned long irqflags;
+
+	if (!dev_priv->uncore.funcs.force_wake_get)
+		return;
+
+	WARN_ON(dev_priv->pm.suspended);
+
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	__intel_uncore_forcewake_get(dev_priv, fw_domains);
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
-/*
- * see gen6_gt_force_wake_get()
- */
-void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
+/**
+ * intel_uncore_forcewake_get__locked - grab forcewake domain references
+ * @dev_priv: i915 device instance
+ * @fw_domains: forcewake domains to get reference on
+ *
+ * See intel_uncore_forcewake_get(). This variant places the onus
+ * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
+ */
+void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
+					enum forcewake_domains fw_domains)
 {
-	unsigned long irqflags;
-	bool delayed = false;
+	assert_spin_locked(&dev_priv->uncore.lock);
 
-	if (!dev_priv->uncore.funcs.force_wake_put)
+	if (!dev_priv->uncore.funcs.force_wake_get)
 		return;
 
-	/* Redirect to Gen9 specific routine */
-	if (IS_GEN9(dev_priv->dev)) {
-		gen9_force_wake_put(dev_priv, fw_engine);
-		goto out;
-	}
+	__intel_uncore_forcewake_get(dev_priv, fw_domains);
+}
 
-	/* Redirect to VLV specific routine */
-	if (IS_VALLEYVIEW(dev_priv->dev)) {
-		vlv_force_wake_put(dev_priv, fw_engine);
-		goto out;
-	}
+static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
+					 enum forcewake_domains fw_domains)
+{
+	struct intel_uncore_forcewake_domain *domain;
+	enum forcewake_domain_id id;
 
+	if (!dev_priv->uncore.funcs.force_wake_put)
+		return;
 
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-	WARN_ON(!dev_priv->uncore.forcewake_count);
+	fw_domains &= dev_priv->uncore.fw_domains;
 
-	if (--dev_priv->uncore.forcewake_count == 0) {
-		dev_priv->uncore.forcewake_count++;
-		delayed = true;
-//       mod_timer_pinned(&dev_priv->uncore.force_wake_timer,
-//                GetTimerTicks() + 1);
-	}
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
+		if (WARN_ON(domain->wake_count == 0))
+			continue;
+
+		if (--domain->wake_count)
+			continue;
+
+		domain->wake_count++;
+		fw_domain_arm_timer(domain);
+	}
+}
 
-out:
+/**
+ * intel_uncore_forcewake_put - release a forcewake domain reference
+ * @dev_priv: i915 device instance
+ * @fw_domains: forcewake domains to put references
+ *
+ * This function drops the device-level forcewakes for specified
+ * domains obtained by intel_uncore_forcewake_get().
+ */
+void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
+				enum forcewake_domains fw_domains)
+{
+	unsigned long irqflags;
+
+	if (!dev_priv->uncore.funcs.force_wake_put)
+		return;
+
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	__intel_uncore_forcewake_put(dev_priv, fw_domains);
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+/**
+ * intel_uncore_forcewake_put__locked - grab forcewake domain references
+ * @dev_priv: i915 device instance
+ * @fw_domains: forcewake domains to get reference on
+ *
+ * See intel_uncore_forcewake_put(). This variant places the onus
+ * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
+ */
+void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
+					enum forcewake_domains fw_domains)
+{
+	assert_spin_locked(&dev_priv->uncore.lock);
+
+	if (!dev_priv->uncore.funcs.force_wake_put)
+		return;
+
+	__intel_uncore_forcewake_put(dev_priv, fw_domains);
+}
 
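The kernel-doc above introduces the __locked variants: the plain entry points take dev_priv->uncore.lock themselves, while the __locked ones assume the caller already holds it so several operations can be batched under one lock acquisition. A minimal userspace C model of this pattern, using a pthread mutex and a flag in place of assert_spin_locked() (all names here are invented for illustration):

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t uncore_lock = PTHREAD_MUTEX_INITIALIZER;
static int lock_held;		/* stand-in for assert_spin_locked() */
static unsigned int wake_count;

static void __forcewake_get(void)	/* caller must hold uncore_lock */
{
	assert(lock_held);
	wake_count++;
}

static void forcewake_get(void)		/* unlocked variant */
{
	pthread_mutex_lock(&uncore_lock);
	lock_held = 1;
	__forcewake_get();
	lock_held = 0;
	pthread_mutex_unlock(&uncore_lock);
}

static void forcewake_get_locked(void)	/* the __locked variant */
{
	__forcewake_get();
}

int main(void)
{
	forcewake_get();

	pthread_mutex_lock(&uncore_lock);
	lock_held = 1;
	forcewake_get_locked();	/* batch more work under one lock */
	lock_held = 0;
	pthread_mutex_unlock(&uncore_lock);

	printf("wake_count=%u\n", wake_count);
	return 0;
}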
@@ -645 +543 @@
 	 REG_RANGE((reg), 0x22000, 0x24000) || \
 	 REG_RANGE((reg), 0x30000, 0x40000))
 
 #define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
 	(REG_RANGE((reg), 0x2000, 0x4000) || \
-	 REG_RANGE((reg), 0x5000, 0x8000) || \
+	 REG_RANGE((reg), 0x5200, 0x8000) || \
 	 REG_RANGE((reg), 0x8300, 0x8500) || \
-	 REG_RANGE((reg), 0xB000, 0xC000) || \
+	 REG_RANGE((reg), 0xB000, 0xB480) || \
 	 REG_RANGE((reg), 0xE000, 0xE800))
 
 #define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
 	(REG_RANGE((reg), 0x8800, 0x8900) || \
 	 REG_RANGE((reg), 0xD000, 0xD800) || \
 	 REG_RANGE((reg), 0x12000, 0x14000) || \
 	 REG_RANGE((reg), 0x1A000, 0x1C000) || \
 	 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
-	 REG_RANGE((reg), 0x30000, 0x40000))
+	 REG_RANGE((reg), 0x30000, 0x38000))
 
 #define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
 	(REG_RANGE((reg), 0x4000, 0x5000) || \
 	 REG_RANGE((reg), 0x8000, 0x8300) || \
 	 REG_RANGE((reg), 0x8500, 0x8600) || \
 	 REG_RANGE((reg), 0x9000, 0xB000) || \
-	 REG_RANGE((reg), 0xC000, 0xC800) || \
-	 REG_RANGE((reg), 0xF000, 0x10000) || \
-	 REG_RANGE((reg), 0x14000, 0x14400) || \
-	 REG_RANGE((reg), 0x22000, 0x24000))
+	 REG_RANGE((reg), 0xF000, 0x10000))
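Each forcewake domain owns a set of MMIO offset windows, and the accessors below pick the domain(s) to wake by testing which window a register offset falls in. A small standalone C sketch of that classification, assuming REG_RANGE()'s start-inclusive, end-exclusive semantics; the windows are a hand-picked subset of the CHV macros above and the function name is invented:

#include <stdint.h>
#include <stdio.h>

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

#define FW_RENDER (1 << 0)
#define FW_MEDIA  (1 << 1)

static unsigned int chv_domains_for_reg(uint32_t reg)
{
	if (REG_RANGE(reg, 0x2000, 0x4000) ||
	    REG_RANGE(reg, 0x5200, 0x8000))
		return FW_RENDER;
	if (REG_RANGE(reg, 0x8800, 0x8900) ||
	    REG_RANGE(reg, 0xD000, 0xD800))
		return FW_MEDIA;
	if (REG_RANGE(reg, 0x4000, 0x5000))
		return FW_RENDER | FW_MEDIA;	/* "common" window */
	return 0;				/* no forcewake needed */
}

int main(void)
{
	printf("0x2100  -> %u\n", chv_domains_for_reg(0x2100));		/* render */
	printf("0xD100  -> %u\n", chv_domains_for_reg(0xD100));		/* media */
	printf("0x4100  -> %u\n", chv_domains_for_reg(0x4100));		/* both */
	printf("0x50000 -> %u\n", chv_domains_for_reg(0x50000));	/* none */
	return 0;
}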
Line 723... Line 618...
723
 
618
 
724
	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
619
	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
725
		WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
620
		WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
726
		     when, op, reg);
621
		     when, op, reg);
-
 
622
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
727
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
623
		i915.mmio_debug--; /* Only report the first N failures */
728
	}
624
	}
Line 729... Line 625...
729
}
625
}
730
 
626
 
731
static void
627
static void
-
 
628
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
-
 
629
{
732
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
630
	static bool mmio_debug_once = true;
733
{
631
 
Line 734... Line 632...
734
	if (i915.mmio_debug)
632
	if (i915.mmio_debug || !mmio_debug_once)
-
 
633
		return;
-
 
634
 
735
		return;
635
	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
736
 
636
		DRM_DEBUG("Unclaimed register detected, "
-
 
637
			  "enabling oneshot unclaimed register reporting. "
737
	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
638
			  "Please use i915.mmio_debug=N for more information.\n");
738
		DRM_ERROR("Unclaimed register detected. Please use the i915.mmio_debug=1 to debug this problem.");
639
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
Line 739... Line 640...
739
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
640
		i915.mmio_debug = mmio_debug_once--;
740
	}
-
 
741
}
641
	}
742
 
642
}
743
#define REG_READ_HEADER(x) \
-
 
Line 744... Line 643...
744
	unsigned long irqflags; \
643
 
745
	u##x val = 0; \
-
 
746
	assert_device_not_suspended(dev_priv); \
644
#define GEN2_READ_HEADER(x) \
747
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
645
	u##x val = 0; \
Line 748... Line 646...
748
 
646
	assert_device_not_suspended(dev_priv);
749
#define REG_READ_FOOTER \
647
 
750
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
648
#define GEN2_READ_FOOTER \
751
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
649
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
752
	return val
650
	return val
753
 
651
 
754
#define __gen4_read(x) \
652
#define __gen2_read(x) \
Line 755... Line 653...
755
static u##x \
653
static u##x \
756
gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
654
gen2_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
757
	REG_READ_HEADER(x); \
655
	GEN2_READ_HEADER(x); \
758
	val = __raw_i915_read##x(dev_priv, reg); \
656
	val = __raw_i915_read##x(dev_priv, reg); \
759
	REG_READ_FOOTER; \
657
	GEN2_READ_FOOTER; \
760
}
658
}
-
 
659
 
-
 
660
#define __gen5_read(x) \
-
 
661
static u##x \
-
 
662
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
-
 
663
	GEN2_READ_HEADER(x); \
-
 
664
	ilk_dummy_write(dev_priv); \
-
 
665
	val = __raw_i915_read##x(dev_priv, reg); \
-
 
666
	GEN2_READ_FOOTER; \
-
 
667
}
-
 
668
 
-
 
669
__gen5_read(8)
-
 
670
__gen5_read(16)
-
 
671
__gen5_read(32)
-
 
672
__gen5_read(64)
-
 
673
__gen2_read(8)
-
 
674
__gen2_read(16)
-
 
675
__gen2_read(32)
-
 
676
__gen2_read(64)
-
 
677
 
-
 
678
#undef __gen5_read
-
 
679
#undef __gen2_read
-
 
680
 
-
 
681
#undef GEN2_READ_FOOTER
-
 
682
#undef GEN2_READ_HEADER
-
 
683
 
-
 
684
#define GEN6_READ_HEADER(x) \
-
 
685
	unsigned long irqflags; \
-
 
686
	u##x val = 0; \
-
 
687
	assert_device_not_suspended(dev_priv); \
-
 
688
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
-
 
689
 
-
 
690
#define GEN6_READ_FOOTER \
-
 
691
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
-
 
692
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
-
 
693
	return val
-
 
694
 
-
 
695
static inline void __force_wake_get(struct drm_i915_private *dev_priv,
-
 
696
				    enum forcewake_domains fw_domains)
-
 
697
{
-
 
698
	struct intel_uncore_forcewake_domain *domain;
-
 
699
	enum forcewake_domain_id id;
-
 
700
 
-
 
701
	if (WARN_ON(!fw_domains))
-
 
702
		return;
-
 
703
 
-
 
704
	/* Ideally GCC would be constant-fold and eliminate this loop */
-
 
705
	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
-
 
706
		if (domain->wake_count) {
-
 
707
			fw_domains &= ~(1 << id);
-
 
708
			continue;
-
 
709
		}
-
 
710
 
-
 
711
		domain->wake_count++;
-
 
712
		fw_domain_arm_timer(domain);
-
 
713
	}
-
 
714
 
-
 
715
	if (fw_domains)
-
 
716
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
761
 
717
}
762
#define __gen5_read(x) \
718
 
Line 763... Line 719...
763
static u##x \
719
#define __vgpu_read(x) \
764
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
720
static u##x \
765
	REG_READ_HEADER(x); \
721
vgpu_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
766
		ilk_dummy_write(dev_priv); \
722
	GEN6_READ_HEADER(x); \
767
	val = __raw_i915_read##x(dev_priv, reg); \
723
	val = __raw_i915_read##x(dev_priv, reg); \
768
	REG_READ_FOOTER; \
-
 
769
}
724
	GEN6_READ_FOOTER; \
770
 
-
 
771
#define __gen6_read(x) \
-
 
772
static u##x \
725
}
773
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
-
 
774
	REG_READ_HEADER(x); \
-
 
775
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
-
 
776
	if (dev_priv->uncore.forcewake_count == 0 && \
726
 
777
	    NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
-
 
778
			dev_priv->uncore.funcs.force_wake_get(dev_priv, \
727
#define __gen6_read(x) \
779
							FORCEWAKE_ALL); \
728
static u##x \
780
		val = __raw_i915_read##x(dev_priv, reg); \
729
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
Line 781... Line 730...
781
			dev_priv->uncore.funcs.force_wake_put(dev_priv, \
730
	GEN6_READ_HEADER(x); \
782
							FORCEWAKE_ALL); \
731
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
783
	} else { \
732
	if (NEEDS_FORCE_WAKE(reg)) \
784
		val = __raw_i915_read##x(dev_priv, reg); \
-
 
785
	} \
733
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
786
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
734
	val = __raw_i915_read##x(dev_priv, reg); \
787
	REG_READ_FOOTER; \
-
 
788
}
735
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
789
 
736
	GEN6_READ_FOOTER; \
790
#define __vlv_read(x) \
-
 
791
static u##x \
-
 
792
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
-
 
793
	unsigned fwengine = 0; \
-
 
794
	REG_READ_HEADER(x); \
737
}
795
	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) {   \
738
 
796
		if (dev_priv->uncore.fw_rendercount == 0) \
-
 
797
		fwengine = FORCEWAKE_RENDER;            \
-
 
798
	} else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
739
#define __vlv_read(x) \
799
		if (dev_priv->uncore.fw_mediacount == 0) \
740
static u##x \
Line 800... Line 741...
800
		fwengine = FORCEWAKE_MEDIA;             \
741
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
801
	}                                               \
742
	GEN6_READ_HEADER(x); \
802
	if (fwengine) \
743
	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) \
803
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
-
 
804
		val = __raw_i915_read##x(dev_priv, reg); \
744
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
805
	if (fwengine) \
745
	else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) \
806
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
-
 
807
	REG_READ_FOOTER; \
746
		__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
808
}
747
	val = __raw_i915_read##x(dev_priv, reg); \
809
 
-
 
810
#define __chv_read(x) \
748
	GEN6_READ_FOOTER; \
811
static u##x \
749
}
812
chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
-
 
813
	unsigned fwengine = 0; \
750
 
814
	REG_READ_HEADER(x); \
-
 
815
	if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
751
#define __chv_read(x) \
816
		if (dev_priv->uncore.fw_rendercount == 0) \
-
 
817
			fwengine = FORCEWAKE_RENDER; \
-
 
818
	} else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
-
 
819
		if (dev_priv->uncore.fw_mediacount == 0) \
752
static u##x \
820
		fwengine = FORCEWAKE_MEDIA;             \
-
 
821
	} else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
-
 
822
		if (dev_priv->uncore.fw_rendercount == 0) \
753
chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
823
			fwengine |= FORCEWAKE_RENDER; \
754
	GEN6_READ_HEADER(x); \
Line 824... Line 755...
824
		if (dev_priv->uncore.fw_mediacount == 0) \
755
	if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
825
			fwengine |= FORCEWAKE_MEDIA; \
756
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
Line 826... Line 757...
826
	}  \
757
	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
827
	if (fwengine) \
758
		__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
828
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
759
	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
-
 
760
		__force_wake_get(dev_priv, \
829
		val = __raw_i915_read##x(dev_priv, reg); \
761
				 FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
830
	if (fwengine) \
762
	val = __raw_i915_read##x(dev_priv, reg); \
831
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
763
	GEN6_READ_FOOTER; \
832
	REG_READ_FOOTER; \
-
 
833
}
764
}
834
 
765
 
835
#define SKL_NEEDS_FORCE_WAKE(dev_priv, reg)	\
-
 
836
	 ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
766
#define SKL_NEEDS_FORCE_WAKE(reg) \
837
 
767
	 ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
838
#define __gen9_read(x) \
-
 
839
static u##x \
768
 
840
gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
769
#define __gen9_read(x) \
841
	REG_READ_HEADER(x); \
-
 
842
	if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
770
static u##x \
843
		val = __raw_i915_read##x(dev_priv, reg); \
-
 
844
	} else { \
-
 
845
		unsigned fwengine = 0; \
771
gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
846
		if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
-
 
847
			if (dev_priv->uncore.fw_rendercount == 0) \
772
	enum forcewake_domains fw_engine; \
848
				fwengine = FORCEWAKE_RENDER; \
-
 
849
		} else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
773
	GEN6_READ_HEADER(x); \
850
			if (dev_priv->uncore.fw_mediacount == 0) \
774
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
851
				fwengine = FORCEWAKE_MEDIA; \
775
	if (!SKL_NEEDS_FORCE_WAKE(reg)) \
852
		} else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
-
 
853
			if (dev_priv->uncore.fw_rendercount == 0) \
776
		fw_engine = 0; \
854
				fwengine |= FORCEWAKE_RENDER; \
-
 
855
			if (dev_priv->uncore.fw_mediacount == 0) \
777
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
856
				fwengine |= FORCEWAKE_MEDIA; \
778
		fw_engine = FORCEWAKE_RENDER; \
Line -... Line 779...
-
 
779
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
-
 
780
		fw_engine = FORCEWAKE_MEDIA; \
-
 
781
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
-
 
782
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
857
		} else { \
783
	else \
858
			if (dev_priv->uncore.fw_blittercount == 0) \
784
		fw_engine = FORCEWAKE_BLITTER; \
859
				fwengine = FORCEWAKE_BLITTER; \
785
	if (fw_engine) \
860
		} \
786
		__force_wake_get(dev_priv, fw_engine); \
861
		if (fwengine) \
787
	val = __raw_i915_read##x(dev_priv, reg); \
Line 881... Line 807...
881
__vlv_read(64)
807
__vlv_read(64)
882
__gen6_read(8)
808
__gen6_read(8)
883
__gen6_read(16)
809
__gen6_read(16)
884
__gen6_read(32)
810
__gen6_read(32)
885
__gen6_read(64)
811
__gen6_read(64)
886
__gen5_read(8)
-
 
887
__gen5_read(16)
-
 
888
__gen5_read(32)
-
 
889
__gen5_read(64)
-
 
890
__gen4_read(8)
-
 
891
__gen4_read(16)
-
 
892
__gen4_read(32)
-
 
893
__gen4_read(64)
-
 
Line 894... Line 812...
894
 
812
 
895
#undef __gen9_read
813
#undef __gen9_read
896
#undef __chv_read
814
#undef __chv_read
897
#undef __vlv_read
815
#undef __vlv_read
898
#undef __gen6_read
-
 
899
#undef __gen5_read
816
#undef __gen6_read
900
#undef __gen4_read
817
#undef __vgpu_read
901
#undef REG_READ_FOOTER
818
#undef GEN6_READ_FOOTER
Line 902... Line 819...
902
#undef REG_READ_HEADER
819
#undef GEN6_READ_HEADER
903
 
-
 
904
#define REG_WRITE_HEADER \
820
 
905
	unsigned long irqflags; \
821
#define GEN2_WRITE_HEADER \
906
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
-
 
Line 907... Line 822...
907
	assert_device_not_suspended(dev_priv); \
822
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
908
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
-
 
Line 909... Line 823...
909
 
823
	assert_device_not_suspended(dev_priv); \
910
#define REG_WRITE_FOOTER \
824
 
911
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
825
#define GEN2_WRITE_FOOTER
912
 
826
 
913
#define __gen4_write(x) \
827
#define __gen2_write(x) \
914
static void \
828
static void \
915
gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
829
gen2_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
Line 916... Line 830...
916
	REG_WRITE_HEADER; \
830
	GEN2_WRITE_HEADER; \
917
	__raw_i915_write##x(dev_priv, reg, val); \
831
	__raw_i915_write##x(dev_priv, reg, val); \
918
	REG_WRITE_FOOTER; \
832
	GEN2_WRITE_FOOTER; \
919
}
833
}
920
 
834
 
921
#define __gen5_write(x) \
835
#define __gen5_write(x) \
922
static void \
836
static void \
923
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
837
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
Line -... Line 838...
-
 
838
	GEN2_WRITE_HEADER; \
-
 
839
	ilk_dummy_write(dev_priv); \
-
 
840
	__raw_i915_write##x(dev_priv, reg, val); \
-
 
841
	GEN2_WRITE_FOOTER; \
-
 
842
}
-
 
843
 
-
 
844
__gen5_write(8)
-
 
845
__gen5_write(16)
-
 
846
__gen5_write(32)
-
 
847
__gen5_write(64)
-
 
848
__gen2_write(8)
-
 
849
__gen2_write(16)
-
 
850
__gen2_write(32)
-
 
851
__gen2_write(64)
-
 
852
 
-
 
853
#undef __gen5_write
-
 
854
#undef __gen2_write
-
 
855
 
-
 
856
#undef GEN2_WRITE_FOOTER
-
 
857
#undef GEN2_WRITE_HEADER
-
 
858
 
-
 
859
#define GEN6_WRITE_HEADER \
-
 
860
	unsigned long irqflags; \
-
 
861
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
924
	REG_WRITE_HEADER; \
862
	assert_device_not_suspended(dev_priv); \
925
	ilk_dummy_write(dev_priv); \
863
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
926
	__raw_i915_write##x(dev_priv, reg, val); \
864
 
927
	REG_WRITE_FOOTER; \
865
#define GEN6_WRITE_FOOTER \
928
}
866
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
929
 
867
 
930
#define __gen6_write(x) \
868
#define __gen6_write(x) \
931
static void \
869
static void \
932
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
870
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
933
	u32 __fifo_ret = 0; \
871
	u32 __fifo_ret = 0; \
934
	REG_WRITE_HEADER; \
872
	GEN6_WRITE_HEADER; \
935
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
873
	if (NEEDS_FORCE_WAKE(reg)) { \
936
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
874
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
937
	} \
875
	} \
Line 938... Line 876...
938
	__raw_i915_write##x(dev_priv, reg, val); \
876
	__raw_i915_write##x(dev_priv, reg, val); \
939
	if (unlikely(__fifo_ret)) { \
877
	if (unlikely(__fifo_ret)) { \
940
		gen6_gt_check_fifodbg(dev_priv); \
878
		gen6_gt_check_fifodbg(dev_priv); \
941
	} \
879
	} \
942
	REG_WRITE_FOOTER; \
880
	GEN6_WRITE_FOOTER; \
943
}
881
}
944
 
882
 
945
#define __hsw_write(x) \
883
#define __hsw_write(x) \
946
static void \
884
static void \
947
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
885
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
948
	u32 __fifo_ret = 0; \
886
	u32 __fifo_ret = 0; \
949
	REG_WRITE_HEADER; \
887
	GEN6_WRITE_HEADER; \
950
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
888
	if (NEEDS_FORCE_WAKE(reg)) { \
951
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
889
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
952
	} \
890
	} \
953
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
891
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
-
 
892
	__raw_i915_write##x(dev_priv, reg, val); \
-
 
893
	if (unlikely(__fifo_ret)) { \
-
 
894
		gen6_gt_check_fifodbg(dev_priv); \
-
 
895
	} \
-
 
896
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
-
 
897
	hsw_unclaimed_reg_detect(dev_priv); \
-
 
898
	GEN6_WRITE_FOOTER; \
-
 
899
}
954
	__raw_i915_write##x(dev_priv, reg, val); \
900
 
Line 955... Line 901...
955
	if (unlikely(__fifo_ret)) { \
901
#define __vgpu_write(x) \
956
		gen6_gt_check_fifodbg(dev_priv); \
902
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
957
	} \
903
			  off_t reg, u##x val, bool trace) { \
Line 982... Line 928...
982
}
928
}
Line 983... Line 929...
983
 
929
 
984
#define __gen8_write(x) \
930
#define __gen8_write(x) \
985
static void \
931
static void \
986
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
932
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
987
	REG_WRITE_HEADER; \
933
	GEN6_WRITE_HEADER; \
988
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
934
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
989
	if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
-
 
990
		if (dev_priv->uncore.forcewake_count == 0) \
-
 
991
		dev_priv->uncore.funcs.force_wake_get(dev_priv, \
-
 
992
							FORCEWAKE_ALL); \
935
	if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) \
993
	__raw_i915_write##x(dev_priv, reg, val); \
-
 
994
		if (dev_priv->uncore.forcewake_count == 0) \
-
 
995
		dev_priv->uncore.funcs.force_wake_put(dev_priv, \
-
 
996
							FORCEWAKE_ALL); \
-
 
997
	} else { \
936
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
998
		__raw_i915_write##x(dev_priv, reg, val); \
-
 
999
	} \
937
	__raw_i915_write##x(dev_priv, reg, val); \
1000
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
938
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
1001
	hsw_unclaimed_reg_detect(dev_priv); \
939
	hsw_unclaimed_reg_detect(dev_priv); \
1002
	REG_WRITE_FOOTER; \
940
	GEN6_WRITE_FOOTER; \
Line 1003... Line 941...
 
 #define __chv_write(x) \
 static void \
 chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
-	unsigned fwengine = 0; \
 	bool shadowed = is_gen8_shadowed(dev_priv, reg); \
-	REG_WRITE_HEADER; \
+	GEN6_WRITE_HEADER; \
 	if (!shadowed) { \
-		if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
-			if (dev_priv->uncore.fw_rendercount == 0) \
-				fwengine = FORCEWAKE_RENDER; \
-		} else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
-			if (dev_priv->uncore.fw_mediacount == 0) \
-				fwengine = FORCEWAKE_MEDIA; \
-		} else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
-			if (dev_priv->uncore.fw_rendercount == 0) \
-				fwengine |= FORCEWAKE_RENDER; \
-			if (dev_priv->uncore.fw_mediacount == 0) \
-				fwengine |= FORCEWAKE_MEDIA; \
-		} \
+		if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
+			__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
+		else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
+			__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
+		else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
+			__force_wake_get(dev_priv, FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
 	} \
-	if (fwengine) \
-		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
 	__raw_i915_write##x(dev_priv, reg, val); \
-	if (fwengine) \
-		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
-	REG_WRITE_FOOTER; \
+	GEN6_WRITE_FOOTER; \
 }
 
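The FORCEWAKE_CHV_*_RANGE_OFFSET() tests above reduce to interval checks on the MMIO offset, so the power well to wake is a pure function of the register address. The building block looks like this (a sketch; the real macros earlier in the file enumerate several blocks per well, of which 0x2000-0x4000 is one render example):

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000))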
Line 1055... Line 982...
 
 #define __gen9_write(x) \
 static void \
 gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
 		bool trace) { \
-	REG_WRITE_HEADER; \
-	if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \
-			is_gen9_shadowed(dev_priv, reg)) { \
-		__raw_i915_write##x(dev_priv, reg, val); \
-	} else { \
-		unsigned fwengine = 0; \
-		if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
-			if (dev_priv->uncore.fw_rendercount == 0) \
-				fwengine = FORCEWAKE_RENDER; \
-		} else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
-			if (dev_priv->uncore.fw_mediacount == 0) \
-				fwengine = FORCEWAKE_MEDIA; \
-		} else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
-			if (dev_priv->uncore.fw_rendercount == 0) \
-				fwengine |= FORCEWAKE_RENDER; \
-			if (dev_priv->uncore.fw_mediacount == 0) \
-				fwengine |= FORCEWAKE_MEDIA; \
-		} else { \
-			if (dev_priv->uncore.fw_blittercount == 0) \
-				fwengine = FORCEWAKE_BLITTER; \
-		} \
-		if (fwengine) \
-			dev_priv->uncore.funcs.force_wake_get(dev_priv, \
-					fwengine); \
-		__raw_i915_write##x(dev_priv, reg, val); \
-		if (fwengine) \
-			dev_priv->uncore.funcs.force_wake_put(dev_priv, \
-					fwengine); \
-	} \
-	REG_WRITE_FOOTER; \
+	enum forcewake_domains fw_engine; \
+	GEN6_WRITE_HEADER; \
+	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
+	if (!SKL_NEEDS_FORCE_WAKE(reg) || \
+	    is_gen9_shadowed(dev_priv, reg)) \
+		fw_engine = 0; \
+	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
+		fw_engine = FORCEWAKE_RENDER; \
+	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
+		fw_engine = FORCEWAKE_MEDIA; \
+	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
+		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+	else \
+		fw_engine = FORCEWAKE_BLITTER; \
+	if (fw_engine) \
+		__force_wake_get(dev_priv, fw_engine); \
+	__raw_i915_write##x(dev_priv, reg, val); \
+	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
+	hsw_unclaimed_reg_detect(dev_priv); \
+	GEN6_WRITE_FOOTER; \
 }
 
 __gen9_write(8)
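The gen8, chv and gen9 paths all funnel through the __force_wake_get() helper used above. Roughly (a sketch of the helper defined earlier in the file), it masks out domains that are already held, bumps each domain's reference count, and only calls down to the hardware hook for domains that actually need waking; the matching release is deferred via the per-domain timer:

static inline void __force_wake_get(struct drm_i915_private *dev_priv,
				    enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (domain->wake_count) {
			fw_domains &= ~(1 << id);	/* already awake */
			continue;
		}

		domain->wake_count++;
		fw_domain_arm_timer(domain);		/* deferred release */
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}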
Line 1107... Line 1024...
 __hsw_write(64)
 __gen6_write(8)
 __gen6_write(16)
 __gen6_write(32)
 __gen6_write(64)
-__gen5_write(8)
-__gen5_write(16)
-__gen5_write(32)
-__gen5_write(64)
-__gen4_write(8)
-__gen4_write(16)
-__gen4_write(32)
-__gen4_write(64)
+__vgpu_write(8)
+__vgpu_write(16)
+__vgpu_write(32)
+__vgpu_write(64)
 
 #undef __gen9_write
 #undef __chv_write
 #undef __gen8_write
 #undef __hsw_write
 #undef __gen6_write
-#undef __gen5_write
-#undef __gen4_write
-#undef REG_WRITE_FOOTER
-#undef REG_WRITE_HEADER
+#undef __vgpu_write
+#undef GEN6_WRITE_FOOTER
+#undef GEN6_WRITE_HEADER
 
 #define ASSIGN_WRITE_MMIO_VFUNCS(x) \
 do { \
Line 1142... Line 1054...
 	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
 	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
 	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
 } while (0)
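By analogy with the read side shown above, ASSIGN_WRITE_MMIO_VFUNCS(gen9) expands to plain vfunc assignments:

	dev_priv->uncore.funcs.mmio_writeb = gen9_write8;
	dev_priv->uncore.funcs.mmio_writew = gen9_write16;
	dev_priv->uncore.funcs.mmio_writel = gen9_write32;
	dev_priv->uncore.funcs.mmio_writeq = gen9_write64;

so every register write in the driver dispatches through the generation-specific implementation selected once at init time.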
Line -... Line 1058...
+static void fw_domain_init(struct drm_i915_private *dev_priv,
+			   enum forcewake_domain_id domain_id,
+			   u32 reg_set, u32 reg_ack)
+{
+	struct intel_uncore_forcewake_domain *d;
+
+	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
+		return;
+
+	d = &dev_priv->uncore.fw_domain[domain_id];
+
+	WARN_ON(d->wake_count);
+
+	d->wake_count = 0;
+	d->reg_set = reg_set;
+	d->reg_ack = reg_ack;
+
+	if (IS_GEN6(dev_priv)) {
+		d->val_reset = 0;
+		d->val_set = FORCEWAKE_KERNEL;
+		d->val_clear = 0;
+	} else {
+		/* WaRsClearFWBitsAtReset:bdw,skl */
+		d->val_reset = _MASKED_BIT_DISABLE(0xffff);
+		d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
+		d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
+	}
+
+	if (IS_VALLEYVIEW(dev_priv))
+		d->reg_post = FORCEWAKE_ACK_VLV;
+	else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
+		d->reg_post = ECOBUS;
+	else
+		d->reg_post = 0;
+
+	d->i915 = dev_priv;
+	d->id = domain_id;
+
+	setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);
+
+	dev_priv->uncore.fw_domains |= (1 << domain_id);
+
+	fw_domain_reset(d);
+}
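fw_domain_init() is pure bookkeeping: it fills in one slot of the per-device domain array. The fields it wires up (abridged sketch of the struct as declared in i915_drv.h of this kernel base) map directly onto the set/ack handshake that fw_domain_reset() and the wait-for-ack helpers near the top of the file implement:

struct intel_uncore_forcewake_domain {
	struct drm_i915_private *i915;
	enum forcewake_domain_id id;
	unsigned wake_count;		/* nesting count; HW touched on 0 <-> 1 only */
	struct timer_list timer;	/* delayed release of the wake */
	u32 reg_set;			/* val_set / val_clear are written here */
	u32 val_set;
	u32 val_clear;
	u32 reg_ack;			/* FORCEWAKE_KERNEL ack bit polled here */
	u32 reg_post;			/* optional posting-read register (ECOBUS) */
	u32 val_reset;			/* written by fw_domain_reset() */
};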
1152
		    gen6_force_wake_timer, (unsigned long)dev_priv);
1108
 
1153
 
1109
	if (INTEL_INFO(dev_priv->dev)->gen <= 5)
1154
	__intel_uncore_early_sanitize(dev, false);
1110
		return;
-
 
1111
 
-
 
1112
	if (IS_GEN9(dev)) {
-
 
1113
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
-
 
1114
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
-
 
1115
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
-
 
1116
			       FORCEWAKE_RENDER_GEN9,
-
 
1117
			       FORCEWAKE_ACK_RENDER_GEN9);
-
 
1118
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
1155
 
1119
			       FORCEWAKE_BLITTER_GEN9,
1156
	if (IS_GEN9(dev)) {
1120
			       FORCEWAKE_ACK_BLITTER_GEN9);
-
 
1121
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
-
 
1122
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
-
 
1123
	} else if (IS_VALLEYVIEW(dev)) {
-
 
1124
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
1157
		dev_priv->uncore.funcs.force_wake_get = __gen9_force_wake_get;
1125
		if (!IS_CHERRYVIEW(dev))
-
 
1126
			dev_priv->uncore.funcs.force_wake_put =
-
 
1127
				fw_domains_put_with_fifo;
-
 
1128
		else
-
 
1129
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1158
		dev_priv->uncore.funcs.force_wake_put = __gen9_force_wake_put;
1130
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1159
	} else if (IS_VALLEYVIEW(dev)) {
1131
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
-
 
1132
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
1160
		dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
1133
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
-
 
1134
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
-
 
1135
		dev_priv->uncore.funcs.force_wake_get =
1161
		dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
1136
			fw_domains_get_with_thread_status;
1162
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
1137
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
Line 1163... Line 1138...
1163
		dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get;
1138
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
Line 1172... Line 1147...
1172
		 * force_wake_mt_get will not wake the device and the
1147
		 * force_wake_mt_get will not wake the device and the
1173
		 * ECOBUS read will return zero. Which will be
1148
		 * ECOBUS read will return zero. Which will be
1174
		 * (correctly) interpreted by the test below as MT
1149
		 * (correctly) interpreted by the test below as MT
1175
		 * forcewake being disabled.
1150
		 * forcewake being disabled.
1176
		 */
1151
		 */
-
 
1152
		dev_priv->uncore.funcs.force_wake_get =
-
 
1153
			fw_domains_get_with_thread_status;
-
 
1154
		dev_priv->uncore.funcs.force_wake_put =
-
 
1155
			fw_domains_put_with_fifo;
-
 
1156
 
-
 
1157
		/* We need to init first for ECOBUS access and then
-
 
1158
		 * determine later if we want to reinit, in case of MT access is
-
 
1159
		 * not working. In this stage we don't know which flavour this
-
 
1160
		 * ivb is, so it is better to reset also the gen6 fw registers
-
 
1161
		 * before the ecobus check.
-
 
1162
		 */
-
 
1163
 
-
 
1164
		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
-
 
1165
		__raw_posting_read(dev_priv, ECOBUS);
-
 
1166
 
-
 
1167
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
-
 
1168
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
-
 
1169
 
1177
		mutex_lock(&dev->struct_mutex);
1170
		mutex_lock(&dev->struct_mutex);
1178
		__gen7_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
1171
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
1179
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
1172
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
1180
		__gen7_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
1173
		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
1181
		mutex_unlock(&dev->struct_mutex);
1174
		mutex_unlock(&dev->struct_mutex);
Line 1182... Line 1175...
1182
 
1175
 
1183
		if (ecobus & FORCEWAKE_MT_ENABLE) {
-
 
1184
			dev_priv->uncore.funcs.force_wake_get =
-
 
1185
				__gen7_gt_force_wake_mt_get;
-
 
1186
			dev_priv->uncore.funcs.force_wake_put =
-
 
1187
				__gen7_gt_force_wake_mt_put;
-
 
1188
		} else {
1176
		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
1189
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
1177
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
1190
			DRM_INFO("when using vblank-synced partial screen updates.\n");
1178
			DRM_INFO("when using vblank-synced partial screen updates.\n");
1191
			dev_priv->uncore.funcs.force_wake_get =
1179
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1192
				__gen6_gt_force_wake_get;
-
 
1193
			dev_priv->uncore.funcs.force_wake_put =
-
 
1194
				__gen6_gt_force_wake_put;
1180
				       FORCEWAKE, FORCEWAKE_ACK);
1195
		}
1181
		}
1196
	} else if (IS_GEN6(dev)) {
1182
	} else if (IS_GEN6(dev)) {
1197
		dev_priv->uncore.funcs.force_wake_get =
1183
		dev_priv->uncore.funcs.force_wake_get =
1198
			__gen6_gt_force_wake_get;
1184
			fw_domains_get_with_thread_status;
1199
		dev_priv->uncore.funcs.force_wake_put =
1185
		dev_priv->uncore.funcs.force_wake_put =
-
 
1186
			fw_domains_put_with_fifo;
-
 
1187
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
-
 
1188
			       FORCEWAKE, FORCEWAKE_ACK);
-
 
1189
	}
-
 
1190
 
-
 
1191
	/* All future platforms are expected to require complex power gating */
1200
			__gen6_gt_force_wake_put;
1192
	WARN_ON(dev_priv->uncore.fw_domains == 0);
Line -... Line 1193...
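Once the domains are registered, consumers use the reference-counted API; a typical pattern for a burst of accesses against one power well:

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_RENDER);
	/* ... several I915_READ()/I915_WRITE() hits on render registers ... */
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_RENDER);

Nested gets are cheap (just the wake_count), and the put does not drop the wake immediately; the per-domain timer releases it shortly afterwards to absorb back-to-back bursts.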
 
+void intel_uncore_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	i915_check_vgpu(dev);
+
+	intel_uncore_ellc_detect(dev);
+	intel_uncore_fw_domains_init(dev);
+	__intel_uncore_early_sanitize(dev, false);
 
 	switch (INTEL_INFO(dev)->gen) {
 	default:
-		WARN_ON(1);
-		return;
 	case 9:
 		ASSIGN_WRITE_MMIO_VFUNCS(gen9);
 		ASSIGN_READ_MMIO_VFUNCS(gen9);
 		break;
Line 1237... Line 1237...
 		ASSIGN_READ_MMIO_VFUNCS(gen5);
 		break;
 	case 4:
 	case 3:
 	case 2:
-		ASSIGN_WRITE_MMIO_VFUNCS(gen4);
-		ASSIGN_READ_MMIO_VFUNCS(gen4);
+		ASSIGN_WRITE_MMIO_VFUNCS(gen2);
+		ASSIGN_READ_MMIO_VFUNCS(gen2);
 		break;
 	}
+
+	if (intel_vgpu_active(dev)) {
+		ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
+		ASSIGN_READ_MMIO_VFUNCS(vgpu);
+	}
 
 	i915_check_and_clear_faults(dev);
 }
 #undef ASSIGN_WRITE_MMIO_VFUNCS
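After intel_uncore_init() returns, the generic register macros in i915_drv.h resolve through this vfunc table; conceptually (a sketch of the relationship, not the literal macro text):

#define I915_READ(reg) \
	dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
#define I915_WRITE(reg, val) \
	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)

This is why intel_uncore_init() has to run before any MMIO traffic, and why the vGPU table above can transparently replace every accessor when running under a hypervisor.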
1271
			void *data, struct drm_file *file)
1276
			void *data, struct drm_file *file)
1272
{
1277
{
1273
	struct drm_i915_private *dev_priv = dev->dev_private;
1278
	struct drm_i915_private *dev_priv = dev->dev_private;
1274
	struct drm_i915_reg_read *reg = data;
1279
	struct drm_i915_reg_read *reg = data;
1275
	struct register_whitelist const *entry = whitelist;
1280
	struct register_whitelist const *entry = whitelist;
-
 
1281
	unsigned size;
-
 
1282
	u64 offset;
1276
	int i, ret = 0;
1283
	int i, ret = 0;
Line 1277... Line 1284...
1277
 
1284
 
1278
	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
1285
	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
1279
		if (entry->offset == reg->offset &&
1286
		if (entry->offset == (reg->offset & -entry->size) &&
1280
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
1287
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
1281
			break;
1288
			break;
Line 1282... Line 1289...
1282
	}
1289
	}
1283
 
1290
 
Line -... Line 1291...
-
 
1291
	if (i == ARRAY_SIZE(whitelist))
-
 
1292
		return -EINVAL;
-
 
1293
 
-
 
1294
	/* We use the low bits to encode extra flags as the register should
-
 
1295
	 * be naturally aligned (and those that are not so aligned merely
-
 
1296
	 * limit the available flags for that register).
-
 
1297
	 */
-
 
1298
	offset = entry->offset;
-
 
1299
	size = entry->size;
-
 
1300
	size |= reg->offset ^ offset;
1284
	if (i == ARRAY_SIZE(whitelist))
1301
 
-
 
1302
	intel_runtime_pm_get(dev_priv);
-
 
1303
 
-
 
1304
	switch (size) {
1285
		return -EINVAL;
1305
	case 8 | 1:
1286
 
1306
		reg->val = I915_READ64_2x32(offset, offset+4);
1287
	switch (entry->size) {
1307
		break;
1288
	case 8:
1308
	case 8:
1289
		reg->val = I915_READ64(reg->offset);
1309
		reg->val = I915_READ64(offset);
1290
		break;
1310
		break;
1291
	case 4:
1311
	case 4:
1292
		reg->val = I915_READ(reg->offset);
1312
		reg->val = I915_READ(offset);
1293
		break;
1313
		break;
1294
	case 2:
1314
	case 2:
1295
		reg->val = I915_READ16(reg->offset);
1315
		reg->val = I915_READ16(offset);
1296
		break;
1316
		break;
1297
	case 1:
1317
	case 1:
1298
		reg->val = I915_READ8(reg->offset);
-
 
1299
		break;
1318
		reg->val = I915_READ8(offset);
1300
	default:
1319
		break;
1301
		WARN_ON(1);
1320
	default:
Line 1302... Line 1321...
1302
		ret = -EINVAL;
1321
		ret = -EINVAL;
-
 
1322
		goto out;
1303
		goto out;
1323
	}
1304
	}
1324
 
Line 1305... Line 1325...
1305
 
1325
out:
1306
out:
1326
	intel_runtime_pm_put(dev_priv);
Line 1403... Line 1423...
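Userspace reaches this through DRM_IOCTL_I915_REG_READ. A minimal sketch (assuming the libdrm headers; 0x2358 is RING_TIMESTAMP(RENDER_RING_BASE), the classic whitelisted register):

#include <stdint.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int read_rcs_timestamp(int fd, uint64_t *val)
{
	struct drm_i915_reg_read req = {
		.offset = 0x2358,	/* RING_TIMESTAMP(RENDER_RING_BASE) */
	};
	int ret = drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &req);

	if (ret == 0)
		*val = req.val;
	return ret;
}

The offset/size computation added above lets a caller pass flag bits in the (naturally zero) low bits of the offset, e.g. to request the 2x32 variant of a 64-bit read.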
Line 1403... Line 1423...
 static int ironlake_do_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
-	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
+	I915_WRITE(ILK_GDSR,
 		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
-	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
+	ret = wait_for((I915_READ(ILK_GDSR) &
 			ILK_GRDOM_RESET_ENABLE) == 0, 500);
 	if (ret)
 		return ret;
 
-	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
+	I915_WRITE(ILK_GDSR,
 		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
-	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
+	ret = wait_for((I915_READ(ILK_GDSR) &
 			ILK_GRDOM_RESET_ENABLE) == 0, 500);
 	if (ret)
 		return ret;
 
-	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0);
+	I915_WRITE(ILK_GDSR, 0);
Line 1443... Line 1463...
 	intel_uncore_forcewake_reset(dev, true);
 
 	return ret;
 }
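Both reset paths lean on the wait_for() polling macro from intel_drv.h, roughly (sketch):

#define wait_for(COND, MS) ({ \
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
	int ret__ = 0; \
	while (!(COND)) { \
		if (time_after(jiffies, timeout__)) { \
			if (!(COND)) \
				ret__ = -ETIMEDOUT; \
			break; \
		} \
		msleep(1); \
	} \
	ret__; \
})

That is, a sleeping poll with a final re-check, returning 0 on success or -ETIMEDOUT; ironlake_do_reset() propagates it directly, and the gen8 reset-request handshake below builds the same idea into wait_for_register().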
 }
 
+static int wait_for_register(struct drm_i915_private *dev_priv,
+			     const u32 reg,
+			     const u32 mask,
+			     const u32 value,
+			     const unsigned long timeout_ms)
+{
+	return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
+}
+
+static int gen8_do_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *engine;
+	int i;
+
+	for_each_ring(engine, dev_priv, i) {
+		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
+			   _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
+
+		if (wait_for_register(dev_priv,
+				      RING_RESET_CTL(engine->mmio_base),
+				      RESET_CTL_READY_TO_RESET,
+				      RESET_CTL_READY_TO_RESET,
+				      700)) {
+			DRM_ERROR("%s: reset request timeout\n", engine->name);
+			goto not_ready;
+		}
+	}
+
+	return gen6_do_reset(dev);
+
+not_ready:
+	for_each_ring(engine, dev_priv, i)
+		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
+			   _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
+
+	return -EIO;
+}
+
-int intel_gpu_reset(struct drm_device *dev)
-{
-	if (INTEL_INFO(dev)->gen >= 6)
-		return gen6_do_reset(dev);
-	else if (IS_GEN5(dev))
-		return ironlake_do_reset(dev);
-	else if (IS_G4X(dev))
-		return g4x_do_reset(dev);
-	else if (IS_G33(dev))
-		return g33_do_reset(dev);
-	else if (INTEL_INFO(dev)->gen >= 3)
-		return i915_do_reset(dev);
-	else
-		return -ENODEV;
-}
+static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *)
+{
+	if (!i915.reset)
+		return NULL;
+
+	if (INTEL_INFO(dev)->gen >= 8)
+		return gen8_do_reset;
+	else if (INTEL_INFO(dev)->gen >= 6)
+		return gen6_do_reset;
+	else if (IS_GEN5(dev))
+		return ironlake_do_reset;
+	else if (IS_G4X(dev))
+		return g4x_do_reset;
+	else if (IS_G33(dev))
+		return g33_do_reset;
+	else if (INTEL_INFO(dev)->gen >= 3)
+		return i915_do_reset;
+	else
+		return NULL;
+}
+
+int intel_gpu_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	int (*reset)(struct drm_device *);
+	int ret;
+
+	reset = intel_get_gpu_reset(dev);
+	if (reset == NULL)
+		return -ENODEV;
+
+	/* If the power well sleeps during the reset, the reset
+	 * request may be dropped and never completes (causing -EIO).
+	 */
+	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+	ret = reset(dev);
+	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+	return ret;
+}
+
+bool intel_has_gpu_reset(struct drm_device *dev)
+{
+	return intel_get_gpu_reset(dev) != NULL;
+}
}
1463
 
1552