Subversion Repositories Kolibri OS


Rev 6660 → Rev 6937
1
/*
1
/*
2
 * Copyright © 2013 Intel Corporation
2
 * Copyright © 2013 Intel Corporation
3
 *
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a
4
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 * copy of this software and associated documentation files (the "Software"),
5
 * copy of this software and associated documentation files (the "Software"),
6
 * to deal in the Software without restriction, including without limitation
6
 * to deal in the Software without restriction, including without limitation
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
 * and/or sell copies of the Software, and to permit persons to whom the
8
 * and/or sell copies of the Software, and to permit persons to whom the
9
 * Software is furnished to do so, subject to the following conditions:
9
 * Software is furnished to do so, subject to the following conditions:
10
 *
10
 *
11
 * The above copyright notice and this permission notice (including the next
11
 * The above copyright notice and this permission notice (including the next
12
 * paragraph) shall be included in all copies or substantial portions of the
12
 * paragraph) shall be included in all copies or substantial portions of the
13
 * Software.
13
 * Software.
14
 *
14
 *
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21
 * IN THE SOFTWARE.
21
 * IN THE SOFTWARE.
22
 */
22
 */
23
 
23
 
24
#include "i915_drv.h"
24
#include "i915_drv.h"
25
#include "intel_drv.h"
25
#include "intel_drv.h"
26
#include "i915_vgpu.h"
26
#include "i915_vgpu.h"
27
 
27
 
28
#include <linux/pm_runtime.h>
28
#include <linux/pm_runtime.h>
29
 
29
 
30
#define FORCEWAKE_ACK_TIMEOUT_MS 50
30
#define FORCEWAKE_ACK_TIMEOUT_MS 50
31
 
-
 
32
#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
-
 
33
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))
-
 
34
 
-
 
35
#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
-
 
36
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))
-
 
37
 
-
 
38
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
-
 
39
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))
-
 
40
 
-
 
41
#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
-
 
42
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))
-
 
43
 
31
 
44
#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
32
#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))
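/*
 * Illustrative sketch (not taken from either revision): the raw accessors
 * above bypass all forcewake handling, so a write that must be posted to
 * the hardware immediately is typically followed by a read from the same
 * cacheline whose value is discarded, e.g.:
 *
 *	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
 *	__raw_posting_read(dev_priv, ECOBUS);
 */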
45
 
33
 
46
static const char * const forcewake_domain_names[] = {
34
static const char * const forcewake_domain_names[] = {
47
	"render",
35
	"render",
48
	"blitter",
36
	"blitter",
49
	"media",
37
	"media",
50
};
38
};
51
 
39
 
52
const char *
40
const char *
53
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
41
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
54
{
42
{
55
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);
43
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);
56
 
44
 
57
	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
45
	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
58
		return forcewake_domain_names[id];
46
		return forcewake_domain_names[id];
59
 
47
 
60
	WARN_ON(id);
48
	WARN_ON(id);
61
 
49
 
62
	return "unknown";
50
	return "unknown";
63
}
51
}
64
 
-
 
65
static void
-
 
66
assert_device_not_suspended(struct drm_i915_private *dev_priv)
-
 
67
{
-
 
68
	WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
-
 
69
		  "Device suspended\n");
-
 
70
}
-
 
71
 
52
 
72
static inline void
53
static inline void
73
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
54
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
74
{
55
{
75
	WARN_ON(d->reg_set == 0);
56
	WARN_ON(!i915_mmio_reg_valid(d->reg_set));
76
	__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
57
	__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
77
}
58
}
78
 
59
 
79
static inline void
60
static inline void
80
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
61
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
81
{
62
{
82
//	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
63
//	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
83
//	/* something from same cacheline, but !FORCEWAKE */
64
//	/* something from same cacheline, but !FORCEWAKE */
84
//	__raw_posting_read(dev_priv, ECOBUS);
65
//	__raw_posting_read(dev_priv, ECOBUS);
85
}
66
}
86
 
67
 
87
static inline void
68
static inline void
88
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
69
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
89
{
70
{
90
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
71
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
91
			     FORCEWAKE_KERNEL) == 0,
72
			     FORCEWAKE_KERNEL) == 0,
92
			    FORCEWAKE_ACK_TIMEOUT_MS))
73
			    FORCEWAKE_ACK_TIMEOUT_MS))
93
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
74
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
94
			  intel_uncore_forcewake_domain_to_str(d->id));
75
			  intel_uncore_forcewake_domain_to_str(d->id));
95
}
76
}
96
 
77
 
97
static inline void
78
static inline void
98
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
79
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
99
{
80
{
100
	__raw_i915_write32(d->i915, d->reg_set, d->val_set);
81
	__raw_i915_write32(d->i915, d->reg_set, d->val_set);
101
}
82
}
102
 
83
 
103
static inline void
84
static inline void
104
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
85
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
105
{
86
{
106
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
87
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
107
			     FORCEWAKE_KERNEL),
88
			     FORCEWAKE_KERNEL),
108
			    FORCEWAKE_ACK_TIMEOUT_MS))
89
			    FORCEWAKE_ACK_TIMEOUT_MS))
109
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
90
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
110
			  intel_uncore_forcewake_domain_to_str(d->id));
91
			  intel_uncore_forcewake_domain_to_str(d->id));
111
}
92
}
112
 
93
 
113
static inline void
94
static inline void
114
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
95
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
115
{
96
{
116
	__raw_i915_write32(d->i915, d->reg_set, d->val_clear);
97
	__raw_i915_write32(d->i915, d->reg_set, d->val_clear);
117
}
98
}
118
 
99
 
119
static inline void
100
static inline void
120
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
101
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
121
{
102
{
122
	/* something from same cacheline, but not from the set register */
103
	/* something from same cacheline, but not from the set register */
123
	if (d->reg_post)
104
	if (i915_mmio_reg_valid(d->reg_post))
124
		__raw_posting_read(d->i915, d->reg_post);
105
		__raw_posting_read(d->i915, d->reg_post);
125
}
106
}
126
 
107
 
127
static void
108
static void
128
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
109
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
129
{
110
{
130
	struct intel_uncore_forcewake_domain *d;
111
	struct intel_uncore_forcewake_domain *d;
131
	enum forcewake_domain_id id;
112
	enum forcewake_domain_id id;
132
 
113
 
133
	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
114
	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
134
		fw_domain_wait_ack_clear(d);
115
		fw_domain_wait_ack_clear(d);
135
		fw_domain_get(d);
116
		fw_domain_get(d);
136
		fw_domain_wait_ack(d);
117
		fw_domain_wait_ack(d);
137
	}
118
	}
138
}
119
}
139
 
120
 
140
static void
121
static void
141
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
122
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
142
{
123
{
143
	struct intel_uncore_forcewake_domain *d;
124
	struct intel_uncore_forcewake_domain *d;
144
	enum forcewake_domain_id id;
125
	enum forcewake_domain_id id;
145
 
126
 
146
	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
127
	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
147
		fw_domain_put(d);
128
		fw_domain_put(d);
148
		fw_domain_posting_read(d);
129
		fw_domain_posting_read(d);
149
	}
130
	}
150
}
131
}
151
 
132
 
152
static void
133
static void
153
fw_domains_posting_read(struct drm_i915_private *dev_priv)
134
fw_domains_posting_read(struct drm_i915_private *dev_priv)
154
{
135
{
155
	struct intel_uncore_forcewake_domain *d;
136
	struct intel_uncore_forcewake_domain *d;
156
	enum forcewake_domain_id id;
137
	enum forcewake_domain_id id;
157
 
138
 
158
	/* No need to do for all, just do for first found */
139
	/* No need to do for all, just do for first found */
159
	for_each_fw_domain(d, dev_priv, id) {
140
	for_each_fw_domain(d, dev_priv, id) {
160
		fw_domain_posting_read(d);
141
		fw_domain_posting_read(d);
161
		break;
142
		break;
162
	}
143
	}
163
}
144
}
164
 
145
 
165
static void
146
static void
166
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
147
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
167
{
148
{
168
	struct intel_uncore_forcewake_domain *d;
149
	struct intel_uncore_forcewake_domain *d;
169
	enum forcewake_domain_id id;
150
	enum forcewake_domain_id id;
170
 
151
 
171
	if (dev_priv->uncore.fw_domains == 0)
152
	if (dev_priv->uncore.fw_domains == 0)
172
		return;
153
		return;
173
 
154
 
174
	for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
155
	for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
175
		fw_domain_reset(d);
156
		fw_domain_reset(d);
176
 
157
 
177
	fw_domains_posting_read(dev_priv);
158
	fw_domains_posting_read(dev_priv);
178
}
159
}
179
 
160
 
180
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
161
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
181
{
162
{
182
	/* w/a for a sporadic read returning 0 by waiting for the GT
163
	/* w/a for a sporadic read returning 0 by waiting for the GT
183
	 * thread to wake up.
164
	 * thread to wake up.
184
	 */
165
	 */
185
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
166
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
186
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
167
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
187
		DRM_ERROR("GT thread status wait timed out\n");
168
		DRM_ERROR("GT thread status wait timed out\n");
188
}
169
}
189
 
170
 
190
static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
171
static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
191
					      enum forcewake_domains fw_domains)
172
					      enum forcewake_domains fw_domains)
192
{
173
{
193
	fw_domains_get(dev_priv, fw_domains);
174
	fw_domains_get(dev_priv, fw_domains);
194
 
175
 
195
	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
176
	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
196
	__gen6_gt_wait_for_thread_c0(dev_priv);
177
	__gen6_gt_wait_for_thread_c0(dev_priv);
197
}
178
}
198
 
179
 
199
static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
180
static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
200
{
181
{
201
	u32 gtfifodbg;
182
	u32 gtfifodbg;
202
 
183
 
203
	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
184
	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
204
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
185
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
205
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
186
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
206
}
187
}
207
 
188
 
208
static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
189
static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
209
				     enum forcewake_domains fw_domains)
190
				     enum forcewake_domains fw_domains)
210
{
191
{
211
	fw_domains_put(dev_priv, fw_domains);
192
	fw_domains_put(dev_priv, fw_domains);
212
	gen6_gt_check_fifodbg(dev_priv);
193
	gen6_gt_check_fifodbg(dev_priv);
213
}
194
}
214
 
195
 
215
static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
196
static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
216
{
197
{
217
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);
198
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);
218
 
199
 
219
	return count & GT_FIFO_FREE_ENTRIES_MASK;
200
	return count & GT_FIFO_FREE_ENTRIES_MASK;
220
}
201
}
221
 
202
 
222
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
203
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
223
{
204
{
224
	int ret = 0;
205
	int ret = 0;
225
 
206
 
226
	/* On VLV, FIFO will be shared by both SW and HW.
207
	/* On VLV, FIFO will be shared by both SW and HW.
227
	 * So, we need to read the FREE_ENTRIES every time */
208
	 * So, we need to read the FREE_ENTRIES every time */
228
	if (IS_VALLEYVIEW(dev_priv->dev))
209
	if (IS_VALLEYVIEW(dev_priv->dev))
229
		dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);
210
		dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);
230
 
211
 
231
	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
212
	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
232
		int loop = 500;
213
		int loop = 500;
233
		u32 fifo = fifo_free_entries(dev_priv);
214
		u32 fifo = fifo_free_entries(dev_priv);
234
 
215
 
235
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
216
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
236
			udelay(10);
217
			udelay(10);
237
			fifo = fifo_free_entries(dev_priv);
218
			fifo = fifo_free_entries(dev_priv);
238
		}
219
		}
239
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
220
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
240
			++ret;
221
			++ret;
241
		dev_priv->uncore.fifo_count = fifo;
222
		dev_priv->uncore.fifo_count = fifo;
242
	}
223
	}
243
	dev_priv->uncore.fifo_count--;
224
	dev_priv->uncore.fifo_count--;
244
 
225
 
245
	return ret;
226
	return ret;
246
}
227
}
247
 
228
 
248
static void intel_uncore_fw_release_timer(unsigned long arg)
229
static void intel_uncore_fw_release_timer(unsigned long arg)
249
{
230
{
250
	struct intel_uncore_forcewake_domain *domain = (void *)arg;
231
	struct intel_uncore_forcewake_domain *domain = (void *)arg;
251
	unsigned long irqflags;
232
	unsigned long irqflags;
252
 
233
 
253
	assert_device_not_suspended(domain->i915);
234
	assert_rpm_device_not_suspended(domain->i915);
254
 
235
 
255
	spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
236
	spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
256
	if (WARN_ON(domain->wake_count == 0))
237
	if (WARN_ON(domain->wake_count == 0))
257
		domain->wake_count++;
238
		domain->wake_count++;
258
 
239
 
259
	if (--domain->wake_count == 0)
240
	if (--domain->wake_count == 0)
260
		domain->i915->uncore.funcs.force_wake_put(domain->i915,
241
		domain->i915->uncore.funcs.force_wake_put(domain->i915,
261
							  1 << domain->id);
242
							  1 << domain->id);
262
 
243
 
263
	spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
244
	spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
264
}
245
}
265
 
246
 
266
void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
247
void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
267
{
248
{
268
	struct drm_i915_private *dev_priv = dev->dev_private;
249
	struct drm_i915_private *dev_priv = dev->dev_private;
269
	unsigned long irqflags;
250
	unsigned long irqflags;
270
	struct intel_uncore_forcewake_domain *domain;
251
	struct intel_uncore_forcewake_domain *domain;
271
	int retry_count = 100;
252
	int retry_count = 100;
272
	enum forcewake_domain_id id;
253
	enum forcewake_domain_id id;
273
	enum forcewake_domains fw = 0, active_domains;
254
	enum forcewake_domains fw = 0, active_domains;
274
 
255
 
275
	/* Hold uncore.lock across reset to prevent any register access
256
	/* Hold uncore.lock across reset to prevent any register access
276
	 * with forcewake not set correctly. Wait until all pending
257
	 * with forcewake not set correctly. Wait until all pending
277
	 * timers are run before holding.
258
	 * timers are run before holding.
278
	 */
259
	 */
279
	while (1) {
260
	while (1) {
280
		active_domains = 0;
261
		active_domains = 0;
281
 
262
 
282
		for_each_fw_domain(domain, dev_priv, id) {
263
		for_each_fw_domain(domain, dev_priv, id) {
283
			if (del_timer_sync(&domain->timer) == 0)
264
			if (del_timer_sync(&domain->timer) == 0)
284
				continue;
265
				continue;
285
 
266
 
286
			intel_uncore_fw_release_timer((unsigned long)domain);
267
			intel_uncore_fw_release_timer((unsigned long)domain);
287
		}
268
		}
288
 
269
 
289
		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
270
		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
290
 
271
 
291
		for_each_fw_domain(domain, dev_priv, id) {
272
		for_each_fw_domain(domain, dev_priv, id) {
292
//           if (timer_pending(&domain->timer))
273
//           if (timer_pending(&domain->timer))
293
//				active_domains |= (1 << id);
274
//				active_domains |= (1 << id);
294
	}
275
	}
295
 
276
 
296
		if (active_domains == 0)
277
		if (active_domains == 0)
297
			break;
278
			break;
298
 
279
 
299
		if (--retry_count == 0) {
280
		if (--retry_count == 0) {
300
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
281
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
301
			break;
282
			break;
302
		}
283
		}
303
 
284
 
304
		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
285
		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
305
        change_task();
286
        change_task();
306
	}
287
	}
307
 
288
 
308
	WARN_ON(active_domains);
289
	WARN_ON(active_domains);
309
 
290
 
310
	for_each_fw_domain(domain, dev_priv, id)
291
	for_each_fw_domain(domain, dev_priv, id)
311
		if (domain->wake_count)
292
		if (domain->wake_count)
312
			fw |= 1 << id;
293
			fw |= 1 << id;
313
 
294
 
314
	if (fw)
295
	if (fw)
315
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
296
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
316
 
297
 
317
	fw_domains_reset(dev_priv, FORCEWAKE_ALL);
298
	fw_domains_reset(dev_priv, FORCEWAKE_ALL);
318
 
299
 
319
	if (restore) { /* If reset with a user forcewake, try to restore */
300
	if (restore) { /* If reset with a user forcewake, try to restore */
320
		if (fw)
301
		if (fw)
321
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
302
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
322
 
303
 
323
		if (IS_GEN6(dev) || IS_GEN7(dev))
304
		if (IS_GEN6(dev) || IS_GEN7(dev))
324
			dev_priv->uncore.fifo_count =
305
			dev_priv->uncore.fifo_count =
325
				fifo_free_entries(dev_priv);
306
				fifo_free_entries(dev_priv);
326
	}
307
	}
327
 
308
 
328
	if (!restore)
309
	if (!restore)
329
		assert_forcewakes_inactive(dev_priv);
310
		assert_forcewakes_inactive(dev_priv);
330
 
311
 
331
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
312
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
332
}
313
}
333
 
314
 
334
static void intel_uncore_ellc_detect(struct drm_device *dev)
315
static void intel_uncore_ellc_detect(struct drm_device *dev)
335
{
316
{
336
	struct drm_i915_private *dev_priv = dev->dev_private;
317
	struct drm_i915_private *dev_priv = dev->dev_private;
337
 
318
 
338
	if ((IS_HASWELL(dev) || IS_BROADWELL(dev) ||
319
	if ((IS_HASWELL(dev) || IS_BROADWELL(dev) ||
339
	     INTEL_INFO(dev)->gen >= 9) &&
320
	     INTEL_INFO(dev)->gen >= 9) &&
340
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) {
321
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) {
341
		/* The docs do not explain exactly how the calculation can be
322
		/* The docs do not explain exactly how the calculation can be
342
		 * made. It is somewhat guessable, but for now, it's always
323
		 * made. It is somewhat guessable, but for now, it's always
343
		 * 128MB.
324
		 * 128MB.
344
		 * NB: We can't write IDICR yet because we do not have gt funcs
325
		 * NB: We can't write IDICR yet because we do not have gt funcs
345
		 * set up */
326
		 * set up */
346
		dev_priv->ellc_size = 128;
327
		dev_priv->ellc_size = 128;
347
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
328
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
348
	}
329
	}
349
}
330
}
350
 
331
 
351
static void __intel_uncore_early_sanitize(struct drm_device *dev,
332
static void __intel_uncore_early_sanitize(struct drm_device *dev,
352
					  bool restore_forcewake)
333
					  bool restore_forcewake)
353
{
334
{
354
	struct drm_i915_private *dev_priv = dev->dev_private;
335
	struct drm_i915_private *dev_priv = dev->dev_private;
355
 
336
 
356
	if (HAS_FPGA_DBG_UNCLAIMED(dev))
337
	if (HAS_FPGA_DBG_UNCLAIMED(dev))
357
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
338
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
358
 
339
 
359
	/* clear out old GT FIFO errors */
340
	/* clear out old GT FIFO errors */
360
	if (IS_GEN6(dev) || IS_GEN7(dev))
341
	if (IS_GEN6(dev) || IS_GEN7(dev))
361
		__raw_i915_write32(dev_priv, GTFIFODBG,
342
		__raw_i915_write32(dev_priv, GTFIFODBG,
362
				   __raw_i915_read32(dev_priv, GTFIFODBG));
343
				   __raw_i915_read32(dev_priv, GTFIFODBG));
363
 
344
 
364
	/* WaDisableShadowRegForCpd:chv */
345
	/* WaDisableShadowRegForCpd:chv */
365
	if (IS_CHERRYVIEW(dev)) {
346
	if (IS_CHERRYVIEW(dev)) {
366
		__raw_i915_write32(dev_priv, GTFIFOCTL,
347
		__raw_i915_write32(dev_priv, GTFIFOCTL,
367
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
348
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
368
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
349
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
369
				   GT_FIFO_CTL_RC6_POLICY_STALL);
350
				   GT_FIFO_CTL_RC6_POLICY_STALL);
370
	}
351
	}
371
 
352
 
372
	intel_uncore_forcewake_reset(dev, restore_forcewake);
353
	intel_uncore_forcewake_reset(dev, restore_forcewake);
373
}
354
}
374
 
355
 
375
void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
356
void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
376
{
357
{
377
	__intel_uncore_early_sanitize(dev, restore_forcewake);
358
	__intel_uncore_early_sanitize(dev, restore_forcewake);
378
	i915_check_and_clear_faults(dev);
359
	i915_check_and_clear_faults(dev);
379
}
360
}
380
 
361
 
381
void intel_uncore_sanitize(struct drm_device *dev)
362
void intel_uncore_sanitize(struct drm_device *dev)
382
{
363
{
383
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
364
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
384
	intel_disable_gt_powersave(dev);
365
	intel_disable_gt_powersave(dev);
385
}
366
}
386
 
367
 
387
static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
368
static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
388
					 enum forcewake_domains fw_domains)
369
					 enum forcewake_domains fw_domains)
389
{
370
{
390
	struct intel_uncore_forcewake_domain *domain;
371
	struct intel_uncore_forcewake_domain *domain;
391
	enum forcewake_domain_id id;
372
	enum forcewake_domain_id id;
392
 
373
 
393
	if (!dev_priv->uncore.funcs.force_wake_get)
374
	if (!dev_priv->uncore.funcs.force_wake_get)
394
		return;
375
		return;
395
 
376
 
396
	fw_domains &= dev_priv->uncore.fw_domains;
377
	fw_domains &= dev_priv->uncore.fw_domains;
397
 
378
 
398
	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
379
	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
399
		if (domain->wake_count++)
380
		if (domain->wake_count++)
400
			fw_domains &= ~(1 << id);
381
			fw_domains &= ~(1 << id);
401
	}
382
	}
402
 
383
 
403
	if (fw_domains)
384
	if (fw_domains)
404
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
385
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
405
}
386
}
406
 
387
 
407
/**
388
/**
408
 * intel_uncore_forcewake_get - grab forcewake domain references
389
 * intel_uncore_forcewake_get - grab forcewake domain references
409
 * @dev_priv: i915 device instance
390
 * @dev_priv: i915 device instance
410
 * @fw_domains: forcewake domains to get reference on
391
 * @fw_domains: forcewake domains to get reference on
411
 *
392
 *
412
 * This function can be used to get the GT's forcewake domain references.
393
 * This function can be used to get the GT's forcewake domain references.
413
 * Normal register access will handle the forcewake domains automatically.
394
 * Normal register access will handle the forcewake domains automatically.
414
 * However, if some sequence requires the GT not to power down a particular
395
 * However, if some sequence requires the GT not to power down a particular
415
 * forcewake domain, this function should be called at the beginning of the
396
 * forcewake domain, this function should be called at the beginning of the
416
 * sequence. Subsequently, the reference should be dropped by a symmetric
397
 * sequence. Subsequently, the reference should be dropped by a symmetric
417
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the domains
398
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the domains
418
 * to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
399
 * to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
419
 */
400
 */
420
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
401
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
421
				enum forcewake_domains fw_domains)
402
				enum forcewake_domains fw_domains)
422
{
403
{
423
	unsigned long irqflags;
404
	unsigned long irqflags;
424
 
405
 
425
	if (!dev_priv->uncore.funcs.force_wake_get)
406
	if (!dev_priv->uncore.funcs.force_wake_get)
426
		return;
407
		return;
427
 
408
 
428
	WARN_ON(dev_priv->pm.suspended);
409
	assert_rpm_wakelock_held(dev_priv);
429
 
410
 
430
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
411
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
431
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
412
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
432
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
413
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
433
}
414
}
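/*
 * Illustrative usage sketch (not taken from either revision): a caller that
 * needs the GT to stay awake across a raw register sequence pairs the get
 * with a symmetric put, for example:
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 *	... raw register reads/writes that must not race with RC6 ...
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 */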
434
 
415
 
435
/**
416
/**
436
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
417
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
437
 * @dev_priv: i915 device instance
418
 * @dev_priv: i915 device instance
438
 * @fw_domains: forcewake domains to get reference on
419
 * @fw_domains: forcewake domains to get reference on
439
 *
420
 *
440
 * See intel_uncore_forcewake_get(). This variant places the onus
421
 * See intel_uncore_forcewake_get(). This variant places the onus
441
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
422
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
442
 */
423
 */
443
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
424
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
444
					enum forcewake_domains fw_domains)
425
					enum forcewake_domains fw_domains)
445
{
426
{
446
	assert_spin_locked(&dev_priv->uncore.lock);
427
	assert_spin_locked(&dev_priv->uncore.lock);
447
 
428
 
448
	if (!dev_priv->uncore.funcs.force_wake_get)
429
	if (!dev_priv->uncore.funcs.force_wake_get)
449
		return;
430
		return;
450
 
431
 
451
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
432
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
452
}
433
}
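/*
 * Illustrative sketch (not taken from either revision): the __locked variant
 * is for callers that already hold uncore.lock, for example:
 *
 *	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 *	intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_RENDER);
 *	... register accesses under the lock ...
 *	intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_RENDER);
 *	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 */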
453
 
434
 
454
static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
435
static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
455
					 enum forcewake_domains fw_domains)
436
					 enum forcewake_domains fw_domains)
456
{
437
{
457
	struct intel_uncore_forcewake_domain *domain;
438
	struct intel_uncore_forcewake_domain *domain;
458
	enum forcewake_domain_id id;
439
	enum forcewake_domain_id id;
459
 
440
 
460
	if (!dev_priv->uncore.funcs.force_wake_put)
441
	if (!dev_priv->uncore.funcs.force_wake_put)
461
		return;
442
		return;
462
 
443
 
463
	fw_domains &= dev_priv->uncore.fw_domains;
444
	fw_domains &= dev_priv->uncore.fw_domains;
464
 
445
 
465
	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
446
	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
466
		if (WARN_ON(domain->wake_count == 0))
447
		if (WARN_ON(domain->wake_count == 0))
467
			continue;
448
			continue;
468
 
449
 
469
		if (--domain->wake_count)
450
		if (--domain->wake_count)
470
			continue;
451
			continue;
471
 
452
 
472
		domain->wake_count++;
453
		domain->wake_count++;
473
		fw_domain_arm_timer(domain);
454
		fw_domain_arm_timer(domain);
474
	}
455
	}
475
}
456
}
476
 
457
 
477
/**
458
/**
478
 * intel_uncore_forcewake_put - release a forcewake domain reference
459
 * intel_uncore_forcewake_put - release a forcewake domain reference
479
 * @dev_priv: i915 device instance
460
 * @dev_priv: i915 device instance
480
 * @fw_domains: forcewake domains to put references on
461
 * @fw_domains: forcewake domains to put references on
481
 *
462
 *
482
 * This function drops the device-level forcewakes for specified
463
 * This function drops the device-level forcewakes for specified
483
 * domains obtained by intel_uncore_forcewake_get().
464
 * domains obtained by intel_uncore_forcewake_get().
484
 */
465
 */
485
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
466
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
486
				enum forcewake_domains fw_domains)
467
				enum forcewake_domains fw_domains)
487
{
468
{
488
	unsigned long irqflags;
469
	unsigned long irqflags;
489
 
470
 
490
	if (!dev_priv->uncore.funcs.force_wake_put)
471
	if (!dev_priv->uncore.funcs.force_wake_put)
491
		return;
472
		return;
492
 
473
 
493
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
474
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
494
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
475
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
495
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
476
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
496
}
477
}
497
 
478
 
498
/**
479
/**
499
 * intel_uncore_forcewake_put__locked - release forcewake domain references
480
 * intel_uncore_forcewake_put__locked - release forcewake domain references
500
 * @dev_priv: i915 device instance
481
 * @dev_priv: i915 device instance
501
 * @fw_domains: forcewake domains to put references on
482
 * @fw_domains: forcewake domains to put references on
502
 *
483
 *
503
 * See intel_uncore_forcewake_put(). This variant places the onus
484
 * See intel_uncore_forcewake_put(). This variant places the onus
504
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
485
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
505
 */
486
 */
506
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
487
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
507
					enum forcewake_domains fw_domains)
488
					enum forcewake_domains fw_domains)
508
{
489
{
509
	assert_spin_locked(&dev_priv->uncore.lock);
490
	assert_spin_locked(&dev_priv->uncore.lock);
510
 
491
 
511
	if (!dev_priv->uncore.funcs.force_wake_put)
492
	if (!dev_priv->uncore.funcs.force_wake_put)
512
		return;
493
		return;
513
 
494
 
514
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
495
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
515
}
496
}
516
 
497
 
517
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
498
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
518
{
499
{
519
	struct intel_uncore_forcewake_domain *domain;
500
	struct intel_uncore_forcewake_domain *domain;
520
	enum forcewake_domain_id id;
501
	enum forcewake_domain_id id;
521
 
502
 
522
	if (!dev_priv->uncore.funcs.force_wake_get)
503
	if (!dev_priv->uncore.funcs.force_wake_get)
523
		return;
504
		return;
524
 
505
 
525
	for_each_fw_domain(domain, dev_priv, id)
506
	for_each_fw_domain(domain, dev_priv, id)
526
		WARN_ON(domain->wake_count);
507
		WARN_ON(domain->wake_count);
527
}
508
}
528
 
509
 
529
/* We give fast paths for the really cool registers */
510
/* We give fast paths for the really cool registers */
530
#define NEEDS_FORCE_WAKE(reg) \
511
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)
531
	 ((reg) < 0x40000 && (reg) != FORCEWAKE)
-
 
532
 
512
 
533
#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))
513
#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))
534
 
514
 
535
#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
515
#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
536
	(REG_RANGE((reg), 0x2000, 0x4000) || \
516
	(REG_RANGE((reg), 0x2000, 0x4000) || \
537
	 REG_RANGE((reg), 0x5000, 0x8000) || \
517
	 REG_RANGE((reg), 0x5000, 0x8000) || \
538
	 REG_RANGE((reg), 0xB000, 0x12000) || \
518
	 REG_RANGE((reg), 0xB000, 0x12000) || \
539
	 REG_RANGE((reg), 0x2E000, 0x30000))
519
	 REG_RANGE((reg), 0x2E000, 0x30000))
540
 
520
 
541
#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
521
#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
542
	(REG_RANGE((reg), 0x12000, 0x14000) || \
522
	(REG_RANGE((reg), 0x12000, 0x14000) || \
543
	 REG_RANGE((reg), 0x22000, 0x24000) || \
523
	 REG_RANGE((reg), 0x22000, 0x24000) || \
544
	 REG_RANGE((reg), 0x30000, 0x40000))
524
	 REG_RANGE((reg), 0x30000, 0x40000))
545
 
525
 
546
#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
526
#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
547
	(REG_RANGE((reg), 0x2000, 0x4000) || \
527
	(REG_RANGE((reg), 0x2000, 0x4000) || \
548
	 REG_RANGE((reg), 0x5200, 0x8000) || \
528
	 REG_RANGE((reg), 0x5200, 0x8000) || \
549
	 REG_RANGE((reg), 0x8300, 0x8500) || \
529
	 REG_RANGE((reg), 0x8300, 0x8500) || \
550
	 REG_RANGE((reg), 0xB000, 0xB480) || \
530
	 REG_RANGE((reg), 0xB000, 0xB480) || \
551
	 REG_RANGE((reg), 0xE000, 0xE800))
531
	 REG_RANGE((reg), 0xE000, 0xE800))
552
 
532
 
553
#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
533
#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
554
	(REG_RANGE((reg), 0x8800, 0x8900) || \
534
	(REG_RANGE((reg), 0x8800, 0x8900) || \
555
	 REG_RANGE((reg), 0xD000, 0xD800) || \
535
	 REG_RANGE((reg), 0xD000, 0xD800) || \
556
	 REG_RANGE((reg), 0x12000, 0x14000) || \
536
	 REG_RANGE((reg), 0x12000, 0x14000) || \
557
	 REG_RANGE((reg), 0x1A000, 0x1C000) || \
537
	 REG_RANGE((reg), 0x1A000, 0x1C000) || \
558
	 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
538
	 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
559
	 REG_RANGE((reg), 0x30000, 0x38000))
539
	 REG_RANGE((reg), 0x30000, 0x38000))
560
 
540
 
561
#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
541
#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
562
	(REG_RANGE((reg), 0x4000, 0x5000) || \
542
	(REG_RANGE((reg), 0x4000, 0x5000) || \
563
	 REG_RANGE((reg), 0x8000, 0x8300) || \
543
	 REG_RANGE((reg), 0x8000, 0x8300) || \
564
	 REG_RANGE((reg), 0x8500, 0x8600) || \
544
	 REG_RANGE((reg), 0x8500, 0x8600) || \
565
	 REG_RANGE((reg), 0x9000, 0xB000) || \
545
	 REG_RANGE((reg), 0x9000, 0xB000) || \
566
	 REG_RANGE((reg), 0xF000, 0x10000))
546
	 REG_RANGE((reg), 0xF000, 0x10000))
567
 
547
 
568
#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
548
#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
569
	REG_RANGE((reg), 0xB00,  0x2000)
549
	REG_RANGE((reg), 0xB00,  0x2000)
570
 
550
 
571
#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
551
#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
572
	(REG_RANGE((reg), 0x2000, 0x2700) || \
552
	(REG_RANGE((reg), 0x2000, 0x2700) || \
573
	 REG_RANGE((reg), 0x3000, 0x4000) || \
553
	 REG_RANGE((reg), 0x3000, 0x4000) || \
574
	 REG_RANGE((reg), 0x5200, 0x8000) || \
554
	 REG_RANGE((reg), 0x5200, 0x8000) || \
575
	 REG_RANGE((reg), 0x8140, 0x8160) || \
555
	 REG_RANGE((reg), 0x8140, 0x8160) || \
576
	 REG_RANGE((reg), 0x8300, 0x8500) || \
556
	 REG_RANGE((reg), 0x8300, 0x8500) || \
577
	 REG_RANGE((reg), 0x8C00, 0x8D00) || \
557
	 REG_RANGE((reg), 0x8C00, 0x8D00) || \
578
	 REG_RANGE((reg), 0xB000, 0xB480) || \
558
	 REG_RANGE((reg), 0xB000, 0xB480) || \
579
	 REG_RANGE((reg), 0xE000, 0xE900) || \
559
	 REG_RANGE((reg), 0xE000, 0xE900) || \
580
	 REG_RANGE((reg), 0x24400, 0x24800))
560
	 REG_RANGE((reg), 0x24400, 0x24800))
581
 
561
 
582
#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
562
#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
583
	(REG_RANGE((reg), 0x8130, 0x8140) || \
563
	(REG_RANGE((reg), 0x8130, 0x8140) || \
584
	 REG_RANGE((reg), 0x8800, 0x8A00) || \
564
	 REG_RANGE((reg), 0x8800, 0x8A00) || \
585
	 REG_RANGE((reg), 0xD000, 0xD800) || \
565
	 REG_RANGE((reg), 0xD000, 0xD800) || \
586
	 REG_RANGE((reg), 0x12000, 0x14000) || \
566
	 REG_RANGE((reg), 0x12000, 0x14000) || \
587
	 REG_RANGE((reg), 0x1A000, 0x1EA00) || \
567
	 REG_RANGE((reg), 0x1A000, 0x1EA00) || \
588
	 REG_RANGE((reg), 0x30000, 0x40000))
568
	 REG_RANGE((reg), 0x30000, 0x40000))
589
 
569
 
590
#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
570
#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
591
	REG_RANGE((reg), 0x9400, 0x9800)
571
	REG_RANGE((reg), 0x9400, 0x9800)
592
 
572
 
593
#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
573
#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
594
	((reg) < 0x40000 &&\
574
	((reg) < 0x40000 &&\
595
	 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
575
	 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
596
	 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
576
	 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
597
	 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
577
	 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
598
	 !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
578
	 !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
599
 
579
 
600
static void
580
static void
601
ilk_dummy_write(struct drm_i915_private *dev_priv)
581
ilk_dummy_write(struct drm_i915_private *dev_priv)
602
{
582
{
603
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
583
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
604
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
584
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
605
	 * hence harmless to write 0 into. */
585
	 * hence harmless to write 0 into. */
606
	__raw_i915_write32(dev_priv, MI_MODE, 0);
586
	__raw_i915_write32(dev_priv, MI_MODE, 0);
607
}
587
}
608
 
588
 
609
static void
589
static void
610
hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
590
hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv,
611
			bool before)
591
			i915_reg_t reg, bool read, bool before)
612
{
592
{
613
	const char *op = read ? "reading" : "writing to";
593
	const char *op = read ? "reading" : "writing to";
614
	const char *when = before ? "before" : "after";
594
	const char *when = before ? "before" : "after";
615
 
595
 
616
	if (!i915.mmio_debug)
596
	if (!i915.mmio_debug)
617
		return;
597
		return;
618
 
598
 
619
	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
599
	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
620
		WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
600
		WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
621
		     when, op, reg);
601
		     when, op, i915_mmio_reg_offset(reg));
622
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
602
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
623
		i915.mmio_debug--; /* Only report the first N failures */
603
		i915.mmio_debug--; /* Only report the first N failures */
624
	}
604
	}
625
}
605
}
626
 
606
 
627
static void
607
static void
628
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
608
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
629
{
609
{
630
	static bool mmio_debug_once = true;
610
	static bool mmio_debug_once = true;
631
 
611
 
632
	if (i915.mmio_debug || !mmio_debug_once)
612
	if (i915.mmio_debug || !mmio_debug_once)
633
		return;
613
		return;
634
 
614
 
635
	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
615
	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
636
		DRM_DEBUG("Unclaimed register detected, "
616
		DRM_DEBUG("Unclaimed register detected, "
637
			  "enabling oneshot unclaimed register reporting. "
617
			  "enabling oneshot unclaimed register reporting. "
638
			  "Please use i915.mmio_debug=N for more information.\n");
618
			  "Please use i915.mmio_debug=N for more information.\n");
639
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
619
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
640
		i915.mmio_debug = mmio_debug_once--;
620
		i915.mmio_debug = mmio_debug_once--;
641
	}
621
	}
642
}
622
}
643
 
623
 
644
#define GEN2_READ_HEADER(x) \
624
#define GEN2_READ_HEADER(x) \
645
	u##x val = 0; \
625
	u##x val = 0; \
646
	assert_device_not_suspended(dev_priv);
626
	assert_rpm_wakelock_held(dev_priv);
647
 
627
 
648
#define GEN2_READ_FOOTER \
628
#define GEN2_READ_FOOTER \
649
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
629
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
650
	return val
630
	return val
651
 
631
 
652
#define __gen2_read(x) \
632
#define __gen2_read(x) \
653
static u##x \
633
static u##x \
654
gen2_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
634
gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
655
	GEN2_READ_HEADER(x); \
635
	GEN2_READ_HEADER(x); \
656
	val = __raw_i915_read##x(dev_priv, reg); \
636
	val = __raw_i915_read##x(dev_priv, reg); \
657
	GEN2_READ_FOOTER; \
637
	GEN2_READ_FOOTER; \
658
}
638
}
659
 
639
 
660
#define __gen5_read(x) \
640
#define __gen5_read(x) \
661
static u##x \
641
static u##x \
662
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
642
gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
663
	GEN2_READ_HEADER(x); \
643
	GEN2_READ_HEADER(x); \
664
	ilk_dummy_write(dev_priv); \
644
	ilk_dummy_write(dev_priv); \
665
	val = __raw_i915_read##x(dev_priv, reg); \
645
	val = __raw_i915_read##x(dev_priv, reg); \
666
	GEN2_READ_FOOTER; \
646
	GEN2_READ_FOOTER; \
667
}
647
}
668
 
648
 
669
__gen5_read(8)
649
__gen5_read(8)
670
__gen5_read(16)
650
__gen5_read(16)
671
__gen5_read(32)
651
__gen5_read(32)
672
__gen5_read(64)
652
__gen5_read(64)
673
__gen2_read(8)
653
__gen2_read(8)
674
__gen2_read(16)
654
__gen2_read(16)
675
__gen2_read(32)
655
__gen2_read(32)
676
__gen2_read(64)
656
__gen2_read(64)
677
 
657
 
678
#undef __gen5_read
658
#undef __gen5_read
679
#undef __gen2_read
659
#undef __gen2_read
680
 
660
 
681
#undef GEN2_READ_FOOTER
661
#undef GEN2_READ_FOOTER
682
#undef GEN2_READ_HEADER
662
#undef GEN2_READ_HEADER
683
 
663
 
684
#define GEN6_READ_HEADER(x) \
664
#define GEN6_READ_HEADER(x) \
-
 
665
	u32 offset = i915_mmio_reg_offset(reg); \
685
	unsigned long irqflags; \
666
	unsigned long irqflags; \
686
	u##x val = 0; \
667
	u##x val = 0; \
687
	assert_device_not_suspended(dev_priv); \
668
	assert_rpm_wakelock_held(dev_priv); \
688
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
669
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
689
 
670
 
690
#define GEN6_READ_FOOTER \
671
#define GEN6_READ_FOOTER \
691
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
672
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
692
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
673
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
693
	return val
674
	return val
694
 
675
 
695
static inline void __force_wake_get(struct drm_i915_private *dev_priv,
676
static inline void __force_wake_get(struct drm_i915_private *dev_priv,
696
				    enum forcewake_domains fw_domains)
677
				    enum forcewake_domains fw_domains)
697
{
678
{
698
	struct intel_uncore_forcewake_domain *domain;
679
	struct intel_uncore_forcewake_domain *domain;
699
	enum forcewake_domain_id id;
680
	enum forcewake_domain_id id;
700
 
681
 
701
	if (WARN_ON(!fw_domains))
682
	if (WARN_ON(!fw_domains))
702
		return;
683
		return;
703
 
684
 
704
	/* Ideally GCC would constant-fold and eliminate this loop */
685
	/* Ideally GCC would constant-fold and eliminate this loop */
705
	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
686
	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
706
		if (domain->wake_count) {
687
		if (domain->wake_count) {
707
			fw_domains &= ~(1 << id);
688
			fw_domains &= ~(1 << id);
708
			continue;
689
			continue;
709
		}
690
		}
710
 
691
 
711
		domain->wake_count++;
692
		domain->wake_count++;
712
		fw_domain_arm_timer(domain);
693
		fw_domain_arm_timer(domain);
713
	}
694
	}
714
 
695
 
715
	if (fw_domains)
696
	if (fw_domains)
716
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
697
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
717
}
698
}
718
 
-
 
719
#define __vgpu_read(x) \
-
 
720
static u##x \
-
 
721
vgpu_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
-
 
722
	GEN6_READ_HEADER(x); \
-
 
723
	val = __raw_i915_read##x(dev_priv, reg); \
-
 
724
	GEN6_READ_FOOTER; \
-
 
725
}
-
 
726
 
699
 
727
#define __gen6_read(x) \
700
#define __gen6_read(x) \
728
static u##x \
701
static u##x \
729
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
702
gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
730
	GEN6_READ_HEADER(x); \
703
	GEN6_READ_HEADER(x); \
731
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
704
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
732
	if (NEEDS_FORCE_WAKE(reg)) \
705
	if (NEEDS_FORCE_WAKE(offset)) \
733
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
706
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
734
	val = __raw_i915_read##x(dev_priv, reg); \
707
	val = __raw_i915_read##x(dev_priv, reg); \
735
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
708
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
736
	GEN6_READ_FOOTER; \
709
	GEN6_READ_FOOTER; \
737
}
710
}
738
 
711
 
739
#define __vlv_read(x) \
712
#define __vlv_read(x) \
740
static u##x \
713
static u##x \
741
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
714
vlv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
-
 
715
	enum forcewake_domains fw_engine = 0; \
742
	GEN6_READ_HEADER(x); \
716
	GEN6_READ_HEADER(x); \
-
 
717
	if (!NEEDS_FORCE_WAKE(offset)) \
-
 
718
		fw_engine = 0; \
743
	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) \
719
	else if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(offset)) \
744
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
720
		fw_engine = FORCEWAKE_RENDER; \
745
	else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) \
721
	else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \
-
 
722
		fw_engine = FORCEWAKE_MEDIA; \
-
 
723
	if (fw_engine) \
746
		__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
724
		__force_wake_get(dev_priv, fw_engine); \
747
	val = __raw_i915_read##x(dev_priv, reg); \
725
	val = __raw_i915_read##x(dev_priv, reg); \
748
	GEN6_READ_FOOTER; \
726
	GEN6_READ_FOOTER; \
749
}
727
}
750
 
728
 
751
#define __chv_read(x) \
729
#define __chv_read(x) \
752
static u##x \
730
static u##x \
753
chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
731
chv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
-
 
732
	enum forcewake_domains fw_engine = 0; \
754
	GEN6_READ_HEADER(x); \
733
	GEN6_READ_HEADER(x); \
-
 
734
	if (!NEEDS_FORCE_WAKE(offset)) \
-
 
735
		fw_engine = 0; \
755
	if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
736
	else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
756
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
737
		fw_engine = FORCEWAKE_RENDER; \
757
	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
738
	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
758
		__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
739
		fw_engine = FORCEWAKE_MEDIA; \
759
	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
740
	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
-
 
741
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
760
		__force_wake_get(dev_priv, \
742
	if (fw_engine) \
761
				 FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
743
		__force_wake_get(dev_priv, fw_engine); \
762
	val = __raw_i915_read##x(dev_priv, reg); \
744
	val = __raw_i915_read##x(dev_priv, reg); \
763
	GEN6_READ_FOOTER; \
745
	GEN6_READ_FOOTER; \
764
}
746
}
765
 
747
 
766
#define SKL_NEEDS_FORCE_WAKE(reg) \
748
#define SKL_NEEDS_FORCE_WAKE(reg) \
767
	 ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
749
	 ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
768
 
750
 
769
#define __gen9_read(x) \
751
#define __gen9_read(x) \
770
static u##x \
752
static u##x \
771
gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
753
gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
772
	enum forcewake_domains fw_engine; \
754
	enum forcewake_domains fw_engine; \
773
	GEN6_READ_HEADER(x); \
755
	GEN6_READ_HEADER(x); \
774
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
756
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
775
	if (!SKL_NEEDS_FORCE_WAKE(reg)) \
757
	if (!SKL_NEEDS_FORCE_WAKE(offset)) \
776
		fw_engine = 0; \
758
		fw_engine = 0; \
777
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
759
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
778
		fw_engine = FORCEWAKE_RENDER; \
760
		fw_engine = FORCEWAKE_RENDER; \
779
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
761
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
780
		fw_engine = FORCEWAKE_MEDIA; \
762
		fw_engine = FORCEWAKE_MEDIA; \
781
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
763
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
782
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
764
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
783
	else \
765
	else \
784
		fw_engine = FORCEWAKE_BLITTER; \
766
		fw_engine = FORCEWAKE_BLITTER; \
785
	if (fw_engine) \
767
	if (fw_engine) \
786
		__force_wake_get(dev_priv, fw_engine); \
768
		__force_wake_get(dev_priv, fw_engine); \
787
	val = __raw_i915_read##x(dev_priv, reg); \
769
	val = __raw_i915_read##x(dev_priv, reg); \
788
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
770
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
789
	GEN6_READ_FOOTER; \
771
	GEN6_READ_FOOTER; \
790
}
772
}
791
 
-
 
792
__vgpu_read(8)
-
 
793
__vgpu_read(16)
-
 
794
__vgpu_read(32)
-
 
795
__vgpu_read(64)
773
 
796
__gen9_read(8)
774
__gen9_read(8)
797
__gen9_read(16)
775
__gen9_read(16)
798
__gen9_read(32)
776
__gen9_read(32)
799
__gen9_read(64)
777
__gen9_read(64)
800
__chv_read(8)
778
__chv_read(8)
801
__chv_read(16)
779
__chv_read(16)
802
__chv_read(32)
780
__chv_read(32)
803
__chv_read(64)
781
__chv_read(64)
804
__vlv_read(8)
782
__vlv_read(8)
805
__vlv_read(16)
783
__vlv_read(16)
806
__vlv_read(32)
784
__vlv_read(32)
807
__vlv_read(64)
785
__vlv_read(64)
808
__gen6_read(8)
786
__gen6_read(8)
809
__gen6_read(16)
787
__gen6_read(16)
810
__gen6_read(32)
788
__gen6_read(32)
811
__gen6_read(64)
789
__gen6_read(64)
812
 
790
 
813
#undef __gen9_read
791
#undef __gen9_read
814
#undef __chv_read
792
#undef __chv_read
815
#undef __vlv_read
793
#undef __vlv_read
816
#undef __gen6_read
794
#undef __gen6_read
817
#undef __vgpu_read
-
 
818
#undef GEN6_READ_FOOTER
795
#undef GEN6_READ_FOOTER
819
#undef GEN6_READ_HEADER
796
#undef GEN6_READ_HEADER
-
 
797
 
-
 
798
#define VGPU_READ_HEADER(x) \
-
 
799
	unsigned long irqflags; \
-
 
800
	u##x val = 0; \
-
 
801
	assert_rpm_device_not_suspended(dev_priv); \
-
 
802
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
-
 
803
 
-
 
804
#define VGPU_READ_FOOTER \
-
 
805
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
-
 
806
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
-
 
807
	return val
-
 
808
 
-
 
809
#define __vgpu_read(x) \
-
 
810
static u##x \
-
 
811
vgpu_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
-
 
812
	VGPU_READ_HEADER(x); \
-
 
813
	val = __raw_i915_read##x(dev_priv, reg); \
-
 
814
	VGPU_READ_FOOTER; \
-
 
815
}
-
 
816
 
-
 
817
__vgpu_read(8)
-
 
818
__vgpu_read(16)
-
 
819
__vgpu_read(32)
-
 
820
__vgpu_read(64)
-
 
821
 
-
 
822
#undef __vgpu_read
-
 
823
#undef VGPU_READ_FOOTER
-
 
824
#undef VGPU_READ_HEADER
820
 
825
 
821
#define GEN2_WRITE_HEADER \
826
#define GEN2_WRITE_HEADER \
822
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
827
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
823
	assert_device_not_suspended(dev_priv); \
828
	assert_rpm_wakelock_held(dev_priv); \
824
 
829
 
825
#define GEN2_WRITE_FOOTER
830
#define GEN2_WRITE_FOOTER
826
 
831
 
827
#define __gen2_write(x) \
832
#define __gen2_write(x) \
828
static void \
833
static void \
829
gen2_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
834
gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
830
	GEN2_WRITE_HEADER; \
835
	GEN2_WRITE_HEADER; \
831
	__raw_i915_write##x(dev_priv, reg, val); \
836
	__raw_i915_write##x(dev_priv, reg, val); \
832
	GEN2_WRITE_FOOTER; \
837
	GEN2_WRITE_FOOTER; \
833
}
838
}
834
 
839
 
835
#define __gen5_write(x) \
840
#define __gen5_write(x) \
836
static void \
841
static void \
837
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
842
gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
838
	GEN2_WRITE_HEADER; \
843
	GEN2_WRITE_HEADER; \
839
	ilk_dummy_write(dev_priv); \
844
	ilk_dummy_write(dev_priv); \
840
	__raw_i915_write##x(dev_priv, reg, val); \
845
	__raw_i915_write##x(dev_priv, reg, val); \
841
	GEN2_WRITE_FOOTER; \
846
	GEN2_WRITE_FOOTER; \
842
}
847
}
843
 
848
 
844
__gen5_write(8)
849
__gen5_write(8)
845
__gen5_write(16)
850
__gen5_write(16)
846
__gen5_write(32)
851
__gen5_write(32)
847
__gen5_write(64)
852
__gen5_write(64)
848
__gen2_write(8)
853
__gen2_write(8)
849
__gen2_write(16)
854
__gen2_write(16)
850
__gen2_write(32)
855
__gen2_write(32)
851
__gen2_write(64)
856
__gen2_write(64)
852
 
857
 
853
#undef __gen5_write
858
#undef __gen5_write
854
#undef __gen2_write
859
#undef __gen2_write
855
 
860
 
856
#undef GEN2_WRITE_FOOTER
861
#undef GEN2_WRITE_FOOTER
857
#undef GEN2_WRITE_HEADER
862
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define GEN6_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	GEN6_WRITE_FOOTER; \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	GEN6_WRITE_FOOTER; \
}
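/*
 * Callers that need to touch several forcewake-protected registers in a row
 * can take an explicit wakeref instead of paying the per-access cost hidden
 * in the handlers above.  Illustrative sketch using the public helpers
 * (intel_uncore_forcewake_get/put are the same calls used by
 * intel_gpu_reset() later in this file; req and freq are placeholder values):
 */
#if 0
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	I915_WRITE(GEN6_RPNSWREQ, req);
	I915_WRITE(GEN6_RC_VIDEO_FREQ, freq);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
#endif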

static const i915_reg_t gen8_shadowed_regs[] = {
	FORCEWAKE_MT,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv,
			     i915_reg_t reg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (i915_mmio_reg_equal(reg, gen8_shadowed_regs[i]))
			return true;

	return false;
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(dev_priv, reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	__raw_i915_write##x(dev_priv, reg, val); \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	GEN6_WRITE_FOOTER; \
}

#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine = 0; \
	GEN6_WRITE_HEADER; \
	if (!NEEDS_FORCE_WAKE(offset) || \
	    is_gen8_shadowed(dev_priv, reg)) \
		fw_engine = 0; \
	else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}
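/*
 * Writes to shadowed registers are tracked by the hardware itself, so they
 * do not need an explicit forcewake reference; that is what the
 * is_gen8_shadowed() test in __gen8_write()/__chv_write() above exploits.
 * Illustrative consequence (sketch only):
 */
#if 0
	/* GEN6_RPNSWREQ is listed in gen8_shadowed_regs[], so this write goes
	 * straight to __raw_i915_write32() with no forcewake dance: */
	chv_write32(dev_priv, GEN6_RPNSWREQ, val, true);
#endif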

static const i915_reg_t gen9_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	FORCEWAKE_BLITTER_GEN9,
	FORCEWAKE_RENDER_GEN9,
	FORCEWAKE_MEDIA_GEN9,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	/* TODO: Other registers are not yet used */
};

static bool is_gen9_shadowed(struct drm_i915_private *dev_priv,
			     i915_reg_t reg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
		if (i915_mmio_reg_equal(reg, gen9_shadowed_regs[i]))
			return true;

	return false;
}

#define __gen9_write(x) \
static void \
gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
		bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	if (!SKL_NEEDS_FORCE_WAKE(offset) || \
	    is_gen9_shadowed(dev_priv, reg)) \
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	GEN6_WRITE_FOOTER; \
}

__gen9_write(8)
__gen9_write(16)
__gen9_write(32)
__gen9_write(64)
__chv_write(8)
__chv_write(16)
__chv_write(32)
__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)

#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define VGPU_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define VGPU_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __vgpu_write(x) \
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
			  i915_reg_t reg, u##x val, bool trace) { \
	VGPU_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	VGPU_WRITE_FOOTER; \
}

__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)
__vgpu_write(64)

#undef __vgpu_write
#undef VGPU_WRITE_FOOTER
#undef VGPU_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
	dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)
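/*
 * For reference, ASSIGN_READ_MMIO_VFUNCS(gen6) in intel_uncore_init() below
 * expands to the following assignments (illustrative expansion only):
 */
#if 0
	dev_priv->uncore.funcs.mmio_readb = gen6_read8;
	dev_priv->uncore.funcs.mmio_readw = gen6_read16;
	dev_priv->uncore.funcs.mmio_readl = gen6_read32;
	dev_priv->uncore.funcs.mmio_readq = gen6_read64;
#endif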


static void fw_domain_init(struct drm_i915_private *dev_priv,
			   enum forcewake_domain_id domain_id,
			   i915_reg_t reg_set,
			   i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &dev_priv->uncore.fw_domain[domain_id];

	WARN_ON(d->wake_count);

	d->wake_count = 0;
	d->reg_set = reg_set;
	d->reg_ack = reg_ack;

	if (IS_GEN6(dev_priv)) {
		d->val_reset = 0;
		d->val_set = FORCEWAKE_KERNEL;
		d->val_clear = 0;
	} else {
		/* WaRsClearFWBitsAtReset:bdw,skl */
		d->val_reset = _MASKED_BIT_DISABLE(0xffff);
		d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
		d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		d->reg_post = FORCEWAKE_ACK_VLV;
	else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
		d->reg_post = ECOBUS;

	d->i915 = dev_priv;
	d->id = domain_id;

	setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);

	dev_priv->uncore.fw_domains |= (1 << domain_id);

	fw_domain_reset(d);
}
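/*
 * Note on the masked-bit values set up above: on gen7+ the FORCEWAKE
 * registers take a write mask in the upper 16 bits.  Assuming
 * FORCEWAKE_KERNEL is bit 0 (as defined in i915_reg.h),
 * _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL) evaluates to 0x00010001 (set bit 0,
 * unmask bit 0) and _MASKED_BIT_DISABLE(0xffff) to 0xffff0000 (clear all 16
 * bits), which is the WaRsClearFWBitsAtReset value used for val_reset.
 */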

static void intel_uncore_fw_domains_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev_priv->dev)->gen <= 5)
		return;

	if (IS_GEN9(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		if (!IS_CHERRYVIEW(dev))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		if (IS_HASWELL(dev))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case of MT access is
		 * not working. In this stage we don't know which flavour this
		 * ivb is, so it is better to reset also the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
		__raw_posting_read(dev_priv, ECOBUS);

		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		mutex_lock(&dev->struct_mutex);
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(dev_priv->uncore.fw_domains == 0);
}

void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	i915_check_vgpu(dev);

	intel_uncore_ellc_detect(dev);
	intel_uncore_fw_domains_init(dev);
	__intel_uncore_early_sanitize(dev, false);

	switch (INTEL_INFO(dev)->gen) {
	default:
	case 9:
		ASSIGN_WRITE_MMIO_VFUNCS(gen9);
		ASSIGN_READ_MMIO_VFUNCS(gen9);
		break;
	case 8:
		if (IS_CHERRYVIEW(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(chv);
			ASSIGN_READ_MMIO_VFUNCS(chv);

		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(hsw);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen6);
		}

		if (IS_VALLEYVIEW(dev)) {
			ASSIGN_READ_MMIO_VFUNCS(vlv);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 5:
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
		ASSIGN_READ_MMIO_VFUNCS(gen5);
		break;
	case 4:
	case 3:
	case 2:
		ASSIGN_WRITE_MMIO_VFUNCS(gen2);
		ASSIGN_READ_MMIO_VFUNCS(gen2);
		break;
	}

	if (intel_vgpu_active(dev)) {
		ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
		ASSIGN_READ_MMIO_VFUNCS(vgpu);
	}

	i915_check_and_clear_faults(dev);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS
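/*
 * Typical lifetime of the uncore state set up here, as seen from driver
 * load/unload (illustrative sketch of the expected call order only):
 */
#if 0
	intel_uncore_init(dev);		/* pick mmio vfuncs, init forcewake domains */
	/* ... normal I915_READ()/I915_WRITE() traffic ... */
	intel_uncore_fini(dev);		/* sanitize and drop forcewake on unload */
#endif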

void intel_uncore_fini(struct drm_device *dev)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
	intel_uncore_forcewake_reset(dev, false);
}

#define GEN_RANGE(l, h) GENMASK(h, l)

static const struct register_whitelist {
	i915_reg_t offset_ldw, offset_udw;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	  .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	  .size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
};

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	unsigned size;
	i915_reg_t offset_ldw, offset_udw;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	/* We use the low bits to encode extra flags as the register should
	 * be naturally aligned (and those that are not so aligned merely
	 * limit the available flags for that register).
	 */
	offset_ldw = entry->offset_ldw;
	offset_udw = entry->offset_udw;
	size = entry->size;
	size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw);

	intel_runtime_pm_get(dev_priv);

	switch (size) {
	case 8 | 1:
		reg->val = I915_READ64_2x32(offset_ldw, offset_udw);
		break;
	case 8:
		reg->val = I915_READ64(offset_ldw);
		break;
	case 4:
		reg->val = I915_READ(offset_ldw);
		break;
	case 2:
		reg->val = I915_READ16(offset_ldw);
		break;
	case 1:
		reg->val = I915_READ8(offset_ldw);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
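/*
 * Userspace reaches the ioctl above through DRM_IOCTL_I915_REG_READ.  A
 * minimal sketch, assuming the uapi layout from i915_drm.h and the usual
 * 0x2358 offset for RING_TIMESTAMP(RENDER_RING_BASE) in the whitelist
 * above; error handling omitted (illustrative only):
 */
#if 0
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static uint64_t read_render_timestamp(int drm_fd)
{
	struct drm_i915_reg_read rr = {
		.offset = 0x2358,	/* RING_TIMESTAMP(RENDER_RING_BASE) */
	};

	ioctl(drm_fd, DRM_IOCTL_I915_REG_READ, &rr);
	/* setting bit 0 of .offset instead selects the 2x32 read path */
	return rr.val;
}
#endif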

int i915_get_reset_stats_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct intel_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
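/*
 * Matching userspace side for the reset-stats query above, again assuming
 * the i915_drm.h uapi layout (illustrative sketch, error handling omitted):
 */
#if 0
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static void query_reset_stats(int drm_fd, uint32_t ctx_id)
{
	struct drm_i915_reset_stats stats = { .ctx_id = ctx_id };

	ioctl(drm_fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);
	/* stats.reset_count is only filled in for CAP_SYS_ADMIN callers;
	 * stats.batch_active/batch_pending are per-context hang counts. */
}
#endif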

static int i915_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_device *dev)
{
	/* assert reset for at least 20 usec */
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(20);
	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return wait_for(i915_reset_complete(dev), 500);
}

static int g4x_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_device *dev)
{
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(dev), 500);
}

static int g4x_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR, 0);

	return 0;
}

static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	intel_uncore_forcewake_reset(dev, true);

	return ret;
}

static int wait_for_register(struct drm_i915_private *dev_priv,
			     i915_reg_t reg,
			     const u32 mask,
			     const u32 value,
			     const unsigned long timeout_ms)
{
	return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
}

static int gen8_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	int i;

	for_each_ring(engine, dev_priv, i) {
		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
			   _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));

		if (wait_for_register(dev_priv,
				      RING_RESET_CTL(engine->mmio_base),
				      RESET_CTL_READY_TO_RESET,
				      RESET_CTL_READY_TO_RESET,
				      700)) {
			DRM_ERROR("%s: reset request timeout\n", engine->name);
			goto not_ready;
		}
	}

	return gen6_do_reset(dev);

not_ready:
	for_each_ring(engine, dev_priv, i)
		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
			   _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));

	return -EIO;
}

static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *)
{
	if (!i915.reset)
		return NULL;

	if (INTEL_INFO(dev)->gen >= 8)
		return gen8_do_reset;
	else if (INTEL_INFO(dev)->gen >= 6)
		return gen6_do_reset;
	else if (IS_GEN5(dev))
		return ironlake_do_reset;
	else if (IS_G4X(dev))
		return g4x_do_reset;
	else if (IS_G33(dev))
		return g33_do_reset;
	else if (INTEL_INFO(dev)->gen >= 3)
		return i915_do_reset;
	else
		return NULL;
}

int intel_gpu_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int (*reset)(struct drm_device *);
	int ret;

	reset = intel_get_gpu_reset(dev);
	if (reset == NULL)
		return -ENODEV;

	/* If the power well sleeps during the reset, the reset
	 * request may be dropped and never completes (causing -EIO).
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	ret = reset(dev);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

bool intel_has_gpu_reset(struct drm_device *dev)
{
	return intel_get_gpu_reset(dev) != NULL;
}
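/*
 * Illustrative use of the two entry points above by a hang-recovery path
 * (sketch only; in-tree the caller is i915_reset()):
 */
#if 0
	if (!intel_has_gpu_reset(dev))
		return -ENODEV;

	ret = intel_gpu_reset(dev);	/* forcewake held across the reset */
#endif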

void intel_uncore_check_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}