Subversion Repositories Kolibri OS

Rev Author Line No. Line
4104 Serge 1
/*
2
 * Copyright © 2013 Intel Corporation
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 * copy of this software and associated documentation files (the "Software"),
6
 * to deal in the Software without restriction, including without limitation
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
 * and/or sell copies of the Software, and to permit persons to whom the
9
 * Software is furnished to do so, subject to the following conditions:
10
 *
11
 * The above copyright notice and this permission notice (including the next
12
 * paragraph) shall be included in all copies or substantial portions of the
13
 * Software.
14
 *
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21
 * IN THE SOFTWARE.
22
 */
23
 
24
#include "i915_drv.h"
25
#include "intel_drv.h"
6084 serge 26
#include "i915_vgpu.h"
4104 Serge 27
 
6084 serge 28
#include <linux/pm_runtime.h>
4104 Serge 29
 
6084 serge 30
#define FORCEWAKE_ACK_TIMEOUT_MS 50
31
 
4104 Serge 32
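/* Raw MMIO accessors: these read/write the register BAR directly, with no
 * forcewake handling, tracing or locking. Everything below builds on them. */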
#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
33
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))
34
 
35
#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
36
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))
37
 
38
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
39
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))
40
 
41
#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
42
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))
43
 
44
#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
45
 
6084 serge 46
static const char * const forcewake_domain_names[] = {
47
	"render",
48
	"blitter",
49
	"media",
50
};
51
 
52
const char *
53
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
54
{
55
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);
56
 
57
	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
58
		return forcewake_domain_names[id];
59
 
60
	WARN_ON(id);
61
 
62
	return "unknown";
63
}
64
 
5060 serge 65
static void
66
assert_device_not_suspended(struct drm_i915_private *dev_priv)
67
{
5354 serge 68
	WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
6084 serge 69
		  "Device suspended\n");
5060 serge 70
}
4104 Serge 71
 
6084 serge 72
static inline void
73
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
4104 Serge 74
{
6084 serge 75
	WARN_ON(d->reg_set == 0);
76
	__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
4104 Serge 77
}
78
 
6084 serge 79
static inline void
80
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
4104 Serge 81
{
6084 serge 82
	/* Deferred release via the per-domain timer is stubbed out in this
	 * port; the upstream driver arms d->timer here. */
4104 Serge 85
}
86
 
6084 serge 87
static inline void
88
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
4104 Serge 89
{
6084 serge 90
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
91
			     FORCEWAKE_KERNEL) == 0,
4104 Serge 92
			    FORCEWAKE_ACK_TIMEOUT_MS))
6084 serge 93
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
94
			  intel_uncore_forcewake_domain_to_str(d->id));
4104 Serge 95
}
96
 
6084 serge 97
static inline void
98
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
4104 Serge 99
{
6084 serge 100
	__raw_i915_write32(d->i915, d->reg_set, d->val_set);
4104 Serge 101
}
102
 
6084 serge 103
static inline void
104
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
4104 Serge 105
{
6084 serge 106
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
107
			     FORCEWAKE_KERNEL),
4104 Serge 108
			    FORCEWAKE_ACK_TIMEOUT_MS))
6084 serge 109
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
110
			  intel_uncore_forcewake_domain_to_str(d->id));
4104 Serge 111
}
112
 
6084 serge 113
static inline void
114
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
4104 Serge 115
{
6084 serge 116
	__raw_i915_write32(d->i915, d->reg_set, d->val_clear);
4104 Serge 117
}
118
 
6084 serge 119
static inline void
120
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
4104 Serge 121
{
6084 serge 122
	/* something from same cacheline, but not from the set register */
123
	if (d->reg_post)
124
		__raw_posting_read(d->i915, d->reg_post);
4104 Serge 125
}
126
 
6084 serge 127
static void
128
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
4104 Serge 129
{
6084 serge 130
	struct intel_uncore_forcewake_domain *d;
131
	enum forcewake_domain_id id;
5060 serge 132
 
6084 serge 133
	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
134
		fw_domain_wait_ack_clear(d);
135
		fw_domain_get(d);
136
		fw_domain_wait_ack(d);
137
	}
4104 Serge 138
}
139
 
6084 serge 140
static void
141
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
4104 Serge 142
{
6084 serge 143
	struct intel_uncore_forcewake_domain *d;
144
	enum forcewake_domain_id id;
4104 Serge 145
 
6084 serge 146
	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
147
		fw_domain_put(d);
148
		fw_domain_posting_read(d);
4104 Serge 149
	}
150
}
151
 
6084 serge 152
static void
153
fw_domains_posting_read(struct drm_i915_private *dev_priv)
4104 Serge 154
{
6084 serge 155
	struct intel_uncore_forcewake_domain *d;
156
	enum forcewake_domain_id id;
4104 Serge 157
 
6084 serge 158
	/* No need to do this for all domains, the first one found is enough */
159
	for_each_fw_domain(d, dev_priv, id) {
160
		fw_domain_posting_read(d);
161
		break;
4560 Serge 162
	}
4104 Serge 163
}
164
 
6084 serge 165
static void
166
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
4104 Serge 167
{
6084 serge 168
	struct intel_uncore_forcewake_domain *d;
169
	enum forcewake_domain_id id;
4560 Serge 170
 
6084 serge 171
	if (dev_priv->uncore.fw_domains == 0)
172
		return;
4560 Serge 173
 
6084 serge 174
	for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
175
		fw_domain_reset(d);
4560 Serge 176
 
6084 serge 177
	fw_domains_posting_read(dev_priv);
4104 Serge 178
}
179
 
6084 serge 180
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
4560 Serge 181
{
6084 serge 182
	/* w/a for a sporadic read returning 0 by waiting for the GT
183
	 * thread to wake up.
184
	 */
185
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
186
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
187
		DRM_ERROR("GT thread status wait timed out\n");
4560 Serge 188
}
189
 
6084 serge 190
static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
191
					      enum forcewake_domains fw_domains)
4560 Serge 192
{
6084 serge 193
	fw_domains_get(dev_priv, fw_domains);
4560 Serge 194
 
6084 serge 195
	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
196
	__gen6_gt_wait_for_thread_c0(dev_priv);
4560 Serge 197
}
198
 
6084 serge 199
static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
5354 serge 200
{
6084 serge 201
	u32 gtfifodbg;
5354 serge 202
 
6084 serge 203
	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
204
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
205
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
5354 serge 206
}
207
 
6084 serge 208
static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
209
				     enum forcewake_domains fw_domains)
5354 serge 210
{
6084 serge 211
	fw_domains_put(dev_priv, fw_domains);
212
	gen6_gt_check_fifodbg(dev_priv);
5354 serge 213
}
214
 
6084 serge 215
static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
5354 serge 216
{
6084 serge 217
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);
5354 serge 218
 
6084 serge 219
	return count & GT_FIFO_FREE_ENTRIES_MASK;
5354 serge 220
}
221
 
6084 serge 222
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
5354 serge 223
{
6084 serge 224
	int ret = 0;
5354 serge 225
 
6084 serge 226
	/* On VLV, the FIFO is shared by both SW and HW,
	 * so we need to read FREE_ENTRIES every time */
228
	if (IS_VALLEYVIEW(dev_priv->dev))
229
		dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);
5354 serge 230
 
6084 serge 231
	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
232
		int loop = 500;
233
		u32 fifo = fifo_free_entries(dev_priv);
5354 serge 234
 
6084 serge 235
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
236
			udelay(10);
237
			fifo = fifo_free_entries(dev_priv);
238
		}
239
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
240
			++ret;
241
		dev_priv->uncore.fifo_count = fifo;
5354 serge 242
	}
6084 serge 243
	dev_priv->uncore.fifo_count--;
5354 serge 244
 
6084 serge 245
	return ret;
5354 serge 246
}
247
 
6084 serge 248
static void intel_uncore_fw_release_timer(unsigned long arg)
5354 serge 249
{
6084 serge 250
	struct intel_uncore_forcewake_domain *domain = (void *)arg;
5354 serge 251
	unsigned long irqflags;
252
 
6084 serge 253
	assert_device_not_suspended(domain->i915);
5354 serge 254
 
6084 serge 255
	spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
256
	if (WARN_ON(domain->wake_count == 0))
257
		domain->wake_count++;
5354 serge 258
 
6084 serge 259
	if (--domain->wake_count == 0)
260
		domain->i915->uncore.funcs.force_wake_put(domain->i915,
261
							  1 << domain->id);
5354 serge 262
 
6084 serge 263
	spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
5354 serge 264
}
265
 
5060 serge 266
void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
4371 Serge 267
{
268
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 269
	unsigned long irqflags;
6084 serge 270
	struct intel_uncore_forcewake_domain *domain;
271
	int retry_count = 100;
272
	enum forcewake_domain_id id;
273
	enum forcewake_domains fw = 0, active_domains;
4371 Serge 274
 
5060 serge 275
	/* Hold uncore.lock across reset to prevent any register access
6084 serge 276
	 * with forcewake not set correctly. Wait until all pending
277
	 * timers are run before holding.
5060 serge 278
	 */
6084 serge 279
	while (1) {
280
		active_domains = 0;
5060 serge 281
 
6084 serge 282
		for_each_fw_domain(domain, dev_priv, id) {
283
			if (del_timer_sync(&domain->timer) == 0)
284
				continue;
5060 serge 285
 
6084 serge 286
			intel_uncore_fw_release_timer((unsigned long)domain);
287
		}
5060 serge 288
 
6084 serge 289
		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
5354 serge 290
 
6084 serge 291
		for_each_fw_domain(domain, dev_priv, id) {
292
//			if (timer_pending(&domain->timer))
//				active_domains |= (1 << id);
		}
5060 serge 295
 
6084 serge 296
		if (active_domains == 0)
297
			break;
5060 serge 298
 
6084 serge 299
		if (--retry_count == 0) {
300
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
301
			break;
302
		}
5354 serge 303
 
6084 serge 304
		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
305
		change_task();
4371 Serge 306
	}
5060 serge 307
 
6084 serge 308
	WARN_ON(active_domains);
309
 
310
	for_each_fw_domain(domain, dev_priv, id)
311
		if (domain->wake_count)
312
			fw |= 1 << id;
313
 
314
	if (fw)
315
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
316
 
317
	fw_domains_reset(dev_priv, FORCEWAKE_ALL);
318
 
319
	if (restore) { /* If reset with a user forcewake, try to restore */
5060 serge 320
		if (fw)
321
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
322
 
323
		if (IS_GEN6(dev) || IS_GEN7(dev))
324
			dev_priv->uncore.fifo_count =
6084 serge 325
				fifo_free_entries(dev_priv);
5060 serge 326
	}
327
 
6084 serge 328
	if (!restore)
329
		assert_forcewakes_inactive(dev_priv);
330
 
5060 serge 331
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
4371 Serge 332
}
333
 
6084 serge 334
static void intel_uncore_ellc_detect(struct drm_device *dev)
4104 Serge 335
{
336
	struct drm_i915_private *dev_priv = dev->dev_private;
337
 
6084 serge 338
	if ((IS_HASWELL(dev) || IS_BROADWELL(dev) ||
339
	     INTEL_INFO(dev)->gen >= 9) &&
340
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) {
4560 Serge 341
		/* The docs do not explain exactly how the calculation can be
342
		 * made. It is somewhat guessable, but for now, it's always
343
		 * 128MB.
344
		 * NB: We can't write IDICR yet because we do not have gt funcs
345
		 * set up */
346
		dev_priv->ellc_size = 128;
347
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
348
	}
6084 serge 349
}
4104 Serge 350
 
6084 serge 351
static void __intel_uncore_early_sanitize(struct drm_device *dev,
352
					  bool restore_forcewake)
353
{
354
	struct drm_i915_private *dev_priv = dev->dev_private;
355
 
356
	if (HAS_FPGA_DBG_UNCLAIMED(dev))
357
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
358
 
4560 Serge 359
	/* clear out old GT FIFO errors */
360
	if (IS_GEN6(dev) || IS_GEN7(dev))
361
		__raw_i915_write32(dev_priv, GTFIFODBG,
362
				   __raw_i915_read32(dev_priv, GTFIFODBG));
4104 Serge 363
 
6084 serge 364
	/* WaDisableShadowRegForCpd:chv */
365
	if (IS_CHERRYVIEW(dev)) {
366
		__raw_i915_write32(dev_priv, GTFIFOCTL,
367
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
368
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
369
				   GT_FIFO_CTL_RC6_POLICY_STALL);
370
	}
371
 
5060 serge 372
	intel_uncore_forcewake_reset(dev, restore_forcewake);
4104 Serge 373
}
374
 
5354 serge 375
void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
376
{
377
	__intel_uncore_early_sanitize(dev, restore_forcewake);
378
	i915_check_and_clear_faults(dev);
379
}
380
 
4104 Serge 381
void intel_uncore_sanitize(struct drm_device *dev)
382
{
383
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
384
	intel_disable_gt_powersave(dev);
385
}
386
 
6084 serge 387
static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
388
					 enum forcewake_domains fw_domains)
389
{
390
	struct intel_uncore_forcewake_domain *domain;
391
	enum forcewake_domain_id id;
392
 
393
	if (!dev_priv->uncore.funcs.force_wake_get)
394
		return;
395
 
396
	fw_domains &= dev_priv->uncore.fw_domains;
397
 
398
	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
399
		if (domain->wake_count++)
400
			fw_domains &= ~(1 << id);
401
	}
402
 
403
	if (fw_domains)
404
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
405
}
406
 
407
/**
408
 * intel_uncore_forcewake_get - grab forcewake domain references
409
 * @dev_priv: i915 device instance
410
 * @fw_domains: forcewake domains to get reference on
411
 *
412
 * This function can be used to get the GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down particular
 * forcewake domains, this function should be called at the beginning of the
 * sequence, and subsequently the references should be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all domains
 * to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
4104 Serge 419
 */
6084 serge 420
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
421
				enum forcewake_domains fw_domains)
4104 Serge 422
{
423
	unsigned long irqflags;
424
 
4560 Serge 425
	if (!dev_priv->uncore.funcs.force_wake_get)
426
		return;
427
 
6084 serge 428
	WARN_ON(dev_priv->pm.suspended);
4560 Serge 429
 
4104 Serge 430
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
6084 serge 431
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
4104 Serge 432
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
433
}
434
 
6084 serge 435
/**
436
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
437
 * @dev_priv: i915 device instance
438
 * @fw_domains: forcewake domains to get reference on
439
 *
440
 * See intel_uncore_forcewake_get(). This variant places the onus
441
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
4104 Serge 442
 */
6084 serge 443
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
444
					enum forcewake_domains fw_domains)
4104 Serge 445
{
6084 serge 446
	assert_spin_locked(&dev_priv->uncore.lock);
4104 Serge 447
 
6084 serge 448
	if (!dev_priv->uncore.funcs.force_wake_get)
449
		return;
450
 
451
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
452
}
453
 
454
static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
455
					 enum forcewake_domains fw_domains)
456
{
457
	struct intel_uncore_forcewake_domain *domain;
458
	enum forcewake_domain_id id;
459
 
4560 Serge 460
	if (!dev_priv->uncore.funcs.force_wake_put)
461
		return;
462
 
6084 serge 463
	fw_domains &= dev_priv->uncore.fw_domains;
5354 serge 464
 
6084 serge 465
	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
466
		if (WARN_ON(domain->wake_count == 0))
467
			continue;
468
 
469
		if (--domain->wake_count)
470
			continue;
471
 
472
		domain->wake_count++;
473
		fw_domain_arm_timer(domain);
5060 serge 474
	}
6084 serge 475
}
4560 Serge 476
 
6084 serge 477
/**
478
 * intel_uncore_forcewake_put - release a forcewake domain reference
479
 * @dev_priv: i915 device instance
480
 * @fw_domains: forcewake domains to put references on
 *
 * This function drops the device-level forcewake references for the
 * specified domains obtained by intel_uncore_forcewake_get().
484
 */
485
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
486
				enum forcewake_domains fw_domains)
487
{
488
	unsigned long irqflags;
4560 Serge 489
 
6084 serge 490
	if (!dev_priv->uncore.funcs.force_wake_put)
491
		return;
492
 
4104 Serge 493
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
6084 serge 494
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
4104 Serge 495
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
6084 serge 496
}
4560 Serge 497
 
6084 serge 498
/**
499
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
502
 *
503
 * See intel_uncore_forcewake_put(). This variant places the onus
504
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
505
 */
506
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
507
					enum forcewake_domains fw_domains)
508
{
509
	assert_spin_locked(&dev_priv->uncore.lock);
510
 
511
	if (!dev_priv->uncore.funcs.force_wake_put)
512
		return;
513
 
514
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
4104 Serge 515
}
516
 
6084 serge 517
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
5060 serge 518
{
6084 serge 519
	struct intel_uncore_forcewake_domain *domain;
520
	enum forcewake_domain_id id;
521
 
5060 serge 522
	if (!dev_priv->uncore.funcs.force_wake_get)
523
		return;
524
 
6084 serge 525
	for_each_fw_domain(domain, dev_priv, id)
526
		WARN_ON(domain->wake_count);
5060 serge 527
}
528
 
4104 Serge 529
/* We give fast paths for the really cool registers */
6084 serge 530
#define NEEDS_FORCE_WAKE(reg) \
4560 Serge 531
	 ((reg) < 0x40000 && (reg) != FORCEWAKE)
4104 Serge 532
 
5060 serge 533
#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))
534
 
535
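/* Per-platform offset ranges: these predicates map an MMIO offset to the
 * forcewake domain(s) that must be held while that register is accessed. */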
#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
536
	(REG_RANGE((reg), 0x2000, 0x4000) || \
537
	 REG_RANGE((reg), 0x5000, 0x8000) || \
538
	 REG_RANGE((reg), 0xB000, 0x12000) || \
539
	 REG_RANGE((reg), 0x2E000, 0x30000))
540
 
541
#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
542
	(REG_RANGE((reg), 0x12000, 0x14000) || \
543
	 REG_RANGE((reg), 0x22000, 0x24000) || \
544
	 REG_RANGE((reg), 0x30000, 0x40000))
545
 
546
#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
547
	(REG_RANGE((reg), 0x2000, 0x4000) || \
6084 serge 548
	 REG_RANGE((reg), 0x5200, 0x8000) || \
5060 serge 549
	 REG_RANGE((reg), 0x8300, 0x8500) || \
6084 serge 550
	 REG_RANGE((reg), 0xB000, 0xB480) || \
5060 serge 551
	 REG_RANGE((reg), 0xE000, 0xE800))
552
 
553
#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
554
	(REG_RANGE((reg), 0x8800, 0x8900) || \
555
	 REG_RANGE((reg), 0xD000, 0xD800) || \
556
	 REG_RANGE((reg), 0x12000, 0x14000) || \
557
	 REG_RANGE((reg), 0x1A000, 0x1C000) || \
558
	 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
6084 serge 559
	 REG_RANGE((reg), 0x30000, 0x38000))
5060 serge 560
 
561
#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
562
	(REG_RANGE((reg), 0x4000, 0x5000) || \
563
	 REG_RANGE((reg), 0x8000, 0x8300) || \
564
	 REG_RANGE((reg), 0x8500, 0x8600) || \
565
	 REG_RANGE((reg), 0x9000, 0xB000) || \
6084 serge 566
	 REG_RANGE((reg), 0xF000, 0x10000))
5060 serge 567
 
5354 serge 568
#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
569
	REG_RANGE((reg), 0xB00,  0x2000)
570
 
571
#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
572
	(REG_RANGE((reg), 0x2000, 0x2700) || \
573
	 REG_RANGE((reg), 0x3000, 0x4000) || \
574
	 REG_RANGE((reg), 0x5200, 0x8000) || \
575
	 REG_RANGE((reg), 0x8140, 0x8160) || \
576
	 REG_RANGE((reg), 0x8300, 0x8500) || \
577
	 REG_RANGE((reg), 0x8C00, 0x8D00) || \
578
	 REG_RANGE((reg), 0xB000, 0xB480) || \
579
	 REG_RANGE((reg), 0xE000, 0xE900) || \
580
	 REG_RANGE((reg), 0x24400, 0x24800))
581
 
582
#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
583
	(REG_RANGE((reg), 0x8130, 0x8140) || \
584
	 REG_RANGE((reg), 0x8800, 0x8A00) || \
585
	 REG_RANGE((reg), 0xD000, 0xD800) || \
586
	 REG_RANGE((reg), 0x12000, 0x14000) || \
587
	 REG_RANGE((reg), 0x1A000, 0x1EA00) || \
588
	 REG_RANGE((reg), 0x30000, 0x40000))
589
 
590
#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
591
	REG_RANGE((reg), 0x9400, 0x9800)
592
 
593
#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
594
	((reg) < 0x40000 &&\
595
	 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
596
	 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
597
	 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
598
	 !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
599
 
4104 Serge 600
static void
601
ilk_dummy_write(struct drm_i915_private *dev_priv)
602
{
603
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
604
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
605
	 * hence harmless to write 0 into. */
606
	__raw_i915_write32(dev_priv, MI_MODE, 0);
607
}
608
 
609
static void
5060 serge 610
hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
611
			bool before)
4104 Serge 612
{
5060 serge 613
	const char *op = read ? "reading" : "writing to";
614
	const char *when = before ? "before" : "after";
615
 
616
	if (!i915.mmio_debug)
617
		return;
618
 
4560 Serge 619
	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
5060 serge 620
		WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
621
		     when, op, reg);
4104 Serge 622
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
6084 serge 623
		i915.mmio_debug--; /* Only report the first N failures */
4104 Serge 624
	}
625
}
626
 
627
static void
5060 serge 628
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
4104 Serge 629
{
6084 serge 630
	static bool mmio_debug_once = true;
631
 
632
	if (i915.mmio_debug || !mmio_debug_once)
5060 serge 633
		return;
634
 
4560 Serge 635
	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
6084 serge 636
		DRM_DEBUG("Unclaimed register detected, "
637
			  "enabling oneshot unclaimed register reporting. "
638
			  "Please use i915.mmio_debug=N for more information.\n");
4104 Serge 639
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
6084 serge 640
		i915.mmio_debug = mmio_debug_once--;
4104 Serge 641
	}
642
}
643
 
6084 serge 644
#define GEN2_READ_HEADER(x) \
4104 Serge 645
	u##x val = 0; \
6084 serge 646
	assert_device_not_suspended(dev_priv);
4560 Serge 647
 
6084 serge 648
#define GEN2_READ_FOOTER \
4560 Serge 649
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
650
	return val
651
 
6084 serge 652
#define __gen2_read(x) \
4560 Serge 653
static u##x \
6084 serge 654
gen2_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
655
	GEN2_READ_HEADER(x); \
4560 Serge 656
	val = __raw_i915_read##x(dev_priv, reg); \
6084 serge 657
	GEN2_READ_FOOTER; \
4560 Serge 658
}
659
 
660
#define __gen5_read(x) \
661
static u##x \
662
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
6084 serge 663
	GEN2_READ_HEADER(x); \
664
	ilk_dummy_write(dev_priv); \
4560 Serge 665
	val = __raw_i915_read##x(dev_priv, reg); \
6084 serge 666
	GEN2_READ_FOOTER; \
4560 Serge 667
}
668
 
6084 serge 669
__gen5_read(8)
670
__gen5_read(16)
671
__gen5_read(32)
672
__gen5_read(64)
673
__gen2_read(8)
674
__gen2_read(16)
675
__gen2_read(32)
676
__gen2_read(64)
677
 
678
#undef __gen5_read
679
#undef __gen2_read
680
 
681
#undef GEN2_READ_FOOTER
682
#undef GEN2_READ_HEADER
683
 
684
#define GEN6_READ_HEADER(x) \
685
	unsigned long irqflags; \
686
	u##x val = 0; \
687
	assert_device_not_suspended(dev_priv); \
688
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
689
 
690
#define GEN6_READ_FOOTER \
691
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
692
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
693
	return val
694
 
695
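/* Take a reference on each requested domain that is not already awake, then
 * wake the newly referenced domains with a single force_wake_get() call. */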
static inline void __force_wake_get(struct drm_i915_private *dev_priv,
696
				    enum forcewake_domains fw_domains)
697
{
698
	struct intel_uncore_forcewake_domain *domain;
699
	enum forcewake_domain_id id;
700
 
701
	if (WARN_ON(!fw_domains))
702
		return;
703
 
704
	/* Ideally GCC would be constant-fold and eliminate this loop */
705
	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
706
		if (domain->wake_count) {
707
			fw_domains &= ~(1 << id);
708
			continue;
709
		}
710
 
711
		domain->wake_count++;
712
		fw_domain_arm_timer(domain);
713
	}
714
 
715
	if (fw_domains)
716
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
717
}
718
 
719
#define __vgpu_read(x) \
720
static u##x \
721
vgpu_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
722
	GEN6_READ_HEADER(x); \
723
	val = __raw_i915_read##x(dev_priv, reg); \
724
	GEN6_READ_FOOTER; \
725
}
726
 
4560 Serge 727
#define __gen6_read(x) \
728
static u##x \
729
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
6084 serge 730
	GEN6_READ_HEADER(x); \
5060 serge 731
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
6084 serge 732
	if (NEEDS_FORCE_WAKE(reg)) \
733
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
734
	val = __raw_i915_read##x(dev_priv, reg); \
5060 serge 735
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
6084 serge 736
	GEN6_READ_FOOTER; \
4104 Serge 737
}
738
 
4560 Serge 739
#define __vlv_read(x) \
740
static u##x \
741
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
6084 serge 742
	GEN6_READ_HEADER(x); \
743
	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) \
744
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
745
	else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) \
746
		__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
747
	val = __raw_i915_read##x(dev_priv, reg); \
748
	GEN6_READ_FOOTER; \
5060 serge 749
}
750
 
751
#define __chv_read(x) \
752
static u##x \
753
chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
6084 serge 754
	GEN6_READ_HEADER(x); \
755
	if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
756
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
757
	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
758
		__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
759
	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
760
		__force_wake_get(dev_priv, \
761
				 FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
762
	val = __raw_i915_read##x(dev_priv, reg); \
763
	GEN6_READ_FOOTER; \
4560 Serge 764
}
4104 Serge 765
 
6084 serge 766
#define SKL_NEEDS_FORCE_WAKE(reg) \
5354 serge 767
	 ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
768
 
769
#define __gen9_read(x) \
770
static u##x \
771
gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
6084 serge 772
	enum forcewake_domains fw_engine; \
773
	GEN6_READ_HEADER(x); \
774
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
775
	if (!SKL_NEEDS_FORCE_WAKE(reg)) \
776
		fw_engine = 0; \
777
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
778
		fw_engine = FORCEWAKE_RENDER; \
779
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
780
		fw_engine = FORCEWAKE_MEDIA; \
781
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
782
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
783
	else \
784
		fw_engine = FORCEWAKE_BLITTER; \
785
	if (fw_engine) \
786
		__force_wake_get(dev_priv, fw_engine); \
787
	val = __raw_i915_read##x(dev_priv, reg); \
788
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
789
	GEN6_READ_FOOTER; \
5354 serge 790
}
791
 
6084 serge 792
__vgpu_read(8)
793
__vgpu_read(16)
794
__vgpu_read(32)
795
__vgpu_read(64)
5354 serge 796
__gen9_read(8)
797
__gen9_read(16)
798
__gen9_read(32)
799
__gen9_read(64)
5060 serge 800
__chv_read(8)
801
__chv_read(16)
802
__chv_read(32)
803
__chv_read(64)
4560 Serge 804
__vlv_read(8)
805
__vlv_read(16)
806
__vlv_read(32)
807
__vlv_read(64)
808
__gen6_read(8)
809
__gen6_read(16)
810
__gen6_read(32)
811
__gen6_read(64)
812
 
5354 serge 813
#undef __gen9_read
5060 serge 814
#undef __chv_read
4560 Serge 815
#undef __vlv_read
816
#undef __gen6_read
6084 serge 817
#undef __vgpu_read
818
#undef GEN6_READ_FOOTER
819
#undef GEN6_READ_HEADER
4560 Serge 820
 
6084 serge 821
#define GEN2_WRITE_HEADER \
4560 Serge 822
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
5060 serge 823
	assert_device_not_suspended(dev_priv); \
4560 Serge 824
 
6084 serge 825
#define GEN2_WRITE_FOOTER
4560 Serge 826
 
6084 serge 827
#define __gen2_write(x) \
4560 Serge 828
static void \
6084 serge 829
gen2_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
830
	GEN2_WRITE_HEADER; \
4560 Serge 831
	__raw_i915_write##x(dev_priv, reg, val); \
6084 serge 832
	GEN2_WRITE_FOOTER; \
4560 Serge 833
}
834
 
835
#define __gen5_write(x) \
836
static void \
837
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
6084 serge 838
	GEN2_WRITE_HEADER; \
4560 Serge 839
	ilk_dummy_write(dev_priv); \
840
	__raw_i915_write##x(dev_priv, reg, val); \
6084 serge 841
	GEN2_WRITE_FOOTER; \
4560 Serge 842
}
843
 
6084 serge 844
__gen5_write(8)
845
__gen5_write(16)
846
__gen5_write(32)
847
__gen5_write(64)
848
__gen2_write(8)
849
__gen2_write(16)
850
__gen2_write(32)
851
__gen2_write(64)
852
 
853
#undef __gen5_write
854
#undef __gen2_write
855
 
856
#undef GEN2_WRITE_FOOTER
857
#undef GEN2_WRITE_HEADER
858
 
859
#define GEN6_WRITE_HEADER \
860
	unsigned long irqflags; \
861
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
862
	assert_device_not_suspended(dev_priv); \
863
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
864
 
865
#define GEN6_WRITE_FOOTER \
866
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
867
 
4560 Serge 868
#define __gen6_write(x) \
869
static void \
870
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
4104 Serge 871
	u32 __fifo_ret = 0; \
6084 serge 872
	GEN6_WRITE_HEADER; \
873
	if (NEEDS_FORCE_WAKE(reg)) { \
4104 Serge 874
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
875
	} \
4560 Serge 876
	__raw_i915_write##x(dev_priv, reg, val); \
877
	if (unlikely(__fifo_ret)) { \
878
		gen6_gt_check_fifodbg(dev_priv); \
879
	} \
6084 serge 880
	GEN6_WRITE_FOOTER; \
4560 Serge 881
}
882
 
883
#define __hsw_write(x) \
884
static void \
885
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
886
	u32 __fifo_ret = 0; \
6084 serge 887
	GEN6_WRITE_HEADER; \
888
	if (NEEDS_FORCE_WAKE(reg)) { \
4560 Serge 889
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
890
	} \
5060 serge 891
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
4104 Serge 892
	__raw_i915_write##x(dev_priv, reg, val); \
893
	if (unlikely(__fifo_ret)) { \
894
		gen6_gt_check_fifodbg(dev_priv); \
895
	} \
5060 serge 896
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
897
	hsw_unclaimed_reg_detect(dev_priv); \
6084 serge 898
	GEN6_WRITE_FOOTER; \
4104 Serge 899
}
900
 
6084 serge 901
#define __vgpu_write(x) \
902
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
903
			  off_t reg, u##x val, bool trace) { \
904
	GEN6_WRITE_HEADER; \
905
	__raw_i915_write##x(dev_priv, reg, val); \
906
	GEN6_WRITE_FOOTER; \
907
}
908
 
4560 Serge 909
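/* Writes to these registers are shadowed by the hardware on gen8, so the
 * write paths skip taking the render forcewake for them. */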
static const u32 gen8_shadowed_regs[] = {
910
	FORCEWAKE_MT,
911
	GEN6_RPNSWREQ,
912
	GEN6_RC_VIDEO_FREQ,
913
	RING_TAIL(RENDER_RING_BASE),
914
	RING_TAIL(GEN6_BSD_RING_BASE),
915
	RING_TAIL(VEBOX_RING_BASE),
916
	RING_TAIL(BLT_RING_BASE),
917
	/* TODO: Other registers are not yet used */
918
};
919
 
920
static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
921
{
922
	int i;
923
	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
924
		if (reg == gen8_shadowed_regs[i])
925
			return true;
926
 
927
	return false;
928
}
929
 
930
#define __gen8_write(x) \
931
static void \
932
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
6084 serge 933
	GEN6_WRITE_HEADER; \
5060 serge 934
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
6084 serge 935
	if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) \
936
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
4560 Serge 937
	__raw_i915_write##x(dev_priv, reg, val); \
5060 serge 938
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
939
	hsw_unclaimed_reg_detect(dev_priv); \
6084 serge 940
	GEN6_WRITE_FOOTER; \
4560 Serge 941
}
942
 
5060 serge 943
#define __chv_write(x) \
944
static void \
945
chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
946
	bool shadowed = is_gen8_shadowed(dev_priv, reg); \
6084 serge 947
	GEN6_WRITE_HEADER; \
5060 serge 948
	if (!shadowed) { \
6084 serge 949
		if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
950
			__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
951
		else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
952
			__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
953
		else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
954
			__force_wake_get(dev_priv, FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
5060 serge 955
	} \
956
	__raw_i915_write##x(dev_priv, reg, val); \
6084 serge 957
	GEN6_WRITE_FOOTER; \
5060 serge 958
}
959
 
5354 serge 960
static const u32 gen9_shadowed_regs[] = {
961
	RING_TAIL(RENDER_RING_BASE),
962
	RING_TAIL(GEN6_BSD_RING_BASE),
963
	RING_TAIL(VEBOX_RING_BASE),
964
	RING_TAIL(BLT_RING_BASE),
965
	FORCEWAKE_BLITTER_GEN9,
966
	FORCEWAKE_RENDER_GEN9,
967
	FORCEWAKE_MEDIA_GEN9,
968
	GEN6_RPNSWREQ,
969
	GEN6_RC_VIDEO_FREQ,
970
	/* TODO: Other registers are not yet used */
971
};
972
 
973
static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
974
{
975
	int i;
976
	for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
977
		if (reg == gen9_shadowed_regs[i])
978
			return true;
979
 
980
	return false;
981
}
982
 
983
#define __gen9_write(x) \
984
static void \
985
gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
986
		bool trace) { \
6084 serge 987
	enum forcewake_domains fw_engine; \
988
	GEN6_WRITE_HEADER; \
989
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
990
	if (!SKL_NEEDS_FORCE_WAKE(reg) || \
991
	    is_gen9_shadowed(dev_priv, reg)) \
992
		fw_engine = 0; \
993
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
994
		fw_engine = FORCEWAKE_RENDER; \
995
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
996
		fw_engine = FORCEWAKE_MEDIA; \
997
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
998
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
999
	else \
1000
		fw_engine = FORCEWAKE_BLITTER; \
1001
	if (fw_engine) \
1002
		__force_wake_get(dev_priv, fw_engine); \
1003
	__raw_i915_write##x(dev_priv, reg, val); \
1004
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
1005
	hsw_unclaimed_reg_detect(dev_priv); \
1006
	GEN6_WRITE_FOOTER; \
5354 serge 1007
}
1008
 
1009
__gen9_write(8)
1010
__gen9_write(16)
1011
__gen9_write(32)
1012
__gen9_write(64)
5060 serge 1013
__chv_write(8)
1014
__chv_write(16)
1015
__chv_write(32)
1016
__chv_write(64)
4560 Serge 1017
__gen8_write(8)
1018
__gen8_write(16)
1019
__gen8_write(32)
1020
__gen8_write(64)
1021
__hsw_write(8)
1022
__hsw_write(16)
1023
__hsw_write(32)
1024
__hsw_write(64)
1025
__gen6_write(8)
1026
__gen6_write(16)
1027
__gen6_write(32)
1028
__gen6_write(64)
6084 serge 1029
__vgpu_write(8)
1030
__vgpu_write(16)
1031
__vgpu_write(32)
1032
__vgpu_write(64)
4560 Serge 1033
 
5354 serge 1034
#undef __gen9_write
5060 serge 1035
#undef __chv_write
4560 Serge 1036
#undef __gen8_write
1037
#undef __hsw_write
1038
#undef __gen6_write
6084 serge 1039
#undef __vgpu_write
1040
#undef GEN6_WRITE_FOOTER
1041
#undef GEN6_WRITE_HEADER
4560 Serge 1042
 
5354 serge 1043
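/* Wire one family of accessors (gen2/gen5/gen6/vlv/chv/gen8/gen9/vgpu)
 * into dev_priv->uncore.funcs for all four access widths. */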
#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
1044
do { \
1045
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
1046
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
1047
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
1048
	dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
1049
} while (0)
1050
 
1051
#define ASSIGN_READ_MMIO_VFUNCS(x) \
1052
do { \
1053
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
1054
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
1055
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
1056
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
1057
} while (0)
1058
 
6084 serge 1059
 
1060
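/* Describe a single forcewake domain: record its set/ack/posting registers,
 * pick the set/clear/reset values for this generation and set up its timer. */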
static void fw_domain_init(struct drm_i915_private *dev_priv,
1061
			   enum forcewake_domain_id domain_id,
1062
			   u32 reg_set, u32 reg_ack)
4560 Serge 1063
{
6084 serge 1064
	struct intel_uncore_forcewake_domain *d;
1065
 
1066
	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
1067
		return;
1068
 
1069
	d = &dev_priv->uncore.fw_domain[domain_id];
1070
 
1071
	WARN_ON(d->wake_count);
1072
 
1073
	d->wake_count = 0;
1074
	d->reg_set = reg_set;
1075
	d->reg_ack = reg_ack;
1076
 
1077
	if (IS_GEN6(dev_priv)) {
1078
		d->val_reset = 0;
1079
		d->val_set = FORCEWAKE_KERNEL;
1080
		d->val_clear = 0;
1081
	} else {
1082
		/* WaRsClearFWBitsAtReset:bdw,skl */
1083
		d->val_reset = _MASKED_BIT_DISABLE(0xffff);
1084
		d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
1085
		d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
1086
	}
1087
 
1088
	if (IS_VALLEYVIEW(dev_priv))
1089
		d->reg_post = FORCEWAKE_ACK_VLV;
1090
	else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
1091
		d->reg_post = ECOBUS;
1092
	else
1093
		d->reg_post = 0;
1094
 
1095
	d->i915 = dev_priv;
1096
	d->id = domain_id;
1097
 
1098
	setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);
1099
 
1100
	dev_priv->uncore.fw_domains |= (1 << domain_id);
1101
 
1102
	fw_domain_reset(d);
1103
}
1104
 
1105
static void intel_uncore_fw_domains_init(struct drm_device *dev)
1106
{
4560 Serge 1107
	struct drm_i915_private *dev_priv = dev->dev_private;
1108
 
6084 serge 1109
	if (INTEL_INFO(dev_priv->dev)->gen <= 5)
1110
		return;
4560 Serge 1111
 
5354 serge 1112
	if (IS_GEN9(dev)) {
6084 serge 1113
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
1114
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1115
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1116
			       FORCEWAKE_RENDER_GEN9,
1117
			       FORCEWAKE_ACK_RENDER_GEN9);
1118
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
1119
			       FORCEWAKE_BLITTER_GEN9,
1120
			       FORCEWAKE_ACK_BLITTER_GEN9);
1121
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
1122
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
5354 serge 1123
	} else if (IS_VALLEYVIEW(dev)) {
6084 serge 1124
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
1125
		if (!IS_CHERRYVIEW(dev))
1126
			dev_priv->uncore.funcs.force_wake_put =
1127
				fw_domains_put_with_fifo;
1128
		else
1129
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1130
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1131
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
1132
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
1133
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
5354 serge 1134
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
6084 serge 1135
		dev_priv->uncore.funcs.force_wake_get =
1136
			fw_domains_get_with_thread_status;
6660 serge 1137
		if (IS_HASWELL(dev))
1138
			dev_priv->uncore.funcs.force_wake_put =
1139
				fw_domains_put_with_fifo;
1140
		else
6084 serge 1141
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1142
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1143
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
4560 Serge 1144
	} else if (IS_IVYBRIDGE(dev)) {
1145
		u32 ecobus;
1146
 
1147
		/* IVB configs may use multi-threaded forcewake */
1148
 
1149
		/* A small trick here - if the bios hasn't configured
1150
		 * MT forcewake, and if the device is in RC6, then
1151
		 * force_wake_mt_get will not wake the device and the
1152
		 * ECOBUS read will return zero. Which will be
1153
		 * (correctly) interpreted by the test below as MT
1154
		 * forcewake being disabled.
1155
		 */
6084 serge 1156
		dev_priv->uncore.funcs.force_wake_get =
1157
			fw_domains_get_with_thread_status;
1158
		dev_priv->uncore.funcs.force_wake_put =
1159
			fw_domains_put_with_fifo;
1160
 
1161
		/* We need to init first for ECOBUS access and then
1162
		 * determine later if we want to reinit, in case MT access is
		 * not working. At this stage we don't know which flavour this
		 * ivb is, so it is better to also reset the gen6 fw registers
1165
		 * before the ecobus check.
1166
		 */
1167
 
1168
		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
1169
		__raw_posting_read(dev_priv, ECOBUS);
1170
 
1171
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1172
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
1173
 
4560 Serge 1174
		mutex_lock(&dev->struct_mutex);
6084 serge 1175
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
4560 Serge 1176
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
6084 serge 1177
		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
4560 Serge 1178
		mutex_unlock(&dev->struct_mutex);
1179
 
6084 serge 1180
		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
4560 Serge 1181
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
1182
			DRM_INFO("when using vblank-synced partial screen updates.\n");
6084 serge 1183
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1184
				       FORCEWAKE, FORCEWAKE_ACK);
4560 Serge 1185
		}
1186
	} else if (IS_GEN6(dev)) {
1187
		dev_priv->uncore.funcs.force_wake_get =
6084 serge 1188
			fw_domains_get_with_thread_status;
4560 Serge 1189
		dev_priv->uncore.funcs.force_wake_put =
6084 serge 1190
			fw_domains_put_with_fifo;
1191
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1192
			       FORCEWAKE, FORCEWAKE_ACK);
4560 Serge 1193
	}
1194
 
6084 serge 1195
	/* All future platforms are expected to require complex power gating */
1196
	WARN_ON(dev_priv->uncore.fw_domains == 0);
1197
}
1198
 
1199
void intel_uncore_init(struct drm_device *dev)
1200
{
1201
	struct drm_i915_private *dev_priv = dev->dev_private;
1202
 
1203
	i915_check_vgpu(dev);
1204
 
1205
	intel_uncore_ellc_detect(dev);
1206
	intel_uncore_fw_domains_init(dev);
1207
	__intel_uncore_early_sanitize(dev, false);
1208
 
4560 Serge 1209
	switch (INTEL_INFO(dev)->gen) {
1210
	default:
5354 serge 1211
	case 9:
1212
		ASSIGN_WRITE_MMIO_VFUNCS(gen9);
1213
		ASSIGN_READ_MMIO_VFUNCS(gen9);
1214
		break;
1215
	case 8:
5060 serge 1216
		if (IS_CHERRYVIEW(dev)) {
5354 serge 1217
			ASSIGN_WRITE_MMIO_VFUNCS(chv);
1218
			ASSIGN_READ_MMIO_VFUNCS(chv);
5060 serge 1219
 
1220
		} else {
5354 serge 1221
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
1222
			ASSIGN_READ_MMIO_VFUNCS(gen6);
5060 serge 1223
		}
4560 Serge 1224
		break;
1225
	case 7:
1226
	case 6:
1227
		if (IS_HASWELL(dev)) {
5354 serge 1228
			ASSIGN_WRITE_MMIO_VFUNCS(hsw);
4560 Serge 1229
		} else {
5354 serge 1230
			ASSIGN_WRITE_MMIO_VFUNCS(gen6);
4560 Serge 1231
		}
1232
 
1233
		if (IS_VALLEYVIEW(dev)) {
5354 serge 1234
			ASSIGN_READ_MMIO_VFUNCS(vlv);
4560 Serge 1235
		} else {
5354 serge 1236
			ASSIGN_READ_MMIO_VFUNCS(gen6);
4560 Serge 1237
		}
1238
		break;
1239
	case 5:
5354 serge 1240
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
1241
		ASSIGN_READ_MMIO_VFUNCS(gen5);
4560 Serge 1242
		break;
1243
	case 4:
1244
	case 3:
1245
	case 2:
6084 serge 1246
		ASSIGN_WRITE_MMIO_VFUNCS(gen2);
1247
		ASSIGN_READ_MMIO_VFUNCS(gen2);
4560 Serge 1248
		break;
1249
	}
5354 serge 1250
 
6084 serge 1251
	if (intel_vgpu_active(dev)) {
1252
		ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
1253
		ASSIGN_READ_MMIO_VFUNCS(vgpu);
1254
	}
1255
 
5354 serge 1256
	i915_check_and_clear_faults(dev);
4560 Serge 1257
}
5354 serge 1258
#undef ASSIGN_WRITE_MMIO_VFUNCS
1259
#undef ASSIGN_READ_MMIO_VFUNCS
4560 Serge 1260
 
1261
void intel_uncore_fini(struct drm_device *dev)
1262
{
1263
	/* Paranoia: make sure we have disabled everything before we exit. */
1264
	intel_uncore_sanitize(dev);
5060 serge 1265
	intel_uncore_forcewake_reset(dev, false);
4560 Serge 1266
}
1267
 
5060 serge 1268
#define GEN_RANGE(l, h) GENMASK(h, l)
1269
 
4104 Serge 1270
static const struct register_whitelist {
1271
	uint64_t offset;
1272
	uint32_t size;
5060 serge 1273
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
1274
	uint32_t gen_bitmask;
4104 Serge 1275
} whitelist[] = {
5354 serge 1276
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
4104 Serge 1277
};
1278
 
1279
int i915_reg_read_ioctl(struct drm_device *dev,
1280
			void *data, struct drm_file *file)
1281
{
1282
	struct drm_i915_private *dev_priv = dev->dev_private;
1283
	struct drm_i915_reg_read *reg = data;
1284
	struct register_whitelist const *entry = whitelist;
6084 serge 1285
	unsigned size;
1286
	u64 offset;
5060 serge 1287
	int i, ret = 0;
4104 Serge 1288
 
1289
	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
6084 serge 1290
		if (entry->offset == (reg->offset & -entry->size) &&
4104 Serge 1291
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
1292
			break;
1293
	}
1294
 
1295
	if (i == ARRAY_SIZE(whitelist))
1296
		return -EINVAL;
1297
 
6084 serge 1298
	/* We use the low bits to encode extra flags as the register should
1299
	 * be naturally aligned (and those that are not so aligned merely
1300
	 * limit the available flags for that register).
1301
	 */
1302
	offset = entry->offset;
1303
	size = entry->size;
1304
	size |= reg->offset ^ offset;
1305
 
1306
	intel_runtime_pm_get(dev_priv);
1307
 
1308
	switch (size) {
1309
	case 8 | 1:
1310
		reg->val = I915_READ64_2x32(offset, offset+4);
1311
		break;
4104 Serge 1312
	case 8:
6084 serge 1313
		reg->val = I915_READ64(offset);
4104 Serge 1314
		break;
1315
	case 4:
6084 serge 1316
		reg->val = I915_READ(offset);
4104 Serge 1317
		break;
1318
	case 2:
6084 serge 1319
		reg->val = I915_READ16(offset);
4104 Serge 1320
		break;
1321
	case 1:
6084 serge 1322
		reg->val = I915_READ8(offset);
4104 Serge 1323
		break;
1324
	default:
5060 serge 1325
		ret = -EINVAL;
1326
		goto out;
4104 Serge 1327
	}
1328
 
5060 serge 1329
out:
6084 serge 1330
	intel_runtime_pm_put(dev_priv);
5060 serge 1331
	return ret;
4104 Serge 1332
}
1333
 
4560 Serge 1334
int i915_get_reset_stats_ioctl(struct drm_device *dev,
1335
			       void *data, struct drm_file *file)
4104 Serge 1336
{
1337
	struct drm_i915_private *dev_priv = dev->dev_private;
4560 Serge 1338
	struct drm_i915_reset_stats *args = data;
1339
	struct i915_ctx_hang_stats *hs;
5060 serge 1340
	struct intel_context *ctx;
4560 Serge 1341
	int ret;
4104 Serge 1342
 
4560 Serge 1343
	if (args->flags || args->pad)
1344
		return -EINVAL;
4104 Serge 1345
 
4560 Serge 1346
	ret = mutex_lock_interruptible(&dev->struct_mutex);
1347
	if (ret)
1348
		return ret;
4104 Serge 1349
 
5060 serge 1350
	ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
1351
	if (IS_ERR(ctx)) {
4560 Serge 1352
		mutex_unlock(&dev->struct_mutex);
5060 serge 1353
		return PTR_ERR(ctx);
4104 Serge 1354
	}
5060 serge 1355
	hs = &ctx->hang_stats;
4104 Serge 1356
 
4560 Serge 1357
	args->reset_count = i915_reset_count(&dev_priv->gpu_error);
4104 Serge 1358
 
4560 Serge 1359
	args->batch_active = hs->batch_active;
1360
	args->batch_pending = hs->batch_pending;
4104 Serge 1361
 
4560 Serge 1362
	mutex_unlock(&dev->struct_mutex);
1363
 
4104 Serge 1364
	return 0;
1365
}
1366
 
5354 serge 1367
static int i915_reset_complete(struct drm_device *dev)
4104 Serge 1368
{
1369
	u8 gdrst;
5354 serge 1370
	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
1371
	return (gdrst & GRDOM_RESET_STATUS) == 0;
4104 Serge 1372
}
1373
 
5354 serge 1374
static int i915_do_reset(struct drm_device *dev)
4104 Serge 1375
{
5354 serge 1376
	/* assert reset for at least 20 usec */
1377
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1378
	udelay(20);
1379
	pci_write_config_byte(dev->pdev, I915_GDRST, 0);
4104 Serge 1380
 
5354 serge 1381
	return wait_for(i915_reset_complete(dev), 500);
1382
}
5060 serge 1383
 
5354 serge 1384
static int g4x_reset_complete(struct drm_device *dev)
1385
{
1386
	u8 gdrst;
1387
	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
1388
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
1389
}
4104 Serge 1390
 
5354 serge 1391
static int g33_do_reset(struct drm_device *dev)
1392
{
1393
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1394
	return wait_for(g4x_reset_complete(dev), 500);
4104 Serge 1395
}
1396
 
5060 serge 1397
static int g4x_do_reset(struct drm_device *dev)
1398
{
1399
	struct drm_i915_private *dev_priv = dev->dev_private;
1400
	int ret;
1401
 
5354 serge 1402
	pci_write_config_byte(dev->pdev, I915_GDRST,
5060 serge 1403
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
5354 serge 1404
	ret =  wait_for(g4x_reset_complete(dev), 500);
5060 serge 1405
	if (ret)
1406
		return ret;
1407
 
1408
	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
1409
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
1410
	POSTING_READ(VDECCLK_GATE_D);
1411
 
5354 serge 1412
	pci_write_config_byte(dev->pdev, I915_GDRST,
5060 serge 1413
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
5354 serge 1414
	ret =  wait_for(g4x_reset_complete(dev), 500);
5060 serge 1415
	if (ret)
1416
		return ret;
1417
 
1418
	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
1419
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
1420
	POSTING_READ(VDECCLK_GATE_D);
1421
 
5354 serge 1422
	pci_write_config_byte(dev->pdev, I915_GDRST, 0);
5060 serge 1423
 
1424
	return 0;
1425
}
1426
 
4104 Serge 1427
static int ironlake_do_reset(struct drm_device *dev)
1428
{
1429
	struct drm_i915_private *dev_priv = dev->dev_private;
1430
	int ret;
1431
 
6084 serge 1432
	I915_WRITE(ILK_GDSR,
5060 serge 1433
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
6084 serge 1434
	ret = wait_for((I915_READ(ILK_GDSR) &
5060 serge 1435
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
4104 Serge 1436
	if (ret)
1437
		return ret;
1438
 
6084 serge 1439
	I915_WRITE(ILK_GDSR,
5060 serge 1440
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
6084 serge 1441
	ret = wait_for((I915_READ(ILK_GDSR) &
5060 serge 1442
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
1443
	if (ret)
1444
		return ret;
1445
 
6084 serge 1446
	I915_WRITE(ILK_GDSR, 0);
5060 serge 1447
 
1448
	return 0;
4104 Serge 1449
}
1450
 
1451
static int gen6_do_reset(struct drm_device *dev)
1452
{
1453
	struct drm_i915_private *dev_priv = dev->dev_private;
1454
	int	ret;
1455
 
1456
	/* Reset the chip */
1457
 
1458
	/* GEN6_GDRST is not in the gt power well, no need to check
1459
	 * for fifo space for the write or forcewake the chip for
1460
	 * the read
1461
	 */
1462
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);
1463
 
1464
	/* Spin waiting for the device to ack the reset request */
1465
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
1466
 
5060 serge 1467
	intel_uncore_forcewake_reset(dev, true);
4104 Serge 1468
 
1469
	return ret;
1470
}
1471
 
6084 serge 1472
static int wait_for_register(struct drm_i915_private *dev_priv,
1473
			     const u32 reg,
1474
			     const u32 mask,
1475
			     const u32 value,
1476
			     const unsigned long timeout_ms)
4104 Serge 1477
{
6084 serge 1478
	return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
1479
}
1480
 
1481
static int gen8_do_reset(struct drm_device *dev)
1482
{
1483
	struct drm_i915_private *dev_priv = dev->dev_private;
1484
	struct intel_engine_cs *engine;
1485
	int i;
1486
 
1487
	for_each_ring(engine, dev_priv, i) {
1488
		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
1489
			   _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
1490
 
1491
		if (wait_for_register(dev_priv,
1492
				      RING_RESET_CTL(engine->mmio_base),
1493
				      RESET_CTL_READY_TO_RESET,
1494
				      RESET_CTL_READY_TO_RESET,
1495
				      700)) {
1496
			DRM_ERROR("%s: reset request timeout\n", engine->name);
1497
			goto not_ready;
1498
		}
1499
	}
1500
 
1501
	return gen6_do_reset(dev);
1502
 
1503
not_ready:
1504
	for_each_ring(engine, dev_priv, i)
1505
		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
1506
			   _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
1507
 
1508
	return -EIO;
1509
}
1510
 
1511
static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *)
1512
{
1513
	if (!i915.reset)
1514
		return NULL;
1515
 
1516
	if (INTEL_INFO(dev)->gen >= 8)
1517
		return gen8_do_reset;
1518
	else if (INTEL_INFO(dev)->gen >= 6)
1519
		return gen6_do_reset;
5060 serge 1520
	else if (IS_GEN5(dev))
6084 serge 1521
		return ironlake_do_reset;
5060 serge 1522
	else if (IS_G4X(dev))
6084 serge 1523
		return g4x_do_reset;
5354 serge 1524
	else if (IS_G33(dev))
6084 serge 1525
		return g33_do_reset;
5354 serge 1526
	else if (INTEL_INFO(dev)->gen >= 3)
6084 serge 1527
		return i915_do_reset;
1528
	else
1529
		return NULL;
1530
}
1531
 
1532
int intel_gpu_reset(struct drm_device *dev)
1533
{
1534
	struct drm_i915_private *dev_priv = to_i915(dev);
1535
	int (*reset)(struct drm_device *);
1536
	int ret;
1537
 
1538
	reset = intel_get_gpu_reset(dev);
1539
	if (reset == NULL)
5060 serge 1540
		return -ENODEV;
6084 serge 1541
 
1542
	/* If the power well sleeps during the reset, the reset
1543
	 * request may be dropped and never completes (causing -EIO).
1544
	 */
1545
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1546
	ret = reset(dev);
1547
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1548
 
1549
	return ret;
4104 Serge 1550
}
1551
 
6084 serge 1552
bool intel_has_gpu_reset(struct drm_device *dev)
1553
{
1554
	return intel_get_gpu_reset(dev) != NULL;
1555
}
1556
 
4104 Serge 1557
void intel_uncore_check_errors(struct drm_device *dev)
1558
{
1559
	struct drm_i915_private *dev_priv = dev->dev_private;
1560
 
1561
	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
1562
	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
1563
		DRM_ERROR("Unclaimed register before interrupt\n");
1564
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
1565
	}
1566
}