/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#include <linux/pm_runtime.h>	/* target was eaten by HTML escaping; upstream includes pm_runtime.h here */
 
#define FORCEWAKE_ACK_TIMEOUT_MS 50

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))

static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	WARN_ON(!i915_mmio_reg_valid(d->reg_set));
	__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	/* This port has no per-domain release timer: the extra wake
	 * reference taken on the timer's behalf is dropped later by
	 * intel_uncore_forcewake_reset(). */
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_set);
}

static inline void
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}

static inline void
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
	/* something from same cacheline, but not from the set register */
	if (i915_mmio_reg_valid(d->reg_post))
		__raw_posting_read(d->i915, d->reg_post);
}

static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
		fw_domain_wait_ack(d);
	}
}
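
/*
 * The handshake in fw_domains_get() above is: wait for any previous kernel
 * ack to clear (an earlier put may still be draining), write the domain's
 * val_set, then wait for the hardware to assert FORCEWAKE_KERNEL in the
 * ack register before any MMIO behind that power well is trusted.
 */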

static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
		fw_domain_put(d);
		fw_domain_posting_read(d);
	}
}

static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	/* No need to do this for all domains, just the first one found */
	for_each_fw_domain(d, dev_priv, id) {
		fw_domain_posting_read(d);
		break;
	}
}

static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	if (dev_priv->uncore.fw_domains == 0)
		return;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
		fw_domain_reset(d);

	fw_domains_posting_read(dev_priv);
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	fw_domains_put(dev_priv, fw_domains);
	gen6_gt_check_fifodbg(dev_priv);
}

static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, the FIFO is shared by both SW and HW,
	 * so we need to read FREE_ENTRIES every time. */
	if (IS_VALLEYVIEW(dev_priv->dev))
		dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = fifo_free_entries(dev_priv);

		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = fifo_free_entries(dev_priv);
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}
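
/*
 * Worked example for the FIFO bookkeeping above (numbers are illustrative,
 * assuming the usual reserved watermark of 20 entries): a cached fifo_count
 * of 5 is below the watermark and forces a re-read; if GTFIFOCTL then
 * reports 64 free entries we store 64, decrement to 63 for the write we are
 * about to post, and return 0.  Only if the hardware stays at or below the
 * watermark for the whole 500 x 10us polling window does the function WARN
 * and report a failure via ret.
 */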

static void intel_uncore_fw_release_timer(unsigned long arg)
{
	struct intel_uncore_forcewake_domain *domain = (void *)arg;
	unsigned long irqflags;

	assert_rpm_device_not_suspended(domain->i915);

	spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0)
		domain->i915->uncore.funcs.force_wake_put(domain->i915,
							  1 << domain->id);

	spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
}

void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domain_id id;
	enum forcewake_domains fw = 0, active_domains;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		active_domains = 0;

		for_each_fw_domain(domain, dev_priv, id) {
			if (del_timer_sync(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer((unsigned long)domain);
		}

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		for_each_fw_domain(domain, dev_priv, id) {
			/* timer_pending() is not available in this port, so
			 * no domain is ever reported as still active here. */
//			if (timer_pending(&domain->timer))
//				active_domains |= (1 << id);
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
		change_task();	/* KolibriOS: yield the CPU, like cond_resched() */
	}

	WARN_ON(active_domains);

	for_each_fw_domain(domain, dev_priv, id)
		if (domain->wake_count)
			fw |= 1 << id;

	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, FORCEWAKE_ALL);

	if (restore) { /* If reset with a user forcewake, try to restore */
		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				fifo_free_entries(dev_priv);
	}

	if (!restore)
		assert_forcewakes_inactive(dev_priv);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void intel_uncore_ellc_detect(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if ((IS_HASWELL(dev) || IS_BROADWELL(dev) ||
	     INTEL_INFO(dev)->gen >= 9) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}
}

static void __intel_uncore_early_sanitize(struct drm_device *dev,
					  bool restore_forcewake)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(dev)) {
		__raw_i915_write32(dev_priv, GTFIFOCTL,
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				   GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	intel_uncore_forcewake_reset(dev, restore_forcewake);
}

void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
	__intel_uncore_early_sanitize(dev, restore_forcewake);
	i915_check_and_clear_faults(dev);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);
}

static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (domain->wake_count++)
			fw_domains &= ~(1 << id);
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get the GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down a particular
 * forcewake domain, this function should be called at the beginning of the
 * sequence, and the reference should subsequently be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, in which case @fw_domains would be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
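
/*
 * Usage sketch (illustrative only, not an extra code path): holding the
 * render well awake across a multi-register sequence, where per-access
 * handling would let the well power down between the accesses:
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_RENDER);
 *	... several I915_READ()/I915_WRITE() calls ...
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_RENDER);
 */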

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(dev_priv, fw_domains);
}

static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count)
			continue;

		domain->wake_count++;
		fw_domain_arm_timer(domain);
	}
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * This function drops the device-level forcewakes for the specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(dev_priv, fw_domains);
}

void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	for_each_fw_domain(domain, dev_priv, id)
		WARN_ON(domain->wake_count);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0xB000, 0x12000) || \
	 REG_RANGE((reg), 0x2E000, 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x22000, 0x24000) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE800))

#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8800, 0x8900) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1C000) || \
	 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x38000))

#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x4000, 0x5000) || \
	 REG_RANGE((reg), 0x8000, 0x8300) || \
	 REG_RANGE((reg), 0x8500, 0x8600) || \
	 REG_RANGE((reg), 0x9000, 0xB000) || \
	 REG_RANGE((reg), 0xF000, 0x10000))

#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0xB00, 0x2000)

#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x2700) || \
	 REG_RANGE((reg), 0x3000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8140, 0x8160) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0x8C00, 0x8D00) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE900) || \
	 REG_RANGE((reg), 0x24400, 0x24800))

#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8130, 0x8140) || \
	 REG_RANGE((reg), 0x8800, 0x8A00) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0x9400, 0x9800)

#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
	((reg) < 0x40000 && \
	 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
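
/*
 * Worked examples for the gen9 range tables above (offsets as illustration):
 * 0x2030 (RING_TAIL of the render ring) falls in 0x2000-0x2700 and takes
 * FORCEWAKE_RENDER; 0x12000 falls in a media range and takes
 * FORCEWAKE_MEDIA; 0x9400 is in the common range and takes both wells;
 * 0xB00-0x2000 is always-on "uncore" space needing no forcewake at all;
 * any other offset below 0x40000 defaults to FORCEWAKE_BLITTER.
 */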

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv,
			i915_reg_t reg, bool read, bool before)
{
	const char *op = read ? "reading" : "writing to";
	const char *when = before ? "before" : "after";

	if (!i915.mmio_debug)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
		     when, op, i915_mmio_reg_offset(reg));
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
		i915.mmio_debug--; /* Only report the first N failures */
	}
}

static void
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
{
	static bool mmio_debug_once = true;

	if (i915.mmio_debug || !mmio_debug_once)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_DEBUG("Unclaimed register detected, "
			  "enabling oneshot unclaimed register reporting. "
			  "Please use i915.mmio_debug=N for more information.\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
		i915.mmio_debug = mmio_debug_once--;
	}
}

#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define GEN6_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

static inline void __force_wake_get(struct drm_i915_private *dev_priv,
				    enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (WARN_ON(!fw_domains))
		return;

	/* Ideally GCC would constant-fold and eliminate this loop */
	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (domain->wake_count) {
			fw_domains &= ~(1 << id);
			continue;
		}

		domain->wake_count++;
		fw_domain_arm_timer(domain);
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
	if (NEEDS_FORCE_WAKE(offset)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	val = __raw_i915_read##x(dev_priv, reg); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
	GEN6_READ_FOOTER; \
}
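
/*
 * For reference, a rough hand-expanded sketch of __gen6_read(32) above
 * (the real body is generated by the preprocessor):
 *
 *	static u32
 *	gen6_read32(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace)
 *	{
 *		u32 offset = i915_mmio_reg_offset(reg);
 *		unsigned long irqflags;
 *		u32 val = 0;
 *		assert_rpm_wakelock_held(dev_priv);
 *		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 *		hsw_unclaimed_reg_debug(dev_priv, reg, true, true);
 *		if (NEEDS_FORCE_WAKE(offset))
 *			__force_wake_get(dev_priv, FORCEWAKE_RENDER);
 *		val = __raw_i915_read32(dev_priv, reg);
 *		hsw_unclaimed_reg_debug(dev_priv, reg, true, false);
 *		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 *		trace_i915_reg_rw(false, reg, val, sizeof(val), trace);
 *		return val;
 *	}
 */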

#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine = 0; \
	GEN6_READ_HEADER(x); \
	if (!NEEDS_FORCE_WAKE(offset)) \
		fw_engine = 0; \
	else if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_MEDIA; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine = 0; \
	GEN6_READ_HEADER(x); \
	if (!NEEDS_FORCE_WAKE(offset)) \
		fw_engine = 0; \
	else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define SKL_NEEDS_FORCE_WAKE(reg) \
	((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))

#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
	if (!SKL_NEEDS_FORCE_WAKE(offset)) \
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
	GEN6_READ_FOOTER; \
}

__gen9_read(8)
__gen9_read(16)
__gen9_read(32)
__gen9_read(64)
__chv_read(8)
__chv_read(16)
__chv_read(32)
__chv_read(64)
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __gen9_read
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define VGPU_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define VGPU_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	VGPU_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	VGPU_READ_FOOTER; \
}

__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)

#undef __vgpu_read
#undef VGPU_READ_FOOTER
#undef VGPU_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)
__gen2_write(64)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define GEN6_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	GEN6_WRITE_FOOTER; \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	GEN6_WRITE_FOOTER; \
}

static const i915_reg_t gen8_shadowed_regs[] = {
	FORCEWAKE_MT,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv,
			     i915_reg_t reg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (i915_mmio_reg_equal(reg, gen8_shadowed_regs[i]))
			return true;

	return false;
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(dev_priv, reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	__raw_i915_write##x(dev_priv, reg, val); \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	GEN6_WRITE_FOOTER; \
}

#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine = 0; \
	GEN6_WRITE_HEADER; \
	if (!NEEDS_FORCE_WAKE(offset) || \
	    is_gen8_shadowed(dev_priv, reg)) \
		fw_engine = 0; \
	else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

static const i915_reg_t gen9_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	FORCEWAKE_BLITTER_GEN9,
	FORCEWAKE_RENDER_GEN9,
	FORCEWAKE_MEDIA_GEN9,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	/* TODO: Other registers are not yet used */
};

static bool is_gen9_shadowed(struct drm_i915_private *dev_priv,
			     i915_reg_t reg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
		if (i915_mmio_reg_equal(reg, gen9_shadowed_regs[i]))
			return true;

	return false;
}

#define __gen9_write(x) \
static void \
gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
		bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	if (!SKL_NEEDS_FORCE_WAKE(offset) || \
	    is_gen9_shadowed(dev_priv, reg)) \
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	GEN6_WRITE_FOOTER; \
}

__gen9_write(8)
__gen9_write(16)
__gen9_write(32)
__gen9_write(64)
__chv_write(8)
__chv_write(16)
__chv_write(32)
__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)

#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define VGPU_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define VGPU_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __vgpu_write(x) \
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
			  i915_reg_t reg, u##x val, bool trace) { \
	VGPU_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	VGPU_WRITE_FOOTER; \
}

__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)
__vgpu_write(64)

#undef __vgpu_write
#undef VGPU_WRITE_FOOTER
#undef VGPU_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
	dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)
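
/*
 * These helpers only splice a prefix into the function names generated by
 * the read/write macros above; e.g. ASSIGN_READ_MMIO_VFUNCS(gen6) wires
 * gen6_read8/gen6_read16/gen6_read32/gen6_read64 into dev_priv->uncore.funcs,
 * as done per platform in intel_uncore_init() below.
 */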

static void fw_domain_init(struct drm_i915_private *dev_priv,
			   enum forcewake_domain_id domain_id,
			   i915_reg_t reg_set,
			   i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &dev_priv->uncore.fw_domain[domain_id];

	WARN_ON(d->wake_count);

	d->wake_count = 0;
	d->reg_set = reg_set;
	d->reg_ack = reg_ack;

	if (IS_GEN6(dev_priv)) {
		d->val_reset = 0;
		d->val_set = FORCEWAKE_KERNEL;
		d->val_clear = 0;
	} else {
		/* WaRsClearFWBitsAtReset:bdw,skl */
		d->val_reset = _MASKED_BIT_DISABLE(0xffff);
		d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
		d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		d->reg_post = FORCEWAKE_ACK_VLV;
	else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
		d->reg_post = ECOBUS;

	d->i915 = dev_priv;
	d->id = domain_id;

	setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);

	dev_priv->uncore.fw_domains |= (1 << domain_id);

	fw_domain_reset(d);
}
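
/*
 * Worked example for the masked-bit values above, assuming the usual i915
 * _MASKED_FIELD() encoding where the high 16 bits select which low bits the
 * write may touch: with FORCEWAKE_KERNEL == (1 << 0),
 *
 *	d->val_reset = _MASKED_BIT_DISABLE(0xffff)           == 0xffff0000
 *	d->val_set   = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)  == 0x00010001
 *	d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL) == 0x00010000
 *
 * so a single write can set or clear the kernel wake bit without a
 * read-modify-write of the other forcewake bits.
 */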

static void intel_uncore_fw_domains_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev_priv->dev)->gen <= 5)
		return;

	if (IS_GEN9(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		if (!IS_CHERRYVIEW(dev))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		if (IS_HASWELL(dev))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the BIOS hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access is
		 * not working. At this stage we don't know which flavour this
		 * ivb is, so it is better to reset also the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
		__raw_posting_read(dev_priv, ECOBUS);

		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		mutex_lock(&dev->struct_mutex);
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(dev_priv->uncore.fw_domains == 0);
}

void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	i915_check_vgpu(dev);

	intel_uncore_ellc_detect(dev);
	intel_uncore_fw_domains_init(dev);
	__intel_uncore_early_sanitize(dev, false);

	switch (INTEL_INFO(dev)->gen) {
	default:
	case 9:
		ASSIGN_WRITE_MMIO_VFUNCS(gen9);
		ASSIGN_READ_MMIO_VFUNCS(gen9);
		break;
	case 8:
		if (IS_CHERRYVIEW(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(chv);
			ASSIGN_READ_MMIO_VFUNCS(chv);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(hsw);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen6);
		}

		if (IS_VALLEYVIEW(dev)) {
			ASSIGN_READ_MMIO_VFUNCS(vlv);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 5:
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
		ASSIGN_READ_MMIO_VFUNCS(gen5);
		break;
	case 4:
	case 3:
	case 2:
		ASSIGN_WRITE_MMIO_VFUNCS(gen2);
		ASSIGN_READ_MMIO_VFUNCS(gen2);
		break;
	}

	if (intel_vgpu_active(dev)) {
		ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
		ASSIGN_READ_MMIO_VFUNCS(vgpu);
	}

	i915_check_and_clear_faults(dev);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS

void intel_uncore_fini(struct drm_device *dev)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
	intel_uncore_forcewake_reset(dev, false);
}

#define GEN_RANGE(l, h) GENMASK(h, l)

static const struct register_whitelist {
	i915_reg_t offset_ldw, offset_udw;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	  .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	  .size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
};

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	unsigned size;
	i915_reg_t offset_ldw, offset_udw;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	/* We use the low bits to encode extra flags as the register should
	 * be naturally aligned (and those that are not so aligned merely
	 * limit the available flags for that register).
	 */
	offset_ldw = entry->offset_ldw;
	offset_udw = entry->offset_udw;
	size = entry->size;
	size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw);

	intel_runtime_pm_get(dev_priv);

	switch (size) {
	case 8 | 1:
		reg->val = I915_READ64_2x32(offset_ldw, offset_udw);
		break;
	case 8:
		reg->val = I915_READ64(offset_ldw);
		break;
	case 4:
		reg->val = I915_READ(offset_ldw);
		break;
	case 2:
		reg->val = I915_READ16(offset_ldw);
		break;
	case 1:
		reg->val = I915_READ8(offset_ldw);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
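
/*
 * Worked example for the flag encoding above: the render RING_TIMESTAMP
 * entry has offset 0x2358 and size 8, so a userspace reg->offset of 0x2358
 * yields size == 8 (a plain 64-bit read), while 0x2359 matches the same
 * entry (0x2359 & -8 == 0x2358) but yields size == (8 | 1) and selects the
 * I915_READ64_2x32() path that reads the low and high dwords separately.
 */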

int i915_get_reset_stats_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct intel_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_device *dev)
{
	/* assert reset for at least 20 usec */
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(20);
	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return wait_for(i915_reset_complete(dev), 500);
}

static int g4x_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_device *dev)
{
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(dev), 500);
}

static int g4x_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR, 0);

	return 0;
}

static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	intel_uncore_forcewake_reset(dev, true);

	return ret;
}

static int wait_for_register(struct drm_i915_private *dev_priv,
			     i915_reg_t reg,
			     const u32 mask,
			     const u32 value,
			     const unsigned long timeout_ms)
{
	return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
}

static int gen8_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	int i;

	for_each_ring(engine, dev_priv, i) {
		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
			   _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));

		if (wait_for_register(dev_priv,
				      RING_RESET_CTL(engine->mmio_base),
				      RESET_CTL_READY_TO_RESET,
				      RESET_CTL_READY_TO_RESET,
				      700)) {
			DRM_ERROR("%s: reset request timeout\n", engine->name);
			goto not_ready;
		}
	}

	return gen6_do_reset(dev);

not_ready:
	for_each_ring(engine, dev_priv, i)
		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
			   _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));

	return -EIO;
}
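
/*
 * Summary of the gen8 flow above: each engine is first asked to quiesce via
 * a masked write of RESET_CTL_REQUEST_RESET and must report
 * RESET_CTL_READY_TO_RESET within 700ms; only then is the legacy full soft
 * reset from gen6_do_reset() issued.  On any timeout the pending requests
 * are withdrawn and -EIO is returned rather than resetting half-ready
 * engines.
 */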

static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *)
{
	if (!i915.reset)
		return NULL;

	if (INTEL_INFO(dev)->gen >= 8)
		return gen8_do_reset;
	else if (INTEL_INFO(dev)->gen >= 6)
		return gen6_do_reset;
	else if (IS_GEN5(dev))
		return ironlake_do_reset;
	else if (IS_G4X(dev))
		return g4x_do_reset;
	else if (IS_G33(dev))
		return g33_do_reset;
	else if (INTEL_INFO(dev)->gen >= 3)
		return i915_do_reset;
	else
		return NULL;
}

int intel_gpu_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int (*reset)(struct drm_device *);
	int ret;

	reset = intel_get_gpu_reset(dev);
	if (reset == NULL)
		return -ENODEV;

	/* If the power well sleeps during the reset, the reset
	 * request may be dropped and never complete (causing -EIO).
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	ret = reset(dev);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

bool intel_has_gpu_reset(struct drm_device *dev)
{
	return intel_get_gpu_reset(dev) != NULL;
}

void intel_uncore_check_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}