Rev Author Line No. Line
4104 Serge 1
/*
2
 * Copyright © 2013 Intel Corporation
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 * copy of this software and associated documentation files (the "Software"),
6
 * to deal in the Software without restriction, including without limitation
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
 * and/or sell copies of the Software, and to permit persons to whom the
9
 * Software is furnished to do so, subject to the following conditions:
10
 *
11
 * The above copyright notice and this permission notice (including the next
12
 * paragraph) shall be included in all copies or substantial portions of the
13
 * Software.
14
 *
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21
 * IN THE SOFTWARE.
22
 */
23
 
24
#include "i915_drv.h"
25
#include "intel_drv.h"
6084 serge 26
#include "i915_vgpu.h"
4104 Serge 27
 
6084 serge 28
#include <linux/pm_runtime.h>
4104 Serge 29
 
6084 serge 30
#define FORCEWAKE_ACK_TIMEOUT_MS 50
31
 
6937 serge 32
#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))
4104 Serge 33
 
6084 serge 34
static const char * const forcewake_domain_names[] = {
35
	"render",
36
	"blitter",
37
	"media",
38
};
39
 
40
const char *
41
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
42
{
43
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);
44
 
45
	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
46
		return forcewake_domain_names[id];
47
 
48
	WARN_ON(id);
49
 
50
	return "unknown";
51
}
52
 
53
static inline void
54
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
4104 Serge 55
{
6937 serge 56
	WARN_ON(!i915_mmio_reg_valid(d->reg_set));
6084 serge 57
	__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
4104 Serge 58
}
59
 
6084 serge 60
static inline void
61
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
4104 Serge 62
{
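	/* Note (added): in this KolibriOS port the upstream deferred-release
	 * timer is not armed here (the commented-out lines below look like
	 * leftovers), so a wake reference re-taken by the put path is only
	 * dropped again by intel_uncore_forcewake_reset(). */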
6084 serge 63
//	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
64
//	/* something from same cacheline, but !FORCEWAKE */
65
//	__raw_posting_read(dev_priv, ECOBUS);
4104 Serge 66
}
67
 
6084 serge 68
static inline void
69
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
4104 Serge 70
{
6084 serge 71
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
72
			     FORCEWAKE_KERNEL) == 0,
4104 Serge 73
			    FORCEWAKE_ACK_TIMEOUT_MS))
6084 serge 74
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
75
			  intel_uncore_forcewake_domain_to_str(d->id));
4104 Serge 76
}
77
 
6084 serge 78
static inline void
79
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
4104 Serge 80
{
6084 serge 81
	__raw_i915_write32(d->i915, d->reg_set, d->val_set);
4104 Serge 82
}
83
 
6084 serge 84
static inline void
85
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
4104 Serge 86
{
6084 serge 87
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
88
			     FORCEWAKE_KERNEL),
4104 Serge 89
			    FORCEWAKE_ACK_TIMEOUT_MS))
6084 serge 90
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
91
			  intel_uncore_forcewake_domain_to_str(d->id));
4104 Serge 92
}
93
 
6084 serge 94
static inline void
95
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
4104 Serge 96
{
6084 serge 97
	__raw_i915_write32(d->i915, d->reg_set, d->val_clear);
4104 Serge 98
}
99
 
6084 serge 100
static inline void
101
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
4104 Serge 102
{
6084 serge 103
	/* something from same cacheline, but not from the set register */
6937 serge 104
	if (i915_mmio_reg_valid(d->reg_post))
6084 serge 105
		__raw_posting_read(d->i915, d->reg_post);
4104 Serge 106
}
107
 
6084 serge 108
static void
109
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
4104 Serge 110
{
6084 serge 111
	struct intel_uncore_forcewake_domain *d;
112
	enum forcewake_domain_id id;
5060 serge 113
 
6084 serge 114
	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
115
		fw_domain_wait_ack_clear(d);
116
		fw_domain_get(d);
117
		fw_domain_wait_ack(d);
118
	}
4104 Serge 119
}
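/*
 * Illustrative sketch (added, not part of the driver): the "get" path
 * above is a three-step handshake against each domain's set/ack register
 * pair.  For a single domain 'd' the round trip is:
 */
#if 0
	fw_domain_wait_ack_clear(d);	/* ack bit low: previous owner released it */
	fw_domain_get(d);		/* write d->val_set to d->reg_set */
	fw_domain_wait_ack(d);		/* ack bit high: domain is powered up */
	/* ... register accesses that need the domain awake ... */
	fw_domain_put(d);		/* write d->val_clear to d->reg_set */
#endif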
120
 
6084 serge 121
static void
122
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
4104 Serge 123
{
6084 serge 124
	struct intel_uncore_forcewake_domain *d;
125
	enum forcewake_domain_id id;
4104 Serge 126
 
6084 serge 127
	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
128
		fw_domain_put(d);
129
		fw_domain_posting_read(d);
4104 Serge 130
	}
131
}
132
 
6084 serge 133
static void
134
fw_domains_posting_read(struct drm_i915_private *dev_priv)
4104 Serge 135
{
6084 serge 136
	struct intel_uncore_forcewake_domain *d;
137
	enum forcewake_domain_id id;
4104 Serge 138
 
6084 serge 139
	/* No need to do this for all domains, just the first one found */
140
	for_each_fw_domain(d, dev_priv, id) {
141
		fw_domain_posting_read(d);
142
		break;
4560 Serge 143
	}
4104 Serge 144
}
145
 
6084 serge 146
static void
147
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
4104 Serge 148
{
6084 serge 149
	struct intel_uncore_forcewake_domain *d;
150
	enum forcewake_domain_id id;
4560 Serge 151
 
6084 serge 152
	if (dev_priv->uncore.fw_domains == 0)
153
		return;
4560 Serge 154
 
6084 serge 155
	for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
156
		fw_domain_reset(d);
4560 Serge 157
 
6084 serge 158
	fw_domains_posting_read(dev_priv);
4104 Serge 159
}
160
 
6084 serge 161
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
4560 Serge 162
{
6084 serge 163
	/* w/a for a sporadic read returning 0 by waiting for the GT
164
	 * thread to wake up.
165
	 */
166
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
167
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
168
		DRM_ERROR("GT thread status wait timed out\n");
4560 Serge 169
}
170
 
6084 serge 171
static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
172
					      enum forcewake_domains fw_domains)
4560 Serge 173
{
6084 serge 174
	fw_domains_get(dev_priv, fw_domains);
4560 Serge 175
 
6084 serge 176
	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
177
	__gen6_gt_wait_for_thread_c0(dev_priv);
4560 Serge 178
}
179
 
6084 serge 180
static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
5354 serge 181
{
6084 serge 182
	u32 gtfifodbg;
5354 serge 183
 
6084 serge 184
	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
185
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
186
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
5354 serge 187
}
188
 
6084 serge 189
static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
190
				     enum forcewake_domains fw_domains)
5354 serge 191
{
6084 serge 192
	fw_domains_put(dev_priv, fw_domains);
193
	gen6_gt_check_fifodbg(dev_priv);
5354 serge 194
}
195
 
6084 serge 196
static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
5354 serge 197
{
6084 serge 198
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);
5354 serge 199
 
6084 serge 200
	return count & GT_FIFO_FREE_ENTRIES_MASK;
5354 serge 201
}
202
 
6084 serge 203
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
5354 serge 204
{
6084 serge 205
	int ret = 0;
5354 serge 206
 
6084 serge 207
	/* On VLV, FIFO will be shared by both SW and HW.
208
	 * So, we need to read the FREE_ENTRIES every time. */
209
	if (IS_VALLEYVIEW(dev_priv->dev))
210
		dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);
5354 serge 211
 
6084 serge 212
	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
213
		int loop = 500;
214
		u32 fifo = fifo_free_entries(dev_priv);
5354 serge 215
 
6084 serge 216
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
217
			udelay(10);
218
			fifo = fifo_free_entries(dev_priv);
219
		}
220
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
221
			++ret;
222
		dev_priv->uncore.fifo_count = fifo;
5354 serge 223
	}
6084 serge 224
	dev_priv->uncore.fifo_count--;
5354 serge 225
 
6084 serge 226
	return ret;
5354 serge 227
}
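/*
 * Illustrative sketch (added): on gen6/gen7 every posted MMIO write
 * consumes a GT FIFO entry, so the gen6/hsw write paths further down call
 * __gen6_gt_wait_for_fifo() before the raw write and check GTFIFODBG
 * afterwards if it reported trouble.  'reg' and 'val' below are
 * placeholders for an arbitrary register and value:
 */
#if 0
	u32 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv);	/* reserve an entry */
	__raw_i915_write32(dev_priv, reg, val);
	if (unlikely(__fifo_ret))
		gen6_gt_check_fifodbg(dev_priv);		/* report FIFO errors */
#endif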
228
 
6084 serge 229
static void intel_uncore_fw_release_timer(unsigned long arg)
5354 serge 230
{
6084 serge 231
	struct intel_uncore_forcewake_domain *domain = (void *)arg;
5354 serge 232
	unsigned long irqflags;
233
 
6937 serge 234
	assert_rpm_device_not_suspended(domain->i915);
5354 serge 235
 
6084 serge 236
	spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
237
	if (WARN_ON(domain->wake_count == 0))
238
		domain->wake_count++;
5354 serge 239
 
6084 serge 240
	if (--domain->wake_count == 0)
241
		domain->i915->uncore.funcs.force_wake_put(domain->i915,
242
							  1 << domain->id);
5354 serge 243
 
6084 serge 244
	spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
5354 serge 245
}
246
 
5060 serge 247
void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
4371 Serge 248
{
249
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 250
	unsigned long irqflags;
6084 serge 251
	struct intel_uncore_forcewake_domain *domain;
252
	int retry_count = 100;
253
	enum forcewake_domain_id id;
254
	enum forcewake_domains fw = 0, active_domains;
4371 Serge 255
 
5060 serge 256
	/* Hold uncore.lock across reset to prevent any register access
6084 serge 257
	 * with forcewake not set correctly. Wait until all pending
258
	 * timers are run before holding.
5060 serge 259
	 */
6084 serge 260
	while (1) {
261
		active_domains = 0;
5060 serge 262
 
6084 serge 263
		for_each_fw_domain(domain, dev_priv, id) {
264
			if (del_timer_sync(&domain->timer) == 0)
265
				continue;
5060 serge 266
 
6084 serge 267
			intel_uncore_fw_release_timer((unsigned long)domain);
268
		}
5060 serge 269
 
6084 serge 270
		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
5354 serge 271
 
6084 serge 272
		for_each_fw_domain(domain, dev_priv, id) {
273
//           if (timer_pending(&domain->timer))
274
//				active_domains |= (1 << id);
275
		}
5060 serge 276
 
6084 serge 277
		if (active_domains == 0)
278
			break;
5060 serge 279
 
6084 serge 280
		if (--retry_count == 0) {
281
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
282
			break;
283
		}
5354 serge 284
 
6084 serge 285
		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
286
        change_task();
4371 Serge 287
	}
5060 serge 288
 
6084 serge 289
	WARN_ON(active_domains);
290
 
291
	for_each_fw_domain(domain, dev_priv, id)
292
		if (domain->wake_count)
293
			fw |= 1 << id;
294
 
295
	if (fw)
296
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
297
 
298
	fw_domains_reset(dev_priv, FORCEWAKE_ALL);
299
 
300
	if (restore) { /* If reset with a user forcewake, try to restore */
5060 serge 301
		if (fw)
302
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
303
 
304
		if (IS_GEN6(dev) || IS_GEN7(dev))
305
			dev_priv->uncore.fifo_count =
6084 serge 306
				fifo_free_entries(dev_priv);
5060 serge 307
	}
308
 
6084 serge 309
	if (!restore)
310
		assert_forcewakes_inactive(dev_priv);
311
 
5060 serge 312
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
4371 Serge 313
}
314
 
6084 serge 315
static void intel_uncore_ellc_detect(struct drm_device *dev)
4104 Serge 316
{
317
	struct drm_i915_private *dev_priv = dev->dev_private;
318
 
6084 serge 319
	if ((IS_HASWELL(dev) || IS_BROADWELL(dev) ||
320
	     INTEL_INFO(dev)->gen >= 9) &&
321
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) {
4560 Serge 322
		/* The docs do not explain exactly how the calculation can be
323
		 * made. It is somewhat guessable, but for now, it's always
324
		 * 128MB.
325
		 * NB: We can't write IDICR yet because we do not have gt funcs
326
		 * set up */
327
		dev_priv->ellc_size = 128;
328
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
329
	}
6084 serge 330
}
4104 Serge 331
 
7144 serge 332
static bool
333
fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
334
{
335
	u32 dbg;
336
 
337
	dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
338
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
339
		return false;
340
 
341
	__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
342
 
343
	return true;
344
}
345
 
346
static bool
347
vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
348
{
349
	u32 cer;
350
 
351
	cer = __raw_i915_read32(dev_priv, CLAIM_ER);
352
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
353
		return false;
354
 
355
	__raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);
356
 
357
	return true;
358
}
359
 
360
static bool
361
check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
362
{
363
	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
364
		return fpga_check_for_unclaimed_mmio(dev_priv);
365
 
366
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
367
		return vlv_check_for_unclaimed_mmio(dev_priv);
368
 
369
	return false;
370
}
371
 
6084 serge 372
static void __intel_uncore_early_sanitize(struct drm_device *dev,
373
					  bool restore_forcewake)
374
{
375
	struct drm_i915_private *dev_priv = dev->dev_private;
376
 
7144 serge 377
	/* clear out unclaimed reg detection bit */
378
	if (check_for_unclaimed_mmio(dev_priv))
379
		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
6084 serge 380
 
4560 Serge 381
	/* clear out old GT FIFO errors */
382
	if (IS_GEN6(dev) || IS_GEN7(dev))
383
		__raw_i915_write32(dev_priv, GTFIFODBG,
384
				   __raw_i915_read32(dev_priv, GTFIFODBG));
4104 Serge 385
 
6084 serge 386
	/* WaDisableShadowRegForCpd:chv */
387
	if (IS_CHERRYVIEW(dev)) {
388
		__raw_i915_write32(dev_priv, GTFIFOCTL,
389
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
390
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
391
				   GT_FIFO_CTL_RC6_POLICY_STALL);
392
	}
393
 
5060 serge 394
	intel_uncore_forcewake_reset(dev, restore_forcewake);
4104 Serge 395
}
396
 
5354 serge 397
void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
398
{
399
	__intel_uncore_early_sanitize(dev, restore_forcewake);
400
	i915_check_and_clear_faults(dev);
401
}
402
 
4104 Serge 403
void intel_uncore_sanitize(struct drm_device *dev)
404
{
7144 serge 405
	i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
406
 
4104 Serge 407
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
408
	intel_disable_gt_powersave(dev);
409
}
410
 
6084 serge 411
static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
412
					 enum forcewake_domains fw_domains)
413
{
414
	struct intel_uncore_forcewake_domain *domain;
415
	enum forcewake_domain_id id;
416
 
417
	if (!dev_priv->uncore.funcs.force_wake_get)
418
		return;
419
 
420
	fw_domains &= dev_priv->uncore.fw_domains;
421
 
422
	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
423
		if (domain->wake_count++)
424
			fw_domains &= ~(1 << id);
425
	}
426
 
427
	if (fw_domains)
428
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
429
}
430
 
431
/**
432
 * intel_uncore_forcewake_get - grab forcewake domain references
433
 * @dev_priv: i915 device instance
434
 * @fw_domains: forcewake domains to get reference on
435
 *
436
 * This function can be used get GT's forcewake domain references.
437
 * Normal register access will handle the forcewake domains automatically.
438
 * However if some sequence requires the GT to not power down a particular
439
 * forcewake domains this function should be called at the beginning of the
440
 * sequence. And subsequently the reference should be dropped by symmetric
441
 * call to intel_unforce_forcewake_put(). Usually caller wants all the domains
442
 * to be kept awake so the @fw_domains would be then FORCEWAKE_ALL.
4104 Serge 443
 */
6084 serge 444
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
445
				enum forcewake_domains fw_domains)
4104 Serge 446
{
447
	unsigned long irqflags;
448
 
4560 Serge 449
	if (!dev_priv->uncore.funcs.force_wake_get)
450
		return;
451
 
6937 serge 452
	assert_rpm_wakelock_held(dev_priv);
4560 Serge 453
 
4104 Serge 454
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
6084 serge 455
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
4104 Serge 456
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
457
}
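/*
 * Illustrative usage (added, hypothetical caller): holding one forcewake
 * reference across a block of GT accesses avoids paying the wake
 * handshake on every individual read/write.  'request' and 'freq' are
 * placeholder values.
 */
#if 0
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_RENDER);
	I915_WRITE(GEN6_RPNSWREQ, request);
	I915_WRITE(GEN6_RC_VIDEO_FREQ, freq);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_RENDER);
#endif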
458
 
6084 serge 459
/**
460
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
461
 * @dev_priv: i915 device instance
462
 * @fw_domains: forcewake domains to get reference on
463
 *
464
 * See intel_uncore_forcewake_get(). This variant places the onus
465
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
4104 Serge 466
 */
6084 serge 467
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
468
					enum forcewake_domains fw_domains)
4104 Serge 469
{
6084 serge 470
	assert_spin_locked(&dev_priv->uncore.lock);
4104 Serge 471
 
6084 serge 472
	if (!dev_priv->uncore.funcs.force_wake_get)
473
		return;
474
 
475
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
476
}
477
 
478
static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
479
					 enum forcewake_domains fw_domains)
480
{
481
	struct intel_uncore_forcewake_domain *domain;
482
	enum forcewake_domain_id id;
483
 
4560 Serge 484
	if (!dev_priv->uncore.funcs.force_wake_put)
485
		return;
486
 
6084 serge 487
	fw_domains &= dev_priv->uncore.fw_domains;
5354 serge 488
 
6084 serge 489
	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
490
		if (WARN_ON(domain->wake_count == 0))
491
			continue;
492
 
493
		if (--domain->wake_count)
494
			continue;
495
 
496
		domain->wake_count++;
497
		fw_domain_arm_timer(domain);
5060 serge 498
	}
6084 serge 499
}
4560 Serge 500
 
6084 serge 501
/**
502
 * intel_uncore_forcewake_put - release a forcewake domain reference
503
 * @dev_priv: i915 device instance
504
 * @fw_domains: forcewake domains to put references
505
 *
506
 * This function drops the device-level forcewake references for the specified
507
 * domains obtained by intel_uncore_forcewake_get().
508
 */
509
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
510
				enum forcewake_domains fw_domains)
511
{
512
	unsigned long irqflags;
4560 Serge 513
 
6084 serge 514
	if (!dev_priv->uncore.funcs.force_wake_put)
515
		return;
516
 
4104 Serge 517
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
6084 serge 518
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
4104 Serge 519
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
6084 serge 520
}
4560 Serge 521
 
6084 serge 522
/**
523
 * intel_uncore_forcewake_put__locked - release forcewake domain references
524
 * @dev_priv: i915 device instance
525
 * @fw_domains: forcewake domains to put references
526
 *
527
 * See intel_uncore_forcewake_put(). This variant places the onus
528
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
529
 */
530
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
531
					enum forcewake_domains fw_domains)
532
{
533
	assert_spin_locked(&dev_priv->uncore.lock);
534
 
535
	if (!dev_priv->uncore.funcs.force_wake_put)
536
		return;
537
 
538
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
4104 Serge 539
}
540
 
6084 serge 541
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
5060 serge 542
{
6084 serge 543
	struct intel_uncore_forcewake_domain *domain;
544
	enum forcewake_domain_id id;
545
 
5060 serge 546
	if (!dev_priv->uncore.funcs.force_wake_get)
547
		return;
548
 
6084 serge 549
	for_each_fw_domain(domain, dev_priv, id)
550
		WARN_ON(domain->wake_count);
5060 serge 551
}
552
 
4104 Serge 553
/* We give fast paths for the really cool registers */
6937 serge 554
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)
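/*
 * Worked example (added): with the 0x40000 cut-off above, GT registers
 * such as the render ring tail need forcewake, while display registers
 * living above the cut-off do not, since the display engine sits outside
 * the GT power well.
 */
#if 0
	NEEDS_FORCE_WAKE(0x2030);	/* RING_TAIL(RENDER_RING_BASE) -> true  */
	NEEDS_FORCE_WAKE(0x70000);	/* display pipe register       -> false */
#endif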
4104 Serge 555
 
5060 serge 556
#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))
557
 
558
#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
559
	(REG_RANGE((reg), 0x2000, 0x4000) || \
560
	 REG_RANGE((reg), 0x5000, 0x8000) || \
561
	 REG_RANGE((reg), 0xB000, 0x12000) || \
562
	 REG_RANGE((reg), 0x2E000, 0x30000))
563
 
564
#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
565
	(REG_RANGE((reg), 0x12000, 0x14000) || \
566
	 REG_RANGE((reg), 0x22000, 0x24000) || \
567
	 REG_RANGE((reg), 0x30000, 0x40000))
568
 
569
#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
570
	(REG_RANGE((reg), 0x2000, 0x4000) || \
6084 serge 571
	 REG_RANGE((reg), 0x5200, 0x8000) || \
5060 serge 572
	 REG_RANGE((reg), 0x8300, 0x8500) || \
6084 serge 573
	 REG_RANGE((reg), 0xB000, 0xB480) || \
5060 serge 574
	 REG_RANGE((reg), 0xE000, 0xE800))
575
 
576
#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
577
	(REG_RANGE((reg), 0x8800, 0x8900) || \
578
	 REG_RANGE((reg), 0xD000, 0xD800) || \
579
	 REG_RANGE((reg), 0x12000, 0x14000) || \
580
	 REG_RANGE((reg), 0x1A000, 0x1C000) || \
581
	 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
6084 serge 582
	 REG_RANGE((reg), 0x30000, 0x38000))
5060 serge 583
 
584
#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
585
	(REG_RANGE((reg), 0x4000, 0x5000) || \
586
	 REG_RANGE((reg), 0x8000, 0x8300) || \
587
	 REG_RANGE((reg), 0x8500, 0x8600) || \
588
	 REG_RANGE((reg), 0x9000, 0xB000) || \
6084 serge 589
	 REG_RANGE((reg), 0xF000, 0x10000))
5060 serge 590
 
5354 serge 591
#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
592
	REG_RANGE((reg), 0xB00,  0x2000)
593
 
594
#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
595
	(REG_RANGE((reg), 0x2000, 0x2700) || \
596
	 REG_RANGE((reg), 0x3000, 0x4000) || \
597
	 REG_RANGE((reg), 0x5200, 0x8000) || \
598
	 REG_RANGE((reg), 0x8140, 0x8160) || \
599
	 REG_RANGE((reg), 0x8300, 0x8500) || \
600
	 REG_RANGE((reg), 0x8C00, 0x8D00) || \
601
	 REG_RANGE((reg), 0xB000, 0xB480) || \
602
	 REG_RANGE((reg), 0xE000, 0xE900) || \
603
	 REG_RANGE((reg), 0x24400, 0x24800))
604
 
605
#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
606
	(REG_RANGE((reg), 0x8130, 0x8140) || \
607
	 REG_RANGE((reg), 0x8800, 0x8A00) || \
608
	 REG_RANGE((reg), 0xD000, 0xD800) || \
609
	 REG_RANGE((reg), 0x12000, 0x14000) || \
610
	 REG_RANGE((reg), 0x1A000, 0x1EA00) || \
611
	 REG_RANGE((reg), 0x30000, 0x40000))
612
 
613
#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
614
	REG_RANGE((reg), 0x9400, 0x9800)
615
 
616
#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
7144 serge 617
	((reg) < 0x40000 && \
5354 serge 618
	 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
619
	 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
620
	 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
621
	 !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
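/*
 * Worked example (added): the platform-specific read/write paths below use
 * these range macros to map a register offset to a forcewake domain.  The
 * BSD (media) ring tail at 0x12030 lands in 0x12000..0x14000 on VLV, CHV
 * and gen9 alike, so it wakes the media domain; the render ring tail at
 * 0x2030 lands in the render ranges instead.
 */
#if 0
	FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(0x12030);	/* true -> FORCEWAKE_MEDIA  */
	FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(0x12030);	/* true -> FORCEWAKE_MEDIA  */
	FORCEWAKE_CHV_RENDER_RANGE_OFFSET(0x2030);	/* true -> FORCEWAKE_RENDER */
#endif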
622
 
4104 Serge 623
static void
624
ilk_dummy_write(struct drm_i915_private *dev_priv)
625
{
626
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
627
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
628
	 * hence harmless to write 0 into. */
629
	__raw_i915_write32(dev_priv, MI_MODE, 0);
630
}
631
 
632
static void
7144 serge 633
__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
634
		      const i915_reg_t reg,
635
		      const bool read,
636
		      const bool before)
4104 Serge 637
{
7144 serge 638
	/* XXX. We limit the auto arming traces for mmio
639
	 * debugs on these platforms. There are just too many
640
	 * revealed by these and CI/Bat suffers from the noise.
641
	 * Please fix and then re-enable the automatic traces.
642
	 */
643
	if (i915.mmio_debug < 2 &&
644
	    (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
5060 serge 645
		return;
646
 
7144 serge 647
	if (WARN(check_for_unclaimed_mmio(dev_priv),
648
		 "Unclaimed register detected %s %s register 0x%x\n",
649
		 before ? "before" : "after",
650
		 read ? "reading" : "writing to",
651
		 i915_mmio_reg_offset(reg)))
6084 serge 652
		i915.mmio_debug--; /* Only report the first N failures */
4104 Serge 653
}
654
 
7144 serge 655
static inline void
656
unclaimed_reg_debug(struct drm_i915_private *dev_priv,
657
		    const i915_reg_t reg,
658
		    const bool read,
659
		    const bool before)
4104 Serge 660
{
7144 serge 661
	if (likely(!i915.mmio_debug))
5060 serge 662
		return;
663
 
7144 serge 664
	__unclaimed_reg_debug(dev_priv, reg, read, before);
4104 Serge 665
}
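/*
 * Note (added): the checks above are compiled in but are gated on the
 * i915.mmio_debug option; __unclaimed_reg_debug() additionally requires a
 * value of at least 2 on VLV/CHV.  Running with i915.mmio_debug=2 thus
 * arms the before/after unclaimed-register checks around every MMIO access
 * that goes through the gen6+ read/write paths.
 */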
666
 
6084 serge 667
#define GEN2_READ_HEADER(x) \
4104 Serge 668
	u##x val = 0; \
6937 serge 669
	assert_rpm_wakelock_held(dev_priv);
4560 Serge 670
 
6084 serge 671
#define GEN2_READ_FOOTER \
4560 Serge 672
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
673
	return val
674
 
6084 serge 675
#define __gen2_read(x) \
4560 Serge 676
static u##x \
6937 serge 677
gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
6084 serge 678
	GEN2_READ_HEADER(x); \
4560 Serge 679
	val = __raw_i915_read##x(dev_priv, reg); \
6084 serge 680
	GEN2_READ_FOOTER; \
4560 Serge 681
}
682
 
683
#define __gen5_read(x) \
684
static u##x \
6937 serge 685
gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
6084 serge 686
	GEN2_READ_HEADER(x); \
687
	ilk_dummy_write(dev_priv); \
4560 Serge 688
	val = __raw_i915_read##x(dev_priv, reg); \
6084 serge 689
	GEN2_READ_FOOTER; \
4560 Serge 690
}
691
 
6084 serge 692
__gen5_read(8)
693
__gen5_read(16)
694
__gen5_read(32)
695
__gen5_read(64)
696
__gen2_read(8)
697
__gen2_read(16)
698
__gen2_read(32)
699
__gen2_read(64)
700
 
701
#undef __gen5_read
702
#undef __gen2_read
703
 
704
#undef GEN2_READ_FOOTER
705
#undef GEN2_READ_HEADER
706
 
707
#define GEN6_READ_HEADER(x) \
6937 serge 708
	u32 offset = i915_mmio_reg_offset(reg); \
6084 serge 709
	unsigned long irqflags; \
710
	u##x val = 0; \
6937 serge 711
	assert_rpm_wakelock_held(dev_priv); \
7144 serge 712
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
713
	unclaimed_reg_debug(dev_priv, reg, true, true)
6084 serge 714
 
715
#define GEN6_READ_FOOTER \
7144 serge 716
	unclaimed_reg_debug(dev_priv, reg, true, false); \
6084 serge 717
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
718
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
719
	return val
720
 
721
static inline void __force_wake_get(struct drm_i915_private *dev_priv,
722
				    enum forcewake_domains fw_domains)
723
{
724
	struct intel_uncore_forcewake_domain *domain;
725
	enum forcewake_domain_id id;
726
 
727
	if (WARN_ON(!fw_domains))
728
		return;
729
 
730
	/* Ideally GCC would constant-fold and eliminate this loop */
731
	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
732
		if (domain->wake_count) {
733
			fw_domains &= ~(1 << id);
734
			continue;
735
		}
736
 
737
		domain->wake_count++;
738
		fw_domain_arm_timer(domain);
739
	}
740
 
741
	if (fw_domains)
742
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
743
}
744
 
4560 Serge 745
#define __gen6_read(x) \
746
static u##x \
6937 serge 747
gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
6084 serge 748
	GEN6_READ_HEADER(x); \
6937 serge 749
	if (NEEDS_FORCE_WAKE(offset)) \
6084 serge 750
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
751
	val = __raw_i915_read##x(dev_priv, reg); \
752
	GEN6_READ_FOOTER; \
4104 Serge 753
}
754
 
4560 Serge 755
#define __vlv_read(x) \
756
static u##x \
6937 serge 757
vlv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
758
	enum forcewake_domains fw_engine = 0; \
6084 serge 759
	GEN6_READ_HEADER(x); \
6937 serge 760
	if (!NEEDS_FORCE_WAKE(offset)) \
761
		fw_engine = 0; \
762
	else if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(offset)) \
763
		fw_engine = FORCEWAKE_RENDER; \
764
	else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \
765
		fw_engine = FORCEWAKE_MEDIA; \
766
	if (fw_engine) \
767
		__force_wake_get(dev_priv, fw_engine); \
6084 serge 768
	val = __raw_i915_read##x(dev_priv, reg); \
769
	GEN6_READ_FOOTER; \
5060 serge 770
}
771
 
772
#define __chv_read(x) \
773
static u##x \
6937 serge 774
chv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
775
	enum forcewake_domains fw_engine = 0; \
6084 serge 776
	GEN6_READ_HEADER(x); \
6937 serge 777
	if (!NEEDS_FORCE_WAKE(offset)) \
778
		fw_engine = 0; \
779
	else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
780
		fw_engine = FORCEWAKE_RENDER; \
781
	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
782
		fw_engine = FORCEWAKE_MEDIA; \
783
	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
784
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
785
	if (fw_engine) \
786
		__force_wake_get(dev_priv, fw_engine); \
6084 serge 787
	val = __raw_i915_read##x(dev_priv, reg); \
788
	GEN6_READ_FOOTER; \
4560 Serge 789
}
4104 Serge 790
 
6084 serge 791
#define SKL_NEEDS_FORCE_WAKE(reg) \
7144 serge 792
	((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
5354 serge 793
 
794
#define __gen9_read(x) \
795
static u##x \
6937 serge 796
gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
6084 serge 797
	enum forcewake_domains fw_engine; \
798
	GEN6_READ_HEADER(x); \
6937 serge 799
	if (!SKL_NEEDS_FORCE_WAKE(offset)) \
6084 serge 800
		fw_engine = 0; \
6937 serge 801
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
6084 serge 802
		fw_engine = FORCEWAKE_RENDER; \
6937 serge 803
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
6084 serge 804
		fw_engine = FORCEWAKE_MEDIA; \
6937 serge 805
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
6084 serge 806
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
807
	else \
808
		fw_engine = FORCEWAKE_BLITTER; \
809
	if (fw_engine) \
810
		__force_wake_get(dev_priv, fw_engine); \
811
	val = __raw_i915_read##x(dev_priv, reg); \
812
	GEN6_READ_FOOTER; \
5354 serge 813
}
814
 
815
__gen9_read(8)
816
__gen9_read(16)
817
__gen9_read(32)
818
__gen9_read(64)
5060 serge 819
__chv_read(8)
820
__chv_read(16)
821
__chv_read(32)
822
__chv_read(64)
4560 Serge 823
__vlv_read(8)
824
__vlv_read(16)
825
__vlv_read(32)
826
__vlv_read(64)
827
__gen6_read(8)
828
__gen6_read(16)
829
__gen6_read(32)
830
__gen6_read(64)
831
 
5354 serge 832
#undef __gen9_read
5060 serge 833
#undef __chv_read
4560 Serge 834
#undef __vlv_read
835
#undef __gen6_read
6084 serge 836
#undef GEN6_READ_FOOTER
837
#undef GEN6_READ_HEADER
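/*
 * Informational sketch (added): each __gen6_read(x) invocation above stamps
 * out one accessor.  Expanding __gen6_read(32) by hand yields roughly the
 * function below (simplified: RPM assert, unclaimed-mmio debug and tracing
 * omitted); ASSIGN_READ_MMIO_VFUNCS(gen6) later installs it as
 * dev_priv->uncore.funcs.mmio_readl.
 */
#if 0
static u32
gen6_read32(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace)
{
	u32 offset = i915_mmio_reg_offset(reg);
	unsigned long irqflags;
	u32 val = 0;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (NEEDS_FORCE_WAKE(offset))
		__force_wake_get(dev_priv, FORCEWAKE_RENDER);
	val = __raw_i915_read32(dev_priv, reg);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return val;
}
#endif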
4560 Serge 838
 
6937 serge 839
#define VGPU_READ_HEADER(x) \
840
	unsigned long irqflags; \
841
	u##x val = 0; \
842
	assert_rpm_device_not_suspended(dev_priv); \
843
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
844
 
845
#define VGPU_READ_FOOTER \
846
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
847
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
848
	return val
849
 
850
#define __vgpu_read(x) \
851
static u##x \
852
vgpu_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
853
	VGPU_READ_HEADER(x); \
854
	val = __raw_i915_read##x(dev_priv, reg); \
855
	VGPU_READ_FOOTER; \
856
}
857
 
858
__vgpu_read(8)
859
__vgpu_read(16)
860
__vgpu_read(32)
861
__vgpu_read(64)
862
 
863
#undef __vgpu_read
864
#undef VGPU_READ_FOOTER
865
#undef VGPU_READ_HEADER
866
 
6084 serge 867
#define GEN2_WRITE_HEADER \
4560 Serge 868
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
6937 serge 869
	assert_rpm_wakelock_held(dev_priv); \
4560 Serge 870
 
6084 serge 871
#define GEN2_WRITE_FOOTER
4560 Serge 872
 
6084 serge 873
#define __gen2_write(x) \
4560 Serge 874
static void \
6937 serge 875
gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
6084 serge 876
	GEN2_WRITE_HEADER; \
4560 Serge 877
	__raw_i915_write##x(dev_priv, reg, val); \
6084 serge 878
	GEN2_WRITE_FOOTER; \
4560 Serge 879
}
880
 
881
#define __gen5_write(x) \
882
static void \
6937 serge 883
gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
6084 serge 884
	GEN2_WRITE_HEADER; \
4560 Serge 885
	ilk_dummy_write(dev_priv); \
886
	__raw_i915_write##x(dev_priv, reg, val); \
6084 serge 887
	GEN2_WRITE_FOOTER; \
4560 Serge 888
}
889
 
6084 serge 890
__gen5_write(8)
891
__gen5_write(16)
892
__gen5_write(32)
893
__gen5_write(64)
894
__gen2_write(8)
895
__gen2_write(16)
896
__gen2_write(32)
897
__gen2_write(64)
898
 
899
#undef __gen5_write
900
#undef __gen2_write
901
 
902
#undef GEN2_WRITE_FOOTER
903
#undef GEN2_WRITE_HEADER
904
 
905
#define GEN6_WRITE_HEADER \
6937 serge 906
	u32 offset = i915_mmio_reg_offset(reg); \
6084 serge 907
	unsigned long irqflags; \
908
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
6937 serge 909
	assert_rpm_wakelock_held(dev_priv); \
7144 serge 910
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
911
	unclaimed_reg_debug(dev_priv, reg, false, true)
6084 serge 912
 
913
#define GEN6_WRITE_FOOTER \
7144 serge 914
	unclaimed_reg_debug(dev_priv, reg, false, false); \
6084 serge 915
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
916
 
4560 Serge 917
#define __gen6_write(x) \
918
static void \
6937 serge 919
gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
4104 Serge 920
	u32 __fifo_ret = 0; \
6084 serge 921
	GEN6_WRITE_HEADER; \
6937 serge 922
	if (NEEDS_FORCE_WAKE(offset)) { \
4104 Serge 923
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
924
	} \
4560 Serge 925
	__raw_i915_write##x(dev_priv, reg, val); \
926
	if (unlikely(__fifo_ret)) { \
927
		gen6_gt_check_fifodbg(dev_priv); \
928
	} \
6084 serge 929
	GEN6_WRITE_FOOTER; \
4560 Serge 930
}
931
 
932
#define __hsw_write(x) \
933
static void \
6937 serge 934
hsw_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
4560 Serge 935
	u32 __fifo_ret = 0; \
6084 serge 936
	GEN6_WRITE_HEADER; \
6937 serge 937
	if (NEEDS_FORCE_WAKE(offset)) { \
4560 Serge 938
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
939
	} \
4104 Serge 940
	__raw_i915_write##x(dev_priv, reg, val); \
941
	if (unlikely(__fifo_ret)) { \
942
		gen6_gt_check_fifodbg(dev_priv); \
943
	} \
6084 serge 944
	GEN6_WRITE_FOOTER; \
4104 Serge 945
}
946
 
6937 serge 947
static const i915_reg_t gen8_shadowed_regs[] = {
4560 Serge 948
	FORCEWAKE_MT,
949
	GEN6_RPNSWREQ,
950
	GEN6_RC_VIDEO_FREQ,
951
	RING_TAIL(RENDER_RING_BASE),
952
	RING_TAIL(GEN6_BSD_RING_BASE),
953
	RING_TAIL(VEBOX_RING_BASE),
954
	RING_TAIL(BLT_RING_BASE),
955
	/* TODO: Other registers are not yet used */
956
};
957
 
6937 serge 958
static bool is_gen8_shadowed(struct drm_i915_private *dev_priv,
959
			     i915_reg_t reg)
4560 Serge 960
{
961
	int i;
962
	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
6937 serge 963
		if (i915_mmio_reg_equal(reg, gen8_shadowed_regs[i]))
4560 Serge 964
			return true;
965
 
966
	return false;
967
}
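/*
 * Note (added): writes to these "shadowed" registers are tracked by the
 * hardware even while the GT is power-gated, so the write path does not
 * need to force the GT awake for them.  In gen8_write##x() below this
 * shows up as, e.g. for the render ring tail:
 */
#if 0
	if (NEEDS_FORCE_WAKE(0x2030) &&
	    !is_gen8_shadowed(dev_priv, RING_TAIL(RENDER_RING_BASE)))
		__force_wake_get(dev_priv, FORCEWAKE_RENDER);	/* not taken: shadowed */
#endif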
968
 
969
#define __gen8_write(x) \
970
static void \
6937 serge 971
gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
6084 serge 972
	GEN6_WRITE_HEADER; \
6937 serge 973
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(dev_priv, reg)) \
6084 serge 974
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
4560 Serge 975
	__raw_i915_write##x(dev_priv, reg, val); \
6084 serge 976
	GEN6_WRITE_FOOTER; \
4560 Serge 977
}
978
 
5060 serge 979
#define __chv_write(x) \
980
static void \
6937 serge 981
chv_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
982
	enum forcewake_domains fw_engine = 0; \
6084 serge 983
	GEN6_WRITE_HEADER; \
6937 serge 984
	if (!NEEDS_FORCE_WAKE(offset) || \
985
	    is_gen8_shadowed(dev_priv, reg)) \
986
		fw_engine = 0; \
987
	else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
988
		fw_engine = FORCEWAKE_RENDER; \
989
	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
990
		fw_engine = FORCEWAKE_MEDIA; \
991
	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
992
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
993
	if (fw_engine) \
994
		__force_wake_get(dev_priv, fw_engine); \
5060 serge 995
	__raw_i915_write##x(dev_priv, reg, val); \
6084 serge 996
	GEN6_WRITE_FOOTER; \
5060 serge 997
}
998
 
6937 serge 999
static const i915_reg_t gen9_shadowed_regs[] = {
5354 serge 1000
	RING_TAIL(RENDER_RING_BASE),
1001
	RING_TAIL(GEN6_BSD_RING_BASE),
1002
	RING_TAIL(VEBOX_RING_BASE),
1003
	RING_TAIL(BLT_RING_BASE),
1004
	FORCEWAKE_BLITTER_GEN9,
1005
	FORCEWAKE_RENDER_GEN9,
1006
	FORCEWAKE_MEDIA_GEN9,
1007
	GEN6_RPNSWREQ,
1008
	GEN6_RC_VIDEO_FREQ,
1009
	/* TODO: Other registers are not yet used */
1010
};
1011
 
6937 serge 1012
static bool is_gen9_shadowed(struct drm_i915_private *dev_priv,
1013
			     i915_reg_t reg)
5354 serge 1014
{
1015
	int i;
1016
	for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
6937 serge 1017
		if (i915_mmio_reg_equal(reg, gen9_shadowed_regs[i]))
5354 serge 1018
			return true;
1019
 
1020
	return false;
1021
}
1022
 
1023
#define __gen9_write(x) \
1024
static void \
6937 serge 1025
gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
5354 serge 1026
		bool trace) { \
6084 serge 1027
	enum forcewake_domains fw_engine; \
1028
	GEN6_WRITE_HEADER; \
6937 serge 1029
	if (!SKL_NEEDS_FORCE_WAKE(offset) || \
6084 serge 1030
	    is_gen9_shadowed(dev_priv, reg)) \
1031
		fw_engine = 0; \
6937 serge 1032
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
6084 serge 1033
		fw_engine = FORCEWAKE_RENDER; \
6937 serge 1034
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
6084 serge 1035
		fw_engine = FORCEWAKE_MEDIA; \
6937 serge 1036
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
6084 serge 1037
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
1038
	else \
1039
		fw_engine = FORCEWAKE_BLITTER; \
1040
	if (fw_engine) \
1041
		__force_wake_get(dev_priv, fw_engine); \
1042
	__raw_i915_write##x(dev_priv, reg, val); \
1043
	GEN6_WRITE_FOOTER; \
5354 serge 1044
}
1045
 
1046
__gen9_write(8)
1047
__gen9_write(16)
1048
__gen9_write(32)
1049
__gen9_write(64)
5060 serge 1050
__chv_write(8)
1051
__chv_write(16)
1052
__chv_write(32)
1053
__chv_write(64)
4560 Serge 1054
__gen8_write(8)
1055
__gen8_write(16)
1056
__gen8_write(32)
1057
__gen8_write(64)
1058
__hsw_write(8)
1059
__hsw_write(16)
1060
__hsw_write(32)
1061
__hsw_write(64)
1062
__gen6_write(8)
1063
__gen6_write(16)
1064
__gen6_write(32)
1065
__gen6_write(64)
1066
 
5354 serge 1067
#undef __gen9_write
5060 serge 1068
#undef __chv_write
4560 Serge 1069
#undef __gen8_write
1070
#undef __hsw_write
1071
#undef __gen6_write
6084 serge 1072
#undef GEN6_WRITE_FOOTER
1073
#undef GEN6_WRITE_HEADER
4560 Serge 1074
 
6937 serge 1075
#define VGPU_WRITE_HEADER \
1076
	unsigned long irqflags; \
1077
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
1078
	assert_rpm_device_not_suspended(dev_priv); \
1079
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
1080
 
1081
#define VGPU_WRITE_FOOTER \
1082
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
1083
 
1084
#define __vgpu_write(x) \
1085
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
1086
			  i915_reg_t reg, u##x val, bool trace) { \
1087
	VGPU_WRITE_HEADER; \
1088
	__raw_i915_write##x(dev_priv, reg, val); \
1089
	VGPU_WRITE_FOOTER; \
1090
}
1091
 
1092
__vgpu_write(8)
1093
__vgpu_write(16)
1094
__vgpu_write(32)
1095
__vgpu_write(64)
1096
 
1097
#undef __vgpu_write
1098
#undef VGPU_WRITE_FOOTER
1099
#undef VGPU_WRITE_HEADER
1100
 
5354 serge 1101
#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
1102
do { \
1103
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
1104
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
1105
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
1106
	dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
1107
} while (0)
1108
 
1109
#define ASSIGN_READ_MMIO_VFUNCS(x) \
1110
do { \
1111
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
1112
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
1113
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
1114
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
1115
} while (0)
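/*
 * Informational sketch (added): these helpers only paste the platform
 * prefix onto the accessors generated earlier.  For instance
 * ASSIGN_READ_MMIO_VFUNCS(gen6) expands to:
 */
#if 0
	dev_priv->uncore.funcs.mmio_readb = gen6_read8;
	dev_priv->uncore.funcs.mmio_readw = gen6_read16;
	dev_priv->uncore.funcs.mmio_readl = gen6_read32;
	dev_priv->uncore.funcs.mmio_readq = gen6_read64;
#endif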
1116
 
6084 serge 1117
 
1118
static void fw_domain_init(struct drm_i915_private *dev_priv,
1119
			   enum forcewake_domain_id domain_id,
6937 serge 1120
			   i915_reg_t reg_set,
1121
			   i915_reg_t reg_ack)
4560 Serge 1122
{
6084 serge 1123
	struct intel_uncore_forcewake_domain *d;
1124
 
1125
	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
1126
		return;
1127
 
1128
	d = &dev_priv->uncore.fw_domain[domain_id];
1129
 
1130
	WARN_ON(d->wake_count);
1131
 
1132
	d->wake_count = 0;
1133
	d->reg_set = reg_set;
1134
	d->reg_ack = reg_ack;
1135
 
1136
	if (IS_GEN6(dev_priv)) {
1137
		d->val_reset = 0;
1138
		d->val_set = FORCEWAKE_KERNEL;
1139
		d->val_clear = 0;
1140
	} else {
1141
		/* WaRsClearFWBitsAtReset:bdw,skl */
1142
		d->val_reset = _MASKED_BIT_DISABLE(0xffff);
1143
		d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
1144
		d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
1145
	}
1146
 
6937 serge 1147
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6084 serge 1148
		d->reg_post = FORCEWAKE_ACK_VLV;
1149
	else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
1150
		d->reg_post = ECOBUS;
1151
 
1152
	d->i915 = dev_priv;
1153
	d->id = domain_id;
1154
 
1155
	setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);
1156
 
1157
	dev_priv->uncore.fw_domains |= (1 << domain_id);
1158
 
1159
	fw_domain_reset(d);
1160
}
1161
 
1162
static void intel_uncore_fw_domains_init(struct drm_device *dev)
1163
{
4560 Serge 1164
	struct drm_i915_private *dev_priv = dev->dev_private;
1165
 
6084 serge 1166
	if (INTEL_INFO(dev_priv->dev)->gen <= 5)
1167
		return;
4560 Serge 1168
 
5354 serge 1169
	if (IS_GEN9(dev)) {
6084 serge 1170
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
1171
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1172
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1173
			       FORCEWAKE_RENDER_GEN9,
1174
			       FORCEWAKE_ACK_RENDER_GEN9);
1175
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
1176
			       FORCEWAKE_BLITTER_GEN9,
1177
			       FORCEWAKE_ACK_BLITTER_GEN9);
1178
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
1179
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
6937 serge 1180
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
6084 serge 1181
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
1182
		if (!IS_CHERRYVIEW(dev))
1183
			dev_priv->uncore.funcs.force_wake_put =
1184
				fw_domains_put_with_fifo;
1185
		else
1186
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1187
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1188
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
1189
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
1190
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
5354 serge 1191
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
6084 serge 1192
		dev_priv->uncore.funcs.force_wake_get =
1193
			fw_domains_get_with_thread_status;
6660 serge 1194
		if (IS_HASWELL(dev))
1195
			dev_priv->uncore.funcs.force_wake_put =
1196
				fw_domains_put_with_fifo;
1197
		else
7144 serge 1198
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
6084 serge 1199
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1200
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
4560 Serge 1201
	} else if (IS_IVYBRIDGE(dev)) {
1202
		u32 ecobus;
1203
 
1204
		/* IVB configs may use multi-threaded forcewake */
1205
 
1206
		/* A small trick here - if the bios hasn't configured
1207
		 * MT forcewake, and if the device is in RC6, then
1208
		 * force_wake_mt_get will not wake the device and the
1209
		 * ECOBUS read will return zero. Which will be
1210
		 * (correctly) interpreted by the test below as MT
1211
		 * forcewake being disabled.
1212
		 */
6084 serge 1213
		dev_priv->uncore.funcs.force_wake_get =
1214
			fw_domains_get_with_thread_status;
1215
		dev_priv->uncore.funcs.force_wake_put =
1216
			fw_domains_put_with_fifo;
1217
 
1218
		/* We need to init first for ECOBUS access and then
1219
		 * determine later if we want to reinit, in case of MT access is
1220
		 * not working. In this stage we don't know which flavour this
1221
		 * ivb is, so it is better to reset also the gen6 fw registers
1222
		 * before the ecobus check.
1223
		 */
1224
 
1225
		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
1226
		__raw_posting_read(dev_priv, ECOBUS);
1227
 
1228
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1229
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
1230
 
4560 Serge 1231
		mutex_lock(&dev->struct_mutex);
6084 serge 1232
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
4560 Serge 1233
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
6084 serge 1234
		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
4560 Serge 1235
		mutex_unlock(&dev->struct_mutex);
1236
 
6084 serge 1237
		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
4560 Serge 1238
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
1239
			DRM_INFO("when using vblank-synced partial screen updates.\n");
6084 serge 1240
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1241
				       FORCEWAKE, FORCEWAKE_ACK);
4560 Serge 1242
		}
1243
	} else if (IS_GEN6(dev)) {
1244
		dev_priv->uncore.funcs.force_wake_get =
6084 serge 1245
			fw_domains_get_with_thread_status;
4560 Serge 1246
		dev_priv->uncore.funcs.force_wake_put =
6084 serge 1247
			fw_domains_put_with_fifo;
1248
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1249
			       FORCEWAKE, FORCEWAKE_ACK);
4560 Serge 1250
	}
1251
 
6084 serge 1252
	/* All future platforms are expected to require complex power gating */
1253
	WARN_ON(dev_priv->uncore.fw_domains == 0);
1254
}
1255
 
1256
void intel_uncore_init(struct drm_device *dev)
1257
{
1258
	struct drm_i915_private *dev_priv = dev->dev_private;
1259
 
1260
	i915_check_vgpu(dev);
1261
 
1262
	intel_uncore_ellc_detect(dev);
1263
	intel_uncore_fw_domains_init(dev);
1264
	__intel_uncore_early_sanitize(dev, false);
1265
 
7144 serge 1266
	dev_priv->uncore.unclaimed_mmio_check = 1;
1267
 
4560 Serge 1268
	switch (INTEL_INFO(dev)->gen) {
1269
	default:
5354 serge 1270
	case 9:
1271
		ASSIGN_WRITE_MMIO_VFUNCS(gen9);
1272
		ASSIGN_READ_MMIO_VFUNCS(gen9);
1273
		break;
1274
	case 8:
5060 serge 1275
		if (IS_CHERRYVIEW(dev)) {
5354 serge 1276
			ASSIGN_WRITE_MMIO_VFUNCS(chv);
1277
			ASSIGN_READ_MMIO_VFUNCS(chv);
5060 serge 1278
 
1279
		} else {
5354 serge 1280
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
1281
			ASSIGN_READ_MMIO_VFUNCS(gen6);
5060 serge 1282
		}
4560 Serge 1283
		break;
1284
	case 7:
1285
	case 6:
1286
		if (IS_HASWELL(dev)) {
5354 serge 1287
			ASSIGN_WRITE_MMIO_VFUNCS(hsw);
4560 Serge 1288
		} else {
5354 serge 1289
			ASSIGN_WRITE_MMIO_VFUNCS(gen6);
4560 Serge 1290
		}
1291
 
1292
		if (IS_VALLEYVIEW(dev)) {
5354 serge 1293
			ASSIGN_READ_MMIO_VFUNCS(vlv);
4560 Serge 1294
		} else {
5354 serge 1295
			ASSIGN_READ_MMIO_VFUNCS(gen6);
4560 Serge 1296
		}
1297
		break;
1298
	case 5:
5354 serge 1299
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
1300
		ASSIGN_READ_MMIO_VFUNCS(gen5);
4560 Serge 1301
		break;
1302
	case 4:
1303
	case 3:
1304
	case 2:
6084 serge 1305
		ASSIGN_WRITE_MMIO_VFUNCS(gen2);
1306
		ASSIGN_READ_MMIO_VFUNCS(gen2);
4560 Serge 1307
		break;
1308
	}
5354 serge 1309
 
6084 serge 1310
	if (intel_vgpu_active(dev)) {
1311
		ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
1312
		ASSIGN_READ_MMIO_VFUNCS(vgpu);
1313
	}
1314
 
5354 serge 1315
	i915_check_and_clear_faults(dev);
4560 Serge 1316
}
5354 serge 1317
#undef ASSIGN_WRITE_MMIO_VFUNCS
1318
#undef ASSIGN_READ_MMIO_VFUNCS
4560 Serge 1319
 
1320
void intel_uncore_fini(struct drm_device *dev)
1321
{
1322
	/* Paranoia: make sure we have disabled everything before we exit. */
1323
	intel_uncore_sanitize(dev);
5060 serge 1324
	intel_uncore_forcewake_reset(dev, false);
4560 Serge 1325
}
1326
 
5060 serge 1327
#define GEN_RANGE(l, h) GENMASK(h, l)
1328
 
4104 Serge 1329
static const struct register_whitelist {
6937 serge 1330
	i915_reg_t offset_ldw, offset_udw;
4104 Serge 1331
	uint32_t size;
5060 serge 1332
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
1333
	uint32_t gen_bitmask;
4104 Serge 1334
} whitelist[] = {
6937 serge 1335
	{ .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
1336
	  .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
1337
	  .size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
4104 Serge 1338
};
1339
 
1340
int i915_reg_read_ioctl(struct drm_device *dev,
1341
			void *data, struct drm_file *file)
1342
{
1343
	struct drm_i915_private *dev_priv = dev->dev_private;
1344
	struct drm_i915_reg_read *reg = data;
1345
	struct register_whitelist const *entry = whitelist;
6084 serge 1346
	unsigned size;
6937 serge 1347
	i915_reg_t offset_ldw, offset_udw;
5060 serge 1348
	int i, ret = 0;
4104 Serge 1349
 
1350
	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
6937 serge 1351
		if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
4104 Serge 1352
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
1353
			break;
1354
	}
1355
 
1356
	if (i == ARRAY_SIZE(whitelist))
1357
		return -EINVAL;
1358
 
6084 serge 1359
	/* We use the low bits to encode extra flags as the register should
1360
	 * be naturally aligned (and those that are not so aligned merely
1361
	 * limit the available flags for that register).
1362
	 */
6937 serge 1363
	offset_ldw = entry->offset_ldw;
1364
	offset_udw = entry->offset_udw;
6084 serge 1365
	size = entry->size;
6937 serge 1366
	size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw);
6084 serge 1367
 
1368
	intel_runtime_pm_get(dev_priv);
1369
 
1370
	switch (size) {
1371
	case 8 | 1:
6937 serge 1372
		reg->val = I915_READ64_2x32(offset_ldw, offset_udw);
6084 serge 1373
		break;
4104 Serge 1374
	case 8:
6937 serge 1375
		reg->val = I915_READ64(offset_ldw);
4104 Serge 1376
		break;
1377
	case 4:
6937 serge 1378
		reg->val = I915_READ(offset_ldw);
4104 Serge 1379
		break;
1380
	case 2:
6937 serge 1381
		reg->val = I915_READ16(offset_ldw);
4104 Serge 1382
		break;
1383
	case 1:
6937 serge 1384
		reg->val = I915_READ8(offset_ldw);
4104 Serge 1385
		break;
1386
	default:
5060 serge 1387
		ret = -EINVAL;
1388
		goto out;
4104 Serge 1389
	}
1390
 
5060 serge 1391
out:
6084 serge 1392
	intel_runtime_pm_put(dev_priv);
5060 serge 1393
	return ret;
4104 Serge 1394
}
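/*
 * Worked example (added): the single whitelist entry is
 * RING_TIMESTAMP(RENDER_RING_BASE) at offset 0x2358 with size 8.  The low
 * offset bits double as flags: a request for offset 0x2358 takes the
 * "case 8" branch (plain 64-bit read), while 0x2359 gives size == (8 | 1)
 * and uses I915_READ64_2x32(), which reads the two 32-bit halves and
 * guards against the upper dword changing in between.  A hypothetical
 * libdrm caller (assumes drmIoctl and an open 'fd'):
 */
#if 0
	struct drm_i915_reg_read req = { .offset = 0x2358 };
	drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &req);
	/* req.val now holds the 64-bit render ring timestamp */
#endif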
1395
 
4560 Serge 1396
int i915_get_reset_stats_ioctl(struct drm_device *dev,
1397
			       void *data, struct drm_file *file)
4104 Serge 1398
{
1399
	struct drm_i915_private *dev_priv = dev->dev_private;
4560 Serge 1400
	struct drm_i915_reset_stats *args = data;
1401
	struct i915_ctx_hang_stats *hs;
5060 serge 1402
	struct intel_context *ctx;
4560 Serge 1403
	int ret;
4104 Serge 1404
 
4560 Serge 1405
	if (args->flags || args->pad)
1406
		return -EINVAL;
4104 Serge 1407
 
6937 serge 1408
	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
1409
		return -EPERM;
1410
 
4560 Serge 1411
	ret = mutex_lock_interruptible(&dev->struct_mutex);
1412
	if (ret)
1413
		return ret;
4104 Serge 1414
 
5060 serge 1415
	ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
1416
	if (IS_ERR(ctx)) {
4560 Serge 1417
		mutex_unlock(&dev->struct_mutex);
5060 serge 1418
		return PTR_ERR(ctx);
4104 Serge 1419
	}
5060 serge 1420
	hs = &ctx->hang_stats;
4104 Serge 1421
 
6937 serge 1422
	if (capable(CAP_SYS_ADMIN))
7144 serge 1423
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
6937 serge 1424
	else
1425
		args->reset_count = 0;
4104 Serge 1426
 
4560 Serge 1427
	args->batch_active = hs->batch_active;
1428
	args->batch_pending = hs->batch_pending;
4104 Serge 1429
 
4560 Serge 1430
	mutex_unlock(&dev->struct_mutex);
1431
 
4104 Serge 1432
	return 0;
1433
}
1434
 
5354 serge 1435
static int i915_reset_complete(struct drm_device *dev)
4104 Serge 1436
{
1437
	u8 gdrst;
5354 serge 1438
	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
1439
	return (gdrst & GRDOM_RESET_STATUS) == 0;
4104 Serge 1440
}
1441
 
5354 serge 1442
static int i915_do_reset(struct drm_device *dev)
4104 Serge 1443
{
5354 serge 1444
	/* assert reset for at least 20 usec */
1445
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1446
	udelay(20);
1447
	pci_write_config_byte(dev->pdev, I915_GDRST, 0);
4104 Serge 1448
 
5354 serge 1449
	return wait_for(i915_reset_complete(dev), 500);
1450
}
5060 serge 1451
 
5354 serge 1452
static int g4x_reset_complete(struct drm_device *dev)
1453
{
1454
	u8 gdrst;
1455
	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
1456
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
1457
}
4104 Serge 1458
 
5354 serge 1459
static int g33_do_reset(struct drm_device *dev)
1460
{
1461
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1462
	return wait_for(g4x_reset_complete(dev), 500);
4104 Serge 1463
}
1464
 
5060 serge 1465
static int g4x_do_reset(struct drm_device *dev)
1466
{
1467
	struct drm_i915_private *dev_priv = dev->dev_private;
1468
	int ret;
1469
 
5354 serge 1470
	pci_write_config_byte(dev->pdev, I915_GDRST,
5060 serge 1471
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
5354 serge 1472
	ret =  wait_for(g4x_reset_complete(dev), 500);
5060 serge 1473
	if (ret)
1474
		return ret;
1475
 
1476
	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
1477
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
1478
	POSTING_READ(VDECCLK_GATE_D);
1479
 
5354 serge 1480
	pci_write_config_byte(dev->pdev, I915_GDRST,
5060 serge 1481
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
5354 serge 1482
	ret =  wait_for(g4x_reset_complete(dev), 500);
5060 serge 1483
	if (ret)
1484
		return ret;
1485
 
1486
	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
1487
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
1488
	POSTING_READ(VDECCLK_GATE_D);
1489
 
5354 serge 1490
	pci_write_config_byte(dev->pdev, I915_GDRST, 0);
5060 serge 1491
 
1492
	return 0;
1493
}
1494
 
4104 Serge 1495
static int ironlake_do_reset(struct drm_device *dev)
1496
{
1497
	struct drm_i915_private *dev_priv = dev->dev_private;
1498
	int ret;
1499
 
6084 serge 1500
	I915_WRITE(ILK_GDSR,
5060 serge 1501
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
6084 serge 1502
	ret = wait_for((I915_READ(ILK_GDSR) &
5060 serge 1503
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
4104 Serge 1504
	if (ret)
1505
		return ret;
1506
 
6084 serge 1507
	I915_WRITE(ILK_GDSR,
5060 serge 1508
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
6084 serge 1509
	ret = wait_for((I915_READ(ILK_GDSR) &
5060 serge 1510
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
1511
	if (ret)
1512
		return ret;
1513
 
6084 serge 1514
	I915_WRITE(ILK_GDSR, 0);
5060 serge 1515
 
1516
	return 0;
4104 Serge 1517
}
1518
 
1519
static int gen6_do_reset(struct drm_device *dev)
1520
{
1521
	struct drm_i915_private *dev_priv = dev->dev_private;
1522
	int	ret;
1523
 
1524
	/* Reset the chip */
1525
 
1526
	/* GEN6_GDRST is not in the gt power well, no need to check
1527
	 * for fifo space for the write or forcewake the chip for
1528
	 * the read
1529
	 */
1530
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);
1531
 
1532
	/* Spin waiting for the device to ack the reset request */
1533
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
1534
 
5060 serge 1535
	intel_uncore_forcewake_reset(dev, true);
4104 Serge 1536
 
1537
	return ret;
1538
}
1539
 
6084 serge 1540
static int wait_for_register(struct drm_i915_private *dev_priv,
6937 serge 1541
			     i915_reg_t reg,
6084 serge 1542
			     const u32 mask,
1543
			     const u32 value,
1544
			     const unsigned long timeout_ms)
4104 Serge 1545
{
6084 serge 1546
	return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
1547
}
1548
 
1549
static int gen8_do_reset(struct drm_device *dev)
1550
{
1551
	struct drm_i915_private *dev_priv = dev->dev_private;
1552
	struct intel_engine_cs *engine;
1553
	int i;
1554
 
1555
	for_each_ring(engine, dev_priv, i) {
1556
		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
1557
			   _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
1558
 
1559
		if (wait_for_register(dev_priv,
1560
				      RING_RESET_CTL(engine->mmio_base),
1561
				      RESET_CTL_READY_TO_RESET,
1562
				      RESET_CTL_READY_TO_RESET,
1563
				      700)) {
1564
			DRM_ERROR("%s: reset request timeout\n", engine->name);
1565
			goto not_ready;
1566
		}
1567
	}
1568
 
1569
	return gen6_do_reset(dev);
1570
 
1571
not_ready:
1572
	for_each_ring(engine, dev_priv, i)
1573
		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
1574
			   _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
1575
 
1576
	return -EIO;
1577
}
1578
 
1579
static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *)
1580
{
1581
	if (!i915.reset)
1582
		return NULL;
1583
 
1584
	if (INTEL_INFO(dev)->gen >= 8)
1585
		return gen8_do_reset;
1586
	else if (INTEL_INFO(dev)->gen >= 6)
1587
		return gen6_do_reset;
5060 serge 1588
	else if (IS_GEN5(dev))
6084 serge 1589
		return ironlake_do_reset;
5060 serge 1590
	else if (IS_G4X(dev))
6084 serge 1591
		return g4x_do_reset;
5354 serge 1592
	else if (IS_G33(dev))
6084 serge 1593
		return g33_do_reset;
5354 serge 1594
	else if (INTEL_INFO(dev)->gen >= 3)
6084 serge 1595
		return i915_do_reset;
1596
	else
1597
		return NULL;
1598
}
1599
 
1600
int intel_gpu_reset(struct drm_device *dev)
1601
{
1602
	struct drm_i915_private *dev_priv = to_i915(dev);
1603
	int (*reset)(struct drm_device *);
1604
	int ret;
1605
 
1606
	reset = intel_get_gpu_reset(dev);
1607
	if (reset == NULL)
5060 serge 1608
		return -ENODEV;
6084 serge 1609
 
1610
	/* If the power well sleeps during the reset, the reset
1611
	 * request may be dropped and never completes (causing -EIO).
1612
	 */
1613
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1614
	ret = reset(dev);
1615
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1616
 
1617
	return ret;
4104 Serge 1618
}
1619
 
6084 serge 1620
bool intel_has_gpu_reset(struct drm_device *dev)
1621
{
1622
	return intel_get_gpu_reset(dev) != NULL;
1623
}
1624
 
7144 serge 1625
bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
4104 Serge 1626
{
7144 serge 1627
	return check_for_unclaimed_mmio(dev_priv);
1628
}
4104 Serge 1629
 
7144 serge 1630
bool
1631
intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
1632
{
1633
	if (unlikely(i915.mmio_debug ||
1634
		     dev_priv->uncore.unclaimed_mmio_check <= 0))
1635
		return false;
1636
 
1637
	if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
1638
		DRM_DEBUG("Unclaimed register detected, "
1639
			  "enabling oneshot unclaimed register reporting. "
1640
			  "Please use i915.mmio_debug=N for more information.\n");
1641
		i915.mmio_debug++;
1642
		dev_priv->uncore.unclaimed_mmio_check--;
1643
		return true;
4104 Serge 1644
	}
7144 serge 1645
 
1646
	return false;
4104 Serge 1647
}