Subversion Repositories: KolibriOS


Rev 5367 → Rev 6084
@@ old 34 / new 34 @@ (the bracketed header names were stripped by the HTML export and are left as gaps)
 #include <...>
 #include <...>
 #include "intel_drv.h"
 #include <...>
 #include "i915_drv.h"
+#include "i915_vgpu.h"
 #include "i915_trace.h"
 #include <...>
 #include <...>
 //#include <...>
 //#include <...>
 //#include <...>
 #include <...>
 //#include <...>
+#include <...>
@@ old 47 / new 49 @@
 
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen);
 
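The pci_iomap() prototype above stands in for the kernel's own declaration in the KolibriOS port. For reference, a minimal sketch of how a loader would consume it; illustrative only, patterned on the mmio_bar variable declared in i915_driver_load() below, and relying on the standard pci_iomap() convention that maxlen == 0 maps the whole BAR:

	/* Sketch: map the register BAR; a maxlen of 0 maps the full BAR. */
	void __iomem *regs = pci_iomap(dev->pdev, mmio_bar, 0);
	if (regs == NULL)
		return -EIO;	/* MMIO mapping failed */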
@@ old 61 / new 63 @@
 		/* Reject all old ums/dri params. */
 		return -ENODEV;
 	case I915_PARAM_CHIPSET_ID:
 		value = dev->pdev->device;
 		break;
+	case I915_PARAM_REVISION:
+		value = dev->pdev->revision;
+		break;
 	case I915_PARAM_HAS_GEM:
 		value = 1;
 		break;
 	case I915_PARAM_NUM_FENCES_AVAIL:
-		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
+		value = dev_priv->num_fence_regs;
 		break;
 	case I915_PARAM_HAS_OVERLAY:
 		value = dev_priv->overlay ? 1 : 0;
 		break;
 	case I915_PARAM_HAS_PAGEFLIPPING:
@@ old 86 / new 91 @@
 		value = intel_ring_initialized(&dev_priv->ring[BCS]);
 		break;
 	case I915_PARAM_HAS_VEBOX:
 		value = intel_ring_initialized(&dev_priv->ring[VECS]);
 		break;
+	case I915_PARAM_HAS_BSD2:
+		value = intel_ring_initialized(&dev_priv->ring[VCS2]);
+		break;
 	case I915_PARAM_HAS_RELAXED_FENCING:
 		value = 1;
 		break;
 	case I915_PARAM_HAS_COHERENT_RINGS:
 		value = 1;
@@ old 137 / new 145 @@
 		value = i915_cmd_parser_get_version();
 		break;
 	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
 		value = 1;
 		break;
+	case I915_PARAM_MMAP_VERSION:
+		value = 1;
+		break;
+	case I915_PARAM_SUBSLICE_TOTAL:
+		value = INTEL_INFO(dev)->subslice_total;
+		if (!value)
+			return -ENODEV;
+		break;
+	case I915_PARAM_EU_TOTAL:
+		value = INTEL_INFO(dev)->eu_total;
+		if (!value)
+			return -ENODEV;
+		break;
+	case I915_PARAM_HAS_GPU_RESET:
+		value = i915.enable_hangcheck &&
+			intel_has_gpu_reset(dev);
+		break;
+	case I915_PARAM_HAS_RESOURCE_STREAMER:
+		value = HAS_RESOURCE_STREAMER(dev);
+		break;
 	default:
 		DRM_DEBUG("Unknown parameter %d\n", param->param);
 		return -EINVAL;
 	}
 
     *param->value = value;
 
 	return 0;
 }
 
-#if 0
-static int i915_setparam(struct drm_device *dev, void *data,
-			 struct drm_file *file_priv)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	drm_i915_setparam_t *param = data;
-
-	switch (param->param) {
-	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
-	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
-	case I915_SETPARAM_ALLOW_BATCHBUFFER:
-		/* Reject all old ums/dri params. */
-		return -ENODEV;
-
-	case I915_SETPARAM_NUM_USED_FENCES:
-		if (param->value > dev_priv->num_fence_regs ||
-		    param->value < 0)
-			return -EINVAL;
-		/* Userspace can use first N regs */
-		dev_priv->fence_reg_start = param->value;
-		break;
-	default:
-		DRM_DEBUG_DRIVER("unknown parameter %d\n",
-					param->param);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-#endif
 
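All of the I915_PARAM_* cases added above (REVISION, HAS_BSD2, MMAP_VERSION, SUBSLICE_TOTAL, EU_TOTAL, HAS_GPU_RESET, HAS_RESOURCE_STREAMER) are served by the same getparam ioctl. A hedged userspace sketch of querying one of them, assuming a Linux/libdrm environment rather than KolibriOS; the fd comes from opening a DRM node, and error handling is trimmed:

#include <xf86drm.h>	/* drmIoctl() */
#include <i915_drm.h>	/* DRM_IOCTL_I915_GETPARAM, I915_PARAM_* */

static int i915_query_param(int fd, int param, int *value)
{
	drm_i915_getparam_t gp = { .param = param, .value = value };

	/* The driver fills *value and returns 0, or -EINVAL/-ENODEV as above. */
	return drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
}

/* Usage: int eus; if (i915_query_param(fd, I915_PARAM_EU_TOTAL, &eus) == 0) ... */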
@@ old 183 / new 180 @@
 static int i915_get_bridge_dev(struct drm_device *dev)
@@ old 198 / new 195 @@
 
 #define DEVEN_REG 0x54
 #define   DEVEN_MCHBAR_EN (1 << 28)
-
-
 
 
 /* Setup MCHBAR if possible, return true if we should disable it again */
 static void
@@ old 225 / new 220 @@
 	}
 
 	/* If it's already enabled, don't have to do anything */
 	if (enabled)
 		return;
-
-	dbgprintf("Epic fail\n");
-
-#if 0
+/*
 	if (intel_alloc_mchbar_resource(dev))
 		return;
 
+	God help us all
+*/
 	dev_priv->mchbar_need_disable = true;
 
+	DRM_INFO("enable MCHBAR\n");
+
 	/* Space is allocated or reserved, so enable it. */
 	if (IS_I915G(dev) || IS_I915GM(dev)) {
 		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
 				       temp | DEVEN_MCHBAR_EN);
 	} else {
 		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
 		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
 	}
-#endif
 }
 
+static void
+intel_teardown_mchbar(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+	u32 temp;
+
+	if (dev_priv->mchbar_need_disable) {
+		if (IS_I915G(dev) || IS_I915GM(dev)) {
+			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
+			temp &= ~DEVEN_MCHBAR_EN;
+			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
+		} else {
+			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+			temp &= ~1;
+			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
+		}
+	}
+
+	if (dev_priv->mch_res.start)
+		release_resource(&dev_priv->mch_res);
+}
@@ old 315 / new 332 @@
-    ret = intel_fbdev_init(dev);
-    if (ret)
+	ret = intel_fbdev_init(dev);
+	if (ret)
 		goto cleanup_gem;
 
 	/* Only enable hotplug handling once the fbdev is fully set up. */
-	intel_hpd_init(dev_priv);
+//	intel_hpd_init(dev_priv);
 
 	/*
 	 * Some ports require correctly set-up hpd registers for detection to
@@ old 381 / new 398 @@
 {
 	return 0;
 }
 #endif
 
+#if !defined(CONFIG_VGA_CONSOLE)
+static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
+{
+	return 0;
+}
+#elif !defined(CONFIG_DUMMY_CONSOLE)
+static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
+{
+	return -ENODEV;
+}
+#else
+static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
+{
+	int ret = 0;
+
+	DRM_INFO("Replacing VGA console driver\n");
+
+	console_lock();
+	if (con_is_bound(&vga_con))
+		ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
+	if (ret == 0) {
+		ret = do_unregister_con_driver(&vga_con);
+
+		/* Ignore "already unregistered". */
+		if (ret == -ENODEV)
+			ret = 0;
+	}
+	console_unlock();
+
+	return ret;
+}
+#endif
 
 static void i915_dump_device_info(struct drm_i915_private *dev_priv)
 {
@@ old 388 / new 438 @@
 	const struct intel_device_info *info = &dev_priv->info;
@@ old 401 / new 451 @@
 #undef SEP_EMPTY
 #undef PRINT_FLAG
 #undef SEP_COMMA
 }
 
+static void cherryview_sseu_info_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_device_info *info;
+	u32 fuse, eu_dis;
+
+	info = (struct intel_device_info *)&dev_priv->info;
+	fuse = I915_READ(CHV_FUSE_GT);
+
+	info->slice_total = 1;
+
+	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
+		info->subslice_per_slice++;
+		eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
+				 CHV_FGT_EU_DIS_SS0_R1_MASK);
+		info->eu_total += 8 - hweight32(eu_dis);
+	}
+
+	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
+		info->subslice_per_slice++;
+		eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
+				 CHV_FGT_EU_DIS_SS1_R1_MASK);
+		info->eu_total += 8 - hweight32(eu_dis);
+	}
+
+	info->subslice_total = info->subslice_per_slice;
+	/*
+	 * CHV expected to always have a uniform distribution of EU
+	 * across subslices.
+	*/
+	info->eu_per_subslice = info->subslice_total ?
+				info->eu_total / info->subslice_total :
+				0;
+	/*
+	 * CHV supports subslice power gating on devices with more than
+	 * one subslice, and supports EU power gating on devices with
+	 * more than one EU pair per subslice.
+	*/
+	info->has_slice_pg = 0;
+	info->has_subslice_pg = (info->subslice_total > 1);
+	info->has_eu_pg = (info->eu_per_subslice > 2);
+}
+
+static void gen9_sseu_info_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_device_info *info;
+	int s_max = 3, ss_max = 4, eu_max = 8;
+	int s, ss;
+	u32 fuse2, s_enable, ss_disable, eu_disable;
+	u8 eu_mask = 0xff;
+
+	info = (struct intel_device_info *)&dev_priv->info;
+	fuse2 = I915_READ(GEN8_FUSE2);
+	s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
+		   GEN8_F2_S_ENA_SHIFT;
+	ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
+		     GEN9_F2_SS_DIS_SHIFT;
+
+	info->slice_total = hweight32(s_enable);
+	/*
+	 * The subslice disable field is global, i.e. it applies
+	 * to each of the enabled slices.
+	*/
+	info->subslice_per_slice = ss_max - hweight32(ss_disable);
+	info->subslice_total = info->slice_total *
+			       info->subslice_per_slice;
+
+	/*
+	 * Iterate through enabled slices and subslices to
+	 * count the total enabled EU.
+	*/
+	for (s = 0; s < s_max; s++) {
+		if (!(s_enable & (0x1 << s)))
+			/* skip disabled slice */
+			continue;
+
+		eu_disable = I915_READ(GEN9_EU_DISABLE(s));
+		for (ss = 0; ss < ss_max; ss++) {
+			int eu_per_ss;
+
+			if (ss_disable & (0x1 << ss))
+				/* skip disabled subslice */
+				continue;
+
+			eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
+						      eu_mask);
+
+			/*
+			 * Record which subslice(s) has(have) 7 EUs. we
+			 * can tune the hash used to spread work among
+			 * subslices if they are unbalanced.
+			 */
+			if (eu_per_ss == 7)
+				info->subslice_7eu[s] |= 1 << ss;
+
+			info->eu_total += eu_per_ss;
+		}
+	}
+
+	/*
+	 * SKL is expected to always have a uniform distribution
+	 * of EU across subslices with the exception that any one
+	 * EU in any one subslice may be fused off for die
+	 * recovery. BXT is expected to be perfectly uniform in EU
+	 * distribution.
+	*/
+	info->eu_per_subslice = info->subslice_total ?
+				DIV_ROUND_UP(info->eu_total,
+					     info->subslice_total) : 0;
+	/*
+	 * SKL supports slice power gating on devices with more than
+	 * one slice, and supports EU power gating on devices with
+	 * more than one EU pair per subslice. BXT supports subslice
+	 * power gating on devices with more than one subslice, and
+	 * supports EU power gating on devices with more than one EU
+	 * pair per subslice.
+	*/
+	info->has_slice_pg = (IS_SKYLAKE(dev) && (info->slice_total > 1));
+	info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
+	info->has_eu_pg = (info->eu_per_subslice > 2);
+}
+
+static void broadwell_sseu_info_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_device_info *info;
+	const int s_max = 3, ss_max = 3, eu_max = 8;
+	int s, ss;
+	u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
+
+	fuse2 = I915_READ(GEN8_FUSE2);
+	s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+	ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;
+
+	eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
+	eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
+			((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
+			 (32 - GEN8_EU_DIS0_S1_SHIFT));
+	eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
+			((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
+			 (32 - GEN8_EU_DIS1_S2_SHIFT));
+
+
+	info = (struct intel_device_info *)&dev_priv->info;
+	info->slice_total = hweight32(s_enable);
+
+	/*
+	 * The subslice disable field is global, i.e. it applies
+	 * to each of the enabled slices.
+	 */
+	info->subslice_per_slice = ss_max - hweight32(ss_disable);
+	info->subslice_total = info->slice_total * info->subslice_per_slice;
+
+	/*
+	 * Iterate through enabled slices and subslices to
+	 * count the total enabled EU.
+	 */
+	for (s = 0; s < s_max; s++) {
+		if (!(s_enable & (0x1 << s)))
+			/* skip disabled slice */
+			continue;
+
+		for (ss = 0; ss < ss_max; ss++) {
+			u32 n_disabled;
+
+			if (ss_disable & (0x1 << ss))
+				/* skip disabled subslice */
+				continue;
+
+			n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));
+
+			/*
+			 * Record which subslices have 7 EUs.
+			 */
+			if (eu_max - n_disabled == 7)
+				info->subslice_7eu[s] |= 1 << ss;
+
+			info->eu_total += eu_max - n_disabled;
+		}
+	}
+
+	/*
+	 * BDW is expected to always have a uniform distribution of EU across
+	 * subslices with the exception that any one EU in any one subslice may
+	 * be fused off for die recovery.
+	 */
+	info->eu_per_subslice = info->subslice_total ?
+		DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;
+
+	/*
+	 * BDW supports slice power gating on devices with more than
+	 * one slice.
+	 */
+	info->has_slice_pg = (info->slice_total > 1);
+	info->has_subslice_pg = 0;
+	info->has_eu_pg = 0;
+}
 
 /*
  * Determine various intel_device_info fields at runtime.
  *
  * Use it when either:
@@ old 422 / new 671 @@
 	struct intel_device_info *info;
 	enum pipe pipe;
 
 	info = (struct intel_device_info *)&dev_priv->info;
 
-	if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9)
+	/*
+	 * Skylake and Broxton currently don't expose the topmost plane as its
+	 * use is exclusive with the legacy cursor and we only want to expose
+	 * one of those, not both. Until we can safely expose the topmost plane
+	 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
+	 * we don't expose the topmost plane at all to prevent ABI breakage
+	 * down the line.
+	 */
+	if (IS_BROXTON(dev)) {
+		info->num_sprites[PIPE_A] = 2;
+		info->num_sprites[PIPE_B] = 2;
+		info->num_sprites[PIPE_C] = 1;
+	} else if (IS_VALLEYVIEW(dev))
 		for_each_pipe(dev_priv, pipe)
 			info->num_sprites[pipe] = 2;
 	else
@@ old 455 / new 716 @@
 		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
 			DRM_INFO("Display fused off, disabling\n");
 			info->num_pipes = 0;
 		}
 	}
+
+	/* Initialize slice/subslice/EU info */
+	if (IS_CHERRYVIEW(dev))
+		cherryview_sseu_info_init(dev);
+	else if (IS_BROADWELL(dev))
+		broadwell_sseu_info_init(dev);
+	else if (INTEL_INFO(dev)->gen >= 9)
+		gen9_sseu_info_init(dev);
+
+	DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
+	DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
+	DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
+	DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
+	DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
+	DRM_DEBUG_DRIVER("has slice power gating: %s\n",
+			 info->has_slice_pg ? "y" : "n");
+	DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
+			 info->has_subslice_pg ? "y" : "n");
+	DRM_DEBUG_DRIVER("has EU power gating: %s\n",
+			 info->has_eu_pg ? "y" : "n");
+}
+
+static void intel_init_dpio(struct drm_i915_private *dev_priv)
+{
+	if (!IS_VALLEYVIEW(dev_priv))
+		return;
+
+	/*
+	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
+	 * CHV x1 PHY (DP/HDMI D)
+	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
+	 */
+	if (IS_CHERRYVIEW(dev_priv)) {
+		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
+		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
+	} else {
+		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
+	}
 }
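Each of the three *_sseu_info_init() helpers added above follows the same recipe: read a fuse register, popcount the disable mask (the kernel's hweight32()/hweight8()), and subtract from the architectural maximum. A self-contained worked example of that arithmetic; the fuse value is made up for illustration, not real hardware data:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's hweight32(): population count. */
static unsigned int hweight32(uint32_t x)
{
	return (unsigned int)__builtin_popcount(x);
}

int main(void)
{
	uint32_t eu_dis = 0x07;		/* pretend 3 EU-disable bits are set */
	unsigned int eu_total = 0;

	/* Mirrors "info->eu_total += 8 - hweight32(eu_dis);" above. */
	eu_total += 8 - hweight32(eu_dis);

	printf("subslice contributes %u of 8 EUs\n", eu_total);	/* prints 5 */
	return 0;
}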
@@ old 461 / new 760 @@
 
 /**
  * i915_driver_load - setup chip and create an initial config
@@ old 477 / new 776 @@
 	int ret = 0, mmio_bar, mmio_size;
 	uint32_t aperture_size;
 
 	info = (struct intel_device_info *) flags;
-
 
 	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
-    if (dev_priv == NULL)
+	if (dev_priv == NULL)
@@ old 496 / new 794 @@
 	spin_lock_init(&dev_priv->gpu_error.lock);
 	mutex_init(&dev_priv->backlight_lock);
 	spin_lock_init(&dev_priv->uncore.lock);
 	spin_lock_init(&dev_priv->mm.object_stat_lock);
 	spin_lock_init(&dev_priv->mmio_flip_lock);
-	mutex_init(&dev_priv->dpio_lock);
+	mutex_init(&dev_priv->sb_lock);
 	mutex_init(&dev_priv->modeset_restore_lock);
+	mutex_init(&dev_priv->csr_lock);
+	mutex_init(&dev_priv->av_mutex);
 
 	intel_pm_setup(dev);
@@ old 543 / new 843 @@
 	/* This must be called before any calls to HAS_PCH_* */
 	intel_detect_pch(dev);
 
 	intel_uncore_init(dev);
 
+	/* Load CSR Firmware for SKL */
+	intel_csr_ucode_init(dev);
+
 	ret = i915_gem_gtt_init(dev);
 	if (ret)
-		goto out_regs;
+		goto out_freecsr;
 
+	ret = i915_kick_out_vgacon(dev_priv);
+	if (ret) {
+		DRM_ERROR("failed to remove conflicting VGA console\n");
+		goto out_gtt;
+	}
 
 	pci_set_master(dev->pdev);
	pci_set_master(dev->pdev);
Line 569... Line 876...
569
	if (dev_priv->gtt.mappable == NULL) {
876
	if (dev_priv->gtt.mappable == NULL) {
570
		ret = -EIO;
877
		ret = -EIO;
571
		goto out_gtt;
878
		goto out_gtt;
572
	}
879
	}
Line -... Line 880...
-
 
880
 
573
 
881
 
574
    /* The i915 workqueue is primarily used for batched retirement of
882
	/* The i915 workqueue is primarily used for batched retirement of
575
     * requests (and thus managing bo) once the task has been completed
883
	 * requests (and thus managing bo) once the task has been completed
576
     * by the GPU. i915_gem_retire_requests() is called directly when we
884
	 * by the GPU. i915_gem_retire_requests() is called directly when we
577
     * need high-priority retirement, such as waiting for an explicit
885
	 * need high-priority retirement, such as waiting for an explicit
Line 599... Line 907...
599
    /* Try to make sure MCHBAR is enabled before poking at it */
907
	/* Try to make sure MCHBAR is enabled before poking at it */
600
	intel_setup_mchbar(dev);
908
	intel_setup_mchbar(dev);
601
    intel_setup_gmbus(dev);
909
	intel_setup_gmbus(dev);
602
    intel_opregion_setup(dev);
910
	intel_opregion_setup(dev);
Line 603... Line -...
603
 
-
 
604
    intel_setup_bios(dev);
-
 
605
 
911
 
Line 606... Line 912...
606
    i915_gem_load(dev);
912
	i915_gem_load(dev);
607
 
913
 
608
    /* On the 945G/GM, the chipset reports the MSI capability on the
914
	/* On the 945G/GM, the chipset reports the MSI capability on the
Line 617... Line 923...
617
     * stuck interrupts on some machines.
923
	 * stuck interrupts on some machines.
618
     */
924
	 */
Line 619... Line 925...
619
 
925
 
Line -... Line 926...
-
 
926
	intel_device_info_runtime_init(dev);
-
 
927
 
620
	intel_device_info_runtime_init(dev);
928
	intel_init_dpio(dev_priv);
621
 
929
 
622
//   if (INTEL_INFO(dev)->num_pipes) {
930
//   if (INTEL_INFO(dev)->num_pipes) {
623
//       ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
931
//       ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
624
//       if (ret)
932
//       if (ret)
Line 625... Line 933...
625
//           goto out_gem_unload;
933
//           goto out_gem_unload;
Line 626... Line -...
626
//   }
-
 
627
 
934
//   }
628
	intel_power_domains_init(dev_priv);
935
 
629
 
936
	intel_power_domains_init(dev_priv);
630
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
937
 
631
		ret = i915_load_modeset_init(dev);
938
	ret = i915_load_modeset_init(dev);
632
		if (ret < 0) {
-
 
Line -... Line 939...
-
 
939
	if (ret < 0) {
-
 
940
		DRM_ERROR("failed to init modeset\n");
-
 
941
		goto out_power_well;
-
 
942
	}
-
 
943
 
-
 
944
	/*
Line 633... Line 945...
633
			DRM_ERROR("failed to init modeset\n");
945
	 * Notify a valid surface after modesetting,
634
			goto out_power_well;
946
	 * when running inside a VM.
635
		}
947
	 */
636
	}
948
	if (intel_vgpu_active(dev))
Line 653... Line 965...
653
out_power_well:
965
out_power_well:
654
out_gem_unload:
966
out_gem_unload:
Line 655... Line 967...
655
 
967
 
656
out_mtrrfree:
968
out_mtrrfree:
-
 
969
out_gtt:
657
out_gtt:
970
	i915_global_gtt_cleanup(dev);
658
out_regs:
971
out_freecsr:
659
put_bridge:
972
put_bridge:
660
free_priv:
973
free_priv:
661
    kfree(dev_priv);
974
    kfree(dev_priv);
662
    return ret;
975
    return ret;
Line 663... Line 976...
663
}
976
}
664
 
-
 
665
#if 0
977
 
666
 
978
#if 0
667
int i915_driver_unload(struct drm_device *dev)
979
int i915_driver_unload(struct drm_device *dev)
668
{
980
{
Line -... Line 981...
-
 
981
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
982
	int ret;
669
	struct drm_i915_private *dev_priv = dev->dev_private;
983
 
670
	int ret;
984
	i915_audio_component_cleanup(dev_priv);
671
 
985
 
672
	ret = i915_gem_suspend(dev);
986
	ret = i915_gem_suspend(dev);
673
	if (ret) {
987
	if (ret) {
Line -... Line 988...
-
 
988
		DRM_ERROR("failed to idle hardware: %d\n", ret);
Line 674... Line 989...
674
		DRM_ERROR("failed to idle hardware: %d\n", ret);
989
		return ret;
Line 675... Line 990...
675
		return ret;
990
	}
Line 676... Line 991...
676
	}
991
 
677
 
992
	intel_power_domains_fini(dev_priv);
Line 678... Line 993...
678
 
993
 
679
	intel_gpu_ips_teardown();
994
	intel_gpu_ips_teardown();
Line 680... Line 995...
680
 
995
 
Line -... Line 996...
-
 
996
	i915_teardown_sysfs(dev);
-
 
997
 
681
	i915_teardown_sysfs(dev);
998
	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
-
 
999
	unregister_shrinker(&dev_priv->mm.shrinker);
682
 
1000
 
Line 683... Line 1001...
683
	if (dev_priv->mm.inactive_shrinker.scan_objects)
1001
	io_mapping_free(dev_priv->gtt.mappable);
684
		unregister_shrinker(&dev_priv->mm.inactive_shrinker);
1002
	arch_phys_wc_del(dev_priv->gtt.mtrr);
685
 
1003
 
Line 698... Line 1016...
698
		if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
1016
	if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
699
			kfree(dev_priv->vbt.child_dev);
1017
		kfree(dev_priv->vbt.child_dev);
700
			dev_priv->vbt.child_dev = NULL;
1018
		dev_priv->vbt.child_dev = NULL;
701
			dev_priv->vbt.child_dev_num = 0;
1019
		dev_priv->vbt.child_dev_num = 0;
702
		}
1020
	}
-
 
1021
	kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
-
 
1022
	dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
-
 
1023
	kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
-
 
1024
	dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
Line 703... Line 1025...
703
 
1025
 
704
		vga_switcheroo_unregister_client(dev->pdev);
1026
	vga_switcheroo_unregister_client(dev->pdev);
705
		vga_client_register(dev->pdev, NULL, NULL, NULL);
-
 
Line 706... Line 1027...
706
	}
1027
	vga_client_register(dev->pdev, NULL, NULL, NULL);
707
 
1028
 
708
	/* Free error state after interrupts are fully disabled. */
-
 
709
	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
1029
	/* Free error state after interrupts are fully disabled. */
Line 710... Line 1030...
710
	cancel_work_sync(&dev_priv->gpu_error.work);
1030
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
711
	i915_destroy_error_state(dev);
1031
	i915_destroy_error_state(dev);
Line 712... Line 1032...
712
 
1032
 
Line 713... Line -...
713
	if (dev->pdev->msi_enabled)
-
 
714
		pci_disable_msi(dev->pdev);
1033
	if (dev->pdev->msi_enabled)
715
 
1034
		pci_disable_msi(dev->pdev);
Line -... Line 1035...
-
 
1035
 
716
	intel_opregion_fini(dev);
1036
	intel_opregion_fini(dev);
717
 
1037
 
718
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1038
	/* Flush any outstanding unpin_work. */
719
		/* Flush any outstanding unpin_work. */
1039
	flush_workqueue(dev_priv->wq);
-
 
1040
 
720
		flush_workqueue(dev_priv->wq);
1041
	intel_guc_ucode_fini(dev);
721
 
1042
	mutex_lock(&dev->struct_mutex);
-
 
1043
	i915_gem_cleanup_ringbuffer(dev);
Line 722... Line 1044...
722
		mutex_lock(&dev->struct_mutex);
1044
	i915_gem_context_fini(dev);
723
		i915_gem_cleanup_ringbuffer(dev);
1045
	mutex_unlock(&dev->struct_mutex);
Line 724... Line 1046...
724
		i915_gem_context_fini(dev);
1046
	intel_fbc_cleanup_cfb(dev_priv);
725
		mutex_unlock(&dev->struct_mutex);
1047
	i915_gem_cleanup_stolen(dev);
-
 
1048
 
726
		i915_gem_cleanup_stolen(dev);
1049
	intel_csr_ucode_fini(dev);
Line 727... Line 1050...
727
	}
1050
 
Line 728... Line 1051...
728
 
1051
	intel_teardown_gmbus(dev);
729
	intel_teardown_gmbus(dev);
1052
	intel_teardown_mchbar(dev);
730
	intel_teardown_mchbar(dev);
1053
 
Line -... Line 1054...
-
 
1054
	destroy_workqueue(dev_priv->hotplug.dp_wq);
731
 
1055
	destroy_workqueue(dev_priv->wq);
732
	destroy_workqueue(dev_priv->dp_wq);
1056
	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
733
	destroy_workqueue(dev_priv->wq);
-
 
734
	pm_qos_remove_request(&dev_priv->pm_qos);
1057
	pm_qos_remove_request(&dev_priv->pm_qos);
735
 
1058
 
Line 736... Line 1059...
736
	i915_global_gtt_cleanup(dev);
1059
	i915_global_gtt_cleanup(dev);
737
 
1060
 
Line 784... Line 1107...
784
	mutex_lock(&dev->struct_mutex);
1107
	mutex_lock(&dev->struct_mutex);
785
	i915_gem_context_close(dev, file);
1108
	i915_gem_context_close(dev, file);
786
	i915_gem_release(dev, file);
1109
	i915_gem_release(dev, file);
787
	mutex_unlock(&dev->struct_mutex);
1110
	mutex_unlock(&dev->struct_mutex);
Line 788... Line -...
788
 
-
 
789
	if (drm_core_check_feature(dev, DRIVER_MODESET))
1111
 
790
		intel_modeset_preclose(dev, file);
1112
	intel_modeset_preclose(dev, file);
Line 791... Line 1113...
791
}
1113
}
792
 
1114
 
Line 797... Line 1119...
797
	if (file_priv && file_priv->bsd_ring)
1119
	if (file_priv && file_priv->bsd_ring)
798
		file_priv->bsd_ring = NULL;
1120
		file_priv->bsd_ring = NULL;
799
	kfree(file_priv);
1121
	kfree(file_priv);
800
}
1122
}
Line -... Line 1123...
-
 
1123
 
-
 
1124
static int
-
 
1125
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
-
 
1126
			  struct drm_file *file)
-
 
1127
{
-
 
1128
	return -ENODEV;
-
 
1129
}
801
 
1130
 
802
const struct drm_ioctl_desc i915_ioctls[] = {
1131
const struct drm_ioctl_desc i915_ioctls[] = {
803
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1132
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
804
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
1133
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
805
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
1134
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
806
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
1135
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
807
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
1136
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
808
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
1137
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
809
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
1138
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
810
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1139
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
811
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
1140
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
812
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
1141
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
813
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1142
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
814
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
1143
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
815
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1144
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
816
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1145
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
817
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
1146
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
818
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
1147
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
819
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1148
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
820
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1149
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
821
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
1150
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
822
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
1151
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
823
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
1152
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
824
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
1153
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
825
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
1154
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
826
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1155
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
827
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1156
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
828
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
1157
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
829
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1158
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
830
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1159
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
831
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1160
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
832
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1161
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
833
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1162
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
834
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1163
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
835
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1164
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
836
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1165
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
837
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1166
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
838
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1167
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW),
839
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1168
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW),
840
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1169
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
841
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
1170
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
842
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1171
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
843
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1172
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW),
844
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1173
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW),
845
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1174
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
846
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1175
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
847
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
1176
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
848
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1177
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
849
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1178
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
850
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1179
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
851
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1180
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_RENDER_ALLOW),
-
 
1181
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
-
 
1182
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
852
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1183
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
Line 853... Line 1184...
853
};
1184
};
854
 
-
 
855
int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
-
 
856
 
-
 
857
/*
-
 
858
 * This is really ugly: Because old userspace abused the linux agp interface to
-
 
859
 * manage the gtt, we need to claim that all intel devices are agp.  For
-
 
860
 * otherwise the drm core refuses to initialize the agp support code.
-
 
861
 */
-
 
862
int i915_driver_device_is_agp(struct drm_device *dev)
-
 
863
{
-
 
864
	return 1;
1185