/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/* NOTE: the angle-bracketed header names below were lost when this listing
 * was extracted; they are reconstructed from the upstream Linux i915 driver
 * this port tracks and should be treated as a best-effort guess. */
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_legacy.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include <linux/pci.h>
#include <linux/console.h>
#include <linux/vt.h>
//#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
//#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <acpi/video.h>

void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen);

int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
	case I915_PARAM_ALLOW_BATCHBUFFER:
	case I915_PARAM_LAST_DISPATCH:
		/* Reject all old ums/dri params. */
		return -ENODEV;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pdev->device;
		break;
	case I915_PARAM_REVISION:
		value = dev->pdev->revision;
		break;
	case I915_PARAM_HAS_GEM:
		value = 1;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		/* depends on GEM */
		value = 1;
		break;
	case I915_PARAM_HAS_BSD:
		value = intel_ring_initialized(&dev_priv->ring[VCS]);
		break;
	case I915_PARAM_HAS_BLT:
		value = intel_ring_initialized(&dev_priv->ring[BCS]);
		break;
	case I915_PARAM_HAS_VEBOX:
		value = intel_ring_initialized(&dev_priv->ring[VECS]);
		break;
	case I915_PARAM_HAS_BSD2:
		value = intel_ring_initialized(&dev_priv->ring[VCS2]);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_INFO(dev)->gen >= 4;
		break;
	case I915_PARAM_HAS_RELAXED_DELTA:
		value = 1;
		break;
	case I915_PARAM_HAS_GEN7_SOL_RESET:
		value = 1;
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev);
		break;
	case I915_PARAM_HAS_WT:
		value = HAS_WT(dev);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = USES_PPGTT(dev);
		break;
	case I915_PARAM_HAS_WAIT_TIMEOUT:
		value = 1;
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = i915_semaphore_is_enabled(dev);
		break;
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
		value = 1;
		break;
	case I915_PARAM_HAS_SECURE_BATCHES:
		value = capable(CAP_SYS_ADMIN);
		break;
	case I915_PARAM_HAS_PINNED_BATCHES:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_NO_RELOC:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
		value = 1;
		break;
	case I915_PARAM_CMD_PARSER_VERSION:
		value = i915_cmd_parser_get_version();
		break;
	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
		value = 1;
		break;
	case I915_PARAM_MMAP_VERSION:
		value = 1;
		break;
	case I915_PARAM_SUBSLICE_TOTAL:
		value = INTEL_INFO(dev)->subslice_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_EU_TOTAL:
		value = INTEL_INFO(dev)->eu_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_HAS_GPU_RESET:
		value = i915.enable_hangcheck &&
			intel_has_gpu_reset(dev);
		break;
	case I915_PARAM_HAS_RESOURCE_STREAMER:
		value = HAS_RESOURCE_STREAMER(dev);
		break;
	case I915_PARAM_HAS_EXEC_SOFTPIN:
		value = 1;
		break;
	default:
		DRM_DEBUG("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	*param->value = value;

	return 0;
}
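
/*
 * Illustrative only, not part of this driver: userspace reaches the switch
 * above through the GETPARAM ioctl. A minimal sketch against a Linux-style
 * DRM file descriptor, assuming libdrm's drmIoctl() is available (a
 * KolibriOS client would go through the port's own syscall layer instead):
 *
 *	int chipset_id;
 *	drm_i915_getparam_t gp = {
 *		.param = I915_PARAM_CHIPSET_ID,
 *		.value = &chipset_id,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
 *		printf("PCI device id: 0x%04x\n", chipset_id);
 */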

static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->bridge_dev = _pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define   DEVEN_MCHBAR_EN (1 << 28)

/* Set up MCHBAR if possible; mchbar_need_disable records whether teardown
 * should disable it again. */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;
/*
	if (intel_alloc_mchbar_resource(dev))
		return;

	God help us all
*/
	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
			temp &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
		} else {
			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
			temp &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_bios_init(dev_priv);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* If we have > 1 VGA cards, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */

	intel_power_domains_init_hw(dev_priv, false);

	intel_csr_ucode_init(dev_priv);

	ret = intel_irq_install(dev_priv);
	if (ret)
		goto cleanup_csr;

	intel_setup_gmbus(dev);

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	intel_modeset_init(dev);

	intel_guc_ucode_init(dev);

	ret = i915_gem_init(dev);
	if (ret)
		goto cleanup_irq;

	intel_modeset_gem_init(dev);

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = true;
	if (INTEL_INFO(dev)->num_pipes == 0)
		return 0;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45.  Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. Now we should scan for the initial config
	 * only once hotplug handling is enabled, but due to screwed-up locking
	 * around kms/fbdev init we can't protect the fbdev initial config
	 * scanning against hotplug events. Hence do this first and ignore the
	 * tiny window where we will lose hotplug notifications.
	 */
	intel_fbdev_initial_config(dev_priv, 0);

	drm_kms_helper_poll_init(dev);

	return 0;

cleanup_gem:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);
cleanup_irq:
	intel_guc_ucode_fini(dev);
cleanup_csr:
cleanup_vga_client:
out:
	return ret;
}
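
/*
 * The error paths above follow the kernel's "goto unwind" idiom: each label
 * undoes only the setup steps that had already succeeded, in reverse order,
 * so a failure at any point leaves nothing half-initialized. A minimal
 * sketch of the pattern with hypothetical helpers (not driver API):
 *
 *	int setup_all(void)
 *	{
 *		int ret;
 *
 *		ret = setup_a();
 *		if (ret)
 *			goto out;
 *		ret = setup_b();
 *		if (ret)
 *			goto cleanup_a;
 *		return 0;
 *
 *	cleanup_a:
 *		teardown_a();
 *	out:
 *		return ret;
 *	}
 */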

#if IS_ENABLED(CONFIG_FB)
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->dev->pdev;
	bool primary;
	int ret;

	ap = alloc_apertures(1);
	if (!ap)
		return -ENOMEM;

	ap->ranges[0].base = dev_priv->gtt.mappable_base;
	ap->ranges[0].size = dev_priv->gtt.mappable_end;

	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);

	return ret;
}
#else
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	return 0;
}
#endif

#if !defined(CONFIG_VGA_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return 0;
}
#elif !defined(CONFIG_DUMMY_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return -ENODEV;
}
#else
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	DRM_INFO("Replacing VGA console driver\n");

	console_lock();
	if (con_is_bound(&vga_con))
		ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
	if (ret == 0) {
		ret = do_unregister_con_driver(&vga_con);

		/* Ignore "already unregistered". */
		if (ret == -ENODEV)
			ret = 0;
	}
	console_unlock();

	return ret;
}
#endif

static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
	const struct intel_device_info *info = &dev_priv->info;

#define PRINT_S(name) "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
			 DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
			 info->gen,
			 dev_priv->dev->pdev->device,
			 dev_priv->dev->pdev->revision,
			 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
#undef PRINT_FLAG
#undef SEP_COMMA
}
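
/*
 * The PRINT_S/PRINT_FLAG pair above is an "X macro" list:
 * DEV_INFO_FOR_EACH_FLAG expands a caller-supplied macro once per device
 * flag, so a single flag list drives both the format string and the matching
 * argument list. A standalone sketch of the same technique, with a made-up
 * two-entry flag list:
 *
 *	#define FOR_EACH_FLAG(F, SEP) F(is_mobile) SEP F(has_llc)
 *	#define PRINT_S(name) "%s"
 *	#define SEP_EMPTY
 *	#define PRINT_FLAG(name) (info->name ? #name "," : "")
 *	#define SEP_COMMA ,
 *
 *	printk("flags=" FOR_EACH_FLAG(PRINT_S, SEP_EMPTY) "\n",
 *	       FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
 */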

static void cherryview_sseu_info_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	u32 fuse, eu_dis;

	info = (struct intel_device_info *)&dev_priv->info;
	fuse = I915_READ(CHV_FUSE_GT);

	info->slice_total = 1;

	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
		info->subslice_per_slice++;
		eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
				 CHV_FGT_EU_DIS_SS0_R1_MASK);
		info->eu_total += 8 - hweight32(eu_dis);
	}

	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
		info->subslice_per_slice++;
		eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
				 CHV_FGT_EU_DIS_SS1_R1_MASK);
		info->eu_total += 8 - hweight32(eu_dis);
	}

	info->subslice_total = info->subslice_per_slice;
	/*
	 * CHV expected to always have a uniform distribution of EU
	 * across subslices.
	 */
	info->eu_per_subslice = info->subslice_total ?
				info->eu_total / info->subslice_total :
				0;
	/*
	 * CHV supports subslice power gating on devices with more than
	 * one subslice, and supports EU power gating on devices with
	 * more than one EU pair per subslice.
	 */
	info->has_slice_pg = 0;
	info->has_subslice_pg = (info->subslice_total > 1);
	info->has_eu_pg = (info->eu_per_subslice > 2);
}

static void gen9_sseu_info_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	int s_max = 3, ss_max = 4, eu_max = 8;
	int s, ss;
	u32 fuse2, s_enable, ss_disable, eu_disable;
	u8 eu_mask = 0xff;

	info = (struct intel_device_info *)&dev_priv->info;
	fuse2 = I915_READ(GEN8_FUSE2);
	s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
		   GEN8_F2_S_ENA_SHIFT;
	ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
		     GEN9_F2_SS_DIS_SHIFT;

	info->slice_total = hweight32(s_enable);
	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	info->subslice_per_slice = ss_max - hweight32(ss_disable);
	info->subslice_total = info->slice_total *
			       info->subslice_per_slice;

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < s_max; s++) {
		if (!(s_enable & (0x1 << s)))
			/* skip disabled slice */
			continue;

		eu_disable = I915_READ(GEN9_EU_DISABLE(s));
		for (ss = 0; ss < ss_max; ss++) {
			int eu_per_ss;

			if (ss_disable & (0x1 << ss))
				/* skip disabled subslice */
				continue;

			eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
						      eu_mask);

			/*
			 * Record which subslice(s) has(have) 7 EUs. We
			 * can tune the hash used to spread work among
			 * subslices if they are unbalanced.
			 */
			if (eu_per_ss == 7)
				info->subslice_7eu[s] |= 1 << ss;

			info->eu_total += eu_per_ss;
		}
	}

	/*
	 * SKL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery. BXT is expected to be perfectly uniform in EU
	 * distribution.
	 */
	info->eu_per_subslice = info->subslice_total ?
				DIV_ROUND_UP(info->eu_total,
					     info->subslice_total) : 0;
	/*
	 * SKL supports slice power gating on devices with more than
	 * one slice, and supports EU power gating on devices with
	 * more than one EU pair per subslice. BXT supports subslice
	 * power gating on devices with more than one subslice, and
	 * supports EU power gating on devices with more than one EU
	 * pair per subslice.
	 */
	info->has_slice_pg = ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
			       (info->slice_total > 1));
	info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
	info->has_eu_pg = (info->eu_per_subslice > 2);
}
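
/*
 * Worked example of the fuse decoding used by the sseu init functions above
 * (the register value is made up): hweight8()/hweight32() are population
 * counts, so if an 8-bit EU-disable field reads 0x03, two EUs are fused off
 * and eu_max - hweight8(0x03) = 8 - 2 = 6 EUs remain enabled:
 *
 *	u8 eu_disable_field = 0x03;	// bits 0 and 1 set: two EUs disabled
 *	int eu_enabled = 8 - hweight8(eu_disable_field);	// == 6
 */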

static void broadwell_sseu_info_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	const int s_max = 3, ss_max = 3, eu_max = 8;
	int s, ss;
	u32 fuse2, eu_disable[s_max], s_enable, ss_disable;

	fuse2 = I915_READ(GEN8_FUSE2);
	s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
	ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;

	eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
	eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
			 (32 - GEN8_EU_DIS0_S1_SHIFT));
	eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
			 (32 - GEN8_EU_DIS1_S2_SHIFT));

	info = (struct intel_device_info *)&dev_priv->info;
	info->slice_total = hweight32(s_enable);

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	info->subslice_per_slice = ss_max - hweight32(ss_disable);
	info->subslice_total = info->slice_total * info->subslice_per_slice;

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < s_max; s++) {
		if (!(s_enable & (0x1 << s)))
			/* skip disabled slice */
			continue;

		for (ss = 0; ss < ss_max; ss++) {
			u32 n_disabled;

			if (ss_disable & (0x1 << ss))
				/* skip disabled subslice */
				continue;

			n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));

			/*
			 * Record which subslices have 7 EUs.
			 */
			if (eu_max - n_disabled == 7)
				info->subslice_7eu[s] |= 1 << ss;

			info->eu_total += eu_max - n_disabled;
		}
	}

	/*
	 * BDW is expected to always have a uniform distribution of EU across
	 * subslices with the exception that any one EU in any one subslice may
	 * be fused off for die recovery.
	 */
	info->eu_per_subslice = info->subslice_total ?
		DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;

	/*
	 * BDW supports slice power gating on devices with more than
	 * one slice.
	 */
	info->has_slice_pg = (info->slice_total > 1);
	info->has_subslice_pg = 0;
	info->has_eu_pg = 0;
}

/*
 * Determine various intel_device_info fields at runtime.
 *
 * Use it when either:
 *   - it's judged too laborious to fill n static structures with the limit
 *     when a simple if statement does the job,
 *   - run-time checks (e.g. read fuse/strap registers) are needed.
 *
 * This function needs to be called:
 *   - after the MMIO has been setup as we are reading registers,
 *   - after the PCH has been detected,
 *   - before the first usage of the fields it can tweak.
 */
static void intel_device_info_runtime_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	enum pipe pipe;

	info = (struct intel_device_info *)&dev_priv->info;

	/*
	 * Skylake and Broxton currently don't expose the topmost plane as its
	 * use is exclusive with the legacy cursor and we only want to expose
	 * one of those, not both. Until we can safely expose the topmost plane
	 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
	 * we don't expose the topmost plane at all to prevent ABI breakage
	 * down the line.
	 */
	if (IS_BROXTON(dev)) {
		info->num_sprites[PIPE_A] = 2;
		info->num_sprites[PIPE_B] = 2;
		info->num_sprites[PIPE_C] = 1;
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 2;
	else
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 1;

	if (i915.disable_display) {
		DRM_INFO("Display disabled (module parameter)\n");
		info->num_pipes = 0;
	} else if (info->num_pipes > 0 &&
		   (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
		   HAS_PCH_SPLIT(dev)) {
		u32 fuse_strap = I915_READ(FUSE_STRAP);
		u32 sfuse_strap = I915_READ(SFUSE_STRAP);

		/*
		 * SFUSE_STRAP is supposed to have a bit signalling the display
		 * is fused off. Unfortunately it seems that, at least in
		 * certain cases, fused off display means that PCH display
		 * reads don't land anywhere. In that case, we read 0s.
		 *
		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
		 * should be set when taking over after the firmware.
		 */
		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
		    (dev_priv->pch_type == PCH_CPT &&
		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
			DRM_INFO("Display fused off, disabling\n");
			info->num_pipes = 0;
		} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
			DRM_INFO("PipeC fused off\n");
			info->num_pipes -= 1;
		}
	} else if (info->num_pipes > 0 && INTEL_INFO(dev)->gen == 9) {
		u32 dfsm = I915_READ(SKL_DFSM);
		u8 disabled_mask = 0;
		bool invalid;
		int num_bits;

		if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
			disabled_mask |= BIT(PIPE_A);
		if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
			disabled_mask |= BIT(PIPE_B);
		if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
			disabled_mask |= BIT(PIPE_C);

		num_bits = hweight8(disabled_mask);

		switch (disabled_mask) {
		case BIT(PIPE_A):
		case BIT(PIPE_B):
		case BIT(PIPE_A) | BIT(PIPE_B):
		case BIT(PIPE_A) | BIT(PIPE_C):
			invalid = true;
			break;
		default:
			invalid = false;
		}

		if (num_bits > info->num_pipes || invalid)
			DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
				  disabled_mask);
		else
			info->num_pipes -= num_bits;
	}

	/* Initialize slice/subslice/EU info */
	if (IS_CHERRYVIEW(dev))
		cherryview_sseu_info_init(dev);
	else if (IS_BROADWELL(dev))
		broadwell_sseu_info_init(dev);
	else if (INTEL_INFO(dev)->gen >= 9)
		gen9_sseu_info_init(dev);

	DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
	DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
	DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
	DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
	DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
	DRM_DEBUG_DRIVER("has slice power gating: %s\n",
			 info->has_slice_pg ? "y" : "n");
	DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
			 info->has_subslice_pg ? "y" : "n");
	DRM_DEBUG_DRIVER("has EU power gating: %s\n",
			 info->has_eu_pg ? "y" : "n");
}

static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
	/*
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
	 * CHV x1 PHY (DP/HDMI D)
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
	 */
	if (IS_CHERRYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
	}
}

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time.  Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL)
		goto out_free_wq;

	dev_priv->gpu_error.hangcheck_wq =
		alloc_ordered_workqueue("i915-hangcheck", 0);
	if (dev_priv->gpu_error.hangcheck_wq == NULL)
		goto out_free_dp_wq;

	system_wq = dev_priv->wq;

	return 0;

out_free_dp_wq:
out_free_wq:
out_err:
	DRM_ERROR("Failed to allocate workqueues.\n");

	return -ENOMEM;
}
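
/*
 * Example use of the ordered queue allocated above (sketch only; the work
 * handler name is hypothetical, standing in for the driver's real retire
 * work): because the workqueue is ordered, items queued on dev_priv->wq can
 * never run concurrently with one another.
 *
 *	static void retire_work_handler(struct work_struct *work)
 *	{
 *		// ... take dev->struct_mutex, retire completed requests ...
 *	}
 *	static DECLARE_WORK(retire_work, retire_work_handler);
 *
 *	queue_work(dev_priv->wq, &retire_work);
 */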

static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	/* Workqueue teardown is a no-op in this port. */
}

static int i915_mmio_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int mmio_bar;
	int mmio_size;

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/*
	 * Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap so that it
	 * does not clobber the GTT, which we want mapped with ioremap_wc
	 * instead. Fortunately, the register BAR remains the same size for
	 * all the earlier generations up to Ironlake.
	 */
	if (INTEL_INFO(dev)->gen < 5)
		mmio_size = 512 * 1024;
	else
		mmio_size = 2 * 1024 * 1024;
	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
	if (dev_priv->regs == NULL) {
		DRM_ERROR("failed to map registers\n");

		return -EIO;
	}

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);

	return 0;
}
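
/*
 * Once dev_priv->regs is mapped, register access is ordinary MMIO. Raw
 * accesses to the mapped BAR look like the sketch below (the offset is
 * illustrative); the driver's I915_READ()/I915_WRITE() macros layer
 * forcewake handling on top of this:
 *
 *	u32 val = readl(dev_priv->regs + 0x2030);	// read a register
 *	writel(val, dev_priv->regs + 0x2030);		// write it back
 */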

/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	struct intel_device_info *info, *device_info;
	int ret = 0;
	uint32_t aperture_size;

	info = (struct intel_device_info *) flags;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = dev_priv;
	dev_priv->dev = dev;

	/* Setup the write-once "constant" device info */
	device_info = (struct intel_device_info *)&dev_priv->info;
	memcpy(device_info, info, sizeof(dev_priv->info));
	device_info->device_id = dev->pdev->device;

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);
	spin_lock_init(&dev_priv->mm.object_stat_lock);
	spin_lock_init(&dev_priv->mmio_flip_lock);
	mutex_init(&dev_priv->sb_lock);
	mutex_init(&dev_priv->modeset_restore_lock);
	mutex_init(&dev_priv->av_mutex);

	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		goto out_free_priv;

	intel_pm_setup(dev);

	intel_runtime_pm_get(dev_priv);

	intel_display_crc_init(dev);

	i915_dump_device_info(dev_priv);

	/* Not all pre-production machines fall into this category, only the
	 * very first ones. Almost everything should work, except for maybe
	 * suspend/resume. And we don't implement workarounds that affect only
	 * pre-production machines. */
	if (IS_HSW_EARLY_SDV(dev))
		DRM_INFO("This is an early pre-production Haswell machine. "
			 "It may not be fully functional.\n");

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto out_runtime_pm_put;
	}

	ret = i915_mmio_setup(dev);
	if (ret < 0)
		goto put_bridge;

	set_fake_framebuffer();

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev);

	intel_uncore_init(dev);

	ret = i915_gem_gtt_init(dev);
	if (ret)
		goto out_uncore_fini;

	/* WARNING: Apparently we must kick fbdev drivers before vgacon,
	 * otherwise the vga fbdev driver falls over. */
	ret = i915_kick_out_firmware_fb(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
		goto out_gtt;
	}

	ret = i915_kick_out_vgacon(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting VGA console\n");
		goto out_gtt;
	}

	pci_set_master(dev->pdev);

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */

	aperture_size = dev_priv->gtt.mappable_end;

	printk("aperture base %x size = %x\n",
	       (u32)dev_priv->gtt.mappable_base, (u32)aperture_size);
	dev_priv->gtt.mappable =
		io_mapping_create_wc(dev_priv->gtt.mappable_base,
				     aperture_size);
	if (dev_priv->gtt.mappable == NULL) {
		ret = -EIO;
		goto out_gtt;
	}

	intel_irq_init(dev_priv);
	intel_uncore_sanitize(dev);

	intel_opregion_setup(dev);

	i915_gem_load_init(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyway to avoid
	 * stuck interrupts on some machines.
	 */

	intel_device_info_runtime_init(dev);

	intel_init_dpio(dev_priv);

	if (INTEL_INFO(dev)->num_pipes) {
		ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
		if (ret)
			goto out_gem_unload;
	}

	intel_power_domains_init(dev_priv);

	ret = i915_load_modeset_init(dev);
	if (ret < 0) {
		DRM_ERROR("failed to init modeset\n");
		goto out_power_well;
	}

	/*
	 * Notify a valid surface after modesetting,
	 * when running inside a VM.
	 */
	if (intel_vgpu_active(dev))
		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);

	if (INTEL_INFO(dev)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_init(dev);
	}

	if (IS_GEN5(dev))
		intel_gpu_ips_init(dev_priv);

//   intel_runtime_pm_enable(dev_priv);

	main_device = dev;

	return 0;

out_power_well:
	drm_vblank_cleanup(dev);
out_gem_unload:
out_gtt:
	i915_global_gtt_cleanup(dev);
out_uncore_fini:
put_bridge:
out_runtime_pm_put:
	i915_workqueues_cleanup(dev_priv);
out_free_priv:
	kfree(dev_priv);

	return ret;
}

#if 0
int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	intel_fbdev_fini(dev);

	i915_audio_component_cleanup(dev_priv);

	ret = i915_gem_suspend(dev);
	if (ret) {
		DRM_ERROR("failed to idle hardware: %d\n", ret);
		return ret;
	}

	intel_power_domains_fini(dev_priv);

	intel_gpu_ips_teardown();

	i915_teardown_sysfs(dev);

	i915_gem_shrinker_cleanup(dev_priv);

	io_mapping_free(dev_priv->gtt.mappable);
	arch_phys_wc_del(dev_priv->gtt.mtrr);

	acpi_video_unregister();

	drm_vblank_cleanup(dev);

	intel_modeset_cleanup(dev);

	/*
	 * free the memory space allocated for the child device
	 * config parsed from VBT
	 */
	if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
		kfree(dev_priv->vbt.child_dev);
		dev_priv->vbt.child_dev = NULL;
		dev_priv->vbt.child_dev_num = 0;
	}
	kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
	dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
	kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
	dev_priv->vbt.lfp_lvds_vbt_mode = NULL;

	vga_switcheroo_unregister_client(dev->pdev);
	vga_client_register(dev->pdev, NULL, NULL, NULL);

	intel_csr_ucode_fini(dev_priv);

	/* Free error state after interrupts are fully disabled. */
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	i915_destroy_error_state(dev);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_opregion_fini(dev);

	/* Flush any outstanding unpin_work. */
	flush_workqueue(dev_priv->wq);

	intel_guc_ucode_fini(dev);
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);
	intel_fbc_cleanup_cfb(dev_priv);

	pm_qos_remove_request(&dev_priv->pm_qos);

	i915_global_gtt_cleanup(dev);

	intel_uncore_fini(dev);
	i915_mmio_cleanup(dev);

	i915_gem_load_cleanup(dev);
	pci_dev_put(dev_priv->bridge_dev);
	i915_workqueues_cleanup(dev_priv);
	kfree(dev_priv);

	return 0;
}
#endif

int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	int ret;

	ret = i915_gem_open(dev, file);
	if (ret)
		return ret;

	return 0;
}

#if 0
/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device *dev)
{
	intel_fbdev_restore_mode(dev);
	vga_switcheroo_process_delayed_switch();
}

void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
{
	mutex_lock(&dev->struct_mutex);
	i915_gem_context_close(dev, file);
	i915_gem_release(dev, file);
	mutex_unlock(&dev->struct_mutex);
}

void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	kfree(file_priv);
}

static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	return -ENODEV;
}

const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
};

int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
#endif