/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/async.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_legacy.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include <linux/pci.h>
#include <linux/vgaarb.h>
//#include <linux/acpi.h>
//#include <linux/pnp.h>
//#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
//#include <acpi/video.h>
#include <syscall.h>

void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen);

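/*
 * i915_getparam() backs the DRM_IOCTL_I915_GETPARAM ioctl: userspace asks
 * about one capability per call and the answer is written back through
 * param->value. A hypothetical libdrm caller would look roughly like:
 *
 *	int id;
 *	drm_i915_getparam_t gp = { .param = I915_PARAM_CHIPSET_ID, .value = &id };
 *	drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */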
int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
	case I915_PARAM_ALLOW_BATCHBUFFER:
	case I915_PARAM_LAST_DISPATCH:
		/* Reject all old ums/dri params. */
		return -ENODEV;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pdev->device;
		break;
	case I915_PARAM_REVISION:
		value = dev->pdev->revision;
		break;
	case I915_PARAM_HAS_GEM:
		value = 1;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		/* depends on GEM */
		value = 1;
		break;
	case I915_PARAM_HAS_BSD:
		value = intel_ring_initialized(&dev_priv->ring[VCS]);
		break;
	case I915_PARAM_HAS_BLT:
		value = intel_ring_initialized(&dev_priv->ring[BCS]);
		break;
	case I915_PARAM_HAS_VEBOX:
		value = intel_ring_initialized(&dev_priv->ring[VECS]);
		break;
	case I915_PARAM_HAS_BSD2:
		value = intel_ring_initialized(&dev_priv->ring[VCS2]);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_INFO(dev)->gen >= 4;
		break;
	case I915_PARAM_HAS_RELAXED_DELTA:
		value = 1;
		break;
	case I915_PARAM_HAS_GEN7_SOL_RESET:
		value = 1;
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev);
		break;
	case I915_PARAM_HAS_WT:
		value = HAS_WT(dev);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = USES_PPGTT(dev);
		break;
	case I915_PARAM_HAS_WAIT_TIMEOUT:
		value = 1;
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = i915_semaphore_is_enabled(dev);
		break;
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
		value = 1;
		break;
	case I915_PARAM_HAS_SECURE_BATCHES:
		value = 1;
		break;
	case I915_PARAM_HAS_PINNED_BATCHES:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_NO_RELOC:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
		value = 1;
		break;
	case I915_PARAM_CMD_PARSER_VERSION:
		value = i915_cmd_parser_get_version();
		break;
	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
		value = 1;
		break;
	case I915_PARAM_MMAP_VERSION:
		value = 1;
		break;
	case I915_PARAM_SUBSLICE_TOTAL:
		value = INTEL_INFO(dev)->subslice_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_EU_TOTAL:
		value = INTEL_INFO(dev)->eu_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_HAS_GPU_RESET:
		value = i915.enable_hangcheck &&
			intel_has_gpu_reset(dev);
		break;
	case I915_PARAM_HAS_RESOURCE_STREAMER:
		value = HAS_RESOURCE_STREAMER(dev);
		break;
	default:
		DRM_DEBUG("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	*param->value = value;

	return 0;
}

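/*
 * The "bridge" is PCI device 00:00.0, the host bridge; a reference is kept
 * so later code (e.g. the MCHBAR setup below) can reach its config space.
 */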
static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define   DEVEN_MCHBAR_EN (1 << 28)

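/*
 * MCHBAR is the memory-controller-hub register window advertised by the
 * host bridge: config offset 0x44 before gen4, 0x48 from gen4 onwards,
 * plus a separate enable bit in DEVEN on i915G/GM.
 */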
/* Set up MCHBAR if possible; remember whether we have to disable it again on teardown. */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, we don't have to do anything. */
	if (enabled)
		return;
/*
	if (intel_alloc_mchbar_resource(dev))
		return;

	God help us all
*/
	dev_priv->mchbar_need_disable = true;

	DRM_INFO("enable MCHBAR\n");

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
			temp &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
		} else {
			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
			temp &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

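/*
 * In the upstream driver the callback below is handed to the VGA arbiter
 * through vga_client_register(), so the arbiter knows which legacy VGA
 * ranges the GPU still decodes once KMS has taken over.
 */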
/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_parse_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* If we have more than one VGA card, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */

	/* Initialise stolen first so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	ret = i915_gem_init_stolen(dev);
	if (ret)
		goto cleanup_vga_switcheroo;

	intel_power_domains_init_hw(dev_priv);

	ret = intel_irq_install(dev_priv);
	if (ret)
		goto cleanup_gem_stolen;

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	intel_modeset_init(dev);

	ret = i915_gem_init(dev);
	if (ret)
		goto cleanup_irq;

	intel_modeset_gem_init(dev);

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = true;
	if (INTEL_INFO(dev)->num_pipes == 0)
		return 0;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
//	intel_hpd_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45.  Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. Now we should scan for the initial config
	 * only once hotplug handling is enabled, but due to screwed-up locking
	 * around kms/fbdev init we can't protect the fbdev initial config
	 * scanning against hotplug events. Hence do this first and ignore the
	 * tiny window where we will lose hotplug notifications.
	 */
	intel_fbdev_initial_config(dev_priv, 0);

	drm_kms_helper_poll_init(dev);

	return 0;

cleanup_gem:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);
cleanup_irq:
//	drm_irq_uninstall(dev);
cleanup_gem_stolen:
//	i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
//	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
//	vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
	return ret;
}

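/*
 * Evict any firmware framebuffer (e.g. EFI/VESA) that overlaps our GTT
 * aperture before we claim it; compiled to a no-op when CONFIG_FB is off.
 */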
#if IS_ENABLED(CONFIG_FB)
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->dev->pdev;
	bool primary;
	int ret;

	ap = alloc_apertures(1);
	if (!ap)
		return -ENOMEM;

	ap->ranges[0].base = dev_priv->gtt.mappable_base;
	ap->ranges[0].size = dev_priv->gtt.mappable_end;

	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);

	return ret;
}
#else
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	return 0;
}
#endif

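/*
 * Three-way build ladder: without a VGA console there is nothing to kick
 * out; with a VGA console but no dummy console there is nothing to replace
 * it with; otherwise swap vgacon for dummycon under the console lock.
 */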
#if !defined(CONFIG_VGA_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return 0;
}
#elif !defined(CONFIG_DUMMY_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return -ENODEV;
}
#else
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	DRM_INFO("Replacing VGA console driver\n");

	console_lock();
	if (con_is_bound(&vga_con))
		ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
	if (ret == 0) {
		ret = do_unregister_con_driver(&vga_con);

		/* Ignore "already unregistered". */
		if (ret == -ENODEV)
			ret = 0;
	}
	console_unlock();

	return ret;
}
#endif

static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
	const struct intel_device_info *info = &dev_priv->info;

#define PRINT_S(name) "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
			 DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
			 info->gen,
			 dev_priv->dev->pdev->device,
			 dev_priv->dev->pdev->revision,
			 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
#undef PRINT_FLAG
#undef SEP_COMMA
}

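/*
 * The *_sseu_info_init() helpers below read the slice/subslice/EU fuse
 * registers so the driver knows how much of the GPU survived binning;
 * the resulting counts feed the I915_PARAM_SUBSLICE_TOTAL and
 * I915_PARAM_EU_TOTAL queries in i915_getparam() above.
 */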
static void cherryview_sseu_info_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	u32 fuse, eu_dis;

	info = (struct intel_device_info *)&dev_priv->info;
	fuse = I915_READ(CHV_FUSE_GT);

	info->slice_total = 1;

	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
		info->subslice_per_slice++;
		eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
				 CHV_FGT_EU_DIS_SS0_R1_MASK);
		info->eu_total += 8 - hweight32(eu_dis);
	}

	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
		info->subslice_per_slice++;
		eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
				 CHV_FGT_EU_DIS_SS1_R1_MASK);
		info->eu_total += 8 - hweight32(eu_dis);
	}

	info->subslice_total = info->subslice_per_slice;
	/*
	 * CHV is expected to always have a uniform distribution of EU
	 * across subslices.
	 */
	info->eu_per_subslice = info->subslice_total ?
				info->eu_total / info->subslice_total :
				0;
	/*
	 * CHV supports subslice power gating on devices with more than
	 * one subslice, and supports EU power gating on devices with
	 * more than one EU pair per subslice.
	 */
	info->has_slice_pg = 0;
	info->has_subslice_pg = (info->subslice_total > 1);
	info->has_eu_pg = (info->eu_per_subslice > 2);
}

static void gen9_sseu_info_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	int s_max = 3, ss_max = 4, eu_max = 8;
	int s, ss;
	u32 fuse2, s_enable, ss_disable, eu_disable;
	u8 eu_mask = 0xff;

	info = (struct intel_device_info *)&dev_priv->info;
	fuse2 = I915_READ(GEN8_FUSE2);
	s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
		   GEN8_F2_S_ENA_SHIFT;
	ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
		     GEN9_F2_SS_DIS_SHIFT;

	info->slice_total = hweight32(s_enable);
	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	info->subslice_per_slice = ss_max - hweight32(ss_disable);
	info->subslice_total = info->slice_total *
			       info->subslice_per_slice;

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < s_max; s++) {
		if (!(s_enable & (0x1 << s)))
			/* skip disabled slice */
			continue;

		eu_disable = I915_READ(GEN9_EU_DISABLE(s));
		for (ss = 0; ss < ss_max; ss++) {
			int eu_per_ss;

			if (ss_disable & (0x1 << ss))
				/* skip disabled subslice */
				continue;

			eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
						      eu_mask);

			/*
			 * Record which subslice(s) have 7 EUs. We
			 * can tune the hash used to spread work among
			 * subslices if they are unbalanced.
			 */
			if (eu_per_ss == 7)
				info->subslice_7eu[s] |= 1 << ss;

			info->eu_total += eu_per_ss;
		}
	}

	/*
	 * SKL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery. BXT is expected to be perfectly uniform in EU
	 * distribution.
	 */
	info->eu_per_subslice = info->subslice_total ?
				DIV_ROUND_UP(info->eu_total,
					     info->subslice_total) : 0;
	/*
	 * SKL supports slice power gating on devices with more than
	 * one slice, and supports EU power gating on devices with
	 * more than one EU pair per subslice. BXT supports subslice
	 * power gating on devices with more than one subslice, and
	 * supports EU power gating on devices with more than one EU
	 * pair per subslice.
	 */
	info->has_slice_pg = (IS_SKYLAKE(dev) && (info->slice_total > 1));
	info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
	info->has_eu_pg = (info->eu_per_subslice > 2);
}

static void broadwell_sseu_info_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	const int s_max = 3, ss_max = 3, eu_max = 8;
	int s, ss;
	u32 fuse2, eu_disable[s_max], s_enable, ss_disable;

	fuse2 = I915_READ(GEN8_FUSE2);
	s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
	ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;

	eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
	eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
			 (32 - GEN8_EU_DIS0_S1_SHIFT));
	eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
			 (32 - GEN8_EU_DIS1_S2_SHIFT));

	info = (struct intel_device_info *)&dev_priv->info;
	info->slice_total = hweight32(s_enable);

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	info->subslice_per_slice = ss_max - hweight32(ss_disable);
	info->subslice_total = info->slice_total * info->subslice_per_slice;

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < s_max; s++) {
		if (!(s_enable & (0x1 << s)))
			/* skip disabled slice */
			continue;

		for (ss = 0; ss < ss_max; ss++) {
			u32 n_disabled;

			if (ss_disable & (0x1 << ss))
				/* skip disabled subslice */
				continue;

			n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));

			/*
			 * Record which subslices have 7 EUs.
			 */
			if (eu_max - n_disabled == 7)
				info->subslice_7eu[s] |= 1 << ss;

			info->eu_total += eu_max - n_disabled;
		}
	}

	/*
	 * BDW is expected to always have a uniform distribution of EU across
	 * subslices with the exception that any one EU in any one subslice may
	 * be fused off for die recovery.
	 */
	info->eu_per_subslice = info->subslice_total ?
		DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;

	/*
	 * BDW supports slice power gating on devices with more than
	 * one slice.
	 */
	info->has_slice_pg = (info->slice_total > 1);
	info->has_subslice_pg = 0;
	info->has_eu_pg = 0;
}

/*
 * Determine various intel_device_info fields at runtime.
 *
 * Use it when either:
 *   - it's judged too laborious to fill n static structures with the limit
 *     when a simple if statement does the job,
 *   - run-time checks (e.g. reading fuse/strap registers) are needed.
 *
 * This function needs to be called:
 *   - after the MMIO has been setup as we are reading registers,
 *   - after the PCH has been detected,
 *   - before the first usage of the fields it can tweak.
 */
static void intel_device_info_runtime_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	enum pipe pipe;

	info = (struct intel_device_info *)&dev_priv->info;

	/*
	 * Skylake and Broxton currently don't expose the topmost plane as its
	 * use is exclusive with the legacy cursor and we only want to expose
	 * one of those, not both. Until we can safely expose the topmost plane
	 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
	 * we don't expose the topmost plane at all to prevent ABI breakage
	 * down the line.
	 */
	if (IS_BROXTON(dev)) {
		info->num_sprites[PIPE_A] = 2;
		info->num_sprites[PIPE_B] = 2;
		info->num_sprites[PIPE_C] = 1;
	} else if (IS_VALLEYVIEW(dev))
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 2;
	else
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 1;

	if (i915.disable_display) {
		DRM_INFO("Display disabled (module parameter)\n");
		info->num_pipes = 0;
	} else if (info->num_pipes > 0 &&
		   (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
		   !IS_VALLEYVIEW(dev)) {
		u32 fuse_strap = I915_READ(FUSE_STRAP);
		u32 sfuse_strap = I915_READ(SFUSE_STRAP);

		/*
		 * SFUSE_STRAP is supposed to have a bit signalling the display
		 * is fused off. Unfortunately it seems that, at least in
		 * certain cases, fused off display means that PCH display
		 * reads don't land anywhere. In that case, we read 0s.
		 *
		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
		 * should be set when taking over after the firmware.
		 */
		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
		    (dev_priv->pch_type == PCH_CPT &&
		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
			DRM_INFO("Display fused off, disabling\n");
			info->num_pipes = 0;
		}
	}

	/* Initialize slice/subslice/EU info */
	if (IS_CHERRYVIEW(dev))
		cherryview_sseu_info_init(dev);
	else if (IS_BROADWELL(dev))
		broadwell_sseu_info_init(dev);
	else if (INTEL_INFO(dev)->gen >= 9)
		gen9_sseu_info_init(dev);

	DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
	DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
	DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
	DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
	DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
	DRM_DEBUG_DRIVER("has slice power gating: %s\n",
			 info->has_slice_pg ? "y" : "n");
	DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
			 info->has_subslice_pg ? "y" : "n");
	DRM_DEBUG_DRIVER("has EU power gating: %s\n",
			 info->has_eu_pg ? "y" : "n");
}

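/*
 * DPIO is the sideband bus to the display PHYs on Valleyview/Cherryview;
 * accesses are routed through an IOSF port, so each PHY has to be mapped
 * to the right port number before first use.
 */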
static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
	if (!IS_VALLEYVIEW(dev_priv))
		return;

	/*
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
	 * CHV x1 PHY (DP/HDMI D)
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
	 */
	if (IS_CHERRYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
	} else {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
	}
}

/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	struct intel_device_info *info, *device_info;
	int ret = 0, mmio_bar, mmio_size;
	uint32_t aperture_size;

	info = (struct intel_device_info *) flags;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = dev_priv;
	dev_priv->dev = dev;

	/* Setup the write-once "constant" device info */
	device_info = (struct intel_device_info *)&dev_priv->info;
	memcpy(device_info, info, sizeof(dev_priv->info));
	device_info->device_id = dev->pdev->device;

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);
	spin_lock_init(&dev_priv->mm.object_stat_lock);
	spin_lock_init(&dev_priv->mmio_flip_lock);
	mutex_init(&dev_priv->sb_lock);
	mutex_init(&dev_priv->modeset_restore_lock);
	mutex_init(&dev_priv->csr_lock);
	mutex_init(&dev_priv->av_mutex);

	intel_pm_setup(dev);

	intel_display_crc_init(dev);

	i915_dump_device_info(dev_priv);

	/* Not all pre-production machines fall into this category, only the
	 * very first ones. Almost everything should work, except for maybe
	 * suspend/resume. And we don't implement workarounds that affect only
	 * pre-production machines. */
	if (IS_HSW_EARLY_SDV(dev))
		DRM_INFO("This is an early pre-production Haswell machine. "
			 "It may not be fully functional.\n");

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
	}

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/* Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
	 * the register BAR remains the same size for all the earlier
	 * generations up to Ironlake.
	 */
	if (info->gen < 5)
		mmio_size = 512*1024;
	else
		mmio_size = 2*1024*1024;

	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_bridge;
	}

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev);

	intel_uncore_init(dev);

	/* Load CSR Firmware for SKL */
	intel_csr_ucode_init(dev);

	ret = i915_gem_gtt_init(dev);
	if (ret)
		goto out_freecsr;

	ret = i915_kick_out_vgacon(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting VGA console\n");
		goto out_gtt;
	}

	pci_set_master(dev->pdev);

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */

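	/* KolibriOS-specific: AllocKernelSpace() below reserves a small
	 * kernel address window that this port appears to use in place of
	 * the io_mapping_create_wc() aperture mapping set up by the
	 * upstream driver. */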
	aperture_size = dev_priv->gtt.mappable_end;

	dev_priv->gtt.mappable = AllocKernelSpace(8192);
	if (dev_priv->gtt.mappable == NULL) {
		ret = -EIO;
		goto out_gtt;
	}

	/* The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time.  Use an ordered one.
	 */
	dev_priv->wq = (struct workqueue_struct *)alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_mtrrfree;
	}
	system_wq = dev_priv->wq;
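	/* The assignment above is KolibriOS-specific: the global system_wq
	 * is aliased to the driver's ordered workqueue, so work scheduled
	 * on the generic queue runs there as well. */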

	intel_irq_init(dev_priv);
	intel_uncore_sanitize(dev);

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);
	intel_setup_gmbus(dev);
	intel_opregion_setup(dev);

	i915_gem_load(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyways to avoid
	 * stuck interrupts on some machines.
	 */

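	/* Note: the pci_enable_msi() call that follows this comment in the
	 * upstream driver appears to have been dropped in this port. */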
	intel_device_info_runtime_init(dev);

	intel_init_dpio(dev_priv);

//   if (INTEL_INFO(dev)->num_pipes) {
//       ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
//       if (ret)
//           goto out_gem_unload;
//   }

	intel_power_domains_init(dev_priv);

	ret = i915_load_modeset_init(dev);
	if (ret < 0) {
		DRM_ERROR("failed to init modeset\n");
		goto out_power_well;
	}

	/*
	 * Notify a valid surface after modesetting,
	 * when running inside a VM.
	 */
	if (intel_vgpu_active(dev))
		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);

	if (INTEL_INFO(dev)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_init(dev);
	}

	if (IS_GEN5(dev))
		intel_gpu_ips_init(dev_priv);

//   intel_runtime_pm_enable(dev_priv);

	main_device = dev;

	return 0;

out_power_well:
out_gem_unload:
out_mtrrfree:
out_gtt:
	i915_global_gtt_cleanup(dev);
out_freecsr:
put_bridge:
free_priv:
	kfree(dev_priv);
	return ret;
}

#if 0
int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	i915_audio_component_cleanup(dev_priv);

	ret = i915_gem_suspend(dev);
	if (ret) {
		DRM_ERROR("failed to idle hardware: %d\n", ret);
		return ret;
	}

	intel_power_domains_fini(dev_priv);

	intel_gpu_ips_teardown();

	i915_teardown_sysfs(dev);

	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);

	io_mapping_free(dev_priv->gtt.mappable);
	arch_phys_wc_del(dev_priv->gtt.mtrr);

	acpi_video_unregister();

	intel_fbdev_fini(dev);

	drm_vblank_cleanup(dev);

	intel_modeset_cleanup(dev);

	/*
	 * free the memory space allocated for the child device
	 * config parsed from VBT
	 */
	if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
		kfree(dev_priv->vbt.child_dev);
		dev_priv->vbt.child_dev = NULL;
		dev_priv->vbt.child_dev_num = 0;
	}
	kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
	dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
	kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
	dev_priv->vbt.lfp_lvds_vbt_mode = NULL;

	vga_switcheroo_unregister_client(dev->pdev);
	vga_client_register(dev->pdev, NULL, NULL, NULL);

	/* Free error state after interrupts are fully disabled. */
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	i915_destroy_error_state(dev);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_opregion_fini(dev);

	/* Flush any outstanding unpin_work. */
	flush_workqueue(dev_priv->wq);

	intel_guc_ucode_fini(dev);
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);
	intel_fbc_cleanup_cfb(dev_priv);
	i915_gem_cleanup_stolen(dev);

	intel_csr_ucode_fini(dev);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);

	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
	pm_qos_remove_request(&dev_priv->pm_qos);

	i915_global_gtt_cleanup(dev);

	intel_uncore_fini(dev);
	if (dev_priv->regs != NULL)
		pci_iounmap(dev->pdev, dev_priv->regs);

	kmem_cache_destroy(dev_priv->requests);
	kmem_cache_destroy(dev_priv->vmas);
	kmem_cache_destroy(dev_priv->objects);
	pci_dev_put(dev_priv->bridge_dev);
	kfree(dev_priv);

	return 0;
}
#endif

int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	int ret;

	ret = i915_gem_open(dev, file);
	if (ret)
		return ret;

	return 0;
}

#if 0
/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device *dev)
{
	intel_fbdev_restore_mode(dev);
	vga_switcheroo_process_delayed_switch();
}

void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
{
	mutex_lock(&dev->struct_mutex);
	i915_gem_context_close(dev, file);
	i915_gem_release(dev, file);
	mutex_unlock(&dev->struct_mutex);

	intel_modeset_preclose(dev, file);
}

void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	if (file_priv && file_priv->bsd_ring)
		file_priv->bsd_ring = NULL;
	kfree(file_priv);
}

static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	return -ENODEV;
}

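/*
 * Master ioctl table. DRM_AUTH entries require an authenticated client,
 * DRM_MASTER/DRM_ROOT_ONLY restrict an ioctl to the current master or
 * root, and DRM_RENDER_ALLOW additionally exposes it on render nodes;
 * the legacy DRI1 entry points are all wired to drm_noop.
 */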
const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
};

int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
#endif