Subversion Repositories Kolibri OS

Rev

Rev 5128 | Rev 5367 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
2326 Serge 1
/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
2
 */
3
/*
4
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5
 * All Rights Reserved.
6
 *
7
 * Permission is hereby granted, free of charge, to any person obtaining a
8
 * copy of this software and associated documentation files (the
9
 * "Software"), to deal in the Software without restriction, including
10
 * without limitation the rights to use, copy, modify, merge, publish,
11
 * distribute, sub license, and/or sell copies of the Software, and to
12
 * permit persons to whom the Software is furnished to do so, subject to
13
 * the following conditions:
14
 *
15
 * The above copyright notice and this permission notice (including the
16
 * next paragraph) shall be included in all copies or substantial portions
17
 * of the Software.
18
 *
19
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
 *
27
 */
28
 
3031 serge 29
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
 
5354 serge 31
#include 
3031 serge 32
#include 
33
#include 
34
#include 
5354 serge 35
#include 
2326 Serge 36
#include "intel_drv.h"
3031 serge 37
#include 
2326 Serge 38
#include "i915_drv.h"
2351 Serge 39
#include "i915_trace.h"
2326 Serge 40
#include 
5354 serge 41
#include 
2326 Serge 42
//#include 
43
//#include 
44
//#include 
2330 Serge 45
#include 
2326 Serge 46
//#include 
47
 
48
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen);
49
 
4246 Serge 50
/*
 * i915_getparam - DRM_IOCTL_I915_GETPARAM handler
 * @dev: DRM device being queried
 * @data: drm_i915_getparam_t from userspace (param id + value pointer)
 * @file_priv: unused here
 *
 * Reports driver/hardware capabilities to userspace.  Returns 0 on
 * success, -ENODEV for retired UMS/DRI1 params, -EINVAL for unknown ones.
 *
 * NOTE(review): unlike upstream Linux, the result is stored with a direct
 * write through param->value rather than copy_to_user() — presumably safe
 * in this port because caller memory is directly addressable; confirm
 * against the platform's ioctl dispatch.
 */
int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
	case I915_PARAM_ALLOW_BATCHBUFFER:
	case I915_PARAM_LAST_DISPATCH:
		/* Reject all old ums/dri params. */
		return -ENODEV;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pdev->device;
		break;
	case I915_PARAM_HAS_GEM:
		value = 1;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		/* Fences still available to userspace after the reserved ones. */
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		/* depends on GEM */
		value = 1;
		break;
	case I915_PARAM_HAS_BSD:
		value = intel_ring_initialized(&dev_priv->ring[VCS]);
		break;
	case I915_PARAM_HAS_BLT:
		value = intel_ring_initialized(&dev_priv->ring[BCS]);
		break;
	case I915_PARAM_HAS_VEBOX:
		value = intel_ring_initialized(&dev_priv->ring[VECS]);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		/* Only gen4+ supports the EXEC_CONSTANTS relocation mode. */
		value = INTEL_INFO(dev)->gen >= 4;
		break;
	case I915_PARAM_HAS_RELAXED_DELTA:
		value = 1;
		break;
	case I915_PARAM_HAS_GEN7_SOL_RESET:
		value = 1;
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev);
		break;
	case I915_PARAM_HAS_WT:
		value = HAS_WT(dev);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = USES_PPGTT(dev);
		break;
	case I915_PARAM_HAS_WAIT_TIMEOUT:
		value = 1;
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = i915_semaphore_is_enabled(dev);
		break;
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
		value = 1;
		break;
	/* NOTE(review): upstream gates secure batches on capable(CAP_SYS_ADMIN);
	 * this port reports them unconditionally. */
    case I915_PARAM_HAS_SECURE_BATCHES:
        value = 1;
		break;
	case I915_PARAM_HAS_PINNED_BATCHES:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_NO_RELOC:
		value = 1;
        break;
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
		value = 1;
        break;
	case I915_PARAM_CMD_PARSER_VERSION:
		value = i915_cmd_parser_get_version();
		break;
	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
		value = 1;
		break;
	default:
		DRM_DEBUG("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

    *param->value = value;

	return 0;
}
151
 
3255 Serge 152
#if 0
3031 serge 153
static int i915_setparam(struct drm_device *dev, void *data,
154
			 struct drm_file *file_priv)
155
{
5060 serge 156
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 157
	drm_i915_setparam_t *param = data;
158
 
159
	switch (param->param) {
160
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
161
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
162
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
5354 serge 163
		/* Reject all old ums/dri params. */
164
		return -ENODEV;
165
 
3031 serge 166
	case I915_SETPARAM_NUM_USED_FENCES:
167
		if (param->value > dev_priv->num_fence_regs ||
168
		    param->value < 0)
169
			return -EINVAL;
170
		/* Userspace can use first N regs */
171
		dev_priv->fence_reg_start = param->value;
172
		break;
173
	default:
174
		DRM_DEBUG_DRIVER("unknown parameter %d\n",
175
					param->param);
176
		return -EINVAL;
177
	}
178
 
179
	return 0;
180
}
181
#endif
182
 
183
static int i915_get_bridge_dev(struct drm_device *dev)
184
{
185
	struct drm_i915_private *dev_priv = dev->dev_private;
186
 
187
	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
188
	if (!dev_priv->bridge_dev) {
189
		DRM_ERROR("bridge device not found\n");
190
		return -1;
191
	}
192
	return 0;
193
}
194
 
2330 Serge 195
/* Config-space offsets of the MCHBAR base register on the host bridge:
 * pre-gen4 parts use 0x44, gen4+ use 0x48. */
#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

/* DEVEN register holds the MCHBAR enable bit on i915G/GM. */
#define DEVEN_REG 0x54
#define   DEVEN_MCHBAR_EN (1 << 28)




/* Setup MCHBAR if possible, return true if we should disable it again */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	/* Valleyview has no MCHBAR to set up. */
	if (IS_VALLEYVIEW(dev))
		return;

	dev_priv->mchbar_need_disable = false;

	/* i915G/GM signal MCHBAR enable via DEVEN; others via bit 0 of the
	 * MCHBAR register itself. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	/* Port gap: allocating + enabling MCHBAR is not implemented here,
	 * so if firmware left it disabled we only log and bail out. */
	dbgprintf("Epic fail\n");

#if 0
	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
#endif
}
249
 
250
 
3031 serge 251
/* true = enable decode, false = disable decoder */
252
static unsigned int i915_vga_set_decode(void *cookie, bool state)
2330 Serge 253
{
3031 serge 254
	struct drm_device *dev = cookie;
2330 Serge 255
 
3031 serge 256
	intel_modeset_vga_set_state(dev, state);
257
	if (state)
258
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
259
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
260
	else
261
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
262
}
2330 Serge 263
 
264
 
265
 
266
 
267
 
268
 
2327 Serge 269
/*
 * i915_load_modeset_init - KMS-mode portion of driver load.
 *
 * Brings up, in order: VBT parsing, stolen memory, power domains, IRQs,
 * modeset core, GEM, then fbdev + hotplug.  The ordering is load-bearing
 * (see inline comments).  On error, unwinds via the label chain at the
 * bottom; several teardown calls are commented out in this port, so the
 * unwind is only partial.
 */
static int i915_load_modeset_init(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    int ret;

    ret = intel_parse_bios(dev);
    if (ret)
        DRM_INFO("failed to find VBIOS tables\n");   /* non-fatal */


	/* Initialise stolen first so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	ret = i915_gem_init_stolen(dev);
	if (ret)
		goto cleanup_vga_switcheroo;

	intel_power_domains_init_hw(dev_priv);

	ret = intel_irq_install(dev_priv);
	if (ret)
		goto cleanup_gem_stolen;

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
    intel_modeset_init(dev);

	ret = i915_gem_init(dev);
    if (ret)
		goto cleanup_irq;

    intel_modeset_gem_init(dev);

    /* Always safe in the mode setting case. */
    /* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = true;
	/* No display pipes (e.g. fused off): nothing further to set up. */
	if (INTEL_INFO(dev)->num_pipes == 0)
		return 0;

    ret = intel_fbdev_init(dev);
    if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45.  Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. Now we should scan for the initial config
	 * only once hotplug handling is enabled, but due to screwed-up locking
	 * around kms/fbdev init we can't protect the fdbev initial config
	 * scanning against hotplug events. Hence do this first and ignore the
	 * tiny window where we will loose hotplug notifactions.
	 */
	intel_fbdev_initial_config(dev_priv, 0);

	drm_kms_helper_poll_init(dev);

    return 0;

/* Error unwind — note most teardown calls below are disabled in this port. */
cleanup_gem:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);
cleanup_irq:
//	drm_irq_uninstall(dev);
cleanup_gem_stolen:
//	i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
//	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
//	vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
    return ret;
}
348
 
4560 Serge 349
#if IS_ENABLED(CONFIG_FB)
/*
 * Evict any firmware framebuffer (e.g. vesafb/efifb) that claims our GTT
 * aperture before we take it over.  @primary is set when the device owns
 * the shadowed VGA ROM, i.e. it was the boot display.
 * Returns 0 on success or a negative errno.
 */
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->dev->pdev;
	bool primary;
	int ret;

	ap = alloc_apertures(1);
	if (!ap)
		return -ENOMEM;

	/* Our single aperture is the whole mappable GTT range. */
	ap->ranges[0].base = dev_priv->gtt.mappable_base;
	ap->ranges[0].size = dev_priv->gtt.mappable_end;

	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);

	return ret;
}
#else
/* Without CONFIG_FB there is no firmware fb driver to kick out. */
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	return 0;
}
#endif
379
 
3031 serge 380
/*
 * Log a one-line summary of the device: gen, PCI id/revision, and every
 * set feature flag.  DEV_INFO_FOR_EACH_FLAG is an X-macro: expanded once
 * with PRINT_S to build a "%s%s%s..." format string at compile time, and
 * once with PRINT_FLAG to supply a "name," or "" argument per flag.  The
 * helper macros are #undef'd immediately so they cannot leak.
 */
static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
	const struct intel_device_info *info = &dev_priv->info;

#define PRINT_S(name) "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
			 DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
			 info->gen,
			 dev_priv->dev->pdev->device,
			 dev_priv->dev->pdev->revision,
			 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
#undef PRINT_FLAG
#undef SEP_COMMA
}
399
 
5060 serge 400
/*
 * Determine various intel_device_info fields at runtime.
 *
 * Use it when either:
 *   - it's judged too laborious to fill n static structures with the limit
 *     when a simple if statement does the job,
 *   - run-time checks (eg read fuse/strap registers) are needed.
 *
 * This function needs to be called:
 *   - after the MMIO has been setup as we are reading registers,
 *   - after the PCH has been detected,
 *   - before the first usage of the fields it can tweak.
 */
static void intel_device_info_runtime_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	enum pipe pipe;

	/* Cast away const: this is the one place allowed to write the
	 * otherwise "write-once" device info. */
	info = (struct intel_device_info *)&dev_priv->info;

	/* VLV and gen9 have two sprite planes per pipe; everything else one. */
	if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9)
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 2;
	else
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 1;

	if (i915.disable_display) {
		DRM_INFO("Display disabled (module parameter)\n");
		info->num_pipes = 0;
	} else if (info->num_pipes > 0 &&
		   (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
		   !IS_VALLEYVIEW(dev)) {
		u32 fuse_strap = I915_READ(FUSE_STRAP);
		u32 sfuse_strap = I915_READ(SFUSE_STRAP);

		/*
		 * SFUSE_STRAP is supposed to have a bit signalling the display
		 * is fused off. Unfortunately it seems that, at least in
		 * certain cases, fused off display means that PCH display
		 * reads don't land anywhere. In that case, we read 0s.
		 *
		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
		 * should be set when taking over after the firmware.
		 */
		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
		    (dev_priv->pch_type == PCH_CPT &&
		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
			DRM_INFO("Display fused off, disabling\n");
			info->num_pipes = 0;
		}
	}
}
455
 
2326 Serge 456
/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags (actually a struct intel_device_info * smuggled
 *         through the unsigned long — see the cast below)
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 *
 * NOTE(review): in this port the error labels at the bottom perform no
 * teardown except kfree(dev_priv); a failure mid-load leaks the mapped
 * registers, bridge device reference and workqueue.
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
    struct drm_i915_private *dev_priv;
	struct intel_device_info *info, *device_info;
	int ret = 0, mmio_bar, mmio_size;
	uint32_t aperture_size;

	info = (struct intel_device_info *) flags;


	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
    if (dev_priv == NULL)
        return -ENOMEM;

	dev->dev_private = dev_priv;
    dev_priv->dev = dev;

	/* Setup the write-once "constant" device info */
	device_info = (struct intel_device_info *)&dev_priv->info;
	memcpy(device_info, info, sizeof(dev_priv->info));
	device_info->device_id = dev->pdev->device;

	/* Locks must exist before anything that might take them. */
	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);
	spin_lock_init(&dev_priv->mm.object_stat_lock);
	spin_lock_init(&dev_priv->mmio_flip_lock);
	mutex_init(&dev_priv->dpio_lock);
	mutex_init(&dev_priv->modeset_restore_lock);

	intel_pm_setup(dev);

	intel_display_crc_init(dev);

	i915_dump_device_info(dev_priv);

	/* Not all pre-production machines fall into this category, only the
	 * very first ones. Almost everything should work, except for maybe
	 * suspend/resume. And we don't implement workarounds that affect only
	 * pre-production machines. */
	if (IS_HSW_EARLY_SDV(dev))
		DRM_INFO("This is an early pre-production Haswell machine. "
			 "It may not be fully functional.\n");

    if (i915_get_bridge_dev(dev)) {
        ret = -EIO;
        goto free_priv;
    }

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/* Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
	 * the register BAR remains the same size for all the earlier
	 * generations up to Ironlake.
	 */
	if (info->gen < 5)
		mmio_size = 512*1024;
	else
		mmio_size = 2*1024*1024;

	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_bridge;
	}

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev);

	intel_uncore_init(dev);

	ret = i915_gem_gtt_init(dev);
	if (ret)
		goto out_regs;


	pci_set_master(dev->pdev);

    /* overlay on gen2 is broken and can't address above 1G */

    /* 965GM sometimes incorrectly writes to hardware status page (HWS)
     * using 32bit addressing, overwriting memory if HWS is located
     * above 4GB.
     *
     * The documentation also mentions an issue with undefined
     * behaviour if any general state is accessed within a page above 4GB,
     * which also needs to be handled carefully.
     */

	aperture_size = dev_priv->gtt.mappable_end;

	/* KolibriOS-specific: map the aperture through a kernel-space
	 * window instead of io_mapping_create_wc(). */
	dev_priv->gtt.mappable = AllocKernelSpace(8192);
	if (dev_priv->gtt.mappable == NULL) {
		ret = -EIO;
		goto out_gtt;
	}

    /* The i915 workqueue is primarily used for batched retirement of
     * requests (and thus managing bo) once the task has been completed
     * by the GPU. i915_gem_retire_requests() is called directly when we
     * need high-priority retirement, such as waiting for an explicit
     * bo.
     *
     * It is also used for periodic low-priority events, such as
     * idle-timers and recording error state.
     *
     * All tasks on the workqueue are expected to acquire the dev mutex
     * so there is no point in running more than one instance of the
	 * workqueue at any time.  Use an ordered one.
     */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_mtrrfree;
	}
	/* KolibriOS-specific: also publish it as the global system_wq. */
    system_wq = dev_priv->wq;


	intel_irq_init(dev_priv);
	intel_uncore_sanitize(dev);

    /* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);
    intel_setup_gmbus(dev);
    intel_opregion_setup(dev);

    intel_setup_bios(dev);

    i915_gem_load(dev);

    /* On the 945G/GM, the chipset reports the MSI capability on the
     * integrated graphics even though the support isn't actually there
     * according to the published specs.  It doesn't appear to function
     * correctly in testing on 945G.
     * This may be a side effect of MSI having been made available for PEG
     * and the registers being closely associated.
     *
     * According to chipset errata, on the 965GM, MSI interrupts may
     * be lost or delayed, but we use them anyways to avoid
     * stuck interrupts on some machines.
     */

	intel_device_info_runtime_init(dev);

//   if (INTEL_INFO(dev)->num_pipes) {
//       ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
//       if (ret)
//           goto out_gem_unload;
//   }

	intel_power_domains_init(dev_priv);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_load_modeset_init(dev);
		if (ret < 0) {
			DRM_ERROR("failed to init modeset\n");
			goto out_power_well;
		}
	}


	if (INTEL_INFO(dev)->num_pipes) {
    /* Must be done after probing outputs */
		intel_opregion_init(dev);
	}

	if (IS_GEN5(dev))
		intel_gpu_ips_init(dev_priv);

	intel_runtime_pm_enable(dev_priv);

	/* KolibriOS-specific global handle to the loaded device. */
    main_device = dev;

    return 0;

/* Error unwind: labels are placeholders only — no resources are released
 * here except dev_priv itself (see NOTE in the header comment). */
out_power_well:
out_gem_unload:

out_mtrrfree:
out_gtt:
out_regs:
put_bridge:
free_priv:
    kfree(dev_priv);
    return ret;
}
658
 
3031 serge 659
#if 0
660
 
661
int i915_driver_unload(struct drm_device *dev)
662
{
663
	struct drm_i915_private *dev_priv = dev->dev_private;
664
	int ret;
665
 
4560 Serge 666
	ret = i915_gem_suspend(dev);
667
	if (ret) {
668
		DRM_ERROR("failed to idle hardware: %d\n", ret);
669
		return ret;
670
	}
671
 
672
 
3031 serge 673
	intel_gpu_ips_teardown();
674
 
4104 Serge 675
		/* The i915.ko module is still not prepared to be loaded when
676
		 * the power well is not enabled, so just enable it in case
677
		 * we're going to unload/reload. */
5060 serge 678
	intel_display_set_init_power(dev_priv, true);
679
	intel_power_domains_remove(dev_priv);
4104 Serge 680
 
3031 serge 681
	i915_teardown_sysfs(dev);
682
 
4104 Serge 683
	if (dev_priv->mm.inactive_shrinker.scan_objects)
3031 serge 684
		unregister_shrinker(&dev_priv->mm.inactive_shrinker);
685
 
3480 Serge 686
	io_mapping_free(dev_priv->gtt.mappable);
4104 Serge 687
	arch_phys_wc_del(dev_priv->gtt.mtrr);
3031 serge 688
 
689
	acpi_video_unregister();
690
 
691
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
692
		intel_modeset_cleanup(dev);
693
 
694
		/*
695
		 * free the memory space allocated for the child device
696
		 * config parsed from VBT
697
		 */
4104 Serge 698
		if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
699
			kfree(dev_priv->vbt.child_dev);
700
			dev_priv->vbt.child_dev = NULL;
701
			dev_priv->vbt.child_dev_num = 0;
3031 serge 702
		}
703
 
704
		vga_switcheroo_unregister_client(dev->pdev);
705
		vga_client_register(dev->pdev, NULL, NULL, NULL);
706
	}
707
 
708
	/* Free error state after interrupts are fully disabled. */
3480 Serge 709
	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
710
	cancel_work_sync(&dev_priv->gpu_error.work);
3031 serge 711
	i915_destroy_error_state(dev);
712
 
713
	if (dev->pdev->msi_enabled)
714
		pci_disable_msi(dev->pdev);
715
 
716
	intel_opregion_fini(dev);
717
 
718
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
719
		/* Flush any outstanding unpin_work. */
720
		flush_workqueue(dev_priv->wq);
721
 
722
		mutex_lock(&dev->struct_mutex);
723
		i915_gem_cleanup_ringbuffer(dev);
724
		i915_gem_context_fini(dev);
725
		mutex_unlock(&dev->struct_mutex);
726
		i915_gem_cleanup_stolen(dev);
727
	}
728
 
729
	intel_teardown_gmbus(dev);
730
	intel_teardown_mchbar(dev);
731
 
5060 serge 732
	destroy_workqueue(dev_priv->dp_wq);
3031 serge 733
	destroy_workqueue(dev_priv->wq);
3480 Serge 734
	pm_qos_remove_request(&dev_priv->pm_qos);
3031 serge 735
 
5354 serge 736
	i915_global_gtt_cleanup(dev);
4104 Serge 737
 
4560 Serge 738
	intel_uncore_fini(dev);
739
	if (dev_priv->regs != NULL)
740
		pci_iounmap(dev->pdev, dev_priv->regs);
741
 
3480 Serge 742
	if (dev_priv->slab)
743
		kmem_cache_destroy(dev_priv->slab);
744
 
3031 serge 745
	pci_dev_put(dev_priv->bridge_dev);
5060 serge 746
	kfree(dev_priv);
3031 serge 747
 
748
	return 0;
749
}
3263 Serge 750
#endif
3031 serge 751
 
752
/*
 * Per-client open hook: all per-file driver state lives in GEM, so this
 * simply delegates to i915_gem_open() and propagates its result.
 */
int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	return i915_gem_open(dev, file);
}
762
 
3263 Serge 763
#if 0
/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device *dev)
{
		intel_fbdev_restore_mode(dev);
		vga_switcheroo_process_delayed_switch();
}

/*
 * Per-client teardown that must run while the file is still valid:
 * drop the client's contexts and GEM requests under struct_mutex, then
 * let the modeset side flush any client-owned flip work.
 */
void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
{
	mutex_lock(&dev->struct_mutex);
	i915_gem_context_close(dev, file);
	i915_gem_release(dev, file);
	mutex_unlock(&dev->struct_mutex);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		intel_modeset_preclose(dev, file);
}

/* Final per-client cleanup: free the driver-private file state. */
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	if (file_priv && file_priv->bsd_ring)
		file_priv->bsd_ring = NULL;
	kfree(file_priv);
}
801
 
4104 Serge 802
/*
 * Ioctl dispatch table.  Entry order must match the DRM_I915_* ioctl
 * numbers — do not reorder.  Legacy UMS/DRI1 entries are wired to
 * drm_noop; GEM/KMS entries carry DRM_RENDER_ALLOW where render nodes
 * may use them.
 */
const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
854
 
5060 serge 855
/* Number of entries in i915_ioctls; exported for the DRM core. */
int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);

/*
 * This is really ugly: Because old userspace abused the linux agp interface to
 * manage the gtt, we need to claim that all intel devices are agp.  For
 * otherwise the drm core refuses to initialize the agp support code.
 */
int i915_driver_device_is_agp(struct drm_device *dev)
{
	return 1;
}
#endif