Subversion Repositories Kolibri OS

Rev

Rev 5354 | Rev 6084 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
2326 Serge 1
/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
2
 */
3
/*
4
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5
 * All Rights Reserved.
6
 *
7
 * Permission is hereby granted, free of charge, to any person obtaining a
8
 * copy of this software and associated documentation files (the
9
 * "Software"), to deal in the Software without restriction, including
10
 * without limitation the rights to use, copy, modify, merge, publish,
11
 * distribute, sub license, and/or sell copies of the Software, and to
12
 * permit persons to whom the Software is furnished to do so, subject to
13
 * the following conditions:
14
 *
15
 * The above copyright notice and this permission notice (including the
16
 * next paragraph) shall be included in all copies or substantial portions
17
 * of the Software.
18
 *
19
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
 *
27
 */
28
 
3031 serge 29
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
 
5354 serge 31
#include 
3031 serge 32
#include 
33
#include 
34
#include 
5354 serge 35
#include 
2326 Serge 36
#include "intel_drv.h"
3031 serge 37
#include 
2326 Serge 38
#include "i915_drv.h"
2351 Serge 39
#include "i915_trace.h"
2326 Serge 40
#include 
5354 serge 41
#include 
2326 Serge 42
//#include 
43
//#include 
44
//#include 
2330 Serge 45
#include 
2326 Serge 46
//#include 
47
 
48
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen);
49
 
4246 Serge 50
int i915_getparam(struct drm_device *dev, void *data,
3031 serge 51
			 struct drm_file *file_priv)
52
{
5060 serge 53
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 54
	drm_i915_getparam_t *param = data;
55
	int value;
56
 
57
	switch (param->param) {
58
	case I915_PARAM_IRQ_ACTIVE:
59
	case I915_PARAM_ALLOW_BATCHBUFFER:
60
	case I915_PARAM_LAST_DISPATCH:
5354 serge 61
		/* Reject all old ums/dri params. */
62
		return -ENODEV;
3031 serge 63
	case I915_PARAM_CHIPSET_ID:
5060 serge 64
		value = dev->pdev->device;
3031 serge 65
		break;
66
	case I915_PARAM_HAS_GEM:
67
		value = 1;
68
		break;
69
	case I915_PARAM_NUM_FENCES_AVAIL:
70
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
71
		break;
72
	case I915_PARAM_HAS_OVERLAY:
73
		value = dev_priv->overlay ? 1 : 0;
74
		break;
75
	case I915_PARAM_HAS_PAGEFLIPPING:
76
		value = 1;
77
		break;
78
	case I915_PARAM_HAS_EXECBUF2:
79
		/* depends on GEM */
80
		value = 1;
81
		break;
82
	case I915_PARAM_HAS_BSD:
83
		value = intel_ring_initialized(&dev_priv->ring[VCS]);
84
		break;
85
	case I915_PARAM_HAS_BLT:
86
		value = intel_ring_initialized(&dev_priv->ring[BCS]);
87
		break;
4246 Serge 88
	case I915_PARAM_HAS_VEBOX:
89
		value = intel_ring_initialized(&dev_priv->ring[VECS]);
90
		break;
3031 serge 91
	case I915_PARAM_HAS_RELAXED_FENCING:
92
		value = 1;
93
		break;
94
	case I915_PARAM_HAS_COHERENT_RINGS:
95
		value = 1;
96
		break;
97
	case I915_PARAM_HAS_EXEC_CONSTANTS:
98
		value = INTEL_INFO(dev)->gen >= 4;
99
		break;
100
	case I915_PARAM_HAS_RELAXED_DELTA:
101
		value = 1;
102
		break;
103
	case I915_PARAM_HAS_GEN7_SOL_RESET:
104
		value = 1;
105
		break;
106
	case I915_PARAM_HAS_LLC:
107
		value = HAS_LLC(dev);
108
		break;
4246 Serge 109
	case I915_PARAM_HAS_WT:
110
		value = HAS_WT(dev);
111
		break;
3031 serge 112
	case I915_PARAM_HAS_ALIASING_PPGTT:
5354 serge 113
		value = USES_PPGTT(dev);
3031 serge 114
		break;
115
	case I915_PARAM_HAS_WAIT_TIMEOUT:
116
		value = 1;
117
		break;
118
	case I915_PARAM_HAS_SEMAPHORES:
119
		value = i915_semaphore_is_enabled(dev);
120
		break;
121
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
122
		value = 1;
123
		break;
4104 Serge 124
    case I915_PARAM_HAS_SECURE_BATCHES:
3255 Serge 125
        value = 1;
3243 Serge 126
		break;
127
	case I915_PARAM_HAS_PINNED_BATCHES:
128
		value = 1;
129
		break;
3480 Serge 130
	case I915_PARAM_HAS_EXEC_NO_RELOC:
131
		value = 1;
132
        break;
133
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
4392 Serge 134
		value = 1;
3480 Serge 135
        break;
5060 serge 136
	case I915_PARAM_CMD_PARSER_VERSION:
137
		value = i915_cmd_parser_get_version();
138
		break;
5354 serge 139
	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
140
		value = 1;
141
		break;
3031 serge 142
	default:
4104 Serge 143
		DRM_DEBUG("Unknown parameter %d\n", param->param);
3031 serge 144
		return -EINVAL;
145
	}
146
 
3255 Serge 147
    *param->value = value;
148
 
3031 serge 149
	return 0;
150
}
151
 
3255 Serge 152
#if 0
/*
 * i915_setparam - SETPARAM ioctl: set one tunable driver parameter.
 * Compiled out on this port.  Returns 0 on success, -ENODEV for retired
 * UMS/DRI1 params, -EINVAL for out-of-range or unknown params.
 */
static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	switch (param->param) {
	/* Reject all old ums/dri params. */
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		return -ENODEV;

	case I915_SETPARAM_NUM_USED_FENCES:
		/* Userspace can use first N regs */
		if (param->value < 0 ||
		    param->value > dev_priv->num_fence_regs)
			return -EINVAL;
		dev_priv->fence_reg_start = param->value;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown parameter %d\n",
					param->param);
		return -EINVAL;
	}

	return 0;
}
#endif
182
 
183
static int i915_get_bridge_dev(struct drm_device *dev)
184
{
185
	struct drm_i915_private *dev_priv = dev->dev_private;
186
 
187
	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
188
	if (!dev_priv->bridge_dev) {
189
		DRM_ERROR("bridge device not found\n");
190
		return -1;
191
	}
192
	return 0;
193
}
194
 
2330 Serge 195
/* MCHBAR: PCI config-space offset of the Memory Controller Hub BAR on the
 * host bridge.  Gen2/3 parts use 0x44, gen4+ use 0x48 — see the
 * gen >= 4 selection in intel_setup_mchbar(). */
#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
/* Size of the MCHBAR MMIO window: 16 KiB. */
#define MCHBAR_SIZE (4*4096)

/* Device-enable register used on i915G/GM; bit 28 gates MCHBAR decode
 * instead of bit 0 of the BAR register itself. */
#define DEVEN_REG 0x54
#define   DEVEN_MCHBAR_EN (1 << 28)
201
 
202
 
203
 
204
 
205
/* Setup MCHBAR if possible, return true if we should disable it again */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	/* Gen4+ moved the MCHBAR register from 0x44 to 0x48. */
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	/* Valleyview has no host-bridge MCHBAR to manage. */
	if (IS_VALLEYVIEW(dev))
		return;

	dev_priv->mchbar_need_disable = false;

	/* i915G/GM gate MCHBAR decode through DEVEN bit 28; every other
	 * platform uses bit 0 of the MCHBAR register itself. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	/* Port gap: MCHBAR resource allocation is not implemented on this
	 * platform (see the disabled upstream code below), so we can only
	 * log and carry on with MCHBAR disabled. */
	dbgprintf("Epic fail\n");

#if 0
	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
#endif
}
249
 
250
 
3031 serge 251
/* true = enable decode, false = disable decoder */
252
static unsigned int i915_vga_set_decode(void *cookie, bool state)
2330 Serge 253
{
3031 serge 254
	struct drm_device *dev = cookie;
2330 Serge 255
 
3031 serge 256
	intel_modeset_vga_set_state(dev, state);
257
	if (state)
258
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
259
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
260
	else
261
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
262
}
2330 Serge 263
 
264
 
265
 
266
 
267
 
268
 
2327 Serge 269
/* One-time KMS bring-up: VBIOS parse, stolen memory, irqs, modeset, GEM and
 * fbdev, in that order.  Returns 0 on success or a negative errno; partially
 * completed steps are unwound through the goto chain at the bottom (several
 * unwind calls are stubbed out on this port). */
static int i915_load_modeset_init(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    int ret;

    /* A missing VBT is survivable — we log and keep going. */
    ret = intel_parse_bios(dev);
    if (ret)
        DRM_INFO("failed to find VBIOS tables\n");

	/* If we have > 1 VGA cards, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */

	/* Initialise stolen first so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	ret = i915_gem_init_stolen(dev);
	if (ret)
		goto cleanup_vga_switcheroo;

	intel_power_domains_init_hw(dev_priv);

	ret = intel_irq_install(dev_priv);
	if (ret)
		goto cleanup_gem_stolen;

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
    intel_modeset_init(dev);

	ret = i915_gem_init(dev);
    if (ret)
		goto cleanup_irq;

    intel_modeset_gem_init(dev);

    /* Always safe in the mode setting case. */
    /* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = true;
	/* With the display fused off there is nothing more to set up. */
	if (INTEL_INFO(dev)->num_pipes == 0)
		return 0;

    ret = intel_fbdev_init(dev);
    if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45.  Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. Now we should scan for the initial config
	 * only once hotplug handling is enabled, but due to screwed-up locking
	 * around kms/fbdev init we can't protect the fdbev initial config
	 * scanning against hotplug events. Hence do this first and ignore the
	 * tiny window where we will loose hotplug notifactions.
	 */
	intel_fbdev_initial_config(dev_priv, 0);

	drm_kms_helper_poll_init(dev);

    return 0;

	/* Error unwind — later labels fall through into earlier ones; the
	 * commented-out calls are not implemented on this port. */
cleanup_gem:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);
cleanup_irq:
//	drm_irq_uninstall(dev);
cleanup_gem_stolen:
//	i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
//	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
//	vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
    return ret;
}
354
 
4560 Serge 355
#if IS_ENABLED(CONFIG_FB)
/*
 * Evict any firmware framebuffer driver (vesafb, efifb, ...) whose aperture
 * overlaps our mappable GTT range so inteldrmfb can claim the hardware.
 * Returns 0 on success or a negative errno.
 */
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->dev->pdev;
	struct apertures_struct *ap;
	bool primary;
	int ret;

	ap = alloc_apertures(1);
	if (!ap)
		return -ENOMEM;

	/* The conflicting range is the whole mappable GTT aperture. */
	ap->ranges[0].base = dev_priv->gtt.mappable_base;
	ap->ranges[0].size = dev_priv->gtt.mappable_end;

	/* A shadowed option ROM marks us as the primary (boot) display. */
	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);

	return ret;
}
#else
/* Without CONFIG_FB there can be no firmware framebuffer to kick out. */
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	return 0;
}
#endif
385
 
3031 serge 386
/* Log the device's gen, PCI id/revision and the name of every set feature
 * flag in a single debug line at load time. */
static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
	const struct intel_device_info *info = &dev_priv->info;

/* DEV_INFO_FOR_EACH_FLAG is expanded twice: once with PRINT_S to append one
 * "%s" per flag to the format string, and once with PRINT_FLAG to supply a
 * "name," or "" argument for each flag depending on whether it is set. */
#define PRINT_S(name) "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
			 DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
			 info->gen,
			 dev_priv->dev->pdev->device,
			 dev_priv->dev->pdev->revision,
			 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
#undef PRINT_FLAG
#undef SEP_COMMA
}
405
 
5060 serge 406
/*
407
 * Determine various intel_device_info fields at runtime.
408
 *
409
 * Use it when either:
410
 *   - it's judged too laborious to fill n static structures with the limit
411
 *     when a simple if statement does the job,
412
 *   - run-time checks (eg read fuse/strap registers) are needed.
413
 *
414
 * This function needs to be called:
415
 *   - after the MMIO has been setup as we are reading registers,
416
 *   - after the PCH has been detected,
417
 *   - before the first usage of the fields it can tweak.
418
 */
419
static void intel_device_info_runtime_init(struct drm_device *dev)
420
{
421
	struct drm_i915_private *dev_priv = dev->dev_private;
422
	struct intel_device_info *info;
423
	enum pipe pipe;
424
 
425
	info = (struct intel_device_info *)&dev_priv->info;
426
 
5354 serge 427
	if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9)
428
		for_each_pipe(dev_priv, pipe)
5060 serge 429
			info->num_sprites[pipe] = 2;
430
	else
5354 serge 431
		for_each_pipe(dev_priv, pipe)
5060 serge 432
			info->num_sprites[pipe] = 1;
433
 
434
	if (i915.disable_display) {
435
		DRM_INFO("Display disabled (module parameter)\n");
436
		info->num_pipes = 0;
437
	} else if (info->num_pipes > 0 &&
438
		   (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
439
		   !IS_VALLEYVIEW(dev)) {
440
		u32 fuse_strap = I915_READ(FUSE_STRAP);
441
		u32 sfuse_strap = I915_READ(SFUSE_STRAP);
442
 
443
		/*
444
		 * SFUSE_STRAP is supposed to have a bit signalling the display
445
		 * is fused off. Unfortunately it seems that, at least in
446
		 * certain cases, fused off display means that PCH display
447
		 * reads don't land anywhere. In that case, we read 0s.
448
		 *
449
		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
450
		 * should be set when taking over after the firmware.
451
		 */
452
		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
453
		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
454
		    (dev_priv->pch_type == PCH_CPT &&
455
		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
456
			DRM_INFO("Display fused off, disabling\n");
457
			info->num_pipes = 0;
458
		}
459
	}
460
}
461
 
2326 Serge 462
/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	struct intel_device_info *info, *device_info;
	int ret = 0, mmio_bar, mmio_size;
	uint32_t aperture_size;

	/* The static device-match info is smuggled in through @flags. */
	info = (struct intel_device_info *) flags;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = dev_priv;
	dev_priv->dev = dev;

	/* Setup the write-once "constant" device info */
	device_info = (struct intel_device_info *)&dev_priv->info;
	memcpy(device_info, info, sizeof(dev_priv->info));
	device_info->device_id = dev->pdev->device;

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);
	spin_lock_init(&dev_priv->mm.object_stat_lock);
	spin_lock_init(&dev_priv->mmio_flip_lock);
	mutex_init(&dev_priv->dpio_lock);
	mutex_init(&dev_priv->modeset_restore_lock);

	intel_pm_setup(dev);

	intel_display_crc_init(dev);

	i915_dump_device_info(dev_priv);

	/* Not all pre-production machines fall into this category, only the
	 * very first ones. Almost everything should work, except for maybe
	 * suspend/resume. And we don't implement workarounds that affect only
	 * pre-production machines. */
	if (IS_HSW_EARLY_SDV(dev))
		DRM_INFO("This is an early pre-production Haswell machine. "
			 "It may not be fully functional.\n");

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
	}

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/* Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
	 * the register BAR remains the same size for all the earlier
	 * generations up to Ironlake.
	 */
	if (info->gen < 5)
		mmio_size = 512*1024;
	else
		mmio_size = 2*1024*1024;

	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_bridge;
	}

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev);

	intel_uncore_init(dev);

	ret = i915_gem_gtt_init(dev);
	if (ret)
		goto out_regs;

	pci_set_master(dev->pdev);

	/* overlay on gen2 is broken and can't address above 1G */

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */

	/* NOTE(review): aperture_size is assigned but never used below —
	 * looks like a leftover from the upstream driver; confirm. */
	aperture_size = dev_priv->gtt.mappable_end;

	/* Port detail: a small kernel address window replaces the upstream
	 * io_mapping for the mappable aperture. */
	dev_priv->gtt.mappable = AllocKernelSpace(8192);
	if (dev_priv->gtt.mappable == NULL) {
		ret = -EIO;
		goto out_gtt;
	}

	/* The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time.  Use an ordered one.
	 */
	dev_priv->wq = (struct workqueue_struct *)alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_mtrrfree;
	}
	/* Port detail: the driver's queue doubles as the global system_wq. */
	system_wq = dev_priv->wq;

	intel_irq_init(dev_priv);
	intel_uncore_sanitize(dev);

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);
	intel_setup_gmbus(dev);
	intel_opregion_setup(dev);

	intel_setup_bios(dev);

	i915_gem_load(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyways to avoid
	 * stuck interrupts on some machines.
	 */

	intel_device_info_runtime_init(dev);

//   if (INTEL_INFO(dev)->num_pipes) {
//       ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
//       if (ret)
//           goto out_gem_unload;
//   }

	intel_power_domains_init(dev_priv);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_load_modeset_init(dev);
		if (ret < 0) {
			DRM_ERROR("failed to init modeset\n");
			goto out_power_well;
		}
	}

	if (INTEL_INFO(dev)->num_pipes) {
    /* Must be done after probing outputs */
		intel_opregion_init(dev);
	}

	if (IS_GEN5(dev))
		intel_gpu_ips_init(dev_priv);

//   intel_runtime_pm_enable(dev_priv);

	main_device = dev;

	return 0;

	/* NOTE(review): all error labels below fall through to a bare
	 * kfree() — the mapped registers, bridge reference and workqueue
	 * are not released on failure.  TODO confirm this is acceptable
	 * here (a load failure is probably fatal on this platform). */
out_power_well:
out_gem_unload:

out_mtrrfree:
out_gtt:
out_regs:
put_bridge:
free_priv:
	kfree(dev_priv);
	return ret;
}
664
 
3031 serge 665
#if 0
666
 
667
int i915_driver_unload(struct drm_device *dev)
668
{
669
	struct drm_i915_private *dev_priv = dev->dev_private;
670
	int ret;
671
 
4560 Serge 672
	ret = i915_gem_suspend(dev);
673
	if (ret) {
674
		DRM_ERROR("failed to idle hardware: %d\n", ret);
675
		return ret;
676
	}
677
 
678
 
3031 serge 679
	intel_gpu_ips_teardown();
680
 
681
	i915_teardown_sysfs(dev);
682
 
4104 Serge 683
	if (dev_priv->mm.inactive_shrinker.scan_objects)
3031 serge 684
		unregister_shrinker(&dev_priv->mm.inactive_shrinker);
685
 
3480 Serge 686
	io_mapping_free(dev_priv->gtt.mappable);
4104 Serge 687
	arch_phys_wc_del(dev_priv->gtt.mtrr);
3031 serge 688
 
689
	acpi_video_unregister();
690
 
691
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
692
		intel_modeset_cleanup(dev);
693
 
694
		/*
695
		 * free the memory space allocated for the child device
696
		 * config parsed from VBT
697
		 */
4104 Serge 698
		if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
699
			kfree(dev_priv->vbt.child_dev);
700
			dev_priv->vbt.child_dev = NULL;
701
			dev_priv->vbt.child_dev_num = 0;
3031 serge 702
		}
703
 
704
		vga_switcheroo_unregister_client(dev->pdev);
705
		vga_client_register(dev->pdev, NULL, NULL, NULL);
706
	}
707
 
708
	/* Free error state after interrupts are fully disabled. */
3480 Serge 709
	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
710
	cancel_work_sync(&dev_priv->gpu_error.work);
3031 serge 711
	i915_destroy_error_state(dev);
712
 
713
	if (dev->pdev->msi_enabled)
714
		pci_disable_msi(dev->pdev);
715
 
716
	intel_opregion_fini(dev);
717
 
718
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
719
		/* Flush any outstanding unpin_work. */
720
		flush_workqueue(dev_priv->wq);
721
 
722
		mutex_lock(&dev->struct_mutex);
723
		i915_gem_cleanup_ringbuffer(dev);
724
		i915_gem_context_fini(dev);
725
		mutex_unlock(&dev->struct_mutex);
726
		i915_gem_cleanup_stolen(dev);
727
	}
728
 
729
	intel_teardown_gmbus(dev);
730
	intel_teardown_mchbar(dev);
731
 
5060 serge 732
	destroy_workqueue(dev_priv->dp_wq);
3031 serge 733
	destroy_workqueue(dev_priv->wq);
3480 Serge 734
	pm_qos_remove_request(&dev_priv->pm_qos);
3031 serge 735
 
5354 serge 736
	i915_global_gtt_cleanup(dev);
4104 Serge 737
 
4560 Serge 738
	intel_uncore_fini(dev);
739
	if (dev_priv->regs != NULL)
740
		pci_iounmap(dev->pdev, dev_priv->regs);
741
 
3480 Serge 742
	if (dev_priv->slab)
743
		kmem_cache_destroy(dev_priv->slab);
744
 
3031 serge 745
	pci_dev_put(dev_priv->bridge_dev);
5060 serge 746
	kfree(dev_priv);
3031 serge 747
 
748
	return 0;
749
}
3263 Serge 750
#endif
3031 serge 751
 
752
/*
 * Per-client open hook: set up this file's GEM state.
 * Returns 0 on success or the error from i915_gem_open().
 */
int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	return i915_gem_open(dev, file);
}
762
 
3263 Serge 763
#if 0
3031 serge 764
/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device *dev)
{
	intel_fbdev_restore_mode(dev);
	vga_switcheroo_process_delayed_switch();
}
781
 
5060 serge 782
/* Per-client teardown run before the drm_file is destroyed: release this
 * client's GEM contexts and objects, then let the modeset code drop any
 * state (e.g. pending flips) it still holds for the file. */
void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
{
	/* Context and object teardown must happen under struct_mutex. */
	mutex_lock(&dev->struct_mutex);
	i915_gem_context_close(dev, file);
	i915_gem_release(dev, file);
	mutex_unlock(&dev->struct_mutex);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		intel_modeset_preclose(dev, file);
}
792
 
793
/* Final per-client hook: free the file's private driver state. */
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Clear the cached BSD ring selection before freeing; harmless if
	 * it was never set (file_priv may legitimately be NULL). */
	if (file_priv && file_priv->bsd_ring)
		file_priv->bsd_ring = NULL;
	kfree(file_priv);
}
801
 
4104 Serge 802
/* Driver ioctl dispatch table.  Legacy UMS/DRI1 entries are wired to
 * drm_noop; live entries carry DRM_AUTH / DRM_MASTER / DRM_ROOT_ONLY /
 * DRM_UNLOCKED / DRM_RENDER_ALLOW access flags as appropriate. */
const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};

/* Number of entries in the table above; exported to the DRM core. */
int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
3031 serge 856
 
857
/*
 * This is really ugly: Because old userspace abused the linux agp interface to
 * manage the gtt, we need to claim that all intel devices are agp.  For
 * otherwise the drm core refuses to initialize the agp support code.
 */
int i915_driver_device_is_agp(struct drm_device *dev)
{
	return 1;	/* unconditionally AGP — see comment above */
}
866
#endif