Subversion Repositories Kolibri OS

Rev

Rev 6084 | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
/*
 * Intel GTT (Graphics Translation Table) routines
 *
 * Caveat: This driver implements the linux agp interface, but this is far from
 * an agp driver! GTT support ended up here for purely historical reasons: The
 * old userspace intel graphics drivers needed an interface to map memory into
 * the GTT. And the drm provides a default interface for graphic devices sitting
 * on an agp port. So it made sense to fake the GTT support as an agp port to
 * avoid having to create a new api.
 *
 * With gem this does not make much sense anymore, just needlessly complicates
 * the code. But as long as the old graphics stack is still supported, it's
 * stuck here.
 *
 * /fairy-tale-mode off
 */
17
 
3480 Serge 18
#include 
19
 
2325 Serge 20
#include 
21
#include 
22
#include 
5354 serge 23
#include 
24
#include 
25
#
3031 serge 26
#include 
3243 Serge 27
#include 
28
 
2325 Serge 29
#include 
30
#include "agp.h"
31
#include "intel-agp.h"
3243 Serge 32
#include 
2325 Serge 33
 
34
 
35
struct pci_dev *
36
pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from);
37
 
38
 
39
#define PCI_VENDOR_ID_INTEL             0x8086
40
#define PCI_DEVICE_ID_INTEL_82830_HB    0x3575
41
#define PCI_DEVICE_ID_INTEL_82845G_HB   0x2560
2339 Serge 42
#define PCI_DEVICE_ID_INTEL_82915G_IG   0x2582
43
#define PCI_DEVICE_ID_INTEL_82915GM_IG  0x2592
44
#define PCI_DEVICE_ID_INTEL_82945G_IG   0x2772
45
#define PCI_DEVICE_ID_INTEL_82945GM_IG  0x27A2
2325 Serge 46
 
47
 
48
#define AGP_NORMAL_MEMORY 0
49
 
50
#define AGP_USER_TYPES (1 << 16)
51
#define AGP_USER_MEMORY (AGP_USER_TYPES)
52
#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
53
 
54
 
55
 
56
/*
57
 * If we have Intel graphics, we're not going to have anything other than
58
 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
2339 Serge 59
 * on the Intel IOMMU support (CONFIG_INTEL_IOMMU).
2325 Serge 60
 * Only newer chipsets need to bother with this, of course.
61
 */
2339 Serge 62
#ifdef CONFIG_INTEL_IOMMU
2325 Serge 63
#define USE_PCI_DMA_API 1
64
#else
65
#define USE_PCI_DMA_API 0
66
#endif
67
 
68
/*
 * Per-chipset vtable: each supported GMCH generation supplies its own
 * setup/teardown hooks and its GTT PTE encoding through this interface.
 */
struct intel_gtt_driver {
	unsigned int gen : 8;			/* chipset generation number */
	unsigned int is_g33 : 1;
	unsigned int is_pineview : 1;
	unsigned int is_ironlake : 1;
	unsigned int has_pgtbl_enable : 1;	/* PGETBL_CTL has an enable bit */
	unsigned int dma_mask_size : 8;		/* width of the DMA mask in bits */
	/* Chipset specific GTT setup */
	int (*setup)(void);
	/* This should undo anything done in ->setup() save the unmapping
	 * of the mmio register file, that's done in the generic code. */
	void (*cleanup)(void);
	/* Write a single PTE; flags select the caching attributes. */
	void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
	/* Flags is a more or less chipset specific opaque value.
	 * For chipsets that need to support old ums (non-gem) code, this
	 * needs to be identical to the various supported agp memory types! */
	bool (*check_flags)(unsigned int flags);
	void (*chipset_flush)(void);
};
87
 
88
/*
 * Global driver state. There is only ever one GMCH in a system, so a
 * single static instance is sufficient.
 */
static struct _intel_private {
	const struct intel_gtt_driver *driver;	/* chipset vtable */
	struct pci_dev *pcidev;	/* device one */
	struct pci_dev *bridge_dev;
	u8 __iomem *registers;
	phys_addr_t gtt_phys_addr;
	u32 PGETBL_save;	/* PGETBL_CTL value to restore on resume */
	u32 __iomem *gtt;		/* I915G */
	bool clear_fake_agp; /* on first access via agp, fill with scratch */
	int num_dcache_entries;
	void __iomem *i9xx_flush_page;
	char *i81x_gtt_table;
	struct resource ifp_resource;
	int resource_valid;
	struct page *scratch_page;	/* backs every unused GTT entry */
	phys_addr_t scratch_page_dma;
	int refcount;
	/* Whether i915 needs to use the dmar apis or not. */
	unsigned int needs_dmar : 1;
	phys_addr_t gma_bus_addr;
	/*  Size of memory reserved for graphics by the BIOS */
	unsigned int stolen_size;
	/* Total number of gtt entries. */
	unsigned int gtt_total_entries;
	/* Part of the gtt that is mappable by the cpu, for those chips where
	 * this is not the full gtt. */
	unsigned int gtt_mappable_entries;
} intel_private;
116
 
6084 serge 117
#define INTEL_GTT_GEN	intel_private.driver->gen
118
#define IS_G33		intel_private.driver->is_g33
119
#define IS_PINEVIEW	intel_private.driver->is_pineview
120
#define IS_IRONLAKE	intel_private.driver->is_ironlake
121
#define HAS_PGTBL_EN	intel_private.driver->has_pgtbl_enable
2325 Serge 122
 
6084 serge 123
#if IS_ENABLED(CONFIG_AGP_INTEL)
124
static int intel_gtt_map_memory(struct page **pages,
125
				unsigned int num_entries,
126
				struct sg_table *st)
127
{
128
	struct scatterlist *sg;
129
	int i;
130
 
131
	DBG("try mapping %lu pages\n", (unsigned long)num_entries);
132
 
133
	if (sg_alloc_table(st, num_entries, GFP_KERNEL))
134
		goto err;
135
 
136
	for_each_sg(st->sgl, sg, num_entries, i)
137
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
138
 
139
	if (!pci_map_sg(intel_private.pcidev,
140
			st->sgl, st->nents, PCI_DMA_BIDIRECTIONAL))
141
		goto err;
142
 
143
	return 0;
144
 
145
err:
146
	sg_free_table(st);
147
	return -ENOMEM;
148
}
149
 
150
static void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
151
{
152
	struct sg_table st;
153
	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
154
 
155
	pci_unmap_sg(intel_private.pcidev, sg_list,
156
		     num_sg, PCI_DMA_BIDIRECTIONAL);
157
 
158
	st.sgl = sg_list;
159
	st.orig_nents = st.nents = num_sg;
160
 
161
	sg_free_table(&st);
162
}
163
 
164
/* AGP "enable" is a no-op on the faked bridge: the GTT is always active. */
static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
}
168
 
169
/* Exists to support ARGB cursors */
170
static struct page *i8xx_alloc_pages(void)
171
{
172
	struct page *page;
173
 
174
	page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
175
	if (page == NULL)
176
		return NULL;
177
 
178
	if (set_pages_uc(page, 4) < 0) {
179
		set_pages_wb(page, 4);
180
		__free_pages(page, 2);
181
		return NULL;
182
	}
183
	atomic_inc(&agp_bridge->current_memory_agp);
184
	return page;
185
}
186
 
187
static void i8xx_destroy_pages(struct page *page)
188
{
189
	if (page == NULL)
190
		return;
191
 
192
	set_pages_wb(page, 4);
193
	__free_pages(page, 2);
194
	atomic_dec(&agp_bridge->current_memory_agp);
195
}
196
#endif
197
 
198
#if IS_ENABLED(CONFIG_AGP_INTEL)
199
/*
 * Program GTT entries pointing at the i810's on-chip dcache. Entry i maps
 * dcache page i, so only the index range needs validating.
 */
static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
				      int type)
{
	int i;

	if (pg_start + mem->page_count > intel_private.num_dcache_entries)
		return -EINVAL;

	if (!mem->is_flushed)
		global_cache_flush();

	for (i = pg_start; i < pg_start + mem->page_count; i++)
		intel_private.driver->write_entry((dma_addr_t)(i << PAGE_SHIFT),
						  i, type);
	wmb();

	return 0;
}
220
 
221
/*
222
 * The i810/i830 requires a physical address to program its mouse
223
 * pointer into hardware.
224
 * However the Xserver still writes to it through the agp aperture.
225
 */
226
static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
227
{
228
	struct agp_memory *new;
229
	struct page *page;
230
 
231
	switch (pg_count) {
232
	case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
233
		break;
234
	case 4:
235
		/* kludge to get 4 physical pages for ARGB cursor */
236
		page = i8xx_alloc_pages();
237
		break;
238
	default:
239
		return NULL;
240
	}
241
 
242
	if (page == NULL)
243
		return NULL;
244
 
245
	new = agp_create_memory(pg_count);
246
	if (new == NULL)
247
		return NULL;
248
 
249
	new->pages[0] = page;
250
	if (pg_count == 4) {
251
		/* kludge to get 4 physical pages for ARGB cursor */
252
		new->pages[1] = new->pages[0] + 1;
253
		new->pages[2] = new->pages[1] + 1;
254
		new->pages[3] = new->pages[2] + 1;
255
	}
256
	new->page_count = pg_count;
257
	new->num_scratch_pages = pg_count;
258
	new->type = AGP_PHYS_MEMORY;
259
	new->physical = page_to_phys(new->pages[0]);
260
	return new;
261
}
262
 
263
/*
 * Free an agp_memory previously returned by alloc_agpphysmem_i8xx().
 * Only AGP_PHYS_MEMORY carries backing pages here; other types just
 * release the key and the descriptor.
 */
static void intel_i810_free_by_type(struct agp_memory *curr)
{
	agp_free_key(curr->key);
	if (curr->type == AGP_PHYS_MEMORY) {
		if (curr->page_count == 4)
			/* the 4-page ARGB cursor block was a single allocation */
			i8xx_destroy_pages(curr->pages[0]);
		else {
			/* unmap first, then free — the order matters */
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_UNMAP);
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_FREE);
		}
		agp_free_page_array(curr);
	}
	kfree(curr);
}
279
#endif
280
 
2325 Serge 281
static int intel_gtt_setup_scratch_page(void)
282
{
3243 Serge 283
	struct page *page;
3031 serge 284
	dma_addr_t dma_addr;
2325 Serge 285
 
3243 Serge 286
	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
287
	if (page == NULL)
6084 serge 288
		return -ENOMEM;
3480 Serge 289
		intel_private.scratch_page_dma = page_to_phys(page);
2325 Serge 290
 
3243 Serge 291
	intel_private.scratch_page = page;
2325 Serge 292
 
6084 serge 293
	return 0;
2325 Serge 294
}
295
 
296
/*
 * Decode how much main memory the BIOS reserved ("stole") for graphics,
 * from the GMCH control word. i830/845G use one encoding (including a
 * local-memory option read back from the RDRAM channel type register);
 * everything newer uses the i855+ encoding. Returns the size in bytes,
 * 0 when nothing is reserved or on gen1 (i81x has no stolen memory).
 */
static unsigned int intel_gtt_stolen_size(void)
{
	u16 gmch_ctrl;
	u8 rdct;
	int local = 0;
	/* RDRAM device-density table, MB per device (indexed by DDT bits) */
	static const int ddt[4] = { 0, 16, 32, 64 };
	unsigned int stolen_size = 0;

	if (INTEL_GTT_GEN == 1)
		return 0; /* no stolen mem on i81x */

	pci_read_config_word(intel_private.bridge_dev,
			     I830_GMCH_CTRL, &gmch_ctrl);

	if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
	    intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
		/* i830/845G encoding */
		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
		case I830_GMCH_GMS_STOLEN_512:
			stolen_size = KB(512);
			break;
		case I830_GMCH_GMS_STOLEN_1024:
			stolen_size = MB(1);
			break;
		case I830_GMCH_GMS_STOLEN_8192:
			stolen_size = MB(8);
			break;
		case I830_GMCH_GMS_LOCAL:
			/* dedicated local memory: size computed from the
			 * RDRAM channel type register */
			rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
			stolen_size = (I830_RDRAM_ND(rdct) + 1) *
					MB(ddt[I830_RDRAM_DDT(rdct)]);
			local = 1;
			break;
		default:
			stolen_size = 0;
			break;
		}
	} else {
		/* i855 and newer encoding */
		switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
		case I855_GMCH_GMS_STOLEN_1M:
			stolen_size = MB(1);
			break;
		case I855_GMCH_GMS_STOLEN_4M:
			stolen_size = MB(4);
			break;
		case I855_GMCH_GMS_STOLEN_8M:
			stolen_size = MB(8);
			break;
		case I855_GMCH_GMS_STOLEN_16M:
			stolen_size = MB(16);
			break;
		case I855_GMCH_GMS_STOLEN_32M:
			stolen_size = MB(32);
			break;
		case I915_GMCH_GMS_STOLEN_48M:
			stolen_size = MB(48);
			break;
		case I915_GMCH_GMS_STOLEN_64M:
			stolen_size = MB(64);
			break;
		case G33_GMCH_GMS_STOLEN_128M:
			stolen_size = MB(128);
			break;
		case G33_GMCH_GMS_STOLEN_256M:
			stolen_size = MB(256);
			break;
		case INTEL_GMCH_GMS_STOLEN_96M:
			stolen_size = MB(96);
			break;
		case INTEL_GMCH_GMS_STOLEN_160M:
			stolen_size = MB(160);
			break;
		case INTEL_GMCH_GMS_STOLEN_224M:
			stolen_size = MB(224);
			break;
		case INTEL_GMCH_GMS_STOLEN_352M:
			stolen_size = MB(352);
			break;
		default:
			stolen_size = 0;
			break;
		}
	}

	if (stolen_size > 0) {
		dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
		       stolen_size / KB(1), local ? "local" : "stolen");
	} else {
		dev_info(&intel_private.bridge_dev->dev,
		       "no pre-allocated video memory detected\n");
		stolen_size = 0;
	}

	return stolen_size;
}
390
 
391
/*
 * Reprogram the global GTT size on gen5, where the BIOS may carve part of
 * the page-table space out for a (here unused) per-process GTT.
 * @size_flag: one of the I965_PGETBL_SIZE_* field values.
 */
static void i965_adjust_pgetbl_size(unsigned int size_flag)
{
	u32 pgetbl_ctl, pgetbl_ctl2;

	/* ensure that ppgtt is disabled */
	pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
	pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
	writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);

	/* write the new ggtt size */
	pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
	pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
	pgetbl_ctl |= size_flag;
	writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
}
406
 
407
/*
 * Number of GTT entries on gen4/5 and G33, derived from the PGETBL_CTL
 * size field. On gen5 the field is first fixed up so the whole page-table
 * space (including any VT-d carve-out) is used as the global GTT.
 * Returns size / 4 since each entry is a 4-byte PTE.
 */
static unsigned int i965_gtt_total_entries(void)
{
	int size;
	u32 pgetbl_ctl;
	u16 gmch_ctl;

	pci_read_config_word(intel_private.bridge_dev,
			     I830_GMCH_CTRL, &gmch_ctl);

	if (INTEL_GTT_GEN == 5) {
		/* claim the full table for the global GTT */
		switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
		case G4x_GMCH_SIZE_1M:
		case G4x_GMCH_SIZE_VT_1M:
			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
			break;
		case G4x_GMCH_SIZE_VT_1_5M:
			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
			break;
		case G4x_GMCH_SIZE_2M:
		case G4x_GMCH_SIZE_VT_2M:
			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
			break;
		}
	}

	pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

	switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
	case I965_PGETBL_SIZE_128KB:
		size = KB(128);
		break;
	case I965_PGETBL_SIZE_256KB:
		size = KB(256);
		break;
	case I965_PGETBL_SIZE_512KB:
		size = KB(512);
		break;
	/* GTT pagetable sizes bigger than 512KB are not possible on G33! */
	case I965_PGETBL_SIZE_1MB:
		size = KB(1024);
		break;
	case I965_PGETBL_SIZE_2MB:
		size = KB(2048);
		break;
	case I965_PGETBL_SIZE_1_5MB:
		size = KB(1024 + 512);
		break;
	default:
		dev_info(&intel_private.pcidev->dev,
			 "unknown page table size, assuming 512KB\n");
		size = KB(512);
	}

	return size/4;
}
462
 
463
static unsigned int intel_gtt_total_entries(void)
464
{
6084 serge 465
	if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
466
		return i965_gtt_total_entries();
3243 Serge 467
	else {
6084 serge 468
		/* On previous hardware, the GTT size was just what was
469
		 * required to map the aperture.
470
		 */
3480 Serge 471
		return intel_private.gtt_mappable_entries;
6084 serge 472
	}
2325 Serge 473
}
474
 
475
static unsigned int intel_gtt_mappable_entries(void)
476
{
6084 serge 477
	unsigned int aperture_size;
2325 Serge 478
 
6084 serge 479
	if (INTEL_GTT_GEN == 1) {
480
		u32 smram_miscc;
2325 Serge 481
 
6084 serge 482
		pci_read_config_dword(intel_private.bridge_dev,
483
				      I810_SMRAM_MISCC, &smram_miscc);
2325 Serge 484
 
6084 serge 485
		if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
486
				== I810_GFX_MEM_WIN_32M)
487
			aperture_size = MB(32);
488
		else
489
			aperture_size = MB(64);
490
	} else if (INTEL_GTT_GEN == 2) {
491
		u16 gmch_ctrl;
2325 Serge 492
 
6084 serge 493
		pci_read_config_word(intel_private.bridge_dev,
494
				     I830_GMCH_CTRL, &gmch_ctrl);
2325 Serge 495
 
6084 serge 496
		if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
497
			aperture_size = MB(64);
498
		else
499
			aperture_size = MB(128);
500
	} else {
501
		/* 9xx supports large sizes, just look at the length */
502
		aperture_size = pci_resource_len(intel_private.pcidev, 2);
503
	}
2325 Serge 504
 
6084 serge 505
	return aperture_size >> PAGE_SHIFT;
2325 Serge 506
}
507
 
508
/* Stubbed out in this port: the scratch page is never freed.
 * NOTE(review): the Linux original unmaps and releases the page here. */
static void intel_gtt_teardown_scratch_page(void)
{
   // FreePage(intel_private.scratch_page_dma);
}
512
 
513
/*
 * Full teardown: chipset-specific cleanup first, then the generic GTT and
 * register mappings, finally the scratch page.
 */
static void intel_gtt_cleanup(void)
{
	intel_private.driver->cleanup();

	iounmap(intel_private.gtt);
	iounmap(intel_private.registers);

	intel_gtt_teardown_scratch_page();
}
522
 
4104 Serge 523
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline int needs_ilk_vtd_wa(void)
{
#ifdef CONFIG_INTEL_IOMMU
	const unsigned short gpu_devid = intel_private.pcidev->device;

	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if (intel_iommu_gfx_mapped &&
	    (gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
	     gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG))
		return 1;
#endif
	return 0;
}
541
 
542
static bool intel_gtt_can_wc(void)
543
{
544
	if (INTEL_GTT_GEN <= 2)
545
		return false;
546
 
547
	if (INTEL_GTT_GEN >= 6)
548
		return false;
549
 
550
	/* Reports of major corruption with ILK vt'd enabled */
551
	if (needs_ilk_vtd_wa())
552
		return false;
553
 
554
	return true;
555
}
556
 
2325 Serge 557
/*
 * Common GTT initialisation, run after the chipset driver is selected:
 * chipset setup, size probing, PGETBL save for resume, mapping the GTT
 * itself (WC if allowed), stolen-size detection, scratch page and finally
 * the aperture bus address. Returns 0 or a negative errno; on error all
 * partially acquired resources are released.
 */
static int intel_gtt_init(void)
{
	u32 gtt_map_size;
	int ret, bar;

	ret = intel_private.driver->setup();
	if (ret != 0)
		return ret;

	intel_private.gtt_mappable_entries = intel_gtt_mappable_entries();
	intel_private.gtt_total_entries = intel_gtt_total_entries();

	/* save the PGETBL reg for resume */
	intel_private.PGETBL_save =
		readl(intel_private.registers+I810_PGETBL_CTL)
			& ~I810_PGETBL_ENABLED;
	/* we only ever restore the register when enabling the PGTBL... */
	if (HAS_PGTBL_EN)
		intel_private.PGETBL_save |= I810_PGETBL_ENABLED;

	dev_info(&intel_private.bridge_dev->dev,
			"detected gtt size: %dK total, %dK mappable\n",
			intel_private.gtt_total_entries * 4,
			intel_private.gtt_mappable_entries * 4);

	/* 4 bytes per PTE */
	gtt_map_size = intel_private.gtt_total_entries * 4;

	intel_private.gtt = NULL;
	if (intel_gtt_can_wc())
		intel_private.gtt = ioremap_wc(intel_private.gtt_phys_addr,
					       gtt_map_size);
	/* fall back to an uncached mapping if WC failed or is disallowed */
	if (intel_private.gtt == NULL)
		intel_private.gtt = ioremap(intel_private.gtt_phys_addr,
					    gtt_map_size);
	if (intel_private.gtt == NULL) {
		intel_private.driver->cleanup();
		iounmap(intel_private.registers);
		return -ENOMEM;
	}

#if IS_ENABLED(CONFIG_AGP_INTEL)
	global_cache_flush();   /* FIXME: ? */
#endif

	intel_private.stolen_size = intel_gtt_stolen_size();

	intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;

	ret = intel_gtt_setup_scratch_page();
	if (ret != 0) {
		intel_gtt_cleanup();
		return ret;
	}

	/* the aperture lives in a different BAR on gen1/2 than on gen3+ */
	if (INTEL_GTT_GEN <= 2)
		bar = I810_GMADR_BAR;
	else
		bar = I915_GMADR_BAR;

	intel_private.gma_bus_addr = pci_bus_address(intel_private.pcidev, bar);
	return 0;
}
3031 serge 619
 
6084 serge 620
#if IS_ENABLED(CONFIG_AGP_INTEL)
621
static int intel_fake_agp_fetch_size(void)
622
{
623
	int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
624
	unsigned int aper_size;
625
	int i;
3480 Serge 626
 
6084 serge 627
	aper_size = (intel_private.gtt_mappable_entries << PAGE_SHIFT) / MB(1);
628
 
629
	for (i = 0; i < num_sizes; i++) {
630
		if (aper_size == intel_fake_agp_sizes[i].size) {
631
			agp_bridge->current_size =
632
				(void *) (intel_fake_agp_sizes + i);
633
			return aper_size;
634
		}
635
	}
636
 
637
	return 0;
638
}
639
#endif
640
 
2339 Serge 641
/* Encode and write one gen2-style PTE: page address plus valid bit, with
 * the snooped-cache bit for AGP_USER_CACHED_MEMORY. */
static void i830_write_entry(dma_addr_t addr, unsigned int entry,
			     unsigned int flags)
{
	u32 pte = addr | I810_PTE_VALID;

	if (flags == AGP_USER_CACHED_MEMORY)
		pte |= I830_PTE_SYSTEM_CACHED;

	writel(pte, intel_private.gtt + entry);
}
651
 
3031 serge 652
/*
 * Turn the GTT on (also used on the resume path): enable the GMCH on gen2,
 * then restore the saved PGETBL_CTL value. Returns false if either enable
 * bit fails to stick.
 */
bool intel_enable_gtt(void)
{
	u8 __iomem *reg;

	if (INTEL_GTT_GEN == 2) {
		u16 gmch_ctrl;

		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);
		gmch_ctrl |= I830_GMCH_ENABLED;
		pci_write_config_word(intel_private.bridge_dev,
				      I830_GMCH_CTRL, gmch_ctrl);

		/* read back to verify the enable bit stuck */
		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);
		if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
			dev_err(&intel_private.pcidev->dev,
				"failed to enable the GTT: GMCH_CTRL=%x\n",
				gmch_ctrl);
			return false;
		}
	}

	/* On the resume path we may be adjusting the PGTBL value, so
	 * be paranoid and flush all chipset write buffers...
	 */
	if (INTEL_GTT_GEN >= 3)
		writel(0, intel_private.registers+GFX_FLSH_CNTL);

	reg = intel_private.registers+I810_PGETBL_CTL;
	writel(intel_private.PGETBL_save, reg);
	if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
		dev_err(&intel_private.pcidev->dev,
			"failed to enable the GTT: PGETBL=%x [expected %x]\n",
			readl(reg), intel_private.PGETBL_save);
		return false;
	}

	/* flush again after the PGETBL update */
	if (INTEL_GTT_GEN >= 3)
		writel(0, intel_private.registers+GFX_FLSH_CNTL);

	return true;
}
6084 serge 695
EXPORT_SYMBOL(intel_enable_gtt);
2325 Serge 696
 
6084 serge 697
#if IS_ENABLED(CONFIG_AGP_INTEL)
698
static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
699
{
700
	agp_bridge->gatt_table_real = NULL;
701
	agp_bridge->gatt_table = NULL;
702
	agp_bridge->gatt_bus_addr = 0;
703
 
704
	return 0;
705
}
706
 
707
/* Nothing to free — create_gatt_table never allocated anything. */
static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
{
	return 0;
}
711
 
712
static int intel_fake_agp_configure(void)
713
{
714
	if (!intel_enable_gtt())
715
	    return -EIO;
716
 
717
	intel_private.clear_fake_agp = true;
718
	agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
719
 
720
	return 0;
721
}
722
#endif
723
 
2339 Serge 724
static bool i830_check_flags(unsigned int flags)
725
{
726
	switch (flags) {
727
	case 0:
728
	case AGP_PHYS_MEMORY:
729
	case AGP_USER_CACHED_MEMORY:
730
	case AGP_USER_MEMORY:
731
		return true;
732
	}
2325 Serge 733
 
2339 Serge 734
	return false;
735
}
736
 
3243 Serge 737
void intel_gtt_insert_sg_entries(struct sg_table *st,
3031 serge 738
				 unsigned int pg_start,
739
				 unsigned int flags)
2332 Serge 740
{
3243 Serge 741
	struct scatterlist *sg;
742
	unsigned int len, m;
6084 serge 743
	int i, j;
2325 Serge 744
 
3031 serge 745
	j = pg_start;
746
 
3243 Serge 747
	/* sg may merge pages, but we have to separate
748
	 * per-page addr for GTT */
749
	for_each_sg(st->sgl, sg, st->nents, i) {
750
		len = sg_dma_len(sg) >> PAGE_SHIFT;
751
		for (m = 0; m < len; m++) {
752
			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
6084 serge 753
			intel_private.driver->write_entry(addr, j, flags);
754
			j++;
3243 Serge 755
		}
756
	}
6084 serge 757
	wmb();
3031 serge 758
}
3243 Serge 759
EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
3031 serge 760
 
5060 serge 761
#if IS_ENABLED(CONFIG_AGP_INTEL)
3031 serge 762
static void intel_gtt_insert_pages(unsigned int first_entry,
763
				   unsigned int num_entries,
3243 Serge 764
				   struct page **pages,
3031 serge 765
				   unsigned int flags)
766
{
6084 serge 767
	int i, j;
3031 serge 768
 
6084 serge 769
	for (i = 0, j = first_entry; i < num_entries; i++, j++) {
3243 Serge 770
		dma_addr_t addr = page_to_phys(pages[i]);
6084 serge 771
		intel_private.driver->write_entry(addr,
772
						  j, flags);
773
	}
774
	wmb();
2332 Serge 775
}
776
 
5060 serge 777
/*
 * agp ->insert_memory hook: validate the request and program the GTT,
 * via dma mapping when dmar is in use, otherwise with raw physical
 * addresses. Returns 0 on success or a negative errno.
 */
static int intel_fake_agp_insert_entries(struct agp_memory *mem,
					 off_t pg_start, int type)
{
	int ret = -EINVAL;

	/* first agp access: scrub everything above the BIOS-stolen range */
	if (intel_private.clear_fake_agp) {
		int start = intel_private.stolen_size / PAGE_SIZE;
		int end = intel_private.gtt_mappable_entries;
		intel_gtt_clear_range(start, end - start);
		intel_private.clear_fake_agp = false;
	}

	if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
		return i810_insert_dcache_entries(mem, pg_start, type);

	if (mem->page_count == 0)
		goto out;

	if (pg_start + mem->page_count > intel_private.gtt_total_entries)
		goto out_err;

	if (type != mem->type)
		goto out_err;

	if (!intel_private.driver->check_flags(type))
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	if (intel_private.needs_dmar) {
		struct sg_table st;

		ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
		if (ret != 0)
			return ret;

		intel_gtt_insert_sg_entries(&st, pg_start, type);
		/* keep the mapping so remove_entries can undo it */
		mem->sg_list = st.sgl;
		mem->num_sg = st.nents;
	} else
		intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
				       type);

out:
	/* NOTE: success deliberately falls through out: -> out_err: so that
	 * is_flushed is set on every exit path */
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}
827
#endif
828
 
2332 Serge 829
void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
830
{
831
	unsigned int i;
832
 
833
	for (i = first_entry; i < (first_entry + num_entries); i++) {
3480 Serge 834
		intel_private.driver->write_entry(intel_private.scratch_page_dma,
2332 Serge 835
						  i, 0);
836
	}
6084 serge 837
	wmb();
2332 Serge 838
}
6084 serge 839
EXPORT_SYMBOL(intel_gtt_clear_range);
840
 
841
#if IS_ENABLED(CONFIG_AGP_INTEL)
842
static int intel_fake_agp_remove_entries(struct agp_memory *mem,
843
					 off_t pg_start, int type)
844
{
845
	if (mem->page_count == 0)
846
		return 0;
847
 
848
	intel_gtt_clear_range(pg_start, mem->page_count);
849
 
850
	if (intel_private.needs_dmar) {
851
		intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
852
		mem->sg_list = NULL;
853
		mem->num_sg = 0;
854
	}
855
 
856
	return 0;
857
}
858
 
859
/* agp ->alloc_by_type hook: i810 dcache and physical cursor memory only. */
static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
						       int type)
{
	struct agp_memory *new;

	if (type == AGP_PHYS_MEMORY)
		return alloc_agpphysmem_i8xx(pg_count, type);

	if (type != AGP_DCACHE_MEMORY || INTEL_GTT_GEN != 1)
		return NULL;	/* always return NULL for other allocation types for now */

	/* dcache is all-or-nothing */
	if (pg_count != intel_private.num_dcache_entries)
		return NULL;

	new = agp_create_memory(1);
	if (new == NULL)
		return NULL;

	new->type = AGP_DCACHE_MEMORY;
	new->page_count = pg_count;
	new->num_scratch_pages = 0;
	/* dcache has no backing pages; release the page array */
	agp_free_page_array(new);
	return new;
}
883
#endif
4389 Serge 884
static void intel_i915_setup_chipset_flush(void)
885
{
886
	int ret;
887
	u32 temp;
2332 Serge 888
 
4389 Serge 889
	pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
890
	if (!(temp & 0x1)) {
891
//		intel_alloc_chipset_flush_resource();
892
//		intel_private.resource_valid = 1;
893
//		pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
894
	} else {
895
		temp &= ~1;
896
 
897
		intel_private.resource_valid = 1;
898
		intel_private.ifp_resource.start = temp;
899
		intel_private.ifp_resource.end = temp + PAGE_SIZE;
900
//		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
901
		/* some BIOSes reserve this area in a pnp some don't */
902
//		if (ret)
903
//			intel_private.resource_valid = 0;
904
	}
905
}
906
 
907
static void intel_i965_g33_setup_chipset_flush(void)
908
{
909
	u32 temp_hi, temp_lo;
910
	int ret;
911
 
912
	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
913
	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);
914
 
915
	if (!(temp_lo & 0x1)) {
916
 
917
//		intel_alloc_chipset_flush_resource();
918
 
919
//		intel_private.resource_valid = 1;
920
//		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
921
//			upper_32_bits(intel_private.ifp_resource.start));
922
//		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
923
	} else {
924
		u64 l64;
925
 
926
		temp_lo &= ~0x1;
927
		l64 = ((u64)temp_hi << 32) | temp_lo;
928
 
929
		intel_private.resource_valid = 1;
930
		intel_private.ifp_resource.start = l64;
931
		intel_private.ifp_resource.end = l64 + PAGE_SIZE;
932
//		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
933
		/* some BIOSes reserve this area in a pnp some don't */
934
//		if (ret)
935
//			intel_private.resource_valid = 0;
936
	}
937
}
938
 
2325 Serge 939
/*
 * Find and map the chipset flush page used by i9xx_chipset_flush().
 * Safe to call repeatedly; does nothing once configured or on gen6+.
 */
static void intel_i9xx_setup_flush(void)
{
	/* return if already configured */
	if (intel_private.ifp_resource.start)
		return;

	if (INTEL_GTT_GEN == 6)
		return;

	/* setup a resource for this object */
	intel_private.ifp_resource.name = "Intel Flush Page";
	intel_private.ifp_resource.flags = IORESOURCE_MEM;

	/* Setup chipset flush for 915 */
	if (IS_G33 || INTEL_GTT_GEN >= 4) {
		intel_i965_g33_setup_chipset_flush();
	} else {
		intel_i915_setup_chipset_flush();
	}

	if (intel_private.ifp_resource.start)
		intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
	if (!intel_private.i9xx_flush_page)
		dev_err(&intel_private.pcidev->dev,
			"can't ioremap flush page - no chipset flushing\n");
}
2325 Serge 965
 
2339 Serge 966
/* Undo intel_i9xx_setup_flush(); resource release is stubbed in this port. */
static void i9xx_cleanup(void)
{
	if (intel_private.i9xx_flush_page)
		iounmap(intel_private.i9xx_flush_page);
//	if (intel_private.resource_valid)
//		release_resource(&intel_private.ifp_resource);
	intel_private.ifp_resource.start = 0;
	intel_private.resource_valid = 0;
}
975
 
976
static void i9xx_chipset_flush(void)
977
{
6084 serge 978
	if (intel_private.i9xx_flush_page)
979
		writel(1, intel_private.i9xx_flush_page);
2325 Serge 980
}
981
 
2339 Serge 982
/*
 * Gen4+ PTE encoding: like i830, but the PTE is still only 32 bits wide,
 * so address bits above 31 are folded into the low nibble of flag bits
 * ((addr >> 28) & 0xf0 moves addr[35:32] into pte[7:4]).
 */
static void i965_write_entry(dma_addr_t addr,
			     unsigned int entry,
			     unsigned int flags)
{
	u32 pte_flags;

	pte_flags = I810_PTE_VALID;
	if (flags == AGP_USER_CACHED_MEMORY)
		pte_flags |= I830_PTE_SYSTEM_CACHED;

	/* Shift high bits down */
	addr |= (addr >> 28) & 0xf0;
	writel(addr | pte_flags, intel_private.gtt + entry);
}
996
 
2325 Serge 997
/*
 * Chipset ->setup() for gen3+: map the MMIO register window and locate
 * the physical address of the GTT, which lives in its own BAR on gen3
 * but at a fixed offset inside the MMIO BAR on gen4/5.
 */
static int i9xx_setup(void)
{
	phys_addr_t reg_addr;
	int size = KB(512);	/* size of the MMIO register window */

	reg_addr = pci_resource_start(intel_private.pcidev, I915_MMADR_BAR);

	intel_private.registers = ioremap(reg_addr, size);
	if (!intel_private.registers)
		return -ENOMEM;

	switch (INTEL_GTT_GEN) {
	case 3:
		/* gen3 exposes the GTT through a dedicated BAR */
		intel_private.gtt_phys_addr =
			pci_resource_start(intel_private.pcidev, I915_PTE_BAR);
		break;
	case 5:
		/* ilk: GTT at 2MB into the MMIO BAR */
		intel_private.gtt_phys_addr = reg_addr + MB(2);
		break;
	default:
		/* gen4: GTT at 512KB into the MMIO BAR */
		intel_private.gtt_phys_addr = reg_addr + KB(512);
		break;
	}

	intel_i9xx_setup_flush();

	return 0;
}
1025
 
6084 serge 1026
#if IS_ENABLED(CONFIG_AGP_INTEL)
/*
 * Fake agp bridge driver: presents the GTT routines in this file through
 * the legacy agp interface (see the fairy-tale comment at the top of the
 * file). Fixed aperture sizes; generic agp page alloc/free helpers.
 */
static const struct agp_bridge_driver intel_fake_agp_driver = {
	.owner			= THIS_MODULE,
	.size_type		= FIXED_APER_SIZE,
	.aperture_sizes		= intel_fake_agp_sizes,
	.num_aperture_sizes	= ARRAY_SIZE(intel_fake_agp_sizes),
	.configure		= intel_fake_agp_configure,
	.fetch_size		= intel_fake_agp_fetch_size,
	.cleanup		= intel_gtt_cleanup,
	.agp_enable		= intel_fake_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_fake_agp_create_gatt_table,
	.free_gatt_table	= intel_fake_agp_free_gatt_table,
	.insert_memory		= intel_fake_agp_insert_entries,
	.remove_memory		= intel_fake_agp_remove_entries,
	.alloc_by_type		= intel_fake_agp_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages        = agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages      = agp_generic_destroy_pages,
};
#endif
2339 Serge 1049
/* Gen3 (i915/i945 class): 32-bit PTEs via the i830 entry writer. */
static const struct intel_gtt_driver i915_gtt_driver = {
	.gen = 3,
	.has_pgtbl_enable = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	/* i945 is the last gpu to need phys mem (for overlay and cursors). */
	.write_entry = i830_write_entry,
	.dma_mask_size = 32,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
1060
/* G33/Q33/Q35: gen3 chipset with 36-bit addressing and i965-style PTEs. */
static const struct intel_gtt_driver g33_gtt_driver = {
	.gen = 3,
	.is_g33 = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
1070
/* Pineview (GMA3150): g33 derivative, hence both flags set. */
static const struct intel_gtt_driver pineview_gtt_driver = {
	.gen = 3,
	.is_pineview = 1, .is_g33 = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
1080
/* Gen4 (i965 class): 36-bit addressing, high bits folded into the PTE. */
static const struct intel_gtt_driver i965_gtt_driver = {
	.gen = 4,
	.has_pgtbl_enable = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
1090
/* G4x family (GM45/G45/Q45/B43/G41): gen5 GTT layout. */
static const struct intel_gtt_driver g4x_gtt_driver = {
	.gen = 5,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
1099
/* Ironlake (first-gen "HD Graphics"): like g4x but with its own quirks. */
static const struct intel_gtt_driver ironlake_gtt_driver = {
	.gen = 5,
	.is_ironlake = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
2325 Serge 1109
 
1110
/* Table to describe Intel GMCH and AGP/PCIE GART drivers.  At least one of
1111
 * driver and gmch_driver must be non-null, and find_gmch will determine
1112
 * which one should be used if a gmch_chip_id is present.
1113
 */
1114
static const struct intel_gtt_driver_description {
	unsigned int gmch_chip_id;	/* PCI device id of the IGD function */
	char *name;			/* human-readable chipset name */
	const struct intel_gtt_driver *gtt_driver;	/* matching GTT ops */
} intel_gtt_chipsets[] = {
	{ PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G33_IG, "G33",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
		&pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
		&pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_IG, "B43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G41_IG, "G41",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
	    "HD Graphics", &ironlake_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
	    "HD Graphics", &ironlake_gtt_driver },
	{ 0, NULL, NULL }	/* sentinel: terminates the find_gmch scan */
};
1173
 
1174
/*
 * Locate the Intel graphics device with the given PCI device id, skipping
 * a function-0 match in favour of the next function if necessary.
 *
 * On success the device is stashed in intel_private.pcidev and 1 is
 * returned; otherwise 0.
 */
static int find_gmch(u16 device)
{
	struct pci_dev *pdev;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
	if (pdev && PCI_FUNC(pdev->devfn) != 0)
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, device, pdev);

	if (!pdev)
		return 0;

	intel_private.pcidev = pdev;
	return 1;
}
1190
 
3031 serge 1191
int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
6084 serge 1192
		     struct agp_bridge_data *bridge)
2325 Serge 1193
{
6084 serge 1194
	int i, mask;
2325 Serge 1195
 
3480 Serge 1196
	/*
1197
	 * Can be called from the fake agp driver but also directly from
1198
	 * drm/i915.ko. Hence we need to check whether everything is set up
1199
	 * already.
1200
	 */
1201
	if (intel_private.driver) {
1202
		intel_private.refcount++;
1203
		return 1;
1204
	}
1205
 
1206
 
2325 Serge 1207
    for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
1208
        if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
1209
            intel_private.driver =
1210
                intel_gtt_chipsets[i].gtt_driver;
1211
            break;
1212
        }
1213
    }
1214
 
1215
    if (!intel_private.driver)
1216
        return 0;
1217
 
3480 Serge 1218
	intel_private.refcount++;
1219
 
5060 serge 1220
#if IS_ENABLED(CONFIG_AGP_INTEL)
3031 serge 1221
	if (bridge) {
5060 serge 1222
		bridge->driver = &intel_fake_agp_driver;
6084 serge 1223
		bridge->dev_private_data = &intel_private;
3031 serge 1224
		bridge->dev = bridge_pdev;
1225
	}
5060 serge 1226
#endif
2325 Serge 1227
 
3031 serge 1228
    intel_private.bridge_dev = bridge_pdev;
2325 Serge 1229
 
3243 Serge 1230
	dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
2325 Serge 1231
 
1232
    mask = intel_private.driver->dma_mask_size;
1233
//    if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
1234
//        dev_err(&intel_private.pcidev->dev,
1235
//            "set gfx device dma mask %d-bit failed!\n", mask);
1236
//    else
1237
//        pci_set_consistent_dma_mask(intel_private.pcidev,
1238
//                        DMA_BIT_MASK(mask));
1239
 
3031 serge 1240
	if (intel_gtt_init() != 0) {
1241
//		intel_gmch_remove();
2325 Serge 1242
 
1243
        return 0;
3031 serge 1244
	}
2325 Serge 1245
 
1246
    return 1;
1247
}
2339 Serge 1248
EXPORT_SYMBOL(intel_gmch_probe);
2325 Serge 1249
 
6084 serge 1250
void intel_gtt_get(u64 *gtt_total, size_t *stolen_size,
1251
		   phys_addr_t *mappable_base, u64 *mappable_end)
2326 Serge 1252
{
3480 Serge 1253
	*gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
1254
	*stolen_size = intel_private.stolen_size;
1255
	*mappable_base = intel_private.gma_bus_addr;
1256
	*mappable_end = intel_private.gtt_mappable_entries << PAGE_SHIFT;
2326 Serge 1257
}
2339 Serge 1258
EXPORT_SYMBOL(intel_gtt_get);
2326 Serge 1259
 
2332 Serge 1260
void intel_gtt_chipset_flush(void)
1261
{
1262
	if (intel_private.driver->chipset_flush)
1263
		intel_private.driver->chipset_flush();
1264
}
2339 Serge 1265
EXPORT_SYMBOL(intel_gtt_chipset_flush);
2327 Serge 1266
 
2332 Serge 1267
 
6084 serge 1268
/* Module metadata inherited from the upstream Linux agp/intel-gtt driver. */
MODULE_AUTHOR("Dave Jones, Various @Intel");
MODULE_LICENSE("GPL and additional rights");