Subversion Repositories Kolibri OS

Rev

Rev 5354 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
2325 Serge 1
/*
2
 * Intel GTT (Graphics Translation Table) routines
3
 *
4
 * Caveat: This driver implements the linux agp interface, but this is far from
5
 * a agp driver! GTT support ended up here for purely historical reasons: The
6
 * old userspace intel graphics drivers needed an interface to map memory into
7
 * the GTT. And the drm provides a default interface for graphic devices sitting
8
 * on an agp port. So it made sense to fake the GTT support as an agp port to
9
 * avoid having to create a new api.
10
 *
11
 * With gem this does not make much sense anymore, just needlessly complicates
12
 * the code. But as long as the old graphics stack is still support, it's stuck
13
 * here.
14
 *
15
 * /fairy-tale-mode off
16
 */
17
 
3480 Serge 18
#include 
19
 
2325 Serge 20
#include 
21
#include 
22
#include 
5354 serge 23
#include 
24
#include 
25
#
3031 serge 26
#include 
3243 Serge 27
#include 
28
 
2325 Serge 29
#include 
30
#include "agp.h"
31
#include "intel-agp.h"
3243 Serge 32
#include 
2325 Serge 33
 
34
 
35
/* Provided by the KolibriOS PCI layer; mirrors the Linux prototype. */
struct pci_dev *
pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from);


/* PCI IDs used to distinguish chipset generations below. */
#define PCI_VENDOR_ID_INTEL             0x8086
#define PCI_DEVICE_ID_INTEL_82830_HB    0x3575
#define PCI_DEVICE_ID_INTEL_82845G_HB   0x2560
#define PCI_DEVICE_ID_INTEL_82915G_IG   0x2582
#define PCI_DEVICE_ID_INTEL_82915GM_IG  0x2592
#define PCI_DEVICE_ID_INTEL_82945G_IG   0x2772
#define PCI_DEVICE_ID_INTEL_82945GM_IG  0x27A2


/* AGP memory-type values; must stay compatible with old ums userspace. */
#define AGP_NORMAL_MEMORY 0

#define AGP_USER_TYPES (1 << 16)
#define AGP_USER_MEMORY (AGP_USER_TYPES)
#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)



/*
 * If we have Intel graphics, we're not going to have anything other than
 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
 * on the Intel IOMMU support (CONFIG_INTEL_IOMMU).
 * Only newer chipsets need to bother with this, of course.
 */
#ifdef CONFIG_INTEL_IOMMU
#define USE_PCI_DMA_API 1
#else
#define USE_PCI_DMA_API 0
#endif
67
 
68
/* Per-chipset-generation operations and feature flags. */
struct intel_gtt_driver {
	unsigned int gen : 8;		/* chipset generation number */
	unsigned int is_g33 : 1;
	unsigned int is_pineview : 1;
	unsigned int is_ironlake : 1;
	unsigned int has_pgtbl_enable : 1; /* PGETBL_CTL has an enable bit */
	unsigned int dma_mask_size : 8;	/* DMA addressing width in bits */
	/* Chipset specific GTT setup */
	int (*setup)(void);
	/* This should undo anything done in ->setup() save the unmapping
	 * of the mmio register file, that's done in the generic code. */
	void (*cleanup)(void);
	/* Write one PTE; flags selects caching attributes. */
	void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
	/* Flags is a more or less chipset specific opaque value.
	 * For chipsets that need to support old ums (non-gem) code, this
	 * needs to be identical to the various supported agp memory types! */
	bool (*check_flags)(unsigned int flags);
	/* Flush chipset write buffers (posting of GTT writes). */
	void (*chipset_flush)(void);
};
87
 
88
/* Singleton driver state; the whole file operates on this instance. */
static struct _intel_private {
	const struct intel_gtt_driver *driver;
	struct pci_dev *pcidev;	/* device one */
	struct pci_dev *bridge_dev;
	u8 __iomem *registers;		/* mapped MMIO register file */
	phys_addr_t gtt_phys_addr;	/* physical address of the GTT itself */
	u32 PGETBL_save;		/* PGETBL_CTL value restored on enable */
	u32 __iomem *gtt;		/* I915G */
	bool clear_fake_agp; /* on first access via agp, fill with scratch */
	int num_dcache_entries;		/* i810 display cache size (entries) */
	void __iomem *i9xx_flush_page;	/* mapped chipset flush page */
	char *i81x_gtt_table;
	struct resource ifp_resource;	/* flush-page physical range */
	int resource_valid;
	struct page *scratch_page;	/* backs unmapped GTT entries */
	phys_addr_t scratch_page_dma;
	int refcount;
	/* Whether i915 needs to use the dmar apis or not. */
	unsigned int needs_dmar : 1;
	phys_addr_t gma_bus_addr;	/* graphics aperture bus address */
	/*  Size of memory reserved for graphics by the BIOS */
	unsigned int stolen_size;
	/* Total number of gtt entries. */
	unsigned int gtt_total_entries;
	/* Part of the gtt that is mappable by the cpu, for those chips where
	 * this is not the full gtt. */
	unsigned int gtt_mappable_entries;
} intel_private;
116
 
6084 serge 117
/* Shorthand accessors for the active chipset driver's capabilities. */
#define INTEL_GTT_GEN	intel_private.driver->gen
#define IS_G33		intel_private.driver->is_g33
#define IS_PINEVIEW	intel_private.driver->is_pineview
#define IS_IRONLAKE	intel_private.driver->is_ironlake
#define HAS_PGTBL_EN	intel_private.driver->has_pgtbl_enable
2325 Serge 122
 
6084 serge 123
#if IS_ENABLED(CONFIG_AGP_INTEL)
124
static int intel_gtt_map_memory(struct page **pages,
125
				unsigned int num_entries,
126
				struct sg_table *st)
127
{
128
	struct scatterlist *sg;
129
	int i;
130
 
131
	DBG("try mapping %lu pages\n", (unsigned long)num_entries);
132
 
133
	if (sg_alloc_table(st, num_entries, GFP_KERNEL))
134
		goto err;
135
 
136
	for_each_sg(st->sgl, sg, num_entries, i)
137
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
138
 
139
	if (!pci_map_sg(intel_private.pcidev,
140
			st->sgl, st->nents, PCI_DMA_BIDIRECTIONAL))
141
		goto err;
142
 
143
	return 0;
144
 
145
err:
146
	sg_free_table(st);
147
	return -ENOMEM;
148
}
149
 
150
static void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
151
{
152
	struct sg_table st;
153
	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
154
 
155
	pci_unmap_sg(intel_private.pcidev, sg_list,
156
		     num_sg, PCI_DMA_BIDIRECTIONAL);
157
 
158
	st.sgl = sg_list;
159
	st.orig_nents = st.nents = num_sg;
160
 
161
	sg_free_table(&st);
162
}
163
 
164
/* The fake AGP bridge needs no configuration on enable — intentional no-op. */
static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
}
168
 
169
/* Exists to support ARGB cursors */
170
static struct page *i8xx_alloc_pages(void)
171
{
172
	struct page *page;
173
 
174
	page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
175
	if (page == NULL)
176
		return NULL;
177
 
178
	if (set_pages_uc(page, 4) < 0) {
179
		set_pages_wb(page, 4);
180
		__free_pages(page, 2);
181
		return NULL;
182
	}
183
	atomic_inc(&agp_bridge->current_memory_agp);
184
	return page;
185
}
186
 
187
static void i8xx_destroy_pages(struct page *page)
188
{
189
	if (page == NULL)
190
		return;
191
 
192
	set_pages_wb(page, 4);
193
	__free_pages(page, 2);
194
	atomic_dec(&agp_bridge->current_memory_agp);
195
}
196
#endif
197
 
198
#if IS_ENABLED(CONFIG_AGP_INTEL)
199
/*
 * Map i810 display-cache entries into the GTT starting at @pg_start.
 * Returns -EINVAL when the range would overrun the dcache.
 */
static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
				      int type)
{
	int entry;

	if ((pg_start + mem->page_count) > intel_private.num_dcache_entries)
		return -EINVAL;

	if (!mem->is_flushed)
		global_cache_flush();

	for (entry = pg_start; entry < pg_start + mem->page_count; entry++) {
		dma_addr_t addr = entry << PAGE_SHIFT;

		intel_private.driver->write_entry(addr, entry, type);
	}
	wmb();	/* ensure all PTE writes are posted before use */

	return 0;
}
220
 
221
/*
222
 * The i810/i830 requires a physical address to program its mouse
223
 * pointer into hardware.
224
 * However the Xserver still writes to it through the agp aperture.
225
 */
226
/*
 * The i810/i830 requires a physical address to program its mouse
 * pointer into hardware.
 * However the Xserver still writes to it through the agp aperture.
 *
 * Only 1-page and 4-page (ARGB cursor) allocations are supported.
 */
static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
{
	struct agp_memory *new;
	struct page *page;

	switch (pg_count) {
	case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
		break;
	case 4:
		/* kludge to get 4 physical pages for ARGB cursor */
		page = i8xx_alloc_pages();
		break;
	default:
		return NULL;
	}

	if (page == NULL)
		return NULL;

	new = agp_create_memory(pg_count);
	if (new == NULL) {
		/* Fix: don't leak the page(s) when the agp_memory
		 * allocation fails. */
		if (pg_count == 4)
			i8xx_destroy_pages(page);
		else {
			agp_bridge->driver->agp_destroy_page(page,
							     AGP_PAGE_DESTROY_UNMAP);
			agp_bridge->driver->agp_destroy_page(page,
							     AGP_PAGE_DESTROY_FREE);
		}
		return NULL;
	}

	new->pages[0] = page;
	if (pg_count == 4) {
		/* kludge to get 4 physical pages for ARGB cursor */
		new->pages[1] = new->pages[0] + 1;
		new->pages[2] = new->pages[1] + 1;
		new->pages[3] = new->pages[2] + 1;
	}
	new->page_count = pg_count;
	new->num_scratch_pages = pg_count;
	new->type = AGP_PHYS_MEMORY;
	new->physical = page_to_phys(new->pages[0]);
	return new;
}
262
 
263
static void intel_i810_free_by_type(struct agp_memory *curr)
264
{
265
	agp_free_key(curr->key);
266
	if (curr->type == AGP_PHYS_MEMORY) {
267
		if (curr->page_count == 4)
268
			i8xx_destroy_pages(curr->pages[0]);
269
		else {
270
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
271
							     AGP_PAGE_DESTROY_UNMAP);
272
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
273
							     AGP_PAGE_DESTROY_FREE);
274
		}
275
		agp_free_page_array(curr);
276
	}
277
	kfree(curr);
278
}
279
#endif
280
 
2325 Serge 281
static int intel_gtt_setup_scratch_page(void)
282
{
3243 Serge 283
	struct page *page;
3031 serge 284
	dma_addr_t dma_addr;
2325 Serge 285
 
3243 Serge 286
	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
287
	if (page == NULL)
6084 serge 288
		return -ENOMEM;
3480 Serge 289
		intel_private.scratch_page_dma = page_to_phys(page);
2325 Serge 290
 
3243 Serge 291
	intel_private.scratch_page = page;
2325 Serge 292
 
6084 serge 293
	return 0;
2325 Serge 294
}
295
 
296
/*
 * Decode how much memory the BIOS reserved ("stole") for graphics from
 * the GMCH control register. Returns the size in bytes, 0 if none.
 */
static unsigned int intel_gtt_stolen_size(void)
{
	u16 gmch_ctrl;
	u8 rdct;
	int local = 0;
	/* RDRAM device technology widths (MB) indexed by DDT field. */
	static const int ddt[4] = { 0, 16, 32, 64 };
	unsigned int stolen_size = 0;

	if (INTEL_GTT_GEN == 1)
		return 0; /* no stolen mem on i81x */

	pci_read_config_word(intel_private.bridge_dev,
			     I830_GMCH_CTRL, &gmch_ctrl);

	/* 830/845G encode the stolen size differently from later parts. */
	if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
	    intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
		case I830_GMCH_GMS_STOLEN_512:
			stolen_size = KB(512);
			break;
		case I830_GMCH_GMS_STOLEN_1024:
			stolen_size = MB(1);
			break;
		case I830_GMCH_GMS_STOLEN_8192:
			stolen_size = MB(8);
			break;
		case I830_GMCH_GMS_LOCAL:
			/* Dedicated (local) RDRAM: size derived from the
			 * channel-type register, not the GMCH field. */
			rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
			stolen_size = (I830_RDRAM_ND(rdct) + 1) *
					MB(ddt[I830_RDRAM_DDT(rdct)]);
			local = 1;
			break;
		default:
			stolen_size = 0;
			break;
		}
	} else {
		/* 855 and newer: direct size encoding. */
		switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
		case I855_GMCH_GMS_STOLEN_1M:
			stolen_size = MB(1);
			break;
		case I855_GMCH_GMS_STOLEN_4M:
			stolen_size = MB(4);
			break;
		case I855_GMCH_GMS_STOLEN_8M:
			stolen_size = MB(8);
			break;
		case I855_GMCH_GMS_STOLEN_16M:
			stolen_size = MB(16);
			break;
		case I855_GMCH_GMS_STOLEN_32M:
			stolen_size = MB(32);
			break;
		case I915_GMCH_GMS_STOLEN_48M:
			stolen_size = MB(48);
			break;
		case I915_GMCH_GMS_STOLEN_64M:
			stolen_size = MB(64);
			break;
		case G33_GMCH_GMS_STOLEN_128M:
			stolen_size = MB(128);
			break;
		case G33_GMCH_GMS_STOLEN_256M:
			stolen_size = MB(256);
			break;
		case INTEL_GMCH_GMS_STOLEN_96M:
			stolen_size = MB(96);
			break;
		case INTEL_GMCH_GMS_STOLEN_160M:
			stolen_size = MB(160);
			break;
		case INTEL_GMCH_GMS_STOLEN_224M:
			stolen_size = MB(224);
			break;
		case INTEL_GMCH_GMS_STOLEN_352M:
			stolen_size = MB(352);
			break;
		default:
			stolen_size = 0;
			break;
		}
	}

	if (stolen_size > 0) {
		dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
		       stolen_size / KB(1), local ? "local" : "stolen");
	} else {
		dev_info(&intel_private.bridge_dev->dev,
		       "no pre-allocated video memory detected\n");
		stolen_size = 0;
	}

	return stolen_size;
}
390
 
391
static void i965_adjust_pgetbl_size(unsigned int size_flag)
392
{
6084 serge 393
	u32 pgetbl_ctl, pgetbl_ctl2;
2325 Serge 394
 
6084 serge 395
	/* ensure that ppgtt is disabled */
396
	pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
397
	pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
398
	writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);
2325 Serge 399
 
6084 serge 400
	/* write the new ggtt size */
401
	pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
402
	pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
403
	pgetbl_ctl |= size_flag;
404
	writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
2325 Serge 405
}
406
 
407
/*
 * Read (and on gen5 first normalize) the GTT page-table size from
 * PGETBL_CTL and convert it to a number of 4-byte entries.
 */
static unsigned int i965_gtt_total_entries(void)
{
	int size;
	u32 pgetbl_ctl;
	u16 gmch_ctl;

	pci_read_config_word(intel_private.bridge_dev,
			     I830_GMCH_CTRL, &gmch_ctl);

	if (INTEL_GTT_GEN == 5) {
		/* Gen5 splits the table between GGTT and VT-d use; fold the
		 * combined encoding back into a plain PGETBL size first. */
		switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
		case G4x_GMCH_SIZE_1M:
		case G4x_GMCH_SIZE_VT_1M:
			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
			break;
		case G4x_GMCH_SIZE_VT_1_5M:
			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
			break;
		case G4x_GMCH_SIZE_2M:
		case G4x_GMCH_SIZE_VT_2M:
			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
			break;
		}
	}

	pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

	switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
	case I965_PGETBL_SIZE_128KB:
		size = KB(128);
		break;
	case I965_PGETBL_SIZE_256KB:
		size = KB(256);
		break;
	case I965_PGETBL_SIZE_512KB:
		size = KB(512);
		break;
	/* GTT pagetable sizes bigger than 512KB are not possible on G33! */
	case I965_PGETBL_SIZE_1MB:
		size = KB(1024);
		break;
	case I965_PGETBL_SIZE_2MB:
		size = KB(2048);
		break;
	case I965_PGETBL_SIZE_1_5MB:
		size = KB(1024 + 512);
		break;
	default:
		dev_info(&intel_private.pcidev->dev,
			 "unknown page table size, assuming 512KB\n");
		size = KB(512);
	}

	/* Each GTT entry is 4 bytes. */
	return size/4;
}
462
 
463
static unsigned int intel_gtt_total_entries(void)
464
{
6084 serge 465
	if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
466
		return i965_gtt_total_entries();
3243 Serge 467
	else {
6084 serge 468
		/* On previous hardware, the GTT size was just what was
469
		 * required to map the aperture.
470
		 */
3480 Serge 471
		return intel_private.gtt_mappable_entries;
6084 serge 472
	}
2325 Serge 473
}
474
 
475
static unsigned int intel_gtt_mappable_entries(void)
476
{
6084 serge 477
	unsigned int aperture_size;
2325 Serge 478
 
6084 serge 479
	if (INTEL_GTT_GEN == 1) {
480
		u32 smram_miscc;
2325 Serge 481
 
6084 serge 482
		pci_read_config_dword(intel_private.bridge_dev,
483
				      I810_SMRAM_MISCC, &smram_miscc);
2325 Serge 484
 
6084 serge 485
		if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
486
				== I810_GFX_MEM_WIN_32M)
487
			aperture_size = MB(32);
488
		else
489
			aperture_size = MB(64);
490
	} else if (INTEL_GTT_GEN == 2) {
491
		u16 gmch_ctrl;
2325 Serge 492
 
6084 serge 493
		pci_read_config_word(intel_private.bridge_dev,
494
				     I830_GMCH_CTRL, &gmch_ctrl);
2325 Serge 495
 
6084 serge 496
		if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
497
			aperture_size = MB(64);
498
		else
499
			aperture_size = MB(128);
500
	} else {
501
		/* 9xx supports large sizes, just look at the length */
502
		aperture_size = pci_resource_len(intel_private.pcidev, 2);
503
	}
2325 Serge 504
 
6084 serge 505
	return aperture_size >> PAGE_SHIFT;
2325 Serge 506
}
507
 
508
/* Scratch-page teardown is stubbed out in this KolibriOS port; the page
 * lives for the duration of the system. */
static void intel_gtt_teardown_scratch_page(void)
{
   // FreePage(intel_private.scratch_page_dma);
}
512
 
513
static void intel_gtt_cleanup(void)
514
{
6084 serge 515
	intel_private.driver->cleanup();
2325 Serge 516
 
3031 serge 517
	iounmap(intel_private.gtt);
518
	iounmap(intel_private.registers);
2325 Serge 519
 
2339 Serge 520
	intel_gtt_teardown_scratch_page();
2325 Serge 521
}
522
 
4104 Serge 523
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline int needs_ilk_vtd_wa(void)
{
#ifdef CONFIG_INTEL_IOMMU
	const unsigned short gpu_devid = intel_private.pcidev->device;

	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if (!intel_iommu_gfx_mapped)
		return 0;

	if (gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
	    gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG)
		return 1;
#endif
	return 0;
}
541
 
542
static bool intel_gtt_can_wc(void)
543
{
544
	if (INTEL_GTT_GEN <= 2)
545
		return false;
546
 
547
	if (INTEL_GTT_GEN >= 6)
548
		return false;
549
 
550
	/* Reports of major corruption with ILK vt'd enabled */
551
	if (needs_ilk_vtd_wa())
552
		return false;
553
 
554
	return true;
555
}
556
 
2325 Serge 557
static int intel_gtt_init(void)
558
{
6084 serge 559
	u32 gtt_map_size;
5060 serge 560
	int ret, bar;
2325 Serge 561
 
6084 serge 562
	ret = intel_private.driver->setup();
563
	if (ret != 0)
564
		return ret;
2325 Serge 565
 
3480 Serge 566
	intel_private.gtt_mappable_entries = intel_gtt_mappable_entries();
567
	intel_private.gtt_total_entries = intel_gtt_total_entries();
2325 Serge 568
 
6084 serge 569
	/* save the PGETBL reg for resume */
570
	intel_private.PGETBL_save =
571
		readl(intel_private.registers+I810_PGETBL_CTL)
572
			& ~I810_PGETBL_ENABLED;
573
	/* we only ever restore the register when enabling the PGTBL... */
574
	if (HAS_PGTBL_EN)
575
		intel_private.PGETBL_save |= I810_PGETBL_ENABLED;
2325 Serge 576
 
2339 Serge 577
	dev_info(&intel_private.bridge_dev->dev,
578
			"detected gtt size: %dK total, %dK mappable\n",
3480 Serge 579
			intel_private.gtt_total_entries * 4,
580
			intel_private.gtt_mappable_entries * 4);
2325 Serge 581
 
3480 Serge 582
	gtt_map_size = intel_private.gtt_total_entries * 4;
2325 Serge 583
 
3031 serge 584
	intel_private.gtt = NULL;
585
	if (intel_private.gtt == NULL)
5060 serge 586
		intel_private.gtt = ioremap(intel_private.gtt_phys_addr,
3031 serge 587
					    gtt_map_size);
588
	if (intel_private.gtt == NULL) {
6084 serge 589
		intel_private.driver->cleanup();
3031 serge 590
		iounmap(intel_private.registers);
6084 serge 591
		return -ENOMEM;
592
	}
2325 Serge 593
 
5060 serge 594
#if IS_ENABLED(CONFIG_AGP_INTEL)
595
	global_cache_flush();   /* FIXME: ? */
596
#endif
2325 Serge 597
 
3480 Serge 598
	intel_private.stolen_size = intel_gtt_stolen_size();
2325 Serge 599
 
3480 Serge 600
	intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
2325 Serge 601
 
6084 serge 602
	ret = intel_gtt_setup_scratch_page();
603
	if (ret != 0) {
604
		intel_gtt_cleanup();
605
		return ret;
606
	}
2325 Serge 607
 
3031 serge 608
	if (INTEL_GTT_GEN <= 2)
5060 serge 609
		bar = I810_GMADR_BAR;
3031 serge 610
	else
5060 serge 611
		bar = I915_GMADR_BAR;
2325 Serge 612
 
5060 serge 613
	intel_private.gma_bus_addr = pci_bus_address(intel_private.pcidev, bar);
614
	return 0;
615
}
3031 serge 616
 
6084 serge 617
#if IS_ENABLED(CONFIG_AGP_INTEL)
618
static int intel_fake_agp_fetch_size(void)
619
{
620
	int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
621
	unsigned int aper_size;
622
	int i;
3480 Serge 623
 
6084 serge 624
	aper_size = (intel_private.gtt_mappable_entries << PAGE_SHIFT) / MB(1);
625
 
626
	for (i = 0; i < num_sizes; i++) {
627
		if (aper_size == intel_fake_agp_sizes[i].size) {
628
			agp_bridge->current_size =
629
				(void *) (intel_fake_agp_sizes + i);
630
			return aper_size;
631
		}
632
	}
633
 
634
	return 0;
635
}
636
#endif
637
 
2339 Serge 638
/* Write one gen2/3-format PTE; only the system-cached flag is honoured. */
static void i830_write_entry(dma_addr_t addr, unsigned int entry,
			     unsigned int flags)
{
	u32 pte = I810_PTE_VALID;

	if (flags == AGP_USER_CACHED_MEMORY)
		pte |= I830_PTE_SYSTEM_CACHED;

	writel(addr | pte, intel_private.gtt + entry);
}
648
 
3031 serge 649
/*
 * Enable the GTT: on gen2, first set the GMCH enable bit in PCI config
 * space; on all parts restore the saved PGETBL_CTL value. Returns false
 * if the hardware refuses either write. The MMIO write ordering here is
 * deliberate — do not reorder.
 */
bool intel_enable_gtt(void)
{
	u8 __iomem *reg;

	if (INTEL_GTT_GEN == 2) {
		u16 gmch_ctrl;

		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);
		gmch_ctrl |= I830_GMCH_ENABLED;
		pci_write_config_word(intel_private.bridge_dev,
				      I830_GMCH_CTRL, gmch_ctrl);

		/* Read back to verify the enable bit actually stuck. */
		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);
		if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
			dev_err(&intel_private.pcidev->dev,
				"failed to enable the GTT: GMCH_CTRL=%x\n",
				gmch_ctrl);
			return false;
		}
	}

	/* On the resume path we may be adjusting the PGTBL value, so
	 * be paranoid and flush all chipset write buffers...
	 */
	if (INTEL_GTT_GEN >= 3)
		writel(0, intel_private.registers+GFX_FLSH_CNTL);

	reg = intel_private.registers+I810_PGETBL_CTL;
	writel(intel_private.PGETBL_save, reg);
	/* Verify the enable bit latched (only meaningful when it exists). */
	if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
		dev_err(&intel_private.pcidev->dev,
			"failed to enable the GTT: PGETBL=%x [expected %x]\n",
			readl(reg), intel_private.PGETBL_save);
		return false;
	}

	/* Flush again after the PGETBL update. */
	if (INTEL_GTT_GEN >= 3)
		writel(0, intel_private.registers+GFX_FLSH_CNTL);

	return true;
}
EXPORT_SYMBOL(intel_enable_gtt);
2325 Serge 693
 
6084 serge 694
#if IS_ENABLED(CONFIG_AGP_INTEL)
695
static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
696
{
697
	agp_bridge->gatt_table_real = NULL;
698
	agp_bridge->gatt_table = NULL;
699
	agp_bridge->gatt_bus_addr = 0;
700
 
701
	return 0;
702
}
703
 
704
/* Nothing was allocated in create_gatt_table, so nothing to free. */
static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
{
	return 0;
}
708
 
709
static int intel_fake_agp_configure(void)
710
{
711
	if (!intel_enable_gtt())
712
	    return -EIO;
713
 
714
	intel_private.clear_fake_agp = true;
715
	agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
716
 
717
	return 0;
718
}
719
#endif
720
 
2339 Serge 721
static bool i830_check_flags(unsigned int flags)
722
{
723
	switch (flags) {
724
	case 0:
725
	case AGP_PHYS_MEMORY:
726
	case AGP_USER_CACHED_MEMORY:
727
	case AGP_USER_MEMORY:
728
		return true;
729
	}
2325 Serge 730
 
2339 Serge 731
	return false;
732
}
733
 
3243 Serge 734
void intel_gtt_insert_sg_entries(struct sg_table *st,
3031 serge 735
				 unsigned int pg_start,
736
				 unsigned int flags)
2332 Serge 737
{
3243 Serge 738
	struct scatterlist *sg;
739
	unsigned int len, m;
6084 serge 740
	int i, j;
2325 Serge 741
 
3031 serge 742
	j = pg_start;
743
 
3243 Serge 744
	/* sg may merge pages, but we have to separate
745
	 * per-page addr for GTT */
746
	for_each_sg(st->sgl, sg, st->nents, i) {
747
		len = sg_dma_len(sg) >> PAGE_SHIFT;
748
		for (m = 0; m < len; m++) {
749
			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
6084 serge 750
			intel_private.driver->write_entry(addr, j, flags);
751
			j++;
3243 Serge 752
		}
753
	}
6084 serge 754
	wmb();
3031 serge 755
}
3243 Serge 756
EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
3031 serge 757
 
5060 serge 758
#if IS_ENABLED(CONFIG_AGP_INTEL)
3031 serge 759
static void intel_gtt_insert_pages(unsigned int first_entry,
760
				   unsigned int num_entries,
3243 Serge 761
				   struct page **pages,
3031 serge 762
				   unsigned int flags)
763
{
6084 serge 764
	int i, j;
3031 serge 765
 
6084 serge 766
	for (i = 0, j = first_entry; i < num_entries; i++, j++) {
3243 Serge 767
		dma_addr_t addr = page_to_phys(pages[i]);
6084 serge 768
		intel_private.driver->write_entry(addr,
769
						  j, flags);
770
	}
771
	wmb();
2332 Serge 772
}
773
 
5060 serge 774
/*
 * agp insert_memory hook: bind @mem into the GTT at @pg_start.
 * Returns 0 on success, -EINVAL on bad range/type, or the map error.
 * Note the goto flow: "out" is the success path (ret becomes 0),
 * "out_err" keeps the error code; both mark the memory flushed.
 */
static int intel_fake_agp_insert_entries(struct agp_memory *mem,
					 off_t pg_start, int type)
{
	int ret = -EINVAL;

	/* First access through the fake agp: scrub whatever the BIOS left
	 * in the non-stolen part of the mappable GTT. */
	if (intel_private.clear_fake_agp) {
		int start = intel_private.stolen_size / PAGE_SIZE;
		int end = intel_private.gtt_mappable_entries;
		intel_gtt_clear_range(start, end - start);
		intel_private.clear_fake_agp = false;
	}

	if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
		return i810_insert_dcache_entries(mem, pg_start, type);

	if (mem->page_count == 0)
		goto out;	/* empty request is a successful no-op */

	if (pg_start + mem->page_count > intel_private.gtt_total_entries)
		goto out_err;

	if (type != mem->type)
		goto out_err;

	if (!intel_private.driver->check_flags(type))
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	if (intel_private.needs_dmar) {
		struct sg_table st;

		ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
		if (ret != 0)
			return ret;

		intel_gtt_insert_sg_entries(&st, pg_start, type);
		/* Hand ownership of the mapped list to @mem for removal. */
		mem->sg_list = st.sgl;
		mem->num_sg = st.nents;
	} else
		intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
				       type);

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}
824
#endif
825
 
2332 Serge 826
void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
827
{
828
	unsigned int i;
829
 
830
	for (i = first_entry; i < (first_entry + num_entries); i++) {
3480 Serge 831
		intel_private.driver->write_entry(intel_private.scratch_page_dma,
2332 Serge 832
						  i, 0);
833
	}
6084 serge 834
	wmb();
2332 Serge 835
}
6084 serge 836
EXPORT_SYMBOL(intel_gtt_clear_range);
837
 
838
#if IS_ENABLED(CONFIG_AGP_INTEL)
839
static int intel_fake_agp_remove_entries(struct agp_memory *mem,
840
					 off_t pg_start, int type)
841
{
842
	if (mem->page_count == 0)
843
		return 0;
844
 
845
	intel_gtt_clear_range(pg_start, mem->page_count);
846
 
847
	if (intel_private.needs_dmar) {
848
		intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
849
		mem->sg_list = NULL;
850
		mem->num_sg = 0;
851
	}
852
 
853
	return 0;
854
}
855
 
856
/* agp alloc_by_type hook: only i810 dcache and physical (cursor)
 * allocations are supported; everything else returns NULL. */
static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
						       int type)
{
	struct agp_memory *mem;

	if (type == AGP_DCACHE_MEMORY && INTEL_GTT_GEN == 1) {
		/* The dcache can only be bound whole. */
		if (pg_count != intel_private.num_dcache_entries)
			return NULL;

		mem = agp_create_memory(1);
		if (mem == NULL)
			return NULL;

		mem->type = AGP_DCACHE_MEMORY;
		mem->page_count = pg_count;
		mem->num_scratch_pages = 0;
		/* No backing pages: the dcache lives on the chip. */
		agp_free_page_array(mem);
		return mem;
	}

	if (type == AGP_PHYS_MEMORY)
		return alloc_agpphysmem_i8xx(pg_count, type);

	/* always return NULL for other allocation types for now */
	return NULL;
}
880
#endif
4389 Serge 881
static void intel_i915_setup_chipset_flush(void)
882
{
883
	int ret;
884
	u32 temp;
2332 Serge 885
 
4389 Serge 886
	pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
887
	if (!(temp & 0x1)) {
888
//		intel_alloc_chipset_flush_resource();
889
//		intel_private.resource_valid = 1;
890
//		pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
891
	} else {
892
		temp &= ~1;
893
 
894
		intel_private.resource_valid = 1;
895
		intel_private.ifp_resource.start = temp;
896
		intel_private.ifp_resource.end = temp + PAGE_SIZE;
897
//		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
898
		/* some BIOSes reserve this area in a pnp some don't */
899
//		if (ret)
900
//			intel_private.resource_valid = 0;
901
	}
902
}
903
 
904
static void intel_i965_g33_setup_chipset_flush(void)
905
{
906
	u32 temp_hi, temp_lo;
907
	int ret;
908
 
909
	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
910
	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);
911
 
912
	if (!(temp_lo & 0x1)) {
913
 
914
//		intel_alloc_chipset_flush_resource();
915
 
916
//		intel_private.resource_valid = 1;
917
//		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
918
//			upper_32_bits(intel_private.ifp_resource.start));
919
//		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
920
	} else {
921
		u64 l64;
922
 
923
		temp_lo &= ~0x1;
924
		l64 = ((u64)temp_hi << 32) | temp_lo;
925
 
926
		intel_private.resource_valid = 1;
927
		intel_private.ifp_resource.start = l64;
928
		intel_private.ifp_resource.end = l64 + PAGE_SIZE;
929
//		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
930
		/* some BIOSes reserve this area in a pnp some don't */
931
//		if (ret)
932
//			intel_private.resource_valid = 0;
933
	}
934
}
935
 
2325 Serge 936
static void intel_i9xx_setup_flush(void)
937
{
6084 serge 938
	/* return if already configured */
939
	if (intel_private.ifp_resource.start)
940
		return;
2325 Serge 941
 
6084 serge 942
	if (INTEL_GTT_GEN == 6)
943
		return;
2325 Serge 944
 
6084 serge 945
	/* setup a resource for this object */
4389 Serge 946
	intel_private.ifp_resource.name = "Intel Flush Page";
947
	intel_private.ifp_resource.flags = IORESOURCE_MEM;
2325 Serge 948
 
6084 serge 949
	/* Setup chipset flush for 915 */
4389 Serge 950
	if (IS_G33 || INTEL_GTT_GEN >= 4) {
951
		intel_i965_g33_setup_chipset_flush();
952
	} else {
953
		intel_i915_setup_chipset_flush();
954
	}
2325 Serge 955
 
4389 Serge 956
	if (intel_private.ifp_resource.start)
6084 serge 957
		intel_private.i9xx_flush_page = ioremap(intel_private.ifp_resource.start, PAGE_SIZE);
958
	if (!intel_private.i9xx_flush_page)
959
		dev_err(&intel_private.pcidev->dev,
960
			"can't ioremap flush page - no chipset flushing\n");
2339 Serge 961
}
2325 Serge 962
 
2339 Serge 963
/* Unmap the flush page and invalidate the flush-resource state.
 * Resource release is stubbed out in this port. */
static void i9xx_cleanup(void)
{
	if (intel_private.i9xx_flush_page)
		iounmap(intel_private.i9xx_flush_page);
//	if (intel_private.resource_valid)
//		release_resource(&intel_private.ifp_resource);
	intel_private.ifp_resource.start = 0;
	intel_private.resource_valid = 0;
}
972
 
973
static void i9xx_chipset_flush(void)
974
{
6084 serge 975
	if (intel_private.i9xx_flush_page)
976
		writel(1, intel_private.i9xx_flush_page);
2325 Serge 977
}
978
 
2339 Serge 979
/* Write one gen4+-format PTE: address bits 32-35 are folded into
 * PTE bits 4-7. */
static void i965_write_entry(dma_addr_t addr,
			     unsigned int entry,
			     unsigned int flags)
{
	u32 pte = I810_PTE_VALID;

	if (flags == AGP_USER_CACHED_MEMORY)
		pte |= I830_PTE_SYSTEM_CACHED;

	/* Shift high bits down */
	addr |= (addr >> 28) & 0xf0;
	writel(addr | pte, intel_private.gtt + entry);
}
993
 
2325 Serge 994
static int i9xx_setup(void)
995
{
5060 serge 996
	phys_addr_t reg_addr;
3031 serge 997
	int size = KB(512);
2325 Serge 998
 
5060 serge 999
	reg_addr = pci_resource_start(intel_private.pcidev, I915_MMADR_BAR);
2325 Serge 1000
 
3031 serge 1001
	intel_private.registers = ioremap(reg_addr, size);
6084 serge 1002
	if (!intel_private.registers)
1003
		return -ENOMEM;
2325 Serge 1004
 
3243 Serge 1005
	switch (INTEL_GTT_GEN) {
1006
	case 3:
5060 serge 1007
		intel_private.gtt_phys_addr =
1008
			pci_resource_start(intel_private.pcidev, I915_PTE_BAR);
3243 Serge 1009
		break;
6084 serge 1010
	case 5:
5060 serge 1011
		intel_private.gtt_phys_addr = reg_addr + MB(2);
6084 serge 1012
		break;
1013
	default:
5060 serge 1014
		intel_private.gtt_phys_addr = reg_addr + KB(512);
6084 serge 1015
		break;
1016
	}
2325 Serge 1017
 
6084 serge 1018
	intel_i9xx_setup_flush();
2325 Serge 1019
 
6084 serge 1020
	return 0;
2325 Serge 1021
}
1022
 
6084 serge 1023
#if IS_ENABLED(CONFIG_AGP_INTEL)
/* Legacy agp bridge glue: routes the old agp entry points onto the GTT
 * code (see the fairy-tale comment at the top of this file). */
static const struct agp_bridge_driver intel_fake_agp_driver = {
	.owner			= THIS_MODULE,
	.size_type		= FIXED_APER_SIZE,
	.aperture_sizes		= intel_fake_agp_sizes,
	.num_aperture_sizes	= ARRAY_SIZE(intel_fake_agp_sizes),
	.configure		= intel_fake_agp_configure,
	.fetch_size		= intel_fake_agp_fetch_size,
	.cleanup		= intel_gtt_cleanup,
	.agp_enable		= intel_fake_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_fake_agp_create_gatt_table,
	.free_gatt_table	= intel_fake_agp_free_gatt_table,
	.insert_memory		= intel_fake_agp_insert_entries,
	.remove_memory		= intel_fake_agp_remove_entries,
	.alloc_by_type		= intel_fake_agp_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
};
#endif
2339 Serge 1046
static const struct intel_gtt_driver i915_gtt_driver = {
1047
	.gen = 3,
1048
	.has_pgtbl_enable = 1,
1049
	.setup = i9xx_setup,
1050
	.cleanup = i9xx_cleanup,
1051
	/* i945 is the last gpu to need phys mem (for overlay and cursors). */
1052
	.write_entry = i830_write_entry,
1053
	.dma_mask_size = 32,
1054
	.check_flags = i830_check_flags,
1055
	.chipset_flush = i9xx_chipset_flush,
1056
};
1057
static const struct intel_gtt_driver g33_gtt_driver = {
1058
	.gen = 3,
1059
	.is_g33 = 1,
1060
	.setup = i9xx_setup,
1061
	.cleanup = i9xx_cleanup,
1062
	.write_entry = i965_write_entry,
1063
	.dma_mask_size = 36,
1064
	.check_flags = i830_check_flags,
1065
	.chipset_flush = i9xx_chipset_flush,
1066
};
1067
static const struct intel_gtt_driver pineview_gtt_driver = {
1068
	.gen = 3,
1069
	.is_pineview = 1, .is_g33 = 1,
1070
	.setup = i9xx_setup,
1071
	.cleanup = i9xx_cleanup,
1072
	.write_entry = i965_write_entry,
1073
	.dma_mask_size = 36,
1074
	.check_flags = i830_check_flags,
1075
	.chipset_flush = i9xx_chipset_flush,
1076
};
1077
static const struct intel_gtt_driver i965_gtt_driver = {
1078
	.gen = 4,
1079
	.has_pgtbl_enable = 1,
1080
	.setup = i9xx_setup,
1081
	.cleanup = i9xx_cleanup,
1082
	.write_entry = i965_write_entry,
1083
	.dma_mask_size = 36,
1084
	.check_flags = i830_check_flags,
1085
	.chipset_flush = i9xx_chipset_flush,
1086
};
1087
static const struct intel_gtt_driver g4x_gtt_driver = {
1088
	.gen = 5,
1089
	.setup = i9xx_setup,
1090
	.cleanup = i9xx_cleanup,
1091
	.write_entry = i965_write_entry,
1092
	.dma_mask_size = 36,
1093
	.check_flags = i830_check_flags,
1094
	.chipset_flush = i9xx_chipset_flush,
1095
};
1096
static const struct intel_gtt_driver ironlake_gtt_driver = {
1097
	.gen = 5,
1098
	.is_ironlake = 1,
1099
	.setup = i9xx_setup,
1100
	.cleanup = i9xx_cleanup,
1101
	.write_entry = i965_write_entry,
1102
	.dma_mask_size = 36,
1103
	.check_flags = i830_check_flags,
1104
	.chipset_flush = i9xx_chipset_flush,
1105
};
2325 Serge 1106
 
1107
/* Table to describe Intel GMCH and AGP/PCIE GART drivers.  At least one of
1108
 * driver and gmch_driver must be non-null, and find_gmch will determine
1109
 * which one should be used if a gmch_chip_id is present.
1110
 */
1111
static const struct intel_gtt_driver_description {
6084 serge 1112
	unsigned int gmch_chip_id;
1113
	char *name;
1114
	const struct intel_gtt_driver *gtt_driver;
2325 Serge 1115
} intel_gtt_chipsets[] = {
2339 Serge 1116
	{ PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
1117
		&i915_gtt_driver },
1118
	{ PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
1119
		&i915_gtt_driver },
1120
	{ PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
1121
		&i915_gtt_driver },
1122
	{ PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
1123
		&i915_gtt_driver },
1124
	{ PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
1125
		&i915_gtt_driver },
1126
	{ PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
1127
		&i915_gtt_driver },
1128
	{ PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
1129
		&i965_gtt_driver },
1130
	{ PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
1131
		&i965_gtt_driver },
1132
	{ PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
1133
		&i965_gtt_driver },
1134
	{ PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
1135
		&i965_gtt_driver },
1136
	{ PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
1137
		&i965_gtt_driver },
1138
	{ PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
1139
		&i965_gtt_driver },
1140
	{ PCI_DEVICE_ID_INTEL_G33_IG, "G33",
1141
		&g33_gtt_driver },
1142
	{ PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
1143
		&g33_gtt_driver },
1144
	{ PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
1145
		&g33_gtt_driver },
1146
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
1147
		&pineview_gtt_driver },
1148
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
1149
		&pineview_gtt_driver },
1150
	{ PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
1151
		&g4x_gtt_driver },
1152
	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
1153
		&g4x_gtt_driver },
1154
	{ PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
1155
		&g4x_gtt_driver },
1156
	{ PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
1157
		&g4x_gtt_driver },
1158
	{ PCI_DEVICE_ID_INTEL_B43_IG, "B43",
1159
		&g4x_gtt_driver },
1160
	{ PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
1161
		&g4x_gtt_driver },
1162
	{ PCI_DEVICE_ID_INTEL_G41_IG, "G41",
1163
		&g4x_gtt_driver },
1164
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
1165
	    "HD Graphics", &ironlake_gtt_driver },
1166
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
1167
	    "HD Graphics", &ironlake_gtt_driver },
6084 serge 1168
	{ 0, NULL, NULL }
2325 Serge 1169
};
1170
 
1171
/*
 * Look for an Intel graphics device with the given PCI device id.
 * On success, stashes the pci_dev in intel_private.pcidev and returns 1;
 * returns 0 when no matching device (at function 0) is found.
 */
static int find_gmch(u16 device)
{
	struct pci_dev *gmch_device =
		pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);

	/* A hit on a non-zero PCI function is not the IGD itself —
	 * continue the scan from that device. */
	if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0)
		gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
					     device, gmch_device);

	if (gmch_device == NULL)
		return 0;

	intel_private.pcidev = gmch_device;
	return 1;
}
1187
 
3031 serge 1188
int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
6084 serge 1189
		     struct agp_bridge_data *bridge)
2325 Serge 1190
{
6084 serge 1191
	int i, mask;
2325 Serge 1192
 
3480 Serge 1193
	/*
1194
	 * Can be called from the fake agp driver but also directly from
1195
	 * drm/i915.ko. Hence we need to check whether everything is set up
1196
	 * already.
1197
	 */
1198
	if (intel_private.driver) {
1199
		intel_private.refcount++;
1200
		return 1;
1201
	}
1202
 
1203
 
2325 Serge 1204
    for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
1205
        if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
1206
            intel_private.driver =
1207
                intel_gtt_chipsets[i].gtt_driver;
1208
            break;
1209
        }
1210
    }
1211
 
1212
    if (!intel_private.driver)
1213
        return 0;
1214
 
3480 Serge 1215
	intel_private.refcount++;
1216
 
5060 serge 1217
#if IS_ENABLED(CONFIG_AGP_INTEL)
3031 serge 1218
	if (bridge) {
5060 serge 1219
		bridge->driver = &intel_fake_agp_driver;
6084 serge 1220
		bridge->dev_private_data = &intel_private;
3031 serge 1221
		bridge->dev = bridge_pdev;
1222
	}
5060 serge 1223
#endif
2325 Serge 1224
 
3031 serge 1225
    intel_private.bridge_dev = bridge_pdev;
2325 Serge 1226
 
3243 Serge 1227
	dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
2325 Serge 1228
 
1229
    mask = intel_private.driver->dma_mask_size;
1230
//    if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
1231
//        dev_err(&intel_private.pcidev->dev,
1232
//            "set gfx device dma mask %d-bit failed!\n", mask);
1233
//    else
1234
//        pci_set_consistent_dma_mask(intel_private.pcidev,
1235
//                        DMA_BIT_MASK(mask));
1236
 
3031 serge 1237
	if (intel_gtt_init() != 0) {
1238
//		intel_gmch_remove();
2325 Serge 1239
 
1240
        return 0;
3031 serge 1241
	}
2325 Serge 1242
 
1243
    return 1;
1244
}
2339 Serge 1245
EXPORT_SYMBOL(intel_gmch_probe);
2325 Serge 1246
 
6084 serge 1247
void intel_gtt_get(u64 *gtt_total, size_t *stolen_size,
1248
		   phys_addr_t *mappable_base, u64 *mappable_end)
2326 Serge 1249
{
3480 Serge 1250
	*gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
1251
	*stolen_size = intel_private.stolen_size;
1252
	*mappable_base = intel_private.gma_bus_addr;
1253
	*mappable_end = intel_private.gtt_mappable_entries << PAGE_SHIFT;
2326 Serge 1254
}
2339 Serge 1255
EXPORT_SYMBOL(intel_gtt_get);
2326 Serge 1256
 
2332 Serge 1257
void intel_gtt_chipset_flush(void)
1258
{
1259
	if (intel_private.driver->chipset_flush)
1260
		intel_private.driver->chipset_flush();
1261
}
2339 Serge 1262
EXPORT_SYMBOL(intel_gtt_chipset_flush);
2327 Serge 1263
 
2332 Serge 1264
 
6084 serge 1265
MODULE_AUTHOR("Dave Jones, Various @Intel");
3243 Serge 1266
MODULE_LICENSE("GPL and additional rights");