/*
 * Intel GTT (Graphics Translation Table) routines
 *
 * Caveat: This driver implements the linux agp interface, but this is far from
 * an agp driver! GTT support ended up here for purely historical reasons: The
 * old userspace intel graphics drivers needed an interface to map memory into
 * the GTT. And the drm provides a default interface for graphic devices sitting
 * on an agp port. So it made sense to fake the GTT support as an agp port to
 * avoid having to create a new api.
 *
 * With gem this does not make much sense anymore, just needlessly complicates
 * the code. But as long as the old graphics stack is still supported, it's
 * stuck here.
 *
 * /fairy-tale-mode off
 */

/* NOTE: the <header> targets of these include lines were eaten by the HTML
 * extraction of this dump; the names below are an assumption, reconstructed
 * from the include list of the upstream Linux intel-gtt.c. */
#include <linux/module.h>

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/agp_backend.h>
#include <linux/delay.h>

#include <asm/smp.h>
#include "agp.h"
#include "intel-agp.h"
#include <drm/intel-gtt.h>


struct pci_dev *
pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from);


#define PCI_VENDOR_ID_INTEL             0x8086
#define PCI_DEVICE_ID_INTEL_82830_HB    0x3575
#define PCI_DEVICE_ID_INTEL_82845G_HB   0x2560
#define PCI_DEVICE_ID_INTEL_82915G_IG   0x2582
#define PCI_DEVICE_ID_INTEL_82915GM_IG  0x2592
#define PCI_DEVICE_ID_INTEL_82945G_IG   0x2772
#define PCI_DEVICE_ID_INTEL_82945GM_IG  0x27A2


#define AGP_NORMAL_MEMORY 0

#define AGP_USER_TYPES (1 << 16)
#define AGP_USER_MEMORY (AGP_USER_TYPES)
#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)



/*
 * If we have Intel graphics, we're not going to have anything other than
 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
 * on the Intel IOMMU support (CONFIG_INTEL_IOMMU).
 * Only newer chipsets need to bother with this, of course.
 */
#ifdef CONFIG_INTEL_IOMMU
#define USE_PCI_DMA_API 1
#else
#define USE_PCI_DMA_API 0
#endif

struct intel_gtt_driver {
	unsigned int gen : 8;
	unsigned int is_g33 : 1;
	unsigned int is_pineview : 1;
	unsigned int is_ironlake : 1;
	unsigned int has_pgtbl_enable : 1;
	unsigned int dma_mask_size : 8;
	/* Chipset specific GTT setup */
	int (*setup)(void);
	/* This should undo anything done in ->setup() save the unmapping
	 * of the mmio register file, that's done in the generic code. */
	void (*cleanup)(void);
	void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
	/* Flags is a more or less chipset specific opaque value.
	 * For chipsets that need to support old ums (non-gem) code, this
	 * needs to be identical to the various supported agp memory types! */
	bool (*check_flags)(unsigned int flags);
	void (*chipset_flush)(void);
};

static struct _intel_private {
	const struct intel_gtt_driver *driver;
	struct pci_dev *pcidev;	/* device one */
	struct pci_dev *bridge_dev;
	u8 __iomem *registers;
	phys_addr_t gtt_phys_addr;
	u32 PGETBL_save;
	u32 __iomem *gtt;		/* I915G */
	bool clear_fake_agp; /* on first access via agp, fill with scratch */
	int num_dcache_entries;
	void __iomem *i9xx_flush_page;
	char *i81x_gtt_table;
	struct resource ifp_resource;
	int resource_valid;
	struct page *scratch_page;
	phys_addr_t scratch_page_dma;
	int refcount;
	/* Whether i915 needs to use the dmar apis or not. */
	unsigned int needs_dmar : 1;
	phys_addr_t gma_bus_addr;
	/*  Size of memory reserved for graphics by the BIOS */
	unsigned int stolen_size;
	/* Total number of gtt entries. */
	unsigned int gtt_total_entries;
	/* Part of the gtt that is mappable by the cpu, for those chips where
	 * this is not the full gtt. */
	unsigned int gtt_mappable_entries;
} intel_private;

#define INTEL_GTT_GEN	intel_private.driver->gen
#define IS_G33		intel_private.driver->is_g33
#define IS_PINEVIEW	intel_private.driver->is_pineview
#define IS_IRONLAKE	intel_private.driver->is_ironlake
#define HAS_PGTBL_EN	intel_private.driver->has_pgtbl_enable

#if IS_ENABLED(CONFIG_AGP_INTEL)
static int intel_gtt_map_memory(struct page **pages,
				unsigned int num_entries,
				struct sg_table *st)
{
	struct scatterlist *sg;
	int i;

	DBG("try mapping %lu pages\n", (unsigned long)num_entries);

	if (sg_alloc_table(st, num_entries, GFP_KERNEL))
		goto err;

	for_each_sg(st->sgl, sg, num_entries, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);

	if (!pci_map_sg(intel_private.pcidev,
			st->sgl, st->nents, PCI_DMA_BIDIRECTIONAL))
		goto err;

	return 0;

err:
	sg_free_table(st);
	return -ENOMEM;
}
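
/* pci_map_sg() may coalesce adjacent pages into fewer DMA segments; the
 * writers below (see intel_gtt_insert_sg_entries) therefore walk each
 * segment page by page instead of assuming one PTE per scatterlist entry. */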

static void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
{
	struct sg_table st;
	DBG("try unmapping %lu sg entries\n", (unsigned long)num_sg);

	pci_unmap_sg(intel_private.pcidev, sg_list,
		     num_sg, PCI_DMA_BIDIRECTIONAL);

	st.sgl = sg_list;
	st.orig_nents = st.nents = num_sg;

	sg_free_table(&st);
}

static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	return;
}

/* Exists to support ARGB cursors */
static struct page *i8xx_alloc_pages(void)
{
	struct page *page;

	page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
	if (page == NULL)
		return NULL;

	if (set_pages_uc(page, 4) < 0) {
		set_pages_wb(page, 4);
		__free_pages(page, 2);
		return NULL;
	}
	atomic_inc(&agp_bridge->current_memory_agp);
	return page;
}
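
/* An order-2 allocation yields four physically contiguous pages (16K), which
 * is what, e.g., a 64x64 ARGB cursor needs; alloc_agpphysmem_i8xx() below
 * relies on this contiguity when it fills pages[1..3] with pages[0] + n.
 * set_pages_uc() marks the pages uncached so CPU writes reach the hardware
 * without an explicit flush. */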

static void i8xx_destroy_pages(struct page *page)
{
	if (page == NULL)
		return;

	set_pages_wb(page, 4);
	__free_pages(page, 2);
	atomic_dec(&agp_bridge->current_memory_agp);
}
#endif

#if IS_ENABLED(CONFIG_AGP_INTEL)
static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
				      int type)
{
	int i;

	if ((pg_start + mem->page_count)
			> intel_private.num_dcache_entries)
		return -EINVAL;

	if (!mem->is_flushed)
		global_cache_flush();

	for (i = pg_start; i < (pg_start + mem->page_count); i++) {
		dma_addr_t addr = i << PAGE_SHIFT;
		intel_private.driver->write_entry(addr,
						  i, type);
	}
	wmb();

	return 0;
}

/*
 * The i810/i830 requires a physical address to program its mouse
 * pointer into hardware.
 * However the Xserver still writes to it through the agp aperture.
 */
static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
{
	struct agp_memory *new;
	struct page *page;

	switch (pg_count) {
	case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
		break;
	case 4:
		/* kludge to get 4 physical pages for ARGB cursor */
		page = i8xx_alloc_pages();
		break;
	default:
		return NULL;
	}

	if (page == NULL)
		return NULL;

	new = agp_create_memory(pg_count);
	if (new == NULL)
		return NULL;

	new->pages[0] = page;
	if (pg_count == 4) {
		/* kludge to get 4 physical pages for ARGB cursor */
		new->pages[1] = new->pages[0] + 1;
		new->pages[2] = new->pages[1] + 1;
		new->pages[3] = new->pages[2] + 1;
	}
	new->page_count = pg_count;
	new->num_scratch_pages = pg_count;
	new->type = AGP_PHYS_MEMORY;
	new->physical = page_to_phys(new->pages[0]);
	return new;
}

static void intel_i810_free_by_type(struct agp_memory *curr)
{
	agp_free_key(curr->key);
	if (curr->type == AGP_PHYS_MEMORY) {
		if (curr->page_count == 4)
			i8xx_destroy_pages(curr->pages[0]);
		else {
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_UNMAP);
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_FREE);
		}
		agp_free_page_array(curr);
	}
	kfree(curr);
}
#endif

static int intel_gtt_setup_scratch_page(void)
{
	struct page *page;
	dma_addr_t dma_addr;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return -ENOMEM;
	intel_private.scratch_page_dma = page_to_phys(page);

	intel_private.scratch_page = page;

	return 0;
}
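
/* The scratch page backs every GTT entry that has no real allocation bound
 * to it: intel_gtt_clear_range() points freed entries here, so stray GPU
 * reads and writes land in one harmless zeroed page instead of random
 * memory. */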

static unsigned int intel_gtt_stolen_size(void)
{
	u16 gmch_ctrl;
	u8 rdct;
	int local = 0;
	static const int ddt[4] = { 0, 16, 32, 64 };
	unsigned int stolen_size = 0;

	if (INTEL_GTT_GEN == 1)
		return 0; /* no stolen mem on i81x */

	pci_read_config_word(intel_private.bridge_dev,
			     I830_GMCH_CTRL, &gmch_ctrl);

	if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
	    intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
		case I830_GMCH_GMS_STOLEN_512:
			stolen_size = KB(512);
			break;
		case I830_GMCH_GMS_STOLEN_1024:
			stolen_size = MB(1);
			break;
		case I830_GMCH_GMS_STOLEN_8192:
			stolen_size = MB(8);
			break;
		case I830_GMCH_GMS_LOCAL:
			rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
			stolen_size = (I830_RDRAM_ND(rdct) + 1) *
					MB(ddt[I830_RDRAM_DDT(rdct)]);
			local = 1;
			break;
		default:
			stolen_size = 0;
			break;
		}
	} else {
		switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
		case I855_GMCH_GMS_STOLEN_1M:
			stolen_size = MB(1);
			break;
		case I855_GMCH_GMS_STOLEN_4M:
			stolen_size = MB(4);
			break;
		case I855_GMCH_GMS_STOLEN_8M:
			stolen_size = MB(8);
			break;
		case I855_GMCH_GMS_STOLEN_16M:
			stolen_size = MB(16);
			break;
		case I855_GMCH_GMS_STOLEN_32M:
			stolen_size = MB(32);
			break;
		case I915_GMCH_GMS_STOLEN_48M:
			stolen_size = MB(48);
			break;
		case I915_GMCH_GMS_STOLEN_64M:
			stolen_size = MB(64);
			break;
		case G33_GMCH_GMS_STOLEN_128M:
			stolen_size = MB(128);
			break;
		case G33_GMCH_GMS_STOLEN_256M:
			stolen_size = MB(256);
			break;
		case INTEL_GMCH_GMS_STOLEN_96M:
			stolen_size = MB(96);
			break;
		case INTEL_GMCH_GMS_STOLEN_160M:
			stolen_size = MB(160);
			break;
		case INTEL_GMCH_GMS_STOLEN_224M:
			stolen_size = MB(224);
			break;
		case INTEL_GMCH_GMS_STOLEN_352M:
			stolen_size = MB(352);
			break;
		default:
			stolen_size = 0;
			break;
		}
	}

	if (stolen_size > 0) {
		dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
		       stolen_size / KB(1), local ? "local" : "stolen");
	} else {
		dev_info(&intel_private.bridge_dev->dev,
		       "no pre-allocated video memory detected\n");
		stolen_size = 0;
	}

	return stolen_size;
}

static void i965_adjust_pgetbl_size(unsigned int size_flag)
{
	u32 pgetbl_ctl, pgetbl_ctl2;

	/* ensure that ppgtt is disabled */
	pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
	pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
	writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);

	/* write the new ggtt size */
	pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
	pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
	pgetbl_ctl |= size_flag;
	writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
}

static unsigned int i965_gtt_total_entries(void)
{
	int size;
	u32 pgetbl_ctl;
	u16 gmch_ctl;

	pci_read_config_word(intel_private.bridge_dev,
			     I830_GMCH_CTRL, &gmch_ctl);

	if (INTEL_GTT_GEN == 5) {
		switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
		case G4x_GMCH_SIZE_1M:
		case G4x_GMCH_SIZE_VT_1M:
			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
			break;
		case G4x_GMCH_SIZE_VT_1_5M:
			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
			break;
		case G4x_GMCH_SIZE_2M:
		case G4x_GMCH_SIZE_VT_2M:
			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
			break;
		}
	}

	pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

	switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
	case I965_PGETBL_SIZE_128KB:
		size = KB(128);
		break;
	case I965_PGETBL_SIZE_256KB:
		size = KB(256);
		break;
	case I965_PGETBL_SIZE_512KB:
		size = KB(512);
		break;
	/* GTT pagetable sizes bigger than 512KB are not possible on G33! */
	case I965_PGETBL_SIZE_1MB:
		size = KB(1024);
		break;
	case I965_PGETBL_SIZE_2MB:
		size = KB(2048);
		break;
	case I965_PGETBL_SIZE_1_5MB:
		size = KB(1024 + 512);
		break;
	default:
		dev_info(&intel_private.pcidev->dev,
			 "unknown page table size, assuming 512KB\n");
		size = KB(512);
	}

	return size/4;
}
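
/* Each GTT entry is a 4-byte (32-bit) PTE, hence the size/4 conversion above
 * and the gtt_total_entries * 4 byte-sizing in intel_gtt_init() below. */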

static unsigned int intel_gtt_total_entries(void)
{
	if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
		return i965_gtt_total_entries();
	else {
		/* On previous hardware, the GTT size was just what was
		 * required to map the aperture.
		 */
		return intel_private.gtt_mappable_entries;
	}
}

static unsigned int intel_gtt_mappable_entries(void)
{
	unsigned int aperture_size;

	if (INTEL_GTT_GEN == 1) {
		u32 smram_miscc;

		pci_read_config_dword(intel_private.bridge_dev,
				      I810_SMRAM_MISCC, &smram_miscc);

		if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
				== I810_GFX_MEM_WIN_32M)
			aperture_size = MB(32);
		else
			aperture_size = MB(64);
	} else if (INTEL_GTT_GEN == 2) {
		u16 gmch_ctrl;

		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);

		if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
			aperture_size = MB(64);
		else
			aperture_size = MB(128);
	} else {
		/* 9xx supports large sizes, just look at the length */
		aperture_size = pci_resource_len(intel_private.pcidev, 2);
	}

	return aperture_size >> PAGE_SHIFT;
}

static void intel_gtt_teardown_scratch_page(void)
{
   // FreePage(intel_private.scratch_page_dma);
}

static void intel_gtt_cleanup(void)
{
	intel_private.driver->cleanup();

	iounmap(intel_private.gtt);
	iounmap(intel_private.registers);

	intel_gtt_teardown_scratch_page();
}

/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline int needs_ilk_vtd_wa(void)
{
#ifdef CONFIG_INTEL_IOMMU
	const unsigned short gpu_devid = intel_private.pcidev->device;

	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
	     gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
	     intel_iommu_gfx_mapped)
		return 1;
#endif
	return 0;
}

static bool intel_gtt_can_wc(void)
{
	if (INTEL_GTT_GEN <= 2)
		return false;

	if (INTEL_GTT_GEN >= 6)
		return false;

	/* Reports of major corruption with ILK vt'd enabled */
	if (needs_ilk_vtd_wa())
		return false;

	return true;
}
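
/* intel_gtt_init() below prefers a write-combining mapping of the GTT for
 * faster PTE updates and falls back to an uncached ioremap() whenever WC is
 * unavailable or unsafe: gen2 and older, gen6+, or the Ironlake VT-d
 * corruption case checked above. */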

static int intel_gtt_init(void)
{
	u32 gtt_map_size;
	int ret, bar;

	ret = intel_private.driver->setup();
	if (ret != 0)
		return ret;

	intel_private.gtt_mappable_entries = intel_gtt_mappable_entries();
	intel_private.gtt_total_entries = intel_gtt_total_entries();

	/* save the PGETBL reg for resume */
	intel_private.PGETBL_save =
		readl(intel_private.registers+I810_PGETBL_CTL)
			& ~I810_PGETBL_ENABLED;
	/* we only ever restore the register when enabling the PGTBL... */
	if (HAS_PGTBL_EN)
		intel_private.PGETBL_save |= I810_PGETBL_ENABLED;

	dev_info(&intel_private.bridge_dev->dev,
			"detected gtt size: %dK total, %dK mappable\n",
			intel_private.gtt_total_entries * 4,
			intel_private.gtt_mappable_entries * 4);

	gtt_map_size = intel_private.gtt_total_entries * 4;

	intel_private.gtt = NULL;
	if (intel_gtt_can_wc())
		intel_private.gtt = ioremap_wc(intel_private.gtt_phys_addr,
					       gtt_map_size);
	if (intel_private.gtt == NULL)
		intel_private.gtt = ioremap(intel_private.gtt_phys_addr,
					    gtt_map_size);
	if (intel_private.gtt == NULL) {
		intel_private.driver->cleanup();
		iounmap(intel_private.registers);
		return -ENOMEM;
	}

#if IS_ENABLED(CONFIG_AGP_INTEL)
	global_cache_flush();   /* FIXME: ? */
#endif

	intel_private.stolen_size = intel_gtt_stolen_size();

	intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;

	ret = intel_gtt_setup_scratch_page();
	if (ret != 0) {
		intel_gtt_cleanup();
		return ret;
	}

	if (INTEL_GTT_GEN <= 2)
		bar = I810_GMADR_BAR;
	else
		bar = I915_GMADR_BAR;

	intel_private.gma_bus_addr = pci_bus_address(intel_private.pcidev, bar);
	return 0;
}

#if IS_ENABLED(CONFIG_AGP_INTEL)
static int intel_fake_agp_fetch_size(void)
{
	int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
	unsigned int aper_size;
	int i;

	aper_size = (intel_private.gtt_mappable_entries << PAGE_SHIFT) / MB(1);

	for (i = 0; i < num_sizes; i++) {
		if (aper_size == intel_fake_agp_sizes[i].size) {
			agp_bridge->current_size =
				(void *) (intel_fake_agp_sizes + i);
			return aper_size;
		}
	}

	return 0;
}
#endif

static void i830_write_entry(dma_addr_t addr, unsigned int entry,
			     unsigned int flags)
{
	u32 pte_flags = I810_PTE_VALID;

	if (flags ==  AGP_USER_CACHED_MEMORY)
		pte_flags |= I830_PTE_SYSTEM_CACHED;

	writel(addr | pte_flags, intel_private.gtt + entry);
}
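
/* A gen2 PTE is simply the page's 4K-aligned physical address OR'd with
 * flag bits in the low bits: I810_PTE_VALID must be set for the entry to be
 * live, and I830_PTE_SYSTEM_CACHED selects the cacheable mapping type. */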

bool intel_enable_gtt(void)
{
	u8 __iomem *reg;

	if (INTEL_GTT_GEN == 2) {
		u16 gmch_ctrl;

		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);
		gmch_ctrl |= I830_GMCH_ENABLED;
		pci_write_config_word(intel_private.bridge_dev,
				      I830_GMCH_CTRL, gmch_ctrl);

		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);
		if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
			dev_err(&intel_private.pcidev->dev,
				"failed to enable the GTT: GMCH_CTRL=%x\n",
				gmch_ctrl);
			return false;
		}
	}

	/* On the resume path we may be adjusting the PGTBL value, so
	 * be paranoid and flush all chipset write buffers...
	 */
	if (INTEL_GTT_GEN >= 3)
		writel(0, intel_private.registers+GFX_FLSH_CNTL);

	reg = intel_private.registers+I810_PGETBL_CTL;
	writel(intel_private.PGETBL_save, reg);
	if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
		dev_err(&intel_private.pcidev->dev,
			"failed to enable the GTT: PGETBL=%x [expected %x]\n",
			readl(reg), intel_private.PGETBL_save);
		return false;
	}

	if (INTEL_GTT_GEN >= 3)
		writel(0, intel_private.registers+GFX_FLSH_CNTL);

	return true;
}
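
/* Note the write-then-read-back pattern above: both the GMCH enable bit and
 * PGETBL_CTL are verified after writing, since the hardware may silently
 * refuse either write, and a dead GTT is better reported here than
 * discovered later through faults. */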
EXPORT_SYMBOL(intel_enable_gtt);

#if IS_ENABLED(CONFIG_AGP_INTEL)
static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
{
	agp_bridge->gatt_table_real = NULL;
	agp_bridge->gatt_table = NULL;
	agp_bridge->gatt_bus_addr = 0;

	return 0;
}

static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
{
	return 0;
}

static int intel_fake_agp_configure(void)
{
	if (!intel_enable_gtt())
	    return -EIO;

	intel_private.clear_fake_agp = true;
	agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;

	return 0;
}
#endif

static bool i830_check_flags(unsigned int flags)
{
	switch (flags) {
	case 0:
	case AGP_PHYS_MEMORY:
	case AGP_USER_CACHED_MEMORY:
	case AGP_USER_MEMORY:
		return true;
	}

	return false;
}

void intel_gtt_insert_sg_entries(struct sg_table *st,
				 unsigned int pg_start,
				 unsigned int flags)
{
	struct scatterlist *sg;
	unsigned int len, m;
	int i, j;

	j = pg_start;

	/* sg may merge pages, but we have to separate
	 * per-page addr for GTT */
	for_each_sg(st->sgl, sg, st->nents, i) {
		len = sg_dma_len(sg) >> PAGE_SHIFT;
		for (m = 0; m < len; m++) {
			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
			intel_private.driver->write_entry(addr, j, flags);
			j++;
		}
	}
	wmb();
}
EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
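
/* Condensed shape of the dmar path in intel_fake_agp_insert_entries() below:
 *
 *	struct sg_table st;
 *	intel_gtt_map_memory(mem->pages, mem->page_count, &st);
 *	intel_gtt_insert_sg_entries(&st, pg_start, type);
 *	...
 *	intel_gtt_clear_range(pg_start, mem->page_count);
 *	intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
 *
 * i.e. map the pages for DMA once, then write one PTE per page; teardown in
 * intel_fake_agp_remove_entries() is the mirror image. */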

#if IS_ENABLED(CONFIG_AGP_INTEL)
static void intel_gtt_insert_pages(unsigned int first_entry,
				   unsigned int num_entries,
				   struct page **pages,
				   unsigned int flags)
{
	int i, j;

	for (i = 0, j = first_entry; i < num_entries; i++, j++) {
		dma_addr_t addr = page_to_phys(pages[i]);
		intel_private.driver->write_entry(addr,
						  j, flags);
	}
	wmb();
}

static int intel_fake_agp_insert_entries(struct agp_memory *mem,
					 off_t pg_start, int type)
{
	int ret = -EINVAL;

	if (intel_private.clear_fake_agp) {
		int start = intel_private.stolen_size / PAGE_SIZE;
		int end = intel_private.gtt_mappable_entries;
		intel_gtt_clear_range(start, end - start);
		intel_private.clear_fake_agp = false;
	}

	if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
		return i810_insert_dcache_entries(mem, pg_start, type);

	if (mem->page_count == 0)
		goto out;

	if (pg_start + mem->page_count > intel_private.gtt_total_entries)
		goto out_err;

	if (type != mem->type)
		goto out_err;

	if (!intel_private.driver->check_flags(type))
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	if (intel_private.needs_dmar) {
		struct sg_table st;

		ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
		if (ret != 0)
			return ret;

		intel_gtt_insert_sg_entries(&st, pg_start, type);
		mem->sg_list = st.sgl;
		mem->num_sg = st.nents;
	} else
		intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
				       type);

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}
#endif

void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
{
	unsigned int i;

	for (i = first_entry; i < (first_entry + num_entries); i++) {
		intel_private.driver->write_entry(intel_private.scratch_page_dma,
						  i, 0);
	}
	wmb();
}
EXPORT_SYMBOL(intel_gtt_clear_range);
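
/* "Clearing" never leaves a PTE invalid: each entry is redirected at the
 * scratch page instead, so the GTT always maps something safe even if the
 * GPU prefetches past the end of a live binding. */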

#if IS_ENABLED(CONFIG_AGP_INTEL)
static int intel_fake_agp_remove_entries(struct agp_memory *mem,
					 off_t pg_start, int type)
{
	if (mem->page_count == 0)
		return 0;

	intel_gtt_clear_range(pg_start, mem->page_count);

	if (intel_private.needs_dmar) {
		intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
		mem->sg_list = NULL;
		mem->num_sg = 0;
	}

	return 0;
}

static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
						       int type)
{
	struct agp_memory *new;

	if (type == AGP_DCACHE_MEMORY && INTEL_GTT_GEN == 1) {
		if (pg_count != intel_private.num_dcache_entries)
			return NULL;

		new = agp_create_memory(1);
		if (new == NULL)
			return NULL;

		new->type = AGP_DCACHE_MEMORY;
		new->page_count = pg_count;
		new->num_scratch_pages = 0;
		agp_free_page_array(new);
		return new;
	}
	if (type == AGP_PHYS_MEMORY)
		return alloc_agpphysmem_i8xx(pg_count, type);
	/* always return NULL for other allocation types for now */
	return NULL;
}
#endif
static void intel_i915_setup_chipset_flush(void)
{
	int ret;
	u32 temp;

	pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
	if (!(temp & 0x1)) {
//		intel_alloc_chipset_flush_resource();
//		intel_private.resource_valid = 1;
//		pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
	} else {
		temp &= ~1;

		intel_private.resource_valid = 1;
		intel_private.ifp_resource.start = temp;
		intel_private.ifp_resource.end = temp + PAGE_SIZE;
//		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a pnp some don't */
//		if (ret)
//			intel_private.resource_valid = 0;
	}
}

static void intel_i965_g33_setup_chipset_flush(void)
{
	u32 temp_hi, temp_lo;
	int ret;

	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);

	if (!(temp_lo & 0x1)) {

//		intel_alloc_chipset_flush_resource();

//		intel_private.resource_valid = 1;
//		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
//			upper_32_bits(intel_private.ifp_resource.start));
//		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
	} else {
		u64 l64;

		temp_lo &= ~0x1;
		l64 = ((u64)temp_hi << 32) | temp_lo;

		intel_private.resource_valid = 1;
		intel_private.ifp_resource.start = l64;
		intel_private.ifp_resource.end = l64 + PAGE_SIZE;
//		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a pnp some don't */
//		if (ret)
//			intel_private.resource_valid = 0;
	}
}

static void intel_i9xx_setup_flush(void)
{
	/* return if already configured */
	if (intel_private.ifp_resource.start)
		return;

	if (INTEL_GTT_GEN == 6)
		return;

	/* setup a resource for this object */
	intel_private.ifp_resource.name = "Intel Flush Page";
	intel_private.ifp_resource.flags = IORESOURCE_MEM;

	/* Setup chipset flush for 915 */
	if (IS_G33 || INTEL_GTT_GEN >= 4) {
		intel_i965_g33_setup_chipset_flush();
	} else {
		intel_i915_setup_chipset_flush();
	}

	if (intel_private.ifp_resource.start)
		intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
	if (!intel_private.i9xx_flush_page)
		dev_err(&intel_private.pcidev->dev,
			"can't ioremap flush page - no chipset flushing\n");
}

static void i9xx_cleanup(void)
{
	if (intel_private.i9xx_flush_page)
		iounmap(intel_private.i9xx_flush_page);
//	if (intel_private.resource_valid)
//		release_resource(&intel_private.ifp_resource);
	intel_private.ifp_resource.start = 0;
	intel_private.resource_valid = 0;
}

static void i9xx_chipset_flush(void)
{
	if (intel_private.i9xx_flush_page)
		writel(1, intel_private.i9xx_flush_page);
}
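
/* The flush page is a magic MMIO page advertised through the IFPADDR config
 * register: writing any value to it makes the GMCH flush its internal write
 * buffers, since these chipsets lack a dedicated flush register. */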
 
static void i965_write_entry(dma_addr_t addr,
			     unsigned int entry,
			     unsigned int flags)
{
	u32 pte_flags;

	pte_flags = I810_PTE_VALID;
	if (flags == AGP_USER_CACHED_MEMORY)
		pte_flags |= I830_PTE_SYSTEM_CACHED;

	/* Shift high bits down */
	addr |= (addr >> 28) & 0xf0;
	writel(addr | pte_flags, intel_private.gtt + entry);
}
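
/*
 * Worked example of the bit packing above (illustrative address, not from
 * this file): the GTT entry is a 32-bit dword, so physical-address bits
 * [35:32] are folded into PTE bits [7:4].  For addr = 0x923456000:
 *
 *	(addr >> 28) & 0xf0 = 0x90	// bits [35:32] = 0x9 moved to [7:4]
 *	addr |= 0x90			// low 12 bits were 0 (page aligned)
 *	writel() truncates to 32 bits	// PTE = 0x23456090 | pte_flags
 */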
 
static int i9xx_setup(void)
{
	phys_addr_t reg_addr;
	int size = KB(512);

	reg_addr = pci_resource_start(intel_private.pcidev, I915_MMADR_BAR);

	intel_private.registers = ioremap(reg_addr, size);
	if (!intel_private.registers)
		return -ENOMEM;

	switch (INTEL_GTT_GEN) {
	case 3:
		intel_private.gtt_phys_addr =
			pci_resource_start(intel_private.pcidev, I915_PTE_BAR);
		break;
	case 5:
		intel_private.gtt_phys_addr = reg_addr + MB(2);
		break;
	default:
		intel_private.gtt_phys_addr = reg_addr + KB(512);
		break;
	}

	intel_i9xx_setup_flush();

	return 0;
}
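
/*
 * Summary of the switch above: where the GTT page table lives per
 * generation.  Gen 3 exposes the PTEs through their own PCI BAR
 * (I915_PTE_BAR); gen 4 places them at a 512KB offset into the MMIO BAR;
 * gen 5 (Ironlake) at a 2MB offset into the MMIO BAR.
 */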
 
#if IS_ENABLED(CONFIG_AGP_INTEL)
static const struct agp_bridge_driver intel_fake_agp_driver = {
	.owner			= THIS_MODULE,
	.size_type		= FIXED_APER_SIZE,
	.aperture_sizes		= intel_fake_agp_sizes,
	.num_aperture_sizes	= ARRAY_SIZE(intel_fake_agp_sizes),
	.configure		= intel_fake_agp_configure,
	.fetch_size		= intel_fake_agp_fetch_size,
	.cleanup		= intel_gtt_cleanup,
	.agp_enable		= intel_fake_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_fake_agp_create_gatt_table,
	.free_gatt_table	= intel_fake_agp_free_gatt_table,
	.insert_memory		= intel_fake_agp_insert_entries,
	.remove_memory		= intel_fake_agp_remove_entries,
	.alloc_by_type		= intel_fake_agp_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
};
#endif

static const struct intel_gtt_driver i915_gtt_driver = {
	.gen = 3,
	.has_pgtbl_enable = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	/* i945 is the last gpu to need phys mem (for overlay and cursors). */
	.write_entry = i830_write_entry,
	.dma_mask_size = 32,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g33_gtt_driver = {
	.gen = 3,
	.is_g33 = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver pineview_gtt_driver = {
	.gen = 3,
	.is_pineview = 1, .is_g33 = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver i965_gtt_driver = {
	.gen = 4,
	.has_pgtbl_enable = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g4x_gtt_driver = {
	.gen = 5,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver ironlake_gtt_driver = {
	.gen = 5,
	.is_ironlake = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
 
/* Table describing the supported Intel GMCH chipsets.  find_gmch() matches
 * the gmch_chip_id below against the integrated graphics PCI device and
 * selects the corresponding gtt_driver.
 */
static const struct intel_gtt_driver_description {
	unsigned int gmch_chip_id;
	char *name;
	const struct intel_gtt_driver *gtt_driver;
} intel_gtt_chipsets[] = {
	{ PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G33_IG, "G33",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
		&pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
		&pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_IG, "B43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G41_IG, "G41",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
	    "HD Graphics", &ironlake_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
	    "HD Graphics", &ironlake_gtt_driver },
	{ 0, NULL, NULL }
};
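
/*
 * The table above is the whole probe policy: each entry maps an IG
 * (integrated graphics) PCI device id to the driver vtable for its
 * generation, and { 0, NULL, NULL } is the sentinel that terminates the
 * lookup loop in intel_gmch_probe().
 */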
 
static int find_gmch(u16 device)
{
	struct pci_dev *gmch_device;

	gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
	if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
		gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
					     device, gmch_device);
	}

	if (!gmch_device)
		return 0;

	intel_private.pcidev = gmch_device;
	return 1;
}
 
int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
		     struct agp_bridge_data *bridge)
{
	int i, mask;

	/*
	 * Can be called from the fake agp driver but also directly from
	 * drm/i915.ko. Hence we need to check whether everything is set up
	 * already.
	 */
	if (intel_private.driver) {
		intel_private.refcount++;
		return 1;
	}

	for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
		if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
			intel_private.driver =
				intel_gtt_chipsets[i].gtt_driver;
			break;
		}
	}

	if (!intel_private.driver)
		return 0;

	intel_private.refcount++;

#if IS_ENABLED(CONFIG_AGP_INTEL)
	if (bridge) {
		bridge->driver = &intel_fake_agp_driver;
		bridge->dev_private_data = &intel_private;
		bridge->dev = bridge_pdev;
	}
#endif

	intel_private.bridge_dev = bridge_pdev;

	dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);

	mask = intel_private.driver->dma_mask_size;
//	if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
//		dev_err(&intel_private.pcidev->dev,
//			"set gfx device dma mask %d-bit failed!\n", mask);
//	else
//		pci_set_consistent_dma_mask(intel_private.pcidev,
//					    DMA_BIT_MASK(mask));

	if (intel_gtt_init() != 0) {
//		intel_gmch_remove();
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(intel_gmch_probe);
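
/*
 * Minimal usage sketch (hypothetical caller, mirroring how drm/i915 would
 * invoke this during device probe; the variable names are assumptions):
 *
 *	struct pci_dev *gpu = ...;	// the integrated graphics device
 *	struct pci_dev *bridge = ...;	// the host bridge
 *
 *	if (!intel_gmch_probe(bridge, gpu, NULL))
 *		return -ENODEV;		// unknown or unsupported GMCH
 *	// on success intel_private is initialised and refcounted
 */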
 
void intel_gtt_get(u64 *gtt_total, size_t *stolen_size,
		   phys_addr_t *mappable_base, u64 *mappable_end)
{
	*gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
	*stolen_size = intel_private.stolen_size;
	*mappable_base = intel_private.gma_bus_addr;
	*mappable_end = intel_private.gtt_mappable_entries << PAGE_SHIFT;
}
EXPORT_SYMBOL(intel_gtt_get);
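
/*
 * Usage sketch (hypothetical caller): after a successful probe, a driver
 * can pull the whole aperture geometry out in one call.
 *
 *	u64 gtt_size, mappable_end;
 *	size_t stolen;
 *	phys_addr_t aperture_base;
 *
 *	intel_gtt_get(&gtt_size, &stolen, &aperture_base, &mappable_end);
 */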
 
void intel_gtt_chipset_flush(void)
{
	if (intel_private.driver->chipset_flush)
		intel_private.driver->chipset_flush();
}
EXPORT_SYMBOL(intel_gtt_chipset_flush);
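
/*
 * Illustrative pattern only (not a real call site in this file): PTE
 * updates are batched, then pushed out to the chipset with one flush.
 *
 *	for (i = 0; i < num_entries; i++)
 *		intel_private.driver->write_entry(addr[i], first + i, flags);
 *	intel_gtt_chipset_flush();
 */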
 
 
MODULE_AUTHOR("Dave Jones, Various @Intel");
MODULE_LICENSE("GPL and additional rights");