Rev 3037 | Rev 3480 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 3037 | Rev 3243 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Intel GTT (Graphics Translation Table) routines |
2 | * Intel GTT (Graphics Translation Table) routines |
3 | * |
3 | * |
4 | * Caveat: This driver implements the linux agp interface, but this is far from |
4 | * Caveat: This driver implements the linux agp interface, but this is far from |
5 | * a agp driver! GTT support ended up here for purely historical reasons: The |
5 | * a agp driver! GTT support ended up here for purely historical reasons: The |
6 | * old userspace intel graphics drivers needed an interface to map memory into |
6 | * old userspace intel graphics drivers needed an interface to map memory into |
7 | * the GTT. And the drm provides a default interface for graphic devices sitting |
7 | * the GTT. And the drm provides a default interface for graphic devices sitting |
8 | * on an agp port. So it made sense to fake the GTT support as an agp port to |
8 | * on an agp port. So it made sense to fake the GTT support as an agp port to |
9 | * avoid having to create a new api. |
9 | * avoid having to create a new api. |
10 | * |
10 | * |
11 | * With gem this does not make much sense anymore, just needlessly complicates |
11 | * With gem this does not make much sense anymore, just needlessly complicates |
12 | * the code. But as long as the old graphics stack is still support, it's stuck |
12 | * the code. But as long as the old graphics stack is still support, it's stuck |
13 | * here. |
13 | * here. |
14 | * |
14 | * |
15 | * /fairy-tale-mode off |
15 | * /fairy-tale-mode off |
16 | */ |
16 | */ |
17 | 17 | ||
18 | #include |
18 | #include |
19 | #include |
19 | #include |
20 | #include |
20 | #include |
21 | #include |
21 | #include |
22 | #include |
22 | #include |
- | 23 | #include |
|
- | 24 | ||
23 | //#include |
25 | //#include |
24 | //#include |
26 | //#include |
25 | //#include |
27 | //#include |
26 | #include |
28 | #include |
27 | #include "agp.h" |
29 | #include "agp.h" |
28 | #include "intel-agp.h" |
30 | #include "intel-agp.h" |
29 | #include "intel-gtt.h" |
31 | #include |
30 | 32 | ||
31 | #include |
33 | #include |
32 | 34 | ||
33 | struct pci_dev * |
35 | struct pci_dev * |
34 | pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from); |
36 | pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from); |
35 | 37 | ||
36 | 38 | ||
37 | #define PCI_VENDOR_ID_INTEL 0x8086 |
39 | #define PCI_VENDOR_ID_INTEL 0x8086 |
38 | #define PCI_DEVICE_ID_INTEL_82830_HB 0x3575 |
40 | #define PCI_DEVICE_ID_INTEL_82830_HB 0x3575 |
39 | #define PCI_DEVICE_ID_INTEL_82845G_HB 0x2560 |
41 | #define PCI_DEVICE_ID_INTEL_82845G_HB 0x2560 |
40 | #define PCI_DEVICE_ID_INTEL_82915G_IG 0x2582 |
42 | #define PCI_DEVICE_ID_INTEL_82915G_IG 0x2582 |
41 | #define PCI_DEVICE_ID_INTEL_82915GM_IG 0x2592 |
43 | #define PCI_DEVICE_ID_INTEL_82915GM_IG 0x2592 |
42 | #define PCI_DEVICE_ID_INTEL_82945G_IG 0x2772 |
44 | #define PCI_DEVICE_ID_INTEL_82945G_IG 0x2772 |
43 | #define PCI_DEVICE_ID_INTEL_82945GM_IG 0x27A2 |
45 | #define PCI_DEVICE_ID_INTEL_82945GM_IG 0x27A2 |
44 | 46 | ||
45 | 47 | ||
46 | #define AGP_NORMAL_MEMORY 0 |
48 | #define AGP_NORMAL_MEMORY 0 |
47 | 49 | ||
48 | #define AGP_USER_TYPES (1 << 16) |
50 | #define AGP_USER_TYPES (1 << 16) |
49 | #define AGP_USER_MEMORY (AGP_USER_TYPES) |
51 | #define AGP_USER_MEMORY (AGP_USER_TYPES) |
50 | #define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1) |
52 | #define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1) |
51 | 53 | ||
52 | 54 | ||
53 | 55 | ||
54 | /* |
56 | /* |
55 | * If we have Intel graphics, we're not going to have anything other than |
57 | * If we have Intel graphics, we're not going to have anything other than |
56 | * an Intel IOMMU. So make the correct use of the PCI DMA API contingent |
58 | * an Intel IOMMU. So make the correct use of the PCI DMA API contingent |
57 | * on the Intel IOMMU support (CONFIG_INTEL_IOMMU). |
59 | * on the Intel IOMMU support (CONFIG_INTEL_IOMMU). |
58 | * Only newer chipsets need to bother with this, of course. |
60 | * Only newer chipsets need to bother with this, of course. |
59 | */ |
61 | */ |
60 | #ifdef CONFIG_INTEL_IOMMU |
62 | #ifdef CONFIG_INTEL_IOMMU |
61 | #define USE_PCI_DMA_API 1 |
63 | #define USE_PCI_DMA_API 1 |
62 | #else |
64 | #else |
63 | #define USE_PCI_DMA_API 0 |
65 | #define USE_PCI_DMA_API 0 |
64 | #endif |
66 | #endif |
65 | 67 | ||
66 | struct intel_gtt_driver { |
68 | struct intel_gtt_driver { |
67 | unsigned int gen : 8; |
69 | unsigned int gen : 8; |
68 | unsigned int is_g33 : 1; |
70 | unsigned int is_g33 : 1; |
69 | unsigned int is_pineview : 1; |
71 | unsigned int is_pineview : 1; |
70 | unsigned int is_ironlake : 1; |
72 | unsigned int is_ironlake : 1; |
71 | unsigned int has_pgtbl_enable : 1; |
73 | unsigned int has_pgtbl_enable : 1; |
72 | unsigned int dma_mask_size : 8; |
74 | unsigned int dma_mask_size : 8; |
73 | /* Chipset specific GTT setup */ |
75 | /* Chipset specific GTT setup */ |
74 | int (*setup)(void); |
76 | int (*setup)(void); |
75 | /* This should undo anything done in ->setup() save the unmapping |
77 | /* This should undo anything done in ->setup() save the unmapping |
76 | * of the mmio register file, that's done in the generic code. */ |
78 | * of the mmio register file, that's done in the generic code. */ |
77 | void (*cleanup)(void); |
79 | void (*cleanup)(void); |
78 | void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags); |
80 | void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags); |
79 | /* Flags is a more or less chipset specific opaque value. |
81 | /* Flags is a more or less chipset specific opaque value. |
80 | * For chipsets that need to support old ums (non-gem) code, this |
82 | * For chipsets that need to support old ums (non-gem) code, this |
81 | * needs to be identical to the various supported agp memory types! */ |
83 | * needs to be identical to the various supported agp memory types! */ |
82 | bool (*check_flags)(unsigned int flags); |
84 | bool (*check_flags)(unsigned int flags); |
83 | void (*chipset_flush)(void); |
85 | void (*chipset_flush)(void); |
84 | }; |
86 | }; |
85 | 87 | ||
86 | static struct _intel_private { |
88 | static struct _intel_private { |
87 | struct intel_gtt base; |
89 | struct intel_gtt base; |
88 | const struct intel_gtt_driver *driver; |
90 | const struct intel_gtt_driver *driver; |
89 | struct pci_dev *pcidev; /* device one */ |
91 | struct pci_dev *pcidev; /* device one */ |
90 | struct pci_dev *bridge_dev; |
92 | struct pci_dev *bridge_dev; |
91 | u8 __iomem *registers; |
93 | u8 __iomem *registers; |
92 | phys_addr_t gtt_bus_addr; |
94 | phys_addr_t gtt_bus_addr; |
93 | u32 PGETBL_save; |
95 | u32 PGETBL_save; |
94 | u32 __iomem *gtt; /* I915G */ |
96 | u32 __iomem *gtt; /* I915G */ |
95 | bool clear_fake_agp; /* on first access via agp, fill with scratch */ |
97 | bool clear_fake_agp; /* on first access via agp, fill with scratch */ |
96 | int num_dcache_entries; |
98 | int num_dcache_entries; |
97 | void __iomem *i9xx_flush_page; |
99 | void __iomem *i9xx_flush_page; |
98 | char *i81x_gtt_table; |
100 | char *i81x_gtt_table; |
99 | struct resource ifp_resource; |
101 | struct resource ifp_resource; |
100 | int resource_valid; |
102 | int resource_valid; |
101 | struct page *scratch_page; |
103 | struct page *scratch_page; |
102 | int refcount; |
104 | int refcount; |
103 | } intel_private; |
105 | } intel_private; |
104 | 106 | ||
105 | #define INTEL_GTT_GEN intel_private.driver->gen |
107 | #define INTEL_GTT_GEN intel_private.driver->gen |
106 | #define IS_G33 intel_private.driver->is_g33 |
108 | #define IS_G33 intel_private.driver->is_g33 |
107 | #define IS_PINEVIEW intel_private.driver->is_pineview |
109 | #define IS_PINEVIEW intel_private.driver->is_pineview |
108 | #define IS_IRONLAKE intel_private.driver->is_ironlake |
110 | #define IS_IRONLAKE intel_private.driver->is_ironlake |
109 | #define HAS_PGTBL_EN intel_private.driver->has_pgtbl_enable |
111 | #define HAS_PGTBL_EN intel_private.driver->has_pgtbl_enable |
110 | 112 | ||
111 | static int intel_gtt_setup_scratch_page(void) |
113 | static int intel_gtt_setup_scratch_page(void) |
112 | { |
114 | { |
- | 115 | struct page *page; |
|
113 | dma_addr_t dma_addr; |
116 | dma_addr_t dma_addr; |
114 | 117 | ||
115 | dma_addr = AllocPage(); |
118 | page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); |
116 | if (dma_addr == 0) |
119 | if (page == NULL) |
- | 120 | return -ENOMEM; |
|
117 | return -ENOMEM; |
- | |
118 | 121 | intel_private.base.scratch_page_dma = page_to_phys(page); |
|
119 | intel_private.base.scratch_page_dma = dma_addr; |
122 | |
120 | intel_private.scratch_page = NULL; |
123 | intel_private.scratch_page = page; |
121 | 124 | ||
122 | return 0; |
125 | return 0; |
123 | } |
126 | } |
124 | 127 | ||
125 | static unsigned int intel_gtt_stolen_size(void) |
128 | static unsigned int intel_gtt_stolen_size(void) |
126 | { |
129 | { |
127 | u16 gmch_ctrl; |
130 | u16 gmch_ctrl; |
128 | u8 rdct; |
131 | u8 rdct; |
129 | int local = 0; |
132 | int local = 0; |
130 | static const int ddt[4] = { 0, 16, 32, 64 }; |
133 | static const int ddt[4] = { 0, 16, 32, 64 }; |
131 | unsigned int stolen_size = 0; |
134 | unsigned int stolen_size = 0; |
132 | 135 | ||
133 | if (INTEL_GTT_GEN == 1) |
136 | if (INTEL_GTT_GEN == 1) |
134 | return 0; /* no stolen mem on i81x */ |
137 | return 0; /* no stolen mem on i81x */ |
135 | 138 | ||
136 | pci_read_config_word(intel_private.bridge_dev, |
139 | pci_read_config_word(intel_private.bridge_dev, |
137 | I830_GMCH_CTRL, &gmch_ctrl); |
140 | I830_GMCH_CTRL, &gmch_ctrl); |
138 | 141 | ||
139 | if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB || |
142 | if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB || |
140 | intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) { |
143 | intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) { |
141 | switch (gmch_ctrl & I830_GMCH_GMS_MASK) { |
144 | switch (gmch_ctrl & I830_GMCH_GMS_MASK) { |
142 | case I830_GMCH_GMS_STOLEN_512: |
145 | case I830_GMCH_GMS_STOLEN_512: |
143 | stolen_size = KB(512); |
146 | stolen_size = KB(512); |
144 | break; |
147 | break; |
145 | case I830_GMCH_GMS_STOLEN_1024: |
148 | case I830_GMCH_GMS_STOLEN_1024: |
146 | stolen_size = MB(1); |
149 | stolen_size = MB(1); |
147 | break; |
150 | break; |
148 | case I830_GMCH_GMS_STOLEN_8192: |
151 | case I830_GMCH_GMS_STOLEN_8192: |
149 | stolen_size = MB(8); |
152 | stolen_size = MB(8); |
150 | break; |
153 | break; |
151 | case I830_GMCH_GMS_LOCAL: |
154 | case I830_GMCH_GMS_LOCAL: |
152 | rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE); |
155 | rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE); |
153 | stolen_size = (I830_RDRAM_ND(rdct) + 1) * |
156 | stolen_size = (I830_RDRAM_ND(rdct) + 1) * |
154 | MB(ddt[I830_RDRAM_DDT(rdct)]); |
157 | MB(ddt[I830_RDRAM_DDT(rdct)]); |
155 | local = 1; |
158 | local = 1; |
156 | break; |
159 | break; |
157 | default: |
160 | default: |
158 | stolen_size = 0; |
161 | stolen_size = 0; |
159 | break; |
162 | break; |
160 | } |
163 | } |
161 | } else if (INTEL_GTT_GEN == 6) { |
- | |
162 | /* |
- | |
163 | * SandyBridge has new memory control reg at 0x50.w |
- | |
164 | */ |
- | |
165 | u16 snb_gmch_ctl; |
- | |
166 | pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); |
- | |
167 | switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) { |
- | |
168 | case SNB_GMCH_GMS_STOLEN_32M: |
- | |
169 | stolen_size = MB(32); |
- | |
170 | break; |
- | |
171 | case SNB_GMCH_GMS_STOLEN_64M: |
- | |
172 | stolen_size = MB(64); |
- | |
173 | break; |
- | |
174 | case SNB_GMCH_GMS_STOLEN_96M: |
- | |
175 | stolen_size = MB(96); |
- | |
176 | break; |
- | |
177 | case SNB_GMCH_GMS_STOLEN_128M: |
- | |
178 | stolen_size = MB(128); |
- | |
179 | break; |
- | |
180 | case SNB_GMCH_GMS_STOLEN_160M: |
- | |
181 | stolen_size = MB(160); |
- | |
182 | break; |
- | |
183 | case SNB_GMCH_GMS_STOLEN_192M: |
- | |
184 | stolen_size = MB(192); |
- | |
185 | break; |
- | |
186 | case SNB_GMCH_GMS_STOLEN_224M: |
- | |
187 | stolen_size = MB(224); |
- | |
188 | break; |
- | |
189 | case SNB_GMCH_GMS_STOLEN_256M: |
- | |
190 | stolen_size = MB(256); |
- | |
191 | break; |
- | |
192 | case SNB_GMCH_GMS_STOLEN_288M: |
- | |
193 | stolen_size = MB(288); |
- | |
194 | break; |
- | |
195 | case SNB_GMCH_GMS_STOLEN_320M: |
- | |
196 | stolen_size = MB(320); |
- | |
197 | break; |
- | |
198 | case SNB_GMCH_GMS_STOLEN_352M: |
- | |
199 | stolen_size = MB(352); |
- | |
200 | break; |
- | |
201 | case SNB_GMCH_GMS_STOLEN_384M: |
- | |
202 | stolen_size = MB(384); |
- | |
203 | break; |
- | |
204 | case SNB_GMCH_GMS_STOLEN_416M: |
- | |
205 | stolen_size = MB(416); |
- | |
206 | break; |
- | |
207 | case SNB_GMCH_GMS_STOLEN_448M: |
- | |
208 | stolen_size = MB(448); |
- | |
209 | break; |
- | |
210 | case SNB_GMCH_GMS_STOLEN_480M: |
- | |
211 | stolen_size = MB(480); |
- | |
212 | break; |
- | |
213 | case SNB_GMCH_GMS_STOLEN_512M: |
- | |
214 | stolen_size = MB(512); |
- | |
215 | break; |
- | |
216 | } |
- | |
217 | } else { |
164 | } else { |
218 | switch (gmch_ctrl & I855_GMCH_GMS_MASK) { |
165 | switch (gmch_ctrl & I855_GMCH_GMS_MASK) { |
219 | case I855_GMCH_GMS_STOLEN_1M: |
166 | case I855_GMCH_GMS_STOLEN_1M: |
220 | stolen_size = MB(1); |
167 | stolen_size = MB(1); |
221 | break; |
168 | break; |
222 | case I855_GMCH_GMS_STOLEN_4M: |
169 | case I855_GMCH_GMS_STOLEN_4M: |
223 | stolen_size = MB(4); |
170 | stolen_size = MB(4); |
224 | break; |
171 | break; |
225 | case I855_GMCH_GMS_STOLEN_8M: |
172 | case I855_GMCH_GMS_STOLEN_8M: |
226 | stolen_size = MB(8); |
173 | stolen_size = MB(8); |
227 | break; |
174 | break; |
228 | case I855_GMCH_GMS_STOLEN_16M: |
175 | case I855_GMCH_GMS_STOLEN_16M: |
229 | stolen_size = MB(16); |
176 | stolen_size = MB(16); |
230 | break; |
177 | break; |
231 | case I855_GMCH_GMS_STOLEN_32M: |
178 | case I855_GMCH_GMS_STOLEN_32M: |
232 | stolen_size = MB(32); |
179 | stolen_size = MB(32); |
233 | break; |
180 | break; |
234 | case I915_GMCH_GMS_STOLEN_48M: |
181 | case I915_GMCH_GMS_STOLEN_48M: |
235 | stolen_size = MB(48); |
182 | stolen_size = MB(48); |
236 | break; |
183 | break; |
237 | case I915_GMCH_GMS_STOLEN_64M: |
184 | case I915_GMCH_GMS_STOLEN_64M: |
238 | stolen_size = MB(64); |
185 | stolen_size = MB(64); |
239 | break; |
186 | break; |
240 | case G33_GMCH_GMS_STOLEN_128M: |
187 | case G33_GMCH_GMS_STOLEN_128M: |
241 | stolen_size = MB(128); |
188 | stolen_size = MB(128); |
242 | break; |
189 | break; |
243 | case G33_GMCH_GMS_STOLEN_256M: |
190 | case G33_GMCH_GMS_STOLEN_256M: |
244 | stolen_size = MB(256); |
191 | stolen_size = MB(256); |
245 | break; |
192 | break; |
246 | case INTEL_GMCH_GMS_STOLEN_96M: |
193 | case INTEL_GMCH_GMS_STOLEN_96M: |
247 | stolen_size = MB(96); |
194 | stolen_size = MB(96); |
248 | break; |
195 | break; |
249 | case INTEL_GMCH_GMS_STOLEN_160M: |
196 | case INTEL_GMCH_GMS_STOLEN_160M: |
250 | stolen_size = MB(160); |
197 | stolen_size = MB(160); |
251 | break; |
198 | break; |
252 | case INTEL_GMCH_GMS_STOLEN_224M: |
199 | case INTEL_GMCH_GMS_STOLEN_224M: |
253 | stolen_size = MB(224); |
200 | stolen_size = MB(224); |
254 | break; |
201 | break; |
255 | case INTEL_GMCH_GMS_STOLEN_352M: |
202 | case INTEL_GMCH_GMS_STOLEN_352M: |
256 | stolen_size = MB(352); |
203 | stolen_size = MB(352); |
257 | break; |
204 | break; |
258 | default: |
205 | default: |
259 | stolen_size = 0; |
206 | stolen_size = 0; |
260 | break; |
207 | break; |
261 | } |
208 | } |
262 | } |
209 | } |
263 | 210 | ||
264 | if (stolen_size > 0) { |
211 | if (stolen_size > 0) { |
265 | dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n", |
212 | dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n", |
266 | stolen_size / KB(1), local ? "local" : "stolen"); |
213 | stolen_size / KB(1), local ? "local" : "stolen"); |
267 | } else { |
214 | } else { |
268 | dev_info(&intel_private.bridge_dev->dev, |
215 | dev_info(&intel_private.bridge_dev->dev, |
269 | "no pre-allocated video memory detected\n"); |
216 | "no pre-allocated video memory detected\n"); |
270 | stolen_size = 0; |
217 | stolen_size = 0; |
271 | } |
218 | } |
272 | 219 | ||
273 | return stolen_size; |
220 | return stolen_size; |
274 | } |
221 | } |
275 | 222 | ||
276 | static void i965_adjust_pgetbl_size(unsigned int size_flag) |
223 | static void i965_adjust_pgetbl_size(unsigned int size_flag) |
277 | { |
224 | { |
278 | u32 pgetbl_ctl, pgetbl_ctl2; |
225 | u32 pgetbl_ctl, pgetbl_ctl2; |
279 | 226 | ||
280 | /* ensure that ppgtt is disabled */ |
227 | /* ensure that ppgtt is disabled */ |
281 | pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2); |
228 | pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2); |
282 | pgetbl_ctl2 &= ~I810_PGETBL_ENABLED; |
229 | pgetbl_ctl2 &= ~I810_PGETBL_ENABLED; |
283 | writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2); |
230 | writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2); |
284 | 231 | ||
285 | /* write the new ggtt size */ |
232 | /* write the new ggtt size */ |
286 | pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL); |
233 | pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL); |
287 | pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK; |
234 | pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK; |
288 | pgetbl_ctl |= size_flag; |
235 | pgetbl_ctl |= size_flag; |
289 | writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL); |
236 | writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL); |
290 | } |
237 | } |
291 | 238 | ||
292 | static unsigned int i965_gtt_total_entries(void) |
239 | static unsigned int i965_gtt_total_entries(void) |
293 | { |
240 | { |
294 | int size; |
241 | int size; |
295 | u32 pgetbl_ctl; |
242 | u32 pgetbl_ctl; |
296 | u16 gmch_ctl; |
243 | u16 gmch_ctl; |
297 | 244 | ||
298 | pci_read_config_word(intel_private.bridge_dev, |
245 | pci_read_config_word(intel_private.bridge_dev, |
299 | I830_GMCH_CTRL, &gmch_ctl); |
246 | I830_GMCH_CTRL, &gmch_ctl); |
300 | 247 | ||
301 | if (INTEL_GTT_GEN == 5) { |
248 | if (INTEL_GTT_GEN == 5) { |
302 | switch (gmch_ctl & G4x_GMCH_SIZE_MASK) { |
249 | switch (gmch_ctl & G4x_GMCH_SIZE_MASK) { |
303 | case G4x_GMCH_SIZE_1M: |
250 | case G4x_GMCH_SIZE_1M: |
304 | case G4x_GMCH_SIZE_VT_1M: |
251 | case G4x_GMCH_SIZE_VT_1M: |
305 | i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB); |
252 | i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB); |
306 | break; |
253 | break; |
307 | case G4x_GMCH_SIZE_VT_1_5M: |
254 | case G4x_GMCH_SIZE_VT_1_5M: |
308 | i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB); |
255 | i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB); |
309 | break; |
256 | break; |
310 | case G4x_GMCH_SIZE_2M: |
257 | case G4x_GMCH_SIZE_2M: |
311 | case G4x_GMCH_SIZE_VT_2M: |
258 | case G4x_GMCH_SIZE_VT_2M: |
312 | i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB); |
259 | i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB); |
313 | break; |
260 | break; |
314 | } |
261 | } |
315 | } |
262 | } |
316 | 263 | ||
317 | pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL); |
264 | pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL); |
318 | 265 | ||
319 | switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) { |
266 | switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) { |
320 | case I965_PGETBL_SIZE_128KB: |
267 | case I965_PGETBL_SIZE_128KB: |
321 | size = KB(128); |
268 | size = KB(128); |
322 | break; |
269 | break; |
323 | case I965_PGETBL_SIZE_256KB: |
270 | case I965_PGETBL_SIZE_256KB: |
324 | size = KB(256); |
271 | size = KB(256); |
325 | break; |
272 | break; |
326 | case I965_PGETBL_SIZE_512KB: |
273 | case I965_PGETBL_SIZE_512KB: |
327 | size = KB(512); |
274 | size = KB(512); |
328 | break; |
275 | break; |
329 | /* GTT pagetable sizes bigger than 512KB are not possible on G33! */ |
276 | /* GTT pagetable sizes bigger than 512KB are not possible on G33! */ |
330 | case I965_PGETBL_SIZE_1MB: |
277 | case I965_PGETBL_SIZE_1MB: |
331 | size = KB(1024); |
278 | size = KB(1024); |
332 | break; |
279 | break; |
333 | case I965_PGETBL_SIZE_2MB: |
280 | case I965_PGETBL_SIZE_2MB: |
334 | size = KB(2048); |
281 | size = KB(2048); |
335 | break; |
282 | break; |
336 | case I965_PGETBL_SIZE_1_5MB: |
283 | case I965_PGETBL_SIZE_1_5MB: |
337 | size = KB(1024 + 512); |
284 | size = KB(1024 + 512); |
338 | break; |
285 | break; |
339 | default: |
286 | default: |
340 | dev_info(&intel_private.pcidev->dev, |
287 | dev_info(&intel_private.pcidev->dev, |
341 | "unknown page table size, assuming 512KB\n"); |
288 | "unknown page table size, assuming 512KB\n"); |
342 | size = KB(512); |
289 | size = KB(512); |
343 | } |
290 | } |
344 | 291 | ||
345 | return size/4; |
292 | return size/4; |
346 | } |
293 | } |
347 | 294 | ||
348 | static unsigned int intel_gtt_total_entries(void) |
295 | static unsigned int intel_gtt_total_entries(void) |
349 | { |
296 | { |
350 | int size; |
- | |
351 | - | ||
352 | if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) |
297 | if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) |
353 | return i965_gtt_total_entries(); |
298 | return i965_gtt_total_entries(); |
354 | else if (INTEL_GTT_GEN == 6) { |
- | |
355 | u16 snb_gmch_ctl; |
- | |
356 | - | ||
357 | pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); |
- | |
358 | switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) { |
- | |
359 | default: |
- | |
360 | case SNB_GTT_SIZE_0M: |
- | |
361 | printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl); |
- | |
362 | size = MB(0); |
- | |
363 | break; |
- | |
364 | case SNB_GTT_SIZE_1M: |
- | |
365 | size = MB(1); |
- | |
366 | break; |
- | |
367 | case SNB_GTT_SIZE_2M: |
- | |
368 | size = MB(2); |
- | |
369 | break; |
- | |
370 | } |
- | |
371 | return size/4; |
- | |
372 | } else { |
299 | else { |
373 | /* On previous hardware, the GTT size was just what was |
300 | /* On previous hardware, the GTT size was just what was |
374 | * required to map the aperture. |
301 | * required to map the aperture. |
375 | */ |
302 | */ |
376 | return intel_private.base.gtt_mappable_entries; |
303 | return intel_private.base.gtt_mappable_entries; |
377 | } |
304 | } |
378 | } |
305 | } |
379 | 306 | ||
380 | static unsigned int intel_gtt_mappable_entries(void) |
307 | static unsigned int intel_gtt_mappable_entries(void) |
381 | { |
308 | { |
382 | unsigned int aperture_size; |
309 | unsigned int aperture_size; |
383 | 310 | ||
384 | if (INTEL_GTT_GEN == 1) { |
311 | if (INTEL_GTT_GEN == 1) { |
385 | u32 smram_miscc; |
312 | u32 smram_miscc; |
386 | 313 | ||
387 | pci_read_config_dword(intel_private.bridge_dev, |
314 | pci_read_config_dword(intel_private.bridge_dev, |
388 | I810_SMRAM_MISCC, &smram_miscc); |
315 | I810_SMRAM_MISCC, &smram_miscc); |
389 | 316 | ||
390 | if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) |
317 | if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) |
391 | == I810_GFX_MEM_WIN_32M) |
318 | == I810_GFX_MEM_WIN_32M) |
392 | aperture_size = MB(32); |
319 | aperture_size = MB(32); |
393 | else |
320 | else |
394 | aperture_size = MB(64); |
321 | aperture_size = MB(64); |
395 | } else if (INTEL_GTT_GEN == 2) { |
322 | } else if (INTEL_GTT_GEN == 2) { |
396 | u16 gmch_ctrl; |
323 | u16 gmch_ctrl; |
397 | 324 | ||
398 | pci_read_config_word(intel_private.bridge_dev, |
325 | pci_read_config_word(intel_private.bridge_dev, |
399 | I830_GMCH_CTRL, &gmch_ctrl); |
326 | I830_GMCH_CTRL, &gmch_ctrl); |
400 | 327 | ||
401 | if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M) |
328 | if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M) |
402 | aperture_size = MB(64); |
329 | aperture_size = MB(64); |
403 | else |
330 | else |
404 | aperture_size = MB(128); |
331 | aperture_size = MB(128); |
405 | } else { |
332 | } else { |
406 | /* 9xx supports large sizes, just look at the length */ |
333 | /* 9xx supports large sizes, just look at the length */ |
407 | aperture_size = pci_resource_len(intel_private.pcidev, 2); |
334 | aperture_size = pci_resource_len(intel_private.pcidev, 2); |
408 | } |
335 | } |
409 | 336 | ||
410 | return aperture_size >> PAGE_SHIFT; |
337 | return aperture_size >> PAGE_SHIFT; |
411 | } |
338 | } |
412 | 339 | ||
413 | static void intel_gtt_teardown_scratch_page(void) |
340 | static void intel_gtt_teardown_scratch_page(void) |
414 | { |
341 | { |
415 | // FreePage(intel_private.scratch_page_dma); |
342 | // FreePage(intel_private.scratch_page_dma); |
416 | } |
343 | } |
417 | 344 | ||
418 | static void intel_gtt_cleanup(void) |
345 | static void intel_gtt_cleanup(void) |
419 | { |
346 | { |
420 | intel_private.driver->cleanup(); |
347 | intel_private.driver->cleanup(); |
421 | 348 | ||
422 | iounmap(intel_private.gtt); |
349 | iounmap(intel_private.gtt); |
423 | iounmap(intel_private.registers); |
350 | iounmap(intel_private.registers); |
424 | 351 | ||
425 | intel_gtt_teardown_scratch_page(); |
352 | intel_gtt_teardown_scratch_page(); |
426 | } |
353 | } |
427 | 354 | ||
428 | static int intel_gtt_init(void) |
355 | static int intel_gtt_init(void) |
429 | { |
356 | { |
430 | u32 gma_addr; |
357 | u32 gma_addr; |
431 | u32 gtt_map_size; |
358 | u32 gtt_map_size; |
432 | int ret; |
359 | int ret; |
433 | 360 | ||
434 | ret = intel_private.driver->setup(); |
361 | ret = intel_private.driver->setup(); |
435 | if (ret != 0) |
362 | if (ret != 0) |
436 | { |
- | |
437 | return ret; |
363 | return ret; |
438 | }; |
- | |
439 | - | ||
440 | 364 | ||
441 | intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries(); |
365 | intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries(); |
442 | intel_private.base.gtt_total_entries = intel_gtt_total_entries(); |
366 | intel_private.base.gtt_total_entries = intel_gtt_total_entries(); |
443 | 367 | ||
444 | /* save the PGETBL reg for resume */ |
368 | /* save the PGETBL reg for resume */ |
445 | intel_private.PGETBL_save = |
369 | intel_private.PGETBL_save = |
446 | readl(intel_private.registers+I810_PGETBL_CTL) |
370 | readl(intel_private.registers+I810_PGETBL_CTL) |
447 | & ~I810_PGETBL_ENABLED; |
371 | & ~I810_PGETBL_ENABLED; |
448 | /* we only ever restore the register when enabling the PGTBL... */ |
372 | /* we only ever restore the register when enabling the PGTBL... */ |
449 | if (HAS_PGTBL_EN) |
373 | if (HAS_PGTBL_EN) |
450 | intel_private.PGETBL_save |= I810_PGETBL_ENABLED; |
374 | intel_private.PGETBL_save |= I810_PGETBL_ENABLED; |
451 | 375 | ||
452 | dev_info(&intel_private.bridge_dev->dev, |
376 | dev_info(&intel_private.bridge_dev->dev, |
453 | "detected gtt size: %dK total, %dK mappable\n", |
377 | "detected gtt size: %dK total, %dK mappable\n", |
454 | intel_private.base.gtt_total_entries * 4, |
378 | intel_private.base.gtt_total_entries * 4, |
455 | intel_private.base.gtt_mappable_entries * 4); |
379 | intel_private.base.gtt_mappable_entries * 4); |
456 | 380 | ||
457 | gtt_map_size = intel_private.base.gtt_total_entries * 4; |
381 | gtt_map_size = intel_private.base.gtt_total_entries * 4; |
458 | 382 | ||
459 | intel_private.gtt = NULL; |
383 | intel_private.gtt = NULL; |
460 | // if (INTEL_GTT_GEN < 6 && INTEL_GTT_GEN > 2) |
- | |
461 | // intel_private.gtt = ioremap_wc(intel_private.gtt_bus_addr, |
- | |
462 | // gtt_map_size); |
- | |
463 | if (intel_private.gtt == NULL) |
384 | if (intel_private.gtt == NULL) |
464 | intel_private.gtt = ioremap(intel_private.gtt_bus_addr, |
385 | intel_private.gtt = ioremap(intel_private.gtt_bus_addr, |
465 | gtt_map_size); |
386 | gtt_map_size); |
466 | if (intel_private.gtt == NULL) { |
387 | if (intel_private.gtt == NULL) { |
467 | intel_private.driver->cleanup(); |
388 | intel_private.driver->cleanup(); |
468 | iounmap(intel_private.registers); |
389 | iounmap(intel_private.registers); |
469 | return -ENOMEM; |
390 | return -ENOMEM; |
470 | } |
391 | } |
471 | intel_private.base.gtt = intel_private.gtt; |
392 | intel_private.base.gtt = intel_private.gtt; |
472 | 393 | ||
473 | asm volatile("wbinvd"); |
394 | asm volatile("wbinvd"); |
474 | 395 | ||
475 | intel_private.base.stolen_size = intel_gtt_stolen_size(); |
396 | intel_private.base.stolen_size = intel_gtt_stolen_size(); |
476 | 397 | ||
477 | intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2; |
398 | intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2; |
478 | 399 | ||
479 | ret = intel_gtt_setup_scratch_page(); |
400 | ret = intel_gtt_setup_scratch_page(); |
480 | if (ret != 0) { |
401 | if (ret != 0) { |
481 | intel_gtt_cleanup(); |
402 | intel_gtt_cleanup(); |
482 | return ret; |
403 | return ret; |
483 | } |
404 | } |
484 | 405 | ||
485 | if (INTEL_GTT_GEN <= 2) |
406 | if (INTEL_GTT_GEN <= 2) |
486 | pci_read_config_dword(intel_private.pcidev, I810_GMADDR, |
407 | pci_read_config_dword(intel_private.pcidev, I810_GMADDR, |
487 | &gma_addr); |
408 | &gma_addr); |
488 | else |
409 | else |
489 | pci_read_config_dword(intel_private.pcidev, I915_GMADDR, |
410 | pci_read_config_dword(intel_private.pcidev, I915_GMADDR, |
490 | &gma_addr); |
411 | &gma_addr); |
491 | 412 | ||
492 | intel_private.base.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK); |
413 | intel_private.base.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK); |
493 | 414 | ||
494 | return 0; |
415 | return 0; |
495 | } |
416 | } |
496 | 417 | ||
497 | static void i830_write_entry(dma_addr_t addr, unsigned int entry, |
418 | static void i830_write_entry(dma_addr_t addr, unsigned int entry, |
498 | unsigned int flags) |
419 | unsigned int flags) |
499 | { |
420 | { |
500 | u32 pte_flags = I810_PTE_VALID; |
421 | u32 pte_flags = I810_PTE_VALID; |
501 | 422 | ||
502 | if (flags == AGP_USER_CACHED_MEMORY) |
423 | if (flags == AGP_USER_CACHED_MEMORY) |
503 | pte_flags |= I830_PTE_SYSTEM_CACHED; |
424 | pte_flags |= I830_PTE_SYSTEM_CACHED; |
504 | 425 | ||
505 | writel(addr | pte_flags, intel_private.gtt + entry); |
426 | writel(addr | pte_flags, intel_private.gtt + entry); |
506 | } |
427 | } |
507 | 428 | ||
508 | bool intel_enable_gtt(void) |
429 | bool intel_enable_gtt(void) |
509 | { |
430 | { |
510 | u8 __iomem *reg; |
431 | u8 __iomem *reg; |
511 | - | ||
512 | if (INTEL_GTT_GEN >= 6) |
- | |
513 | return true; |
- | |
514 | 432 | ||
515 | if (INTEL_GTT_GEN == 2) { |
433 | if (INTEL_GTT_GEN == 2) { |
516 | u16 gmch_ctrl; |
434 | u16 gmch_ctrl; |
517 | 435 | ||
518 | pci_read_config_word(intel_private.bridge_dev, |
436 | pci_read_config_word(intel_private.bridge_dev, |
519 | I830_GMCH_CTRL, &gmch_ctrl); |
437 | I830_GMCH_CTRL, &gmch_ctrl); |
520 | gmch_ctrl |= I830_GMCH_ENABLED; |
438 | gmch_ctrl |= I830_GMCH_ENABLED; |
521 | pci_write_config_word(intel_private.bridge_dev, |
439 | pci_write_config_word(intel_private.bridge_dev, |
522 | I830_GMCH_CTRL, gmch_ctrl); |
440 | I830_GMCH_CTRL, gmch_ctrl); |
523 | 441 | ||
524 | pci_read_config_word(intel_private.bridge_dev, |
442 | pci_read_config_word(intel_private.bridge_dev, |
525 | I830_GMCH_CTRL, &gmch_ctrl); |
443 | I830_GMCH_CTRL, &gmch_ctrl); |
526 | if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) { |
444 | if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) { |
527 | dev_err(&intel_private.pcidev->dev, |
445 | dev_err(&intel_private.pcidev->dev, |
528 | "failed to enable the GTT: GMCH_CTRL=%x\n", |
446 | "failed to enable the GTT: GMCH_CTRL=%x\n", |
529 | gmch_ctrl); |
447 | gmch_ctrl); |
530 | return false; |
448 | return false; |
531 | } |
449 | } |
532 | } |
450 | } |
533 | 451 | ||
534 | /* On the resume path we may be adjusting the PGTBL value, so |
452 | /* On the resume path we may be adjusting the PGTBL value, so |
535 | * be paranoid and flush all chipset write buffers... |
453 | * be paranoid and flush all chipset write buffers... |
536 | */ |
454 | */ |
537 | if (INTEL_GTT_GEN >= 3) |
455 | if (INTEL_GTT_GEN >= 3) |
538 | writel(0, intel_private.registers+GFX_FLSH_CNTL); |
456 | writel(0, intel_private.registers+GFX_FLSH_CNTL); |
539 | 457 | ||
540 | reg = intel_private.registers+I810_PGETBL_CTL; |
458 | reg = intel_private.registers+I810_PGETBL_CTL; |
541 | writel(intel_private.PGETBL_save, reg); |
459 | writel(intel_private.PGETBL_save, reg); |
542 | if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) { |
460 | if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) { |
543 | dev_err(&intel_private.pcidev->dev, |
461 | dev_err(&intel_private.pcidev->dev, |
544 | "failed to enable the GTT: PGETBL=%x [expected %x]\n", |
462 | "failed to enable the GTT: PGETBL=%x [expected %x]\n", |
545 | readl(reg), intel_private.PGETBL_save); |
463 | readl(reg), intel_private.PGETBL_save); |
546 | return false; |
464 | return false; |
547 | } |
465 | } |
548 | 466 | ||
549 | if (INTEL_GTT_GEN >= 3) |
467 | if (INTEL_GTT_GEN >= 3) |
550 | writel(0, intel_private.registers+GFX_FLSH_CNTL); |
468 | writel(0, intel_private.registers+GFX_FLSH_CNTL); |
551 | 469 | ||
552 | return true; |
470 | return true; |
553 | } |
471 | } |
554 | 472 | ||
555 | static bool i830_check_flags(unsigned int flags) |
473 | static bool i830_check_flags(unsigned int flags) |
556 | { |
474 | { |
557 | switch (flags) { |
475 | switch (flags) { |
558 | case 0: |
476 | case 0: |
559 | case AGP_PHYS_MEMORY: |
477 | case AGP_PHYS_MEMORY: |
560 | case AGP_USER_CACHED_MEMORY: |
478 | case AGP_USER_CACHED_MEMORY: |
561 | case AGP_USER_MEMORY: |
479 | case AGP_USER_MEMORY: |
562 | return true; |
480 | return true; |
563 | } |
481 | } |
564 | 482 | ||
565 | return false; |
483 | return false; |
566 | } |
484 | } |
567 | 485 | ||
568 | void intel_gtt_insert_sg_entries(struct pagelist *st, |
486 | void intel_gtt_insert_sg_entries(struct sg_table *st, |
569 | unsigned int pg_start, |
487 | unsigned int pg_start, |
570 | unsigned int flags) |
488 | unsigned int flags) |
571 | { |
489 | { |
- | 490 | struct scatterlist *sg; |
|
- | 491 | unsigned int len, m; |
|
572 | int i, j; |
492 | int i, j; |
573 | 493 | ||
574 | j = pg_start; |
494 | j = pg_start; |
- | 495 | ||
- | 496 | /* sg may merge pages, but we have to separate |
|
575 | 497 | * per-page addr for GTT */ |
|
- | 498 | for_each_sg(st->sgl, sg, st->nents, i) { |
|
576 | for(i = 0; i < st->nents; i++) |
499 | len = sg_dma_len(sg) >> PAGE_SHIFT; |
577 | { |
500 | for (m = 0; m < len; m++) { |
578 | dma_addr_t addr = st->page[i]; |
501 | dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT); |
579 | intel_private.driver->write_entry(addr, j, flags); |
502 | intel_private.driver->write_entry(addr, j, flags); |
580 | j++; |
503 | j++; |
581 | }; |
504 | } |
582 | 505 | } |
|
583 | readl(intel_private.gtt+j-1); |
506 | readl(intel_private.gtt+j-1); |
- | 507 | } |
|
584 | } |
508 | EXPORT_SYMBOL(intel_gtt_insert_sg_entries); |
585 | 509 | ||
586 | static void intel_gtt_insert_pages(unsigned int first_entry, |
510 | static void intel_gtt_insert_pages(unsigned int first_entry, |
587 | unsigned int num_entries, |
511 | unsigned int num_entries, |
588 | dma_addr_t *pages, |
512 | struct page **pages, |
589 | unsigned int flags) |
513 | unsigned int flags) |
590 | { |
514 | { |
591 | int i, j; |
515 | int i, j; |
592 | 516 | ||
593 | for (i = 0, j = first_entry; i < num_entries; i++, j++) { |
517 | for (i = 0, j = first_entry; i < num_entries; i++, j++) { |
594 | dma_addr_t addr = pages[i]; |
518 | dma_addr_t addr = page_to_phys(pages[i]); |
595 | intel_private.driver->write_entry(addr, |
519 | intel_private.driver->write_entry(addr, |
596 | j, flags); |
520 | j, flags); |
597 | } |
521 | } |
598 | readl(intel_private.gtt+j-1); |
522 | readl(intel_private.gtt+j-1); |
599 | } |
523 | } |
600 | 524 | ||
601 | 525 | ||
602 | void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries) |
526 | void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries) |
603 | { |
527 | { |
604 | unsigned int i; |
528 | unsigned int i; |
605 | 529 | ||
606 | for (i = first_entry; i < (first_entry + num_entries); i++) { |
530 | for (i = first_entry; i < (first_entry + num_entries); i++) { |
607 | intel_private.driver->write_entry(intel_private.base.scratch_page_dma, |
531 | intel_private.driver->write_entry(intel_private.base.scratch_page_dma, |
608 | i, 0); |
532 | i, 0); |
609 | } |
533 | } |
610 | readl(intel_private.gtt+i-1); |
534 | readl(intel_private.gtt+i-1); |
611 | } |
535 | } |
612 | 536 | ||
613 | static void intel_i9xx_setup_flush(void) |
537 | static void intel_i9xx_setup_flush(void) |
614 | { |
538 | { |
615 | /* return if already configured */ |
539 | /* return if already configured */ |
616 | if (intel_private.ifp_resource.start) |
540 | if (intel_private.ifp_resource.start) |
617 | return; |
541 | return; |
618 | 542 | ||
619 | if (INTEL_GTT_GEN == 6) |
543 | if (INTEL_GTT_GEN == 6) |
620 | return; |
544 | return; |
621 | 545 | ||
622 | /* setup a resource for this object */ |
546 | /* setup a resource for this object */ |
623 | // intel_private.ifp_resource.name = "Intel Flush Page"; |
547 | // intel_private.ifp_resource.name = "Intel Flush Page"; |
624 | // intel_private.ifp_resource.flags = IORESOURCE_MEM; |
548 | // intel_private.ifp_resource.flags = IORESOURCE_MEM; |
625 | 549 | ||
626 | intel_private.resource_valid = 0; |
550 | intel_private.resource_valid = 0; |
627 | 551 | ||
628 | /* Setup chipset flush for 915 */ |
552 | /* Setup chipset flush for 915 */ |
629 | // if (IS_G33 || INTEL_GTT_GEN >= 4) { |
553 | // if (IS_G33 || INTEL_GTT_GEN >= 4) { |
630 | // intel_i965_g33_setup_chipset_flush(); |
554 | // intel_i965_g33_setup_chipset_flush(); |
631 | // } else { |
555 | // } else { |
632 | // intel_i915_setup_chipset_flush(); |
556 | // intel_i915_setup_chipset_flush(); |
633 | // } |
557 | // } |
634 | 558 | ||
635 | // if (intel_private.ifp_resource.start) |
559 | // if (intel_private.ifp_resource.start) |
636 | // intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE); |
560 | // intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE); |
637 | if (!intel_private.i9xx_flush_page) |
561 | if (!intel_private.i9xx_flush_page) |
638 | dev_err(&intel_private.pcidev->dev, |
562 | dev_err(&intel_private.pcidev->dev, |
639 | "can't ioremap flush page - no chipset flushing\n"); |
563 | "can't ioremap flush page - no chipset flushing\n"); |
640 | } |
564 | } |
641 | 565 | ||
642 | static void i9xx_cleanup(void) |
566 | static void i9xx_cleanup(void) |
643 | { |
567 | { |
644 | if (intel_private.i9xx_flush_page) |
568 | if (intel_private.i9xx_flush_page) |
645 | iounmap(intel_private.i9xx_flush_page); |
569 | iounmap(intel_private.i9xx_flush_page); |
646 | // if (intel_private.resource_valid) |
570 | // if (intel_private.resource_valid) |
647 | // release_resource(&intel_private.ifp_resource); |
571 | // release_resource(&intel_private.ifp_resource); |
648 | intel_private.ifp_resource.start = 0; |
572 | intel_private.ifp_resource.start = 0; |
649 | intel_private.resource_valid = 0; |
573 | intel_private.resource_valid = 0; |
650 | } |
574 | } |
651 | 575 | ||
652 | static void i9xx_chipset_flush(void) |
576 | static void i9xx_chipset_flush(void) |
653 | { |
577 | { |
654 | if (intel_private.i9xx_flush_page) |
578 | if (intel_private.i9xx_flush_page) |
655 | writel(1, intel_private.i9xx_flush_page); |
579 | writel(1, intel_private.i9xx_flush_page); |
656 | } |
580 | } |
657 | 581 | ||
658 | static void i965_write_entry(dma_addr_t addr, |
582 | static void i965_write_entry(dma_addr_t addr, |
659 | unsigned int entry, |
583 | unsigned int entry, |
660 | unsigned int flags) |
584 | unsigned int flags) |
661 | { |
585 | { |
662 | u32 pte_flags; |
586 | u32 pte_flags; |
663 | 587 | ||
664 | pte_flags = I810_PTE_VALID; |
588 | pte_flags = I810_PTE_VALID; |
665 | if (flags == AGP_USER_CACHED_MEMORY) |
589 | if (flags == AGP_USER_CACHED_MEMORY) |
666 | pte_flags |= I830_PTE_SYSTEM_CACHED; |
590 | pte_flags |= I830_PTE_SYSTEM_CACHED; |
667 | 591 | ||
668 | /* Shift high bits down */ |
592 | /* Shift high bits down */ |
669 | addr |= (addr >> 28) & 0xf0; |
593 | addr |= (addr >> 28) & 0xf0; |
670 | writel(addr | pte_flags, intel_private.gtt + entry); |
594 | writel(addr | pte_flags, intel_private.gtt + entry); |
671 | } |
595 | } |
672 | - | ||
673 | static bool gen6_check_flags(unsigned int flags) |
- | |
674 | { |
- | |
675 | return true; |
- | |
676 | } |
- | |
677 | - | ||
678 | static void haswell_write_entry(dma_addr_t addr, unsigned int entry, |
- | |
679 | unsigned int flags) |
- | |
680 | { |
- | |
681 | unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT; |
- | |
682 | unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT; |
- | |
683 | u32 pte_flags; |
- | |
684 | - | ||
685 | if (type_mask == AGP_USER_MEMORY) |
- | |
686 | pte_flags = HSW_PTE_UNCACHED | I810_PTE_VALID; |
- | |
687 | else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) { |
- | |
688 | pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID; |
- | |
689 | if (gfdt) |
- | |
690 | pte_flags |= GEN6_PTE_GFDT; |
- | |
691 | } else { /* set 'normal'/'cached' to LLC by default */ |
- | |
692 | pte_flags = GEN6_PTE_LLC | I810_PTE_VALID; |
- | |
693 | if (gfdt) |
- | |
694 | pte_flags |= GEN6_PTE_GFDT; |
- | |
695 | } |
- | |
696 | - | ||
697 | /* gen6 has bit11-4 for physical addr bit39-32 */ |
- | |
698 | addr |= (addr >> 28) & 0xff0; |
- | |
699 | writel(addr | pte_flags, intel_private.gtt + entry); |
- | |
700 | } |
- | |
701 | - | ||
702 | static void gen6_write_entry(dma_addr_t addr, unsigned int entry, |
- | |
703 | unsigned int flags) |
- | |
704 | { |
- | |
705 | unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT; |
- | |
706 | unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT; |
- | |
707 | u32 pte_flags; |
- | |
708 | - | ||
709 | if (type_mask == AGP_USER_MEMORY) |
- | |
710 | pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID; |
- | |
711 | else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) { |
- | |
712 | pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID; |
- | |
713 | if (gfdt) |
- | |
714 | pte_flags |= GEN6_PTE_GFDT; |
- | |
715 | } else { /* set 'normal'/'cached' to LLC by default */ |
- | |
716 | pte_flags = GEN6_PTE_LLC | I810_PTE_VALID; |
- | |
717 | if (gfdt) |
- | |
718 | pte_flags |= GEN6_PTE_GFDT; |
- | |
719 | } |
- | |
720 | - | ||
721 | /* gen6 has bit11-4 for physical addr bit39-32 */ |
- | |
722 | addr |= (addr >> 28) & 0xff0; |
- | |
723 | writel(addr | pte_flags, intel_private.gtt + entry); |
- | |
724 | } |
- | |
725 | - | ||
726 | static void valleyview_write_entry(dma_addr_t addr, unsigned int entry, |
- | |
727 | unsigned int flags) |
- | |
728 | { |
- | |
729 | unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT; |
- | |
730 | unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT; |
- | |
731 | u32 pte_flags; |
- | |
732 | - | ||
733 | if (type_mask == AGP_USER_MEMORY) |
- | |
734 | pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID; |
- | |
735 | else { |
- | |
736 | pte_flags = GEN6_PTE_LLC | I810_PTE_VALID; |
- | |
737 | if (gfdt) |
- | |
738 | pte_flags |= GEN6_PTE_GFDT; |
- | |
739 | } |
- | |
740 | - | ||
741 | /* gen6 has bit11-4 for physical addr bit39-32 */ |
- | |
742 | addr |= (addr >> 28) & 0xff0; |
- | |
743 | writel(addr | pte_flags, intel_private.gtt + entry); |
- | |
744 | - | ||
745 | writel(1, intel_private.registers + GFX_FLSH_CNTL_VLV); |
- | |
746 | } |
- | |
747 | - | ||
748 | static void gen6_cleanup(void) |
- | |
749 | { |
- | |
750 | } |
- | |
751 | 596 | ||
752 | /* Certain Gen5 chipsets require require idling the GPU before |
597 | /* Certain Gen5 chipsets require require idling the GPU before |
753 | * unmapping anything from the GTT when VT-d is enabled. |
598 | * unmapping anything from the GTT when VT-d is enabled. |
754 | */ |
599 | */ |
755 | static inline int needs_idle_maps(void) |
600 | static inline int needs_idle_maps(void) |
756 | { |
601 | { |
757 | #ifdef CONFIG_INTEL_IOMMU |
602 | #ifdef CONFIG_INTEL_IOMMU |
758 | const unsigned short gpu_devid = intel_private.pcidev->device; |
603 | const unsigned short gpu_devid = intel_private.pcidev->device; |
759 | 604 | ||
760 | /* Query intel_iommu to see if we need the workaround. Presumably that |
605 | /* Query intel_iommu to see if we need the workaround. Presumably that |
761 | * was loaded first. |
606 | * was loaded first. |
762 | */ |
607 | */ |
763 | if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || |
608 | if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || |
764 | gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) && |
609 | gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) && |
765 | intel_iommu_gfx_mapped) |
610 | intel_iommu_gfx_mapped) |
766 | return 1; |
611 | return 1; |
767 | #endif |
612 | #endif |
768 | return 0; |
613 | return 0; |
769 | } |
614 | } |
770 | 615 | ||
771 | static int i9xx_setup(void) |
616 | static int i9xx_setup(void) |
772 | { |
617 | { |
773 | u32 reg_addr; |
618 | u32 reg_addr, gtt_addr; |
774 | int size = KB(512); |
619 | int size = KB(512); |
775 | 620 | ||
776 | pci_read_config_dword(intel_private.pcidev, I915_MMADDR, ®_addr); |
621 | pci_read_config_dword(intel_private.pcidev, I915_MMADDR, ®_addr); |
777 | 622 | ||
778 | reg_addr &= 0xfff80000; |
623 | reg_addr &= 0xfff80000; |
779 | - | ||
780 | if (INTEL_GTT_GEN >= 7) |
- | |
781 | size = MB(2); |
- | |
782 | 624 | ||
783 | intel_private.registers = ioremap(reg_addr, size); |
625 | intel_private.registers = ioremap(reg_addr, size); |
784 | if (!intel_private.registers) |
626 | if (!intel_private.registers) |
785 | return -ENOMEM; |
627 | return -ENOMEM; |
786 | 628 | ||
787 | if (INTEL_GTT_GEN == 3) { |
629 | switch (INTEL_GTT_GEN) { |
788 | u32 gtt_addr; |
- | |
789 | 630 | case 3: |
|
790 | pci_read_config_dword(intel_private.pcidev, |
631 | pci_read_config_dword(intel_private.pcidev, |
791 | I915_PTEADDR, >t_addr); |
632 | I915_PTEADDR, >t_addr); |
792 | intel_private.gtt_bus_addr = gtt_addr; |
633 | intel_private.gtt_bus_addr = gtt_addr; |
793 | } else { |
634 | break; |
794 | u32 gtt_offset; |
- | |
795 | - | ||
796 | switch (INTEL_GTT_GEN) { |
- | |
797 | case 5: |
635 | case 5: |
798 | case 6: |
- | |
799 | case 7: |
- | |
800 | gtt_offset = MB(2); |
636 | intel_private.gtt_bus_addr = reg_addr + MB(2); |
801 | break; |
637 | break; |
802 | case 4: |
- | |
803 | default: |
638 | default: |
804 | gtt_offset = KB(512); |
639 | intel_private.gtt_bus_addr = reg_addr + KB(512); |
805 | break; |
640 | break; |
806 | } |
641 | } |
807 | intel_private.gtt_bus_addr = reg_addr + gtt_offset; |
- | |
808 | } |
- | |
809 | 642 | ||
810 | if (needs_idle_maps()) |
643 | if (needs_idle_maps()) |
811 | intel_private.base.do_idle_maps = 1; |
644 | intel_private.base.do_idle_maps = 1; |
812 | 645 | ||
813 | intel_i9xx_setup_flush(); |
646 | intel_i9xx_setup_flush(); |
814 | 647 | ||
815 | return 0; |
648 | return 0; |
816 | } |
649 | } |
817 | 650 | ||
818 | static const struct intel_gtt_driver i915_gtt_driver = { |
651 | static const struct intel_gtt_driver i915_gtt_driver = { |
819 | .gen = 3, |
652 | .gen = 3, |
820 | .has_pgtbl_enable = 1, |
653 | .has_pgtbl_enable = 1, |
821 | .setup = i9xx_setup, |
654 | .setup = i9xx_setup, |
822 | .cleanup = i9xx_cleanup, |
655 | .cleanup = i9xx_cleanup, |
823 | /* i945 is the last gpu to need phys mem (for overlay and cursors). */ |
656 | /* i945 is the last gpu to need phys mem (for overlay and cursors). */ |
824 | .write_entry = i830_write_entry, |
657 | .write_entry = i830_write_entry, |
825 | .dma_mask_size = 32, |
658 | .dma_mask_size = 32, |
826 | .check_flags = i830_check_flags, |
659 | .check_flags = i830_check_flags, |
827 | .chipset_flush = i9xx_chipset_flush, |
660 | .chipset_flush = i9xx_chipset_flush, |
828 | }; |
661 | }; |
829 | static const struct intel_gtt_driver g33_gtt_driver = { |
662 | static const struct intel_gtt_driver g33_gtt_driver = { |
830 | .gen = 3, |
663 | .gen = 3, |
831 | .is_g33 = 1, |
664 | .is_g33 = 1, |
832 | .setup = i9xx_setup, |
665 | .setup = i9xx_setup, |
833 | .cleanup = i9xx_cleanup, |
666 | .cleanup = i9xx_cleanup, |
834 | .write_entry = i965_write_entry, |
667 | .write_entry = i965_write_entry, |
835 | .dma_mask_size = 36, |
668 | .dma_mask_size = 36, |
836 | .check_flags = i830_check_flags, |
669 | .check_flags = i830_check_flags, |
837 | .chipset_flush = i9xx_chipset_flush, |
670 | .chipset_flush = i9xx_chipset_flush, |
838 | }; |
671 | }; |
839 | static const struct intel_gtt_driver pineview_gtt_driver = { |
672 | static const struct intel_gtt_driver pineview_gtt_driver = { |
840 | .gen = 3, |
673 | .gen = 3, |
841 | .is_pineview = 1, .is_g33 = 1, |
674 | .is_pineview = 1, .is_g33 = 1, |
842 | .setup = i9xx_setup, |
675 | .setup = i9xx_setup, |
843 | .cleanup = i9xx_cleanup, |
676 | .cleanup = i9xx_cleanup, |
844 | .write_entry = i965_write_entry, |
677 | .write_entry = i965_write_entry, |
845 | .dma_mask_size = 36, |
678 | .dma_mask_size = 36, |
846 | .check_flags = i830_check_flags, |
679 | .check_flags = i830_check_flags, |
847 | .chipset_flush = i9xx_chipset_flush, |
680 | .chipset_flush = i9xx_chipset_flush, |
848 | }; |
681 | }; |
849 | static const struct intel_gtt_driver i965_gtt_driver = { |
682 | static const struct intel_gtt_driver i965_gtt_driver = { |
850 | .gen = 4, |
683 | .gen = 4, |
851 | .has_pgtbl_enable = 1, |
684 | .has_pgtbl_enable = 1, |
852 | .setup = i9xx_setup, |
685 | .setup = i9xx_setup, |
853 | .cleanup = i9xx_cleanup, |
686 | .cleanup = i9xx_cleanup, |
854 | .write_entry = i965_write_entry, |
687 | .write_entry = i965_write_entry, |
855 | .dma_mask_size = 36, |
688 | .dma_mask_size = 36, |
856 | .check_flags = i830_check_flags, |
689 | .check_flags = i830_check_flags, |
857 | .chipset_flush = i9xx_chipset_flush, |
690 | .chipset_flush = i9xx_chipset_flush, |
858 | }; |
691 | }; |
859 | static const struct intel_gtt_driver g4x_gtt_driver = { |
692 | static const struct intel_gtt_driver g4x_gtt_driver = { |
860 | .gen = 5, |
693 | .gen = 5, |
861 | .setup = i9xx_setup, |
694 | .setup = i9xx_setup, |
862 | .cleanup = i9xx_cleanup, |
695 | .cleanup = i9xx_cleanup, |
863 | .write_entry = i965_write_entry, |
696 | .write_entry = i965_write_entry, |
864 | .dma_mask_size = 36, |
697 | .dma_mask_size = 36, |
865 | .check_flags = i830_check_flags, |
698 | .check_flags = i830_check_flags, |
866 | .chipset_flush = i9xx_chipset_flush, |
699 | .chipset_flush = i9xx_chipset_flush, |
867 | }; |
700 | }; |
868 | static const struct intel_gtt_driver ironlake_gtt_driver = { |
701 | static const struct intel_gtt_driver ironlake_gtt_driver = { |
869 | .gen = 5, |
702 | .gen = 5, |
870 | .is_ironlake = 1, |
703 | .is_ironlake = 1, |
871 | .setup = i9xx_setup, |
704 | .setup = i9xx_setup, |
872 | .cleanup = i9xx_cleanup, |
705 | .cleanup = i9xx_cleanup, |
873 | .write_entry = i965_write_entry, |
706 | .write_entry = i965_write_entry, |
874 | .dma_mask_size = 36, |
707 | .dma_mask_size = 36, |
875 | .check_flags = i830_check_flags, |
708 | .check_flags = i830_check_flags, |
876 | .chipset_flush = i9xx_chipset_flush, |
709 | .chipset_flush = i9xx_chipset_flush, |
877 | }; |
710 | }; |
878 | static const struct intel_gtt_driver sandybridge_gtt_driver = { |
- | |
879 | .gen = 6, |
- | |
880 | .setup = i9xx_setup, |
- | |
881 | .cleanup = gen6_cleanup, |
- | |
882 | .write_entry = gen6_write_entry, |
- | |
883 | .dma_mask_size = 40, |
- | |
884 | .check_flags = gen6_check_flags, |
- | |
885 | .chipset_flush = i9xx_chipset_flush, |
- | |
886 | }; |
- | |
887 | static const struct intel_gtt_driver haswell_gtt_driver = { |
- | |
888 | .gen = 6, |
- | |
889 | .setup = i9xx_setup, |
- | |
890 | .cleanup = gen6_cleanup, |
- | |
891 | .write_entry = haswell_write_entry, |
- | |
892 | .dma_mask_size = 40, |
- | |
893 | .check_flags = gen6_check_flags, |
- | |
894 | .chipset_flush = i9xx_chipset_flush, |
- | |
895 | }; |
- | |
896 | static const struct intel_gtt_driver valleyview_gtt_driver = { |
- | |
897 | .gen = 7, |
- | |
898 | .setup = i9xx_setup, |
- | |
899 | .cleanup = gen6_cleanup, |
- | |
900 | .write_entry = valleyview_write_entry, |
- | |
901 | .dma_mask_size = 40, |
- | |
902 | .check_flags = gen6_check_flags, |
- | |
903 | }; |
- | |
904 | 711 | ||
905 | /* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of |
712 | /* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of |
906 | * driver and gmch_driver must be non-null, and find_gmch will determine |
713 | * driver and gmch_driver must be non-null, and find_gmch will determine |
907 | * which one should be used if a gmch_chip_id is present. |
714 | * which one should be used if a gmch_chip_id is present. |
908 | */ |
715 | */ |
909 | static const struct intel_gtt_driver_description { |
716 | static const struct intel_gtt_driver_description { |
910 | unsigned int gmch_chip_id; |
717 | unsigned int gmch_chip_id; |
911 | char *name; |
718 | char *name; |
912 | const struct intel_gtt_driver *gtt_driver; |
719 | const struct intel_gtt_driver *gtt_driver; |
913 | } intel_gtt_chipsets[] = { |
720 | } intel_gtt_chipsets[] = { |
914 | { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)", |
721 | { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)", |
915 | &i915_gtt_driver }, |
722 | &i915_gtt_driver }, |
916 | { PCI_DEVICE_ID_INTEL_82915G_IG, "915G", |
723 | { PCI_DEVICE_ID_INTEL_82915G_IG, "915G", |
917 | &i915_gtt_driver }, |
724 | &i915_gtt_driver }, |
918 | { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM", |
725 | { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM", |
919 | &i915_gtt_driver }, |
726 | &i915_gtt_driver }, |
920 | { PCI_DEVICE_ID_INTEL_82945G_IG, "945G", |
727 | { PCI_DEVICE_ID_INTEL_82945G_IG, "945G", |
921 | &i915_gtt_driver }, |
728 | &i915_gtt_driver }, |
922 | { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM", |
729 | { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM", |
923 | &i915_gtt_driver }, |
730 | &i915_gtt_driver }, |
924 | { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME", |
731 | { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME", |
925 | &i915_gtt_driver }, |
732 | &i915_gtt_driver }, |
926 | { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ", |
733 | { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ", |
927 | &i965_gtt_driver }, |
734 | &i965_gtt_driver }, |
928 | { PCI_DEVICE_ID_INTEL_82G35_IG, "G35", |
735 | { PCI_DEVICE_ID_INTEL_82G35_IG, "G35", |
929 | &i965_gtt_driver }, |
736 | &i965_gtt_driver }, |
930 | { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q", |
737 | { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q", |
931 | &i965_gtt_driver }, |
738 | &i965_gtt_driver }, |
932 | { PCI_DEVICE_ID_INTEL_82965G_IG, "965G", |
739 | { PCI_DEVICE_ID_INTEL_82965G_IG, "965G", |
933 | &i965_gtt_driver }, |
740 | &i965_gtt_driver }, |
934 | { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM", |
741 | { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM", |
935 | &i965_gtt_driver }, |
742 | &i965_gtt_driver }, |
936 | { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE", |
743 | { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE", |
937 | &i965_gtt_driver }, |
744 | &i965_gtt_driver }, |
938 | { PCI_DEVICE_ID_INTEL_G33_IG, "G33", |
745 | { PCI_DEVICE_ID_INTEL_G33_IG, "G33", |
939 | &g33_gtt_driver }, |
746 | &g33_gtt_driver }, |
940 | { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35", |
747 | { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35", |
941 | &g33_gtt_driver }, |
748 | &g33_gtt_driver }, |
942 | { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33", |
749 | { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33", |
943 | &g33_gtt_driver }, |
750 | &g33_gtt_driver }, |
944 | { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150", |
751 | { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150", |
945 | &pineview_gtt_driver }, |
752 | &pineview_gtt_driver }, |
946 | { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150", |
753 | { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150", |
947 | &pineview_gtt_driver }, |
754 | &pineview_gtt_driver }, |
948 | { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45", |
755 | { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45", |
949 | &g4x_gtt_driver }, |
756 | &g4x_gtt_driver }, |
950 | { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake", |
757 | { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake", |
951 | &g4x_gtt_driver }, |
758 | &g4x_gtt_driver }, |
952 | { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43", |
759 | { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43", |
953 | &g4x_gtt_driver }, |
760 | &g4x_gtt_driver }, |
954 | { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43", |
761 | { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43", |
955 | &g4x_gtt_driver }, |
762 | &g4x_gtt_driver }, |
956 | { PCI_DEVICE_ID_INTEL_B43_IG, "B43", |
763 | { PCI_DEVICE_ID_INTEL_B43_IG, "B43", |
957 | &g4x_gtt_driver }, |
764 | &g4x_gtt_driver }, |
958 | { PCI_DEVICE_ID_INTEL_B43_1_IG, "B43", |
765 | { PCI_DEVICE_ID_INTEL_B43_1_IG, "B43", |
959 | &g4x_gtt_driver }, |
766 | &g4x_gtt_driver }, |
960 | { PCI_DEVICE_ID_INTEL_G41_IG, "G41", |
767 | { PCI_DEVICE_ID_INTEL_G41_IG, "G41", |
961 | &g4x_gtt_driver }, |
768 | &g4x_gtt_driver }, |
962 | { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, |
769 | { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, |
963 | "HD Graphics", &ironlake_gtt_driver }, |
770 | "HD Graphics", &ironlake_gtt_driver }, |
964 | { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, |
771 | { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, |
965 | "HD Graphics", &ironlake_gtt_driver }, |
772 | "HD Graphics", &ironlake_gtt_driver }, |
966 | { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG, |
- | |
967 | "Sandybridge", &sandybridge_gtt_driver }, |
- | |
968 | { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG, |
- | |
969 | "Sandybridge", &sandybridge_gtt_driver }, |
- | |
970 | { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG, |
- | |
971 | "Sandybridge", &sandybridge_gtt_driver }, |
- | |
972 | { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG, |
- | |
973 | "Sandybridge", &sandybridge_gtt_driver }, |
- | |
974 | { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG, |
- | |
975 | "Sandybridge", &sandybridge_gtt_driver }, |
- | |
976 | { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG, |
- | |
977 | "Sandybridge", &sandybridge_gtt_driver }, |
- | |
978 | { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG, |
- | |
979 | "Sandybridge", &sandybridge_gtt_driver }, |
- | |
980 | { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG, |
- | |
981 | "Ivybridge", &sandybridge_gtt_driver }, |
- | |
982 | { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG, |
- | |
983 | "Ivybridge", &sandybridge_gtt_driver }, |
- | |
984 | { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG, |
- | |
985 | "Ivybridge", &sandybridge_gtt_driver }, |
- | |
986 | { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG, |
- | |
987 | "Ivybridge", &sandybridge_gtt_driver }, |
- | |
988 | { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG, |
- | |
989 | "Ivybridge", &sandybridge_gtt_driver }, |
- | |
990 | { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG, |
- | |
991 | "Ivybridge", &sandybridge_gtt_driver }, |
- | |
992 | { PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG, |
- | |
993 | "ValleyView", &valleyview_gtt_driver }, |
- | |
994 | { PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG, |
- | |
995 | "Haswell", &haswell_gtt_driver }, |
- | |
996 | { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG, |
- | |
997 | "Haswell", &haswell_gtt_driver }, |
- | |
998 | { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG, |
- | |
999 | "Haswell", &haswell_gtt_driver }, |
- | |
1000 | { PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG, |
- | |
1001 | "Haswell", &haswell_gtt_driver }, |
- | |
1002 | { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG, |
- | |
1003 | "Haswell", &haswell_gtt_driver }, |
- | |
1004 | { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG, |
- | |
1005 | "Haswell", &haswell_gtt_driver }, |
- | |
1006 | { PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG, |
- | |
1007 | "Haswell", &haswell_gtt_driver }, |
- | |
1008 | { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG, |
- | |
1009 | "Haswell", &haswell_gtt_driver }, |
- | |
1010 | { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG, |
- | |
1011 | "Haswell", &haswell_gtt_driver }, |
- | |
1012 | { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG, |
- | |
1013 | "Haswell", &haswell_gtt_driver }, |
- | |
1014 | { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG, |
- | |
1015 | "Haswell", &haswell_gtt_driver }, |
- | |
1016 | { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG, |
- | |
1017 | "Haswell", &haswell_gtt_driver }, |
- | |
1018 | { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG, |
- | |
1019 | "Haswell", &haswell_gtt_driver }, |
- | |
1020 | { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG, |
- | |
1021 | "Haswell", &haswell_gtt_driver }, |
- | |
1022 | { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG, |
- | |
1023 | "Haswell", &haswell_gtt_driver }, |
- | |
1024 | { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG, |
- | |
1025 | "Haswell", &haswell_gtt_driver }, |
- | |
1026 | { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG, |
- | |
1027 | "Haswell", &haswell_gtt_driver }, |
- | |
1028 | { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG, |
- | |
1029 | "Haswell", &haswell_gtt_driver }, |
- | |
1030 | { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG, |
- | |
1031 | "Haswell", &haswell_gtt_driver }, |
- | |
1032 | { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG, |
- | |
1033 | "Haswell", &haswell_gtt_driver }, |
- | |
1034 | { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG, |
- | |
1035 | "Haswell", &haswell_gtt_driver }, |
- | |
1036 | { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG, |
- | |
1037 | "Haswell", &haswell_gtt_driver }, |
- | |
1038 | { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG, |
- | |
1039 | "Haswell", &haswell_gtt_driver }, |
- | |
1040 | { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG, |
- | |
1041 | "Haswell", &haswell_gtt_driver }, |
- | |
1042 | { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG, |
- | |
1043 | "Haswell", &haswell_gtt_driver }, |
- | |
1044 | { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG, |
- | |
1045 | "Haswell", &haswell_gtt_driver }, |
- | |
1046 | { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG, |
- | |
1047 | "Haswell", &haswell_gtt_driver }, |
- | |
1048 | { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG, |
- | |
1049 | "Haswell", &haswell_gtt_driver }, |
- | |
1050 | { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG, |
- | |
1051 | "Haswell", &haswell_gtt_driver }, |
- | |
1052 | { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG, |
- | |
1053 | "Haswell", &haswell_gtt_driver }, |
- | |
1054 | { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG, |
- | |
1055 | "Haswell", &haswell_gtt_driver }, |
- | |
1056 | { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG, |
- | |
1057 | "Haswell", &haswell_gtt_driver }, |
- | |
1058 | { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG, |
- | |
1059 | "Haswell", &haswell_gtt_driver }, |
- | |
1060 | { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG, |
- | |
1061 | "Haswell", &haswell_gtt_driver }, |
- | |
1062 | { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG, |
- | |
1063 | "Haswell", &haswell_gtt_driver }, |
- | |
1064 | { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG, |
- | |
1065 | "Haswell", &haswell_gtt_driver }, |
- | |
1066 | { 0, NULL, NULL } |
773 | { 0, NULL, NULL } |
1067 | }; |
774 | }; |
1068 | 775 | ||
1069 | static int find_gmch(u16 device) |
776 | static int find_gmch(u16 device) |
1070 | { |
777 | { |
1071 | struct pci_dev *gmch_device; |
778 | struct pci_dev *gmch_device; |
1072 | 779 | ||
1073 | gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL); |
780 | gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL); |
1074 | if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) { |
781 | if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) { |
1075 | gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, |
782 | gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, |
1076 | device, gmch_device); |
783 | device, gmch_device); |
1077 | } |
784 | } |
1078 | 785 | ||
1079 | if (!gmch_device) |
786 | if (!gmch_device) |
1080 | return 0; |
787 | return 0; |
1081 | 788 | ||
1082 | intel_private.pcidev = gmch_device; |
789 | intel_private.pcidev = gmch_device; |
1083 | return 1; |
790 | return 1; |
1084 | } |
791 | } |
1085 | 792 | ||
1086 | int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, |
793 | int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, |
1087 | struct agp_bridge_data *bridge) |
794 | struct agp_bridge_data *bridge) |
1088 | { |
795 | { |
1089 | int i, mask; |
796 | int i, mask; |
1090 | intel_private.driver = NULL; |
797 | intel_private.driver = NULL; |
1091 | 798 | ||
1092 | for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) { |
799 | for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) { |
1093 | if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) { |
800 | if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) { |
1094 | intel_private.driver = |
801 | intel_private.driver = |
1095 | intel_gtt_chipsets[i].gtt_driver; |
802 | intel_gtt_chipsets[i].gtt_driver; |
1096 | break; |
803 | break; |
1097 | } |
804 | } |
1098 | } |
805 | } |
1099 | 806 | ||
1100 | if (!intel_private.driver) |
807 | if (!intel_private.driver) |
1101 | return 0; |
808 | return 0; |
1102 | 809 | ||
1103 | if (bridge) { |
810 | if (bridge) { |
1104 | bridge->dev_private_data = &intel_private; |
811 | bridge->dev_private_data = &intel_private; |
1105 | bridge->dev = bridge_pdev; |
812 | bridge->dev = bridge_pdev; |
1106 | } |
813 | } |
1107 | 814 | ||
1108 | intel_private.bridge_dev = bridge_pdev; |
815 | intel_private.bridge_dev = bridge_pdev; |
1109 | 816 | ||
1110 | dbgprintf("Intel %s Chipset\n", intel_gtt_chipsets[i].name); |
817 | dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name); |
1111 | 818 | ||
1112 | mask = intel_private.driver->dma_mask_size; |
819 | mask = intel_private.driver->dma_mask_size; |
1113 | // if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask))) |
820 | // if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask))) |
1114 | // dev_err(&intel_private.pcidev->dev, |
821 | // dev_err(&intel_private.pcidev->dev, |
1115 | // "set gfx device dma mask %d-bit failed!\n", mask); |
822 | // "set gfx device dma mask %d-bit failed!\n", mask); |
1116 | // else |
823 | // else |
1117 | // pci_set_consistent_dma_mask(intel_private.pcidev, |
824 | // pci_set_consistent_dma_mask(intel_private.pcidev, |
1118 | // DMA_BIT_MASK(mask)); |
825 | // DMA_BIT_MASK(mask)); |
1119 | 826 | ||
1120 | if (intel_gtt_init() != 0) { |
827 | if (intel_gtt_init() != 0) { |
1121 | // intel_gmch_remove(); |
828 | // intel_gmch_remove(); |
1122 | 829 | ||
1123 | return 0; |
830 | return 0; |
1124 | } |
831 | } |
1125 | 832 | ||
1126 | return 1; |
833 | return 1; |
1127 | } |
834 | } |
1128 | EXPORT_SYMBOL(intel_gmch_probe); |
835 | EXPORT_SYMBOL(intel_gmch_probe); |
1129 | 836 | ||
1130 | const struct intel_gtt *intel_gtt_get(void) |
837 | struct intel_gtt *intel_gtt_get(void) |
1131 | { |
838 | { |
1132 | return &intel_private.base; |
839 | return &intel_private.base; |
1133 | } |
840 | } |
1134 | EXPORT_SYMBOL(intel_gtt_get); |
841 | EXPORT_SYMBOL(intel_gtt_get); |
1135 | 842 | ||
1136 | void intel_gtt_chipset_flush(void) |
843 | void intel_gtt_chipset_flush(void) |
1137 | { |
844 | { |
1138 | if (intel_private.driver->chipset_flush) |
845 | if (intel_private.driver->chipset_flush) |
1139 | intel_private.driver->chipset_flush(); |
846 | intel_private.driver->chipset_flush(); |
1140 | } |
847 | } |
1141 | EXPORT_SYMBOL(intel_gtt_chipset_flush); |
848 | EXPORT_SYMBOL(intel_gtt_chipset_flush); |
1142 | 849 | ||

//phys_addr_t get_bus_addr(void)
//{
//    return intel_private.gma_bus_addr;
//};