Rev 4389 | Rev 5354 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 4389 | Rev 5060 | ||
---|---|---|---|
/*
 * Intel GTT (Graphics Translation Table) routines
 *
 * Caveat: This driver implements the linux agp interface, but this is far from
 * a agp driver! GTT support ended up here for purely historical reasons: The
 * old userspace intel graphics drivers needed an interface to map memory into
 * the GTT. And the drm provides a default interface for graphic devices sitting
 * on an agp port. So it made sense to fake the GTT support as an agp port to
 * avoid having to create a new api.
 *
 * With gem this does not make much sense anymore, just needlessly complicates
 * the code. But as long as the old graphics stack is still supported, it's
 * stuck here.
 *
 * /fairy-tale-mode off
 */
17 | 17 | ||
18 | #include |
18 | #include |
19 | 19 | ||
20 | #include |
20 | #include |
21 | #include |
21 | #include |
22 | #include |
22 | #include |
23 | #include |
23 | #include |
24 | #include |
24 | #include |
25 | #include |
25 | #include |
26 | 26 | ||
27 | //#include |
27 | //#include |
28 | //#include |
28 | //#include |
29 | //#include |
29 | //#include |
30 | #include |
30 | #include |
31 | #include "agp.h" |
31 | #include "agp.h" |
32 | #include "intel-agp.h" |
32 | #include "intel-agp.h" |
33 | #include |
33 | #include |
34 | 34 | ||
35 | 35 | ||
36 | struct pci_dev * |
36 | struct pci_dev * |
37 | pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from); |
37 | pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from); |
38 | 38 | ||
39 | 39 | ||
40 | #define PCI_VENDOR_ID_INTEL 0x8086 |
40 | #define PCI_VENDOR_ID_INTEL 0x8086 |
41 | #define PCI_DEVICE_ID_INTEL_82830_HB 0x3575 |
41 | #define PCI_DEVICE_ID_INTEL_82830_HB 0x3575 |
42 | #define PCI_DEVICE_ID_INTEL_82845G_HB 0x2560 |
42 | #define PCI_DEVICE_ID_INTEL_82845G_HB 0x2560 |
43 | #define PCI_DEVICE_ID_INTEL_82915G_IG 0x2582 |
43 | #define PCI_DEVICE_ID_INTEL_82915G_IG 0x2582 |
44 | #define PCI_DEVICE_ID_INTEL_82915GM_IG 0x2592 |
44 | #define PCI_DEVICE_ID_INTEL_82915GM_IG 0x2592 |
45 | #define PCI_DEVICE_ID_INTEL_82945G_IG 0x2772 |
45 | #define PCI_DEVICE_ID_INTEL_82945G_IG 0x2772 |
46 | #define PCI_DEVICE_ID_INTEL_82945GM_IG 0x27A2 |
46 | #define PCI_DEVICE_ID_INTEL_82945GM_IG 0x27A2 |
47 | 47 | ||
48 | 48 | ||
49 | #define AGP_NORMAL_MEMORY 0 |
49 | #define AGP_NORMAL_MEMORY 0 |
50 | 50 | ||
51 | #define AGP_USER_TYPES (1 << 16) |
51 | #define AGP_USER_TYPES (1 << 16) |
52 | #define AGP_USER_MEMORY (AGP_USER_TYPES) |
52 | #define AGP_USER_MEMORY (AGP_USER_TYPES) |
53 | #define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1) |
53 | #define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1) |
54 | 54 | ||
55 | 55 | ||
56 | 56 | ||
57 | /* |
57 | /* |
58 | * If we have Intel graphics, we're not going to have anything other than |
58 | * If we have Intel graphics, we're not going to have anything other than |
59 | * an Intel IOMMU. So make the correct use of the PCI DMA API contingent |
59 | * an Intel IOMMU. So make the correct use of the PCI DMA API contingent |
60 | * on the Intel IOMMU support (CONFIG_INTEL_IOMMU). |
60 | * on the Intel IOMMU support (CONFIG_INTEL_IOMMU). |
61 | * Only newer chipsets need to bother with this, of course. |
61 | * Only newer chipsets need to bother with this, of course. |
62 | */ |
62 | */ |
63 | #ifdef CONFIG_INTEL_IOMMU |
63 | #ifdef CONFIG_INTEL_IOMMU |
64 | #define USE_PCI_DMA_API 1 |
64 | #define USE_PCI_DMA_API 1 |
65 | #else |
65 | #else |
66 | #define USE_PCI_DMA_API 0 |
66 | #define USE_PCI_DMA_API 0 |
67 | #endif |
67 | #endif |
68 | 68 | ||
69 | struct intel_gtt_driver { |
69 | struct intel_gtt_driver { |
70 | unsigned int gen : 8; |
70 | unsigned int gen : 8; |
71 | unsigned int is_g33 : 1; |
71 | unsigned int is_g33 : 1; |
72 | unsigned int is_pineview : 1; |
72 | unsigned int is_pineview : 1; |
73 | unsigned int is_ironlake : 1; |
73 | unsigned int is_ironlake : 1; |
74 | unsigned int has_pgtbl_enable : 1; |
74 | unsigned int has_pgtbl_enable : 1; |
75 | unsigned int dma_mask_size : 8; |
75 | unsigned int dma_mask_size : 8; |
76 | /* Chipset specific GTT setup */ |
76 | /* Chipset specific GTT setup */ |
77 | int (*setup)(void); |
77 | int (*setup)(void); |
78 | /* This should undo anything done in ->setup() save the unmapping |
78 | /* This should undo anything done in ->setup() save the unmapping |
79 | * of the mmio register file, that's done in the generic code. */ |
79 | * of the mmio register file, that's done in the generic code. */ |
80 | void (*cleanup)(void); |
80 | void (*cleanup)(void); |
81 | void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags); |
81 | void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags); |
82 | /* Flags is a more or less chipset specific opaque value. |
82 | /* Flags is a more or less chipset specific opaque value. |
83 | * For chipsets that need to support old ums (non-gem) code, this |
83 | * For chipsets that need to support old ums (non-gem) code, this |
84 | * needs to be identical to the various supported agp memory types! */ |
84 | * needs to be identical to the various supported agp memory types! */ |
85 | bool (*check_flags)(unsigned int flags); |
85 | bool (*check_flags)(unsigned int flags); |
86 | void (*chipset_flush)(void); |
86 | void (*chipset_flush)(void); |
87 | }; |
87 | }; |
88 | 88 | ||
89 | static struct _intel_private { |
89 | static struct _intel_private { |
90 | const struct intel_gtt_driver *driver; |
90 | const struct intel_gtt_driver *driver; |
91 | struct pci_dev *pcidev; /* device one */ |
91 | struct pci_dev *pcidev; /* device one */ |
92 | struct pci_dev *bridge_dev; |
92 | struct pci_dev *bridge_dev; |
93 | u8 __iomem *registers; |
93 | u8 __iomem *registers; |
94 | phys_addr_t gtt_bus_addr; |
94 | phys_addr_t gtt_phys_addr; |
95 | u32 PGETBL_save; |
95 | u32 PGETBL_save; |
96 | u32 __iomem *gtt; /* I915G */ |
96 | u32 __iomem *gtt; /* I915G */ |
97 | bool clear_fake_agp; /* on first access via agp, fill with scratch */ |
97 | bool clear_fake_agp; /* on first access via agp, fill with scratch */ |
98 | int num_dcache_entries; |
98 | int num_dcache_entries; |
99 | void __iomem *i9xx_flush_page; |
99 | void __iomem *i9xx_flush_page; |
100 | char *i81x_gtt_table; |
100 | char *i81x_gtt_table; |
101 | struct resource ifp_resource; |
101 | struct resource ifp_resource; |
102 | int resource_valid; |
102 | int resource_valid; |
103 | struct page *scratch_page; |
103 | struct page *scratch_page; |
104 | phys_addr_t scratch_page_dma; |
104 | phys_addr_t scratch_page_dma; |
105 | int refcount; |
105 | int refcount; |
106 | /* Whether i915 needs to use the dmar apis or not. */ |
106 | /* Whether i915 needs to use the dmar apis or not. */ |
107 | unsigned int needs_dmar : 1; |
107 | unsigned int needs_dmar : 1; |
108 | phys_addr_t gma_bus_addr; |
108 | phys_addr_t gma_bus_addr; |
109 | /* Size of memory reserved for graphics by the BIOS */ |
109 | /* Size of memory reserved for graphics by the BIOS */ |
110 | unsigned int stolen_size; |
110 | unsigned int stolen_size; |
111 | /* Total number of gtt entries. */ |
111 | /* Total number of gtt entries. */ |
112 | unsigned int gtt_total_entries; |
112 | unsigned int gtt_total_entries; |
113 | /* Part of the gtt that is mappable by the cpu, for those chips where |
113 | /* Part of the gtt that is mappable by the cpu, for those chips where |
114 | * this is not the full gtt. */ |
114 | * this is not the full gtt. */ |
115 | unsigned int gtt_mappable_entries; |
115 | unsigned int gtt_mappable_entries; |
116 | } intel_private; |
116 | } intel_private; |
117 | 117 | ||
118 | #define INTEL_GTT_GEN intel_private.driver->gen |
118 | #define INTEL_GTT_GEN intel_private.driver->gen |
119 | #define IS_G33 intel_private.driver->is_g33 |
119 | #define IS_G33 intel_private.driver->is_g33 |
120 | #define IS_PINEVIEW intel_private.driver->is_pineview |
120 | #define IS_PINEVIEW intel_private.driver->is_pineview |
121 | #define IS_IRONLAKE intel_private.driver->is_ironlake |
121 | #define IS_IRONLAKE intel_private.driver->is_ironlake |
122 | #define HAS_PGTBL_EN intel_private.driver->has_pgtbl_enable |
122 | #define HAS_PGTBL_EN intel_private.driver->has_pgtbl_enable |
123 | 123 | ||
124 | static int intel_gtt_setup_scratch_page(void) |
124 | static int intel_gtt_setup_scratch_page(void) |
125 | { |
125 | { |
126 | struct page *page; |
126 | struct page *page; |
127 | dma_addr_t dma_addr; |
127 | dma_addr_t dma_addr; |
128 | 128 | ||
129 | page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); |
129 | page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); |
130 | if (page == NULL) |
130 | if (page == NULL) |
131 | return -ENOMEM; |
131 | return -ENOMEM; |
132 | intel_private.scratch_page_dma = page_to_phys(page); |
132 | intel_private.scratch_page_dma = page_to_phys(page); |
133 | 133 | ||
134 | intel_private.scratch_page = page; |
134 | intel_private.scratch_page = page; |
135 | 135 | ||
136 | return 0; |
136 | return 0; |
137 | } |
137 | } |
138 | 138 | ||
139 | static unsigned int intel_gtt_stolen_size(void) |
139 | static unsigned int intel_gtt_stolen_size(void) |
140 | { |
140 | { |
141 | u16 gmch_ctrl; |
141 | u16 gmch_ctrl; |
142 | u8 rdct; |
142 | u8 rdct; |
143 | int local = 0; |
143 | int local = 0; |
144 | static const int ddt[4] = { 0, 16, 32, 64 }; |
144 | static const int ddt[4] = { 0, 16, 32, 64 }; |
145 | unsigned int stolen_size = 0; |
145 | unsigned int stolen_size = 0; |
146 | 146 | ||
147 | if (INTEL_GTT_GEN == 1) |
147 | if (INTEL_GTT_GEN == 1) |
148 | return 0; /* no stolen mem on i81x */ |
148 | return 0; /* no stolen mem on i81x */ |
149 | 149 | ||
150 | pci_read_config_word(intel_private.bridge_dev, |
150 | pci_read_config_word(intel_private.bridge_dev, |
151 | I830_GMCH_CTRL, &gmch_ctrl); |
151 | I830_GMCH_CTRL, &gmch_ctrl); |
152 | 152 | ||
153 | if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB || |
153 | if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB || |
154 | intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) { |
154 | intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) { |
155 | switch (gmch_ctrl & I830_GMCH_GMS_MASK) { |
155 | switch (gmch_ctrl & I830_GMCH_GMS_MASK) { |
156 | case I830_GMCH_GMS_STOLEN_512: |
156 | case I830_GMCH_GMS_STOLEN_512: |
157 | stolen_size = KB(512); |
157 | stolen_size = KB(512); |
158 | break; |
158 | break; |
159 | case I830_GMCH_GMS_STOLEN_1024: |
159 | case I830_GMCH_GMS_STOLEN_1024: |
160 | stolen_size = MB(1); |
160 | stolen_size = MB(1); |
161 | break; |
161 | break; |
162 | case I830_GMCH_GMS_STOLEN_8192: |
162 | case I830_GMCH_GMS_STOLEN_8192: |
163 | stolen_size = MB(8); |
163 | stolen_size = MB(8); |
164 | break; |
164 | break; |
165 | case I830_GMCH_GMS_LOCAL: |
165 | case I830_GMCH_GMS_LOCAL: |
166 | rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE); |
166 | rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE); |
167 | stolen_size = (I830_RDRAM_ND(rdct) + 1) * |
167 | stolen_size = (I830_RDRAM_ND(rdct) + 1) * |
168 | MB(ddt[I830_RDRAM_DDT(rdct)]); |
168 | MB(ddt[I830_RDRAM_DDT(rdct)]); |
169 | local = 1; |
169 | local = 1; |
170 | break; |
170 | break; |
171 | default: |
171 | default: |
172 | stolen_size = 0; |
172 | stolen_size = 0; |
173 | break; |
173 | break; |
174 | } |
174 | } |
175 | } else { |
175 | } else { |
176 | switch (gmch_ctrl & I855_GMCH_GMS_MASK) { |
176 | switch (gmch_ctrl & I855_GMCH_GMS_MASK) { |
177 | case I855_GMCH_GMS_STOLEN_1M: |
177 | case I855_GMCH_GMS_STOLEN_1M: |
178 | stolen_size = MB(1); |
178 | stolen_size = MB(1); |
179 | break; |
179 | break; |
180 | case I855_GMCH_GMS_STOLEN_4M: |
180 | case I855_GMCH_GMS_STOLEN_4M: |
181 | stolen_size = MB(4); |
181 | stolen_size = MB(4); |
182 | break; |
182 | break; |
183 | case I855_GMCH_GMS_STOLEN_8M: |
183 | case I855_GMCH_GMS_STOLEN_8M: |
184 | stolen_size = MB(8); |
184 | stolen_size = MB(8); |
185 | break; |
185 | break; |
186 | case I855_GMCH_GMS_STOLEN_16M: |
186 | case I855_GMCH_GMS_STOLEN_16M: |
187 | stolen_size = MB(16); |
187 | stolen_size = MB(16); |
188 | break; |
188 | break; |
189 | case I855_GMCH_GMS_STOLEN_32M: |
189 | case I855_GMCH_GMS_STOLEN_32M: |
190 | stolen_size = MB(32); |
190 | stolen_size = MB(32); |
191 | break; |
191 | break; |
192 | case I915_GMCH_GMS_STOLEN_48M: |
192 | case I915_GMCH_GMS_STOLEN_48M: |
193 | stolen_size = MB(48); |
193 | stolen_size = MB(48); |
194 | break; |
194 | break; |
195 | case I915_GMCH_GMS_STOLEN_64M: |
195 | case I915_GMCH_GMS_STOLEN_64M: |
196 | stolen_size = MB(64); |
196 | stolen_size = MB(64); |
197 | break; |
197 | break; |
198 | case G33_GMCH_GMS_STOLEN_128M: |
198 | case G33_GMCH_GMS_STOLEN_128M: |
199 | stolen_size = MB(128); |
199 | stolen_size = MB(128); |
200 | break; |
200 | break; |
201 | case G33_GMCH_GMS_STOLEN_256M: |
201 | case G33_GMCH_GMS_STOLEN_256M: |
202 | stolen_size = MB(256); |
202 | stolen_size = MB(256); |
203 | break; |
203 | break; |
204 | case INTEL_GMCH_GMS_STOLEN_96M: |
204 | case INTEL_GMCH_GMS_STOLEN_96M: |
205 | stolen_size = MB(96); |
205 | stolen_size = MB(96); |
206 | break; |
206 | break; |
207 | case INTEL_GMCH_GMS_STOLEN_160M: |
207 | case INTEL_GMCH_GMS_STOLEN_160M: |
208 | stolen_size = MB(160); |
208 | stolen_size = MB(160); |
209 | break; |
209 | break; |
210 | case INTEL_GMCH_GMS_STOLEN_224M: |
210 | case INTEL_GMCH_GMS_STOLEN_224M: |
211 | stolen_size = MB(224); |
211 | stolen_size = MB(224); |
212 | break; |
212 | break; |
213 | case INTEL_GMCH_GMS_STOLEN_352M: |
213 | case INTEL_GMCH_GMS_STOLEN_352M: |
214 | stolen_size = MB(352); |
214 | stolen_size = MB(352); |
215 | break; |
215 | break; |
216 | default: |
216 | default: |
217 | stolen_size = 0; |
217 | stolen_size = 0; |
218 | break; |
218 | break; |
219 | } |
219 | } |
220 | } |
220 | } |
221 | 221 | ||
222 | if (stolen_size > 0) { |
222 | if (stolen_size > 0) { |
223 | dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n", |
223 | dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n", |
224 | stolen_size / KB(1), local ? "local" : "stolen"); |
224 | stolen_size / KB(1), local ? "local" : "stolen"); |
225 | } else { |
225 | } else { |
226 | dev_info(&intel_private.bridge_dev->dev, |
226 | dev_info(&intel_private.bridge_dev->dev, |
227 | "no pre-allocated video memory detected\n"); |
227 | "no pre-allocated video memory detected\n"); |
228 | stolen_size = 0; |
228 | stolen_size = 0; |
229 | } |
229 | } |
230 | 230 | ||
231 | return stolen_size; |
231 | return stolen_size; |
232 | } |
232 | } |
233 | 233 | ||
234 | static void i965_adjust_pgetbl_size(unsigned int size_flag) |
234 | static void i965_adjust_pgetbl_size(unsigned int size_flag) |
235 | { |
235 | { |
236 | u32 pgetbl_ctl, pgetbl_ctl2; |
236 | u32 pgetbl_ctl, pgetbl_ctl2; |
237 | 237 | ||
238 | /* ensure that ppgtt is disabled */ |
238 | /* ensure that ppgtt is disabled */ |
239 | pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2); |
239 | pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2); |
240 | pgetbl_ctl2 &= ~I810_PGETBL_ENABLED; |
240 | pgetbl_ctl2 &= ~I810_PGETBL_ENABLED; |
241 | writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2); |
241 | writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2); |
242 | 242 | ||
243 | /* write the new ggtt size */ |
243 | /* write the new ggtt size */ |
244 | pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL); |
244 | pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL); |
245 | pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK; |
245 | pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK; |
246 | pgetbl_ctl |= size_flag; |
246 | pgetbl_ctl |= size_flag; |
247 | writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL); |
247 | writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL); |
248 | } |
248 | } |
249 | 249 | ||
250 | static unsigned int i965_gtt_total_entries(void) |
250 | static unsigned int i965_gtt_total_entries(void) |
251 | { |
251 | { |
252 | int size; |
252 | int size; |
253 | u32 pgetbl_ctl; |
253 | u32 pgetbl_ctl; |
254 | u16 gmch_ctl; |
254 | u16 gmch_ctl; |
255 | 255 | ||
256 | pci_read_config_word(intel_private.bridge_dev, |
256 | pci_read_config_word(intel_private.bridge_dev, |
257 | I830_GMCH_CTRL, &gmch_ctl); |
257 | I830_GMCH_CTRL, &gmch_ctl); |
258 | 258 | ||
259 | if (INTEL_GTT_GEN == 5) { |
259 | if (INTEL_GTT_GEN == 5) { |
260 | switch (gmch_ctl & G4x_GMCH_SIZE_MASK) { |
260 | switch (gmch_ctl & G4x_GMCH_SIZE_MASK) { |
261 | case G4x_GMCH_SIZE_1M: |
261 | case G4x_GMCH_SIZE_1M: |
262 | case G4x_GMCH_SIZE_VT_1M: |
262 | case G4x_GMCH_SIZE_VT_1M: |
263 | i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB); |
263 | i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB); |
264 | break; |
264 | break; |
265 | case G4x_GMCH_SIZE_VT_1_5M: |
265 | case G4x_GMCH_SIZE_VT_1_5M: |
266 | i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB); |
266 | i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB); |
267 | break; |
267 | break; |
268 | case G4x_GMCH_SIZE_2M: |
268 | case G4x_GMCH_SIZE_2M: |
269 | case G4x_GMCH_SIZE_VT_2M: |
269 | case G4x_GMCH_SIZE_VT_2M: |
270 | i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB); |
270 | i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB); |
271 | break; |
271 | break; |
272 | } |
272 | } |
273 | } |
273 | } |
274 | 274 | ||
275 | pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL); |
275 | pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL); |
276 | 276 | ||
277 | switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) { |
277 | switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) { |
278 | case I965_PGETBL_SIZE_128KB: |
278 | case I965_PGETBL_SIZE_128KB: |
279 | size = KB(128); |
279 | size = KB(128); |
280 | break; |
280 | break; |
281 | case I965_PGETBL_SIZE_256KB: |
281 | case I965_PGETBL_SIZE_256KB: |
282 | size = KB(256); |
282 | size = KB(256); |
283 | break; |
283 | break; |
284 | case I965_PGETBL_SIZE_512KB: |
284 | case I965_PGETBL_SIZE_512KB: |
285 | size = KB(512); |
285 | size = KB(512); |
286 | break; |
286 | break; |
287 | /* GTT pagetable sizes bigger than 512KB are not possible on G33! */ |
287 | /* GTT pagetable sizes bigger than 512KB are not possible on G33! */ |
288 | case I965_PGETBL_SIZE_1MB: |
288 | case I965_PGETBL_SIZE_1MB: |
289 | size = KB(1024); |
289 | size = KB(1024); |
290 | break; |
290 | break; |
291 | case I965_PGETBL_SIZE_2MB: |
291 | case I965_PGETBL_SIZE_2MB: |
292 | size = KB(2048); |
292 | size = KB(2048); |
293 | break; |
293 | break; |
294 | case I965_PGETBL_SIZE_1_5MB: |
294 | case I965_PGETBL_SIZE_1_5MB: |
295 | size = KB(1024 + 512); |
295 | size = KB(1024 + 512); |
296 | break; |
296 | break; |
297 | default: |
297 | default: |
298 | dev_info(&intel_private.pcidev->dev, |
298 | dev_info(&intel_private.pcidev->dev, |
299 | "unknown page table size, assuming 512KB\n"); |
299 | "unknown page table size, assuming 512KB\n"); |
300 | size = KB(512); |
300 | size = KB(512); |
301 | } |
301 | } |
302 | 302 | ||
303 | return size/4; |
303 | return size/4; |
304 | } |
304 | } |
305 | 305 | ||
306 | static unsigned int intel_gtt_total_entries(void) |
306 | static unsigned int intel_gtt_total_entries(void) |
307 | { |
307 | { |
308 | if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) |
308 | if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) |
309 | return i965_gtt_total_entries(); |
309 | return i965_gtt_total_entries(); |
310 | else { |
310 | else { |
311 | /* On previous hardware, the GTT size was just what was |
311 | /* On previous hardware, the GTT size was just what was |
312 | * required to map the aperture. |
312 | * required to map the aperture. |
313 | */ |
313 | */ |
314 | return intel_private.gtt_mappable_entries; |
314 | return intel_private.gtt_mappable_entries; |
315 | } |
315 | } |
316 | } |
316 | } |
317 | 317 | ||
318 | static unsigned int intel_gtt_mappable_entries(void) |
318 | static unsigned int intel_gtt_mappable_entries(void) |
319 | { |
319 | { |
320 | unsigned int aperture_size; |
320 | unsigned int aperture_size; |
321 | 321 | ||
322 | if (INTEL_GTT_GEN == 1) { |
322 | if (INTEL_GTT_GEN == 1) { |
323 | u32 smram_miscc; |
323 | u32 smram_miscc; |
324 | 324 | ||
325 | pci_read_config_dword(intel_private.bridge_dev, |
325 | pci_read_config_dword(intel_private.bridge_dev, |
326 | I810_SMRAM_MISCC, &smram_miscc); |
326 | I810_SMRAM_MISCC, &smram_miscc); |
327 | 327 | ||
328 | if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) |
328 | if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) |
329 | == I810_GFX_MEM_WIN_32M) |
329 | == I810_GFX_MEM_WIN_32M) |
330 | aperture_size = MB(32); |
330 | aperture_size = MB(32); |
331 | else |
331 | else |
332 | aperture_size = MB(64); |
332 | aperture_size = MB(64); |
333 | } else if (INTEL_GTT_GEN == 2) { |
333 | } else if (INTEL_GTT_GEN == 2) { |
334 | u16 gmch_ctrl; |
334 | u16 gmch_ctrl; |
335 | 335 | ||
336 | pci_read_config_word(intel_private.bridge_dev, |
336 | pci_read_config_word(intel_private.bridge_dev, |
337 | I830_GMCH_CTRL, &gmch_ctrl); |
337 | I830_GMCH_CTRL, &gmch_ctrl); |
338 | 338 | ||
339 | if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M) |
339 | if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M) |
340 | aperture_size = MB(64); |
340 | aperture_size = MB(64); |
341 | else |
341 | else |
342 | aperture_size = MB(128); |
342 | aperture_size = MB(128); |
343 | } else { |
343 | } else { |
344 | /* 9xx supports large sizes, just look at the length */ |
344 | /* 9xx supports large sizes, just look at the length */ |
345 | aperture_size = pci_resource_len(intel_private.pcidev, 2); |
345 | aperture_size = pci_resource_len(intel_private.pcidev, 2); |
346 | } |
346 | } |
347 | 347 | ||
348 | return aperture_size >> PAGE_SHIFT; |
348 | return aperture_size >> PAGE_SHIFT; |
349 | } |
349 | } |
350 | 350 | ||
/*
 * Release the scratch page allocated by intel_gtt_setup_scratch_page().
 *
 * NOTE(review): freeing is stubbed out in this port, so the scratch
 * page currently leaks on teardown — confirm whether that is intended.
 */
static void intel_gtt_teardown_scratch_page(void)
{
   // FreePage(intel_private.scratch_page_dma);
}
355 | 355 | ||
356 | static void intel_gtt_cleanup(void) |
356 | static void intel_gtt_cleanup(void) |
357 | { |
357 | { |
358 | intel_private.driver->cleanup(); |
358 | intel_private.driver->cleanup(); |
359 | 359 | ||
360 | iounmap(intel_private.gtt); |
360 | iounmap(intel_private.gtt); |
361 | iounmap(intel_private.registers); |
361 | iounmap(intel_private.registers); |
362 | 362 | ||
363 | intel_gtt_teardown_scratch_page(); |
363 | intel_gtt_teardown_scratch_page(); |
364 | } |
364 | } |
365 | 365 | ||
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline int needs_ilk_vtd_wa(void)
{
#ifdef CONFIG_INTEL_IOMMU
	const unsigned short gpu_devid = intel_private.pcidev->device;

	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
	     gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
	     intel_iommu_gfx_mapped)
		return 1;
#endif
	return 0;
}
384 | 384 | ||
385 | static bool intel_gtt_can_wc(void) |
385 | static bool intel_gtt_can_wc(void) |
386 | { |
386 | { |
387 | if (INTEL_GTT_GEN <= 2) |
387 | if (INTEL_GTT_GEN <= 2) |
388 | return false; |
388 | return false; |
389 | 389 | ||
390 | if (INTEL_GTT_GEN >= 6) |
390 | if (INTEL_GTT_GEN >= 6) |
391 | return false; |
391 | return false; |
392 | 392 | ||
393 | /* Reports of major corruption with ILK vt'd enabled */ |
393 | /* Reports of major corruption with ILK vt'd enabled */ |
394 | if (needs_ilk_vtd_wa()) |
394 | if (needs_ilk_vtd_wa()) |
395 | return false; |
395 | return false; |
396 | 396 | ||
397 | return true; |
397 | return true; |
398 | } |
398 | } |
399 | 399 | ||
400 | static int intel_gtt_init(void) |
400 | static int intel_gtt_init(void) |
401 | { |
401 | { |
402 | u32 gma_addr; |
- | |
403 | u32 gtt_map_size; |
402 | u32 gtt_map_size; |
404 | int ret; |
403 | int ret, bar; |
405 | 404 | ||
406 | ret = intel_private.driver->setup(); |
405 | ret = intel_private.driver->setup(); |
407 | if (ret != 0) |
406 | if (ret != 0) |
408 | return ret; |
407 | return ret; |
409 | 408 | ||
410 | intel_private.gtt_mappable_entries = intel_gtt_mappable_entries(); |
409 | intel_private.gtt_mappable_entries = intel_gtt_mappable_entries(); |
411 | intel_private.gtt_total_entries = intel_gtt_total_entries(); |
410 | intel_private.gtt_total_entries = intel_gtt_total_entries(); |
412 | 411 | ||
413 | /* save the PGETBL reg for resume */ |
412 | /* save the PGETBL reg for resume */ |
414 | intel_private.PGETBL_save = |
413 | intel_private.PGETBL_save = |
415 | readl(intel_private.registers+I810_PGETBL_CTL) |
414 | readl(intel_private.registers+I810_PGETBL_CTL) |
416 | & ~I810_PGETBL_ENABLED; |
415 | & ~I810_PGETBL_ENABLED; |
417 | /* we only ever restore the register when enabling the PGTBL... */ |
416 | /* we only ever restore the register when enabling the PGTBL... */ |
418 | if (HAS_PGTBL_EN) |
417 | if (HAS_PGTBL_EN) |
419 | intel_private.PGETBL_save |= I810_PGETBL_ENABLED; |
418 | intel_private.PGETBL_save |= I810_PGETBL_ENABLED; |
420 | 419 | ||
421 | dev_info(&intel_private.bridge_dev->dev, |
420 | dev_info(&intel_private.bridge_dev->dev, |
422 | "detected gtt size: %dK total, %dK mappable\n", |
421 | "detected gtt size: %dK total, %dK mappable\n", |
423 | intel_private.gtt_total_entries * 4, |
422 | intel_private.gtt_total_entries * 4, |
424 | intel_private.gtt_mappable_entries * 4); |
423 | intel_private.gtt_mappable_entries * 4); |
425 | 424 | ||
426 | gtt_map_size = intel_private.gtt_total_entries * 4; |
425 | gtt_map_size = intel_private.gtt_total_entries * 4; |
427 | 426 | ||
428 | intel_private.gtt = NULL; |
427 | intel_private.gtt = NULL; |
429 | if (intel_private.gtt == NULL) |
428 | if (intel_private.gtt == NULL) |
430 | intel_private.gtt = ioremap(intel_private.gtt_bus_addr, |
429 | intel_private.gtt = ioremap(intel_private.gtt_phys_addr, |
431 | gtt_map_size); |
430 | gtt_map_size); |
432 | if (intel_private.gtt == NULL) { |
431 | if (intel_private.gtt == NULL) { |
433 | intel_private.driver->cleanup(); |
432 | intel_private.driver->cleanup(); |
434 | iounmap(intel_private.registers); |
433 | iounmap(intel_private.registers); |
435 | return -ENOMEM; |
434 | return -ENOMEM; |
436 | } |
435 | } |
- | 436 | ||
437 | 437 | #if IS_ENABLED(CONFIG_AGP_INTEL) |
|
- | 438 | global_cache_flush(); /* FIXME: ? */ |
|
438 | asm volatile("wbinvd":::"memory"); |
439 | #endif |
439 | 440 | ||
440 | intel_private.stolen_size = intel_gtt_stolen_size(); |
441 | intel_private.stolen_size = intel_gtt_stolen_size(); |
441 | 442 | ||
442 | intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2; |
443 | intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2; |
443 | 444 | ||
444 | ret = intel_gtt_setup_scratch_page(); |
445 | ret = intel_gtt_setup_scratch_page(); |
445 | if (ret != 0) { |
446 | if (ret != 0) { |
446 | intel_gtt_cleanup(); |
447 | intel_gtt_cleanup(); |
447 | return ret; |
448 | return ret; |
448 | } |
449 | } |
449 | 450 | ||
450 | if (INTEL_GTT_GEN <= 2) |
451 | if (INTEL_GTT_GEN <= 2) |
451 | pci_read_config_dword(intel_private.pcidev, I810_GMADDR, |
- | |
452 | &gma_addr); |
452 | bar = I810_GMADR_BAR; |
453 | else |
453 | else |
454 | pci_read_config_dword(intel_private.pcidev, I915_GMADDR, |
- | |
455 | &gma_addr); |
454 | bar = I915_GMADR_BAR; |
456 | - | ||
457 | intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK); |
- | |
458 | - | ||
- | 455 | ||
459 | 456 | intel_private.gma_bus_addr = pci_bus_address(intel_private.pcidev, bar); |
|
460 | return 0; |
457 | return 0; |
461 | } |
458 | } |
- | 459 | ||
462 | 460 | ||
463 | static void i830_write_entry(dma_addr_t addr, unsigned int entry, |
461 | static void i830_write_entry(dma_addr_t addr, unsigned int entry, |
464 | unsigned int flags) |
462 | unsigned int flags) |
465 | { |
463 | { |
466 | u32 pte_flags = I810_PTE_VALID; |
464 | u32 pte_flags = I810_PTE_VALID; |
467 | 465 | ||
468 | if (flags == AGP_USER_CACHED_MEMORY) |
466 | if (flags == AGP_USER_CACHED_MEMORY) |
469 | pte_flags |= I830_PTE_SYSTEM_CACHED; |
467 | pte_flags |= I830_PTE_SYSTEM_CACHED; |
470 | 468 | ||
471 | writel(addr | pte_flags, intel_private.gtt + entry); |
469 | writel(addr | pte_flags, intel_private.gtt + entry); |
472 | } |
470 | } |
473 | 471 | ||
/*
 * Enable the GTT: restore the saved PGETBL_CTL value and, on gen2
 * chipsets, set the GMCH enable bit in bridge config space first.
 *
 * Returns false if the hardware does not report the enable bits back
 * after writing them.
 */
bool intel_enable_gtt(void)
{
	u8 __iomem *reg;

	if (INTEL_GTT_GEN == 2) {
		u16 gmch_ctrl;

		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);
		gmch_ctrl |= I830_GMCH_ENABLED;
		pci_write_config_word(intel_private.bridge_dev,
				      I830_GMCH_CTRL, gmch_ctrl);

		/* Read back to verify the enable bit actually stuck. */
		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);
		if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
			dev_err(&intel_private.pcidev->dev,
				"failed to enable the GTT: GMCH_CTRL=%x\n",
				gmch_ctrl);
			return false;
		}
	}

	/* On the resume path we may be adjusting the PGTBL value, so
	 * be paranoid and flush all chipset write buffers...
	 */
	if (INTEL_GTT_GEN >= 3)
		writel(0, intel_private.registers+GFX_FLSH_CNTL);

	reg = intel_private.registers+I810_PGETBL_CTL;
	writel(intel_private.PGETBL_save, reg);
	if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
		dev_err(&intel_private.pcidev->dev,
			"failed to enable the GTT: PGETBL=%x [expected %x]\n",
			readl(reg), intel_private.PGETBL_save);
		return false;
	}

	/* Flush chipset write buffers again after re-enabling. */
	if (INTEL_GTT_GEN >= 3)
		writel(0, intel_private.registers+GFX_FLSH_CNTL);

	return true;
}
517 | 515 | ||
518 | static bool i830_check_flags(unsigned int flags) |
516 | static bool i830_check_flags(unsigned int flags) |
519 | { |
517 | { |
520 | switch (flags) { |
518 | switch (flags) { |
521 | case 0: |
519 | case 0: |
522 | case AGP_PHYS_MEMORY: |
520 | case AGP_PHYS_MEMORY: |
523 | case AGP_USER_CACHED_MEMORY: |
521 | case AGP_USER_CACHED_MEMORY: |
524 | case AGP_USER_MEMORY: |
522 | case AGP_USER_MEMORY: |
525 | return true; |
523 | return true; |
526 | } |
524 | } |
527 | 525 | ||
528 | return false; |
526 | return false; |
529 | } |
527 | } |
530 | 528 | ||
/*
 * Write one GTT PTE for every page covered by the scatterlist @st,
 * starting at GTT slot @pg_start.  @flags is the AGP memory type and
 * is passed through to the chipset-specific write_entry() hook.
 */
void intel_gtt_insert_sg_entries(struct sg_table *st,
				 unsigned int pg_start,
				 unsigned int flags)
{
	struct scatterlist *sg;
	unsigned int len, m;
	int i, j;

	j = pg_start;

	/* sg may merge pages, but we have to separate
	 * per-page addr for GTT */
	for_each_sg(st->sgl, sg, st->nents, i) {
		len = sg_dma_len(sg) >> PAGE_SHIFT;
		for (m = 0; m < len; m++) {
			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
			intel_private.driver->write_entry(addr, j, flags);
			j++;
		}
	}
	/* Posting read of the last written PTE flushes the writes. */
	readl(intel_private.gtt+j-1);
}
EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
- | 552 | ||
554 | 553 | #if IS_ENABLED(CONFIG_AGP_INTEL) |
|
555 | static void intel_gtt_insert_pages(unsigned int first_entry, |
554 | static void intel_gtt_insert_pages(unsigned int first_entry, |
556 | unsigned int num_entries, |
555 | unsigned int num_entries, |
557 | struct page **pages, |
556 | struct page **pages, |
558 | unsigned int flags) |
557 | unsigned int flags) |
559 | { |
558 | { |
560 | int i, j; |
559 | int i, j; |
561 | 560 | ||
562 | for (i = 0, j = first_entry; i < num_entries; i++, j++) { |
561 | for (i = 0, j = first_entry; i < num_entries; i++, j++) { |
563 | dma_addr_t addr = page_to_phys(pages[i]); |
562 | dma_addr_t addr = page_to_phys(pages[i]); |
564 | intel_private.driver->write_entry(addr, |
563 | intel_private.driver->write_entry(addr, |
565 | j, flags); |
564 | j, flags); |
566 | } |
565 | } |
567 | readl(intel_private.gtt+j-1); |
566 | readl(intel_private.gtt+j-1); |
568 | } |
567 | } |
- | 568 | ||
/*
 * Fake-AGP backend: bind an agp_memory chunk into the GTT at page
 * offset @pg_start.
 *
 * Returns 0 on success, -EINVAL on a range/type/flag mismatch, or the
 * error from intel_gtt_map_memory() when DMA remapping is needed.
 */
static int intel_fake_agp_insert_entries(struct agp_memory *mem,
					 off_t pg_start, int type)
{
	int ret = -EINVAL;

	if (intel_private.clear_fake_agp) {
		/* First use: scrub every slot between the end of stolen
		 * memory and the end of the mappable aperture. */
		int start = intel_private.stolen_size / PAGE_SIZE;
		int end = intel_private.gtt_mappable_entries;
		intel_gtt_clear_range(start, end - start);
		intel_private.clear_fake_agp = false;
	}

	/* i810 display cache memory takes a dedicated path. */
	if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
		return i810_insert_dcache_entries(mem, pg_start, type);

	if (mem->page_count == 0)
		goto out;

	if (pg_start + mem->page_count > intel_private.gtt_total_entries)
		goto out_err;

	if (type != mem->type)
		goto out_err;

	if (!intel_private.driver->check_flags(type))
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	if (intel_private.needs_dmar) {
		struct sg_table st;

		ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
		if (ret != 0)
			return ret;

		intel_gtt_insert_sg_entries(&st, pg_start, type);
		mem->sg_list = st.sgl;
		mem->num_sg = st.nents;
	} else
		intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
				       type);

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}
569 | 619 | #endif |
|
570 | 620 | ||
571 | void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries) |
621 | void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries) |
572 | { |
622 | { |
573 | unsigned int i; |
623 | unsigned int i; |
574 | 624 | ||
575 | for (i = first_entry; i < (first_entry + num_entries); i++) { |
625 | for (i = first_entry; i < (first_entry + num_entries); i++) { |
576 | intel_private.driver->write_entry(intel_private.scratch_page_dma, |
626 | intel_private.driver->write_entry(intel_private.scratch_page_dma, |
577 | i, 0); |
627 | i, 0); |
578 | } |
628 | } |
579 | readl(intel_private.gtt+i-1); |
629 | readl(intel_private.gtt+i-1); |
580 | } |
630 | } |
581 | static void intel_i915_setup_chipset_flush(void) |
631 | static void intel_i915_setup_chipset_flush(void) |
582 | { |
632 | { |
583 | int ret; |
633 | int ret; |
584 | u32 temp; |
634 | u32 temp; |
585 | 635 | ||
586 | pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp); |
636 | pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp); |
587 | if (!(temp & 0x1)) { |
637 | if (!(temp & 0x1)) { |
588 | // intel_alloc_chipset_flush_resource(); |
638 | // intel_alloc_chipset_flush_resource(); |
589 | // intel_private.resource_valid = 1; |
639 | // intel_private.resource_valid = 1; |
590 | // pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); |
640 | // pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); |
591 | } else { |
641 | } else { |
592 | temp &= ~1; |
642 | temp &= ~1; |
593 | 643 | ||
594 | intel_private.resource_valid = 1; |
644 | intel_private.resource_valid = 1; |
595 | intel_private.ifp_resource.start = temp; |
645 | intel_private.ifp_resource.start = temp; |
596 | intel_private.ifp_resource.end = temp + PAGE_SIZE; |
646 | intel_private.ifp_resource.end = temp + PAGE_SIZE; |
597 | // ret = request_resource(&iomem_resource, &intel_private.ifp_resource); |
647 | // ret = request_resource(&iomem_resource, &intel_private.ifp_resource); |
598 | /* some BIOSes reserve this area in a pnp some don't */ |
648 | /* some BIOSes reserve this area in a pnp some don't */ |
599 | // if (ret) |
649 | // if (ret) |
600 | // intel_private.resource_valid = 0; |
650 | // intel_private.resource_valid = 0; |
601 | } |
651 | } |
602 | } |
652 | } |
603 | 653 | ||
604 | static void intel_i965_g33_setup_chipset_flush(void) |
654 | static void intel_i965_g33_setup_chipset_flush(void) |
605 | { |
655 | { |
606 | u32 temp_hi, temp_lo; |
656 | u32 temp_hi, temp_lo; |
607 | int ret; |
657 | int ret; |
608 | 658 | ||
609 | pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi); |
659 | pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi); |
610 | pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo); |
660 | pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo); |
611 | 661 | ||
612 | if (!(temp_lo & 0x1)) { |
662 | if (!(temp_lo & 0x1)) { |
613 | 663 | ||
614 | // intel_alloc_chipset_flush_resource(); |
664 | // intel_alloc_chipset_flush_resource(); |
615 | 665 | ||
616 | // intel_private.resource_valid = 1; |
666 | // intel_private.resource_valid = 1; |
617 | // pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, |
667 | // pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, |
618 | // upper_32_bits(intel_private.ifp_resource.start)); |
668 | // upper_32_bits(intel_private.ifp_resource.start)); |
619 | // pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); |
669 | // pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); |
620 | } else { |
670 | } else { |
621 | u64 l64; |
671 | u64 l64; |
622 | 672 | ||
623 | temp_lo &= ~0x1; |
673 | temp_lo &= ~0x1; |
624 | l64 = ((u64)temp_hi << 32) | temp_lo; |
674 | l64 = ((u64)temp_hi << 32) | temp_lo; |
625 | 675 | ||
626 | intel_private.resource_valid = 1; |
676 | intel_private.resource_valid = 1; |
627 | intel_private.ifp_resource.start = l64; |
677 | intel_private.ifp_resource.start = l64; |
628 | intel_private.ifp_resource.end = l64 + PAGE_SIZE; |
678 | intel_private.ifp_resource.end = l64 + PAGE_SIZE; |
629 | // ret = request_resource(&iomem_resource, &intel_private.ifp_resource); |
679 | // ret = request_resource(&iomem_resource, &intel_private.ifp_resource); |
630 | /* some BIOSes reserve this area in a pnp some don't */ |
680 | /* some BIOSes reserve this area in a pnp some don't */ |
631 | // if (ret) |
681 | // if (ret) |
632 | // intel_private.resource_valid = 0; |
682 | // intel_private.resource_valid = 0; |
633 | } |
683 | } |
634 | } |
684 | } |
635 | 685 | ||
/*
 * One-time setup of the chipset flush page for gen3+ parts.  Picks the
 * i965/G33 or i915 discovery routine, then maps the flush page so
 * i9xx_chipset_flush() can poke it.
 */
static void intel_i9xx_setup_flush(void)
{
	/* return if already configured */
	if (intel_private.ifp_resource.start)
		return;

	/* gen6 does not use a chipset flush page. */
	if (INTEL_GTT_GEN == 6)
		return;

	/* setup a resource for this object */
	intel_private.ifp_resource.name = "Intel Flush Page";
	intel_private.ifp_resource.flags = IORESOURCE_MEM;

	/* Setup chipset flush for 915 */
	if (IS_G33 || INTEL_GTT_GEN >= 4) {
		intel_i965_g33_setup_chipset_flush();
	} else {
		intel_i915_setup_chipset_flush();
	}

	/* Map the discovered page; warn (non-fatal) if either step failed. */
	if (intel_private.ifp_resource.start)
		intel_private.i9xx_flush_page = ioremap(intel_private.ifp_resource.start, PAGE_SIZE);
	if (!intel_private.i9xx_flush_page)
		dev_err(&intel_private.pcidev->dev,
			"can't ioremap flush page - no chipset flushing\n");
}
662 | 712 | ||
/*
 * Undo intel_i9xx_setup_flush(): unmap the flush page and reset the
 * resource bookkeeping.  (Resource release itself is disabled in this
 * port, matching the commented-out request_resource() in setup.)
 */
static void i9xx_cleanup(void)
{
	if (intel_private.i9xx_flush_page)
		iounmap(intel_private.i9xx_flush_page);
//	if (intel_private.resource_valid)
//		release_resource(&intel_private.ifp_resource);
	intel_private.ifp_resource.start = 0;
	intel_private.resource_valid = 0;
}
672 | 722 | ||
/*
 * Flush chipset write buffers by writing to the dedicated flush page,
 * if one was successfully mapped during setup.
 */
static void i9xx_chipset_flush(void)
{
	if (intel_private.i9xx_flush_page)
		writel(1, intel_private.i9xx_flush_page);
}
678 | 728 | ||
/*
 * Write a single gen4+ GTT PTE.  Gen4-style PTEs carry the high bits
 * of the physical address (above bit 31) in bits 4-7 of the dword,
 * hence the shift-down before the register write.
 */
static void i965_write_entry(dma_addr_t addr,
			     unsigned int entry,
			     unsigned int flags)
{
	u32 pte_flags;

	pte_flags = I810_PTE_VALID;
	if (flags == AGP_USER_CACHED_MEMORY)
		pte_flags |= I830_PTE_SYSTEM_CACHED;

	/* Shift high bits down */
	addr |= (addr >> 28) & 0xf0;
	writel(addr | pte_flags, intel_private.gtt + entry);
}
693 | 743 | ||
/*
 * Map the MMIO register BAR and determine the physical location of the
 * GTT for the gen3+ chipsets handled by this file.
 *
 * Returns 0 on success or -ENOMEM if the register BAR cannot be
 * mapped.
 */
static int i9xx_setup(void)
{
	phys_addr_t reg_addr;
	int size = KB(512);

	reg_addr = pci_resource_start(intel_private.pcidev, I915_MMADR_BAR);

	intel_private.registers = ioremap(reg_addr, size);
	if (!intel_private.registers)
		return -ENOMEM;

	switch (INTEL_GTT_GEN) {
	case 3:
		/* gen3 exposes the GTT through its own PCI BAR. */
		intel_private.gtt_phys_addr =
			pci_resource_start(intel_private.pcidev, I915_PTE_BAR);
		break;
	case 5:
		/* gen5 (Ironlake): GTT lives 2MB into the register BAR. */
		intel_private.gtt_phys_addr = reg_addr + MB(2);
		break;
	default:
		/* other gens: GTT lives 512KB into the register BAR. */
		intel_private.gtt_phys_addr = reg_addr + KB(512);
		break;
	}

	intel_i9xx_setup_flush();

	return 0;
}
725 | 772 | ||
/* Per-generation driver vtables.  All i9xx-class parts share the same
 * setup/cleanup/flush hooks and differ mainly in GTT generation, PTE
 * writer and DMA mask width. */
static const struct intel_gtt_driver i915_gtt_driver = {
	.gen = 3,
	.has_pgtbl_enable = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	/* i945 is the last gpu to need phys mem (for overlay and cursors). */
	.write_entry = i830_write_entry,
	.dma_mask_size = 32,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g33_gtt_driver = {
	.gen = 3,
	.is_g33 = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver pineview_gtt_driver = {
	.gen = 3,
	/* Pineview is a G33 derivative. */
	.is_pineview = 1, .is_g33 = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver i965_gtt_driver = {
	.gen = 4,
	.has_pgtbl_enable = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g4x_gtt_driver = {
	.gen = 5,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver ironlake_gtt_driver = {
	.gen = 5,
	.is_ironlake = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
786 | 833 | ||
/* Table to describe Intel GMCH and AGP/PCIE GART drivers.  At least one of
 * driver and gmch_driver must be non-null, and find_gmch will determine
 * which one should be used if a gmch_chip_id is present.
 */
static const struct intel_gtt_driver_description {
	unsigned int gmch_chip_id;	/* PCI device id of the IGD function */
	char *name;			/* human-readable chipset name */
	const struct intel_gtt_driver *gtt_driver;
} intel_gtt_chipsets[] = {
	{ PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G33_IG, "G33",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
		&pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
		&pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_IG, "B43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G41_IG, "G41",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
	    "HD Graphics", &ironlake_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
	    "HD Graphics", &ironlake_gtt_driver },
	{ 0, NULL, NULL }	/* sentinel */
};
850 | 897 | ||
/*
 * Look up the IGD PCI device with the given device id and stash it in
 * intel_private.pcidev.  If the first match is not PCI function 0,
 * continue the search from it (some boards expose a second function).
 *
 * Returns 1 if a device was found, 0 otherwise.
 */
static int find_gmch(u16 device)
{
	struct pci_dev *gmch_device;

	gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
	if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
		gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
					     device, gmch_device);
	}

	if (!gmch_device)
		return 0;

	/* NOTE(review): the reference taken by pci_get_device() is kept
	 * for the lifetime of the driver via intel_private.pcidev. */
	intel_private.pcidev = gmch_device;
	return 1;
}
867 | 914 | ||
/*
 * Probe and initialise the GTT for the GMCH behind @bridge_pdev.
 * Matches the IGD against intel_gtt_chipsets[], optionally wires up the
 * fake-AGP @bridge, and runs the chipset setup via intel_gtt_init().
 *
 * Returns 1 on success (or when already initialised, with the refcount
 * bumped), 0 on failure.
 *
 * NOTE(review): @gpu_pdev is unused in this port; the IGD is always
 * rediscovered through find_gmch() — confirm against upstream callers.
 */
int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
		     struct agp_bridge_data *bridge)
{
	int i, mask;

	/*
	 * Can be called from the fake agp driver but also directly from
	 * drm/i915.ko. Hence we need to check whether everything is set up
	 * already.
	 */
	if (intel_private.driver) {
		intel_private.refcount++;
		return 1;
	}


	/* Match the IGD against the chipset table. */
	for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
		if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
			intel_private.driver =
				intel_gtt_chipsets[i].gtt_driver;
			break;
		}
	}

	if (!intel_private.driver)
		return 0;

	intel_private.refcount++;

#if IS_ENABLED(CONFIG_AGP_INTEL)
	if (bridge) {
		bridge->driver = &intel_fake_agp_driver;
		bridge->dev_private_data = &intel_private;
		bridge->dev = bridge_pdev;
	}
#endif

	intel_private.bridge_dev = bridge_pdev;

	dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);

	/* DMA-mask configuration is disabled in this port. */
	mask = intel_private.driver->dma_mask_size;
//	if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
//		dev_err(&intel_private.pcidev->dev,
//			"set gfx device dma mask %d-bit failed!\n", mask);
//	else
//		pci_set_consistent_dma_mask(intel_private.pcidev,
//					    DMA_BIT_MASK(mask));

	if (intel_gtt_init() != 0) {
//		intel_gmch_remove();

		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(intel_gmch_probe);
923 | 973 | ||
924 | void intel_gtt_get(size_t *gtt_total, size_t *stolen_size, |
974 | void intel_gtt_get(size_t *gtt_total, size_t *stolen_size, |
925 | phys_addr_t *mappable_base, unsigned long *mappable_end) |
975 | phys_addr_t *mappable_base, unsigned long *mappable_end) |
926 | { |
976 | { |
927 | *gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT; |
977 | *gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT; |
928 | *stolen_size = intel_private.stolen_size; |
978 | *stolen_size = intel_private.stolen_size; |
929 | *mappable_base = intel_private.gma_bus_addr; |
979 | *mappable_base = intel_private.gma_bus_addr; |
930 | *mappable_end = intel_private.gtt_mappable_entries << PAGE_SHIFT; |
980 | *mappable_end = intel_private.gtt_mappable_entries << PAGE_SHIFT; |
931 | } |
981 | } |
932 | EXPORT_SYMBOL(intel_gtt_get); |
982 | EXPORT_SYMBOL(intel_gtt_get); |
933 | 983 | ||
934 | void intel_gtt_chipset_flush(void) |
984 | void intel_gtt_chipset_flush(void) |
935 | { |
985 | { |
936 | if (intel_private.driver->chipset_flush) |
986 | if (intel_private.driver->chipset_flush) |
937 | intel_private.driver->chipset_flush(); |
987 | intel_private.driver->chipset_flush(); |
938 | } |
988 | } |
939 | EXPORT_SYMBOL(intel_gtt_chipset_flush); |
989 | EXPORT_SYMBOL(intel_gtt_chipset_flush); |
940 | 990 | ||
941 | 991 | ||
942 | MODULE_AUTHOR("Dave Jones |
992 | MODULE_AUTHOR("Dave Jones |
943 | MODULE_LICENSE("GPL and additional rights");><>><>><>>>><>>=>=>><> |
993 | MODULE_LICENSE("GPL and additional rights");><>><>><>>>><>>=>=>><> |