// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include
#include
#include
#include
#include
#include
#include
#include

#define PCI_DEVICE_ID_AMD_17H_ROOT		0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT		0x15d0
#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT		0x1480
#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT		0x1630
#define PCI_DEVICE_ID_AMD_17H_DF_F4		0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4	0x15ec
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4	0x1494
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4	0x144c
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4	0x1444
#define PCI_DEVICE_ID_AMD_19H_DF_F4		0x1654
#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4	0x166e

#ifndef topology_die_id
#define topology_die_id(cpu) ((void)(cpu), -1)
#endif

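/*
 * Minimal local PCI ID matching helpers. They follow the PCI core's
 * matching rules: a table field matches when it is PCI_ANY_ID or equal to
 * the device's value, and the class is compared under the entry's
 * class_mask.
 */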
const struct pci_device_id *pci_match_one_device(const struct pci_device_id *id, const struct pci_dev *dev)
{
	if ((id->vendor == PCI_ANY_ID || id->vendor == dev->vendor) &&
	    (id->device == PCI_ANY_ID || id->device == dev->device) &&
	    (id->subvendor == PCI_ANY_ID || id->subvendor == dev->subsystem_vendor) &&
	    (id->subdevice == PCI_ANY_ID || id->subdevice == dev->subsystem_device) &&
	    !((id->class ^ dev->class) & id->class_mask))
		return id;
	return NULL;
}

const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
					 struct pci_dev *dev)
{
	if (ids) {
		while (ids->vendor || ids->subvendor || ids->class_mask) {
			if (pci_match_one_device(ids, dev))
				return ids;
			ids++;
		}
	}
	return NULL;
}

/* Protect the PCI config register pairs used for SMN and DF indirect access. */
static DEFINE_MUTEX(smn_mutex);

static u32 *flush_words;

static const struct pci_device_id amd_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) },
	{}
};

#define PCI_DEVICE_ID_AMD_CNB17H_F4 0x1704

static const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
	{}
};

static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
	{}
};

static const struct pci_device_id hygon_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{}
};

static const struct pci_device_id hygon_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{}
};

static const struct pci_device_id hygon_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{}
};

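/*
 * Bus/device ranges scanned for northbridge devices; each entry is
 * { bus, device base, device limit } as laid out by
 * struct amd_nb_bus_dev_range.
 */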
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
	return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
	return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);

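/*
 * Walk the global PCI device list and return the next device matching one
 * of the given IDs; pass the previously returned device to continue a scan.
 */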
static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

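/*
 * SMN accesses go through an index/data register pair in the root
 * complex's PCI config space: the SMN address is written to offset 0x60
 * and the data is then read or written at offset 0x64.
 */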
static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{
	struct pci_dev *root;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	root = node_to_amd_nb(node)->root;
	/* printk("Northbridge PCI device %x:%x bus:%x devfn:%x\n",
		root->vendor,
		root->device,
		root->busnr,
		root->devfn);
	*/
	if (!root)
		goto out;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(root, 0x60, address);
	if (err) {
		pr_warn("Error programming SMN address 0x%x.\n", address);
		goto out_unlock;
	}

	err = (write ? pci_write_config_dword(root, 0x64, *value)
		     : pci_read_config_dword(root, 0x64, value));
	if (err)
		pr_warn("Error %s SMN address 0x%x.\n",
			(write ? "writing to" : "reading from"), address);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}

int amd_smn_read(u16 node, u32 address, u32 *value)
{
	return __amd_smn_rw(node, address, value, false);
}
EXPORT_SYMBOL_GPL(amd_smn_read);

int amd_smn_write(u16 node, u32 address, u32 value)
{
	return __amd_smn_rw(node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);

/*
 * Data Fabric Indirect Access uses FICAA/FICAD.
 *
 * Fabric Indirect Configuration Access Address (FICAA): Constructed based
 * on the device's Instance Id and the PCI function and register offset of
 * the desired register.
 *
 * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO
 * and FICAD HI registers but so far we only need the LO register.
 */
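/*
 * FICAA layout as constructed below: bit 0 is set, bits [9:2] hold the
 * dword-aligned register offset, bits [13:11] the PCI function, and bits
 * [23:16] the target Instance Id. The FICAA register lives at config
 * offset 0x5C of the link (F4) device and FICAD LO at 0x98.
 */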
int amd_df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
{
	struct pci_dev *F4;
	u32 ficaa;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	F4 = node_to_amd_nb(node)->link;
	if (!F4)
		goto out;

	ficaa = 1;
	ficaa |= reg & 0x3FC;
	ficaa |= (func & 0x7) << 11;
	ficaa |= instance_id << 16;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(F4, 0x5C, ficaa);
	if (err) {
		pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
		goto out_unlock;
	}

	err = pci_read_config_dword(F4, 0x98, lo);
	if (err)
		pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}
EXPORT_SYMBOL_GPL(amd_df_indirect_read);

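/*
 * Enumerate the per-node root/misc/link PCI devices and cache them in
 * amd_northbridges so later SMN, DF indirect and GART accesses can find
 * them.
 */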
int amd_cache_northbridges(void)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *link_ids = amd_nb_link_ids;
	const struct pci_device_id *root_ids = amd_root_ids;
	struct pci_dev *root, *misc, *link;
	struct amd_northbridge *nb;
	u16 roots_per_misc = 0;
	u16 misc_count = 0;
	u16 root_count = 0;
	u16 i, j;

	if (amd_northbridges.num)
		return 0;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
		root_ids = hygon_root_ids;
		misc_ids = hygon_nb_misc_ids;
		link_ids = hygon_nb_link_ids;
	}

	misc = NULL;
	while ((misc = next_northbridge(misc, misc_ids)) != NULL)
		misc_count++;

	if (!misc_count)
		return -ENODEV;

	root = NULL;
	while ((root = next_northbridge(root, root_ids)) != NULL)
		root_count++;

	if (root_count) {
		roots_per_misc = root_count / misc_count;

		/*
		 * There should be _exactly_ N roots for each DF/SMN
		 * interface.
		 */
		if (!roots_per_misc || (root_count % roots_per_misc)) {
			pr_info("Unsupported AMD DF/PCI configuration found\n");
			return -ENODEV;
		}
	}

	nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = misc_count;

	link = misc = root = NULL;
	for (i = 0; i < amd_northbridges.num; i++) {
		node_to_amd_nb(i)->root = root =
			next_northbridge(root, root_ids);
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, link_ids);

		/*
		 * If there are more PCI root devices than data fabric/
		 * system management network interfaces, then the (N)
		 * PCI roots per DF/SMN interface are functionally the
		 * same (for DF/SMN access) and N-1 are redundant. N-1
		 * PCI roots should be skipped per DF/SMN interface so
		 * the following DF/SMN interfaces get mapped to
		 * correct PCI roots.
		 */
		for (j = 1; j < roots_per_misc; j++)
			root = next_northbridge(root, root_ids);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9/* ||
	     boot_cpu_data.x86_stepping >= 0x1*/))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyways
 */
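/*
 * The argument packs the PCI vendor ID in the low 16 bits and the device
 * ID in the high 16 bits.
 */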
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return false;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
		misc_ids = hygon_nb_misc_ids;

	device >>= 16;
	for (id = misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

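/*
 * Build the MMCONFIG (ECAM) resource from MSR_FAM10H_MMIO_CONF_BASE: the
 * window starts at the base encoded in the MSR and spans 2^busrange buses
 * at 1 MiB of config space per bus.
 */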
struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned int segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return NULL;

	/* assume all cpus from fam10h have mmconfig */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK << FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL << (segn_busn_bits + 20)) - 1;
	return res;
}

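/*
 * L3 cache partitioning: register 0x1d4 of the link (F4) device holds the
 * subcache enable mask, four bits per core (cpu_core_id); register 0x1b8
 * of the misc (F3) device holds the BAN mode bits (0x180000).
 */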
int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(topology_die_id(cpu))->link;
	unsigned int mask;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
}

int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(topology_die_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).cpu_core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

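/*
 * Cache the GART flush-control word (misc device register 0x9c) of every
 * northbridge so amd_flush_garts() can later set bit 0 to trigger a flush.
 */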
static void amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	flush_words = KernelZeroAlloc(amd_northbridges.num * sizeof(u32));
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
		return;
	}

	for (i = 0; i != amd_northbridges.num; i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}

void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyways
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_northbridges.num; i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_northbridges.num; i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush. */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

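/*
 * Erratum 688 workaround: set bits 3 and 14 of MSR C001_1021 (IC_CFG) on
 * every CPU when the BIOS has not already applied the fix (F4 register
 * 0x164, bit 2).
 */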
static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

	e_msr_set_bit(MSR_AMD64_IC_CFG, 3);
	e_msr_set_bit(MSR_AMD64_IC_CFG, 14);
}

/* Apply erratum 688 fix so machines without a BIOS fix work. */
static __init void fix_erratum_688(void)
{
	struct pci_dev *F4;
	u32 val;

	if (boot_cpu_data.x86 != 0x14)
		return;

	if (!amd_northbridges.num)
		return;

	F4 = node_to_amd_nb(0)->link;
	if (!F4)
		return;

	if (pci_read_config_dword(F4, 0x164, &val))
		return;

	if (val & BIT(2))
		return;

	on_each_cpu(__fix_erratum_688, NULL, 0);

	pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}

__init int init_amd_nbs(void)
{
	amd_cache_northbridges();
	amd_cache_gart();

	fix_erratum_688();

	return 0;
}

/* This has to go after the PCI subsystem */
//fs_initcall(init_amd_nbs);