Rev 9152 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 9152 | Rev 9827 | ||
---|---|---|---|
1 | // SPDX-License-Identifier: GPL-2.0-only |
1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
2 | /* |
3 | * Shared support code for AMD K8 northbridges and derivatives. |
3 | * Shared support code for AMD K8 northbridges and derivatives. |
4 | * Copyright 2006 Andi Kleen, SUSE Labs. |
4 | * Copyright 2006 Andi Kleen, SUSE Labs. |
5 | */ |
5 | */ |
6 | 6 | ||
7 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
7 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
8 | 8 | ||
9 | #include |
9 | #include |
10 | #include |
10 | #include |
11 | #include |
11 | #include |
12 | #include |
12 | #include |
13 | #include |
13 | #include |
14 | #include |
14 | #include |
15 | #include |
15 | #include |
16 | #include |
16 | #include |
17 | 17 | ||
18 | 18 | ||
/*
 * PCI device IDs for AMD family 17h/19h root complexes and Data Fabric
 * function-4 devices that are not provided by the shared pci_ids header.
 */
#define PCI_DEVICE_ID_AMD_17H_ROOT 0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT 0x15d0
#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT 0x1480
#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT 0x1630
#define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4 0x144c
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
#define PCI_DEVICE_ID_AMD_19H_DF_F4 0x1654
#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4 0x166e

/*
 * Fallback when the topology API does not provide topology_die_id():
 * evaluates @cpu only for its side effects and yields -1 ("no die").
 * NOTE(review): callers that index by this value must reject -1.
 */
#ifndef topology_die_id
#define topology_die_id(cpu) ((void)(cpu), -1)
#endif
34 | 34 | ||
35 | const struct pci_device_id *pci_match_one_device(const struct pci_device_id *id, const struct pci_dev *dev) |
35 | const struct pci_device_id *pci_match_one_device(const struct pci_device_id *id, const struct pci_dev *dev) |
36 | { |
36 | { |
37 | if ((id->vendor == PCI_ANY_ID || id->vendor == dev->vendor) && |
37 | if ((id->vendor == PCI_ANY_ID || id->vendor == dev->vendor) && |
38 | (id->device == PCI_ANY_ID || id->device == dev->device) && |
38 | (id->device == PCI_ANY_ID || id->device == dev->device) && |
39 | (id->subvendor == PCI_ANY_ID || id->subvendor == dev->subsystem_vendor) && |
39 | (id->subvendor == PCI_ANY_ID || id->subvendor == dev->subsystem_vendor) && |
40 | (id->subdevice == PCI_ANY_ID || id->subdevice == dev->subsystem_device) && |
40 | (id->subdevice == PCI_ANY_ID || id->subdevice == dev->subsystem_device) && |
41 | !((id->class ^ dev->class) & id->class_mask)) |
41 | !((id->class ^ dev->class) & id->class_mask)) |
42 | return id; |
42 | return id; |
43 | return NULL; |
43 | return NULL; |
44 | } |
44 | } |
45 | 45 | ||
46 | const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, |
46 | const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, |
47 | struct pci_dev *dev) |
47 | struct pci_dev *dev) |
48 | { |
48 | { |
49 | if (ids) { |
49 | if (ids) { |
50 | while (ids->vendor || ids->subvendor || ids->class_mask) { |
50 | while (ids->vendor || ids->subvendor || ids->class_mask) { |
51 | if (pci_match_one_device(ids, dev)) |
51 | if (pci_match_one_device(ids, dev)) |
52 | return ids; |
52 | return ids; |
53 | ids++; |
53 | ids++; |
54 | } |
54 | } |
55 | } |
55 | } |
56 | return NULL; |
56 | return NULL; |
57 | } |
57 | } |
58 | 58 | ||
59 | 59 | ||
/* Protect the PCI config register pairs used for SMN and DF indirect access. */
static DEFINE_MUTEX(smn_mutex);

/* Per-northbridge cached GART flush words (misc device reg 0x9c),
 * allocated by amd_cache_gart(). */
static u32 *flush_words;
64 | 64 | ||
/* Root complexes exposing the SMN index/data pair (families 17h and up). */
static const struct pci_device_id amd_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) },
	{}
};
72 | 72 | ||
#define PCI_DEVICE_ID_AMD_CNB17H_F4 0x1704

/*
 * Northbridge "misc" devices: function 3 on K8..16h, DF function 3 on
 * 17h and later.  Used for GART flush words and L3 feature registers.
 */
static const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
	{}
};
94 | 94 | ||
/*
 * Northbridge "link" devices: function 4 (HT link / DF F4).  Used for
 * L3 partitioning (reg 0x1d4) and DF indirect access (FICAA/FICAD).
 */
static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
	{}
};
111 | 111 | ||
/* Hygon Dhyana reuses the AMD family-17h device IDs under its own vendor ID. */
static const struct pci_device_id hygon_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{}
};

static const struct pci_device_id hygon_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{}
};

static const struct pci_device_id hygon_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{}
};
126 | 126 | ||
/*
 * Bus/device ranges to scan for northbridge devices during early setup:
 * { bus, first device slot, one past last slot }.
 */
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

/* Global cache of discovered northbridges; filled by amd_cache_northbridges(). */
static struct amd_northbridge_info amd_northbridges;
135 | 135 | ||
136 | u16 amd_nb_num(void) |
136 | u16 amd_nb_num(void) |
137 | { |
137 | { |
138 | return amd_northbridges.num; |
138 | return amd_northbridges.num; |
139 | } |
139 | } |
140 | EXPORT_SYMBOL_GPL(amd_nb_num); |
140 | EXPORT_SYMBOL_GPL(amd_nb_num); |
141 | 141 | ||
142 | bool amd_nb_has_feature(unsigned int feature) |
142 | bool amd_nb_has_feature(unsigned int feature) |
143 | { |
143 | { |
144 | return ((amd_northbridges.flags & feature) == feature); |
144 | return ((amd_northbridges.flags & feature) == feature); |
145 | } |
145 | } |
146 | EXPORT_SYMBOL_GPL(amd_nb_has_feature); |
146 | EXPORT_SYMBOL_GPL(amd_nb_has_feature); |
147 | 147 | ||
148 | struct amd_northbridge *node_to_amd_nb(int node) |
148 | struct amd_northbridge *node_to_amd_nb(int node) |
149 | { |
149 | { |
150 | return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL; |
150 | return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL; |
151 | } |
151 | } |
152 | EXPORT_SYMBOL_GPL(node_to_amd_nb); |
152 | EXPORT_SYMBOL_GPL(node_to_amd_nb); |
153 | 153 | ||
154 | static struct pci_dev *next_northbridge(struct pci_dev *dev, |
154 | static struct pci_dev *next_northbridge(struct pci_dev *dev, |
155 | const struct pci_device_id *ids) |
155 | const struct pci_device_id *ids) |
156 | { |
156 | { |
157 | do { |
157 | do { |
158 | dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev); |
158 | dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev); |
159 | if (!dev) |
159 | if (!dev) |
160 | break; |
160 | break; |
161 | } while (!pci_match_id(ids, dev)); |
161 | } while (!pci_match_id(ids, dev)); |
162 | return dev; |
162 | return dev; |
163 | } |
163 | } |
164 | 164 | ||
165 | static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write) |
165 | static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write) |
166 | { |
166 | { |
167 | struct pci_dev *root; |
167 | struct pci_dev *root; |
168 | int err = -ENODEV; |
168 | int err = -ENODEV; |
169 | 169 | ||
170 | if (node >= amd_northbridges.num) |
170 | if (node >= amd_northbridges.num) |
171 | goto out; |
171 | goto out; |
172 | 172 | ||
173 | root = node_to_amd_nb(node)->root; |
173 | root = node_to_amd_nb(node)->root; |
174 | /* printk("Northbridge PCI device %x:%x bus:%x devfn:%x\n", |
174 | /* printk("Northbridge PCI device %x:%x bus:%x devfn:%x\n", |
175 | root->vendor, |
175 | root->vendor, |
176 | root->device, |
176 | root->device, |
177 | root->busnr, |
177 | root->busnr, |
178 | root->devfn); |
178 | root->devfn); |
179 | */ |
179 | */ |
180 | if (!root) |
180 | if (!root) |
181 | goto out; |
181 | goto out; |
182 | 182 | ||
183 | mutex_lock(&smn_mutex); |
183 | mutex_lock(&smn_mutex); |
184 | 184 | ||
185 | err = pci_write_config_dword(root, 0x60, address); |
185 | err = pci_write_config_dword(root, 0x60, address); |
186 | if (err) { |
186 | if (err) { |
187 | pr_warn("Error programming SMN address 0x%x.\n", address); |
187 | pr_warn("Error programming SMN address 0x%x.\n", address); |
188 | goto out_unlock; |
188 | goto out_unlock; |
189 | } |
189 | } |
190 | 190 | ||
191 | err = (write ? pci_write_config_dword(root, 0x64, *value) |
191 | err = (write ? pci_write_config_dword(root, 0x64, *value) |
192 | : pci_read_config_dword(root, 0x64, value)); |
192 | : pci_read_config_dword(root, 0x64, value)); |
193 | if (err) |
193 | if (err) |
194 | pr_warn("Error %s SMN address 0x%x.\n", |
194 | pr_warn("Error %s SMN address 0x%x.\n", |
195 | (write ? "writing to" : "reading from"), address); |
195 | (write ? "writing to" : "reading from"), address); |
196 | 196 | ||
197 | out_unlock: |
197 | out_unlock: |
198 | mutex_unlock(&smn_mutex); |
198 | mutex_unlock(&smn_mutex); |
199 | 199 | ||
200 | out: |
200 | out: |
201 | return err; |
201 | return err; |
202 | } |
202 | } |
203 | 203 | ||
204 | int amd_smn_read(u16 node, u32 address, u32 *value) |
204 | int amd_smn_read(u16 node, u32 address, u32 *value) |
205 | { |
205 | { |
206 | return __amd_smn_rw(node, address, value, false); |
206 | return __amd_smn_rw(node, address, value, false); |
207 | } |
207 | } |
208 | EXPORT_SYMBOL_GPL(amd_smn_read); |
208 | EXPORT_SYMBOL_GPL(amd_smn_read); |
209 | 209 | ||
210 | int amd_smn_write(u16 node, u32 address, u32 value) |
210 | int amd_smn_write(u16 node, u32 address, u32 value) |
211 | { |
211 | { |
212 | return __amd_smn_rw(node, address, &value, true); |
212 | return __amd_smn_rw(node, address, &value, true); |
213 | } |
213 | } |
214 | EXPORT_SYMBOL_GPL(amd_smn_write); |
214 | EXPORT_SYMBOL_GPL(amd_smn_write); |
215 | 215 | ||
216 | /* |
216 | /* |
217 | * Data Fabric Indirect Access uses FICAA/FICAD. |
217 | * Data Fabric Indirect Access uses FICAA/FICAD. |
218 | * |
218 | * |
219 | * Fabric Indirect Configuration Access Address (FICAA): Constructed based |
219 | * Fabric Indirect Configuration Access Address (FICAA): Constructed based |
220 | * on the device's Instance Id and the PCI function and register offset of |
220 | * on the device's Instance Id and the PCI function and register offset of |
221 | * the desired register. |
221 | * the desired register. |
222 | * |
222 | * |
223 | * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO |
223 | * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO |
224 | * and FICAD HI registers but so far we only need the LO register. |
224 | * and FICAD HI registers but so far we only need the LO register. |
225 | */ |
225 | */ |
226 | int amd_df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo) |
226 | int amd_df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo) |
227 | { |
227 | { |
228 | struct pci_dev *F4; |
228 | struct pci_dev *F4; |
229 | u32 ficaa; |
229 | u32 ficaa; |
230 | int err = -ENODEV; |
230 | int err = -ENODEV; |
231 | 231 | ||
232 | if (node >= amd_northbridges.num) |
232 | if (node >= amd_northbridges.num) |
233 | goto out; |
233 | goto out; |
234 | 234 | ||
235 | F4 = node_to_amd_nb(node)->link; |
235 | F4 = node_to_amd_nb(node)->link; |
236 | if (!F4) |
236 | if (!F4) |
237 | goto out; |
237 | goto out; |
238 | 238 | ||
239 | ficaa = 1; |
239 | ficaa = 1; |
240 | ficaa |= reg & 0x3FC; |
240 | ficaa |= reg & 0x3FC; |
241 | ficaa |= (func & 0x7) << 11; |
241 | ficaa |= (func & 0x7) << 11; |
242 | ficaa |= instance_id << 16; |
242 | ficaa |= instance_id << 16; |
243 | 243 | ||
244 | mutex_lock(&smn_mutex); |
244 | mutex_lock(&smn_mutex); |
245 | 245 | ||
246 | err = pci_write_config_dword(F4, 0x5C, ficaa); |
246 | err = pci_write_config_dword(F4, 0x5C, ficaa); |
247 | if (err) { |
247 | if (err) { |
248 | pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa); |
248 | pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa); |
249 | goto out_unlock; |
249 | goto out_unlock; |
250 | } |
250 | } |
251 | 251 | ||
252 | err = pci_read_config_dword(F4, 0x98, lo); |
252 | err = pci_read_config_dword(F4, 0x98, lo); |
253 | if (err) |
253 | if (err) |
254 | pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa); |
254 | pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa); |
255 | 255 | ||
256 | out_unlock: |
256 | out_unlock: |
257 | mutex_unlock(&smn_mutex); |
257 | mutex_unlock(&smn_mutex); |
258 | 258 | ||
259 | out: |
259 | out: |
260 | return err; |
260 | return err; |
261 | } |
261 | } |
262 | EXPORT_SYMBOL_GPL(amd_df_indirect_read); |
262 | EXPORT_SYMBOL_GPL(amd_df_indirect_read); |
263 | 263 | ||
/*
 * Enumerate and cache the root/misc/link PCI devices for every
 * northbridge (one entry per DF/SMN "misc" interface) into
 * amd_northbridges, then record feature flags (GART, L3 index disable,
 * L3 partitioning).
 *
 * Idempotent: returns 0 immediately if the cache is already populated.
 * Returns 0 on success, -ENODEV if no misc device exists or the
 * root-to-misc ratio is inconsistent, -ENOMEM on allocation failure.
 */
int amd_cache_northbridges(void)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *link_ids = amd_nb_link_ids;
	const struct pci_device_id *root_ids = amd_root_ids;
	struct pci_dev *root, *misc, *link;
	struct amd_northbridge *nb;
	u16 roots_per_misc = 0;
	u16 misc_count = 0;
	u16 root_count = 0;
	u16 i, j;

	if (amd_northbridges.num)
		return 0;

	/* Hygon parts reuse the family-17h layout under their own IDs. */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
		root_ids = hygon_root_ids;
		misc_ids = hygon_nb_misc_ids;
		link_ids = hygon_nb_link_ids;
	}

	/* First pass: count misc devices — one per northbridge. */
	misc = NULL;
	while ((misc = next_northbridge(misc, misc_ids)) != NULL)
		misc_count++;

	if (!misc_count)
		return -ENODEV;

	root = NULL;
	while ((root = next_northbridge(root, root_ids)) != NULL)
		root_count++;

	if (root_count) {
		roots_per_misc = root_count / misc_count;

		/*
		 * There should be _exactly_ N roots for each DF/SMN
		 * interface.
		 */
		if (!roots_per_misc || (root_count % roots_per_misc)) {
			pr_info("Unsupported AMD DF/PCI configuration found\n");
			return -ENODEV;
		}
	}

	nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = misc_count;

	/* Second pass: assign root/misc/link triples in discovery order. */
	link = misc = root = NULL;
	for (i = 0; i < amd_northbridges.num; i++) {
		node_to_amd_nb(i)->root = root =
			next_northbridge(root, root_ids);
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, link_ids);

		/*
		 * If there are more PCI root devices than data fabric/
		 * system management network interfaces, then the (N)
		 * PCI roots per DF/SMN interface are functionally the
		 * same (for DF/SMN access) and N-1 are redundant. N-1
		 * PCI roots should be skipped per DF/SMN interface so
		 * the following DF/SMN interfaces get mapped to
		 * correct PCI roots.
		 */
		for (j = 1; j < roots_per_misc; j++)
			root = next_northbridge(root, root_ids);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 *
	 * NOTE(review): the stepping clause below is commented out here but
	 * is active in the upstream kernel — confirm this local change is
	 * intentional.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9/* ||
	     boot_cpu_data.x86_stepping >= 0x1*/))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);
367 | 367 | ||
368 | /* |
368 | /* |
369 | * Ignores subdevice/subvendor but as far as I can figure out |
369 | * Ignores subdevice/subvendor but as far as I can figure out |
370 | * they're useless anyways |
370 | * they're useless anyways |
371 | */ |
371 | */ |
372 | bool __init early_is_amd_nb(u32 device) |
372 | bool __init early_is_amd_nb(u32 device) |
373 | { |
373 | { |
374 | const struct pci_device_id *misc_ids = amd_nb_misc_ids; |
374 | const struct pci_device_id *misc_ids = amd_nb_misc_ids; |
375 | const struct pci_device_id *id; |
375 | const struct pci_device_id *id; |
376 | u32 vendor = device & 0xffff; |
376 | u32 vendor = device & 0xffff; |
377 | 377 | ||
378 | if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && |
378 | if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && |
379 | boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) |
379 | boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) |
380 | return false; |
380 | return false; |
381 | 381 | ||
382 | if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) |
382 | if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) |
383 | misc_ids = hygon_nb_misc_ids; |
383 | misc_ids = hygon_nb_misc_ids; |
384 | 384 | ||
385 | device >>= 16; |
385 | device >>= 16; |
386 | for (id = misc_ids; id->vendor; id++) |
386 | for (id = misc_ids; id->vendor; id++) |
387 | if (vendor == id->vendor && device == id->device) |
387 | if (vendor == id->vendor && device == id->device) |
388 | return true; |
388 | return true; |
389 | return false; |
389 | return false; |
390 | } |
390 | } |
391 | 391 | ||
392 | struct resource *amd_get_mmconfig_range(struct resource *res) |
392 | struct resource *amd_get_mmconfig_range(struct resource *res) |
393 | { |
393 | { |
394 | u32 address; |
394 | u32 address; |
395 | u64 base, msr; |
395 | u64 base, msr; |
396 | unsigned int segn_busn_bits; |
396 | unsigned int segn_busn_bits; |
397 | 397 | ||
398 | if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && |
398 | if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && |
399 | boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) |
399 | boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) |
400 | return NULL; |
400 | return NULL; |
401 | 401 | ||
402 | /* assume all cpus from fam10h have mmconfig */ |
402 | /* assume all cpus from fam10h have mmconfig */ |
403 | if (boot_cpu_data.x86 < 0x10) |
403 | if (boot_cpu_data.x86 < 0x10) |
404 | return NULL; |
404 | return NULL; |
405 | 405 | ||
406 | address = MSR_FAM10H_MMIO_CONF_BASE; |
406 | address = MSR_FAM10H_MMIO_CONF_BASE; |
407 | rdmsrl(address, msr); |
407 | rdmsrl(address, msr); |
408 | 408 | ||
409 | /* mmconfig is not enabled */ |
409 | /* mmconfig is not enabled */ |
410 | if (!(msr & FAM10H_MMIO_CONF_ENABLE)) |
410 | if (!(msr & FAM10H_MMIO_CONF_ENABLE)) |
411 | return NULL; |
411 | return NULL; |
412 | 412 | ||
413 | base = msr & (FAM10H_MMIO_CONF_BASE_MASK< |
413 | base = msr & (FAM10H_MMIO_CONF_BASE_MASK< |
414 | 414 | ||
415 | segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) & |
415 | segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) & |
416 | FAM10H_MMIO_CONF_BUSRANGE_MASK; |
416 | FAM10H_MMIO_CONF_BUSRANGE_MASK; |
417 | 417 | ||
418 | res->flags = IORESOURCE_MEM; |
418 | res->flags = IORESOURCE_MEM; |
419 | res->start = base; |
419 | res->start = base; |
420 | res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1; |
420 | res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1; |
421 | return res; |
421 | return res; |
422 | } |
422 | } |
423 | 423 | ||
424 | int amd_get_subcaches(int cpu) |
424 | int amd_get_subcaches(int cpu) |
425 | { |
425 | { |
426 | struct pci_dev *link = node_to_amd_nb(topology_die_id(cpu))->link; |
426 | struct pci_dev *link = node_to_amd_nb(topology_die_id(cpu))->link; |
427 | unsigned int mask; |
427 | unsigned int mask; |
428 | 428 | ||
429 | if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) |
429 | if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) |
430 | return 0; |
430 | return 0; |
431 | 431 | ||
432 | pci_read_config_dword(link, 0x1d4, &mask); |
432 | pci_read_config_dword(link, 0x1d4, &mask); |
433 | 433 | ||
434 | return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf; |
434 | return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf; |
435 | } |
435 | } |
436 | 436 | ||
/*
 * Set the active L3 subcaches for @cpu's compute unit to @mask
 * (bits 0-3).  Returns 0 on success, -EINVAL when L3 partitioning is
 * unsupported or @mask has bits above 0xf.
 *
 * The register write ordering below (capture reset state, drop BAN
 * mode, write the partition mask, conditionally restore BAN) is
 * deliberate — do not reorder.
 */
int amd_set_subcaches(int cpu, unsigned long mask)
{
	/* Reset-time state, latched on the first call only. */
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(topology_die_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;	/* BAN mode bits only */
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	/* 4 subcache bits per compute unit; bits 26+ appear to select which
	 * per-CU fields the write applies to — confirm against the BKDG. */
	cuid = cpu_data(cpu).cpu_core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}
476 | 476 | ||
477 | static void amd_cache_gart(void) |
477 | static void amd_cache_gart(void) |
478 | { |
478 | { |
479 | u16 i; |
479 | u16 i; |
480 | 480 | ||
481 | if (!amd_nb_has_feature(AMD_NB_GART)) |
481 | if (!amd_nb_has_feature(AMD_NB_GART)) |
482 | return; |
482 | return; |
483 | 483 | ||
484 | flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL); |
484 | flush_words = KernelZeroAlloc(amd_northbridges.num * sizeof(u32)); |
485 | if (!flush_words) { |
485 | if (!flush_words) { |
486 | amd_northbridges.flags &= ~AMD_NB_GART; |
486 | amd_northbridges.flags &= ~AMD_NB_GART; |
487 | pr_notice("Cannot initialize GART flush words, GART support disabled\n"); |
487 | pr_notice("Cannot initialize GART flush words, GART support disabled\n"); |
488 | return; |
488 | return; |
489 | } |
489 | } |
490 | 490 | ||
491 | for (i = 0; i != amd_northbridges.num; i++) |
491 | for (i = 0; i != amd_northbridges.num; i++) |
492 | pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]); |
492 | pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]); |
493 | } |
493 | } |
494 | 494 | ||
495 | void amd_flush_garts(void) |
495 | void amd_flush_garts(void) |
496 | { |
496 | { |
497 | int flushed, i; |
497 | int flushed, i; |
498 | unsigned long flags; |
498 | unsigned long flags; |
499 | static DEFINE_SPINLOCK(gart_lock); |
499 | static DEFINE_SPINLOCK(gart_lock); |
500 | 500 | ||
501 | if (!amd_nb_has_feature(AMD_NB_GART)) |
501 | if (!amd_nb_has_feature(AMD_NB_GART)) |
502 | return; |
502 | return; |
503 | 503 | ||
504 | /* |
504 | /* |
505 | * Avoid races between AGP and IOMMU. In theory it's not needed |
505 | * Avoid races between AGP and IOMMU. In theory it's not needed |
506 | * but I'm not sure if the hardware won't lose flush requests |
506 | * but I'm not sure if the hardware won't lose flush requests |
507 | * when another is pending. This whole thing is so expensive anyways |
507 | * when another is pending. This whole thing is so expensive anyways |
508 | * that it doesn't matter to serialize more. -AK |
508 | * that it doesn't matter to serialize more. -AK |
509 | */ |
509 | */ |
510 | spin_lock_irqsave(&gart_lock, flags); |
510 | spin_lock_irqsave(&gart_lock, flags); |
511 | flushed = 0; |
511 | flushed = 0; |
512 | for (i = 0; i < amd_northbridges.num; i++) { |
512 | for (i = 0; i < amd_northbridges.num; i++) { |
513 | pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c, |
513 | pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c, |
514 | flush_words[i] | 1); |
514 | flush_words[i] | 1); |
515 | flushed++; |
515 | flushed++; |
516 | } |
516 | } |
517 | for (i = 0; i < amd_northbridges.num; i++) { |
517 | for (i = 0; i < amd_northbridges.num; i++) { |
518 | u32 w; |
518 | u32 w; |
519 | /* Make sure the hardware actually executed the flush*/ |
519 | /* Make sure the hardware actually executed the flush*/ |
520 | for (;;) { |
520 | for (;;) { |
521 | pci_read_config_dword(node_to_amd_nb(i)->misc, |
521 | pci_read_config_dword(node_to_amd_nb(i)->misc, |
522 | 0x9c, &w); |
522 | 0x9c, &w); |
523 | if (!(w & 1)) |
523 | if (!(w & 1)) |
524 | break; |
524 | break; |
525 | cpu_relax(); |
525 | cpu_relax(); |
526 | } |
526 | } |
527 | } |
527 | } |
528 | spin_unlock_irqrestore(&gart_lock, flags); |
528 | spin_unlock_irqrestore(&gart_lock, flags); |
529 | if (!flushed) |
529 | if (!flushed) |
530 | pr_notice("nothing to flush?\n"); |
530 | pr_notice("nothing to flush?\n"); |
531 | } |
531 | } |
532 | EXPORT_SYMBOL_GPL(amd_flush_garts); |
532 | EXPORT_SYMBOL_GPL(amd_flush_garts); |
533 | 533 | ||
534 | 534 | ||
/*
 * Per-CPU worker for the erratum 688 fix: set the two workaround bits
 * in the IC_CFG MSR on the CPU this runs on. @info is unused.
 */
static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

	/* msr_set_bit() is the kernel helper; e_msr_set_bit() does not exist. */
	msr_set_bit(MSR_AMD64_IC_CFG, 3);
	msr_set_bit(MSR_AMD64_IC_CFG, 14);
}
541 | 541 | ||
542 | /* Apply erratum 688 fix so machines without a BIOS fix work. */ |
542 | /* Apply erratum 688 fix so machines without a BIOS fix work. */ |
543 | static __init void fix_erratum_688(void) |
543 | static __init void fix_erratum_688(void) |
544 | { |
544 | { |
545 | struct pci_dev *F4; |
545 | struct pci_dev *F4; |
546 | u32 val; |
546 | u32 val; |
547 | 547 | ||
548 | if (boot_cpu_data.x86 != 0x14) |
548 | if (boot_cpu_data.x86 != 0x14) |
549 | return; |
549 | return; |
550 | 550 | ||
551 | if (!amd_northbridges.num) |
551 | if (!amd_northbridges.num) |
552 | return; |
552 | return; |
553 | 553 | ||
554 | F4 = node_to_amd_nb(0)->link; |
554 | F4 = node_to_amd_nb(0)->link; |
555 | if (!F4) |
555 | if (!F4) |
556 | return; |
556 | return; |
557 | 557 | ||
558 | if (pci_read_config_dword(F4, 0x164, &val)) |
558 | if (pci_read_config_dword(F4, 0x164, &val)) |
559 | return; |
559 | return; |
560 | 560 | ||
561 | if (val & BIT(2)) |
561 | if (val & BIT(2)) |
562 | return; |
562 | return; |
563 | 563 | ||
564 | on_each_cpu(__fix_erratum_688, NULL, 0); |
564 | on_each_cpu(__fix_erratum_688, NULL, 0); |
565 | 565 | ||
566 | pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n"); |
566 | pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n"); |
567 | } |
567 | } |
568 | 568 | ||
/*
 * Boot-time initialization of AMD northbridge support: discover and
 * cache the northbridge PCI devices, cache the GART flush words, then
 * apply the erratum 688 workaround if needed. Order matters — the
 * later steps rely on the cached northbridge list.
 *
 * Always returns 0 (initcall convention).
 */
__init int init_amd_nbs(void)
{
	amd_cache_northbridges();
	amd_cache_gart();

	fix_erratum_688();

	return 0;
}
578 | 578 | ||
/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);