Rev 6104 | Rev 6938 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 6104 | Rev 6321 | ||
---|---|---|---|
1 | #define CONFIG_PCI |
- | |
2 | - | ||
3 | #include |
1 | #include |
4 | 2 | ||
5 | #include |
3 | #include |
6 | #include |
4 | #include |
7 | #include |
5 | #include |
8 | #include |
6 | #include |
9 | #include |
7 | #include |
10 | 8 | ||
11 | #include |
9 | #include |
12 | - | ||
13 | 10 | ||
14 | extern int pci_scan_filter(u32 id, u32 busnr, u32 devfn); |
11 | extern int pci_scan_filter(u32 id, u32 busnr, u32 devfn); |
15 | 12 | ||
16 | static LIST_HEAD(devices); |
13 | static LIST_HEAD(devices); |
17 | 14 | ||
18 | /* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */ |
15 | /* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */ |
19 | #define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */ |
16 | #define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */ |
20 | 17 | ||
21 | #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) |
18 | #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) |
22 | 19 | ||
23 | /* |
20 | /* |
24 | * Translate the low bits of the PCI base |
21 | * Translate the low bits of the PCI base |
25 | * to the resource type |
22 | * to the resource type |
26 | */ |
23 | */ |
27 | static inline unsigned int pci_calc_resource_flags(unsigned int flags) |
24 | static inline unsigned int pci_calc_resource_flags(unsigned int flags) |
28 | { |
25 | { |
29 | if (flags & PCI_BASE_ADDRESS_SPACE_IO) |
26 | if (flags & PCI_BASE_ADDRESS_SPACE_IO) |
30 | return IORESOURCE_IO; |
27 | return IORESOURCE_IO; |
31 | 28 | ||
32 | if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH) |
29 | if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH) |
33 | return IORESOURCE_MEM | IORESOURCE_PREFETCH; |
30 | return IORESOURCE_MEM | IORESOURCE_PREFETCH; |
34 | 31 | ||
35 | return IORESOURCE_MEM; |
32 | return IORESOURCE_MEM; |
36 | } |
33 | } |
37 | 34 | ||
38 | 35 | ||
39 | static u32 pci_size(u32 base, u32 maxbase, u32 mask) |
36 | static u32 pci_size(u32 base, u32 maxbase, u32 mask) |
40 | { |
37 | { |
41 | u32 size = mask & maxbase; /* Find the significant bits */ |
38 | u32 size = mask & maxbase; /* Find the significant bits */ |
42 | 39 | ||
43 | if (!size) |
40 | if (!size) |
44 | return 0; |
41 | return 0; |
45 | 42 | ||
46 | /* Get the lowest of them to find the decode size, and |
43 | /* Get the lowest of them to find the decode size, and |
47 | from that the extent. */ |
44 | from that the extent. */ |
48 | size = (size & ~(size-1)) - 1; |
45 | size = (size & ~(size-1)) - 1; |
49 | 46 | ||
50 | /* base == maxbase can be valid only if the BAR has |
47 | /* base == maxbase can be valid only if the BAR has |
51 | already been programmed with all 1s. */ |
48 | already been programmed with all 1s. */ |
52 | if (base == maxbase && ((base | size) & mask) != mask) |
49 | if (base == maxbase && ((base | size) & mask) != mask) |
53 | return 0; |
50 | return 0; |
54 | 51 | ||
55 | return size; |
52 | return size; |
56 | } |
53 | } |
57 | 54 | ||
58 | static u64 pci_size64(u64 base, u64 maxbase, u64 mask) |
55 | static u64 pci_size64(u64 base, u64 maxbase, u64 mask) |
59 | { |
56 | { |
60 | u64 size = mask & maxbase; /* Find the significant bits */ |
57 | u64 size = mask & maxbase; /* Find the significant bits */ |
61 | 58 | ||
62 | if (!size) |
59 | if (!size) |
63 | return 0; |
60 | return 0; |
64 | 61 | ||
65 | /* Get the lowest of them to find the decode size, and |
62 | /* Get the lowest of them to find the decode size, and |
66 | from that the extent. */ |
63 | from that the extent. */ |
67 | size = (size & ~(size-1)) - 1; |
64 | size = (size & ~(size-1)) - 1; |
68 | 65 | ||
69 | /* base == maxbase can be valid only if the BAR has |
66 | /* base == maxbase can be valid only if the BAR has |
70 | already been programmed with all 1s. */ |
67 | already been programmed with all 1s. */ |
71 | if (base == maxbase && ((base | size) & mask) != mask) |
68 | if (base == maxbase && ((base | size) & mask) != mask) |
72 | return 0; |
69 | return 0; |
73 | 70 | ||
74 | return size; |
71 | return size; |
75 | } |
72 | } |
76 | 73 | ||
77 | static inline int is_64bit_memory(u32 mask) |
74 | static inline int is_64bit_memory(u32 mask) |
78 | { |
75 | { |
79 | if ((mask & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) == |
76 | if ((mask & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) == |
80 | (PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64)) |
77 | (PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64)) |
81 | return 1; |
78 | return 1; |
82 | return 0; |
79 | return 0; |
83 | } |
80 | } |
84 | 81 | ||
85 | static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) |
82 | static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) |
86 | { |
83 | { |
87 | u32 pos, reg, next; |
84 | u32 pos, reg, next; |
88 | u32 l, sz; |
85 | u32 l, sz; |
89 | struct resource *res; |
86 | struct resource *res; |
90 | 87 | ||
91 | for(pos=0; pos < howmany; pos = next) |
88 | for(pos=0; pos < howmany; pos = next) |
92 | { |
89 | { |
93 | u64 l64; |
90 | u64 l64; |
94 | u64 sz64; |
91 | u64 sz64; |
95 | u32 raw_sz; |
92 | u32 raw_sz; |
96 | 93 | ||
97 | next = pos + 1; |
94 | next = pos + 1; |
98 | 95 | ||
99 | res = &dev->resource[pos]; |
96 | res = &dev->resource[pos]; |
100 | 97 | ||
101 | reg = PCI_BASE_ADDRESS_0 + (pos << 2); |
98 | reg = PCI_BASE_ADDRESS_0 + (pos << 2); |
102 | l = PciRead32(dev->busnr, dev->devfn, reg); |
99 | l = PciRead32(dev->busnr, dev->devfn, reg); |
103 | PciWrite32(dev->busnr, dev->devfn, reg, ~0); |
100 | PciWrite32(dev->busnr, dev->devfn, reg, ~0); |
104 | sz = PciRead32(dev->busnr, dev->devfn, reg); |
101 | sz = PciRead32(dev->busnr, dev->devfn, reg); |
105 | PciWrite32(dev->busnr, dev->devfn, reg, l); |
102 | PciWrite32(dev->busnr, dev->devfn, reg, l); |
106 | 103 | ||
107 | if (!sz || sz == 0xffffffff) |
104 | if (!sz || sz == 0xffffffff) |
108 | continue; |
105 | continue; |
109 | 106 | ||
110 | if (l == 0xffffffff) |
107 | if (l == 0xffffffff) |
111 | l = 0; |
108 | l = 0; |
112 | 109 | ||
113 | raw_sz = sz; |
110 | raw_sz = sz; |
114 | if ((l & PCI_BASE_ADDRESS_SPACE) == |
111 | if ((l & PCI_BASE_ADDRESS_SPACE) == |
115 | PCI_BASE_ADDRESS_SPACE_MEMORY) |
112 | PCI_BASE_ADDRESS_SPACE_MEMORY) |
116 | { |
113 | { |
117 | sz = pci_size(l, sz, (u32)PCI_BASE_ADDRESS_MEM_MASK); |
114 | sz = pci_size(l, sz, (u32)PCI_BASE_ADDRESS_MEM_MASK); |
118 | /* |
115 | /* |
119 | * For 64bit prefetchable memory sz could be 0, if the |
116 | * For 64bit prefetchable memory sz could be 0, if the |
120 | * real size is bigger than 4G, so we need to check |
117 | * real size is bigger than 4G, so we need to check |
121 | * szhi for that. |
118 | * szhi for that. |
122 | */ |
119 | */ |
123 | if (!is_64bit_memory(l) && !sz) |
120 | if (!is_64bit_memory(l) && !sz) |
124 | continue; |
121 | continue; |
125 | res->start = l & PCI_BASE_ADDRESS_MEM_MASK; |
122 | res->start = l & PCI_BASE_ADDRESS_MEM_MASK; |
126 | res->flags |= l & ~PCI_BASE_ADDRESS_MEM_MASK; |
123 | res->flags |= l & ~PCI_BASE_ADDRESS_MEM_MASK; |
127 | } |
124 | } |
128 | else { |
125 | else { |
129 | sz = pci_size(l, sz, PCI_BASE_ADDRESS_IO_MASK & 0xffff); |
126 | sz = pci_size(l, sz, PCI_BASE_ADDRESS_IO_MASK & 0xffff); |
130 | if (!sz) |
127 | if (!sz) |
131 | continue; |
128 | continue; |
132 | res->start = l & PCI_BASE_ADDRESS_IO_MASK; |
129 | res->start = l & PCI_BASE_ADDRESS_IO_MASK; |
133 | res->flags |= l & ~PCI_BASE_ADDRESS_IO_MASK; |
130 | res->flags |= l & ~PCI_BASE_ADDRESS_IO_MASK; |
134 | } |
131 | } |
135 | res->end = res->start + (unsigned long) sz; |
132 | res->end = res->start + (unsigned long) sz; |
136 | res->flags |= pci_calc_resource_flags(l); |
133 | res->flags |= pci_calc_resource_flags(l); |
137 | if (is_64bit_memory(l)) |
134 | if (is_64bit_memory(l)) |
138 | { |
135 | { |
139 | u32 szhi, lhi; |
136 | u32 szhi, lhi; |
140 | 137 | ||
141 | lhi = PciRead32(dev->busnr, dev->devfn, reg+4); |
138 | lhi = PciRead32(dev->busnr, dev->devfn, reg+4); |
142 | PciWrite32(dev->busnr, dev->devfn, reg+4, ~0); |
139 | PciWrite32(dev->busnr, dev->devfn, reg+4, ~0); |
143 | szhi = PciRead32(dev->busnr, dev->devfn, reg+4); |
140 | szhi = PciRead32(dev->busnr, dev->devfn, reg+4); |
144 | PciWrite32(dev->busnr, dev->devfn, reg+4, lhi); |
141 | PciWrite32(dev->busnr, dev->devfn, reg+4, lhi); |
145 | sz64 = ((u64)szhi << 32) | raw_sz; |
142 | sz64 = ((u64)szhi << 32) | raw_sz; |
146 | l64 = ((u64)lhi << 32) | l; |
143 | l64 = ((u64)lhi << 32) | l; |
147 | sz64 = pci_size64(l64, sz64, PCI_BASE_ADDRESS_MEM_MASK); |
144 | sz64 = pci_size64(l64, sz64, PCI_BASE_ADDRESS_MEM_MASK); |
148 | next++; |
145 | next++; |
149 | 146 | ||
150 | #if BITS_PER_LONG == 64 |
147 | #if BITS_PER_LONG == 64 |
151 | if (!sz64) { |
148 | if (!sz64) { |
152 | res->start = 0; |
149 | res->start = 0; |
153 | res->end = 0; |
150 | res->end = 0; |
154 | res->flags = 0; |
151 | res->flags = 0; |
155 | continue; |
152 | continue; |
156 | } |
153 | } |
157 | res->start = l64 & PCI_BASE_ADDRESS_MEM_MASK; |
154 | res->start = l64 & PCI_BASE_ADDRESS_MEM_MASK; |
158 | res->end = res->start + sz64; |
155 | res->end = res->start + sz64; |
159 | #else |
156 | #else |
160 | if (sz64 > 0x100000000ULL) { |
157 | if (sz64 > 0x100000000ULL) { |
161 | printk(KERN_ERR "PCI: Unable to handle 64-bit " |
158 | printk(KERN_ERR "PCI: Unable to handle 64-bit " |
162 | "BAR for device %s\n", pci_name(dev)); |
159 | "BAR for device %s\n", pci_name(dev)); |
163 | res->start = 0; |
160 | res->start = 0; |
164 | res->flags = 0; |
161 | res->flags = 0; |
165 | } |
162 | } |
166 | else if (lhi) |
163 | else if (lhi) |
167 | { |
164 | { |
168 | /* 64-bit wide address, treat as disabled */ |
165 | /* 64-bit wide address, treat as disabled */ |
169 | PciWrite32(dev->busnr, dev->devfn, reg, |
166 | PciWrite32(dev->busnr, dev->devfn, reg, |
170 | l & ~(u32)PCI_BASE_ADDRESS_MEM_MASK); |
167 | l & ~(u32)PCI_BASE_ADDRESS_MEM_MASK); |
171 | PciWrite32(dev->busnr, dev->devfn, reg+4, 0); |
168 | PciWrite32(dev->busnr, dev->devfn, reg+4, 0); |
172 | res->start = 0; |
169 | res->start = 0; |
173 | res->end = sz; |
170 | res->end = sz; |
174 | } |
171 | } |
175 | #endif |
172 | #endif |
176 | } |
173 | } |
177 | } |
174 | } |
178 | 175 | ||
179 | if ( rom ) |
176 | if ( rom ) |
180 | { |
177 | { |
181 | dev->rom_base_reg = rom; |
178 | dev->rom_base_reg = rom; |
182 | res = &dev->resource[PCI_ROM_RESOURCE]; |
179 | res = &dev->resource[PCI_ROM_RESOURCE]; |
183 | 180 | ||
184 | l = PciRead32(dev->busnr, dev->devfn, rom); |
181 | l = PciRead32(dev->busnr, dev->devfn, rom); |
185 | PciWrite32(dev->busnr, dev->devfn, rom, ~PCI_ROM_ADDRESS_ENABLE); |
182 | PciWrite32(dev->busnr, dev->devfn, rom, ~PCI_ROM_ADDRESS_ENABLE); |
186 | sz = PciRead32(dev->busnr, dev->devfn, rom); |
183 | sz = PciRead32(dev->busnr, dev->devfn, rom); |
187 | PciWrite32(dev->busnr, dev->devfn, rom, l); |
184 | PciWrite32(dev->busnr, dev->devfn, rom, l); |
188 | 185 | ||
189 | if (l == 0xffffffff) |
186 | if (l == 0xffffffff) |
190 | l = 0; |
187 | l = 0; |
191 | 188 | ||
192 | if (sz && sz != 0xffffffff) |
189 | if (sz && sz != 0xffffffff) |
193 | { |
190 | { |
194 | sz = pci_size(l, sz, (u32)PCI_ROM_ADDRESS_MASK); |
191 | sz = pci_size(l, sz, (u32)PCI_ROM_ADDRESS_MASK); |
195 | 192 | ||
196 | if (sz) |
193 | if (sz) |
197 | { |
194 | { |
198 | res->flags = (l & IORESOURCE_ROM_ENABLE) | |
195 | res->flags = (l & IORESOURCE_ROM_ENABLE) | |
199 | IORESOURCE_MEM | IORESOURCE_PREFETCH | |
196 | IORESOURCE_MEM | IORESOURCE_PREFETCH | |
200 | IORESOURCE_READONLY | IORESOURCE_CACHEABLE; |
197 | IORESOURCE_READONLY | IORESOURCE_CACHEABLE; |
201 | res->start = l & PCI_ROM_ADDRESS_MASK; |
198 | res->start = l & PCI_ROM_ADDRESS_MASK; |
202 | res->end = res->start + (unsigned long) sz; |
199 | res->end = res->start + (unsigned long) sz; |
203 | } |
200 | } |
204 | } |
201 | } |
205 | } |
202 | } |
206 | } |
203 | } |
207 | 204 | ||
208 | static void pci_read_irq(struct pci_dev *dev) |
205 | static void pci_read_irq(struct pci_dev *dev) |
209 | { |
206 | { |
210 | u8 irq; |
207 | u8 irq; |
211 | 208 | ||
212 | irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_PIN); |
209 | irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_PIN); |
213 | dev->pin = irq; |
210 | dev->pin = irq; |
214 | if (irq) |
211 | if (irq) |
215 | irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_LINE); |
212 | irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_LINE); |
216 | dev->irq = irq; |
213 | dev->irq = irq; |
217 | }; |
214 | }; |
218 | 215 | ||
219 | 216 | ||
220 | int pci_setup_device(struct pci_dev *dev) |
217 | int pci_setup_device(struct pci_dev *dev) |
221 | { |
218 | { |
222 | u32 class; |
219 | u32 class; |
223 | 220 | ||
224 | class = PciRead32(dev->busnr, dev->devfn, PCI_CLASS_REVISION); |
221 | class = PciRead32(dev->busnr, dev->devfn, PCI_CLASS_REVISION); |
225 | dev->revision = class & 0xff; |
222 | dev->revision = class & 0xff; |
226 | class >>= 8; /* upper 3 bytes */ |
223 | class >>= 8; /* upper 3 bytes */ |
227 | dev->class = class; |
224 | dev->class = class; |
228 | 225 | ||
229 | /* "Unknown power state" */ |
226 | /* "Unknown power state" */ |
230 | // dev->current_state = PCI_UNKNOWN; |
227 | // dev->current_state = PCI_UNKNOWN; |
231 | 228 | ||
232 | /* Early fixups, before probing the BARs */ |
229 | /* Early fixups, before probing the BARs */ |
233 | // pci_fixup_device(pci_fixup_early, dev); |
230 | // pci_fixup_device(pci_fixup_early, dev); |
234 | class = dev->class >> 8; |
231 | class = dev->class >> 8; |
235 | 232 | ||
236 | switch (dev->hdr_type) |
233 | switch (dev->hdr_type) |
237 | { |
234 | { |
238 | case PCI_HEADER_TYPE_NORMAL: /* standard header */ |
235 | case PCI_HEADER_TYPE_NORMAL: /* standard header */ |
239 | if (class == PCI_CLASS_BRIDGE_PCI) |
236 | if (class == PCI_CLASS_BRIDGE_PCI) |
240 | goto bad; |
237 | goto bad; |
241 | pci_read_irq(dev); |
238 | pci_read_irq(dev); |
242 | pci_read_bases(dev, 6, PCI_ROM_ADDRESS); |
239 | pci_read_bases(dev, 6, PCI_ROM_ADDRESS); |
243 | dev->subsystem_vendor = PciRead16(dev->busnr, dev->devfn,PCI_SUBSYSTEM_VENDOR_ID); |
240 | dev->subsystem_vendor = PciRead16(dev->busnr, dev->devfn,PCI_SUBSYSTEM_VENDOR_ID); |
244 | dev->subsystem_device = PciRead16(dev->busnr, dev->devfn, PCI_SUBSYSTEM_ID); |
241 | dev->subsystem_device = PciRead16(dev->busnr, dev->devfn, PCI_SUBSYSTEM_ID); |
245 | 242 | ||
246 | /* |
243 | /* |
247 | * Do the ugly legacy mode stuff here rather than broken chip |
244 | * Do the ugly legacy mode stuff here rather than broken chip |
248 | * quirk code. Legacy mode ATA controllers have fixed |
245 | * quirk code. Legacy mode ATA controllers have fixed |
249 | * addresses. These are not always echoed in BAR0-3, and |
246 | * addresses. These are not always echoed in BAR0-3, and |
250 | * BAR0-3 in a few cases contain junk! |
247 | * BAR0-3 in a few cases contain junk! |
251 | */ |
248 | */ |
252 | if (class == PCI_CLASS_STORAGE_IDE) |
249 | if (class == PCI_CLASS_STORAGE_IDE) |
253 | { |
250 | { |
254 | u8 progif; |
251 | u8 progif; |
255 | 252 | ||
256 | progif = PciRead8(dev->busnr, dev->devfn,PCI_CLASS_PROG); |
253 | progif = PciRead8(dev->busnr, dev->devfn,PCI_CLASS_PROG); |
257 | if ((progif & 1) == 0) |
254 | if ((progif & 1) == 0) |
258 | { |
255 | { |
259 | dev->resource[0].start = 0x1F0; |
256 | dev->resource[0].start = 0x1F0; |
260 | dev->resource[0].end = 0x1F7; |
257 | dev->resource[0].end = 0x1F7; |
261 | dev->resource[0].flags = LEGACY_IO_RESOURCE; |
258 | dev->resource[0].flags = LEGACY_IO_RESOURCE; |
262 | dev->resource[1].start = 0x3F6; |
259 | dev->resource[1].start = 0x3F6; |
263 | dev->resource[1].end = 0x3F6; |
260 | dev->resource[1].end = 0x3F6; |
264 | dev->resource[1].flags = LEGACY_IO_RESOURCE; |
261 | dev->resource[1].flags = LEGACY_IO_RESOURCE; |
265 | } |
262 | } |
266 | if ((progif & 4) == 0) |
263 | if ((progif & 4) == 0) |
267 | { |
264 | { |
268 | dev->resource[2].start = 0x170; |
265 | dev->resource[2].start = 0x170; |
269 | dev->resource[2].end = 0x177; |
266 | dev->resource[2].end = 0x177; |
270 | dev->resource[2].flags = LEGACY_IO_RESOURCE; |
267 | dev->resource[2].flags = LEGACY_IO_RESOURCE; |
271 | dev->resource[3].start = 0x376; |
268 | dev->resource[3].start = 0x376; |
272 | dev->resource[3].end = 0x376; |
269 | dev->resource[3].end = 0x376; |
273 | dev->resource[3].flags = LEGACY_IO_RESOURCE; |
270 | dev->resource[3].flags = LEGACY_IO_RESOURCE; |
274 | }; |
271 | }; |
275 | } |
272 | } |
276 | break; |
273 | break; |
277 | 274 | ||
278 | case PCI_HEADER_TYPE_BRIDGE: /* bridge header */ |
275 | case PCI_HEADER_TYPE_BRIDGE: /* bridge header */ |
279 | if (class != PCI_CLASS_BRIDGE_PCI) |
276 | if (class != PCI_CLASS_BRIDGE_PCI) |
280 | goto bad; |
277 | goto bad; |
281 | /* The PCI-to-PCI bridge spec requires that subtractive |
278 | /* The PCI-to-PCI bridge spec requires that subtractive |
282 | decoding (i.e. transparent) bridge must have programming |
279 | decoding (i.e. transparent) bridge must have programming |
283 | interface code of 0x01. */ |
280 | interface code of 0x01. */ |
284 | pci_read_irq(dev); |
281 | pci_read_irq(dev); |
285 | dev->transparent = ((dev->class & 0xff) == 1); |
282 | dev->transparent = ((dev->class & 0xff) == 1); |
286 | pci_read_bases(dev, 2, PCI_ROM_ADDRESS1); |
283 | pci_read_bases(dev, 2, PCI_ROM_ADDRESS1); |
287 | break; |
284 | break; |
288 | 285 | ||
289 | case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */ |
286 | case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */ |
290 | if (class != PCI_CLASS_BRIDGE_CARDBUS) |
287 | if (class != PCI_CLASS_BRIDGE_CARDBUS) |
291 | goto bad; |
288 | goto bad; |
292 | pci_read_irq(dev); |
289 | pci_read_irq(dev); |
293 | pci_read_bases(dev, 1, 0); |
290 | pci_read_bases(dev, 1, 0); |
294 | dev->subsystem_vendor = PciRead16(dev->busnr, |
291 | dev->subsystem_vendor = PciRead16(dev->busnr, |
295 | dev->devfn, |
292 | dev->devfn, |
296 | PCI_CB_SUBSYSTEM_VENDOR_ID); |
293 | PCI_CB_SUBSYSTEM_VENDOR_ID); |
297 | 294 | ||
298 | dev->subsystem_device = PciRead16(dev->busnr, |
295 | dev->subsystem_device = PciRead16(dev->busnr, |
299 | dev->devfn, |
296 | dev->devfn, |
300 | PCI_CB_SUBSYSTEM_ID); |
297 | PCI_CB_SUBSYSTEM_ID); |
301 | break; |
298 | break; |
302 | 299 | ||
303 | default: /* unknown header */ |
300 | default: /* unknown header */ |
304 | printk(KERN_ERR "PCI: device %s has unknown header type %02x, ignoring.\n", |
301 | printk(KERN_ERR "PCI: device %s has unknown header type %02x, ignoring.\n", |
305 | pci_name(dev), dev->hdr_type); |
302 | pci_name(dev), dev->hdr_type); |
306 | return -1; |
303 | return -1; |
307 | 304 | ||
308 | bad: |
305 | bad: |
309 | printk(KERN_ERR "PCI: %s: class %x doesn't match header type %02x. Ignoring class.\n", |
306 | printk(KERN_ERR "PCI: %s: class %x doesn't match header type %02x. Ignoring class.\n", |
310 | pci_name(dev), class, dev->hdr_type); |
307 | pci_name(dev), class, dev->hdr_type); |
311 | dev->class = PCI_CLASS_NOT_DEFINED; |
308 | dev->class = PCI_CLASS_NOT_DEFINED; |
312 | } |
309 | } |
313 | 310 | ||
314 | /* We found a fine healthy device, go go go... */ |
311 | /* We found a fine healthy device, go go go... */ |
315 | 312 | ||
316 | return 0; |
313 | return 0; |
317 | }; |
314 | }; |
318 | 315 | ||
319 | static pci_dev_t* pci_scan_device(u32 busnr, int devfn) |
316 | static pci_dev_t* pci_scan_device(u32 busnr, int devfn) |
320 | { |
317 | { |
321 | pci_dev_t *dev; |
318 | pci_dev_t *dev; |
322 | 319 | ||
323 | u32 id; |
320 | u32 id; |
324 | u8 hdr; |
321 | u8 hdr; |
325 | 322 | ||
326 | int timeout = 10; |
323 | int timeout = 10; |
327 | 324 | ||
328 | id = PciRead32(busnr, devfn, PCI_VENDOR_ID); |
325 | id = PciRead32(busnr, devfn, PCI_VENDOR_ID); |
329 | 326 | ||
330 | /* some broken boards return 0 or ~0 if a slot is empty: */ |
327 | /* some broken boards return 0 or ~0 if a slot is empty: */ |
331 | if (id == 0xffffffff || id == 0x00000000 || |
328 | if (id == 0xffffffff || id == 0x00000000 || |
332 | id == 0x0000ffff || id == 0xffff0000) |
329 | id == 0x0000ffff || id == 0xffff0000) |
333 | return NULL; |
330 | return NULL; |
334 | 331 | ||
335 | while (id == 0xffff0001) |
332 | while (id == 0xffff0001) |
336 | { |
333 | { |
337 | 334 | ||
338 | delay(timeout/10); |
335 | delay(timeout/10); |
339 | timeout *= 2; |
336 | timeout *= 2; |
340 | 337 | ||
341 | id = PciRead32(busnr, devfn, PCI_VENDOR_ID); |
338 | id = PciRead32(busnr, devfn, PCI_VENDOR_ID); |
342 | 339 | ||
343 | /* Card hasn't responded in 60 seconds? Must be stuck. */ |
340 | /* Card hasn't responded in 60 seconds? Must be stuck. */ |
344 | if (timeout > 60 * 100) |
341 | if (timeout > 60 * 100) |
345 | { |
342 | { |
346 | printk(KERN_WARNING "Device %04x:%02x:%02x.%d not " |
343 | printk(KERN_WARNING "Device %04x:%02x:%02x.%d not " |
347 | "responding\n", busnr,PCI_SLOT(devfn),PCI_FUNC(devfn)); |
344 | "responding\n", busnr,PCI_SLOT(devfn),PCI_FUNC(devfn)); |
348 | return NULL; |
345 | return NULL; |
349 | } |
346 | } |
350 | }; |
347 | }; |
351 | 348 | ||
352 | if( pci_scan_filter(id, busnr, devfn) == 0) |
349 | if( pci_scan_filter(id, busnr, devfn) == 0) |
353 | return NULL; |
350 | return NULL; |
354 | 351 | ||
355 | hdr = PciRead8(busnr, devfn, PCI_HEADER_TYPE); |
352 | hdr = PciRead8(busnr, devfn, PCI_HEADER_TYPE); |
356 | 353 | ||
357 | dev = (pci_dev_t*)kzalloc(sizeof(pci_dev_t), 0); |
354 | dev = (pci_dev_t*)kzalloc(sizeof(pci_dev_t), 0); |
358 | if(unlikely(dev == NULL)) |
355 | if(unlikely(dev == NULL)) |
359 | return NULL; |
356 | return NULL; |
360 | 357 | ||
361 | INIT_LIST_HEAD(&dev->link); |
358 | INIT_LIST_HEAD(&dev->link); |
362 | 359 | ||
363 | 360 | ||
364 | dev->pci_dev.busnr = busnr; |
361 | dev->pci_dev.busnr = busnr; |
365 | dev->pci_dev.devfn = devfn; |
362 | dev->pci_dev.devfn = devfn; |
366 | dev->pci_dev.hdr_type = hdr & 0x7f; |
363 | dev->pci_dev.hdr_type = hdr & 0x7f; |
367 | dev->pci_dev.multifunction = !!(hdr & 0x80); |
364 | dev->pci_dev.multifunction = !!(hdr & 0x80); |
368 | dev->pci_dev.vendor = id & 0xffff; |
365 | dev->pci_dev.vendor = id & 0xffff; |
369 | dev->pci_dev.device = (id >> 16) & 0xffff; |
366 | dev->pci_dev.device = (id >> 16) & 0xffff; |
370 | 367 | ||
371 | pci_setup_device(&dev->pci_dev); |
368 | pci_setup_device(&dev->pci_dev); |
372 | 369 | ||
373 | return dev; |
370 | return dev; |
374 | 371 | ||
375 | }; |
372 | }; |
376 | 373 | ||
377 | 374 | ||
378 | 375 | ||
379 | 376 | ||
380 | int _pci_scan_slot(u32 bus, int devfn) |
377 | int _pci_scan_slot(u32 bus, int devfn) |
381 | { |
378 | { |
382 | int func, nr = 0; |
379 | int func, nr = 0; |
383 | 380 | ||
384 | for (func = 0; func < 8; func++, devfn++) |
381 | for (func = 0; func < 8; func++, devfn++) |
385 | { |
382 | { |
386 | pci_dev_t *dev; |
383 | pci_dev_t *dev; |
387 | 384 | ||
388 | dev = pci_scan_device(bus, devfn); |
385 | dev = pci_scan_device(bus, devfn); |
389 | if( dev ) |
386 | if( dev ) |
390 | { |
387 | { |
391 | list_add(&dev->link, &devices); |
388 | list_add(&dev->link, &devices); |
392 | 389 | ||
393 | nr++; |
390 | nr++; |
394 | 391 | ||
395 | /* |
392 | /* |
396 | * If this is a single function device, |
393 | * If this is a single function device, |
397 | * don't scan past the first function. |
394 | * don't scan past the first function. |
398 | */ |
395 | */ |
399 | if (!dev->pci_dev.multifunction) |
396 | if (!dev->pci_dev.multifunction) |
400 | { |
397 | { |
401 | if (func > 0) { |
398 | if (func > 0) { |
402 | dev->pci_dev.multifunction = 1; |
399 | dev->pci_dev.multifunction = 1; |
403 | } |
400 | } |
404 | else { |
401 | else { |
405 | break; |
402 | break; |
406 | } |
403 | } |
407 | } |
404 | } |
408 | } |
405 | } |
409 | else { |
406 | else { |
410 | if (func == 0) |
407 | if (func == 0) |
411 | break; |
408 | break; |
412 | } |
409 | } |
413 | }; |
410 | }; |
414 | 411 | ||
415 | return nr; |
412 | return nr; |
416 | }; |
413 | }; |
417 | 414 | ||
418 | #define PCI_FIND_CAP_TTL 48 |
415 | #define PCI_FIND_CAP_TTL 48 |
419 | 416 | ||
420 | static int __pci_find_next_cap_ttl(unsigned int bus, unsigned int devfn, |
417 | static int __pci_find_next_cap_ttl(unsigned int bus, unsigned int devfn, |
421 | u8 pos, int cap, int *ttl) |
418 | u8 pos, int cap, int *ttl) |
422 | { |
419 | { |
423 | u8 id; |
420 | u8 id; |
424 | 421 | ||
425 | while ((*ttl)--) { |
422 | while ((*ttl)--) { |
426 | pos = PciRead8(bus, devfn, pos); |
423 | pos = PciRead8(bus, devfn, pos); |
427 | if (pos < 0x40) |
424 | if (pos < 0x40) |
428 | break; |
425 | break; |
429 | pos &= ~3; |
426 | pos &= ~3; |
430 | id = PciRead8(bus, devfn, pos + PCI_CAP_LIST_ID); |
427 | id = PciRead8(bus, devfn, pos + PCI_CAP_LIST_ID); |
431 | if (id == 0xff) |
428 | if (id == 0xff) |
432 | break; |
429 | break; |
433 | if (id == cap) |
430 | if (id == cap) |
434 | return pos; |
431 | return pos; |
435 | pos += PCI_CAP_LIST_NEXT; |
432 | pos += PCI_CAP_LIST_NEXT; |
436 | } |
433 | } |
437 | return 0; |
434 | return 0; |
438 | } |
435 | } |
439 | 436 | ||
440 | static int __pci_find_next_cap(unsigned int bus, unsigned int devfn, |
437 | static int __pci_find_next_cap(unsigned int bus, unsigned int devfn, |
441 | u8 pos, int cap) |
438 | u8 pos, int cap) |
442 | { |
439 | { |
443 | int ttl = PCI_FIND_CAP_TTL; |
440 | int ttl = PCI_FIND_CAP_TTL; |
444 | 441 | ||
445 | return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl); |
442 | return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl); |
446 | } |
443 | } |
447 | 444 | ||
448 | static int __pci_bus_find_cap_start(unsigned int bus, |
445 | static int __pci_bus_find_cap_start(unsigned int bus, |
449 | unsigned int devfn, u8 hdr_type) |
446 | unsigned int devfn, u8 hdr_type) |
450 | { |
447 | { |
451 | u16 status; |
448 | u16 status; |
452 | 449 | ||
453 | status = PciRead16(bus, devfn, PCI_STATUS); |
450 | status = PciRead16(bus, devfn, PCI_STATUS); |
454 | if (!(status & PCI_STATUS_CAP_LIST)) |
451 | if (!(status & PCI_STATUS_CAP_LIST)) |
455 | return 0; |
452 | return 0; |
456 | 453 | ||
457 | switch (hdr_type) { |
454 | switch (hdr_type) { |
458 | case PCI_HEADER_TYPE_NORMAL: |
455 | case PCI_HEADER_TYPE_NORMAL: |
459 | case PCI_HEADER_TYPE_BRIDGE: |
456 | case PCI_HEADER_TYPE_BRIDGE: |
460 | return PCI_CAPABILITY_LIST; |
457 | return PCI_CAPABILITY_LIST; |
461 | case PCI_HEADER_TYPE_CARDBUS: |
458 | case PCI_HEADER_TYPE_CARDBUS: |
462 | return PCI_CB_CAPABILITY_LIST; |
459 | return PCI_CB_CAPABILITY_LIST; |
463 | default: |
460 | default: |
464 | return 0; |
461 | return 0; |
465 | } |
462 | } |
466 | 463 | ||
467 | return 0; |
464 | return 0; |
468 | } |
465 | } |
469 | 466 | ||
470 | 467 | ||
471 | int pci_find_capability(struct pci_dev *dev, int cap) |
468 | int pci_find_capability(struct pci_dev *dev, int cap) |
472 | { |
469 | { |
473 | int pos; |
470 | int pos; |
474 | 471 | ||
475 | pos = __pci_bus_find_cap_start(dev->busnr, dev->devfn, dev->hdr_type); |
472 | pos = __pci_bus_find_cap_start(dev->busnr, dev->devfn, dev->hdr_type); |
476 | if (pos) |
473 | if (pos) |
477 | pos = __pci_find_next_cap(dev->busnr, dev->devfn, pos, cap); |
474 | pos = __pci_find_next_cap(dev->busnr, dev->devfn, pos, cap); |
478 | 475 | ||
479 | return pos; |
476 | return pos; |
480 | } |
477 | } |
481 | 478 | ||
482 | 479 | ||
483 | 480 | ||
484 | 481 | ||
485 | int enum_pci_devices() |
482 | int enum_pci_devices() |
486 | { |
483 | { |
487 | pci_dev_t *dev; |
484 | pci_dev_t *dev; |
488 | u32 last_bus; |
485 | u32 last_bus; |
489 | u32 bus = 0 , devfn = 0; |
486 | u32 bus = 0 , devfn = 0; |
490 | 487 | ||
491 | 488 | ||
492 | last_bus = PciApi(1); |
489 | last_bus = PciApi(1); |
493 | 490 | ||
494 | 491 | ||
495 | if( unlikely(last_bus == -1)) |
492 | if( unlikely(last_bus == -1)) |
496 | return -1; |
493 | return -1; |
497 | 494 | ||
498 | for(;bus <= last_bus; bus++) |
495 | for(;bus <= last_bus; bus++) |
499 | { |
496 | { |
500 | for (devfn = 0; devfn < 0x100; devfn += 8) |
497 | for (devfn = 0; devfn < 0x100; devfn += 8) |
501 | _pci_scan_slot(bus, devfn); |
498 | _pci_scan_slot(bus, devfn); |
502 | 499 | ||
503 | 500 | ||
504 | } |
501 | } |
505 | for(dev = (pci_dev_t*)devices.next; |
502 | for(dev = (pci_dev_t*)devices.next; |
506 | &dev->link != &devices; |
503 | &dev->link != &devices; |
507 | dev = (pci_dev_t*)dev->link.next) |
504 | dev = (pci_dev_t*)dev->link.next) |
508 | { |
505 | { |
509 | dbgprintf("PCI device %x:%x bus:%x devfn:%x\n", |
506 | dbgprintf("PCI device %x:%x bus:%x devfn:%x\n", |
510 | dev->pci_dev.vendor, |
507 | dev->pci_dev.vendor, |
511 | dev->pci_dev.device, |
508 | dev->pci_dev.device, |
512 | dev->pci_dev.busnr, |
509 | dev->pci_dev.busnr, |
513 | dev->pci_dev.devfn); |
510 | dev->pci_dev.devfn); |
514 | 511 | ||
515 | } |
512 | } |
516 | return 0; |
513 | return 0; |
517 | } |
514 | } |
518 | 515 | ||
519 | const struct pci_device_id* find_pci_device(pci_dev_t* pdev, const struct pci_device_id *idlist) |
516 | const struct pci_device_id* find_pci_device(pci_dev_t* pdev, const struct pci_device_id *idlist) |
520 | { |
517 | { |
521 | pci_dev_t *dev; |
518 | pci_dev_t *dev; |
522 | const struct pci_device_id *ent; |
519 | const struct pci_device_id *ent; |
523 | 520 | ||
524 | for(dev = (pci_dev_t*)devices.next; |
521 | for(dev = (pci_dev_t*)devices.next; |
525 | &dev->link != &devices; |
522 | &dev->link != &devices; |
526 | dev = (pci_dev_t*)dev->link.next) |
523 | dev = (pci_dev_t*)dev->link.next) |
527 | { |
524 | { |
528 | if( dev->pci_dev.vendor != idlist->vendor ) |
525 | if( dev->pci_dev.vendor != idlist->vendor ) |
529 | continue; |
526 | continue; |
530 | 527 | ||
531 | for(ent = idlist; ent->vendor != 0; ent++) |
528 | for(ent = idlist; ent->vendor != 0; ent++) |
532 | { |
529 | { |
533 | if(unlikely(ent->device == dev->pci_dev.device)) |
530 | if(unlikely(ent->device == dev->pci_dev.device)) |
534 | { |
531 | { |
535 | pdev->pci_dev = dev->pci_dev; |
532 | pdev->pci_dev = dev->pci_dev; |
536 | return ent; |
533 | return ent; |
537 | } |
534 | } |
538 | }; |
535 | }; |
539 | } |
536 | } |
540 | 537 | ||
541 | return NULL; |
538 | return NULL; |
542 | }; |
539 | }; |
543 | 540 | ||
544 | struct pci_dev * |
541 | struct pci_dev * |
545 | pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from) |
542 | pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from) |
546 | { |
543 | { |
547 | pci_dev_t *dev; |
544 | pci_dev_t *dev; |
548 | 545 | ||
549 | dev = (pci_dev_t*)devices.next; |
546 | dev = (pci_dev_t*)devices.next; |
550 | 547 | ||
551 | if(from != NULL) |
548 | if(from != NULL) |
552 | { |
549 | { |
553 | for(; &dev->link != &devices; |
550 | for(; &dev->link != &devices; |
554 | dev = (pci_dev_t*)dev->link.next) |
551 | dev = (pci_dev_t*)dev->link.next) |
555 | { |
552 | { |
556 | if( &dev->pci_dev == from) |
553 | if( &dev->pci_dev == from) |
557 | { |
554 | { |
558 | dev = (pci_dev_t*)dev->link.next; |
555 | dev = (pci_dev_t*)dev->link.next; |
559 | break; |
556 | break; |
560 | }; |
557 | }; |
561 | } |
558 | } |
562 | }; |
559 | }; |
563 | 560 | ||
564 | for(; &dev->link != &devices; |
561 | for(; &dev->link != &devices; |
565 | dev = (pci_dev_t*)dev->link.next) |
562 | dev = (pci_dev_t*)dev->link.next) |
566 | { |
563 | { |
567 | if( dev->pci_dev.vendor != vendor ) |
564 | if( dev->pci_dev.vendor != vendor ) |
568 | continue; |
565 | continue; |
569 | 566 | ||
570 | if(dev->pci_dev.device == device) |
567 | if(dev->pci_dev.device == device) |
571 | { |
568 | { |
572 | return &dev->pci_dev; |
569 | return &dev->pci_dev; |
573 | } |
570 | } |
574 | } |
571 | } |
575 | return NULL; |
572 | return NULL; |
576 | }; |
573 | }; |
577 | 574 | ||
578 | 575 | ||
579 | struct pci_dev * _pci_get_bus_and_slot(unsigned int bus, unsigned int devfn) |
576 | struct pci_dev * _pci_get_bus_and_slot(unsigned int bus, unsigned int devfn) |
580 | { |
577 | { |
581 | pci_dev_t *dev; |
578 | pci_dev_t *dev; |
582 | 579 | ||
583 | for(dev = (pci_dev_t*)devices.next; |
580 | for(dev = (pci_dev_t*)devices.next; |
584 | &dev->link != &devices; |
581 | &dev->link != &devices; |
585 | dev = (pci_dev_t*)dev->link.next) |
582 | dev = (pci_dev_t*)dev->link.next) |
586 | { |
583 | { |
587 | if ( dev->pci_dev.busnr == bus && dev->pci_dev.devfn == devfn) |
584 | if ( dev->pci_dev.busnr == bus && dev->pci_dev.devfn == devfn) |
588 | return &dev->pci_dev; |
585 | return &dev->pci_dev; |
589 | } |
586 | } |
590 | return NULL; |
587 | return NULL; |
591 | } |
588 | } |
592 | 589 | ||
593 | struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from) |
590 | struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from) |
594 | { |
591 | { |
595 | pci_dev_t *dev; |
592 | pci_dev_t *dev; |
596 | 593 | ||
597 | dev = (pci_dev_t*)devices.next; |
594 | dev = (pci_dev_t*)devices.next; |
598 | 595 | ||
599 | if(from != NULL) |
596 | if(from != NULL) |
600 | { |
597 | { |
601 | for(; &dev->link != &devices; |
598 | for(; &dev->link != &devices; |
602 | dev = (pci_dev_t*)dev->link.next) |
599 | dev = (pci_dev_t*)dev->link.next) |
603 | { |
600 | { |
604 | if( &dev->pci_dev == from) |
601 | if( &dev->pci_dev == from) |
605 | { |
602 | { |
606 | dev = (pci_dev_t*)dev->link.next; |
603 | dev = (pci_dev_t*)dev->link.next; |
607 | break; |
604 | break; |
608 | }; |
605 | }; |
609 | } |
606 | } |
610 | }; |
607 | }; |
611 | 608 | ||
612 | for(; &dev->link != &devices; |
609 | for(; &dev->link != &devices; |
613 | dev = (pci_dev_t*)dev->link.next) |
610 | dev = (pci_dev_t*)dev->link.next) |
614 | { |
611 | { |
615 | if( dev->pci_dev.class == class) |
612 | if( dev->pci_dev.class == class) |
616 | { |
613 | { |
617 | return &dev->pci_dev; |
614 | return &dev->pci_dev; |
618 | } |
615 | } |
619 | } |
616 | } |
620 | 617 | ||
621 | return NULL; |
618 | return NULL; |
622 | } |
619 | } |
623 | 620 | ||
624 | 621 | ||
#define PIO_OFFSET   0x10000UL   /* bias added to port numbers to form cookies */
#define PIO_MASK     0x0ffffUL   /* low bits of a cookie hold the port number */
#define PIO_RESERVED 0x40000UL   /* cookies at/above this are real MMIO addresses */

/*
 * Dispatch on an ioport_map()/ioremap() cookie: execute 'is_mmio' when
 * 'addr' is a genuine MMIO virtual address (>= PIO_RESERVED), or decode
 * the port number into the local 'port' and execute 'is_pio' when it came
 * from ioport_map(). Cookies in [0, PIO_OFFSET] match neither branch and
 * are silently ignored.
 */
#define IO_COND(addr, is_pio, is_mmio) do {            \
    unsigned long port = (unsigned long __force)addr;  \
    if (port >= PIO_RESERVED) {                        \
        is_mmio;                                       \
    } else if (port > PIO_OFFSET) {                    \
        port &= PIO_MASK;                              \
        is_pio;                                        \
    };                                                 \
} while (0)
638 | 635 | ||
/* Create a virtual mapping cookie for an IO port range */
void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
    /* Only 16-bit port numbers fit in the cookie encoding. */
    if (port > PIO_MASK)
        return NULL;
    /* Bias by PIO_OFFSET so IO_COND() can tell ports apart from MMIO.
     * NOTE(review): 'nr' (range length) is not validated here — the range
     * may extend past PIO_MASK; confirm callers keep ranges in bounds. */
    return (void __iomem *) (unsigned long) (port + PIO_OFFSET);
}
646 | 643 | ||
647 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) |
644 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) |
648 | { |
645 | { |
649 | resource_size_t start = pci_resource_start(dev, bar); |
646 | resource_size_t start = pci_resource_start(dev, bar); |
650 | resource_size_t len = pci_resource_len(dev, bar); |
647 | resource_size_t len = pci_resource_len(dev, bar); |
651 | unsigned long flags = pci_resource_flags(dev, bar); |
648 | unsigned long flags = pci_resource_flags(dev, bar); |
652 | 649 | ||
653 | if (!len || !start) |
650 | if (!len || !start) |
654 | return NULL; |
651 | return NULL; |
655 | if (maxlen && len > maxlen) |
652 | if (maxlen && len > maxlen) |
656 | len = maxlen; |
653 | len = maxlen; |
657 | if (flags & IORESOURCE_IO) |
654 | if (flags & IORESOURCE_IO) |
658 | return ioport_map(start, len); |
655 | return ioport_map(start, len); |
659 | if (flags & IORESOURCE_MEM) { |
656 | if (flags & IORESOURCE_MEM) { |
660 | return ioremap(start, len); |
657 | return ioremap(start, len); |
661 | } |
658 | } |
662 | /* What? */ |
659 | /* What? */ |
663 | return NULL; |
660 | return NULL; |
664 | } |
661 | } |
665 | 662 | ||
/*
 * pci_iounmap - release a mapping obtained from pci_iomap()
 *
 * Port-I/O cookies need no teardown; real MMIO mappings go to iounmap().
 */
void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
    IO_COND(addr, /* nothing */, iounmap(addr));
}
670 | 667 | ||
671 | - | ||
/*
 * Convert a CPU-view resource to a bus-address region. On this platform
 * the two address spaces coincide, so this is a plain copy.
 */
static inline void
_pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
                         struct resource *res)
{
    region->start = res->start;
    region->end = res->end;
}
668 | |
679 | 669 | ||
680 | 670 | ||
/*
 * pci_enable_rom - enable ROM decoding for a device
 * @pdev: PCI device whose expansion ROM should be enabled
 *
 * Programs the ROM BAR with the resource's bus address and sets the
 * enable bit. Returns 0 on success, -1 if the device has no ROM resource.
 */
int pci_enable_rom(struct pci_dev *pdev)
{
    struct resource *res = pdev->resource + PCI_ROM_RESOURCE;
    struct pci_bus_region region;
    u32 rom_addr;

    if (!res->flags)
        return -1;

    _pcibios_resource_to_bus(pdev, &region, res);
    pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
    /* Keep the low (non-address) bits, then install address + enable. */
    rom_addr &= ~PCI_ROM_ADDRESS_MASK;
    rom_addr |= region.start | PCI_ROM_ADDRESS_ENABLE;
    pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr);
    return 0;
}
697 | 687 | ||
/*
 * pci_disable_rom - disable ROM decoding for a device
 * @pdev: PCI device whose expansion ROM should be disabled
 *
 * Clears only the enable bit; the programmed ROM address is preserved.
 */
void pci_disable_rom(struct pci_dev *pdev)
{
    u32 rom_addr;
    pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
    rom_addr &= ~PCI_ROM_ADDRESS_ENABLE;
    pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr);
}
705 | 695 | ||
706 | /** |
696 | /** |
707 | * pci_get_rom_size - obtain the actual size of the ROM image |
697 | * pci_get_rom_size - obtain the actual size of the ROM image |
708 | * @pdev: target PCI device |
698 | * @pdev: target PCI device |
709 | * @rom: kernel virtual pointer to image of ROM |
699 | * @rom: kernel virtual pointer to image of ROM |
710 | * @size: size of PCI window |
700 | * @size: size of PCI window |
711 | * return: size of actual ROM image |
701 | * return: size of actual ROM image |
712 | * |
702 | * |
713 | * Determine the actual length of the ROM image. |
703 | * Determine the actual length of the ROM image. |
714 | * The PCI window size could be much larger than the |
704 | * The PCI window size could be much larger than the |
715 | * actual image size. |
705 | * actual image size. |
716 | */ |
706 | */ |
717 | size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size) |
707 | size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size) |
718 | { |
708 | { |
719 | void __iomem *image; |
709 | void __iomem *image; |
720 | int last_image; |
710 | int last_image; |
721 | 711 | ||
722 | image = rom; |
712 | image = rom; |
723 | do { |
713 | do { |
724 | void __iomem *pds; |
714 | void __iomem *pds; |
725 | /* Standard PCI ROMs start out with these bytes 55 AA */ |
715 | /* Standard PCI ROMs start out with these bytes 55 AA */ |
726 | if (readb(image) != 0x55) { |
716 | if (readb(image) != 0x55) { |
727 | dev_err(&pdev->dev, "Invalid ROM contents\n"); |
717 | dev_err(&pdev->dev, "Invalid ROM contents\n"); |
728 | break; |
718 | break; |
729 | } |
719 | } |
730 | if (readb(image + 1) != 0xAA) |
720 | if (readb(image + 1) != 0xAA) |
731 | break; |
721 | break; |
732 | /* get the PCI data structure and check its signature */ |
722 | /* get the PCI data structure and check its signature */ |
733 | pds = image + readw(image + 24); |
723 | pds = image + readw(image + 24); |
734 | if (readb(pds) != 'P') |
724 | if (readb(pds) != 'P') |
735 | break; |
725 | break; |
736 | if (readb(pds + 1) != 'C') |
726 | if (readb(pds + 1) != 'C') |
737 | break; |
727 | break; |
738 | if (readb(pds + 2) != 'I') |
728 | if (readb(pds + 2) != 'I') |
739 | break; |
729 | break; |
740 | if (readb(pds + 3) != 'R') |
730 | if (readb(pds + 3) != 'R') |
741 | break; |
731 | break; |
742 | last_image = readb(pds + 21) & 0x80; |
732 | last_image = readb(pds + 21) & 0x80; |
743 | /* this length is reliable */ |
733 | /* this length is reliable */ |
744 | image += readw(pds + 16) * 512; |
734 | image += readw(pds + 16) * 512; |
745 | } while (!last_image); |
735 | } while (!last_image); |
746 | 736 | ||
747 | /* never return a size larger than the PCI resource window */ |
737 | /* never return a size larger than the PCI resource window */ |
748 | /* there are known ROMs that get the size wrong */ |
738 | /* there are known ROMs that get the size wrong */ |
749 | return min((size_t)(image - rom), size); |
739 | return min((size_t)(image - rom), size); |
750 | } |
740 | } |
751 | 741 | ||
752 | 742 | ||
/**
 * pci_map_rom - map a PCI ROM to kernel space
 * @pdev: pointer to pci device struct
 * @size: pointer to receive size of pci window over ROM
 *
 * Return: kernel virtual pointer to image of ROM
 *
 * Map a PCI ROM into kernel space. If ROM is boot video ROM,
 * the shadow BIOS copy will be returned instead of the
 * actual ROM.
 */
void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
{
    struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
    loff_t start;
    void __iomem *rom;

    /*
     * IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy
     * memory map if the VGA enable bit of the Bridge Control register is
     * set for embedded VGA.
     */
    if (res->flags & IORESOURCE_ROM_SHADOW) {
        /* primary video rom always starts here */
        start = (loff_t)0xC0000;
        *size = 0x20000; /* cover C000:0 through E000:0 */
    } else {
        if (res->flags &
            (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) {
            /* A RAM copy already exists; return its address directly,
             * no ioremap needed (pci_unmap_rom skips iounmap too). */
            *size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
            return (void __iomem *)(unsigned long)
                pci_resource_start(pdev, PCI_ROM_RESOURCE);
        } else {
            /* Fall back to the legacy video ROM window.
             * NOTE(review): the upstream version maps the device's own
             * ROM BAR here — confirm this shortcut is intended. */
            start = (loff_t)0xC0000;
            *size = 0x20000; /* cover C000:0 through E000:0 */

        }
    }

    rom = ioremap(start, *size);
    if (!rom) {
        /* restore enable if ioremap fails */
        if (!(res->flags & (IORESOURCE_ROM_ENABLE |
                            IORESOURCE_ROM_SHADOW |
                            IORESOURCE_ROM_COPY)))
            pci_disable_rom(pdev);
        return NULL;
    }

    /*
     * Try to find the true size of the ROM since sometimes the PCI window
     * size is much larger than the actual size of the ROM.
     * True size is important if the ROM is going to be copied.
     */
    *size = pci_get_rom_size(pdev, rom, *size);
    return rom;
}
810 | 800 | ||
/*
 * pci_unmap_rom - release a ROM mapping from pci_map_rom()
 * @pdev: PCI device the ROM belongs to
 * @rom: pointer returned by pci_map_rom()
 *
 * RAM copies were never ioremapped, so they need no unmap.
 */
void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom)
{
    struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];

    if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY))
        return;

    iounmap(rom);

    /* Disable again before continuing, leave enabled if pci=rom */
    if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW)))
        pci_disable_rom(pdev);
}
824 | 814 | ||
825 | static void __pci_set_master(struct pci_dev *dev, bool enable) |
815 | static void __pci_set_master(struct pci_dev *dev, bool enable) |
826 | { |
816 | { |
827 | u16 old_cmd, cmd; |
817 | u16 old_cmd, cmd; |
828 | 818 | ||
829 | pci_read_config_word(dev, PCI_COMMAND, &old_cmd); |
819 | pci_read_config_word(dev, PCI_COMMAND, &old_cmd); |
830 | if (enable) |
820 | if (enable) |
831 | cmd = old_cmd | PCI_COMMAND_MASTER; |
821 | cmd = old_cmd | PCI_COMMAND_MASTER; |
832 | else |
822 | else |
833 | cmd = old_cmd & ~PCI_COMMAND_MASTER; |
823 | cmd = old_cmd & ~PCI_COMMAND_MASTER; |
834 | if (cmd != old_cmd) { |
824 | if (cmd != old_cmd) { |
835 | dbgprintf("%s bus mastering\n", |
825 | dbgprintf("%s bus mastering\n", |
836 | enable ? "enabling" : "disabling"); |
826 | enable ? "enabling" : "disabling"); |
837 | pci_write_config_word(dev, PCI_COMMAND, cmd); |
827 | pci_write_config_word(dev, PCI_COMMAND, cmd); |
838 | } |
828 | } |
839 | dev->is_busmaster = enable; |
829 | dev->is_busmaster = enable; |
840 | } |
830 | } |
841 | 831 | ||
842 | 832 | ||
/* pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
void pci_set_master(struct pci_dev *dev)
{
    __pci_set_master(dev, true);
    /* arch hook intentionally disabled in this port */
//    pcibios_set_master(dev);
}
843 | } |
854 | 844 | ||
/**
 * pci_clear_master - disables bus-mastering for device dev
 * @dev: the PCI device to disable
 */
void pci_clear_master(struct pci_dev *dev)
{
    __pci_set_master(dev, false);
}
863 | 853 | ||
864 | 854 | ||
/* Capability structure version number (low bits of the PCIe flags register). */
static inline int pcie_cap_version(const struct pci_dev *dev)
{
    return dev->pcie_flags_reg & PCI_EXP_FLAGS_VERS;
}

/* Device Capabilities/Control/Status registers exist for every function. */
static inline bool pcie_cap_has_devctl(const struct pci_dev *dev)
{
    return true;
}

/*
 * Link registers: present on every v2+ capability; on v1 only for device
 * types that actually have a link.
 */
static inline bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
{
    int type = pci_pcie_type(dev);

    return pcie_cap_version(dev) > 1 ||
           type == PCI_EXP_TYPE_ROOT_PORT ||
           type == PCI_EXP_TYPE_ENDPOINT ||
           type == PCI_EXP_TYPE_LEG_END;
}

/*
 * Slot registers: present on every v2+ capability; on v1 only for root
 * ports, and downstream ports whose flags report an attached slot.
 */
static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
{
    int type = pci_pcie_type(dev);

    return pcie_cap_version(dev) > 1 ||
           type == PCI_EXP_TYPE_ROOT_PORT ||
           (type == PCI_EXP_TYPE_DOWNSTREAM &&
            dev->pcie_flags_reg & PCI_EXP_FLAGS_SLOT);
}

/*
 * Root registers: present on every v2+ capability; on v1 only for root
 * ports and root-complex event collectors.
 */
static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
{
    int type = pci_pcie_type(dev);

    return pcie_cap_version(dev) > 1 ||
           type == PCI_EXP_TYPE_ROOT_PORT ||
           type == PCI_EXP_TYPE_RC_EC;
}
903 | 893 | ||
/*
 * Whether the given register offset within the PCI Express Capability is
 * actually implemented by this device, per capability version and device
 * type. Unimplemented registers read as 0 / ignore writes in the
 * pcie_capability_* accessors below.
 */
static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
{
    if (!pci_is_pcie(dev))
        return false;

    switch (pos) {
    case PCI_EXP_FLAGS_TYPE:
        return true;
    case PCI_EXP_DEVCAP:
    case PCI_EXP_DEVCTL:
    case PCI_EXP_DEVSTA:
        return pcie_cap_has_devctl(dev);
    case PCI_EXP_LNKCAP:
    case PCI_EXP_LNKCTL:
    case PCI_EXP_LNKSTA:
        return pcie_cap_has_lnkctl(dev);
    case PCI_EXP_SLTCAP:
    case PCI_EXP_SLTCTL:
    case PCI_EXP_SLTSTA:
        return pcie_cap_has_sltctl(dev);
    case PCI_EXP_RTCTL:
    case PCI_EXP_RTCAP:
    case PCI_EXP_RTSTA:
        return pcie_cap_has_rtctl(dev);
    case PCI_EXP_DEVCAP2:
    case PCI_EXP_DEVCTL2:
    case PCI_EXP_LNKCAP2:
    case PCI_EXP_LNKCTL2:
    case PCI_EXP_LNKSTA2:
        /* The "2" register set only exists in v2+ capabilities. */
        return pcie_cap_version(dev) > 1;
    default:
        return false;
    }
}
938 | 928 | ||
/*
 * Note that these accessor functions are only for the "PCI Express
 * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the
 * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
 */
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
{
    int ret;

    *val = 0;
    /* Word accesses must be 2-byte aligned within the capability. */
    if (pos & 1)
        return -EINVAL;

    if (pcie_capability_reg_implemented(dev, pos)) {
        ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
        /*
         * Reset *val to 0 if pci_read_config_word() fails, it may
         * have been written as 0xFFFF if hardware error happens
         * during pci_read_config_word().
         */
        if (ret)
            *val = 0;
        return ret;
    }

    /*
     * For Functions that do not implement the Slot Capabilities,
     * Slot Status, and Slot Control registers, these spaces must
     * be hardwired to 0b, with the exception of the Presence Detect
     * State bit in the Slot Status register of Downstream Ports,
     * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8)
     */
    if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA &&
        pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
        *val = PCI_EXP_SLTSTA_PDS;
    }

    return 0;
}
EXPORT_SYMBOL(pcie_capability_read_word);
979 | 969 | ||
980 | int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val) |
970 | int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val) |
981 | { |
971 | { |
982 | int ret; |
972 | int ret; |
983 | 973 | ||
984 | *val = 0; |
974 | *val = 0; |
985 | if (pos & 3) |
975 | if (pos & 3) |
986 | return -EINVAL; |
976 | return -EINVAL; |
987 | 977 | ||
988 | if (pcie_capability_reg_implemented(dev, pos)) { |
978 | if (pcie_capability_reg_implemented(dev, pos)) { |
989 | ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val); |
979 | ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val); |
990 | /* |
980 | /* |
991 | * Reset *val to 0 if pci_read_config_dword() fails, it may |
981 | * Reset *val to 0 if pci_read_config_dword() fails, it may |
992 | * have been written as 0xFFFFFFFF if hardware error happens |
982 | * have been written as 0xFFFFFFFF if hardware error happens |
993 | * during pci_read_config_dword(). |
983 | * during pci_read_config_dword(). |
994 | */ |
984 | */ |
995 | if (ret) |
985 | if (ret) |
996 | *val = 0; |
986 | *val = 0; |
997 | return ret; |
987 | return ret; |
998 | } |
988 | } |
999 | 989 | ||
1000 | if (pci_is_pcie(dev) && pos == PCI_EXP_SLTCTL && |
990 | if (pci_is_pcie(dev) && pos == PCI_EXP_SLTCTL && |
1001 | pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { |
991 | pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { |
1002 | *val = PCI_EXP_SLTSTA_PDS; |
992 | *val = PCI_EXP_SLTSTA_PDS; |
1003 | } |
993 | } |
1004 | 994 | ||
1005 | return 0; |
995 | return 0; |
1006 | } |
996 | } |
1007 | EXPORT_SYMBOL(pcie_capability_read_dword); |
997 | EXPORT_SYMBOL(pcie_capability_read_dword); |
1008 | 998 | ||
/*
 * pcie_capability_write_word - write a word to the PCIe Capability
 *
 * Writes to unimplemented registers are silently dropped (return 0),
 * matching the spec's "hardwired to 0" behavior. Returns -EINVAL for a
 * misaligned offset, otherwise the config-write result.
 */
int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
{
    if (pos & 1)
        return -EINVAL;

    if (!pcie_capability_reg_implemented(dev, pos))
        return 0;

    return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_word);
1020 | 1010 | ||
/*
 * pcie_capability_write_dword - write a dword to the PCIe Capability
 *
 * Writes to unimplemented registers are silently dropped (return 0).
 * Returns -EINVAL for a misaligned offset, otherwise the config-write
 * result.
 */
int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val)
{
    if (pos & 3)
        return -EINVAL;

    if (!pcie_capability_reg_implemented(dev, pos))
        return 0;

    return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_dword);
1032 | 1022 | ||
1033 | int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, |
1023 | int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, |
1034 | u16 clear, u16 set) |
1024 | u16 clear, u16 set) |
1035 | { |
1025 | { |
1036 | int ret; |
1026 | int ret; |
1037 | u16 val; |
1027 | u16 val; |
1038 | 1028 | ||
1039 | ret = pcie_capability_read_word(dev, pos, &val); |
1029 | ret = pcie_capability_read_word(dev, pos, &val); |
1040 | if (!ret) { |
1030 | if (!ret) { |
1041 | val &= ~clear; |
1031 | val &= ~clear; |
1042 | val |= set; |
1032 | val |= set; |
1043 | ret = pcie_capability_write_word(dev, pos, val); |
1033 | ret = pcie_capability_write_word(dev, pos, val); |
1044 | } |
1034 | } |
1045 | 1035 | ||
1046 | return ret; |
1036 | return ret; |
1047 | } |
1037 | } |
1048 | 1038 | ||
1049 | 1039 | ||
1050 | 1040 | ||
/*
 * pcie_get_readrq - get PCI Express maximum memory read request size
 * @dev: PCI device to query
 *
 * Returns the current Max_Read_Request_Size in bytes (128..4096).
 */
int pcie_get_readrq(struct pci_dev *dev)
{
    u16 ctl;

    pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);

    /* Field is an exponent in bits 14:12: 000b = 128B ... 101b = 4096B. */
    return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
}
EXPORT_SYMBOL(pcie_get_readrq);
1060 | 1050 | ||
/**
 * pcie_set_readrq - set PCI Express maximum memory read request
 * @dev: PCI device to query
 * @rq: maximum memory read count in bytes
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read request in bytes
 */
int pcie_set_readrq(struct pci_dev *dev, int rq)
{
    u16 v;

    if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
        return -EINVAL;

    /* Encode byte count as the exponent field: ffs(128)-8 = 0, ...,
     * ffs(4096)-8 = 5, positioned in bits 14:12 of Device Control. */
    v = (ffs(rq) - 8) << 12;

    return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
                                              PCI_EXP_DEVCTL_READRQ, v);
}