/*
 * pci.c — PCI bus enumeration and capability lookup.
 * (Recovered from an SVN diff-viewer export of rev 2326 → 2327; the viewer
 *  chrome and duplicated diff columns have been stripped.)
 */
/* NOTE(review): the six #include directives below lost their header names in
   the diff export (the "<...>" text was stripped as HTML markup).  Restore the
   exact header names from the repository at rev 2327 before building. */
#include
#include
#include
#include
#include
#include
7 | 7 | ||
8 | extern int pci_scan_filter(u32_t id, u32_t busnr, u32_t devfn); |
8 | extern int pci_scan_filter(u32_t id, u32_t busnr, u32_t devfn); |
9 | 9 | ||
10 | static LIST_HEAD(devices); |
10 | static LIST_HEAD(devices); |
11 | 11 | ||
12 | /* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */ |
12 | /* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */ |
13 | #define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */ |
13 | #define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */ |
14 | 14 | ||
15 | #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) |
15 | #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) |
16 | 16 | ||
17 | /* |
17 | /* |
18 | * Translate the low bits of the PCI base |
18 | * Translate the low bits of the PCI base |
19 | * to the resource type |
19 | * to the resource type |
20 | */ |
20 | */ |
21 | static inline unsigned int pci_calc_resource_flags(unsigned int flags) |
21 | static inline unsigned int pci_calc_resource_flags(unsigned int flags) |
22 | { |
22 | { |
23 | if (flags & PCI_BASE_ADDRESS_SPACE_IO) |
23 | if (flags & PCI_BASE_ADDRESS_SPACE_IO) |
24 | return IORESOURCE_IO; |
24 | return IORESOURCE_IO; |
25 | 25 | ||
26 | if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH) |
26 | if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH) |
27 | return IORESOURCE_MEM | IORESOURCE_PREFETCH; |
27 | return IORESOURCE_MEM | IORESOURCE_PREFETCH; |
28 | 28 | ||
29 | return IORESOURCE_MEM; |
29 | return IORESOURCE_MEM; |
30 | } |
30 | } |
31 | 31 | ||
32 | 32 | ||
33 | static u32_t pci_size(u32_t base, u32_t maxbase, u32_t mask) |
33 | static u32_t pci_size(u32_t base, u32_t maxbase, u32_t mask) |
34 | { |
34 | { |
35 | u32_t size = mask & maxbase; /* Find the significant bits */ |
35 | u32_t size = mask & maxbase; /* Find the significant bits */ |
36 | 36 | ||
37 | if (!size) |
37 | if (!size) |
38 | return 0; |
38 | return 0; |
39 | 39 | ||
40 | /* Get the lowest of them to find the decode size, and |
40 | /* Get the lowest of them to find the decode size, and |
41 | from that the extent. */ |
41 | from that the extent. */ |
42 | size = (size & ~(size-1)) - 1; |
42 | size = (size & ~(size-1)) - 1; |
43 | 43 | ||
44 | /* base == maxbase can be valid only if the BAR has |
44 | /* base == maxbase can be valid only if the BAR has |
45 | already been programmed with all 1s. */ |
45 | already been programmed with all 1s. */ |
46 | if (base == maxbase && ((base | size) & mask) != mask) |
46 | if (base == maxbase && ((base | size) & mask) != mask) |
47 | return 0; |
47 | return 0; |
48 | 48 | ||
49 | return size; |
49 | return size; |
50 | } |
50 | } |
51 | 51 | ||
52 | static u64_t pci_size64(u64_t base, u64_t maxbase, u64_t mask) |
52 | static u64_t pci_size64(u64_t base, u64_t maxbase, u64_t mask) |
53 | { |
53 | { |
54 | u64_t size = mask & maxbase; /* Find the significant bits */ |
54 | u64_t size = mask & maxbase; /* Find the significant bits */ |
55 | 55 | ||
56 | if (!size) |
56 | if (!size) |
57 | return 0; |
57 | return 0; |
58 | 58 | ||
59 | /* Get the lowest of them to find the decode size, and |
59 | /* Get the lowest of them to find the decode size, and |
60 | from that the extent. */ |
60 | from that the extent. */ |
61 | size = (size & ~(size-1)) - 1; |
61 | size = (size & ~(size-1)) - 1; |
62 | 62 | ||
63 | /* base == maxbase can be valid only if the BAR has |
63 | /* base == maxbase can be valid only if the BAR has |
64 | already been programmed with all 1s. */ |
64 | already been programmed with all 1s. */ |
65 | if (base == maxbase && ((base | size) & mask) != mask) |
65 | if (base == maxbase && ((base | size) & mask) != mask) |
66 | return 0; |
66 | return 0; |
67 | 67 | ||
68 | return size; |
68 | return size; |
69 | } |
69 | } |
70 | 70 | ||
71 | static inline int is_64bit_memory(u32_t mask) |
71 | static inline int is_64bit_memory(u32_t mask) |
72 | { |
72 | { |
73 | if ((mask & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) == |
73 | if ((mask & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) == |
74 | (PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64)) |
74 | (PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64)) |
75 | return 1; |
75 | return 1; |
76 | return 0; |
76 | return 0; |
77 | } |
77 | } |
78 | 78 | ||
79 | static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) |
79 | static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) |
80 | { |
80 | { |
81 | u32_t pos, reg, next; |
81 | u32_t pos, reg, next; |
82 | u32_t l, sz; |
82 | u32_t l, sz; |
83 | struct resource *res; |
83 | struct resource *res; |
84 | 84 | ||
85 | for(pos=0; pos < howmany; pos = next) |
85 | for(pos=0; pos < howmany; pos = next) |
86 | { |
86 | { |
87 | u64_t l64; |
87 | u64_t l64; |
88 | u64_t sz64; |
88 | u64_t sz64; |
89 | u32_t raw_sz; |
89 | u32_t raw_sz; |
90 | 90 | ||
91 | next = pos + 1; |
91 | next = pos + 1; |
92 | 92 | ||
93 | res = &dev->resource[pos]; |
93 | res = &dev->resource[pos]; |
94 | 94 | ||
95 | reg = PCI_BASE_ADDRESS_0 + (pos << 2); |
95 | reg = PCI_BASE_ADDRESS_0 + (pos << 2); |
96 | l = PciRead32(dev->busnr, dev->devfn, reg); |
96 | l = PciRead32(dev->busnr, dev->devfn, reg); |
97 | PciWrite32(dev->busnr, dev->devfn, reg, ~0); |
97 | PciWrite32(dev->busnr, dev->devfn, reg, ~0); |
98 | sz = PciRead32(dev->busnr, dev->devfn, reg); |
98 | sz = PciRead32(dev->busnr, dev->devfn, reg); |
99 | PciWrite32(dev->busnr, dev->devfn, reg, l); |
99 | PciWrite32(dev->busnr, dev->devfn, reg, l); |
100 | 100 | ||
101 | if (!sz || sz == 0xffffffff) |
101 | if (!sz || sz == 0xffffffff) |
102 | continue; |
102 | continue; |
103 | 103 | ||
104 | if (l == 0xffffffff) |
104 | if (l == 0xffffffff) |
105 | l = 0; |
105 | l = 0; |
106 | 106 | ||
107 | raw_sz = sz; |
107 | raw_sz = sz; |
108 | if ((l & PCI_BASE_ADDRESS_SPACE) == |
108 | if ((l & PCI_BASE_ADDRESS_SPACE) == |
109 | PCI_BASE_ADDRESS_SPACE_MEMORY) |
109 | PCI_BASE_ADDRESS_SPACE_MEMORY) |
110 | { |
110 | { |
111 | sz = pci_size(l, sz, (u32_t)PCI_BASE_ADDRESS_MEM_MASK); |
111 | sz = pci_size(l, sz, (u32_t)PCI_BASE_ADDRESS_MEM_MASK); |
112 | /* |
112 | /* |
113 | * For 64bit prefetchable memory sz could be 0, if the |
113 | * For 64bit prefetchable memory sz could be 0, if the |
114 | * real size is bigger than 4G, so we need to check |
114 | * real size is bigger than 4G, so we need to check |
115 | * szhi for that. |
115 | * szhi for that. |
116 | */ |
116 | */ |
117 | if (!is_64bit_memory(l) && !sz) |
117 | if (!is_64bit_memory(l) && !sz) |
118 | continue; |
118 | continue; |
119 | res->start = l & PCI_BASE_ADDRESS_MEM_MASK; |
119 | res->start = l & PCI_BASE_ADDRESS_MEM_MASK; |
120 | res->flags |= l & ~PCI_BASE_ADDRESS_MEM_MASK; |
120 | res->flags |= l & ~PCI_BASE_ADDRESS_MEM_MASK; |
121 | } |
121 | } |
122 | else { |
122 | else { |
123 | sz = pci_size(l, sz, PCI_BASE_ADDRESS_IO_MASK & 0xffff); |
123 | sz = pci_size(l, sz, PCI_BASE_ADDRESS_IO_MASK & 0xffff); |
124 | if (!sz) |
124 | if (!sz) |
125 | continue; |
125 | continue; |
126 | res->start = l & PCI_BASE_ADDRESS_IO_MASK; |
126 | res->start = l & PCI_BASE_ADDRESS_IO_MASK; |
127 | res->flags |= l & ~PCI_BASE_ADDRESS_IO_MASK; |
127 | res->flags |= l & ~PCI_BASE_ADDRESS_IO_MASK; |
128 | } |
128 | } |
129 | res->end = res->start + (unsigned long) sz; |
129 | res->end = res->start + (unsigned long) sz; |
130 | res->flags |= pci_calc_resource_flags(l); |
130 | res->flags |= pci_calc_resource_flags(l); |
131 | if (is_64bit_memory(l)) |
131 | if (is_64bit_memory(l)) |
132 | { |
132 | { |
133 | u32_t szhi, lhi; |
133 | u32_t szhi, lhi; |
134 | 134 | ||
135 | lhi = PciRead32(dev->busnr, dev->devfn, reg+4); |
135 | lhi = PciRead32(dev->busnr, dev->devfn, reg+4); |
136 | PciWrite32(dev->busnr, dev->devfn, reg+4, ~0); |
136 | PciWrite32(dev->busnr, dev->devfn, reg+4, ~0); |
137 | szhi = PciRead32(dev->busnr, dev->devfn, reg+4); |
137 | szhi = PciRead32(dev->busnr, dev->devfn, reg+4); |
138 | PciWrite32(dev->busnr, dev->devfn, reg+4, lhi); |
138 | PciWrite32(dev->busnr, dev->devfn, reg+4, lhi); |
139 | sz64 = ((u64_t)szhi << 32) | raw_sz; |
139 | sz64 = ((u64_t)szhi << 32) | raw_sz; |
140 | l64 = ((u64_t)lhi << 32) | l; |
140 | l64 = ((u64_t)lhi << 32) | l; |
141 | sz64 = pci_size64(l64, sz64, PCI_BASE_ADDRESS_MEM_MASK); |
141 | sz64 = pci_size64(l64, sz64, PCI_BASE_ADDRESS_MEM_MASK); |
142 | next++; |
142 | next++; |
143 | 143 | ||
144 | #if BITS_PER_LONG == 64 |
144 | #if BITS_PER_LONG == 64 |
145 | if (!sz64) { |
145 | if (!sz64) { |
146 | res->start = 0; |
146 | res->start = 0; |
147 | res->end = 0; |
147 | res->end = 0; |
148 | res->flags = 0; |
148 | res->flags = 0; |
149 | continue; |
149 | continue; |
150 | } |
150 | } |
151 | res->start = l64 & PCI_BASE_ADDRESS_MEM_MASK; |
151 | res->start = l64 & PCI_BASE_ADDRESS_MEM_MASK; |
152 | res->end = res->start + sz64; |
152 | res->end = res->start + sz64; |
153 | #else |
153 | #else |
154 | if (sz64 > 0x100000000ULL) { |
154 | if (sz64 > 0x100000000ULL) { |
155 | printk(KERN_ERR "PCI: Unable to handle 64-bit " |
155 | printk(KERN_ERR "PCI: Unable to handle 64-bit " |
156 | "BAR for device %s\n", pci_name(dev)); |
156 | "BAR for device %s\n", pci_name(dev)); |
157 | res->start = 0; |
157 | res->start = 0; |
158 | res->flags = 0; |
158 | res->flags = 0; |
159 | } |
159 | } |
160 | else if (lhi) |
160 | else if (lhi) |
161 | { |
161 | { |
162 | /* 64-bit wide address, treat as disabled */ |
162 | /* 64-bit wide address, treat as disabled */ |
163 | PciWrite32(dev->busnr, dev->devfn, reg, |
163 | PciWrite32(dev->busnr, dev->devfn, reg, |
164 | l & ~(u32_t)PCI_BASE_ADDRESS_MEM_MASK); |
164 | l & ~(u32_t)PCI_BASE_ADDRESS_MEM_MASK); |
165 | PciWrite32(dev->busnr, dev->devfn, reg+4, 0); |
165 | PciWrite32(dev->busnr, dev->devfn, reg+4, 0); |
166 | res->start = 0; |
166 | res->start = 0; |
167 | res->end = sz; |
167 | res->end = sz; |
168 | } |
168 | } |
169 | #endif |
169 | #endif |
170 | } |
170 | } |
171 | } |
171 | } |
172 | 172 | ||
173 | if ( rom ) |
173 | if ( rom ) |
174 | { |
174 | { |
175 | dev->rom_base_reg = rom; |
175 | dev->rom_base_reg = rom; |
176 | res = &dev->resource[PCI_ROM_RESOURCE]; |
176 | res = &dev->resource[PCI_ROM_RESOURCE]; |
177 | 177 | ||
178 | l = PciRead32(dev->busnr, dev->devfn, rom); |
178 | l = PciRead32(dev->busnr, dev->devfn, rom); |
179 | PciWrite32(dev->busnr, dev->devfn, rom, ~PCI_ROM_ADDRESS_ENABLE); |
179 | PciWrite32(dev->busnr, dev->devfn, rom, ~PCI_ROM_ADDRESS_ENABLE); |
180 | sz = PciRead32(dev->busnr, dev->devfn, rom); |
180 | sz = PciRead32(dev->busnr, dev->devfn, rom); |
181 | PciWrite32(dev->busnr, dev->devfn, rom, l); |
181 | PciWrite32(dev->busnr, dev->devfn, rom, l); |
182 | 182 | ||
183 | if (l == 0xffffffff) |
183 | if (l == 0xffffffff) |
184 | l = 0; |
184 | l = 0; |
185 | 185 | ||
186 | if (sz && sz != 0xffffffff) |
186 | if (sz && sz != 0xffffffff) |
187 | { |
187 | { |
188 | sz = pci_size(l, sz, (u32_t)PCI_ROM_ADDRESS_MASK); |
188 | sz = pci_size(l, sz, (u32_t)PCI_ROM_ADDRESS_MASK); |
189 | 189 | ||
190 | if (sz) |
190 | if (sz) |
191 | { |
191 | { |
192 | res->flags = (l & IORESOURCE_ROM_ENABLE) | |
192 | res->flags = (l & IORESOURCE_ROM_ENABLE) | |
193 | IORESOURCE_MEM | IORESOURCE_PREFETCH | |
193 | IORESOURCE_MEM | IORESOURCE_PREFETCH | |
194 | IORESOURCE_READONLY | IORESOURCE_CACHEABLE; |
194 | IORESOURCE_READONLY | IORESOURCE_CACHEABLE; |
195 | res->start = l & PCI_ROM_ADDRESS_MASK; |
195 | res->start = l & PCI_ROM_ADDRESS_MASK; |
196 | res->end = res->start + (unsigned long) sz; |
196 | res->end = res->start + (unsigned long) sz; |
197 | } |
197 | } |
198 | } |
198 | } |
199 | } |
199 | } |
200 | } |
200 | } |
201 | 201 | ||
202 | static void pci_read_irq(struct pci_dev *dev) |
202 | static void pci_read_irq(struct pci_dev *dev) |
203 | { |
203 | { |
204 | u8_t irq; |
204 | u8_t irq; |
205 | 205 | ||
206 | irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_PIN); |
206 | irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_PIN); |
207 | dev->pin = irq; |
207 | dev->pin = irq; |
208 | if (irq) |
208 | if (irq) |
209 | irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_LINE); |
209 | irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_LINE); |
210 | dev->irq = irq; |
210 | dev->irq = irq; |
211 | }; |
211 | }; |
212 | 212 | ||
213 | 213 | ||
214 | int pci_setup_device(struct pci_dev *dev) |
214 | int pci_setup_device(struct pci_dev *dev) |
215 | { |
215 | { |
216 | u32_t class; |
216 | u32_t class; |
217 | 217 | ||
218 | class = PciRead32(dev->busnr, dev->devfn, PCI_CLASS_REVISION); |
218 | class = PciRead32(dev->busnr, dev->devfn, PCI_CLASS_REVISION); |
219 | dev->revision = class & 0xff; |
219 | dev->revision = class & 0xff; |
220 | class >>= 8; /* upper 3 bytes */ |
220 | class >>= 8; /* upper 3 bytes */ |
221 | dev->class = class; |
221 | dev->class = class; |
222 | 222 | ||
223 | /* "Unknown power state" */ |
223 | /* "Unknown power state" */ |
224 | // dev->current_state = PCI_UNKNOWN; |
224 | // dev->current_state = PCI_UNKNOWN; |
225 | 225 | ||
226 | /* Early fixups, before probing the BARs */ |
226 | /* Early fixups, before probing the BARs */ |
227 | // pci_fixup_device(pci_fixup_early, dev); |
227 | // pci_fixup_device(pci_fixup_early, dev); |
228 | class = dev->class >> 8; |
228 | class = dev->class >> 8; |
229 | 229 | ||
230 | switch (dev->hdr_type) |
230 | switch (dev->hdr_type) |
231 | { |
231 | { |
232 | case PCI_HEADER_TYPE_NORMAL: /* standard header */ |
232 | case PCI_HEADER_TYPE_NORMAL: /* standard header */ |
233 | if (class == PCI_CLASS_BRIDGE_PCI) |
233 | if (class == PCI_CLASS_BRIDGE_PCI) |
234 | goto bad; |
234 | goto bad; |
235 | pci_read_irq(dev); |
235 | pci_read_irq(dev); |
236 | pci_read_bases(dev, 6, PCI_ROM_ADDRESS); |
236 | pci_read_bases(dev, 6, PCI_ROM_ADDRESS); |
237 | dev->subsystem_vendor = PciRead16(dev->busnr, dev->devfn,PCI_SUBSYSTEM_VENDOR_ID); |
237 | dev->subsystem_vendor = PciRead16(dev->busnr, dev->devfn,PCI_SUBSYSTEM_VENDOR_ID); |
238 | dev->subsystem_device = PciRead16(dev->busnr, dev->devfn, PCI_SUBSYSTEM_ID); |
238 | dev->subsystem_device = PciRead16(dev->busnr, dev->devfn, PCI_SUBSYSTEM_ID); |
239 | 239 | ||
240 | /* |
240 | /* |
241 | * Do the ugly legacy mode stuff here rather than broken chip |
241 | * Do the ugly legacy mode stuff here rather than broken chip |
242 | * quirk code. Legacy mode ATA controllers have fixed |
242 | * quirk code. Legacy mode ATA controllers have fixed |
243 | * addresses. These are not always echoed in BAR0-3, and |
243 | * addresses. These are not always echoed in BAR0-3, and |
244 | * BAR0-3 in a few cases contain junk! |
244 | * BAR0-3 in a few cases contain junk! |
245 | */ |
245 | */ |
246 | if (class == PCI_CLASS_STORAGE_IDE) |
246 | if (class == PCI_CLASS_STORAGE_IDE) |
247 | { |
247 | { |
248 | u8_t progif; |
248 | u8_t progif; |
249 | 249 | ||
250 | progif = PciRead8(dev->busnr, dev->devfn,PCI_CLASS_PROG); |
250 | progif = PciRead8(dev->busnr, dev->devfn,PCI_CLASS_PROG); |
251 | if ((progif & 1) == 0) |
251 | if ((progif & 1) == 0) |
252 | { |
252 | { |
253 | dev->resource[0].start = 0x1F0; |
253 | dev->resource[0].start = 0x1F0; |
254 | dev->resource[0].end = 0x1F7; |
254 | dev->resource[0].end = 0x1F7; |
255 | dev->resource[0].flags = LEGACY_IO_RESOURCE; |
255 | dev->resource[0].flags = LEGACY_IO_RESOURCE; |
256 | dev->resource[1].start = 0x3F6; |
256 | dev->resource[1].start = 0x3F6; |
257 | dev->resource[1].end = 0x3F6; |
257 | dev->resource[1].end = 0x3F6; |
258 | dev->resource[1].flags = LEGACY_IO_RESOURCE; |
258 | dev->resource[1].flags = LEGACY_IO_RESOURCE; |
259 | } |
259 | } |
260 | if ((progif & 4) == 0) |
260 | if ((progif & 4) == 0) |
261 | { |
261 | { |
262 | dev->resource[2].start = 0x170; |
262 | dev->resource[2].start = 0x170; |
263 | dev->resource[2].end = 0x177; |
263 | dev->resource[2].end = 0x177; |
264 | dev->resource[2].flags = LEGACY_IO_RESOURCE; |
264 | dev->resource[2].flags = LEGACY_IO_RESOURCE; |
265 | dev->resource[3].start = 0x376; |
265 | dev->resource[3].start = 0x376; |
266 | dev->resource[3].end = 0x376; |
266 | dev->resource[3].end = 0x376; |
267 | dev->resource[3].flags = LEGACY_IO_RESOURCE; |
267 | dev->resource[3].flags = LEGACY_IO_RESOURCE; |
268 | }; |
268 | }; |
269 | } |
269 | } |
270 | break; |
270 | break; |
271 | 271 | ||
272 | case PCI_HEADER_TYPE_BRIDGE: /* bridge header */ |
272 | case PCI_HEADER_TYPE_BRIDGE: /* bridge header */ |
273 | if (class != PCI_CLASS_BRIDGE_PCI) |
273 | if (class != PCI_CLASS_BRIDGE_PCI) |
274 | goto bad; |
274 | goto bad; |
275 | /* The PCI-to-PCI bridge spec requires that subtractive |
275 | /* The PCI-to-PCI bridge spec requires that subtractive |
276 | decoding (i.e. transparent) bridge must have programming |
276 | decoding (i.e. transparent) bridge must have programming |
277 | interface code of 0x01. */ |
277 | interface code of 0x01. */ |
278 | pci_read_irq(dev); |
278 | pci_read_irq(dev); |
279 | dev->transparent = ((dev->class & 0xff) == 1); |
279 | dev->transparent = ((dev->class & 0xff) == 1); |
280 | pci_read_bases(dev, 2, PCI_ROM_ADDRESS1); |
280 | pci_read_bases(dev, 2, PCI_ROM_ADDRESS1); |
281 | break; |
281 | break; |
282 | 282 | ||
283 | case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */ |
283 | case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */ |
284 | if (class != PCI_CLASS_BRIDGE_CARDBUS) |
284 | if (class != PCI_CLASS_BRIDGE_CARDBUS) |
285 | goto bad; |
285 | goto bad; |
286 | pci_read_irq(dev); |
286 | pci_read_irq(dev); |
287 | pci_read_bases(dev, 1, 0); |
287 | pci_read_bases(dev, 1, 0); |
288 | dev->subsystem_vendor = PciRead16(dev->busnr, |
288 | dev->subsystem_vendor = PciRead16(dev->busnr, |
289 | dev->devfn, |
289 | dev->devfn, |
290 | PCI_CB_SUBSYSTEM_VENDOR_ID); |
290 | PCI_CB_SUBSYSTEM_VENDOR_ID); |
291 | 291 | ||
292 | dev->subsystem_device = PciRead16(dev->busnr, |
292 | dev->subsystem_device = PciRead16(dev->busnr, |
293 | dev->devfn, |
293 | dev->devfn, |
294 | PCI_CB_SUBSYSTEM_ID); |
294 | PCI_CB_SUBSYSTEM_ID); |
295 | break; |
295 | break; |
296 | 296 | ||
297 | default: /* unknown header */ |
297 | default: /* unknown header */ |
298 | printk(KERN_ERR "PCI: device %s has unknown header type %02x, ignoring.\n", |
298 | printk(KERN_ERR "PCI: device %s has unknown header type %02x, ignoring.\n", |
299 | pci_name(dev), dev->hdr_type); |
299 | pci_name(dev), dev->hdr_type); |
300 | return -1; |
300 | return -1; |
301 | 301 | ||
302 | bad: |
302 | bad: |
303 | printk(KERN_ERR "PCI: %s: class %x doesn't match header type %02x. Ignoring class.\n", |
303 | printk(KERN_ERR "PCI: %s: class %x doesn't match header type %02x. Ignoring class.\n", |
304 | pci_name(dev), class, dev->hdr_type); |
304 | pci_name(dev), class, dev->hdr_type); |
305 | dev->class = PCI_CLASS_NOT_DEFINED; |
305 | dev->class = PCI_CLASS_NOT_DEFINED; |
306 | } |
306 | } |
307 | 307 | ||
308 | /* We found a fine healthy device, go go go... */ |
308 | /* We found a fine healthy device, go go go... */ |
309 | 309 | ||
310 | return 0; |
310 | return 0; |
311 | }; |
311 | }; |
312 | 312 | ||
313 | static pci_dev_t* pci_scan_device(u32_t busnr, int devfn) |
313 | static pci_dev_t* pci_scan_device(u32_t busnr, int devfn) |
314 | { |
314 | { |
315 | pci_dev_t *dev; |
315 | pci_dev_t *dev; |
316 | 316 | ||
317 | u32_t id; |
317 | u32_t id; |
318 | u8_t hdr; |
318 | u8_t hdr; |
319 | 319 | ||
320 | int timeout = 10; |
320 | int timeout = 10; |
321 | 321 | ||
322 | id = PciRead32(busnr, devfn, PCI_VENDOR_ID); |
322 | id = PciRead32(busnr, devfn, PCI_VENDOR_ID); |
323 | 323 | ||
324 | /* some broken boards return 0 or ~0 if a slot is empty: */ |
324 | /* some broken boards return 0 or ~0 if a slot is empty: */ |
325 | if (id == 0xffffffff || id == 0x00000000 || |
325 | if (id == 0xffffffff || id == 0x00000000 || |
326 | id == 0x0000ffff || id == 0xffff0000) |
326 | id == 0x0000ffff || id == 0xffff0000) |
327 | return NULL; |
327 | return NULL; |
328 | 328 | ||
329 | while (id == 0xffff0001) |
329 | while (id == 0xffff0001) |
330 | { |
330 | { |
331 | 331 | ||
332 | delay(timeout/10); |
332 | delay(timeout/10); |
333 | timeout *= 2; |
333 | timeout *= 2; |
334 | 334 | ||
335 | id = PciRead32(busnr, devfn, PCI_VENDOR_ID); |
335 | id = PciRead32(busnr, devfn, PCI_VENDOR_ID); |
336 | 336 | ||
337 | /* Card hasn't responded in 60 seconds? Must be stuck. */ |
337 | /* Card hasn't responded in 60 seconds? Must be stuck. */ |
338 | if (timeout > 60 * 100) |
338 | if (timeout > 60 * 100) |
339 | { |
339 | { |
340 | printk(KERN_WARNING "Device %04x:%02x:%02x.%d not " |
340 | printk(KERN_WARNING "Device %04x:%02x:%02x.%d not " |
341 | "responding\n", busnr,PCI_SLOT(devfn),PCI_FUNC(devfn)); |
341 | "responding\n", busnr,PCI_SLOT(devfn),PCI_FUNC(devfn)); |
342 | return NULL; |
342 | return NULL; |
343 | } |
343 | } |
344 | }; |
344 | }; |
345 | 345 | ||
346 | if( pci_scan_filter(id, busnr, devfn) == 0) |
346 | if( pci_scan_filter(id, busnr, devfn) == 0) |
347 | return NULL; |
347 | return NULL; |
348 | 348 | ||
349 | hdr = PciRead8(busnr, devfn, PCI_HEADER_TYPE); |
349 | hdr = PciRead8(busnr, devfn, PCI_HEADER_TYPE); |
350 | 350 | ||
351 | dev = (pci_dev_t*)kzalloc(sizeof(pci_dev_t), 0); |
351 | dev = (pci_dev_t*)kzalloc(sizeof(pci_dev_t), 0); |
352 | 352 | ||
353 | INIT_LIST_HEAD(&dev->link); |
353 | INIT_LIST_HEAD(&dev->link); |
354 | 354 | ||
355 | if(unlikely(dev == NULL)) |
355 | if(unlikely(dev == NULL)) |
356 | return NULL; |
356 | return NULL; |
357 | 357 | ||
358 | dev->pci_dev.busnr = busnr; |
358 | dev->pci_dev.busnr = busnr; |
359 | dev->pci_dev.devfn = devfn; |
359 | dev->pci_dev.devfn = devfn; |
360 | dev->pci_dev.hdr_type = hdr & 0x7f; |
360 | dev->pci_dev.hdr_type = hdr & 0x7f; |
361 | dev->pci_dev.multifunction = !!(hdr & 0x80); |
361 | dev->pci_dev.multifunction = !!(hdr & 0x80); |
362 | dev->pci_dev.vendor = id & 0xffff; |
362 | dev->pci_dev.vendor = id & 0xffff; |
363 | dev->pci_dev.device = (id >> 16) & 0xffff; |
363 | dev->pci_dev.device = (id >> 16) & 0xffff; |
364 | 364 | ||
365 | pci_setup_device(&dev->pci_dev); |
365 | pci_setup_device(&dev->pci_dev); |
366 | 366 | ||
367 | return dev; |
367 | return dev; |
368 | 368 | ||
369 | }; |
369 | }; |
370 | 370 | ||
371 | 371 | ||
372 | 372 | ||
373 | 373 | ||
374 | int pci_scan_slot(u32_t bus, int devfn) |
374 | int pci_scan_slot(u32_t bus, int devfn) |
375 | { |
375 | { |
376 | int func, nr = 0; |
376 | int func, nr = 0; |
377 | 377 | ||
378 | for (func = 0; func < 8; func++, devfn++) |
378 | for (func = 0; func < 8; func++, devfn++) |
379 | { |
379 | { |
380 | pci_dev_t *dev; |
380 | pci_dev_t *dev; |
381 | 381 | ||
382 | dev = pci_scan_device(bus, devfn); |
382 | dev = pci_scan_device(bus, devfn); |
383 | if( dev ) |
383 | if( dev ) |
384 | { |
384 | { |
385 | list_add(&dev->link, &devices); |
385 | list_add(&dev->link, &devices); |
386 | 386 | ||
387 | nr++; |
387 | nr++; |
388 | 388 | ||
389 | /* |
389 | /* |
390 | * If this is a single function device, |
390 | * If this is a single function device, |
391 | * don't scan past the first function. |
391 | * don't scan past the first function. |
392 | */ |
392 | */ |
393 | if (!dev->pci_dev.multifunction) |
393 | if (!dev->pci_dev.multifunction) |
394 | { |
394 | { |
395 | if (func > 0) { |
395 | if (func > 0) { |
396 | dev->pci_dev.multifunction = 1; |
396 | dev->pci_dev.multifunction = 1; |
397 | } |
397 | } |
398 | else { |
398 | else { |
399 | break; |
399 | break; |
400 | } |
400 | } |
401 | } |
401 | } |
402 | } |
402 | } |
403 | else { |
403 | else { |
404 | if (func == 0) |
404 | if (func == 0) |
405 | break; |
405 | break; |
406 | } |
406 | } |
407 | }; |
407 | }; |
408 | 408 | ||
409 | return nr; |
409 | return nr; |
410 | }; |
410 | }; |
411 | 411 | ||
412 | #define PCI_FIND_CAP_TTL 48 |
412 | #define PCI_FIND_CAP_TTL 48 |
413 | 413 | ||
414 | static int __pci_find_next_cap_ttl(unsigned int bus, unsigned int devfn, |
414 | static int __pci_find_next_cap_ttl(unsigned int bus, unsigned int devfn, |
415 | u8 pos, int cap, int *ttl) |
415 | u8 pos, int cap, int *ttl) |
416 | { |
416 | { |
417 | u8 id; |
417 | u8 id; |
418 | 418 | ||
419 | while ((*ttl)--) { |
419 | while ((*ttl)--) { |
420 | pos = PciRead8(bus, devfn, pos); |
420 | pos = PciRead8(bus, devfn, pos); |
421 | if (pos < 0x40) |
421 | if (pos < 0x40) |
422 | break; |
422 | break; |
423 | pos &= ~3; |
423 | pos &= ~3; |
424 | id = PciRead8(bus, devfn, pos + PCI_CAP_LIST_ID); |
424 | id = PciRead8(bus, devfn, pos + PCI_CAP_LIST_ID); |
425 | if (id == 0xff) |
425 | if (id == 0xff) |
426 | break; |
426 | break; |
427 | if (id == cap) |
427 | if (id == cap) |
428 | return pos; |
428 | return pos; |
429 | pos += PCI_CAP_LIST_NEXT; |
429 | pos += PCI_CAP_LIST_NEXT; |
430 | } |
430 | } |
431 | return 0; |
431 | return 0; |
432 | } |
432 | } |
433 | 433 | ||
434 | static int __pci_find_next_cap(unsigned int bus, unsigned int devfn, |
434 | static int __pci_find_next_cap(unsigned int bus, unsigned int devfn, |
435 | u8 pos, int cap) |
435 | u8 pos, int cap) |
436 | { |
436 | { |
437 | int ttl = PCI_FIND_CAP_TTL; |
437 | int ttl = PCI_FIND_CAP_TTL; |
438 | 438 | ||
439 | return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl); |
439 | return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl); |
440 | } |
440 | } |
441 | 441 | ||
442 | static int __pci_bus_find_cap_start(unsigned int bus, |
442 | static int __pci_bus_find_cap_start(unsigned int bus, |
443 | unsigned int devfn, u8 hdr_type) |
443 | unsigned int devfn, u8 hdr_type) |
444 | { |
444 | { |
445 | u16 status; |
445 | u16 status; |
446 | 446 | ||
447 | status = PciRead16(bus, devfn, PCI_STATUS); |
447 | status = PciRead16(bus, devfn, PCI_STATUS); |
448 | if (!(status & PCI_STATUS_CAP_LIST)) |
448 | if (!(status & PCI_STATUS_CAP_LIST)) |
449 | return 0; |
449 | return 0; |
450 | 450 | ||
451 | switch (hdr_type) { |
451 | switch (hdr_type) { |
452 | case PCI_HEADER_TYPE_NORMAL: |
452 | case PCI_HEADER_TYPE_NORMAL: |
453 | case PCI_HEADER_TYPE_BRIDGE: |
453 | case PCI_HEADER_TYPE_BRIDGE: |
454 | return PCI_CAPABILITY_LIST; |
454 | return PCI_CAPABILITY_LIST; |
455 | case PCI_HEADER_TYPE_CARDBUS: |
455 | case PCI_HEADER_TYPE_CARDBUS: |
456 | return PCI_CB_CAPABILITY_LIST; |
456 | return PCI_CB_CAPABILITY_LIST; |
457 | default: |
457 | default: |
458 | return 0; |
458 | return 0; |
459 | } |
459 | } |
460 | 460 | ||
461 | return 0; |
461 | return 0; |
462 | } |
462 | } |
463 | 463 | ||
464 | 464 | ||
465 | int pci_find_capability(struct pci_dev *dev, int cap) |
465 | int pci_find_capability(struct pci_dev *dev, int cap) |
466 | { |
466 | { |
467 | int pos; |
467 | int pos; |
468 | 468 | ||
469 | pos = __pci_bus_find_cap_start(dev->busnr, dev->devfn, dev->hdr_type); |
469 | pos = __pci_bus_find_cap_start(dev->busnr, dev->devfn, dev->hdr_type); |
470 | if (pos) |
470 | if (pos) |
471 | pos = __pci_find_next_cap(dev->busnr, dev->devfn, pos, cap); |
471 | pos = __pci_find_next_cap(dev->busnr, dev->devfn, pos, cap); |
472 | 472 | ||
473 | return pos; |
473 | return pos; |
474 | } |
474 | } |
475 | 475 | ||
476 | 476 | ||
477 | 477 | ||
478 | 478 | ||
479 | int enum_pci_devices() |
479 | int enum_pci_devices() |
480 | { |
480 | { |
481 | pci_dev_t *dev; |
481 | pci_dev_t *dev; |
482 | u32_t last_bus; |
482 | u32_t last_bus; |
483 | u32_t bus = 0 , devfn = 0; |
483 | u32_t bus = 0 , devfn = 0; |
484 | 484 | ||
485 | 485 | ||
486 | last_bus = PciApi(1); |
486 | last_bus = PciApi(1); |
487 | 487 | ||
488 | 488 | ||
489 | if( unlikely(last_bus == -1)) |
489 | if( unlikely(last_bus == -1)) |
490 | return -1; |
490 | return -1; |
491 | 491 | ||
492 | for(;bus <= last_bus; bus++) |
492 | for(;bus <= last_bus; bus++) |
493 | { |
493 | { |
494 | for (devfn = 0; devfn < 0x100; devfn += 8) |
494 | for (devfn = 0; devfn < 0x100; devfn += 8) |
495 | pci_scan_slot(bus, devfn); |
495 | pci_scan_slot(bus, devfn); |
496 | 496 | ||
497 | 497 | ||
498 | } |
498 | } |
499 | for(dev = (pci_dev_t*)devices.next; |
499 | for(dev = (pci_dev_t*)devices.next; |
500 | &dev->link != &devices; |
500 | &dev->link != &devices; |
501 | dev = (pci_dev_t*)dev->link.next) |
501 | dev = (pci_dev_t*)dev->link.next) |
502 | { |
502 | { |
503 | dbgprintf("PCI device %x:%x bus:%x devfn:%x\n", |
503 | dbgprintf("PCI device %x:%x bus:%x devfn:%x\n", |
504 | dev->pci_dev.vendor, |
504 | dev->pci_dev.vendor, |
505 | dev->pci_dev.device, |
505 | dev->pci_dev.device, |
506 | dev->pci_dev.busnr, |
506 | dev->pci_dev.busnr, |
507 | dev->pci_dev.devfn); |
507 | dev->pci_dev.devfn); |
508 | 508 | ||
509 | } |
509 | } |
510 | return 0; |
510 | return 0; |
511 | } |
511 | } |
512 | 512 | ||
513 | const struct pci_device_id* find_pci_device(pci_dev_t* pdev, const struct pci_device_id *idlist) |
513 | const struct pci_device_id* find_pci_device(pci_dev_t* pdev, const struct pci_device_id *idlist) |
514 | { |
514 | { |
515 | pci_dev_t *dev; |
515 | pci_dev_t *dev; |
516 | const struct pci_device_id *ent; |
516 | const struct pci_device_id *ent; |
517 | 517 | ||
518 | for(dev = (pci_dev_t*)devices.next; |
518 | for(dev = (pci_dev_t*)devices.next; |
519 | &dev->link != &devices; |
519 | &dev->link != &devices; |
520 | dev = (pci_dev_t*)dev->link.next) |
520 | dev = (pci_dev_t*)dev->link.next) |
521 | { |
521 | { |
522 | if( dev->pci_dev.vendor != idlist->vendor ) |
522 | if( dev->pci_dev.vendor != idlist->vendor ) |
523 | continue; |
523 | continue; |
524 | 524 | ||
525 | for(ent = idlist; ent->vendor != 0; ent++) |
525 | for(ent = idlist; ent->vendor != 0; ent++) |
526 | { |
526 | { |
527 | if(unlikely(ent->device == dev->pci_dev.device)) |
527 | if(unlikely(ent->device == dev->pci_dev.device)) |
528 | { |
528 | { |
529 | pdev->pci_dev = dev->pci_dev; |
529 | pdev->pci_dev = dev->pci_dev; |
530 | return ent; |
530 | return ent; |
531 | } |
531 | } |
532 | }; |
532 | }; |
533 | } |
533 | } |
534 | 534 | ||
535 | return NULL; |
535 | return NULL; |
536 | }; |
536 | }; |
537 | 537 | ||
538 | struct pci_dev * |
538 | struct pci_dev * |
539 | pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from) |
539 | pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from) |
540 | { |
540 | { |
541 | pci_dev_t *dev; |
541 | pci_dev_t *dev; |
542 | 542 | ||
543 | dev = (pci_dev_t*)devices.next; |
543 | dev = (pci_dev_t*)devices.next; |
544 | 544 | ||
545 | if(from != NULL) |
545 | if(from != NULL) |
546 | { |
546 | { |
547 | for(; &dev->link != &devices; |
547 | for(; &dev->link != &devices; |
548 | dev = (pci_dev_t*)dev->link.next) |
548 | dev = (pci_dev_t*)dev->link.next) |
549 | { |
549 | { |
550 | if( &dev->pci_dev == from) |
550 | if( &dev->pci_dev == from) |
551 | { |
551 | { |
552 | dev = (pci_dev_t*)dev->link.next; |
552 | dev = (pci_dev_t*)dev->link.next; |
553 | break; |
553 | break; |
554 | }; |
554 | }; |
555 | } |
555 | } |
556 | }; |
556 | }; |
557 | 557 | ||
558 | for(; &dev->link != &devices; |
558 | for(; &dev->link != &devices; |
559 | dev = (pci_dev_t*)dev->link.next) |
559 | dev = (pci_dev_t*)dev->link.next) |
560 | { |
560 | { |
561 | if( dev->pci_dev.vendor != vendor ) |
561 | if( dev->pci_dev.vendor != vendor ) |
562 | continue; |
562 | continue; |
563 | 563 | ||
564 | if(dev->pci_dev.device == device) |
564 | if(dev->pci_dev.device == device) |
565 | { |
565 | { |
566 | return &dev->pci_dev; |
566 | return &dev->pci_dev; |
567 | } |
567 | } |
568 | } |
568 | } |
569 | return NULL; |
569 | return NULL; |
570 | }; |
570 | }; |
571 | 571 | ||
572 | 572 | ||
573 | struct pci_dev * pci_get_bus_and_slot(unsigned int bus, unsigned int devfn) |
573 | struct pci_dev * pci_get_bus_and_slot(unsigned int bus, unsigned int devfn) |
574 | { |
574 | { |
575 | pci_dev_t *dev; |
575 | pci_dev_t *dev; |
576 | 576 | ||
577 | for(dev = (pci_dev_t*)devices.next; |
577 | for(dev = (pci_dev_t*)devices.next; |
578 | &dev->link != &devices; |
578 | &dev->link != &devices; |
579 | dev = (pci_dev_t*)dev->link.next) |
579 | dev = (pci_dev_t*)dev->link.next) |
580 | { |
580 | { |
581 | if ( dev->pci_dev.busnr == bus && dev->pci_dev.devfn == devfn) |
581 | if ( dev->pci_dev.busnr == bus && dev->pci_dev.devfn == devfn) |
582 | return &dev->pci_dev; |
582 | return &dev->pci_dev; |
583 | } |
583 | } |
584 | return NULL; |
584 | return NULL; |
585 | } |
585 | } |
586 | 586 | ||
587 | struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from) |
587 | struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from) |
588 | { |
588 | { |
589 | pci_dev_t *dev; |
589 | pci_dev_t *dev; |
590 | 590 | ||
591 | dev = (pci_dev_t*)devices.next; |
591 | dev = (pci_dev_t*)devices.next; |
592 | 592 | ||
593 | if(from != NULL) |
593 | if(from != NULL) |
594 | { |
594 | { |
595 | for(; &dev->link != &devices; |
595 | for(; &dev->link != &devices; |
596 | dev = (pci_dev_t*)dev->link.next) |
596 | dev = (pci_dev_t*)dev->link.next) |
597 | { |
597 | { |
598 | if( &dev->pci_dev == from) |
598 | if( &dev->pci_dev == from) |
599 | { |
599 | { |
600 | dev = (pci_dev_t*)dev->link.next; |
600 | dev = (pci_dev_t*)dev->link.next; |
601 | break; |
601 | break; |
602 | }; |
602 | }; |
603 | } |
603 | } |
604 | }; |
604 | }; |
605 | 605 | ||
606 | for(; &dev->link != &devices; |
606 | for(; &dev->link != &devices; |
607 | dev = (pci_dev_t*)dev->link.next) |
607 | dev = (pci_dev_t*)dev->link.next) |
608 | { |
608 | { |
609 | if( dev->pci_dev.class == class) |
609 | if( dev->pci_dev.class == class) |
610 | { |
610 | { |
611 | return &dev->pci_dev; |
611 | return &dev->pci_dev; |
612 | } |
612 | } |
613 | } |
613 | } |
614 | 614 | ||
615 | return NULL; |
615 | return NULL; |
616 | } |
616 | } |
617 | 617 | ||
618 | 618 | ||
/* Cookie encoding used by ioport_map(): port numbers are biased by
 * PIO_OFFSET so they can be distinguished from real MMIO pointers.
 * Anything at or above PIO_RESERVED is treated as a mapped address. */
#define PIO_OFFSET      0x10000UL
#define PIO_MASK        0x0ffffUL
#define PIO_RESERVED    0x40000UL

/* Dispatch on a cookie produced by ioport_map()/pci_iomap(): run
 * is_pio for port cookies (after stripping the offset), is_mmio for
 * real mapped addresses.  NOTE(review): the test is `port > PIO_OFFSET`
 * (strict), so the cookie for port 0 matches neither branch — same
 * quirk as the upstream Linux lib/iomap.c this was ported from. */
#define IO_COND(addr, is_pio, is_mmio) do {            \
    unsigned long port = (unsigned long __force)addr;  \
    if (port >= PIO_RESERVED) {                        \
        is_mmio;                                       \
    } else if (port > PIO_OFFSET) {                    \
        port &= PIO_MASK;                              \
        is_pio;                                        \
    };                                                 \
} while (0)
632 | 632 | ||
633 | /* Create a virtual mapping cookie for an IO port range */ |
633 | /* Create a virtual mapping cookie for an IO port range */ |
634 | void __iomem *ioport_map(unsigned long port, unsigned int nr) |
634 | void __iomem *ioport_map(unsigned long port, unsigned int nr) |
635 | { |
635 | { |
636 | if (port > PIO_MASK) |
636 | if (port > PIO_MASK) |
637 | return NULL; |
637 | return NULL; |
638 | return (void __iomem *) (unsigned long) (port + PIO_OFFSET); |
638 | return (void __iomem *) (unsigned long) (port + PIO_OFFSET); |
639 | } |
639 | } |
640 | 640 | ||
641 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) |
641 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) |
642 | { |
642 | { |
643 | resource_size_t start = pci_resource_start(dev, bar); |
643 | resource_size_t start = pci_resource_start(dev, bar); |
644 | resource_size_t len = pci_resource_len(dev, bar); |
644 | resource_size_t len = pci_resource_len(dev, bar); |
645 | unsigned long flags = pci_resource_flags(dev, bar); |
645 | unsigned long flags = pci_resource_flags(dev, bar); |
646 | 646 | ||
647 | if (!len || !start) |
647 | if (!len || !start) |
648 | return NULL; |
648 | return NULL; |
649 | if (maxlen && len > maxlen) |
649 | if (maxlen && len > maxlen) |
650 | len = maxlen; |
650 | len = maxlen; |
651 | if (flags & IORESOURCE_IO) |
651 | if (flags & IORESOURCE_IO) |
652 | return ioport_map(start, len); |
652 | return ioport_map(start, len); |
653 | if (flags & IORESOURCE_MEM) { |
653 | if (flags & IORESOURCE_MEM) { |
654 | return ioremap(start, len); |
654 | return ioremap(start, len); |
655 | } |
655 | } |
656 | /* What? */ |
656 | /* What? */ |
657 | return NULL; |
657 | return NULL; |
658 | } |
658 | } |
659 | 659 | ||
/* Undo a pci_iomap() mapping: real MMIO addresses are iounmap()ed,
 * ioport cookies need no teardown (the is_pio branch is empty). */
void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
    IO_COND(addr, /* nothing */, iounmap(addr));
}
664 | 664 | ||
- | 665 | ||
/* A bus-address range as seen by devices on the bus.  On this platform
 * pcibios_resource_to_bus() copies resource addresses through 1:1, i.e.
 * there is no host-bridge translation. */
struct pci_bus_region {
    resource_size_t start;  /* first bus address of the window */
    resource_size_t end;    /* last bus address (inclusive) */
};
- | 670 | ||
- | 671 | static inline void |
|
- | 672 | pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, |
|
- | 673 | struct resource *res) |
|
- | 674 | { |
|
- | 675 | region->start = res->start; |
|
- | 676 | region->end = res->end; |
|
- | 677 | } |
|
- | 678 | ||
- | 679 | static inline int pci_read_config_dword(struct pci_dev *dev, int where, |
|
- | 680 | u32 *val) |
|
- | 681 | { |
|
- | 682 | *val = PciRead32(dev->busnr, dev->devfn, where); |
|
- | 683 | return 1; |
|
- | 684 | } |
|
- | 685 | ||
- | 686 | static inline int pci_write_config_dword(struct pci_dev *dev, int where, |
|
- | 687 | u32 val) |
|
- | 688 | { |
|
- | 689 | PciWrite32(dev->busnr, dev->devfn, where, val); |
|
- | 690 | return 1; |
|
- | 691 | } |
|
- | 692 | ||
- | 693 | int pci_enable_rom(struct pci_dev *pdev) |
|
- | 694 | { |
|
- | 695 | struct resource *res = pdev->resource + PCI_ROM_RESOURCE; |
|
- | 696 | struct pci_bus_region region; |
|
- | 697 | u32 rom_addr; |
|
- | 698 | ||
- | 699 | if (!res->flags) |
|
- | 700 | return -1; |
|
- | 701 | ||
- | 702 | pcibios_resource_to_bus(pdev, ®ion, res); |
|
- | 703 | pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr); |
|
- | 704 | rom_addr &= ~PCI_ROM_ADDRESS_MASK; |
|
- | 705 | rom_addr |= region.start | PCI_ROM_ADDRESS_ENABLE; |
|
- | 706 | pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr); |
|
- | 707 | return 0; |
|
- | 708 | } |
|
- | 709 | ||
- | 710 | void pci_disable_rom(struct pci_dev *pdev) |
|
- | 711 | { |
|
- | 712 | u32 rom_addr; |
|
- | 713 | pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr); |
|
- | 714 | rom_addr &= ~PCI_ROM_ADDRESS_ENABLE; |
|
- | 715 | pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr); |
|
- | 716 | } |
|
- | 717 | ||
- | 718 | /** |
|
- | 719 | * pci_get_rom_size - obtain the actual size of the ROM image |
|
- | 720 | * @pdev: target PCI device |
|
- | 721 | * @rom: kernel virtual pointer to image of ROM |
|
- | 722 | * @size: size of PCI window |
|
- | 723 | * return: size of actual ROM image |
|
- | 724 | * |
|
- | 725 | * Determine the actual length of the ROM image. |
|
- | 726 | * The PCI window size could be much larger than the |
|
- | 727 | * actual image size. |
|
- | 728 | */ |
|
- | 729 | size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size) |
|
- | 730 | { |
|
- | 731 | void __iomem *image; |
|
- | 732 | int last_image; |
|
- | 733 | ||
- | 734 | image = rom; |
|
- | 735 | do { |
|
- | 736 | void __iomem *pds; |
|
- | 737 | /* Standard PCI ROMs start out with these bytes 55 AA */ |
|
- | 738 | if (readb(image) != 0x55) { |
|
- | 739 | dev_err(&pdev->dev, "Invalid ROM contents\n"); |
|
- | 740 | break; |
|
- | 741 | } |
|
- | 742 | if (readb(image + 1) != 0xAA) |
|
- | 743 | break; |
|
- | 744 | /* get the PCI data structure and check its signature */ |
|
- | 745 | pds = image + readw(image + 24); |
|
- | 746 | if (readb(pds) != 'P') |
|
- | 747 | break; |
|
- | 748 | if (readb(pds + 1) != 'C') |
|
- | 749 | break; |
|
- | 750 | if (readb(pds + 2) != 'I') |
|
- | 751 | break; |
|
- | 752 | if (readb(pds + 3) != 'R') |
|
- | 753 | break; |
|
- | 754 | last_image = readb(pds + 21) & 0x80; |
|
- | 755 | /* this length is reliable */ |
|
- | 756 | image += readw(pds + 16) * 512; |
|
- | 757 | } while (!last_image); |
|
- | 758 | ||
- | 759 | /* never return a size larger than the PCI resource window */ |
|
- | 760 | /* there are known ROMs that get the size wrong */ |
|
- | 761 | return min((size_t)(image - rom), size); |
|
- | 762 | } |
|
- | 763 | ||
- | 764 | ||
- | 765 | /** |
|
- | 766 | * pci_map_rom - map a PCI ROM to kernel space |
|
- | 767 | * @pdev: pointer to pci device struct |
|
- | 768 | * @size: pointer to receive size of pci window over ROM |
|
- | 769 | * |
|
- | 770 | * Return: kernel virtual pointer to image of ROM |
|
- | 771 | * |
|
- | 772 | * Map a PCI ROM into kernel space. If ROM is boot video ROM, |
|
- | 773 | * the shadow BIOS copy will be returned instead of the |
|
- | 774 | * actual ROM. |
|
- | 775 | */ |
|
- | 776 | void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size) |
|
- | 777 | { |
|
- | 778 | struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; |
|
- | 779 | loff_t start; |
|
- | 780 | void __iomem *rom; |
|
- | 781 | ||
- | 782 | /* |
|
- | 783 | * IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy |
|
- | 784 | * memory map if the VGA enable bit of the Bridge Control register is |
|
- | 785 | * set for embedded VGA. |
|
- | 786 | */ |
|
- | 787 | if (res->flags & IORESOURCE_ROM_SHADOW) { |
|
- | 788 | /* primary video rom always starts here */ |
|
- | 789 | start = (loff_t)0xC0000; |
|
- | 790 | *size = 0x20000; /* cover C000:0 through E000:0 */ |
|
- | 791 | } else { |
|
- | 792 | if (res->flags & |
|
- | 793 | (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) { |
|
- | 794 | *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); |
|
- | 795 | return (void __iomem *)(unsigned long) |
|
- | 796 | pci_resource_start(pdev, PCI_ROM_RESOURCE); |
|
- | 797 | } else { |
|
- | 798 | /* assign the ROM an address if it doesn't have one */ |
|
- | 799 | // if (res->parent == NULL && |
|
- | 800 | // pci_assign_resource(pdev,PCI_ROM_RESOURCE)) |
|
- | 801 | return NULL; |
|
- | 802 | // start = pci_resource_start(pdev, PCI_ROM_RESOURCE); |
|
- | 803 | // *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); |
|
- | 804 | // if (*size == 0) |
|
- | 805 | // return NULL; |
|
- | 806 | ||
- | 807 | /* Enable ROM space decodes */ |
|
- | 808 | // if (pci_enable_rom(pdev)) |
|
- | 809 | // return NULL; |
|
- | 810 | } |
|
- | 811 | } |
|
- | 812 | ||
- | 813 | rom = ioremap(start, *size); |
|
- | 814 | if (!rom) { |
|
- | 815 | /* restore enable if ioremap fails */ |
|
- | 816 | if (!(res->flags & (IORESOURCE_ROM_ENABLE | |
|
- | 817 | IORESOURCE_ROM_SHADOW | |
|
- | 818 | IORESOURCE_ROM_COPY))) |
|
- | 819 | pci_disable_rom(pdev); |
|
- | 820 | return NULL; |
|
- | 821 | } |
|
- | 822 | ||
- | 823 | /* |
|
- | 824 | * Try to find the true size of the ROM since sometimes the PCI window |
|
- | 825 | * size is much larger than the actual size of the ROM. |
|
- | 826 | * True size is important if the ROM is going to be copied. |
|
- | 827 | */ |
|
- | 828 | *size = pci_get_rom_size(pdev, rom, *size); |
|
- | 829 | return rom; |
|
- | 830 | } |
|
- | 831 | ||
- | 832 | void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom) |
|
- | 833 | { |
|
- | 834 | struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; |
|
- | 835 | ||
- | 836 | if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) |
|
- | 837 | return; |
|
- | 838 | ||
- | 839 | iounmap(rom); |
|
- | 840 | ||
- | 841 | /* Disable again before continuing, leave enabled if pci=rom */ |
|
- | 842 | if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW))) |
|
- | 843 | pci_disable_rom(pdev); |
|
- | 844 | }>=>>>><>><>><>>4)><4)> |
|
- | 845 |