Rev 2004 | Rev 3031 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 2004 | Rev 2160 | ||
---|---|---|---|
1 | #include |
1 | #include |
2 | #include |
2 | #include |
3 | #include |
3 | #include |
4 | #include |
4 | #include |
5 | #include |
- | |
6 | #include |
5 | #include |
- | 6 | #include |
|
7 | 7 | ||
8 | static LIST_HEAD(devices); |
8 | static LIST_HEAD(devices); |
9 | 9 | ||
10 | static pci_dev_t* pci_scan_device(u32_t bus, int devfn); |
10 | static pci_dev_t* pci_scan_device(u32_t bus, int devfn); |
11 | 11 | ||
12 | 12 | ||
13 | /* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */ |
13 | /* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */ |
14 | #define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */ |
14 | #define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */ |
15 | 15 | ||
16 | #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) |
16 | #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) |
17 | 17 | ||
18 | /* |
18 | /* |
19 | * Translate the low bits of the PCI base |
19 | * Translate the low bits of the PCI base |
20 | * to the resource type |
20 | * to the resource type |
21 | */ |
21 | */ |
22 | static inline unsigned int pci_calc_resource_flags(unsigned int flags) |
22 | static inline unsigned int pci_calc_resource_flags(unsigned int flags) |
23 | { |
23 | { |
24 | if (flags & PCI_BASE_ADDRESS_SPACE_IO) |
24 | if (flags & PCI_BASE_ADDRESS_SPACE_IO) |
25 | return IORESOURCE_IO; |
25 | return IORESOURCE_IO; |
26 | 26 | ||
27 | if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH) |
27 | if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH) |
28 | return IORESOURCE_MEM | IORESOURCE_PREFETCH; |
28 | return IORESOURCE_MEM | IORESOURCE_PREFETCH; |
29 | 29 | ||
30 | return IORESOURCE_MEM; |
30 | return IORESOURCE_MEM; |
31 | } |
31 | } |
32 | 32 | ||
33 | 33 | ||
34 | static u32_t pci_size(u32_t base, u32_t maxbase, u32_t mask) |
34 | static u32_t pci_size(u32_t base, u32_t maxbase, u32_t mask) |
35 | { |
35 | { |
36 | u32_t size = mask & maxbase; /* Find the significant bits */ |
36 | u32_t size = mask & maxbase; /* Find the significant bits */ |
37 | 37 | ||
38 | if (!size) |
38 | if (!size) |
39 | return 0; |
39 | return 0; |
40 | 40 | ||
41 | /* Get the lowest of them to find the decode size, and |
41 | /* Get the lowest of them to find the decode size, and |
42 | from that the extent. */ |
42 | from that the extent. */ |
43 | size = (size & ~(size-1)) - 1; |
43 | size = (size & ~(size-1)) - 1; |
44 | 44 | ||
45 | /* base == maxbase can be valid only if the BAR has |
45 | /* base == maxbase can be valid only if the BAR has |
46 | already been programmed with all 1s. */ |
46 | already been programmed with all 1s. */ |
47 | if (base == maxbase && ((base | size) & mask) != mask) |
47 | if (base == maxbase && ((base | size) & mask) != mask) |
48 | return 0; |
48 | return 0; |
49 | 49 | ||
50 | return size; |
50 | return size; |
51 | } |
51 | } |
52 | 52 | ||
53 | static u64_t pci_size64(u64_t base, u64_t maxbase, u64_t mask) |
53 | static u64_t pci_size64(u64_t base, u64_t maxbase, u64_t mask) |
54 | { |
54 | { |
55 | u64_t size = mask & maxbase; /* Find the significant bits */ |
55 | u64_t size = mask & maxbase; /* Find the significant bits */ |
56 | 56 | ||
57 | if (!size) |
57 | if (!size) |
58 | return 0; |
58 | return 0; |
59 | 59 | ||
60 | /* Get the lowest of them to find the decode size, and |
60 | /* Get the lowest of them to find the decode size, and |
61 | from that the extent. */ |
61 | from that the extent. */ |
62 | size = (size & ~(size-1)) - 1; |
62 | size = (size & ~(size-1)) - 1; |
63 | 63 | ||
64 | /* base == maxbase can be valid only if the BAR has |
64 | /* base == maxbase can be valid only if the BAR has |
65 | already been programmed with all 1s. */ |
65 | already been programmed with all 1s. */ |
66 | if (base == maxbase && ((base | size) & mask) != mask) |
66 | if (base == maxbase && ((base | size) & mask) != mask) |
67 | return 0; |
67 | return 0; |
68 | 68 | ||
69 | return size; |
69 | return size; |
70 | } |
70 | } |
71 | 71 | ||
72 | static inline int is_64bit_memory(u32_t mask) |
72 | static inline int is_64bit_memory(u32_t mask) |
73 | { |
73 | { |
74 | if ((mask & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) == |
74 | if ((mask & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) == |
75 | (PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64)) |
75 | (PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64)) |
76 | return 1; |
76 | return 1; |
77 | return 0; |
77 | return 0; |
78 | } |
78 | } |
79 | 79 | ||
/*
 * Probe up to @howmany base address registers of @dev and fill in the
 * corresponding dev->resource[] entries.  Sizing uses the standard PCI
 * protocol: save the BAR, write all ones, read back the size mask,
 * restore the saved value.  If @rom is non-zero it is the config-space
 * offset of the expansion ROM BAR, which is probed the same way.
 *
 * NOTE(review): config accesses are not serialized here — assumes the
 * caller runs during single-threaded bus enumeration; confirm.
 */
static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
    u32_t pos, reg, next;
    u32_t l, sz;
    struct resource *res;

    for (pos = 0; pos < howmany; pos = next)
    {
        u64_t l64;
        u64_t sz64;
        u32_t raw_sz;

        next = pos + 1;

        res = &dev->resource[pos];

        /* Each BAR is one 32-bit register starting at PCI_BASE_ADDRESS_0. */
        reg = PCI_BASE_ADDRESS_0 + (pos << 2);
        l = PciRead32(dev->busnr, dev->devfn, reg);
        PciWrite32(dev->busnr, dev->devfn, reg, ~0);
        sz = PciRead32(dev->busnr, dev->devfn, reg);
        PciWrite32(dev->busnr, dev->devfn, reg, l);

        /* All zeros or all ones: BAR is unimplemented or broken. */
        if (!sz || sz == 0xffffffff)
            continue;

        if (l == 0xffffffff)
            l = 0;

        /* Keep the unmasked probe value for the 64-bit combining below. */
        raw_sz = sz;
        if ((l & PCI_BASE_ADDRESS_SPACE) ==
            PCI_BASE_ADDRESS_SPACE_MEMORY)
        {
            sz = pci_size(l, sz, (u32_t)PCI_BASE_ADDRESS_MEM_MASK);
            /*
             * For 64bit prefetchable memory sz could be 0, if the
             * real size is bigger than 4G, so we need to check
             * szhi for that.
             */
            if (!is_64bit_memory(l) && !sz)
                continue;
            res->start = l & PCI_BASE_ADDRESS_MEM_MASK;
            res->flags |= l & ~PCI_BASE_ADDRESS_MEM_MASK;
        }
        else {
            /* I/O BARs decode at most 16 bits of address. */
            sz = pci_size(l, sz, PCI_BASE_ADDRESS_IO_MASK & 0xffff);
            if (!sz)
                continue;
            res->start = l & PCI_BASE_ADDRESS_IO_MASK;
            res->flags |= l & ~PCI_BASE_ADDRESS_IO_MASK;
        }
        res->end = res->start + (unsigned long) sz;
        res->flags |= pci_calc_resource_flags(l);
        if (is_64bit_memory(l))
        {
            u32_t szhi, lhi;

            /* A 64-bit BAR occupies two registers; probe the high half. */
            lhi = PciRead32(dev->busnr, dev->devfn, reg+4);
            PciWrite32(dev->busnr, dev->devfn, reg+4, ~0);
            szhi = PciRead32(dev->busnr, dev->devfn, reg+4);
            PciWrite32(dev->busnr, dev->devfn, reg+4, lhi);
            sz64 = ((u64_t)szhi << 32) | raw_sz;
            l64 = ((u64_t)lhi << 32) | l;
            sz64 = pci_size64(l64, sz64, PCI_BASE_ADDRESS_MEM_MASK);
            /* Skip the high dword: it is not an independent BAR. */
            next++;

#if BITS_PER_LONG == 64
            if (!sz64) {
                res->start = 0;
                res->end = 0;
                res->flags = 0;
                continue;
            }
            res->start = l64 & PCI_BASE_ADDRESS_MEM_MASK;
            res->end = res->start + sz64;
#else
            if (sz64 > 0x100000000ULL) {
                /* A 32-bit kernel cannot map a >4G region at all. */
                printk(KERN_ERR "PCI: Unable to handle 64-bit "
                       "BAR for device %s\n", pci_name(dev));
                res->start = 0;
                res->flags = 0;
            }
            else if (lhi)
            {
                /* 64-bit wide address, treat as disabled */
                PciWrite32(dev->busnr, dev->devfn, reg,
                           l & ~(u32_t)PCI_BASE_ADDRESS_MEM_MASK);
                PciWrite32(dev->busnr, dev->devfn, reg+4, 0);
                res->start = 0;
                res->end = sz;
            }
#endif
        }
    }

    if ( rom )
    {
        dev->rom_base_reg = rom;
        res = &dev->resource[PCI_ROM_RESOURCE];

        /* Size the expansion ROM BAR, keeping the enable bit clear. */
        l = PciRead32(dev->busnr, dev->devfn, rom);
        PciWrite32(dev->busnr, dev->devfn, rom, ~PCI_ROM_ADDRESS_ENABLE);
        sz = PciRead32(dev->busnr, dev->devfn, rom);
        PciWrite32(dev->busnr, dev->devfn, rom, l);

        if (l == 0xffffffff)
            l = 0;

        if (sz && sz != 0xffffffff)
        {
            sz = pci_size(l, sz, (u32_t)PCI_ROM_ADDRESS_MASK);

            if (sz)
            {
                res->flags = (l & IORESOURCE_ROM_ENABLE) |
                             IORESOURCE_MEM | IORESOURCE_PREFETCH |
                             IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
                res->start = l & PCI_ROM_ADDRESS_MASK;
                res->end = res->start + (unsigned long) sz;
            }
        }
    }
}
202 | 202 | ||
203 | static void pci_read_irq(struct pci_dev *dev) |
203 | static void pci_read_irq(struct pci_dev *dev) |
204 | { |
204 | { |
205 | u8_t irq; |
205 | u8_t irq; |
206 | 206 | ||
207 | irq = PciRead8(dev->bus, dev->devfn, PCI_INTERRUPT_PIN); |
207 | irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_PIN); |
208 | dev->pin = irq; |
208 | dev->pin = irq; |
209 | if (irq) |
209 | if (irq) |
210 | irq = PciRead8(dev->bus, dev->devfn, PCI_INTERRUPT_LINE); |
210 | irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_LINE); |
211 | dev->irq = irq; |
211 | dev->irq = irq; |
212 | }; |
212 | }; |
213 | 213 | ||
214 | 214 | ||
/*
 * Read the configuration header of @dev and initialise its generic
 * fields: class/revision, IRQ routing, BARs and subsystem IDs.  The
 * work done depends on the header type (normal device, PCI-PCI bridge
 * or CardBus bridge).
 *
 * Returns 0 on success, -1 when the header type is unknown.
 */
int pci_setup_device(struct pci_dev *dev)
{
    u32_t class;

    class = PciRead32(dev->busnr, dev->devfn, PCI_CLASS_REVISION);
    dev->revision = class & 0xff;
    class >>= 8;                                /* upper 3 bytes */
    dev->class = class;

    /* "Unknown power state" */
//    dev->current_state = PCI_UNKNOWN;

    /* Early fixups, before probing the BARs */
//    pci_fixup_device(pci_fixup_early, dev);

    /* Base class + sub-class, without the programming-interface byte. */
    class = dev->class >> 8;

    switch (dev->hdr_type)
    {
        case PCI_HEADER_TYPE_NORMAL:            /* standard header */
            if (class == PCI_CLASS_BRIDGE_PCI)
                goto bad;
            pci_read_irq(dev);
            pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
            dev->subsystem_vendor = PciRead16(dev->busnr, dev->devfn, PCI_SUBSYSTEM_VENDOR_ID);
            dev->subsystem_device = PciRead16(dev->busnr, dev->devfn, PCI_SUBSYSTEM_ID);

            /*
             * Do the ugly legacy mode stuff here rather than broken chip
             * quirk code. Legacy mode ATA controllers have fixed
             * addresses. These are not always echoed in BAR0-3, and
             * BAR0-3 in a few cases contain junk!
             */
            if (class == PCI_CLASS_STORAGE_IDE)
            {
                u8_t progif;

                progif = PciRead8(dev->busnr, dev->devfn, PCI_CLASS_PROG);
                if ((progif & 1) == 0)          /* primary channel in legacy mode */
                {
                    dev->resource[0].start = 0x1F0;
                    dev->resource[0].end   = 0x1F7;
                    dev->resource[0].flags = LEGACY_IO_RESOURCE;
                    dev->resource[1].start = 0x3F6;
                    dev->resource[1].end   = 0x3F6;
                    dev->resource[1].flags = LEGACY_IO_RESOURCE;
                }
                if ((progif & 4) == 0)          /* secondary channel in legacy mode */
                {
                    dev->resource[2].start = 0x170;
                    dev->resource[2].end   = 0x177;
                    dev->resource[2].flags = LEGACY_IO_RESOURCE;
                    dev->resource[3].start = 0x376;
                    dev->resource[3].end   = 0x376;
                    dev->resource[3].flags = LEGACY_IO_RESOURCE;
                };
            }
            break;

        case PCI_HEADER_TYPE_BRIDGE:            /* bridge header */
            if (class != PCI_CLASS_BRIDGE_PCI)
                goto bad;
            /* The PCI-to-PCI bridge spec requires that subtractive
               decoding (i.e. transparent) bridge must have programming
               interface code of 0x01. */
            pci_read_irq(dev);
            dev->transparent = ((dev->class & 0xff) == 1);
            pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
            break;

        case PCI_HEADER_TYPE_CARDBUS:           /* CardBus bridge header */
            if (class != PCI_CLASS_BRIDGE_CARDBUS)
                goto bad;
            pci_read_irq(dev);
            /* CardBus bridges have a single BAR and no standard ROM BAR. */
            pci_read_bases(dev, 1, 0);
            dev->subsystem_vendor = PciRead16(dev->busnr,
                                              dev->devfn,
                                              PCI_CB_SUBSYSTEM_VENDOR_ID);

            dev->subsystem_device = PciRead16(dev->busnr,
                                              dev->devfn,
                                              PCI_CB_SUBSYSTEM_ID);
            break;

        default:                                /* unknown header */
            printk(KERN_ERR "PCI: device %s has unknown header type %02x, ignoring.\n",
                   pci_name(dev), dev->hdr_type);
            return -1;

        bad:
            /* Class and header type disagree: trust the header type. */
            printk(KERN_ERR "PCI: %s: class %x doesn't match header type %02x. Ignoring class.\n",
                   pci_name(dev), class, dev->hdr_type);
            dev->class = PCI_CLASS_NOT_DEFINED;
    }

    /* We found a fine healthy device, go go go... */

    return 0;
};
313 | 313 | ||
314 | static pci_dev_t* pci_scan_device(u32_t bus, int devfn) |
314 | static pci_dev_t* pci_scan_device(u32_t busnr, int devfn) |
315 | { |
315 | { |
316 | pci_dev_t *dev; |
316 | pci_dev_t *dev; |
317 | 317 | ||
318 | u32_t id; |
318 | u32_t id; |
319 | u8_t hdr; |
319 | u8_t hdr; |
320 | 320 | ||
321 | int timeout = 10; |
321 | int timeout = 10; |
322 | 322 | ||
323 | id = PciRead32(bus,devfn, PCI_VENDOR_ID); |
323 | id = PciRead32(busnr, devfn, PCI_VENDOR_ID); |
324 | 324 | ||
325 | /* some broken boards return 0 or ~0 if a slot is empty: */ |
325 | /* some broken boards return 0 or ~0 if a slot is empty: */ |
326 | if (id == 0xffffffff || id == 0x00000000 || |
326 | if (id == 0xffffffff || id == 0x00000000 || |
327 | id == 0x0000ffff || id == 0xffff0000) |
327 | id == 0x0000ffff || id == 0xffff0000) |
328 | return NULL; |
328 | return NULL; |
329 | 329 | ||
330 | while (id == 0xffff0001) |
330 | while (id == 0xffff0001) |
331 | { |
331 | { |
332 | 332 | ||
333 | delay(timeout/10); |
333 | delay(timeout/10); |
334 | timeout *= 2; |
334 | timeout *= 2; |
335 | 335 | ||
336 | id = PciRead32(bus, devfn, PCI_VENDOR_ID); |
336 | id = PciRead32(busnr, devfn, PCI_VENDOR_ID); |
337 | 337 | ||
338 | /* Card hasn't responded in 60 seconds? Must be stuck. */ |
338 | /* Card hasn't responded in 60 seconds? Must be stuck. */ |
339 | if (timeout > 60 * 100) |
339 | if (timeout > 60 * 100) |
340 | { |
340 | { |
341 | printk(KERN_WARNING "Device %04x:%02x:%02x.%d not " |
341 | printk(KERN_WARNING "Device %04x:%02x:%02x.%d not " |
342 | "responding\n", bus,PCI_SLOT(devfn),PCI_FUNC(devfn)); |
342 | "responding\n", busnr,PCI_SLOT(devfn),PCI_FUNC(devfn)); |
343 | return NULL; |
343 | return NULL; |
344 | } |
344 | } |
345 | }; |
345 | }; |
346 | 346 | ||
347 | hdr = PciRead8(bus, devfn, PCI_HEADER_TYPE); |
347 | hdr = PciRead8(busnr, devfn, PCI_HEADER_TYPE); |
348 | 348 | ||
349 | dev = (pci_dev_t*)kzalloc(sizeof(pci_dev_t), 0); |
349 | dev = (pci_dev_t*)kzalloc(sizeof(pci_dev_t), 0); |
350 | 350 | ||
351 | INIT_LIST_HEAD(&dev->link); |
351 | INIT_LIST_HEAD(&dev->link); |
352 | 352 | ||
353 | if(unlikely(dev == NULL)) |
353 | if(unlikely(dev == NULL)) |
354 | return NULL; |
354 | return NULL; |
355 | 355 | ||
356 | dev->pci_dev.bus = bus; |
356 | dev->pci_dev.busnr = busnr; |
357 | dev->pci_dev.devfn = devfn; |
357 | dev->pci_dev.devfn = devfn; |
358 | dev->pci_dev.hdr_type = hdr & 0x7f; |
358 | dev->pci_dev.hdr_type = hdr & 0x7f; |
359 | dev->pci_dev.multifunction = !!(hdr & 0x80); |
359 | dev->pci_dev.multifunction = !!(hdr & 0x80); |
360 | dev->pci_dev.vendor = id & 0xffff; |
360 | dev->pci_dev.vendor = id & 0xffff; |
361 | dev->pci_dev.device = (id >> 16) & 0xffff; |
361 | dev->pci_dev.device = (id >> 16) & 0xffff; |
362 | 362 | ||
363 | pci_setup_device(&dev->pci_dev); |
363 | pci_setup_device(&dev->pci_dev); |
364 | 364 | ||
365 | return dev; |
365 | return dev; |
366 | 366 | ||
367 | }; |
367 | }; |
368 | 368 | ||
369 | int pci_scan_slot(u32_t bus, int devfn) |
369 | int pci_scan_slot(u32_t bus, int devfn) |
370 | { |
370 | { |
371 | int func, nr = 0; |
371 | int func, nr = 0; |
372 | 372 | ||
373 | for (func = 0; func < 8; func++, devfn++) |
373 | for (func = 0; func < 8; func++, devfn++) |
374 | { |
374 | { |
375 | pci_dev_t *dev; |
375 | pci_dev_t *dev; |
376 | 376 | ||
377 | dev = pci_scan_device(bus, devfn); |
377 | dev = pci_scan_device(bus, devfn); |
378 | if( dev ) |
378 | if( dev ) |
379 | { |
379 | { |
380 | list_add(&dev->link, &devices); |
380 | list_add(&dev->link, &devices); |
381 | 381 | ||
382 | nr++; |
382 | nr++; |
383 | 383 | ||
384 | /* |
384 | /* |
385 | * If this is a single function device, |
385 | * If this is a single function device, |
386 | * don't scan past the first function. |
386 | * don't scan past the first function. |
387 | */ |
387 | */ |
388 | if (!dev->pci_dev.multifunction) |
388 | if (!dev->pci_dev.multifunction) |
389 | { |
389 | { |
390 | if (func > 0) { |
390 | if (func > 0) { |
391 | dev->pci_dev.multifunction = 1; |
391 | dev->pci_dev.multifunction = 1; |
392 | } |
392 | } |
393 | else { |
393 | else { |
394 | break; |
394 | break; |
395 | } |
395 | } |
396 | } |
396 | } |
397 | } |
397 | } |
398 | else { |
398 | else { |
399 | if (func == 0) |
399 | if (func == 0) |
400 | break; |
400 | break; |
401 | } |
401 | } |
402 | }; |
402 | }; |
403 | 403 | ||
404 | return nr; |
404 | return nr; |
405 | }; |
405 | }; |
406 | 406 | ||
407 | 407 | ||
408 | void pci_scan_bus(u32_t bus) |
408 | void pci_scan_bus(u32_t bus) |
409 | { |
409 | { |
410 | u32_t devfn; |
410 | u32_t devfn; |
411 | pci_dev_t *dev; |
411 | pci_dev_t *dev; |
412 | 412 | ||
413 | 413 | ||
414 | for (devfn = 0; devfn < 0x100; devfn += 8) |
414 | for (devfn = 0; devfn < 0x100; devfn += 8) |
415 | pci_scan_slot(bus, devfn); |
415 | pci_scan_slot(bus, devfn); |
416 | 416 | ||
417 | } |
417 | } |
418 | 418 | ||
419 | int enum_pci_devices() |
419 | int enum_pci_devices() |
420 | { |
420 | { |
421 | pci_dev_t *dev; |
421 | pci_dev_t *dev; |
422 | u32_t last_bus; |
422 | u32_t last_bus; |
423 | u32_t bus = 0 , devfn = 0; |
423 | u32_t bus = 0 , devfn = 0; |
424 | 424 | ||
425 | // list_initialize(&devices); |
425 | // list_initialize(&devices); |
426 | 426 | ||
427 | last_bus = PciApi(1); |
427 | last_bus = PciApi(1); |
428 | 428 | ||
429 | 429 | ||
430 | if( unlikely(last_bus == -1)) |
430 | if( unlikely(last_bus == -1)) |
431 | return -1; |
431 | return -1; |
432 | 432 | ||
433 | for(;bus <= last_bus; bus++) |
433 | for(;bus <= last_bus; bus++) |
434 | pci_scan_bus(bus); |
434 | pci_scan_bus(bus); |
435 | 435 | ||
436 | // for(dev = (dev_t*)devices.next; |
436 | // for(dev = (dev_t*)devices.next; |
437 | // &dev->link != &devices; |
437 | // &dev->link != &devices; |
438 | // dev = (dev_t*)dev->link.next) |
438 | // dev = (dev_t*)dev->link.next) |
439 | // { |
439 | // { |
440 | // dbgprintf("PCI device %x:%x bus:%x devfn:%x\n", |
440 | // dbgprintf("PCI device %x:%x bus:%x devfn:%x\n", |
441 | // dev->pci_dev.vendor, |
441 | // dev->pci_dev.vendor, |
442 | // dev->pci_dev.device, |
442 | // dev->pci_dev.device, |
443 | // dev->pci_dev.bus, |
443 | // dev->pci_dev.bus, |
444 | // dev->pci_dev.devfn); |
444 | // dev->pci_dev.devfn); |
445 | // |
445 | // |
446 | // } |
446 | // } |
447 | return 0; |
447 | return 0; |
448 | } |
448 | } |
449 | 449 | ||
450 | #define PCI_FIND_CAP_TTL 48 |
450 | #define PCI_FIND_CAP_TTL 48 |
451 | 451 | ||
452 | static int __pci_find_next_cap_ttl(unsigned int bus, unsigned int devfn, |
452 | static int __pci_find_next_cap_ttl(unsigned int bus, unsigned int devfn, |
453 | u8 pos, int cap, int *ttl) |
453 | u8 pos, int cap, int *ttl) |
454 | { |
454 | { |
455 | u8 id; |
455 | u8 id; |
456 | 456 | ||
457 | while ((*ttl)--) { |
457 | while ((*ttl)--) { |
458 | pos = PciRead8(bus, devfn, pos); |
458 | pos = PciRead8(bus, devfn, pos); |
459 | if (pos < 0x40) |
459 | if (pos < 0x40) |
460 | break; |
460 | break; |
461 | pos &= ~3; |
461 | pos &= ~3; |
462 | id = PciRead8(bus, devfn, pos + PCI_CAP_LIST_ID); |
462 | id = PciRead8(bus, devfn, pos + PCI_CAP_LIST_ID); |
463 | if (id == 0xff) |
463 | if (id == 0xff) |
464 | break; |
464 | break; |
465 | if (id == cap) |
465 | if (id == cap) |
466 | return pos; |
466 | return pos; |
467 | pos += PCI_CAP_LIST_NEXT; |
467 | pos += PCI_CAP_LIST_NEXT; |
468 | } |
468 | } |
469 | return 0; |
469 | return 0; |
470 | } |
470 | } |
471 | 471 | ||
472 | static int __pci_find_next_cap(unsigned int bus, unsigned int devfn, |
472 | static int __pci_find_next_cap(unsigned int bus, unsigned int devfn, |
473 | u8 pos, int cap) |
473 | u8 pos, int cap) |
474 | { |
474 | { |
475 | int ttl = PCI_FIND_CAP_TTL; |
475 | int ttl = PCI_FIND_CAP_TTL; |
476 | 476 | ||
477 | return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl); |
477 | return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl); |
478 | } |
478 | } |
479 | 479 | ||
480 | static int __pci_bus_find_cap_start(unsigned int bus, |
480 | static int __pci_bus_find_cap_start(unsigned int bus, |
481 | unsigned int devfn, u8 hdr_type) |
481 | unsigned int devfn, u8 hdr_type) |
482 | { |
482 | { |
483 | u16 status; |
483 | u16 status; |
484 | 484 | ||
485 | status = PciRead16(bus, devfn, PCI_STATUS); |
485 | status = PciRead16(bus, devfn, PCI_STATUS); |
486 | if (!(status & PCI_STATUS_CAP_LIST)) |
486 | if (!(status & PCI_STATUS_CAP_LIST)) |
487 | return 0; |
487 | return 0; |
488 | 488 | ||
489 | switch (hdr_type) { |
489 | switch (hdr_type) { |
490 | case PCI_HEADER_TYPE_NORMAL: |
490 | case PCI_HEADER_TYPE_NORMAL: |
491 | case PCI_HEADER_TYPE_BRIDGE: |
491 | case PCI_HEADER_TYPE_BRIDGE: |
492 | return PCI_CAPABILITY_LIST; |
492 | return PCI_CAPABILITY_LIST; |
493 | case PCI_HEADER_TYPE_CARDBUS: |
493 | case PCI_HEADER_TYPE_CARDBUS: |
494 | return PCI_CB_CAPABILITY_LIST; |
494 | return PCI_CB_CAPABILITY_LIST; |
495 | default: |
495 | default: |
496 | return 0; |
496 | return 0; |
497 | } |
497 | } |
498 | 498 | ||
499 | return 0; |
499 | return 0; |
500 | } |
500 | } |
501 | 501 | ||
502 | 502 | ||
503 | int pci_find_capability(struct pci_dev *dev, int cap) |
503 | int pci_find_capability(struct pci_dev *dev, int cap) |
504 | { |
504 | { |
505 | int pos; |
505 | int pos; |
506 | 506 | ||
507 | pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type); |
507 | pos = __pci_bus_find_cap_start(dev->busnr, dev->devfn, dev->hdr_type); |
508 | if (pos) |
508 | if (pos) |
509 | pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap); |
509 | pos = __pci_find_next_cap(dev->busnr, dev->devfn, pos, cap); |
510 | 510 | ||
511 | return pos; |
511 | return pos; |
512 | } |
512 | } |
513 | 513 | ||
514 | 514 | ||
515 | #if 0 |
515 | #if 0 |
516 | /** |
516 | /** |
517 | * pci_set_power_state - Set the power state of a PCI device |
517 | * pci_set_power_state - Set the power state of a PCI device |
518 | * @dev: PCI device to be suspended |
518 | * @dev: PCI device to be suspended |
519 | * @state: PCI power state (D0, D1, D2, D3hot, D3cold) we're entering |
519 | * @state: PCI power state (D0, D1, D2, D3hot, D3cold) we're entering |
520 | * |
520 | * |
521 | * Transition a device to a new power state, using the Power Management |
521 | * Transition a device to a new power state, using the Power Management |
522 | * Capabilities in the device's config space. |
522 | * Capabilities in the device's config space. |
523 | * |
523 | * |
524 | * RETURN VALUE: |
524 | * RETURN VALUE: |
525 | * -EINVAL if trying to enter a lower state than we're already in. |
525 | * -EINVAL if trying to enter a lower state than we're already in. |
526 | * 0 if we're already in the requested state. |
526 | * 0 if we're already in the requested state. |
527 | * -EIO if device does not support PCI PM. |
527 | * -EIO if device does not support PCI PM. |
528 | * 0 if we can successfully change the power state. |
528 | * 0 if we can successfully change the power state. |
529 | */ |
529 | */ |
/*
 * pci_set_power_state - transition a device into a new PCI PM power state
 * (D0..D3hot) by programming its PM capability registers.
 *
 * See the block comment above for the full return-value contract
 * (-EINVAL / -EIO / 0).
 */
int
pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
    int pm, need_restore = 0;
    u16 pmcsr, pmc;

    /* bound the state we're entering: D3cold is not handled here */
    if (state > PCI_D3hot)
        state = PCI_D3hot;

    /*
     * If the device or the parent bridge can't support PCI PM, ignore
     * the request if we're doing anything besides putting it into D0
     * (which would only happen on boot).
     */
    if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
        return 0;

    /* find PCI PM capability in list */
    pm = pci_find_capability(dev, PCI_CAP_ID_PM);

    /* abort if the device doesn't support PM capabilities */
    if (!pm)
        return -EIO;

    /* Validate current state:
     * Can enter D0 from any state, but if we can only go deeper
     * to sleep if we're already in a low power state
     */
    if (state != PCI_D0 && dev->current_state > state) {
        printk(KERN_ERR "%s(): %s: state=%d, current state=%d\n",
            __FUNCTION__, pci_name(dev), state, dev->current_state);
        return -EINVAL;
    } else if (dev->current_state == state)
        return 0;        /* we're already there */

    /* reject PM capability register layouts newer than we understand */
    pci_read_config_word(dev,pm + PCI_PM_PMC,&pmc);
    if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
        printk(KERN_DEBUG
               "PCI: %s has unsupported PM cap regs version (%u)\n",
               pci_name(dev), pmc & PCI_PM_CAP_VER_MASK);
        return -EIO;
    }

    /* check if this device supports the desired state */
    if (state == PCI_D1 && !(pmc & PCI_PM_CAP_D1))
        return -EIO;
    else if (state == PCI_D2 && !(pmc & PCI_PM_CAP_D2))
        return -EIO;

    pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);

    /* If we're (effectively) in D3, force entire word to 0.
     * This doesn't affect PME_Status, disables PME_En, and
     * sets PowerState to 0.
     */
    switch (dev->current_state) {
    case PCI_D0:
    case PCI_D1:
    case PCI_D2:
        /* normal transition: only touch the PowerState field */
        pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
        pmcsr |= state;
        break;
    case PCI_UNKNOWN: /* Boot-up */
        /* device left in D3hot by firmware and it soft-resets on the
         * way back to D0 -> its BARs must be restored afterwards */
        if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
         && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
            need_restore = 1;
        /* Fall-through: force to D0 */
    default:
        pmcsr = 0;
        break;
    }

    /* enter specified state */
    pci_write_config_word(dev, pm + PCI_PM_CTRL, pmcsr);

    /* Mandatory power management transition delays */
    /* see PCI PM 1.1 5.6.1 table 18 */
    if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
        msleep(pci_pm_d3_delay);
    else if (state == PCI_D2 || dev->current_state == PCI_D2)
        udelay(200);

    /*
     * Give firmware a chance to be called, such as ACPI _PRx, _PSx
     * Firmware method after native method ?
     */
    if (platform_pci_set_power_state)
        platform_pci_set_power_state(dev, state);

    dev->current_state = state;

    /* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
     * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
     * from D3hot to D0 _may_ perform an internal reset, thereby
     * going to "D0 Uninitialized" rather than "D0 Initialized".
     * For example, at least some versions of the 3c905B and the
     * 3c556B exhibit this behaviour.
     *
     * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
     * devices in a D3hot state at boot.  Consequently, we need to
     * restore at least the BARs so that the device will be
     * accessible to its driver.
     */
    if (need_restore)
        pci_restore_bars(dev);

    return 0;
}
640 | #endif |
640 | #endif |
641 | 641 | ||
642 | int pcibios_enable_resources(struct pci_dev *dev, int mask) |
642 | int pcibios_enable_resources(struct pci_dev *dev, int mask) |
643 | { |
643 | { |
644 | u16_t cmd, old_cmd; |
644 | u16_t cmd, old_cmd; |
645 | int idx; |
645 | int idx; |
646 | struct resource *r; |
646 | struct resource *r; |
647 | 647 | ||
648 | cmd = PciRead16(dev->bus, dev->devfn, PCI_COMMAND); |
648 | cmd = PciRead16(dev->busnr, dev->devfn, PCI_COMMAND); |
649 | old_cmd = cmd; |
649 | old_cmd = cmd; |
650 | for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) |
650 | for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) |
651 | { |
651 | { |
652 | /* Only set up the requested stuff */ |
652 | /* Only set up the requested stuff */ |
653 | if (!(mask & (1 << idx))) |
653 | if (!(mask & (1 << idx))) |
654 | continue; |
654 | continue; |
655 | 655 | ||
656 | r = &dev->resource[idx]; |
656 | r = &dev->resource[idx]; |
657 | if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM))) |
657 | if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM))) |
658 | continue; |
658 | continue; |
659 | if ((idx == PCI_ROM_RESOURCE) && |
659 | if ((idx == PCI_ROM_RESOURCE) && |
660 | (!(r->flags & IORESOURCE_ROM_ENABLE))) |
660 | (!(r->flags & IORESOURCE_ROM_ENABLE))) |
661 | continue; |
661 | continue; |
662 | if (!r->start && r->end) { |
662 | if (!r->start && r->end) { |
663 | printk(KERN_ERR "PCI: Device %s not available " |
663 | printk(KERN_ERR "PCI: Device %s not available " |
664 | "because of resource %d collisions\n", |
664 | "because of resource %d collisions\n", |
665 | pci_name(dev), idx); |
665 | pci_name(dev), idx); |
666 | return -EINVAL; |
666 | return -EINVAL; |
667 | } |
667 | } |
668 | if (r->flags & IORESOURCE_IO) |
668 | if (r->flags & IORESOURCE_IO) |
669 | cmd |= PCI_COMMAND_IO; |
669 | cmd |= PCI_COMMAND_IO; |
670 | if (r->flags & IORESOURCE_MEM) |
670 | if (r->flags & IORESOURCE_MEM) |
671 | cmd |= PCI_COMMAND_MEMORY; |
671 | cmd |= PCI_COMMAND_MEMORY; |
672 | } |
672 | } |
673 | if (cmd != old_cmd) { |
673 | if (cmd != old_cmd) { |
674 | printk("PCI: Enabling device %s (%04x -> %04x)\n", |
674 | printk("PCI: Enabling device %s (%04x -> %04x)\n", |
675 | pci_name(dev), old_cmd, cmd); |
675 | pci_name(dev), old_cmd, cmd); |
676 | PciWrite16(dev->bus, dev->devfn, PCI_COMMAND, cmd); |
676 | PciWrite16(dev->busnr, dev->devfn, PCI_COMMAND, cmd); |
677 | } |
677 | } |
678 | return 0; |
678 | return 0; |
679 | } |
679 | } |
680 | 680 | ||
681 | 681 | ||
/*
 * Arch hook: enable the device's selected resources.
 *
 * Simply forwards to pcibios_enable_resources(); IRQ routing via
 * pcibios_enable_irq() is not wired up here (see the disabled code).
 */
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
    int err = pcibios_enable_resources(dev, mask);

    if (err < 0)
        return err;

    /* NOTE: upstream would now route the legacy IRQ when MSI is off:
     *   if (!dev->msi_enabled)
     *       return pcibios_enable_irq(dev);
     * that step is intentionally skipped in this port. */
    return 0;
}
693 | 693 | ||
694 | 694 | ||
/*
 * Core of pci_enable_device(): enable decoding for the BARs in @bars.
 *
 * The upstream steps of waking the device (pci_set_power_state(dev,
 * PCI_D0)) and running enable-time fixups are disabled in this port;
 * only the arch-level resource enable is performed.  The return value
 * of pcibios_enable_device() is intentionally not propagated here
 * (matching the original disabled error path).
 */
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
    int err;

    /* disabled upstream sequence kept for reference:
     *   err = pci_set_power_state(dev, PCI_D0);
     *   if (err < 0 && err != -EIO) return err;
     */
    err = pcibios_enable_device(dev, bars);
    /* disabled:
     *   if (err < 0) return err;
     *   pci_fixup_device(pci_fixup_enable, dev);
     */

    return 0;
}
709 | 709 | ||
710 | 710 | ||
711 | static int __pci_enable_device_flags(struct pci_dev *dev, |
711 | static int __pci_enable_device_flags(struct pci_dev *dev, |
712 | resource_size_t flags) |
712 | resource_size_t flags) |
713 | { |
713 | { |
714 | int err; |
714 | int err; |
715 | int i, bars = 0; |
715 | int i, bars = 0; |
716 | 716 | ||
717 | // if (atomic_add_return(1, &dev->enable_cnt) > 1) |
717 | // if (atomic_add_return(1, &dev->enable_cnt) > 1) |
718 | // return 0; /* already enabled */ |
718 | // return 0; /* already enabled */ |
719 | 719 | ||
720 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) |
720 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) |
721 | if (dev->resource[i].flags & flags) |
721 | if (dev->resource[i].flags & flags) |
722 | bars |= (1 << i); |
722 | bars |= (1 << i); |
723 | 723 | ||
724 | err = do_pci_enable_device(dev, bars); |
724 | err = do_pci_enable_device(dev, bars); |
725 | // if (err < 0) |
725 | // if (err < 0) |
726 | // atomic_dec(&dev->enable_cnt); |
726 | // atomic_dec(&dev->enable_cnt); |
727 | return err; |
727 | return err; |
728 | } |
728 | } |
729 | 729 | ||
730 | 730 | ||
731 | /** |
731 | /** |
732 | * pci_enable_device - Initialize device before it's used by a driver. |
732 | * pci_enable_device - Initialize device before it's used by a driver. |
733 | * @dev: PCI device to be initialized |
733 | * @dev: PCI device to be initialized |
734 | * |
734 | * |
735 | * Initialize device before it's used by a driver. Ask low-level code |
735 | * Initialize device before it's used by a driver. Ask low-level code |
736 | * to enable I/O and memory. Wake up the device if it was suspended. |
736 | * to enable I/O and memory. Wake up the device if it was suspended. |
737 | * Beware, this function can fail. |
737 | * Beware, this function can fail. |
738 | * |
738 | * |
739 | * Note we don't actually enable the device many times if we call |
739 | * Note we don't actually enable the device many times if we call |
740 | * this function repeatedly (we just increment the count). |
740 | * this function repeatedly (we just increment the count). |
741 | */ |
741 | */ |
742 | int pci_enable_device(struct pci_dev *dev) |
742 | int pci_enable_device(struct pci_dev *dev) |
743 | { |
743 | { |
744 | return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO); |
744 | return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO); |
745 | } |
745 | } |
746 | 746 | ||
747 | 747 | ||
748 | 748 | ||
/*
 * Scan the global `devices` list for a device matching @idlist.
 *
 * On a match, copies the matched device's pci_dev into @pdev and returns
 * the matching id-table entry; returns NULL when nothing matches.
 *
 * NOTE(review): the vendor pre-filter compares only against
 * idlist[0].vendor, and the inner loop matches on ent->device alone —
 * so the table is effectively assumed to be single-vendor.  Confirm
 * against the drivers that build these tables.
 */
struct pci_device_id* find_pci_device(pci_dev_t* pdev, struct pci_device_id *idlist)
{
    pci_dev_t *dev;
    struct pci_device_id *ent;

    /* walk the intrusive list; `link` is the first member of pci_dev_t,
     * so the node pointer doubles as the container pointer */
    for(dev = (pci_dev_t*)devices.next;
        &dev->link != &devices;
        dev = (pci_dev_t*)dev->link.next)
    {
        /* cheap reject: vendor of the table's first entry only */
        if( dev->pci_dev.vendor != idlist->vendor )
            continue;

        /* table is terminated by an entry with vendor == 0 */
        for(ent = idlist; ent->vendor != 0; ent++)
        {
            if(unlikely(ent->device == dev->pci_dev.device))
            {
                /* shallow copy of the device descriptor to the caller */
                pdev->pci_dev = dev->pci_dev;
                return ent;
            }
        };
    }

    return NULL;
};
773 | 773 | ||
774 | 774 | ||
775 | 775 | ||
/**
 * pci_map_rom - map a PCI ROM to kernel space
 * @pdev: pointer to pci device struct
 * @size: pointer to receive size of pci window over ROM
 * @return: kernel virtual pointer to image of ROM
 *
 * Map a PCI ROM into kernel space. If ROM is boot video ROM,
 * the shadow BIOS copy will be returned instead of the
 * actual ROM.
 *
 * NOTE(review): the generic BAR-based path below is compiled out
 * (#if 0); this port always returns the legacy shadow copy at
 * 0xC0000, so @pdev is effectively unused.
 */

#define legacyBIOSLocation 0xC0000      /* physical address of the VBIOS shadow */
#define OS_BASE            0x80000000   /* kernel linear-mapping base */

void *pci_map_rom(struct pci_dev *pdev, size_t *size)
{
    struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
    u32_t start;
    void *rom;

#if 0
    /*
     * IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy
     * memory map if the VGA enable bit of the Bridge Control register is
     * set for embedded VGA.
     */
    if (res->flags & IORESOURCE_ROM_SHADOW) {
        /* primary video rom always starts here */
        start = (u32_t)0xC0000;
        *size = 0x20000; /* cover C000:0 through E000:0 */
    } else {
        if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) {
            /* a copy already exists; just return its address */
            *size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
            return (void *)(unsigned long)
                pci_resource_start(pdev, PCI_ROM_RESOURCE);
        } else {
            /* assign the ROM an address if it doesn't have one */
            //if (res->parent == NULL &&
            //    pci_assign_resource(pdev,PCI_ROM_RESOURCE))
            //  return NULL;
            start = pci_resource_start(pdev, PCI_ROM_RESOURCE);
            *size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
            if (*size == 0)
                return NULL;

            /* Enable ROM space decodes */
            if (pci_enable_rom(pdev))
                return NULL;
        }
    }

    rom = ioremap(start, *size);
    if (!rom) {
        /* restore enable if ioremap fails */
        if (!(res->flags & (IORESOURCE_ROM_ENABLE |
                            IORESOURCE_ROM_SHADOW |
                            IORESOURCE_ROM_COPY)))
            pci_disable_rom(pdev);
        return NULL;
    }

    /*
     * Try to find the true size of the ROM since sometimes the PCI window
     * size is much larger than the actual size of the ROM.
     * True size is important if the ROM is going to be copied.
     */
    *size = pci_get_rom_size(rom, *size);

#endif

    unsigned char tmp[32];
    rom = NULL;

    dbgprintf("Getting BIOS copy from legacy VBIOS location\n");
    memcpy(tmp,(char*)(OS_BASE+legacyBIOSLocation), 32);
    /* byte 2 of a PCI option ROM header is the image size in 512-byte
     * units (per the PCI firmware spec) */
    *size = tmp[2] * 512;
    if (*size > 0x10000 )
    {
        /* > 64 KiB cannot be a valid legacy VBIOS image */
        *size = 0;
        dbgprintf("Invalid BIOS length field\n");
    }
    else
        rom = (void*)( OS_BASE+legacyBIOSLocation);

    return rom;
}
861 | } |
862 | 862 | ||
863 | 863 | ||
864 | int |
864 | int |
865 | pci_set_dma_mask(struct pci_dev *dev, u64 mask) |
865 | pci_set_dma_mask(struct pci_dev *dev, u64 mask) |
866 | { |
866 | { |
867 | // if (!pci_dma_supported(dev, mask)) |
867 | // if (!pci_dma_supported(dev, mask)) |
868 | // return -EIO; |
868 | // return -EIO; |
869 | 869 | ||
870 | dev->dma_mask = mask; |
870 | dev->dma_mask = mask; |
871 | 871 | ||
872 | return 0; |
872 | return 0; |
873 | }>><>>>>>><>>>=>>>><>><>><>>4)><4)> |
873 | }>><>>>>>><>>>=>>>><>><>><>>4)><4)> |