Rev 1246 | Rev 1404 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 1246 | Rev 1403 | ||
---|---|---|---|
- | 1 | #include |
|
1 | #include |
2 | #include |
2 | #include |
3 | #include |
3 | #include |
4 | #include |
4 | 5 | ||
/* Global list of every PCI device discovered by the scan
 * (nodes are pci_dev_t, linked through their .link member). */
static LIST_HEAD(devices);

/* Probe a single bus/devfn address; returns the newly allocated
 * device node, or NULL if the slot is empty / not responding. */
static pci_dev_t* pci_scan_device(u32_t bus, int devfn);


/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */
#define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */

/* Flags used for the hard-wired legacy IDE port ranges (0x1F0... / 0x170...). */
#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
14 | 15 | ||
15 | /* |
16 | /* |
16 | * Translate the low bits of the PCI base |
17 | * Translate the low bits of the PCI base |
17 | * to the resource type |
18 | * to the resource type |
18 | */ |
19 | */ |
19 | static inline unsigned int pci_calc_resource_flags(unsigned int flags) |
20 | static inline unsigned int pci_calc_resource_flags(unsigned int flags) |
20 | { |
21 | { |
21 | if (flags & PCI_BASE_ADDRESS_SPACE_IO) |
22 | if (flags & PCI_BASE_ADDRESS_SPACE_IO) |
22 | return IORESOURCE_IO; |
23 | return IORESOURCE_IO; |
23 | 24 | ||
24 | if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH) |
25 | if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH) |
25 | return IORESOURCE_MEM | IORESOURCE_PREFETCH; |
26 | return IORESOURCE_MEM | IORESOURCE_PREFETCH; |
26 | 27 | ||
27 | return IORESOURCE_MEM; |
28 | return IORESOURCE_MEM; |
28 | } |
29 | } |
29 | 30 | ||
30 | 31 | ||
31 | static u32_t pci_size(u32_t base, u32_t maxbase, u32_t mask) |
32 | static u32_t pci_size(u32_t base, u32_t maxbase, u32_t mask) |
32 | { |
33 | { |
33 | u32_t size = mask & maxbase; /* Find the significant bits */ |
34 | u32_t size = mask & maxbase; /* Find the significant bits */ |
34 | 35 | ||
35 | if (!size) |
36 | if (!size) |
36 | return 0; |
37 | return 0; |
37 | 38 | ||
38 | /* Get the lowest of them to find the decode size, and |
39 | /* Get the lowest of them to find the decode size, and |
39 | from that the extent. */ |
40 | from that the extent. */ |
40 | size = (size & ~(size-1)) - 1; |
41 | size = (size & ~(size-1)) - 1; |
41 | 42 | ||
42 | /* base == maxbase can be valid only if the BAR has |
43 | /* base == maxbase can be valid only if the BAR has |
43 | already been programmed with all 1s. */ |
44 | already been programmed with all 1s. */ |
44 | if (base == maxbase && ((base | size) & mask) != mask) |
45 | if (base == maxbase && ((base | size) & mask) != mask) |
45 | return 0; |
46 | return 0; |
46 | 47 | ||
47 | return size; |
48 | return size; |
48 | } |
49 | } |
49 | 50 | ||
50 | static u64_t pci_size64(u64_t base, u64_t maxbase, u64_t mask) |
51 | static u64_t pci_size64(u64_t base, u64_t maxbase, u64_t mask) |
51 | { |
52 | { |
52 | u64_t size = mask & maxbase; /* Find the significant bits */ |
53 | u64_t size = mask & maxbase; /* Find the significant bits */ |
53 | 54 | ||
54 | if (!size) |
55 | if (!size) |
55 | return 0; |
56 | return 0; |
56 | 57 | ||
57 | /* Get the lowest of them to find the decode size, and |
58 | /* Get the lowest of them to find the decode size, and |
58 | from that the extent. */ |
59 | from that the extent. */ |
59 | size = (size & ~(size-1)) - 1; |
60 | size = (size & ~(size-1)) - 1; |
60 | 61 | ||
61 | /* base == maxbase can be valid only if the BAR has |
62 | /* base == maxbase can be valid only if the BAR has |
62 | already been programmed with all 1s. */ |
63 | already been programmed with all 1s. */ |
63 | if (base == maxbase && ((base | size) & mask) != mask) |
64 | if (base == maxbase && ((base | size) & mask) != mask) |
64 | return 0; |
65 | return 0; |
65 | 66 | ||
66 | return size; |
67 | return size; |
67 | } |
68 | } |
68 | 69 | ||
69 | static inline int is_64bit_memory(u32_t mask) |
70 | static inline int is_64bit_memory(u32_t mask) |
70 | { |
71 | { |
71 | if ((mask & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) == |
72 | if ((mask & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) == |
72 | (PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64)) |
73 | (PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64)) |
73 | return 1; |
74 | return 1; |
74 | return 0; |
75 | return 0; |
75 | } |
76 | } |
76 | 77 | ||
/* Probe and record the device's base-address registers.
 *
 * For each of the first 'howmany' BARs: read the current value, write
 * all 1s, read back to learn the decode size, then restore the original
 * value, filling in dev->resource[pos]. If 'rom' is non-zero it is the
 * config-space offset of the expansion-ROM BAR, which is sized the same
 * way. NOTE(review): the write/read/restore sequence is order-critical;
 * the device briefly decodes a bogus address while sizing.
 */
static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
    u32_t  pos, reg, next;
    u32_t  l, sz;
    struct resource *res;

    for(pos=0; pos < howmany; pos = next)
    {
        u64_t l64;
        u64_t sz64;
        u32_t raw_sz;

        next = pos + 1;     /* bumped again below for 64-bit BARs (two slots) */

        res = &dev->resource[pos];

        /* BARs are consecutive 32-bit registers starting at BASE_ADDRESS_0. */
        reg = PCI_BASE_ADDRESS_0 + (pos << 2);
        l = PciRead32(dev->bus, dev->devfn, reg);
        PciWrite32(dev->bus, dev->devfn, reg, ~0);
        sz = PciRead32(dev->bus, dev->devfn, reg);
        PciWrite32(dev->bus, dev->devfn, reg, l);   /* restore original */

        /* All-zero or all-ones read-back: BAR not implemented. */
        if (!sz || sz == 0xffffffff)
            continue;

        if (l == 0xffffffff)
            l = 0;

        raw_sz = sz;        /* keep the raw read-back for the 64-bit path */
        if ((l & PCI_BASE_ADDRESS_SPACE) ==
            PCI_BASE_ADDRESS_SPACE_MEMORY)
        {
            sz = pci_size(l, sz, (u32_t)PCI_BASE_ADDRESS_MEM_MASK);
            /*
             * For 64bit prefetchable memory sz could be 0, if the
             * real size is bigger than 4G, so we need to check
             * szhi for that.
             */
            if (!is_64bit_memory(l) && !sz)
                continue;
            res->start = l & PCI_BASE_ADDRESS_MEM_MASK;
            res->flags |= l & ~PCI_BASE_ADDRESS_MEM_MASK;
        }
        else {
            /* I/O BAR: only the low 16 bits of the mask are meaningful. */
            sz = pci_size(l, sz, PCI_BASE_ADDRESS_IO_MASK & 0xffff);
            if (!sz)
                continue;
            res->start = l & PCI_BASE_ADDRESS_IO_MASK;
            res->flags |= l & ~PCI_BASE_ADDRESS_IO_MASK;
        }
        res->end = res->start + (unsigned long) sz;
        res->flags |= pci_calc_resource_flags(l);
        if (is_64bit_memory(l))
        {
            u32_t szhi, lhi;

            /* Size the upper dword of the 64-bit BAR the same way. */
            lhi = PciRead32(dev->bus, dev->devfn, reg+4);
            PciWrite32(dev->bus, dev->devfn, reg+4, ~0);
            szhi = PciRead32(dev->bus, dev->devfn, reg+4);
            PciWrite32(dev->bus, dev->devfn, reg+4, lhi);
            sz64 = ((u64_t)szhi << 32) | raw_sz;
            l64 = ((u64_t)lhi << 32) | l;
            sz64 = pci_size64(l64, sz64, PCI_BASE_ADDRESS_MEM_MASK);
            next++;     /* 64-bit BAR consumes two BAR slots */

#if BITS_PER_LONG == 64
            if (!sz64) {
                res->start = 0;
                res->end = 0;
                res->flags = 0;
                continue;
            }
            res->start = l64 & PCI_BASE_ADDRESS_MEM_MASK;
            res->end = res->start + sz64;
#else
            /* 32-bit kernel: a BAR bigger than 4G cannot be mapped. */
            if (sz64 > 0x100000000ULL) {
                printk(KERN_ERR "PCI: Unable to handle 64-bit "
                    "BAR for device %s\n", pci_name(dev));
                res->start = 0;
                res->flags = 0;
            }
            else if (lhi)
            {
                /* 64-bit wide address, treat as disabled */
                PciWrite32(dev->bus, dev->devfn, reg,
                    l & ~(u32_t)PCI_BASE_ADDRESS_MEM_MASK);
                PciWrite32(dev->bus, dev->devfn, reg+4, 0);
                res->start = 0;
                res->end = sz;
            }
#endif
        }
    }

    if ( rom )
    {
        dev->rom_base_reg = rom;
        res = &dev->resource[PCI_ROM_RESOURCE];

        /* Size the expansion ROM BAR; keep the enable bit clear while
         * sizing so the ROM is never actually decoded here. */
        l = PciRead32(dev->bus, dev->devfn, rom);
        PciWrite32(dev->bus, dev->devfn, rom, ~PCI_ROM_ADDRESS_ENABLE);
        sz = PciRead32(dev->bus, dev->devfn, rom);
        PciWrite32(dev->bus, dev->devfn, rom, l);

        if (l == 0xffffffff)
            l = 0;

        if (sz && sz != 0xffffffff)
        {
            sz = pci_size(l, sz, (u32_t)PCI_ROM_ADDRESS_MASK);

            if (sz)
            {
                res->flags = (l & IORESOURCE_ROM_ENABLE) |
                    IORESOURCE_MEM | IORESOURCE_PREFETCH |
                    IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
                res->start = l & PCI_ROM_ADDRESS_MASK;
                res->end = res->start + (unsigned long) sz;
            }
        }
    }
}
199 | 200 | ||
200 | static void pci_read_irq(struct pci_dev *dev) |
201 | static void pci_read_irq(struct pci_dev *dev) |
201 | { |
202 | { |
202 | u8_t irq; |
203 | u8_t irq; |
203 | 204 | ||
204 | irq = PciRead8(dev->bus, dev->devfn, PCI_INTERRUPT_PIN); |
205 | irq = PciRead8(dev->bus, dev->devfn, PCI_INTERRUPT_PIN); |
205 | dev->pin = irq; |
206 | dev->pin = irq; |
206 | if (irq) |
207 | if (irq) |
207 | PciRead8(dev->bus, dev->devfn, PCI_INTERRUPT_LINE); |
208 | PciRead8(dev->bus, dev->devfn, PCI_INTERRUPT_LINE); |
208 | dev->irq = irq; |
209 | dev->irq = irq; |
209 | }; |
210 | }; |
210 | 211 | ||
211 | 212 | ||
/* Fill in a freshly discovered device's class, IRQ, BARs and subsystem
 * IDs according to its configuration-header type.
 *
 * Returns 0 on success, -1 for an unknown header type. A class that
 * contradicts the header type is reported and reset to
 * PCI_CLASS_NOT_DEFINED, but the device is still accepted. */
static int pci_setup_device(struct pci_dev *dev)
{
    u32_t  class;

    /* PCI_CLASS_REVISION packs revision in the low byte, class code above. */
    class = PciRead32(dev->bus, dev->devfn, PCI_CLASS_REVISION);
    dev->revision = class & 0xff;
    class >>= 8;                /* upper 3 bytes */
    dev->class = class;

    /* "Unknown power state" */
//    dev->current_state = PCI_UNKNOWN;

    /* Early fixups, before probing the BARs */
//    pci_fixup_device(pci_fixup_early, dev);
    class = dev->class >> 8;    /* base class + subclass only */

    switch (dev->hdr_type)
    {
        case PCI_HEADER_TYPE_NORMAL:    /* standard header */
            if (class == PCI_CLASS_BRIDGE_PCI)
                goto bad;
            pci_read_irq(dev);
            pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
            dev->subsystem_vendor = PciRead16(dev->bus, dev->devfn,PCI_SUBSYSTEM_VENDOR_ID);
            dev->subsystem_device = PciRead16(dev->bus, dev->devfn, PCI_SUBSYSTEM_ID);

            /*
             * Do the ugly legacy mode stuff here rather than broken chip
             * quirk code. Legacy mode ATA controllers have fixed
             * addresses. These are not always echoed in BAR0-3, and
             * BAR0-3 in a few cases contain junk!
             */
            if (class == PCI_CLASS_STORAGE_IDE)
            {
                u8_t progif;

                progif = PciRead8(dev->bus, dev->devfn,PCI_CLASS_PROG);
                /* progif bit 0 clear: primary channel in legacy mode */
                if ((progif & 1) == 0)
                {
                    dev->resource[0].start = 0x1F0;
                    dev->resource[0].end = 0x1F7;
                    dev->resource[0].flags = LEGACY_IO_RESOURCE;
                    dev->resource[1].start = 0x3F6;
                    dev->resource[1].end = 0x3F6;
                    dev->resource[1].flags = LEGACY_IO_RESOURCE;
                }
                /* progif bit 2 clear: secondary channel in legacy mode */
                if ((progif & 4) == 0)
                {
                    dev->resource[2].start = 0x170;
                    dev->resource[2].end = 0x177;
                    dev->resource[2].flags = LEGACY_IO_RESOURCE;
                    dev->resource[3].start = 0x376;
                    dev->resource[3].end = 0x376;
                    dev->resource[3].flags = LEGACY_IO_RESOURCE;
                };
            }
            break;

        case PCI_HEADER_TYPE_BRIDGE:    /* bridge header */
            if (class != PCI_CLASS_BRIDGE_PCI)
                goto bad;
            /* The PCI-to-PCI bridge spec requires that subtractive
               decoding (i.e. transparent) bridge must have programming
               interface code of 0x01. */
            pci_read_irq(dev);
            dev->transparent = ((dev->class & 0xff) == 1);
            pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
            break;

        case PCI_HEADER_TYPE_CARDBUS:   /* CardBus bridge header */
            if (class != PCI_CLASS_BRIDGE_CARDBUS)
                goto bad;
            pci_read_irq(dev);
            pci_read_bases(dev, 1, 0);  /* CardBus header has a single BAR */
            dev->subsystem_vendor = PciRead16(dev->bus,
                                              dev->devfn,
                                              PCI_CB_SUBSYSTEM_VENDOR_ID);

            dev->subsystem_device = PciRead16(dev->bus,
                                              dev->devfn,
                                              PCI_CB_SUBSYSTEM_ID);
            break;

        default:                        /* unknown header */
            printk(KERN_ERR "PCI: device %s has unknown header type %02x, ignoring.\n",
                   pci_name(dev), dev->hdr_type);
            return -1;

        bad:
            printk(KERN_ERR "PCI: %s: class %x doesn't match header type %02x. Ignoring class.\n",
                   pci_name(dev), class, dev->hdr_type);
            dev->class = PCI_CLASS_NOT_DEFINED;
    }

    /* We found a fine healthy device, go go go... */

    return 0;
};
310 | 311 | ||
311 | static dev_t* pci_scan_device(u32_t bus, int devfn) |
312 | static pci_dev_t* pci_scan_device(u32_t bus, int devfn) |
312 | { |
313 | { |
313 | dev_t *dev; |
314 | pci_dev_t *dev; |
314 | 315 | ||
315 | u32_t id; |
316 | u32_t id; |
316 | u8_t hdr; |
317 | u8_t hdr; |
317 | 318 | ||
318 | int timeout = 10; |
319 | int timeout = 10; |
319 | 320 | ||
320 | id = PciRead32(bus,devfn, PCI_VENDOR_ID); |
321 | id = PciRead32(bus,devfn, PCI_VENDOR_ID); |
321 | 322 | ||
322 | /* some broken boards return 0 or ~0 if a slot is empty: */ |
323 | /* some broken boards return 0 or ~0 if a slot is empty: */ |
323 | if (id == 0xffffffff || id == 0x00000000 || |
324 | if (id == 0xffffffff || id == 0x00000000 || |
324 | id == 0x0000ffff || id == 0xffff0000) |
325 | id == 0x0000ffff || id == 0xffff0000) |
325 | return NULL; |
326 | return NULL; |
326 | 327 | ||
327 | while (id == 0xffff0001) |
328 | while (id == 0xffff0001) |
328 | { |
329 | { |
329 | 330 | ||
330 | delay(timeout/10); |
331 | delay(timeout/10); |
331 | timeout *= 2; |
332 | timeout *= 2; |
332 | 333 | ||
333 | id = PciRead32(bus, devfn, PCI_VENDOR_ID); |
334 | id = PciRead32(bus, devfn, PCI_VENDOR_ID); |
334 | 335 | ||
335 | /* Card hasn't responded in 60 seconds? Must be stuck. */ |
336 | /* Card hasn't responded in 60 seconds? Must be stuck. */ |
336 | if (timeout > 60 * 100) |
337 | if (timeout > 60 * 100) |
337 | { |
338 | { |
338 | printk(KERN_WARNING "Device %04x:%02x:%02x.%d not " |
339 | printk(KERN_WARNING "Device %04x:%02x:%02x.%d not " |
339 | "responding\n", bus,PCI_SLOT(devfn),PCI_FUNC(devfn)); |
340 | "responding\n", bus,PCI_SLOT(devfn),PCI_FUNC(devfn)); |
340 | return NULL; |
341 | return NULL; |
341 | } |
342 | } |
342 | }; |
343 | }; |
343 | 344 | ||
344 | hdr = PciRead8(bus, devfn, PCI_HEADER_TYPE); |
345 | hdr = PciRead8(bus, devfn, PCI_HEADER_TYPE); |
345 | 346 | ||
346 | dev = (dev_t*)kzalloc(sizeof(dev_t), 0); |
347 | dev = (pci_dev_t*)kzalloc(sizeof(dev_t), 0); |
347 | 348 | ||
348 | INIT_LIST_HEAD(&dev->link); |
349 | INIT_LIST_HEAD(&dev->link); |
349 | 350 | ||
350 | if(unlikely(dev == NULL)) |
351 | if(unlikely(dev == NULL)) |
351 | return NULL; |
352 | return NULL; |
352 | 353 | ||
353 | dev->pci_dev.bus = bus; |
354 | dev->pci_dev.bus = bus; |
354 | dev->pci_dev.devfn = devfn; |
355 | dev->pci_dev.devfn = devfn; |
355 | dev->pci_dev.hdr_type = hdr & 0x7f; |
356 | dev->pci_dev.hdr_type = hdr & 0x7f; |
356 | dev->pci_dev.multifunction = !!(hdr & 0x80); |
357 | dev->pci_dev.multifunction = !!(hdr & 0x80); |
357 | dev->pci_dev.vendor = id & 0xffff; |
358 | dev->pci_dev.vendor = id & 0xffff; |
358 | dev->pci_dev.device = (id >> 16) & 0xffff; |
359 | dev->pci_dev.device = (id >> 16) & 0xffff; |
359 | 360 | ||
360 | pci_setup_device(&dev->pci_dev); |
361 | pci_setup_device(&dev->pci_dev); |
361 | 362 | ||
362 | return dev; |
363 | return dev; |
363 | 364 | ||
364 | }; |
365 | }; |
365 | 366 | ||
366 | int pci_scan_slot(u32_t bus, int devfn) |
367 | int pci_scan_slot(u32_t bus, int devfn) |
367 | { |
368 | { |
368 | int func, nr = 0; |
369 | int func, nr = 0; |
369 | 370 | ||
370 | for (func = 0; func < 8; func++, devfn++) |
371 | for (func = 0; func < 8; func++, devfn++) |
371 | { |
372 | { |
372 | dev_t *dev; |
373 | pci_dev_t *dev; |
373 | 374 | ||
374 | dev = pci_scan_device(bus, devfn); |
375 | dev = pci_scan_device(bus, devfn); |
375 | if( dev ) |
376 | if( dev ) |
376 | { |
377 | { |
377 | list_add(&dev->link, &devices); |
378 | list_add(&dev->link, &devices); |
378 | 379 | ||
379 | nr++; |
380 | nr++; |
380 | 381 | ||
381 | /* |
382 | /* |
382 | * If this is a single function device, |
383 | * If this is a single function device, |
383 | * don't scan past the first function. |
384 | * don't scan past the first function. |
384 | */ |
385 | */ |
385 | if (!dev->pci_dev.multifunction) |
386 | if (!dev->pci_dev.multifunction) |
386 | { |
387 | { |
387 | if (func > 0) { |
388 | if (func > 0) { |
388 | dev->pci_dev.multifunction = 1; |
389 | dev->pci_dev.multifunction = 1; |
389 | } |
390 | } |
390 | else { |
391 | else { |
391 | break; |
392 | break; |
392 | } |
393 | } |
393 | } |
394 | } |
394 | } |
395 | } |
395 | else { |
396 | else { |
396 | if (func == 0) |
397 | if (func == 0) |
397 | break; |
398 | break; |
398 | } |
399 | } |
399 | }; |
400 | }; |
400 | 401 | ||
401 | return nr; |
402 | return nr; |
402 | }; |
403 | }; |
403 | 404 | ||
404 | 405 | ||
405 | void pci_scan_bus(u32_t bus) |
406 | void pci_scan_bus(u32_t bus) |
406 | { |
407 | { |
407 | u32_t devfn; |
408 | u32_t devfn; |
408 | dev_t *dev; |
409 | dev_t *dev; |
409 | 410 | ||
410 | 411 | ||
411 | for (devfn = 0; devfn < 0x100; devfn += 8) |
412 | for (devfn = 0; devfn < 0x100; devfn += 8) |
412 | pci_scan_slot(bus, devfn); |
413 | pci_scan_slot(bus, devfn); |
413 | 414 | ||
414 | } |
415 | } |
415 | 416 | ||
416 | int enum_pci_devices() |
417 | int enum_pci_devices() |
417 | { |
418 | { |
418 | dev_t *dev; |
419 | pci_dev_t *dev; |
419 | u32_t last_bus; |
420 | u32_t last_bus; |
420 | u32_t bus = 0 , devfn = 0; |
421 | u32_t bus = 0 , devfn = 0; |
421 | 422 | ||
422 | // list_initialize(&devices); |
423 | // list_initialize(&devices); |
423 | 424 | ||
424 | last_bus = PciApi(1); |
425 | last_bus = PciApi(1); |
425 | 426 | ||
426 | 427 | ||
427 | if( unlikely(last_bus == -1)) |
428 | if( unlikely(last_bus == -1)) |
428 | return -1; |
429 | return -1; |
429 | 430 | ||
430 | for(;bus <= last_bus; bus++) |
431 | for(;bus <= last_bus; bus++) |
431 | pci_scan_bus(bus); |
432 | pci_scan_bus(bus); |
432 | 433 | ||
433 | // for(dev = (dev_t*)devices.next; |
434 | // for(dev = (dev_t*)devices.next; |
434 | // &dev->link != &devices; |
435 | // &dev->link != &devices; |
435 | // dev = (dev_t*)dev->link.next) |
436 | // dev = (dev_t*)dev->link.next) |
436 | // { |
437 | // { |
437 | // dbgprintf("PCI device %x:%x bus:%x devfn:%x\n", |
438 | // dbgprintf("PCI device %x:%x bus:%x devfn:%x\n", |
438 | // dev->pci_dev.vendor, |
439 | // dev->pci_dev.vendor, |
439 | // dev->pci_dev.device, |
440 | // dev->pci_dev.device, |
440 | // dev->pci_dev.bus, |
441 | // dev->pci_dev.bus, |
441 | // dev->pci_dev.devfn); |
442 | // dev->pci_dev.devfn); |
442 | // |
443 | // |
443 | // } |
444 | // } |
444 | return 0; |
445 | return 0; |
445 | } |
446 | } |
446 | 447 | ||
447 | #define PCI_FIND_CAP_TTL 48 |
448 | #define PCI_FIND_CAP_TTL 48 |
448 | 449 | ||
449 | static int __pci_find_next_cap_ttl(unsigned int bus, unsigned int devfn, |
450 | static int __pci_find_next_cap_ttl(unsigned int bus, unsigned int devfn, |
450 | u8 pos, int cap, int *ttl) |
451 | u8 pos, int cap, int *ttl) |
451 | { |
452 | { |
452 | u8 id; |
453 | u8 id; |
453 | 454 | ||
454 | while ((*ttl)--) { |
455 | while ((*ttl)--) { |
455 | pos = PciRead8(bus, devfn, pos); |
456 | pos = PciRead8(bus, devfn, pos); |
456 | if (pos < 0x40) |
457 | if (pos < 0x40) |
457 | break; |
458 | break; |
458 | pos &= ~3; |
459 | pos &= ~3; |
459 | id = PciRead8(bus, devfn, pos + PCI_CAP_LIST_ID); |
460 | id = PciRead8(bus, devfn, pos + PCI_CAP_LIST_ID); |
460 | if (id == 0xff) |
461 | if (id == 0xff) |
461 | break; |
462 | break; |
462 | if (id == cap) |
463 | if (id == cap) |
463 | return pos; |
464 | return pos; |
464 | pos += PCI_CAP_LIST_NEXT; |
465 | pos += PCI_CAP_LIST_NEXT; |
465 | } |
466 | } |
466 | return 0; |
467 | return 0; |
467 | } |
468 | } |
468 | 469 | ||
469 | static int __pci_find_next_cap(unsigned int bus, unsigned int devfn, |
470 | static int __pci_find_next_cap(unsigned int bus, unsigned int devfn, |
470 | u8 pos, int cap) |
471 | u8 pos, int cap) |
471 | { |
472 | { |
472 | int ttl = PCI_FIND_CAP_TTL; |
473 | int ttl = PCI_FIND_CAP_TTL; |
473 | 474 | ||
474 | return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl); |
475 | return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl); |
475 | } |
476 | } |
476 | 477 | ||
477 | static int __pci_bus_find_cap_start(unsigned int bus, |
478 | static int __pci_bus_find_cap_start(unsigned int bus, |
478 | unsigned int devfn, u8 hdr_type) |
479 | unsigned int devfn, u8 hdr_type) |
479 | { |
480 | { |
480 | u16 status; |
481 | u16 status; |
481 | 482 | ||
482 | status = PciRead16(bus, devfn, PCI_STATUS); |
483 | status = PciRead16(bus, devfn, PCI_STATUS); |
483 | if (!(status & PCI_STATUS_CAP_LIST)) |
484 | if (!(status & PCI_STATUS_CAP_LIST)) |
484 | return 0; |
485 | return 0; |
485 | 486 | ||
486 | switch (hdr_type) { |
487 | switch (hdr_type) { |
487 | case PCI_HEADER_TYPE_NORMAL: |
488 | case PCI_HEADER_TYPE_NORMAL: |
488 | case PCI_HEADER_TYPE_BRIDGE: |
489 | case PCI_HEADER_TYPE_BRIDGE: |
489 | return PCI_CAPABILITY_LIST; |
490 | return PCI_CAPABILITY_LIST; |
490 | case PCI_HEADER_TYPE_CARDBUS: |
491 | case PCI_HEADER_TYPE_CARDBUS: |
491 | return PCI_CB_CAPABILITY_LIST; |
492 | return PCI_CB_CAPABILITY_LIST; |
492 | default: |
493 | default: |
493 | return 0; |
494 | return 0; |
494 | } |
495 | } |
495 | 496 | ||
496 | return 0; |
497 | return 0; |
497 | } |
498 | } |
498 | 499 | ||
499 | 500 | ||
500 | int pci_find_capability(struct pci_dev *dev, int cap) |
501 | int pci_find_capability(struct pci_dev *dev, int cap) |
501 | { |
502 | { |
502 | int pos; |
503 | int pos; |
503 | 504 | ||
504 | pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type); |
505 | pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type); |
505 | if (pos) |
506 | if (pos) |
506 | pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap); |
507 | pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap); |
507 | 508 | ||
508 | return pos; |
509 | return pos; |
509 | } |
510 | } |
510 | 511 | ||
511 | 512 | ||
512 | #if 0 |
513 | #if 0 |
513 | /** |
514 | /** |
514 | * pci_set_power_state - Set the power state of a PCI device |
515 | * pci_set_power_state - Set the power state of a PCI device |
515 | * @dev: PCI device to be suspended |
516 | * @dev: PCI device to be suspended |
516 | * @state: PCI power state (D0, D1, D2, D3hot, D3cold) we're entering |
517 | * @state: PCI power state (D0, D1, D2, D3hot, D3cold) we're entering |
517 | * |
518 | * |
518 | * Transition a device to a new power state, using the Power Management |
519 | * Transition a device to a new power state, using the Power Management |
519 | * Capabilities in the device's config space. |
520 | * Capabilities in the device's config space. |
520 | * |
521 | * |
521 | * RETURN VALUE: |
522 | * RETURN VALUE: |
522 | * -EINVAL if trying to enter a lower state than we're already in. |
523 | * -EINVAL if trying to enter a lower state than we're already in. |
523 | * 0 if we're already in the requested state. |
524 | * 0 if we're already in the requested state. |
524 | * -EIO if device does not support PCI PM. |
525 | * -EIO if device does not support PCI PM. |
525 | * 0 if we can successfully change the power state. |
526 | * 0 if we can successfully change the power state. |
526 | */ |
527 | */ |
int
pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
    int pm, need_restore = 0;   /* pm: config-space offset of the PM capability */
    u16 pmcsr, pmc;             /* PM control/status and PM capabilities registers */

    /* bound the state we're entering */
    if (state > PCI_D3hot)
        state = PCI_D3hot;

    /*
     * If the device or the parent bridge can't support PCI PM, ignore
     * the request if we're doing anything besides putting it into D0
     * (which would only happen on boot).
     */
    if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
        return 0;

    /* find PCI PM capability in list */
    pm = pci_find_capability(dev, PCI_CAP_ID_PM);

    /* abort if the device doesn't support PM capabilities */
    if (!pm)
        return -EIO;

    /* Validate current state:
     * Can enter D0 from any state, but if we can only go deeper
     * to sleep if we're already in a low power state
     */
    if (state != PCI_D0 && dev->current_state > state) {
        printk(KERN_ERR "%s(): %s: state=%d, current state=%d\n",
            __FUNCTION__, pci_name(dev), state, dev->current_state);
        return -EINVAL;
    } else if (dev->current_state == state)
        return 0;        /* we're already there */

    /* Only PM capability register layouts with version <= 3 are handled. */
    pci_read_config_word(dev,pm + PCI_PM_PMC,&pmc);
    if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
        printk(KERN_DEBUG
               "PCI: %s has unsupported PM cap regs version (%u)\n",
               pci_name(dev), pmc & PCI_PM_CAP_VER_MASK);
        return -EIO;
    }

    /* check if this device supports the desired state */
    if (state == PCI_D1 && !(pmc & PCI_PM_CAP_D1))
        return -EIO;
    else if (state == PCI_D2 && !(pmc & PCI_PM_CAP_D2))
        return -EIO;

    pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);

    /* If we're (effectively) in D3, force entire word to 0.
     * This doesn't affect PME_Status, disables PME_En, and
     * sets PowerState to 0.
     */
    switch (dev->current_state) {
    case PCI_D0:
    case PCI_D1:
    case PCI_D2:
        /* Known low-power-capable state: just rewrite the state field. */
        pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
        pmcsr |= state;
        break;
    case PCI_UNKNOWN: /* Boot-up */
        /* Device left in D3hot by firmware without the no-soft-reset
         * guarantee may internally reset on the way to D0, so remember
         * to restore its BARs afterwards. */
        if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
         && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
            need_restore = 1;
        /* Fall-through: force to D0 */
    default:
        pmcsr = 0;
        break;
    }

    /* enter specified state */
    pci_write_config_word(dev, pm + PCI_PM_CTRL, pmcsr);

    /* Mandatory power management transition delays */
    /* see PCI PM 1.1 5.6.1 table 18 */
    if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
        msleep(pci_pm_d3_delay);
    else if (state == PCI_D2 || dev->current_state == PCI_D2)
        udelay(200);

    /*
     * Give firmware a chance to be called, such as ACPI _PRx, _PSx
     * Firmware method after native method ?
     */
    if (platform_pci_set_power_state)
        platform_pci_set_power_state(dev, state);

    dev->current_state = state;

    /* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
     * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
     * from D3hot to D0 _may_ perform an internal reset, thereby
     * going to "D0 Uninitialized" rather than "D0 Initialized".
     * For example, at least some versions of the 3c905B and the
     * 3c556B exhibit this behaviour.
     *
     * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
     * devices in a D3hot state at boot.  Consequently, we need to
     * restore at least the BARs so that the device will be
     * accessible to its driver.
     */
    if (need_restore)
        pci_restore_bars(dev);

    return 0;
}
637 | #endif |
638 | #endif |
638 | 639 | ||
639 | int pcibios_enable_resources(struct pci_dev *dev, int mask) |
640 | int pcibios_enable_resources(struct pci_dev *dev, int mask) |
640 | { |
641 | { |
641 | u16_t cmd, old_cmd; |
642 | u16_t cmd, old_cmd; |
642 | int idx; |
643 | int idx; |
643 | struct resource *r; |
644 | struct resource *r; |
644 | 645 | ||
645 | cmd = PciRead16(dev->bus, dev->devfn, PCI_COMMAND); |
646 | cmd = PciRead16(dev->bus, dev->devfn, PCI_COMMAND); |
646 | old_cmd = cmd; |
647 | old_cmd = cmd; |
647 | for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) |
648 | for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) |
648 | { |
649 | { |
649 | /* Only set up the requested stuff */ |
650 | /* Only set up the requested stuff */ |
650 | if (!(mask & (1 << idx))) |
651 | if (!(mask & (1 << idx))) |
651 | continue; |
652 | continue; |
652 | 653 | ||
653 | r = &dev->resource[idx]; |
654 | r = &dev->resource[idx]; |
654 | if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM))) |
655 | if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM))) |
655 | continue; |
656 | continue; |
656 | if ((idx == PCI_ROM_RESOURCE) && |
657 | if ((idx == PCI_ROM_RESOURCE) && |
657 | (!(r->flags & IORESOURCE_ROM_ENABLE))) |
658 | (!(r->flags & IORESOURCE_ROM_ENABLE))) |
658 | continue; |
659 | continue; |
659 | if (!r->start && r->end) { |
660 | if (!r->start && r->end) { |
660 | printk(KERN_ERR "PCI: Device %s not available " |
661 | printk(KERN_ERR "PCI: Device %s not available " |
661 | "because of resource %d collisions\n", |
662 | "because of resource %d collisions\n", |
662 | pci_name(dev), idx); |
663 | pci_name(dev), idx); |
663 | return -EINVAL; |
664 | return -EINVAL; |
664 | } |
665 | } |
665 | if (r->flags & IORESOURCE_IO) |
666 | if (r->flags & IORESOURCE_IO) |
666 | cmd |= PCI_COMMAND_IO; |
667 | cmd |= PCI_COMMAND_IO; |
667 | if (r->flags & IORESOURCE_MEM) |
668 | if (r->flags & IORESOURCE_MEM) |
668 | cmd |= PCI_COMMAND_MEMORY; |
669 | cmd |= PCI_COMMAND_MEMORY; |
669 | } |
670 | } |
670 | if (cmd != old_cmd) { |
671 | if (cmd != old_cmd) { |
671 | printk("PCI: Enabling device %s (%04x -> %04x)\n", |
672 | printk("PCI: Enabling device %s (%04x -> %04x)\n", |
672 | pci_name(dev), old_cmd, cmd); |
673 | pci_name(dev), old_cmd, cmd); |
673 | PciWrite16(dev->bus, dev->devfn, PCI_COMMAND, cmd); |
674 | PciWrite16(dev->bus, dev->devfn, PCI_COMMAND, cmd); |
674 | } |
675 | } |
675 | return 0; |
676 | return 0; |
676 | } |
677 | } |
677 | 678 | ||
678 | 679 | ||
/* Arch hook: enable decode for the requested BARs.  IRQ routing for the
 * non-MSI case is not wired up in this port (left commented out below). */
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
    int err = pcibios_enable_resources(dev, mask);

    if (err < 0)
        return err;

    // if (!dev->msi_enabled)
    //     return pcibios_enable_irq(dev);
    return 0;
}
691 | 692 | ||
/* Low-level enable step shared by pci_enable_device():
 * enables decode for the BARs in @bars via the pcibios layer.
 *
 * Power-state forcing and post-enable fixups from the original Linux
 * code are not ported yet and stay disabled below.
 *
 * Returns 0 on success or the negative error from
 * pcibios_enable_device() (e.g. -EINVAL on a resource collision). */
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
    int err;

    // err = pci_set_power_state(dev, PCI_D0);
    // if (err < 0 && err != -EIO)
    //     return err;

    err = pcibios_enable_device(dev, bars);
    /* Propagate failures instead of silently returning success —
     * previously err was computed and then discarded. */
    if (err < 0)
        return err;

    // pci_fixup_device(pci_fixup_enable, dev);

    return 0;
}
706 | 707 | ||
707 | 708 | ||
708 | static int __pci_enable_device_flags(struct pci_dev *dev, |
709 | static int __pci_enable_device_flags(struct pci_dev *dev, |
709 | resource_size_t flags) |
710 | resource_size_t flags) |
710 | { |
711 | { |
711 | int err; |
712 | int err; |
712 | int i, bars = 0; |
713 | int i, bars = 0; |
713 | 714 | ||
714 | // if (atomic_add_return(1, &dev->enable_cnt) > 1) |
715 | // if (atomic_add_return(1, &dev->enable_cnt) > 1) |
715 | // return 0; /* already enabled */ |
716 | // return 0; /* already enabled */ |
716 | 717 | ||
717 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) |
718 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) |
718 | if (dev->resource[i].flags & flags) |
719 | if (dev->resource[i].flags & flags) |
719 | bars |= (1 << i); |
720 | bars |= (1 << i); |
720 | 721 | ||
721 | err = do_pci_enable_device(dev, bars); |
722 | err = do_pci_enable_device(dev, bars); |
722 | // if (err < 0) |
723 | // if (err < 0) |
723 | // atomic_dec(&dev->enable_cnt); |
724 | // atomic_dec(&dev->enable_cnt); |
724 | return err; |
725 | return err; |
725 | } |
726 | } |
726 | 727 | ||
727 | 728 | ||
728 | /** |
729 | /** |
729 | * pci_enable_device - Initialize device before it's used by a driver. |
730 | * pci_enable_device - Initialize device before it's used by a driver. |
730 | * @dev: PCI device to be initialized |
731 | * @dev: PCI device to be initialized |
731 | * |
732 | * |
732 | * Initialize device before it's used by a driver. Ask low-level code |
733 | * Initialize device before it's used by a driver. Ask low-level code |
733 | * to enable I/O and memory. Wake up the device if it was suspended. |
734 | * to enable I/O and memory. Wake up the device if it was suspended. |
734 | * Beware, this function can fail. |
735 | * Beware, this function can fail. |
735 | * |
736 | * |
736 | * Note we don't actually enable the device many times if we call |
737 | * Note we don't actually enable the device many times if we call |
737 | * this function repeatedly (we just increment the count). |
738 | * this function repeatedly (we just increment the count). |
738 | */ |
739 | */ |
739 | int pci_enable_device(struct pci_dev *dev) |
740 | int pci_enable_device(struct pci_dev *dev) |
740 | { |
741 | { |
741 | return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO); |
742 | return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO); |
742 | } |
743 | } |
743 | 744 | ||
744 | 745 | ||
745 | 746 | ||
746 | struct pci_device_id* find_pci_device(dev_t* pdev, struct pci_device_id *idlist) |
747 | struct pci_device_id* find_pci_device(pci_dev_t* pdev, struct pci_device_id *idlist) |
747 | { |
748 | { |
748 | dev_t *dev; |
749 | pci_dev_t *dev; |
749 | struct pci_device_id *ent; |
750 | struct pci_device_id *ent; |
750 | 751 | ||
751 | for(dev = (dev_t*)devices.next; |
752 | for(dev = (pci_dev_t*)devices.next; |
752 | &dev->link != &devices; |
753 | &dev->link != &devices; |
753 | dev = (dev_t*)dev->link.next) |
754 | dev = (pci_dev_t*)dev->link.next) |
754 | { |
755 | { |
755 | if( dev->pci_dev.vendor != idlist->vendor ) |
756 | if( dev->pci_dev.vendor != idlist->vendor ) |
756 | continue; |
757 | continue; |
757 | 758 | ||
758 | for(ent = idlist; ent->vendor != 0; ent++) |
759 | for(ent = idlist; ent->vendor != 0; ent++) |
759 | { |
760 | { |
760 | if(unlikely(ent->device == dev->pci_dev.device)) |
761 | if(unlikely(ent->device == dev->pci_dev.device)) |
761 | { |
762 | { |
762 | pdev->pci_dev = dev->pci_dev; |
763 | pdev->pci_dev = dev->pci_dev; |
763 | return ent; |
764 | return ent; |
764 | } |
765 | } |
765 | }; |
766 | }; |
766 | } |
767 | } |
767 | 768 | ||
768 | return NULL; |
769 | return NULL; |
769 | }; |
770 | }; |
770 | 771 | ||
771 | 772 | ||
772 | 773 | ||
/**
 * pci_map_rom - map a PCI ROM to kernel space
 * @pdev: pointer to pci device struct
 * @size: pointer to receive size of pci window over ROM
 * @return: kernel virtual pointer to image of ROM
 *
 * Map a PCI ROM into kernel space. If ROM is boot video ROM,
 * the shadow BIOS copy will be returned instead of the
 * actual ROM.
 *
 * NOTE(review): in this port the generic PCI-ROM path below is compiled
 * out (#if 0); the live code always returns the legacy VBIOS shadow at
 * C000:0, regardless of which @pdev is passed.
 */

#define legacyBIOSLocation 0xC0000
#define OS_BASE            0x80000000   /* kernel linear base — TODO confirm against memory map */

void *pci_map_rom(struct pci_dev *pdev, size_t *size)
{
    struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];  /* unused while #if 0 below */
    u32_t start;                                               /* unused while #if 0 below */
    void *rom;

#if 0
    /*
     * IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy
     * memory map if the VGA enable bit of the Bridge Control register is
     * set for embedded VGA.
     */
    if (res->flags & IORESOURCE_ROM_SHADOW) {
        /* primary video rom always starts here */
        start = (u32_t)0xC0000;
        *size = 0x20000; /* cover C000:0 through E000:0 */
    } else {
        if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) {
            *size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
            return (void *)(unsigned long)
                pci_resource_start(pdev, PCI_ROM_RESOURCE);
        } else {
            /* assign the ROM an address if it doesn't have one */
            //if (res->parent == NULL &&
            //    pci_assign_resource(pdev,PCI_ROM_RESOURCE))
            //     return NULL;
            start = pci_resource_start(pdev, PCI_ROM_RESOURCE);
            *size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
            if (*size == 0)
                return NULL;

            /* Enable ROM space decodes */
            if (pci_enable_rom(pdev))
                return NULL;
        }
    }

    rom = ioremap(start, *size);
    if (!rom) {
        /* restore enable if ioremap fails */
        if (!(res->flags & (IORESOURCE_ROM_ENABLE |
                            IORESOURCE_ROM_SHADOW |
                            IORESOURCE_ROM_COPY)))
            pci_disable_rom(pdev);
        return NULL;
    }

    /*
     * Try to find the true size of the ROM since sometimes the PCI window
     * size is much larger than the actual size of the ROM.
     * True size is important if the ROM is going to be copied.
     */
    *size = pci_get_rom_size(rom, *size);

#endif

    unsigned char tmp[32];      /* first 32 bytes of the shadow ROM image */
    rom = NULL;

    dbgprintf("Getting BIOS copy from legacy VBIOS location\n");
    memcpy(tmp,(char*)(OS_BASE+legacyBIOSLocation), 32);
    /* Byte 2 of the image is treated as the ROM length in 512-byte
     * units (presumably the expansion-ROM header size field). */
    *size = tmp[2] * 512;
    if (*size > 0x10000 )
    {
        /* Reject images claiming more than 64 KiB. */
        *size = 0;
        dbgprintf("Invalid BIOS length field\n");
    }
    else
        rom = (void*)( OS_BASE+legacyBIOSLocation);

    return rom;
}
859 | 860 | ||
860 | 861 | ||
861 | int |
862 | int |
862 | pci_set_dma_mask(struct pci_dev *dev, u64 mask) |
863 | pci_set_dma_mask(struct pci_dev *dev, u64 mask) |
863 | { |
864 | { |
864 | // if (!pci_dma_supported(dev, mask)) |
865 | // if (!pci_dma_supported(dev, mask)) |
865 | // return -EIO; |
866 | // return -EIO; |
866 | 867 | ||
867 | dev->dma_mask = mask; |
868 | dev->dma_mask = mask; |
868 | 869 | ||
869 | return 0; |
870 | return 0; |
870 | }>><>>>>>><>>>=>>>><>><>><>>4)><4)> |
871 | }>><>>>>>><>>>=>>>><>><>><>>4)><4)> |