Rev 1119 | Rev 1246 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 1119 | Rev 1120 | ||
---|---|---|---|
1 | #include |
1 | #include |
2 | #include |
2 | #include |
3 | #include |
3 | #include |
4 | 4 | ||
5 | link_t devices; |
5 | static LIST_HEAD(devices); |
6 | 6 | ||
7 | static dev_t* pci_scan_device(u32_t bus, int devfn); |
7 | static dev_t* pci_scan_device(u32_t bus, int devfn); |
8 | 8 | ||
9 | 9 | ||
10 | /* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */ |
10 | /* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */ |
11 | #define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */ |
11 | #define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */ |
12 | 12 | ||
13 | #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) |
13 | #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) |
14 | 14 | ||
15 | /* |
15 | /* |
16 | * Translate the low bits of the PCI base |
16 | * Translate the low bits of the PCI base |
17 | * to the resource type |
17 | * to the resource type |
18 | */ |
18 | */ |
19 | static inline unsigned int pci_calc_resource_flags(unsigned int flags) |
19 | static inline unsigned int pci_calc_resource_flags(unsigned int flags) |
20 | { |
20 | { |
21 | if (flags & PCI_BASE_ADDRESS_SPACE_IO) |
21 | if (flags & PCI_BASE_ADDRESS_SPACE_IO) |
22 | return IORESOURCE_IO; |
22 | return IORESOURCE_IO; |
23 | 23 | ||
24 | if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH) |
24 | if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH) |
25 | return IORESOURCE_MEM | IORESOURCE_PREFETCH; |
25 | return IORESOURCE_MEM | IORESOURCE_PREFETCH; |
26 | 26 | ||
27 | return IORESOURCE_MEM; |
27 | return IORESOURCE_MEM; |
28 | } |
28 | } |
29 | 29 | ||
30 | 30 | ||
31 | static u32_t pci_size(u32_t base, u32_t maxbase, u32_t mask) |
31 | static u32_t pci_size(u32_t base, u32_t maxbase, u32_t mask) |
32 | { |
32 | { |
33 | u32_t size = mask & maxbase; /* Find the significant bits */ |
33 | u32_t size = mask & maxbase; /* Find the significant bits */ |
34 | 34 | ||
35 | if (!size) |
35 | if (!size) |
36 | return 0; |
36 | return 0; |
37 | 37 | ||
38 | /* Get the lowest of them to find the decode size, and |
38 | /* Get the lowest of them to find the decode size, and |
39 | from that the extent. */ |
39 | from that the extent. */ |
40 | size = (size & ~(size-1)) - 1; |
40 | size = (size & ~(size-1)) - 1; |
41 | 41 | ||
42 | /* base == maxbase can be valid only if the BAR has |
42 | /* base == maxbase can be valid only if the BAR has |
43 | already been programmed with all 1s. */ |
43 | already been programmed with all 1s. */ |
44 | if (base == maxbase && ((base | size) & mask) != mask) |
44 | if (base == maxbase && ((base | size) & mask) != mask) |
45 | return 0; |
45 | return 0; |
46 | 46 | ||
47 | return size; |
47 | return size; |
48 | } |
48 | } |
49 | 49 | ||
50 | static u64_t pci_size64(u64_t base, u64_t maxbase, u64_t mask) |
50 | static u64_t pci_size64(u64_t base, u64_t maxbase, u64_t mask) |
51 | { |
51 | { |
52 | u64_t size = mask & maxbase; /* Find the significant bits */ |
52 | u64_t size = mask & maxbase; /* Find the significant bits */ |
53 | 53 | ||
54 | if (!size) |
54 | if (!size) |
55 | return 0; |
55 | return 0; |
56 | 56 | ||
57 | /* Get the lowest of them to find the decode size, and |
57 | /* Get the lowest of them to find the decode size, and |
58 | from that the extent. */ |
58 | from that the extent. */ |
59 | size = (size & ~(size-1)) - 1; |
59 | size = (size & ~(size-1)) - 1; |
60 | 60 | ||
61 | /* base == maxbase can be valid only if the BAR has |
61 | /* base == maxbase can be valid only if the BAR has |
62 | already been programmed with all 1s. */ |
62 | already been programmed with all 1s. */ |
63 | if (base == maxbase && ((base | size) & mask) != mask) |
63 | if (base == maxbase && ((base | size) & mask) != mask) |
64 | return 0; |
64 | return 0; |
65 | 65 | ||
66 | return size; |
66 | return size; |
67 | } |
67 | } |
68 | 68 | ||
69 | static inline int is_64bit_memory(u32_t mask) |
69 | static inline int is_64bit_memory(u32_t mask) |
70 | { |
70 | { |
71 | if ((mask & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) == |
71 | if ((mask & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) == |
72 | (PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64)) |
72 | (PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64)) |
73 | return 1; |
73 | return 1; |
74 | return 0; |
74 | return 0; |
75 | } |
75 | } |
76 | 76 | ||
77 | static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) |
77 | static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) |
78 | { |
78 | { |
79 | u32_t pos, reg, next; |
79 | u32_t pos, reg, next; |
80 | u32_t l, sz; |
80 | u32_t l, sz; |
81 | struct resource *res; |
81 | struct resource *res; |
82 | 82 | ||
83 | for(pos=0; pos < howmany; pos = next) |
83 | for(pos=0; pos < howmany; pos = next) |
84 | { |
84 | { |
85 | u64_t l64; |
85 | u64_t l64; |
86 | u64_t sz64; |
86 | u64_t sz64; |
87 | u32_t raw_sz; |
87 | u32_t raw_sz; |
88 | 88 | ||
89 | next = pos + 1; |
89 | next = pos + 1; |
90 | 90 | ||
91 | res = &dev->resource[pos]; |
91 | res = &dev->resource[pos]; |
92 | 92 | ||
93 | reg = PCI_BASE_ADDRESS_0 + (pos << 2); |
93 | reg = PCI_BASE_ADDRESS_0 + (pos << 2); |
94 | l = PciRead32(dev->bus, dev->devfn, reg); |
94 | l = PciRead32(dev->bus, dev->devfn, reg); |
95 | PciWrite32(dev->bus, dev->devfn, reg, ~0); |
95 | PciWrite32(dev->bus, dev->devfn, reg, ~0); |
96 | sz = PciRead32(dev->bus, dev->devfn, reg); |
96 | sz = PciRead32(dev->bus, dev->devfn, reg); |
97 | PciWrite32(dev->bus, dev->devfn, reg, l); |
97 | PciWrite32(dev->bus, dev->devfn, reg, l); |
98 | 98 | ||
99 | if (!sz || sz == 0xffffffff) |
99 | if (!sz || sz == 0xffffffff) |
100 | continue; |
100 | continue; |
101 | 101 | ||
102 | if (l == 0xffffffff) |
102 | if (l == 0xffffffff) |
103 | l = 0; |
103 | l = 0; |
104 | 104 | ||
105 | raw_sz = sz; |
105 | raw_sz = sz; |
106 | if ((l & PCI_BASE_ADDRESS_SPACE) == |
106 | if ((l & PCI_BASE_ADDRESS_SPACE) == |
107 | PCI_BASE_ADDRESS_SPACE_MEMORY) |
107 | PCI_BASE_ADDRESS_SPACE_MEMORY) |
108 | { |
108 | { |
109 | sz = pci_size(l, sz, (u32_t)PCI_BASE_ADDRESS_MEM_MASK); |
109 | sz = pci_size(l, sz, (u32_t)PCI_BASE_ADDRESS_MEM_MASK); |
110 | /* |
110 | /* |
111 | * For 64bit prefetchable memory sz could be 0, if the |
111 | * For 64bit prefetchable memory sz could be 0, if the |
112 | * real size is bigger than 4G, so we need to check |
112 | * real size is bigger than 4G, so we need to check |
113 | * szhi for that. |
113 | * szhi for that. |
114 | */ |
114 | */ |
115 | if (!is_64bit_memory(l) && !sz) |
115 | if (!is_64bit_memory(l) && !sz) |
116 | continue; |
116 | continue; |
117 | res->start = l & PCI_BASE_ADDRESS_MEM_MASK; |
117 | res->start = l & PCI_BASE_ADDRESS_MEM_MASK; |
118 | res->flags |= l & ~PCI_BASE_ADDRESS_MEM_MASK; |
118 | res->flags |= l & ~PCI_BASE_ADDRESS_MEM_MASK; |
119 | } |
119 | } |
120 | else { |
120 | else { |
121 | sz = pci_size(l, sz, PCI_BASE_ADDRESS_IO_MASK & 0xffff); |
121 | sz = pci_size(l, sz, PCI_BASE_ADDRESS_IO_MASK & 0xffff); |
122 | if (!sz) |
122 | if (!sz) |
123 | continue; |
123 | continue; |
124 | res->start = l & PCI_BASE_ADDRESS_IO_MASK; |
124 | res->start = l & PCI_BASE_ADDRESS_IO_MASK; |
125 | res->flags |= l & ~PCI_BASE_ADDRESS_IO_MASK; |
125 | res->flags |= l & ~PCI_BASE_ADDRESS_IO_MASK; |
126 | } |
126 | } |
127 | res->end = res->start + (unsigned long) sz; |
127 | res->end = res->start + (unsigned long) sz; |
128 | res->flags |= pci_calc_resource_flags(l); |
128 | res->flags |= pci_calc_resource_flags(l); |
129 | if (is_64bit_memory(l)) |
129 | if (is_64bit_memory(l)) |
130 | { |
130 | { |
131 | u32_t szhi, lhi; |
131 | u32_t szhi, lhi; |
132 | 132 | ||
133 | lhi = PciRead32(dev->bus, dev->devfn, reg+4); |
133 | lhi = PciRead32(dev->bus, dev->devfn, reg+4); |
134 | PciWrite32(dev->bus, dev->devfn, reg+4, ~0); |
134 | PciWrite32(dev->bus, dev->devfn, reg+4, ~0); |
135 | szhi = PciRead32(dev->bus, dev->devfn, reg+4); |
135 | szhi = PciRead32(dev->bus, dev->devfn, reg+4); |
136 | PciWrite32(dev->bus, dev->devfn, reg+4, lhi); |
136 | PciWrite32(dev->bus, dev->devfn, reg+4, lhi); |
137 | sz64 = ((u64_t)szhi << 32) | raw_sz; |
137 | sz64 = ((u64_t)szhi << 32) | raw_sz; |
138 | l64 = ((u64_t)lhi << 32) | l; |
138 | l64 = ((u64_t)lhi << 32) | l; |
139 | sz64 = pci_size64(l64, sz64, PCI_BASE_ADDRESS_MEM_MASK); |
139 | sz64 = pci_size64(l64, sz64, PCI_BASE_ADDRESS_MEM_MASK); |
140 | next++; |
140 | next++; |
141 | 141 | ||
142 | #if BITS_PER_LONG == 64 |
142 | #if BITS_PER_LONG == 64 |
143 | if (!sz64) { |
143 | if (!sz64) { |
144 | res->start = 0; |
144 | res->start = 0; |
145 | res->end = 0; |
145 | res->end = 0; |
146 | res->flags = 0; |
146 | res->flags = 0; |
147 | continue; |
147 | continue; |
148 | } |
148 | } |
149 | res->start = l64 & PCI_BASE_ADDRESS_MEM_MASK; |
149 | res->start = l64 & PCI_BASE_ADDRESS_MEM_MASK; |
150 | res->end = res->start + sz64; |
150 | res->end = res->start + sz64; |
151 | #else |
151 | #else |
152 | if (sz64 > 0x100000000ULL) { |
152 | if (sz64 > 0x100000000ULL) { |
153 | printk(KERN_ERR "PCI: Unable to handle 64-bit " |
153 | printk(KERN_ERR "PCI: Unable to handle 64-bit " |
154 | "BAR for device %s\n", pci_name(dev)); |
154 | "BAR for device %s\n", pci_name(dev)); |
155 | res->start = 0; |
155 | res->start = 0; |
156 | res->flags = 0; |
156 | res->flags = 0; |
157 | } |
157 | } |
158 | else if (lhi) |
158 | else if (lhi) |
159 | { |
159 | { |
160 | /* 64-bit wide address, treat as disabled */ |
160 | /* 64-bit wide address, treat as disabled */ |
161 | PciWrite32(dev->bus, dev->devfn, reg, |
161 | PciWrite32(dev->bus, dev->devfn, reg, |
162 | l & ~(u32_t)PCI_BASE_ADDRESS_MEM_MASK); |
162 | l & ~(u32_t)PCI_BASE_ADDRESS_MEM_MASK); |
163 | PciWrite32(dev->bus, dev->devfn, reg+4, 0); |
163 | PciWrite32(dev->bus, dev->devfn, reg+4, 0); |
164 | res->start = 0; |
164 | res->start = 0; |
165 | res->end = sz; |
165 | res->end = sz; |
166 | } |
166 | } |
167 | #endif |
167 | #endif |
168 | } |
168 | } |
169 | } |
169 | } |
170 | 170 | ||
171 | if ( rom ) |
171 | if ( rom ) |
172 | { |
172 | { |
173 | dev->rom_base_reg = rom; |
173 | dev->rom_base_reg = rom; |
174 | res = &dev->resource[PCI_ROM_RESOURCE]; |
174 | res = &dev->resource[PCI_ROM_RESOURCE]; |
175 | 175 | ||
176 | l = PciRead32(dev->bus, dev->devfn, rom); |
176 | l = PciRead32(dev->bus, dev->devfn, rom); |
177 | PciWrite32(dev->bus, dev->devfn, rom, ~PCI_ROM_ADDRESS_ENABLE); |
177 | PciWrite32(dev->bus, dev->devfn, rom, ~PCI_ROM_ADDRESS_ENABLE); |
178 | sz = PciRead32(dev->bus, dev->devfn, rom); |
178 | sz = PciRead32(dev->bus, dev->devfn, rom); |
179 | PciWrite32(dev->bus, dev->devfn, rom, l); |
179 | PciWrite32(dev->bus, dev->devfn, rom, l); |
180 | 180 | ||
181 | if (l == 0xffffffff) |
181 | if (l == 0xffffffff) |
182 | l = 0; |
182 | l = 0; |
183 | 183 | ||
184 | if (sz && sz != 0xffffffff) |
184 | if (sz && sz != 0xffffffff) |
185 | { |
185 | { |
186 | sz = pci_size(l, sz, (u32_t)PCI_ROM_ADDRESS_MASK); |
186 | sz = pci_size(l, sz, (u32_t)PCI_ROM_ADDRESS_MASK); |
187 | 187 | ||
188 | if (sz) |
188 | if (sz) |
189 | { |
189 | { |
190 | res->flags = (l & IORESOURCE_ROM_ENABLE) | |
190 | res->flags = (l & IORESOURCE_ROM_ENABLE) | |
191 | IORESOURCE_MEM | IORESOURCE_PREFETCH | |
191 | IORESOURCE_MEM | IORESOURCE_PREFETCH | |
192 | IORESOURCE_READONLY | IORESOURCE_CACHEABLE; |
192 | IORESOURCE_READONLY | IORESOURCE_CACHEABLE; |
193 | res->start = l & PCI_ROM_ADDRESS_MASK; |
193 | res->start = l & PCI_ROM_ADDRESS_MASK; |
194 | res->end = res->start + (unsigned long) sz; |
194 | res->end = res->start + (unsigned long) sz; |
195 | } |
195 | } |
196 | } |
196 | } |
197 | } |
197 | } |
198 | } |
198 | } |
199 | 199 | ||
200 | static void pci_read_irq(struct pci_dev *dev) |
200 | static void pci_read_irq(struct pci_dev *dev) |
201 | { |
201 | { |
202 | u8_t irq; |
202 | u8_t irq; |
203 | 203 | ||
204 | irq = PciRead8(dev->bus, dev->devfn, PCI_INTERRUPT_PIN); |
204 | irq = PciRead8(dev->bus, dev->devfn, PCI_INTERRUPT_PIN); |
205 | dev->pin = irq; |
205 | dev->pin = irq; |
206 | if (irq) |
206 | if (irq) |
207 | PciRead8(dev->bus, dev->devfn, PCI_INTERRUPT_LINE); |
207 | PciRead8(dev->bus, dev->devfn, PCI_INTERRUPT_LINE); |
208 | dev->irq = irq; |
208 | dev->irq = irq; |
209 | }; |
209 | }; |
210 | 210 | ||
211 | 211 | ||
212 | static int pci_setup_device(struct pci_dev *dev) |
212 | static int pci_setup_device(struct pci_dev *dev) |
213 | { |
213 | { |
214 | u32_t class; |
214 | u32_t class; |
215 | 215 | ||
216 | class = PciRead32(dev->bus, dev->devfn, PCI_CLASS_REVISION); |
216 | class = PciRead32(dev->bus, dev->devfn, PCI_CLASS_REVISION); |
217 | dev->revision = class & 0xff; |
217 | dev->revision = class & 0xff; |
218 | class >>= 8; /* upper 3 bytes */ |
218 | class >>= 8; /* upper 3 bytes */ |
219 | dev->class = class; |
219 | dev->class = class; |
220 | 220 | ||
221 | /* "Unknown power state" */ |
221 | /* "Unknown power state" */ |
222 | // dev->current_state = PCI_UNKNOWN; |
222 | // dev->current_state = PCI_UNKNOWN; |
223 | 223 | ||
224 | /* Early fixups, before probing the BARs */ |
224 | /* Early fixups, before probing the BARs */ |
225 | // pci_fixup_device(pci_fixup_early, dev); |
225 | // pci_fixup_device(pci_fixup_early, dev); |
226 | class = dev->class >> 8; |
226 | class = dev->class >> 8; |
227 | 227 | ||
228 | switch (dev->hdr_type) |
228 | switch (dev->hdr_type) |
229 | { |
229 | { |
230 | case PCI_HEADER_TYPE_NORMAL: /* standard header */ |
230 | case PCI_HEADER_TYPE_NORMAL: /* standard header */ |
231 | if (class == PCI_CLASS_BRIDGE_PCI) |
231 | if (class == PCI_CLASS_BRIDGE_PCI) |
232 | goto bad; |
232 | goto bad; |
233 | pci_read_irq(dev); |
233 | pci_read_irq(dev); |
234 | pci_read_bases(dev, 6, PCI_ROM_ADDRESS); |
234 | pci_read_bases(dev, 6, PCI_ROM_ADDRESS); |
235 | dev->subsystem_vendor = PciRead16(dev->bus, dev->devfn,PCI_SUBSYSTEM_VENDOR_ID); |
235 | dev->subsystem_vendor = PciRead16(dev->bus, dev->devfn,PCI_SUBSYSTEM_VENDOR_ID); |
236 | dev->subsystem_device = PciRead16(dev->bus, dev->devfn, PCI_SUBSYSTEM_ID); |
236 | dev->subsystem_device = PciRead16(dev->bus, dev->devfn, PCI_SUBSYSTEM_ID); |
237 | 237 | ||
238 | /* |
238 | /* |
239 | * Do the ugly legacy mode stuff here rather than broken chip |
239 | * Do the ugly legacy mode stuff here rather than broken chip |
240 | * quirk code. Legacy mode ATA controllers have fixed |
240 | * quirk code. Legacy mode ATA controllers have fixed |
241 | * addresses. These are not always echoed in BAR0-3, and |
241 | * addresses. These are not always echoed in BAR0-3, and |
242 | * BAR0-3 in a few cases contain junk! |
242 | * BAR0-3 in a few cases contain junk! |
243 | */ |
243 | */ |
244 | if (class == PCI_CLASS_STORAGE_IDE) |
244 | if (class == PCI_CLASS_STORAGE_IDE) |
245 | { |
245 | { |
246 | u8_t progif; |
246 | u8_t progif; |
247 | 247 | ||
248 | progif = PciRead8(dev->bus, dev->devfn,PCI_CLASS_PROG); |
248 | progif = PciRead8(dev->bus, dev->devfn,PCI_CLASS_PROG); |
249 | if ((progif & 1) == 0) |
249 | if ((progif & 1) == 0) |
250 | { |
250 | { |
251 | dev->resource[0].start = 0x1F0; |
251 | dev->resource[0].start = 0x1F0; |
252 | dev->resource[0].end = 0x1F7; |
252 | dev->resource[0].end = 0x1F7; |
253 | dev->resource[0].flags = LEGACY_IO_RESOURCE; |
253 | dev->resource[0].flags = LEGACY_IO_RESOURCE; |
254 | dev->resource[1].start = 0x3F6; |
254 | dev->resource[1].start = 0x3F6; |
255 | dev->resource[1].end = 0x3F6; |
255 | dev->resource[1].end = 0x3F6; |
256 | dev->resource[1].flags = LEGACY_IO_RESOURCE; |
256 | dev->resource[1].flags = LEGACY_IO_RESOURCE; |
257 | } |
257 | } |
258 | if ((progif & 4) == 0) |
258 | if ((progif & 4) == 0) |
259 | { |
259 | { |
260 | dev->resource[2].start = 0x170; |
260 | dev->resource[2].start = 0x170; |
261 | dev->resource[2].end = 0x177; |
261 | dev->resource[2].end = 0x177; |
262 | dev->resource[2].flags = LEGACY_IO_RESOURCE; |
262 | dev->resource[2].flags = LEGACY_IO_RESOURCE; |
263 | dev->resource[3].start = 0x376; |
263 | dev->resource[3].start = 0x376; |
264 | dev->resource[3].end = 0x376; |
264 | dev->resource[3].end = 0x376; |
265 | dev->resource[3].flags = LEGACY_IO_RESOURCE; |
265 | dev->resource[3].flags = LEGACY_IO_RESOURCE; |
266 | }; |
266 | }; |
267 | } |
267 | } |
268 | break; |
268 | break; |
269 | 269 | ||
270 | case PCI_HEADER_TYPE_BRIDGE: /* bridge header */ |
270 | case PCI_HEADER_TYPE_BRIDGE: /* bridge header */ |
271 | if (class != PCI_CLASS_BRIDGE_PCI) |
271 | if (class != PCI_CLASS_BRIDGE_PCI) |
272 | goto bad; |
272 | goto bad; |
273 | /* The PCI-to-PCI bridge spec requires that subtractive |
273 | /* The PCI-to-PCI bridge spec requires that subtractive |
274 | decoding (i.e. transparent) bridge must have programming |
274 | decoding (i.e. transparent) bridge must have programming |
275 | interface code of 0x01. */ |
275 | interface code of 0x01. */ |
276 | pci_read_irq(dev); |
276 | pci_read_irq(dev); |
277 | dev->transparent = ((dev->class & 0xff) == 1); |
277 | dev->transparent = ((dev->class & 0xff) == 1); |
278 | pci_read_bases(dev, 2, PCI_ROM_ADDRESS1); |
278 | pci_read_bases(dev, 2, PCI_ROM_ADDRESS1); |
279 | break; |
279 | break; |
280 | 280 | ||
281 | case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */ |
281 | case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */ |
282 | if (class != PCI_CLASS_BRIDGE_CARDBUS) |
282 | if (class != PCI_CLASS_BRIDGE_CARDBUS) |
283 | goto bad; |
283 | goto bad; |
284 | pci_read_irq(dev); |
284 | pci_read_irq(dev); |
285 | pci_read_bases(dev, 1, 0); |
285 | pci_read_bases(dev, 1, 0); |
286 | dev->subsystem_vendor = PciRead16(dev->bus, |
286 | dev->subsystem_vendor = PciRead16(dev->bus, |
287 | dev->devfn, |
287 | dev->devfn, |
288 | PCI_CB_SUBSYSTEM_VENDOR_ID); |
288 | PCI_CB_SUBSYSTEM_VENDOR_ID); |
289 | 289 | ||
290 | dev->subsystem_device = PciRead16(dev->bus, |
290 | dev->subsystem_device = PciRead16(dev->bus, |
291 | dev->devfn, |
291 | dev->devfn, |
292 | PCI_CB_SUBSYSTEM_ID); |
292 | PCI_CB_SUBSYSTEM_ID); |
293 | break; |
293 | break; |
294 | 294 | ||
295 | default: /* unknown header */ |
295 | default: /* unknown header */ |
296 | printk(KERN_ERR "PCI: device %s has unknown header type %02x, ignoring.\n", |
296 | printk(KERN_ERR "PCI: device %s has unknown header type %02x, ignoring.\n", |
297 | pci_name(dev), dev->hdr_type); |
297 | pci_name(dev), dev->hdr_type); |
298 | return -1; |
298 | return -1; |
299 | 299 | ||
300 | bad: |
300 | bad: |
301 | printk(KERN_ERR "PCI: %s: class %x doesn't match header type %02x. Ignoring class.\n", |
301 | printk(KERN_ERR "PCI: %s: class %x doesn't match header type %02x. Ignoring class.\n", |
302 | pci_name(dev), class, dev->hdr_type); |
302 | pci_name(dev), class, dev->hdr_type); |
303 | dev->class = PCI_CLASS_NOT_DEFINED; |
303 | dev->class = PCI_CLASS_NOT_DEFINED; |
304 | } |
304 | } |
305 | 305 | ||
306 | /* We found a fine healthy device, go go go... */ |
306 | /* We found a fine healthy device, go go go... */ |
307 | 307 | ||
308 | return 0; |
308 | return 0; |
309 | }; |
309 | }; |
310 | 310 | ||
311 | static dev_t* pci_scan_device(u32_t bus, int devfn) |
311 | static dev_t* pci_scan_device(u32_t bus, int devfn) |
312 | { |
312 | { |
313 | dev_t *dev; |
313 | dev_t *dev; |
314 | 314 | ||
315 | u32_t id; |
315 | u32_t id; |
316 | u8_t hdr; |
316 | u8_t hdr; |
317 | 317 | ||
318 | int timeout = 10; |
318 | int timeout = 10; |
319 | 319 | ||
320 | id = PciRead32(bus,devfn, PCI_VENDOR_ID); |
320 | id = PciRead32(bus,devfn, PCI_VENDOR_ID); |
321 | 321 | ||
322 | /* some broken boards return 0 or ~0 if a slot is empty: */ |
322 | /* some broken boards return 0 or ~0 if a slot is empty: */ |
323 | if (id == 0xffffffff || id == 0x00000000 || |
323 | if (id == 0xffffffff || id == 0x00000000 || |
324 | id == 0x0000ffff || id == 0xffff0000) |
324 | id == 0x0000ffff || id == 0xffff0000) |
325 | return NULL; |
325 | return NULL; |
326 | 326 | ||
327 | while (id == 0xffff0001) |
327 | while (id == 0xffff0001) |
328 | { |
328 | { |
329 | 329 | ||
330 | delay(timeout/10); |
330 | delay(timeout/10); |
331 | timeout *= 2; |
331 | timeout *= 2; |
332 | 332 | ||
333 | id = PciRead32(bus, devfn, PCI_VENDOR_ID); |
333 | id = PciRead32(bus, devfn, PCI_VENDOR_ID); |
334 | 334 | ||
335 | /* Card hasn't responded in 60 seconds? Must be stuck. */ |
335 | /* Card hasn't responded in 60 seconds? Must be stuck. */ |
336 | if (timeout > 60 * 100) |
336 | if (timeout > 60 * 100) |
337 | { |
337 | { |
338 | printk(KERN_WARNING "Device %04x:%02x:%02x.%d not " |
338 | printk(KERN_WARNING "Device %04x:%02x:%02x.%d not " |
339 | "responding\n", bus,PCI_SLOT(devfn),PCI_FUNC(devfn)); |
339 | "responding\n", bus,PCI_SLOT(devfn),PCI_FUNC(devfn)); |
340 | return NULL; |
340 | return NULL; |
341 | } |
341 | } |
342 | }; |
342 | }; |
343 | 343 | ||
344 | hdr = PciRead8(bus, devfn, PCI_HEADER_TYPE); |
344 | hdr = PciRead8(bus, devfn, PCI_HEADER_TYPE); |
345 | 345 | ||
346 | dev = (dev_t*)malloc(sizeof(dev_t)); |
346 | dev = (dev_t*)malloc(sizeof(dev_t)); |
347 | 347 | ||
348 | link_initialize(&dev->link); |
348 | INIT_LIST_HEAD(&dev->link); |
349 | 349 | ||
350 | if(unlikely(dev == NULL)) |
350 | if(unlikely(dev == NULL)) |
351 | return NULL; |
351 | return NULL; |
352 | 352 | ||
353 | dev->pci_dev.bus = bus; |
353 | dev->pci_dev.bus = bus; |
354 | dev->pci_dev.devfn = devfn; |
354 | dev->pci_dev.devfn = devfn; |
355 | dev->pci_dev.hdr_type = hdr & 0x7f; |
355 | dev->pci_dev.hdr_type = hdr & 0x7f; |
356 | dev->pci_dev.multifunction = !!(hdr & 0x80); |
356 | dev->pci_dev.multifunction = !!(hdr & 0x80); |
357 | dev->pci_dev.vendor = id & 0xffff; |
357 | dev->pci_dev.vendor = id & 0xffff; |
358 | dev->pci_dev.device = (id >> 16) & 0xffff; |
358 | dev->pci_dev.device = (id >> 16) & 0xffff; |
359 | 359 | ||
360 | pci_setup_device(&dev->pci_dev); |
360 | pci_setup_device(&dev->pci_dev); |
361 | 361 | ||
362 | return dev; |
362 | return dev; |
363 | 363 | ||
364 | }; |
364 | }; |
365 | 365 | ||
366 | int pci_scan_slot(u32_t bus, int devfn) |
366 | int pci_scan_slot(u32_t bus, int devfn) |
367 | { |
367 | { |
368 | int func, nr = 0; |
368 | int func, nr = 0; |
369 | 369 | ||
370 | for (func = 0; func < 8; func++, devfn++) |
370 | for (func = 0; func < 8; func++, devfn++) |
371 | { |
371 | { |
372 | dev_t *dev; |
372 | dev_t *dev; |
373 | 373 | ||
374 | dev = pci_scan_device(bus, devfn); |
374 | dev = pci_scan_device(bus, devfn); |
375 | if( dev ) |
375 | if( dev ) |
376 | { |
376 | { |
377 | list_append(&dev->link, &devices); |
377 | list_add(&dev->link, &devices); |
378 | 378 | ||
379 | nr++; |
379 | nr++; |
380 | 380 | ||
381 | /* |
381 | /* |
382 | * If this is a single function device, |
382 | * If this is a single function device, |
383 | * don't scan past the first function. |
383 | * don't scan past the first function. |
384 | */ |
384 | */ |
385 | if (!dev->pci_dev.multifunction) |
385 | if (!dev->pci_dev.multifunction) |
386 | { |
386 | { |
387 | if (func > 0) { |
387 | if (func > 0) { |
388 | dev->pci_dev.multifunction = 1; |
388 | dev->pci_dev.multifunction = 1; |
389 | } |
389 | } |
390 | else { |
390 | else { |
391 | break; |
391 | break; |
392 | } |
392 | } |
393 | } |
393 | } |
394 | } |
394 | } |
395 | else { |
395 | else { |
396 | if (func == 0) |
396 | if (func == 0) |
397 | break; |
397 | break; |
398 | } |
398 | } |
399 | }; |
399 | }; |
400 | 400 | ||
401 | return nr; |
401 | return nr; |
402 | }; |
402 | }; |
403 | 403 | ||
404 | 404 | ||
405 | void pci_scan_bus(u32_t bus) |
405 | void pci_scan_bus(u32_t bus) |
406 | { |
406 | { |
407 | u32_t devfn; |
407 | u32_t devfn; |
408 | dev_t *dev; |
408 | dev_t *dev; |
409 | 409 | ||
410 | 410 | ||
411 | for (devfn = 0; devfn < 0x100; devfn += 8) |
411 | for (devfn = 0; devfn < 0x100; devfn += 8) |
412 | pci_scan_slot(bus, devfn); |
412 | pci_scan_slot(bus, devfn); |
413 | 413 | ||
414 | } |
414 | } |
415 | 415 | ||
416 | int enum_pci_devices() |
416 | int enum_pci_devices() |
417 | { |
417 | { |
418 | dev_t *dev; |
418 | dev_t *dev; |
419 | u32_t last_bus; |
419 | u32_t last_bus; |
420 | u32_t bus = 0 , devfn = 0; |
420 | u32_t bus = 0 , devfn = 0; |
421 | 421 | ||
422 | list_initialize(&devices); |
422 | // list_initialize(&devices); |
423 | 423 | ||
424 | last_bus = PciApi(1); |
424 | last_bus = PciApi(1); |
425 | 425 | ||
426 | 426 | ||
427 | if( unlikely(last_bus == -1)) |
427 | if( unlikely(last_bus == -1)) |
428 | return -1; |
428 | return -1; |
429 | 429 | ||
430 | for(;bus <= last_bus; bus++) |
430 | for(;bus <= last_bus; bus++) |
431 | pci_scan_bus(bus); |
431 | pci_scan_bus(bus); |
432 | 432 | ||
433 | // for(dev = (dev_t*)devices.next; |
433 | // for(dev = (dev_t*)devices.next; |
434 | // &dev->link != &devices; |
434 | // &dev->link != &devices; |
435 | // dev = (dev_t*)dev->link.next) |
435 | // dev = (dev_t*)dev->link.next) |
436 | // { |
436 | // { |
437 | // dbgprintf("PCI device %x:%x bus:%x devfn:%x\n", |
437 | // dbgprintf("PCI device %x:%x bus:%x devfn:%x\n", |
438 | // dev->pci_dev.vendor, |
438 | // dev->pci_dev.vendor, |
439 | // dev->pci_dev.device, |
439 | // dev->pci_dev.device, |
440 | // dev->pci_dev.bus, |
440 | // dev->pci_dev.bus, |
441 | // dev->pci_dev.devfn); |
441 | // dev->pci_dev.devfn); |
442 | // |
442 | // |
443 | // } |
443 | // } |
444 | return 0; |
444 | return 0; |
445 | } |
445 | } |
446 | 446 | ||
447 | #if 0 |
447 | #if 0 |
448 | /** |
448 | /** |
449 | * pci_set_power_state - Set the power state of a PCI device |
449 | * pci_set_power_state - Set the power state of a PCI device |
450 | * @dev: PCI device to be suspended |
450 | * @dev: PCI device to be suspended |
451 | * @state: PCI power state (D0, D1, D2, D3hot, D3cold) we're entering |
451 | * @state: PCI power state (D0, D1, D2, D3hot, D3cold) we're entering |
452 | * |
452 | * |
453 | * Transition a device to a new power state, using the Power Management |
453 | * Transition a device to a new power state, using the Power Management |
454 | * Capabilities in the device's config space. |
454 | * Capabilities in the device's config space. |
455 | * |
455 | * |
456 | * RETURN VALUE: |
456 | * RETURN VALUE: |
457 | * -EINVAL if trying to enter a lower state than we're already in. |
457 | * -EINVAL if trying to enter a lower state than we're already in. |
458 | * 0 if we're already in the requested state. |
458 | * 0 if we're already in the requested state. |
459 | * -EIO if device does not support PCI PM. |
459 | * -EIO if device does not support PCI PM. |
460 | * 0 if we can successfully change the power state. |
460 | * 0 if we can successfully change the power state. |
461 | */ |
461 | */ |
462 | int |
462 | int |
463 | pci_set_power_state(struct pci_dev *dev, pci_power_t state) |
463 | pci_set_power_state(struct pci_dev *dev, pci_power_t state) |
464 | { |
464 | { |
465 | int pm, need_restore = 0; |
465 | int pm, need_restore = 0; |
466 | u16 pmcsr, pmc; |
466 | u16 pmcsr, pmc; |
467 | 467 | ||
468 | /* bound the state we're entering */ |
468 | /* bound the state we're entering */ |
469 | if (state > PCI_D3hot) |
469 | if (state > PCI_D3hot) |
470 | state = PCI_D3hot; |
470 | state = PCI_D3hot; |
471 | 471 | ||
472 | /* |
472 | /* |
473 | * If the device or the parent bridge can't support PCI PM, ignore |
473 | * If the device or the parent bridge can't support PCI PM, ignore |
474 | * the request if we're doing anything besides putting it into D0 |
474 | * the request if we're doing anything besides putting it into D0 |
475 | * (which would only happen on boot). |
475 | * (which would only happen on boot). |
476 | */ |
476 | */ |
477 | if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev)) |
477 | if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev)) |
478 | return 0; |
478 | return 0; |
479 | 479 | ||
480 | /* find PCI PM capability in list */ |
480 | /* find PCI PM capability in list */ |
481 | pm = pci_find_capability(dev, PCI_CAP_ID_PM); |
481 | pm = pci_find_capability(dev, PCI_CAP_ID_PM); |
482 | 482 | ||
483 | /* abort if the device doesn't support PM capabilities */ |
483 | /* abort if the device doesn't support PM capabilities */ |
484 | if (!pm) |
484 | if (!pm) |
485 | return -EIO; |
485 | return -EIO; |
486 | 486 | ||
487 | /* Validate current state: |
487 | /* Validate current state: |
488 | * Can enter D0 from any state, but if we can only go deeper |
488 | * Can enter D0 from any state, but if we can only go deeper |
489 | * to sleep if we're already in a low power state |
489 | * to sleep if we're already in a low power state |
490 | */ |
490 | */ |
491 | if (state != PCI_D0 && dev->current_state > state) { |
491 | if (state != PCI_D0 && dev->current_state > state) { |
492 | printk(KERN_ERR "%s(): %s: state=%d, current state=%d\n", |
492 | printk(KERN_ERR "%s(): %s: state=%d, current state=%d\n", |
493 | __FUNCTION__, pci_name(dev), state, dev->current_state); |
493 | __FUNCTION__, pci_name(dev), state, dev->current_state); |
494 | return -EINVAL; |
494 | return -EINVAL; |
495 | } else if (dev->current_state == state) |
495 | } else if (dev->current_state == state) |
496 | return 0; /* we're already there */ |
496 | return 0; /* we're already there */ |
497 | 497 | ||
498 | 498 | ||
499 | pci_read_config_word(dev,pm + PCI_PM_PMC,&pmc); |
499 | pci_read_config_word(dev,pm + PCI_PM_PMC,&pmc); |
500 | if ((pmc & PCI_PM_CAP_VER_MASK) > 3) { |
500 | if ((pmc & PCI_PM_CAP_VER_MASK) > 3) { |
501 | printk(KERN_DEBUG |
501 | printk(KERN_DEBUG |
502 | "PCI: %s has unsupported PM cap regs version (%u)\n", |
502 | "PCI: %s has unsupported PM cap regs version (%u)\n", |
503 | pci_name(dev), pmc & PCI_PM_CAP_VER_MASK); |
503 | pci_name(dev), pmc & PCI_PM_CAP_VER_MASK); |
504 | return -EIO; |
504 | return -EIO; |
505 | } |
505 | } |
506 | 506 | ||
507 | /* check if this device supports the desired state */ |
507 | /* check if this device supports the desired state */ |
508 | if (state == PCI_D1 && !(pmc & PCI_PM_CAP_D1)) |
508 | if (state == PCI_D1 && !(pmc & PCI_PM_CAP_D1)) |
509 | return -EIO; |
509 | return -EIO; |
510 | else if (state == PCI_D2 && !(pmc & PCI_PM_CAP_D2)) |
510 | else if (state == PCI_D2 && !(pmc & PCI_PM_CAP_D2)) |
511 | return -EIO; |
511 | return -EIO; |
512 | 512 | ||
513 | pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr); |
513 | pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr); |
514 | 514 | ||
515 | /* If we're (effectively) in D3, force entire word to 0. |
515 | /* If we're (effectively) in D3, force entire word to 0. |
516 | * This doesn't affect PME_Status, disables PME_En, and |
516 | * This doesn't affect PME_Status, disables PME_En, and |
517 | * sets PowerState to 0. |
517 | * sets PowerState to 0. |
518 | */ |
518 | */ |
519 | switch (dev->current_state) { |
519 | switch (dev->current_state) { |
520 | case PCI_D0: |
520 | case PCI_D0: |
521 | case PCI_D1: |
521 | case PCI_D1: |
522 | case PCI_D2: |
522 | case PCI_D2: |
523 | pmcsr &= ~PCI_PM_CTRL_STATE_MASK; |
523 | pmcsr &= ~PCI_PM_CTRL_STATE_MASK; |
524 | pmcsr |= state; |
524 | pmcsr |= state; |
525 | break; |
525 | break; |
526 | case PCI_UNKNOWN: /* Boot-up */ |
526 | case PCI_UNKNOWN: /* Boot-up */ |
527 | if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot |
527 | if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot |
528 | && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET)) |
528 | && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET)) |
529 | need_restore = 1; |
529 | need_restore = 1; |
530 | /* Fall-through: force to D0 */ |
530 | /* Fall-through: force to D0 */ |
531 | default: |
531 | default: |
532 | pmcsr = 0; |
532 | pmcsr = 0; |
533 | break; |
533 | break; |
534 | } |
534 | } |
535 | 535 | ||
536 | /* enter specified state */ |
536 | /* enter specified state */ |
537 | pci_write_config_word(dev, pm + PCI_PM_CTRL, pmcsr); |
537 | pci_write_config_word(dev, pm + PCI_PM_CTRL, pmcsr); |
538 | 538 | ||
539 | /* Mandatory power management transition delays */ |
539 | /* Mandatory power management transition delays */ |
540 | /* see PCI PM 1.1 5.6.1 table 18 */ |
540 | /* see PCI PM 1.1 5.6.1 table 18 */ |
541 | if (state == PCI_D3hot || dev->current_state == PCI_D3hot) |
541 | if (state == PCI_D3hot || dev->current_state == PCI_D3hot) |
542 | msleep(pci_pm_d3_delay); |
542 | msleep(pci_pm_d3_delay); |
543 | else if (state == PCI_D2 || dev->current_state == PCI_D2) |
543 | else if (state == PCI_D2 || dev->current_state == PCI_D2) |
544 | udelay(200); |
544 | udelay(200); |
545 | 545 | ||
546 | /* |
546 | /* |
547 | * Give firmware a chance to be called, such as ACPI _PRx, _PSx |
547 | * Give firmware a chance to be called, such as ACPI _PRx, _PSx |
548 | * Firmware method after native method ? |
548 | * Firmware method after native method ? |
549 | */ |
549 | */ |
550 | if (platform_pci_set_power_state) |
550 | if (platform_pci_set_power_state) |
551 | platform_pci_set_power_state(dev, state); |
551 | platform_pci_set_power_state(dev, state); |
552 | 552 | ||
553 | dev->current_state = state; |
553 | dev->current_state = state; |
554 | 554 | ||
555 | /* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT |
555 | /* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT |
556 | * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning |
556 | * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning |
557 | * from D3hot to D0 _may_ perform an internal reset, thereby |
557 | * from D3hot to D0 _may_ perform an internal reset, thereby |
558 | * going to "D0 Uninitialized" rather than "D0 Initialized". |
558 | * going to "D0 Uninitialized" rather than "D0 Initialized". |
559 | * For example, at least some versions of the 3c905B and the |
559 | * For example, at least some versions of the 3c905B and the |
560 | * 3c556B exhibit this behaviour. |
560 | * 3c556B exhibit this behaviour. |
561 | * |
561 | * |
562 | * At least some laptop BIOSen (e.g. the Thinkpad T21) leave |
562 | * At least some laptop BIOSen (e.g. the Thinkpad T21) leave |
563 | * devices in a D3hot state at boot. Consequently, we need to |
563 | * devices in a D3hot state at boot. Consequently, we need to |
564 | * restore at least the BARs so that the device will be |
564 | * restore at least the BARs so that the device will be |
565 | * accessible to its driver. |
565 | * accessible to its driver. |
566 | */ |
566 | */ |
567 | if (need_restore) |
567 | if (need_restore) |
568 | pci_restore_bars(dev); |
568 | pci_restore_bars(dev); |
569 | 569 | ||
570 | return 0; |
570 | return 0; |
571 | } |
571 | } |
572 | #endif |
572 | #endif |
573 | 573 | ||
574 | int pcibios_enable_resources(struct pci_dev *dev, int mask) |
574 | int pcibios_enable_resources(struct pci_dev *dev, int mask) |
575 | { |
575 | { |
576 | u16_t cmd, old_cmd; |
576 | u16_t cmd, old_cmd; |
577 | int idx; |
577 | int idx; |
578 | struct resource *r; |
578 | struct resource *r; |
579 | 579 | ||
580 | cmd = PciRead16(dev->bus, dev->devfn, PCI_COMMAND); |
580 | cmd = PciRead16(dev->bus, dev->devfn, PCI_COMMAND); |
581 | old_cmd = cmd; |
581 | old_cmd = cmd; |
582 | for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) |
582 | for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) |
583 | { |
583 | { |
584 | /* Only set up the requested stuff */ |
584 | /* Only set up the requested stuff */ |
585 | if (!(mask & (1 << idx))) |
585 | if (!(mask & (1 << idx))) |
586 | continue; |
586 | continue; |
587 | 587 | ||
588 | r = &dev->resource[idx]; |
588 | r = &dev->resource[idx]; |
589 | if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM))) |
589 | if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM))) |
590 | continue; |
590 | continue; |
591 | if ((idx == PCI_ROM_RESOURCE) && |
591 | if ((idx == PCI_ROM_RESOURCE) && |
592 | (!(r->flags & IORESOURCE_ROM_ENABLE))) |
592 | (!(r->flags & IORESOURCE_ROM_ENABLE))) |
593 | continue; |
593 | continue; |
594 | if (!r->start && r->end) { |
594 | if (!r->start && r->end) { |
595 | printk(KERN_ERR "PCI: Device %s not available " |
595 | printk(KERN_ERR "PCI: Device %s not available " |
596 | "because of resource %d collisions\n", |
596 | "because of resource %d collisions\n", |
597 | pci_name(dev), idx); |
597 | pci_name(dev), idx); |
598 | return -EINVAL; |
598 | return -EINVAL; |
599 | } |
599 | } |
600 | if (r->flags & IORESOURCE_IO) |
600 | if (r->flags & IORESOURCE_IO) |
601 | cmd |= PCI_COMMAND_IO; |
601 | cmd |= PCI_COMMAND_IO; |
602 | if (r->flags & IORESOURCE_MEM) |
602 | if (r->flags & IORESOURCE_MEM) |
603 | cmd |= PCI_COMMAND_MEMORY; |
603 | cmd |= PCI_COMMAND_MEMORY; |
604 | } |
604 | } |
605 | if (cmd != old_cmd) { |
605 | if (cmd != old_cmd) { |
606 | printk("PCI: Enabling device %s (%04x -> %04x)\n", |
606 | printk("PCI: Enabling device %s (%04x -> %04x)\n", |
607 | pci_name(dev), old_cmd, cmd); |
607 | pci_name(dev), old_cmd, cmd); |
608 | PciWrite16(dev->bus, dev->devfn, PCI_COMMAND, cmd); |
608 | PciWrite16(dev->bus, dev->devfn, PCI_COMMAND, cmd); |
609 | } |
609 | } |
610 | return 0; |
610 | return 0; |
611 | } |
611 | } |
612 | 612 | ||
613 | 613 | ||
/* Enable the resources selected by @mask and (eventually) route the
 * device's IRQ.  IRQ setup is not ported yet — see the commented call
 * below.  Returns 0 on success or a negative error code. */
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
    int err = pcibios_enable_resources(dev, mask);

    if (err < 0)
        return err;

    // if (!dev->msi_enabled)
    //     return pcibios_enable_irq(dev);
    return 0;
}
625 | 625 | ||
626 | 626 | ||
/* Low-level device enable: power-state handling is not ported yet, so
 * only the BAR decode bits selected by @bars are switched on.
 *
 * Fix: the result of pcibios_enable_device() was stored in err but the
 * function unconditionally returned 0, silently swallowing -EINVAL on
 * resource collisions.  The error is now propagated to the caller
 * (__pci_enable_device_flags passes it straight through).
 */
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
    int err;

    // err = pci_set_power_state(dev, PCI_D0);   /* not ported yet */
    // if (err < 0 && err != -EIO)
    //     return err;

    err = pcibios_enable_device(dev, bars);
    if (err < 0)
        return err;

    // pci_fixup_device(pci_fixup_enable, dev);  /* not ported yet */

    return 0;
}
641 | 641 | ||
642 | 642 | ||
643 | static int __pci_enable_device_flags(struct pci_dev *dev, |
643 | static int __pci_enable_device_flags(struct pci_dev *dev, |
644 | resource_size_t flags) |
644 | resource_size_t flags) |
645 | { |
645 | { |
646 | int err; |
646 | int err; |
647 | int i, bars = 0; |
647 | int i, bars = 0; |
648 | 648 | ||
649 | // if (atomic_add_return(1, &dev->enable_cnt) > 1) |
649 | // if (atomic_add_return(1, &dev->enable_cnt) > 1) |
650 | // return 0; /* already enabled */ |
650 | // return 0; /* already enabled */ |
651 | 651 | ||
652 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) |
652 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) |
653 | if (dev->resource[i].flags & flags) |
653 | if (dev->resource[i].flags & flags) |
654 | bars |= (1 << i); |
654 | bars |= (1 << i); |
655 | 655 | ||
656 | err = do_pci_enable_device(dev, bars); |
656 | err = do_pci_enable_device(dev, bars); |
657 | // if (err < 0) |
657 | // if (err < 0) |
658 | // atomic_dec(&dev->enable_cnt); |
658 | // atomic_dec(&dev->enable_cnt); |
659 | return err; |
659 | return err; |
660 | } |
660 | } |
661 | 661 | ||
662 | 662 | ||
663 | /** |
663 | /** |
664 | * pci_enable_device - Initialize device before it's used by a driver. |
664 | * pci_enable_device - Initialize device before it's used by a driver. |
665 | * @dev: PCI device to be initialized |
665 | * @dev: PCI device to be initialized |
666 | * |
666 | * |
667 | * Initialize device before it's used by a driver. Ask low-level code |
667 | * Initialize device before it's used by a driver. Ask low-level code |
668 | * to enable I/O and memory. Wake up the device if it was suspended. |
668 | * to enable I/O and memory. Wake up the device if it was suspended. |
669 | * Beware, this function can fail. |
669 | * Beware, this function can fail. |
670 | * |
670 | * |
671 | * Note we don't actually enable the device many times if we call |
671 | * Note we don't actually enable the device many times if we call |
672 | * this function repeatedly (we just increment the count). |
672 | * this function repeatedly (we just increment the count). |
673 | */ |
673 | */ |
674 | int pci_enable_device(struct pci_dev *dev) |
674 | int pci_enable_device(struct pci_dev *dev) |
675 | { |
675 | { |
676 | return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO); |
676 | return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO); |
677 | } |
677 | } |
678 | 678 | ||
679 | 679 | ||
680 | 680 | ||
681 | struct pci_device_id* find_pci_device(dev_t* pdev, struct pci_device_id *idlist) |
681 | struct pci_device_id* find_pci_device(dev_t* pdev, struct pci_device_id *idlist) |
682 | { |
682 | { |
683 | dev_t *dev; |
683 | dev_t *dev; |
684 | struct pci_device_id *ent; |
684 | struct pci_device_id *ent; |
685 | 685 | ||
686 | for(dev = (dev_t*)devices.next; |
686 | for(dev = (dev_t*)devices.next; |
687 | &dev->link != &devices; |
687 | &dev->link != &devices; |
688 | dev = (dev_t*)dev->link.next) |
688 | dev = (dev_t*)dev->link.next) |
689 | { |
689 | { |
690 | if( dev->pci_dev.vendor != idlist->vendor ) |
690 | if( dev->pci_dev.vendor != idlist->vendor ) |
691 | continue; |
691 | continue; |
692 | 692 | ||
693 | for(ent = idlist; ent->vendor != 0; ent++) |
693 | for(ent = idlist; ent->vendor != 0; ent++) |
694 | { |
694 | { |
695 | if(unlikely(ent->device == dev->pci_dev.device)) |
695 | if(unlikely(ent->device == dev->pci_dev.device)) |
696 | { |
696 | { |
697 | pdev->pci_dev = dev->pci_dev; |
697 | pdev->pci_dev = dev->pci_dev; |
698 | return ent; |
698 | return ent; |
699 | } |
699 | } |
700 | }; |
700 | }; |
701 | } |
701 | } |
702 | 702 | ||
703 | return NULL; |
703 | return NULL; |
704 | }; |
704 | }; |
705 | 705 | ||
706 | 706 | ||
707 | 707 | ||
708 | /** |
708 | /** |
709 | * pci_map_rom - map a PCI ROM to kernel space |
709 | * pci_map_rom - map a PCI ROM to kernel space |
710 | * @pdev: pointer to pci device struct |
710 | * @pdev: pointer to pci device struct |
711 | * @size: pointer to receive size of pci window over ROM |
711 | * @size: pointer to receive size of pci window over ROM |
712 | * @return: kernel virtual pointer to image of ROM |
712 | * @return: kernel virtual pointer to image of ROM |
713 | * |
713 | * |
714 | * Map a PCI ROM into kernel space. If ROM is boot video ROM, |
714 | * Map a PCI ROM into kernel space. If ROM is boot video ROM, |
715 | * the shadow BIOS copy will be returned instead of the |
715 | * the shadow BIOS copy will be returned instead of the |
716 | * actual ROM. |
716 | * actual ROM. |
717 | */ |
717 | */ |
718 | 718 | ||
719 | #define legacyBIOSLocation 0xC0000 |
719 | #define legacyBIOSLocation 0xC0000 |
720 | #define OS_BASE 0x80000000 |
720 | #define OS_BASE 0x80000000 |
721 | 721 | ||
722 | void *pci_map_rom(struct pci_dev *pdev, size_t *size) |
722 | void *pci_map_rom(struct pci_dev *pdev, size_t *size) |
723 | { |
723 | { |
724 | struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; |
724 | struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; |
725 | u32_t start; |
725 | u32_t start; |
726 | void *rom; |
726 | void *rom; |
727 | 727 | ||
728 | #if 0 |
728 | #if 0 |
729 | /* |
729 | /* |
730 | * IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy |
730 | * IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy |
731 | * memory map if the VGA enable bit of the Bridge Control register is |
731 | * memory map if the VGA enable bit of the Bridge Control register is |
732 | * set for embedded VGA. |
732 | * set for embedded VGA. |
733 | */ |
733 | */ |
734 | if (res->flags & IORESOURCE_ROM_SHADOW) { |
734 | if (res->flags & IORESOURCE_ROM_SHADOW) { |
735 | /* primary video rom always starts here */ |
735 | /* primary video rom always starts here */ |
736 | start = (u32_t)0xC0000; |
736 | start = (u32_t)0xC0000; |
737 | *size = 0x20000; /* cover C000:0 through E000:0 */ |
737 | *size = 0x20000; /* cover C000:0 through E000:0 */ |
738 | } else { |
738 | } else { |
739 | if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) { |
739 | if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) { |
740 | *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); |
740 | *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); |
741 | return (void *)(unsigned long) |
741 | return (void *)(unsigned long) |
742 | pci_resource_start(pdev, PCI_ROM_RESOURCE); |
742 | pci_resource_start(pdev, PCI_ROM_RESOURCE); |
743 | } else { |
743 | } else { |
744 | /* assign the ROM an address if it doesn't have one */ |
744 | /* assign the ROM an address if it doesn't have one */ |
745 | //if (res->parent == NULL && |
745 | //if (res->parent == NULL && |
746 | // pci_assign_resource(pdev,PCI_ROM_RESOURCE)) |
746 | // pci_assign_resource(pdev,PCI_ROM_RESOURCE)) |
747 | // return NULL; |
747 | // return NULL; |
748 | start = pci_resource_start(pdev, PCI_ROM_RESOURCE); |
748 | start = pci_resource_start(pdev, PCI_ROM_RESOURCE); |
749 | *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); |
749 | *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); |
750 | if (*size == 0) |
750 | if (*size == 0) |
751 | return NULL; |
751 | return NULL; |
752 | 752 | ||
753 | /* Enable ROM space decodes */ |
753 | /* Enable ROM space decodes */ |
754 | if (pci_enable_rom(pdev)) |
754 | if (pci_enable_rom(pdev)) |
755 | return NULL; |
755 | return NULL; |
756 | } |
756 | } |
757 | } |
757 | } |
758 | 758 | ||
759 | rom = ioremap(start, *size); |
759 | rom = ioremap(start, *size); |
760 | if (!rom) { |
760 | if (!rom) { |
761 | /* restore enable if ioremap fails */ |
761 | /* restore enable if ioremap fails */ |
762 | if (!(res->flags & (IORESOURCE_ROM_ENABLE | |
762 | if (!(res->flags & (IORESOURCE_ROM_ENABLE | |
763 | IORESOURCE_ROM_SHADOW | |
763 | IORESOURCE_ROM_SHADOW | |
764 | IORESOURCE_ROM_COPY))) |
764 | IORESOURCE_ROM_COPY))) |
765 | pci_disable_rom(pdev); |
765 | pci_disable_rom(pdev); |
766 | return NULL; |
766 | return NULL; |
767 | } |
767 | } |
768 | 768 | ||
769 | /* |
769 | /* |
770 | * Try to find the true size of the ROM since sometimes the PCI window |
770 | * Try to find the true size of the ROM since sometimes the PCI window |
771 | * size is much larger than the actual size of the ROM. |
771 | * size is much larger than the actual size of the ROM. |
772 | * True size is important if the ROM is going to be copied. |
772 | * True size is important if the ROM is going to be copied. |
773 | */ |
773 | */ |
774 | *size = pci_get_rom_size(rom, *size); |
774 | *size = pci_get_rom_size(rom, *size); |
775 | 775 | ||
776 | #endif |
776 | #endif |
777 | 777 | ||
778 | unsigned char tmp[32]; |
778 | unsigned char tmp[32]; |
779 | rom = NULL; |
779 | rom = NULL; |
780 | 780 | ||
781 | dbgprintf("Getting BIOS copy from legacy VBIOS location\n"); |
781 | dbgprintf("Getting BIOS copy from legacy VBIOS location\n"); |
782 | memcpy(tmp,(char*)(OS_BASE+legacyBIOSLocation), 32); |
782 | memcpy(tmp,(char*)(OS_BASE+legacyBIOSLocation), 32); |
783 | *size = tmp[2] * 512; |
783 | *size = tmp[2] * 512; |
784 | if (*size > 0x10000 ) |
784 | if (*size > 0x10000 ) |
785 | { |
785 | { |
786 | *size = 0; |
786 | *size = 0; |
787 | dbgprintf("Invalid BIOS length field\n"); |
787 | dbgprintf("Invalid BIOS length field\n"); |
788 | } |
788 | } |
789 | else |
789 | else |
790 | rom = (void*)( OS_BASE+legacyBIOSLocation); |
790 | rom = (void*)( OS_BASE+legacyBIOSLocation); |
791 | 791 | ||
792 | return rom; |
792 | return rom; |
793 | } |
793 | } |
794 | 794 | ||
795 | 795 | ||
796 | int |
796 | int |
797 | pci_set_dma_mask(struct pci_dev *dev, u64 mask) |
797 | pci_set_dma_mask(struct pci_dev *dev, u64 mask) |
798 | { |
798 | { |
799 | // if (!pci_dma_supported(dev, mask)) |
799 | // if (!pci_dma_supported(dev, mask)) |
800 | // return -EIO; |
800 | // return -EIO; |
801 | 801 | ||
802 | dev->dma_mask = mask; |
802 | dev->dma_mask = mask; |
803 | 803 | ||
804 | return 0; |
804 | return 0; |
805 | }>><>>>>>><>>=>>>><>><>><>>4)><4)> |
805 | }>><>>>>>><>>=>>>><>><>><>>4)><4)> |