Rev 2997 | Rev 3764 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 2997 | Rev 3031 | ||
---|---|---|---|
1 | #include |
1 | #include |
2 | #include |
2 | #include |
3 | #include |
3 | #include |
4 | #include |
4 | #include |
5 | #include |
5 | #include |
6 | #include |
6 | #include |
7 | #include |
7 | #include |
8 | 8 | ||
9 | extern int pci_scan_filter(u32_t id, u32_t busnr, u32_t devfn); |
9 | extern int pci_scan_filter(u32_t id, u32_t busnr, u32_t devfn); |
10 | 10 | ||
11 | static LIST_HEAD(devices); |
11 | static LIST_HEAD(devices); |
12 | 12 | ||
13 | /* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */ |
13 | /* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */ |
14 | #define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */ |
14 | #define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */ |
15 | 15 | ||
16 | #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) |
16 | #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) |
17 | 17 | ||
18 | /* |
18 | /* |
19 | * Translate the low bits of the PCI base |
19 | * Translate the low bits of the PCI base |
20 | * to the resource type |
20 | * to the resource type |
21 | */ |
21 | */ |
22 | static inline unsigned int pci_calc_resource_flags(unsigned int flags) |
22 | static inline unsigned int pci_calc_resource_flags(unsigned int flags) |
23 | { |
23 | { |
24 | if (flags & PCI_BASE_ADDRESS_SPACE_IO) |
24 | if (flags & PCI_BASE_ADDRESS_SPACE_IO) |
25 | return IORESOURCE_IO; |
25 | return IORESOURCE_IO; |
26 | 26 | ||
27 | if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH) |
27 | if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH) |
28 | return IORESOURCE_MEM | IORESOURCE_PREFETCH; |
28 | return IORESOURCE_MEM | IORESOURCE_PREFETCH; |
29 | 29 | ||
30 | return IORESOURCE_MEM; |
30 | return IORESOURCE_MEM; |
31 | } |
31 | } |
32 | 32 | ||
33 | 33 | ||
34 | static u32_t pci_size(u32_t base, u32_t maxbase, u32_t mask) |
34 | static u32_t pci_size(u32_t base, u32_t maxbase, u32_t mask) |
35 | { |
35 | { |
36 | u32_t size = mask & maxbase; /* Find the significant bits */ |
36 | u32_t size = mask & maxbase; /* Find the significant bits */ |
37 | 37 | ||
38 | if (!size) |
38 | if (!size) |
39 | return 0; |
39 | return 0; |
40 | 40 | ||
41 | /* Get the lowest of them to find the decode size, and |
41 | /* Get the lowest of them to find the decode size, and |
42 | from that the extent. */ |
42 | from that the extent. */ |
43 | size = (size & ~(size-1)) - 1; |
43 | size = (size & ~(size-1)) - 1; |
44 | 44 | ||
45 | /* base == maxbase can be valid only if the BAR has |
45 | /* base == maxbase can be valid only if the BAR has |
46 | already been programmed with all 1s. */ |
46 | already been programmed with all 1s. */ |
47 | if (base == maxbase && ((base | size) & mask) != mask) |
47 | if (base == maxbase && ((base | size) & mask) != mask) |
48 | return 0; |
48 | return 0; |
49 | 49 | ||
50 | return size; |
50 | return size; |
51 | } |
51 | } |
52 | 52 | ||
53 | static u64_t pci_size64(u64_t base, u64_t maxbase, u64_t mask) |
53 | static u64_t pci_size64(u64_t base, u64_t maxbase, u64_t mask) |
54 | { |
54 | { |
55 | u64_t size = mask & maxbase; /* Find the significant bits */ |
55 | u64_t size = mask & maxbase; /* Find the significant bits */ |
56 | 56 | ||
57 | if (!size) |
57 | if (!size) |
58 | return 0; |
58 | return 0; |
59 | 59 | ||
60 | /* Get the lowest of them to find the decode size, and |
60 | /* Get the lowest of them to find the decode size, and |
61 | from that the extent. */ |
61 | from that the extent. */ |
62 | size = (size & ~(size-1)) - 1; |
62 | size = (size & ~(size-1)) - 1; |
63 | 63 | ||
64 | /* base == maxbase can be valid only if the BAR has |
64 | /* base == maxbase can be valid only if the BAR has |
65 | already been programmed with all 1s. */ |
65 | already been programmed with all 1s. */ |
66 | if (base == maxbase && ((base | size) & mask) != mask) |
66 | if (base == maxbase && ((base | size) & mask) != mask) |
67 | return 0; |
67 | return 0; |
68 | 68 | ||
69 | return size; |
69 | return size; |
70 | } |
70 | } |
71 | 71 | ||
72 | static inline int is_64bit_memory(u32_t mask) |
72 | static inline int is_64bit_memory(u32_t mask) |
73 | { |
73 | { |
74 | if ((mask & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) == |
74 | if ((mask & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) == |
75 | (PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64)) |
75 | (PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64)) |
76 | return 1; |
76 | return 1; |
77 | return 0; |
77 | return 0; |
78 | } |
78 | } |
79 | 79 | ||
/* Probe the first `howmany` Base Address Registers of *dev and fill in
 * dev->resource[] with decoded start/end/flags.  `rom` is the config-space
 * offset of the expansion ROM BAR, or 0 to skip ROM probing.
 *
 * Each BAR is sized with the standard probe: save the register, write all
 * 1s, read the size mask back, restore the register.
 * NOTE(review): this assumes the device's I/O and memory decode are safe
 * to disturb during the probe — confirm at the call site.
 */
static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
    u32_t  pos, reg, next;
    u32_t  l, sz;
    struct resource *res;

    for(pos = 0; pos < howmany; pos = next)
    {
        u64_t  l64;
        u64_t  sz64;
        u32_t  raw_sz;

        next = pos + 1;

        res = &dev->resource[pos];

        /* Size probe: save, write all 1s, read back, restore. */
        reg = PCI_BASE_ADDRESS_0 + (pos << 2);
        l  = PciRead32(dev->busnr, dev->devfn, reg);
        PciWrite32(dev->busnr, dev->devfn, reg, ~0);
        sz = PciRead32(dev->busnr, dev->devfn, reg);
        PciWrite32(dev->busnr, dev->devfn, reg, l);

        /* All 0s or all 1s after the probe: BAR not implemented. */
        if (!sz || sz == 0xffffffff)
            continue;

        if (l == 0xffffffff)
            l = 0;

        raw_sz = sz;        /* keep the unmodified low size word for the 64-bit path */
        if ((l & PCI_BASE_ADDRESS_SPACE) ==
            PCI_BASE_ADDRESS_SPACE_MEMORY)
        {
            sz = pci_size(l, sz, (u32_t)PCI_BASE_ADDRESS_MEM_MASK);
            /*
             * For 64bit prefetchable memory sz could be 0, if the
             * real size is bigger than 4G, so we need to check
             * szhi for that.
             */
            if (!is_64bit_memory(l) && !sz)
                continue;
            res->start = l & PCI_BASE_ADDRESS_MEM_MASK;
            res->flags |= l & ~PCI_BASE_ADDRESS_MEM_MASK;
        }
        else {
            /* I/O BARs decode at most 16 bits of address. */
            sz = pci_size(l, sz, PCI_BASE_ADDRESS_IO_MASK & 0xffff);
            if (!sz)
                continue;
            res->start = l & PCI_BASE_ADDRESS_IO_MASK;
            res->flags |= l & ~PCI_BASE_ADDRESS_IO_MASK;
        }
        res->end = res->start + (unsigned long) sz;
        res->flags |= pci_calc_resource_flags(l);
        if (is_64bit_memory(l))
        {
            u32_t szhi, lhi;

            /* Probe the upper half of the 64-bit BAR pair the same way. */
            lhi = PciRead32(dev->busnr, dev->devfn, reg+4);
            PciWrite32(dev->busnr, dev->devfn, reg+4, ~0);
            szhi = PciRead32(dev->busnr, dev->devfn, reg+4);
            PciWrite32(dev->busnr, dev->devfn, reg+4, lhi);
            sz64 = ((u64_t)szhi << 32) | raw_sz;
            l64 = ((u64_t)lhi << 32) | l;
            sz64 = pci_size64(l64, sz64, PCI_BASE_ADDRESS_MEM_MASK);
            next++;     /* a 64-bit BAR consumes two registers */

#if BITS_PER_LONG == 64
            if (!sz64) {
                res->start = 0;
                res->end = 0;
                res->flags = 0;
                continue;
            }
            res->start = l64 & PCI_BASE_ADDRESS_MEM_MASK;
            res->end = res->start + sz64;
#else
            if (sz64 > 0x100000000ULL) {
                /* >4G BAR cannot be represented on a 32-bit build. */
                printk(KERN_ERR "PCI: Unable to handle 64-bit "
                       "BAR for device %s\n", pci_name(dev));
                res->start = 0;
                res->flags = 0;
            }
            else if (lhi)
            {
                /* 64-bit wide address, treat as disabled */
                PciWrite32(dev->busnr, dev->devfn, reg,
                           l & ~(u32_t)PCI_BASE_ADDRESS_MEM_MASK);
                PciWrite32(dev->busnr, dev->devfn, reg+4, 0);
                res->start = 0;
                res->end = sz;
            }
#endif
        }
    }

    if ( rom )
    {
        /* Expansion ROM BAR: same probe, but keep decode disabled by
           clearing PCI_ROM_ADDRESS_ENABLE while sizing. */
        dev->rom_base_reg = rom;
        res = &dev->resource[PCI_ROM_RESOURCE];

        l = PciRead32(dev->busnr, dev->devfn, rom);
        PciWrite32(dev->busnr, dev->devfn, rom, ~PCI_ROM_ADDRESS_ENABLE);
        sz = PciRead32(dev->busnr, dev->devfn, rom);
        PciWrite32(dev->busnr, dev->devfn, rom, l);

        if (l == 0xffffffff)
            l = 0;

        if (sz && sz != 0xffffffff)
        {
            sz = pci_size(l, sz, (u32_t)PCI_ROM_ADDRESS_MASK);

            if (sz)
            {
                res->flags = (l & IORESOURCE_ROM_ENABLE) |
                             IORESOURCE_MEM | IORESOURCE_PREFETCH |
                             IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
                res->start = l & PCI_ROM_ADDRESS_MASK;
                res->end = res->start + (unsigned long) sz;
            }
        }
    }
}
202 | 202 | ||
203 | static void pci_read_irq(struct pci_dev *dev) |
203 | static void pci_read_irq(struct pci_dev *dev) |
204 | { |
204 | { |
205 | u8_t irq; |
205 | u8_t irq; |
206 | 206 | ||
207 | irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_PIN); |
207 | irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_PIN); |
208 | dev->pin = irq; |
208 | dev->pin = irq; |
209 | if (irq) |
209 | if (irq) |
210 | irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_LINE); |
210 | irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_LINE); |
211 | dev->irq = irq; |
211 | dev->irq = irq; |
212 | }; |
212 | }; |
213 | 213 | ||
214 | 214 | ||
/* Fill in *dev from its configuration header.  busnr, devfn and
 * hdr_type must already be set by the caller.  Reads class/revision,
 * then per-header-type: IRQ routing, BARs and subsystem ids.
 * Returns 0 on success, -1 for an unknown header type.
 */
int pci_setup_device(struct pci_dev *dev)
{
    u32_t  class;

    /* PCI_CLASS_REVISION packs revision in the low byte and the
       24-bit class code above it. */
    class = PciRead32(dev->busnr, dev->devfn, PCI_CLASS_REVISION);
    dev->revision = class & 0xff;
    class >>= 8;                                /* upper 3 bytes */
    dev->class = class;

    /* "Unknown power state" */
//    dev->current_state = PCI_UNKNOWN;

    /* Early fixups, before probing the BARs */
//    pci_fixup_device(pci_fixup_early, dev);
    class = dev->class >> 8;    /* base class + subclass, prog-if dropped */

    switch (dev->hdr_type)
    {
        case PCI_HEADER_TYPE_NORMAL:            /* standard header */
            /* A normal header claiming to be a PCI bridge is bogus. */
            if (class == PCI_CLASS_BRIDGE_PCI)
                goto bad;
            pci_read_irq(dev);
            pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
            dev->subsystem_vendor = PciRead16(dev->busnr, dev->devfn,PCI_SUBSYSTEM_VENDOR_ID);
            dev->subsystem_device = PciRead16(dev->busnr, dev->devfn, PCI_SUBSYSTEM_ID);

            /*
             *  Do the ugly legacy mode stuff here rather than broken chip
             *  quirk code. Legacy mode ATA controllers have fixed
             *  addresses. These are not always echoed in BAR0-3, and
             *  BAR0-3 in a few cases contain junk!
             */
            if (class == PCI_CLASS_STORAGE_IDE)
            {
                u8_t progif;

                progif = PciRead8(dev->busnr, dev->devfn,PCI_CLASS_PROG);
                if ((progif & 1) == 0)      /* primary channel in legacy mode */
                {
                    dev->resource[0].start = 0x1F0;
                    dev->resource[0].end = 0x1F7;
                    dev->resource[0].flags = LEGACY_IO_RESOURCE;
                    dev->resource[1].start = 0x3F6;
                    dev->resource[1].end = 0x3F6;
                    dev->resource[1].flags = LEGACY_IO_RESOURCE;
                }
                if ((progif & 4) == 0)      /* secondary channel in legacy mode */
                {
                    dev->resource[2].start = 0x170;
                    dev->resource[2].end = 0x177;
                    dev->resource[2].flags = LEGACY_IO_RESOURCE;
                    dev->resource[3].start = 0x376;
                    dev->resource[3].end = 0x376;
                    dev->resource[3].flags = LEGACY_IO_RESOURCE;
                };
            }
            break;

        case PCI_HEADER_TYPE_BRIDGE:            /* bridge header */
            if (class != PCI_CLASS_BRIDGE_PCI)
                goto bad;
            /* The PCI-to-PCI bridge spec requires that subtractive
               decoding (i.e. transparent) bridge must have programming
               interface code of 0x01. */
            pci_read_irq(dev);
            dev->transparent = ((dev->class & 0xff) == 1);
            /* Bridges only have BAR0/BAR1 plus their own ROM register. */
            pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
            break;

        case PCI_HEADER_TYPE_CARDBUS:           /* CardBus bridge header */
            if (class != PCI_CLASS_BRIDGE_CARDBUS)
                goto bad;
            pci_read_irq(dev);
            /* CardBus bridges expose a single BAR and no ROM. */
            pci_read_bases(dev, 1, 0);
            dev->subsystem_vendor = PciRead16(dev->busnr,
                                              dev->devfn,
                                              PCI_CB_SUBSYSTEM_VENDOR_ID);

            dev->subsystem_device = PciRead16(dev->busnr,
                                              dev->devfn,
                                              PCI_CB_SUBSYSTEM_ID);
            break;

        default:                                /* unknown header */
            printk(KERN_ERR "PCI: device %s has unknown header type %02x, ignoring.\n",
                   pci_name(dev), dev->hdr_type);
            return -1;

        bad:
            /* Class/header mismatch: keep the device but void the class. */
            printk(KERN_ERR "PCI: %s: class %x doesn't match header type %02x. Ignoring class.\n",
                   pci_name(dev), class, dev->hdr_type);
            dev->class = PCI_CLASS_NOT_DEFINED;
    }

    /* We found a fine healthy device, go go go... */

    return 0;
};
313 | 313 | ||
314 | static pci_dev_t* pci_scan_device(u32_t busnr, int devfn) |
314 | static pci_dev_t* pci_scan_device(u32_t busnr, int devfn) |
315 | { |
315 | { |
316 | pci_dev_t *dev; |
316 | pci_dev_t *dev; |
317 | 317 | ||
318 | u32_t id; |
318 | u32_t id; |
319 | u8_t hdr; |
319 | u8_t hdr; |
320 | 320 | ||
321 | int timeout = 10; |
321 | int timeout = 10; |
322 | 322 | ||
323 | id = PciRead32(busnr, devfn, PCI_VENDOR_ID); |
323 | id = PciRead32(busnr, devfn, PCI_VENDOR_ID); |
324 | 324 | ||
325 | /* some broken boards return 0 or ~0 if a slot is empty: */ |
325 | /* some broken boards return 0 or ~0 if a slot is empty: */ |
326 | if (id == 0xffffffff || id == 0x00000000 || |
326 | if (id == 0xffffffff || id == 0x00000000 || |
327 | id == 0x0000ffff || id == 0xffff0000) |
327 | id == 0x0000ffff || id == 0xffff0000) |
328 | return NULL; |
328 | return NULL; |
329 | 329 | ||
330 | while (id == 0xffff0001) |
330 | while (id == 0xffff0001) |
331 | { |
331 | { |
332 | 332 | ||
333 | delay(timeout/10); |
333 | delay(timeout/10); |
334 | timeout *= 2; |
334 | timeout *= 2; |
335 | 335 | ||
336 | id = PciRead32(busnr, devfn, PCI_VENDOR_ID); |
336 | id = PciRead32(busnr, devfn, PCI_VENDOR_ID); |
337 | 337 | ||
338 | /* Card hasn't responded in 60 seconds? Must be stuck. */ |
338 | /* Card hasn't responded in 60 seconds? Must be stuck. */ |
339 | if (timeout > 60 * 100) |
339 | if (timeout > 60 * 100) |
340 | { |
340 | { |
341 | printk(KERN_WARNING "Device %04x:%02x:%02x.%d not " |
341 | printk(KERN_WARNING "Device %04x:%02x:%02x.%d not " |
342 | "responding\n", busnr,PCI_SLOT(devfn),PCI_FUNC(devfn)); |
342 | "responding\n", busnr,PCI_SLOT(devfn),PCI_FUNC(devfn)); |
343 | return NULL; |
343 | return NULL; |
344 | } |
344 | } |
345 | }; |
345 | }; |
346 | 346 | ||
347 | if( pci_scan_filter(id, busnr, devfn) == 0) |
347 | if( pci_scan_filter(id, busnr, devfn) == 0) |
348 | return NULL; |
348 | return NULL; |
349 | 349 | ||
350 | hdr = PciRead8(busnr, devfn, PCI_HEADER_TYPE); |
350 | hdr = PciRead8(busnr, devfn, PCI_HEADER_TYPE); |
351 | 351 | ||
352 | dev = (pci_dev_t*)kzalloc(sizeof(pci_dev_t), 0); |
352 | dev = (pci_dev_t*)kzalloc(sizeof(pci_dev_t), 0); |
353 | if(unlikely(dev == NULL)) |
353 | if(unlikely(dev == NULL)) |
354 | return NULL; |
354 | return NULL; |
355 | 355 | ||
356 | INIT_LIST_HEAD(&dev->link); |
356 | INIT_LIST_HEAD(&dev->link); |
357 | 357 | ||
358 | 358 | ||
359 | dev->pci_dev.busnr = busnr; |
359 | dev->pci_dev.busnr = busnr; |
360 | dev->pci_dev.devfn = devfn; |
360 | dev->pci_dev.devfn = devfn; |
361 | dev->pci_dev.hdr_type = hdr & 0x7f; |
361 | dev->pci_dev.hdr_type = hdr & 0x7f; |
362 | dev->pci_dev.multifunction = !!(hdr & 0x80); |
362 | dev->pci_dev.multifunction = !!(hdr & 0x80); |
363 | dev->pci_dev.vendor = id & 0xffff; |
363 | dev->pci_dev.vendor = id & 0xffff; |
364 | dev->pci_dev.device = (id >> 16) & 0xffff; |
364 | dev->pci_dev.device = (id >> 16) & 0xffff; |
365 | 365 | ||
366 | pci_setup_device(&dev->pci_dev); |
366 | pci_setup_device(&dev->pci_dev); |
367 | 367 | ||
368 | return dev; |
368 | return dev; |
369 | 369 | ||
370 | }; |
370 | }; |
371 | 371 | ||
372 | 372 | ||
373 | 373 | ||
374 | 374 | ||
375 | int pci_scan_slot(u32_t bus, int devfn) |
375 | int pci_scan_slot(u32_t bus, int devfn) |
376 | { |
376 | { |
377 | int func, nr = 0; |
377 | int func, nr = 0; |
378 | 378 | ||
379 | for (func = 0; func < 8; func++, devfn++) |
379 | for (func = 0; func < 8; func++, devfn++) |
380 | { |
380 | { |
381 | pci_dev_t *dev; |
381 | pci_dev_t *dev; |
382 | 382 | ||
383 | dev = pci_scan_device(bus, devfn); |
383 | dev = pci_scan_device(bus, devfn); |
384 | if( dev ) |
384 | if( dev ) |
385 | { |
385 | { |
386 | list_add(&dev->link, &devices); |
386 | list_add(&dev->link, &devices); |
387 | 387 | ||
388 | nr++; |
388 | nr++; |
389 | 389 | ||
390 | /* |
390 | /* |
391 | * If this is a single function device, |
391 | * If this is a single function device, |
392 | * don't scan past the first function. |
392 | * don't scan past the first function. |
393 | */ |
393 | */ |
394 | if (!dev->pci_dev.multifunction) |
394 | if (!dev->pci_dev.multifunction) |
395 | { |
395 | { |
396 | if (func > 0) { |
396 | if (func > 0) { |
397 | dev->pci_dev.multifunction = 1; |
397 | dev->pci_dev.multifunction = 1; |
398 | } |
398 | } |
399 | else { |
399 | else { |
400 | break; |
400 | break; |
401 | } |
401 | } |
402 | } |
402 | } |
403 | } |
403 | } |
404 | else { |
404 | else { |
405 | if (func == 0) |
405 | if (func == 0) |
406 | break; |
406 | break; |
407 | } |
407 | } |
408 | }; |
408 | }; |
409 | 409 | ||
410 | return nr; |
410 | return nr; |
411 | }; |
411 | }; |
412 | 412 | ||
413 | #define PCI_FIND_CAP_TTL 48 |
413 | #define PCI_FIND_CAP_TTL 48 |
414 | 414 | ||
415 | static int __pci_find_next_cap_ttl(unsigned int bus, unsigned int devfn, |
415 | static int __pci_find_next_cap_ttl(unsigned int bus, unsigned int devfn, |
416 | u8 pos, int cap, int *ttl) |
416 | u8 pos, int cap, int *ttl) |
417 | { |
417 | { |
418 | u8 id; |
418 | u8 id; |
419 | 419 | ||
420 | while ((*ttl)--) { |
420 | while ((*ttl)--) { |
421 | pos = PciRead8(bus, devfn, pos); |
421 | pos = PciRead8(bus, devfn, pos); |
422 | if (pos < 0x40) |
422 | if (pos < 0x40) |
423 | break; |
423 | break; |
424 | pos &= ~3; |
424 | pos &= ~3; |
425 | id = PciRead8(bus, devfn, pos + PCI_CAP_LIST_ID); |
425 | id = PciRead8(bus, devfn, pos + PCI_CAP_LIST_ID); |
426 | if (id == 0xff) |
426 | if (id == 0xff) |
427 | break; |
427 | break; |
428 | if (id == cap) |
428 | if (id == cap) |
429 | return pos; |
429 | return pos; |
430 | pos += PCI_CAP_LIST_NEXT; |
430 | pos += PCI_CAP_LIST_NEXT; |
431 | } |
431 | } |
432 | return 0; |
432 | return 0; |
433 | } |
433 | } |
434 | 434 | ||
435 | static int __pci_find_next_cap(unsigned int bus, unsigned int devfn, |
435 | static int __pci_find_next_cap(unsigned int bus, unsigned int devfn, |
436 | u8 pos, int cap) |
436 | u8 pos, int cap) |
437 | { |
437 | { |
438 | int ttl = PCI_FIND_CAP_TTL; |
438 | int ttl = PCI_FIND_CAP_TTL; |
439 | 439 | ||
440 | return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl); |
440 | return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl); |
441 | } |
441 | } |
442 | 442 | ||
443 | static int __pci_bus_find_cap_start(unsigned int bus, |
443 | static int __pci_bus_find_cap_start(unsigned int bus, |
444 | unsigned int devfn, u8 hdr_type) |
444 | unsigned int devfn, u8 hdr_type) |
445 | { |
445 | { |
446 | u16 status; |
446 | u16 status; |
447 | 447 | ||
448 | status = PciRead16(bus, devfn, PCI_STATUS); |
448 | status = PciRead16(bus, devfn, PCI_STATUS); |
449 | if (!(status & PCI_STATUS_CAP_LIST)) |
449 | if (!(status & PCI_STATUS_CAP_LIST)) |
450 | return 0; |
450 | return 0; |
451 | 451 | ||
452 | switch (hdr_type) { |
452 | switch (hdr_type) { |
453 | case PCI_HEADER_TYPE_NORMAL: |
453 | case PCI_HEADER_TYPE_NORMAL: |
454 | case PCI_HEADER_TYPE_BRIDGE: |
454 | case PCI_HEADER_TYPE_BRIDGE: |
455 | return PCI_CAPABILITY_LIST; |
455 | return PCI_CAPABILITY_LIST; |
456 | case PCI_HEADER_TYPE_CARDBUS: |
456 | case PCI_HEADER_TYPE_CARDBUS: |
457 | return PCI_CB_CAPABILITY_LIST; |
457 | return PCI_CB_CAPABILITY_LIST; |
458 | default: |
458 | default: |
459 | return 0; |
459 | return 0; |
460 | } |
460 | } |
461 | 461 | ||
462 | return 0; |
462 | return 0; |
463 | } |
463 | } |
464 | 464 | ||
465 | 465 | ||
466 | int pci_find_capability(struct pci_dev *dev, int cap) |
466 | int pci_find_capability(struct pci_dev *dev, int cap) |
467 | { |
467 | { |
468 | int pos; |
468 | int pos; |
469 | 469 | ||
470 | pos = __pci_bus_find_cap_start(dev->busnr, dev->devfn, dev->hdr_type); |
470 | pos = __pci_bus_find_cap_start(dev->busnr, dev->devfn, dev->hdr_type); |
471 | if (pos) |
471 | if (pos) |
472 | pos = __pci_find_next_cap(dev->busnr, dev->devfn, pos, cap); |
472 | pos = __pci_find_next_cap(dev->busnr, dev->devfn, pos, cap); |
473 | 473 | ||
474 | return pos; |
474 | return pos; |
475 | } |
475 | } |
476 | 476 | ||
477 | 477 | ||
478 | 478 | ||
479 | 479 | ||
480 | int enum_pci_devices() |
480 | int enum_pci_devices() |
481 | { |
481 | { |
482 | pci_dev_t *dev; |
482 | pci_dev_t *dev; |
483 | u32_t last_bus; |
483 | u32_t last_bus; |
484 | u32_t bus = 0 , devfn = 0; |
484 | u32_t bus = 0 , devfn = 0; |
485 | 485 | ||
486 | 486 | ||
487 | last_bus = PciApi(1); |
487 | last_bus = PciApi(1); |
488 | 488 | ||
489 | 489 | ||
490 | if( unlikely(last_bus == -1)) |
490 | if( unlikely(last_bus == -1)) |
491 | return -1; |
491 | return -1; |
492 | 492 | ||
493 | for(;bus <= last_bus; bus++) |
493 | for(;bus <= last_bus; bus++) |
494 | { |
494 | { |
495 | for (devfn = 0; devfn < 0x100; devfn += 8) |
495 | for (devfn = 0; devfn < 0x100; devfn += 8) |
496 | pci_scan_slot(bus, devfn); |
496 | pci_scan_slot(bus, devfn); |
497 | 497 | ||
498 | 498 | ||
499 | } |
499 | } |
500 | for(dev = (pci_dev_t*)devices.next; |
500 | for(dev = (pci_dev_t*)devices.next; |
501 | &dev->link != &devices; |
501 | &dev->link != &devices; |
502 | dev = (pci_dev_t*)dev->link.next) |
502 | dev = (pci_dev_t*)dev->link.next) |
503 | { |
503 | { |
504 | dbgprintf("PCI device %x:%x bus:%x devfn:%x\n", |
504 | dbgprintf("PCI device %x:%x bus:%x devfn:%x\n", |
505 | dev->pci_dev.vendor, |
505 | dev->pci_dev.vendor, |
506 | dev->pci_dev.device, |
506 | dev->pci_dev.device, |
507 | dev->pci_dev.busnr, |
507 | dev->pci_dev.busnr, |
508 | dev->pci_dev.devfn); |
508 | dev->pci_dev.devfn); |
509 | 509 | ||
510 | } |
510 | } |
511 | return 0; |
511 | return 0; |
512 | } |
512 | } |
513 | 513 | ||
514 | const struct pci_device_id* find_pci_device(pci_dev_t* pdev, const struct pci_device_id *idlist) |
514 | const struct pci_device_id* find_pci_device(pci_dev_t* pdev, const struct pci_device_id *idlist) |
515 | { |
515 | { |
516 | pci_dev_t *dev; |
516 | pci_dev_t *dev; |
517 | const struct pci_device_id *ent; |
517 | const struct pci_device_id *ent; |
518 | 518 | ||
519 | for(dev = (pci_dev_t*)devices.next; |
519 | for(dev = (pci_dev_t*)devices.next; |
520 | &dev->link != &devices; |
520 | &dev->link != &devices; |
521 | dev = (pci_dev_t*)dev->link.next) |
521 | dev = (pci_dev_t*)dev->link.next) |
522 | { |
522 | { |
523 | if( dev->pci_dev.vendor != idlist->vendor ) |
523 | if( dev->pci_dev.vendor != idlist->vendor ) |
524 | continue; |
524 | continue; |
525 | 525 | ||
526 | for(ent = idlist; ent->vendor != 0; ent++) |
526 | for(ent = idlist; ent->vendor != 0; ent++) |
527 | { |
527 | { |
528 | if(unlikely(ent->device == dev->pci_dev.device)) |
528 | if(unlikely(ent->device == dev->pci_dev.device)) |
529 | { |
529 | { |
530 | pdev->pci_dev = dev->pci_dev; |
530 | pdev->pci_dev = dev->pci_dev; |
531 | return ent; |
531 | return ent; |
532 | } |
532 | } |
533 | }; |
533 | }; |
534 | } |
534 | } |
535 | 535 | ||
536 | return NULL; |
536 | return NULL; |
537 | }; |
537 | }; |
538 | 538 | ||
539 | struct pci_dev * |
539 | struct pci_dev * |
540 | pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from) |
540 | pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from) |
541 | { |
541 | { |
542 | pci_dev_t *dev; |
542 | pci_dev_t *dev; |
543 | 543 | ||
544 | dev = (pci_dev_t*)devices.next; |
544 | dev = (pci_dev_t*)devices.next; |
545 | 545 | ||
546 | if(from != NULL) |
546 | if(from != NULL) |
547 | { |
547 | { |
548 | for(; &dev->link != &devices; |
548 | for(; &dev->link != &devices; |
549 | dev = (pci_dev_t*)dev->link.next) |
549 | dev = (pci_dev_t*)dev->link.next) |
550 | { |
550 | { |
551 | if( &dev->pci_dev == from) |
551 | if( &dev->pci_dev == from) |
552 | { |
552 | { |
553 | dev = (pci_dev_t*)dev->link.next; |
553 | dev = (pci_dev_t*)dev->link.next; |
554 | break; |
554 | break; |
555 | }; |
555 | }; |
556 | } |
556 | } |
557 | }; |
557 | }; |
558 | 558 | ||
559 | for(; &dev->link != &devices; |
559 | for(; &dev->link != &devices; |
560 | dev = (pci_dev_t*)dev->link.next) |
560 | dev = (pci_dev_t*)dev->link.next) |
561 | { |
561 | { |
562 | if( dev->pci_dev.vendor != vendor ) |
562 | if( dev->pci_dev.vendor != vendor ) |
563 | continue; |
563 | continue; |
564 | 564 | ||
565 | if(dev->pci_dev.device == device) |
565 | if(dev->pci_dev.device == device) |
566 | { |
566 | { |
567 | return &dev->pci_dev; |
567 | return &dev->pci_dev; |
568 | } |
568 | } |
569 | } |
569 | } |
570 | return NULL; |
570 | return NULL; |
571 | }; |
571 | }; |
572 | 572 | ||
573 | 573 | ||
574 | struct pci_dev * pci_get_bus_and_slot(unsigned int bus, unsigned int devfn) |
574 | struct pci_dev * pci_get_bus_and_slot(unsigned int bus, unsigned int devfn) |
575 | { |
575 | { |
576 | pci_dev_t *dev; |
576 | pci_dev_t *dev; |
577 | 577 | ||
578 | for(dev = (pci_dev_t*)devices.next; |
578 | for(dev = (pci_dev_t*)devices.next; |
579 | &dev->link != &devices; |
579 | &dev->link != &devices; |
580 | dev = (pci_dev_t*)dev->link.next) |
580 | dev = (pci_dev_t*)dev->link.next) |
581 | { |
581 | { |
582 | if ( dev->pci_dev.busnr == bus && dev->pci_dev.devfn == devfn) |
582 | if ( dev->pci_dev.busnr == bus && dev->pci_dev.devfn == devfn) |
583 | return &dev->pci_dev; |
583 | return &dev->pci_dev; |
584 | } |
584 | } |
585 | return NULL; |
585 | return NULL; |
586 | } |
586 | } |
587 | 587 | ||
588 | struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from) |
588 | struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from) |
589 | { |
589 | { |
590 | pci_dev_t *dev; |
590 | pci_dev_t *dev; |
591 | 591 | ||
592 | dev = (pci_dev_t*)devices.next; |
592 | dev = (pci_dev_t*)devices.next; |
593 | 593 | ||
594 | if(from != NULL) |
594 | if(from != NULL) |
595 | { |
595 | { |
596 | for(; &dev->link != &devices; |
596 | for(; &dev->link != &devices; |
597 | dev = (pci_dev_t*)dev->link.next) |
597 | dev = (pci_dev_t*)dev->link.next) |
598 | { |
598 | { |
599 | if( &dev->pci_dev == from) |
599 | if( &dev->pci_dev == from) |
600 | { |
600 | { |
601 | dev = (pci_dev_t*)dev->link.next; |
601 | dev = (pci_dev_t*)dev->link.next; |
602 | break; |
602 | break; |
603 | }; |
603 | }; |
604 | } |
604 | } |
605 | }; |
605 | }; |
606 | 606 | ||
607 | for(; &dev->link != &devices; |
607 | for(; &dev->link != &devices; |
608 | dev = (pci_dev_t*)dev->link.next) |
608 | dev = (pci_dev_t*)dev->link.next) |
609 | { |
609 | { |
610 | if( dev->pci_dev.class == class) |
610 | if( dev->pci_dev.class == class) |
611 | { |
611 | { |
612 | return &dev->pci_dev; |
612 | return &dev->pci_dev; |
613 | } |
613 | } |
614 | } |
614 | } |
615 | 615 | ||
616 | return NULL; |
616 | return NULL; |
617 | } |
617 | } |
618 | 618 | ||
619 | 619 | ||
/*
 * Cookie encoding used by the generic iomap helpers: values below
 * PIO_RESERVED encode an I/O port number (offset by PIO_OFFSET);
 * anything at or above PIO_RESERVED is a real MMIO virtual address.
 */
#define PIO_OFFSET      0x10000UL
#define PIO_MASK        0x0ffffUL
#define PIO_RESERVED    0x40000UL

/*
 * Dispatch on a cookie: run @is_pio with the local `port` bound to the
 * decoded port number, or @is_mmio for a plain MMIO mapping.  Cookies
 * that fall in neither range are silently ignored.
 */
#define IO_COND(addr, is_pio, is_mmio) do {                     \
        unsigned long port = (unsigned long __force)addr;       \
        if (port >= PIO_RESERVED) {                             \
            is_mmio;                                            \
        } else if (port > PIO_OFFSET) {                         \
            port &= PIO_MASK;                                   \
            is_pio;                                             \
        }                                                       \
    } while (0)
633 | 633 | ||
634 | /* Create a virtual mapping cookie for an IO port range */ |
634 | /* Create a virtual mapping cookie for an IO port range */ |
635 | void __iomem *ioport_map(unsigned long port, unsigned int nr) |
635 | void __iomem *ioport_map(unsigned long port, unsigned int nr) |
636 | { |
636 | { |
637 | return (void __iomem *) port; |
637 | return (void __iomem *) port; |
638 | } |
638 | } |
639 | 639 | ||
640 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) |
640 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) |
641 | { |
641 | { |
642 | resource_size_t start = pci_resource_start(dev, bar); |
642 | resource_size_t start = pci_resource_start(dev, bar); |
643 | resource_size_t len = pci_resource_len(dev, bar); |
643 | resource_size_t len = pci_resource_len(dev, bar); |
644 | unsigned long flags = pci_resource_flags(dev, bar); |
644 | unsigned long flags = pci_resource_flags(dev, bar); |
645 | 645 | ||
646 | if (!len || !start) |
646 | if (!len || !start) |
647 | return NULL; |
647 | return NULL; |
648 | if (maxlen && len > maxlen) |
648 | if (maxlen && len > maxlen) |
649 | len = maxlen; |
649 | len = maxlen; |
650 | if (flags & IORESOURCE_IO) |
650 | if (flags & IORESOURCE_IO) |
651 | return ioport_map(start, len); |
651 | return ioport_map(start, len); |
652 | if (flags & IORESOURCE_MEM) { |
652 | if (flags & IORESOURCE_MEM) { |
653 | return ioremap(start, len); |
653 | return ioremap(start, len); |
654 | } |
654 | } |
655 | /* What? */ |
655 | /* What? */ |
656 | return NULL; |
656 | return NULL; |
657 | } |
657 | } |
658 | 658 | ||
659 | void pci_iounmap(struct pci_dev *dev, void __iomem * addr) |
659 | void pci_iounmap(struct pci_dev *dev, void __iomem * addr) |
660 | { |
660 | { |
661 | IO_COND(addr, /* nothing */, iounmap(addr)); |
661 | IO_COND(addr, /* nothing */, iounmap(addr)); |
662 | } |
662 | } |
663 | 663 | ||
664 | 664 | ||
665 | struct pci_bus_region { |
665 | struct pci_bus_region { |
666 | resource_size_t start; |
666 | resource_size_t start; |
667 | resource_size_t end; |
667 | resource_size_t end; |
668 | }; |
668 | }; |
669 | 669 | ||
670 | static inline void |
670 | static inline void |
671 | pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, |
671 | pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, |
672 | struct resource *res) |
672 | struct resource *res) |
673 | { |
673 | { |
674 | region->start = res->start; |
674 | region->start = res->start; |
675 | region->end = res->end; |
675 | region->end = res->end; |
676 | } |
676 | } |
677 | 677 | ||
678 | static inline int pci_read_config_dword(struct pci_dev *dev, int where, |
- | |
679 | u32 *val) |
- | |
680 | { |
- | |
681 | *val = PciRead32(dev->busnr, dev->devfn, where); |
- | |
682 | return 1; |
- | |
683 | } |
- | |
684 | - | ||
685 | static inline int pci_write_config_dword(struct pci_dev *dev, int where, |
- | |
686 | u32 val) |
- | |
687 | { |
- | |
688 | PciWrite32(dev->busnr, dev->devfn, where, val); |
- | |
689 | return 1; |
- | |
690 | } |
- | |
691 | - | ||
692 | static inline int pci_read_config_word(struct pci_dev *dev, int where, |
- | |
693 | u16 *val) |
- | |
694 | { |
- | |
695 | *val = PciRead16(dev->busnr, dev->devfn, where); |
- | |
696 | return 1; |
- | |
697 | } |
- | |
698 | - | ||
699 | static inline int pci_write_config_word(struct pci_dev *dev, int where, |
- | |
700 | u16 val) |
- | |
701 | { |
- | |
702 | PciWrite16(dev->busnr, dev->devfn, where, val); |
- | |
703 | return 1; |
- | |
704 | } |
- | |
705 | - | ||
706 | 678 | ||
707 | int pci_enable_rom(struct pci_dev *pdev) |
679 | int pci_enable_rom(struct pci_dev *pdev) |
708 | { |
680 | { |
709 | struct resource *res = pdev->resource + PCI_ROM_RESOURCE; |
681 | struct resource *res = pdev->resource + PCI_ROM_RESOURCE; |
710 | struct pci_bus_region region; |
682 | struct pci_bus_region region; |
711 | u32 rom_addr; |
683 | u32 rom_addr; |
712 | 684 | ||
713 | if (!res->flags) |
685 | if (!res->flags) |
714 | return -1; |
686 | return -1; |
715 | 687 | ||
716 | pcibios_resource_to_bus(pdev, ®ion, res); |
688 | pcibios_resource_to_bus(pdev, ®ion, res); |
717 | pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr); |
689 | pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr); |
718 | rom_addr &= ~PCI_ROM_ADDRESS_MASK; |
690 | rom_addr &= ~PCI_ROM_ADDRESS_MASK; |
719 | rom_addr |= region.start | PCI_ROM_ADDRESS_ENABLE; |
691 | rom_addr |= region.start | PCI_ROM_ADDRESS_ENABLE; |
720 | pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr); |
692 | pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr); |
721 | return 0; |
693 | return 0; |
722 | } |
694 | } |
723 | 695 | ||
724 | void pci_disable_rom(struct pci_dev *pdev) |
696 | void pci_disable_rom(struct pci_dev *pdev) |
725 | { |
697 | { |
726 | u32 rom_addr; |
698 | u32 rom_addr; |
727 | pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr); |
699 | pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr); |
728 | rom_addr &= ~PCI_ROM_ADDRESS_ENABLE; |
700 | rom_addr &= ~PCI_ROM_ADDRESS_ENABLE; |
729 | pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr); |
701 | pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr); |
730 | } |
702 | } |
731 | 703 | ||
732 | /** |
704 | /** |
733 | * pci_get_rom_size - obtain the actual size of the ROM image |
705 | * pci_get_rom_size - obtain the actual size of the ROM image |
734 | * @pdev: target PCI device |
706 | * @pdev: target PCI device |
735 | * @rom: kernel virtual pointer to image of ROM |
707 | * @rom: kernel virtual pointer to image of ROM |
736 | * @size: size of PCI window |
708 | * @size: size of PCI window |
737 | * return: size of actual ROM image |
709 | * return: size of actual ROM image |
738 | * |
710 | * |
739 | * Determine the actual length of the ROM image. |
711 | * Determine the actual length of the ROM image. |
740 | * The PCI window size could be much larger than the |
712 | * The PCI window size could be much larger than the |
741 | * actual image size. |
713 | * actual image size. |
742 | */ |
714 | */ |
743 | size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size) |
715 | size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size) |
744 | { |
716 | { |
745 | void __iomem *image; |
717 | void __iomem *image; |
746 | int last_image; |
718 | int last_image; |
747 | 719 | ||
748 | image = rom; |
720 | image = rom; |
749 | do { |
721 | do { |
750 | void __iomem *pds; |
722 | void __iomem *pds; |
751 | /* Standard PCI ROMs start out with these bytes 55 AA */ |
723 | /* Standard PCI ROMs start out with these bytes 55 AA */ |
752 | if (readb(image) != 0x55) { |
724 | if (readb(image) != 0x55) { |
753 | dev_err(&pdev->dev, "Invalid ROM contents\n"); |
725 | dev_err(&pdev->dev, "Invalid ROM contents\n"); |
754 | break; |
726 | break; |
755 | } |
727 | } |
756 | if (readb(image + 1) != 0xAA) |
728 | if (readb(image + 1) != 0xAA) |
757 | break; |
729 | break; |
758 | /* get the PCI data structure and check its signature */ |
730 | /* get the PCI data structure and check its signature */ |
759 | pds = image + readw(image + 24); |
731 | pds = image + readw(image + 24); |
760 | if (readb(pds) != 'P') |
732 | if (readb(pds) != 'P') |
761 | break; |
733 | break; |
762 | if (readb(pds + 1) != 'C') |
734 | if (readb(pds + 1) != 'C') |
763 | break; |
735 | break; |
764 | if (readb(pds + 2) != 'I') |
736 | if (readb(pds + 2) != 'I') |
765 | break; |
737 | break; |
766 | if (readb(pds + 3) != 'R') |
738 | if (readb(pds + 3) != 'R') |
767 | break; |
739 | break; |
768 | last_image = readb(pds + 21) & 0x80; |
740 | last_image = readb(pds + 21) & 0x80; |
769 | /* this length is reliable */ |
741 | /* this length is reliable */ |
770 | image += readw(pds + 16) * 512; |
742 | image += readw(pds + 16) * 512; |
771 | } while (!last_image); |
743 | } while (!last_image); |
772 | 744 | ||
773 | /* never return a size larger than the PCI resource window */ |
745 | /* never return a size larger than the PCI resource window */ |
774 | /* there are known ROMs that get the size wrong */ |
746 | /* there are known ROMs that get the size wrong */ |
775 | return min((size_t)(image - rom), size); |
747 | return min((size_t)(image - rom), size); |
776 | } |
748 | } |
777 | 749 | ||
778 | 750 | ||
779 | /** |
751 | /** |
780 | * pci_map_rom - map a PCI ROM to kernel space |
752 | * pci_map_rom - map a PCI ROM to kernel space |
781 | * @pdev: pointer to pci device struct |
753 | * @pdev: pointer to pci device struct |
782 | * @size: pointer to receive size of pci window over ROM |
754 | * @size: pointer to receive size of pci window over ROM |
783 | * |
755 | * |
784 | * Return: kernel virtual pointer to image of ROM |
756 | * Return: kernel virtual pointer to image of ROM |
785 | * |
757 | * |
786 | * Map a PCI ROM into kernel space. If ROM is boot video ROM, |
758 | * Map a PCI ROM into kernel space. If ROM is boot video ROM, |
787 | * the shadow BIOS copy will be returned instead of the |
759 | * the shadow BIOS copy will be returned instead of the |
788 | * actual ROM. |
760 | * actual ROM. |
789 | */ |
761 | */ |
790 | void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size) |
762 | void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size) |
791 | { |
763 | { |
792 | struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; |
764 | struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; |
793 | loff_t start; |
765 | loff_t start; |
794 | void __iomem *rom; |
766 | void __iomem *rom; |
795 | 767 | ||
796 | // ENTER(); |
768 | // ENTER(); |
797 | 769 | ||
798 | // dbgprintf("resource start %x end %x flags %x\n", |
770 | // dbgprintf("resource start %x end %x flags %x\n", |
799 | // res->start, res->end, res->flags); |
771 | // res->start, res->end, res->flags); |
800 | /* |
772 | /* |
801 | * IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy |
773 | * IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy |
802 | * memory map if the VGA enable bit of the Bridge Control register is |
774 | * memory map if the VGA enable bit of the Bridge Control register is |
803 | * set for embedded VGA. |
775 | * set for embedded VGA. |
804 | */ |
776 | */ |
805 | 777 | ||
806 | start = (loff_t)0xC0000; |
778 | start = (loff_t)0xC0000; |
807 | *size = 0x20000; /* cover C000:0 through E000:0 */ |
779 | *size = 0x20000; /* cover C000:0 through E000:0 */ |
808 | 780 | ||
809 | #if 0 |
781 | #if 0 |
810 | 782 | ||
811 | if (res->flags & IORESOURCE_ROM_SHADOW) { |
783 | if (res->flags & IORESOURCE_ROM_SHADOW) { |
812 | /* primary video rom always starts here */ |
784 | /* primary video rom always starts here */ |
813 | start = (loff_t)0xC0000; |
785 | start = (loff_t)0xC0000; |
814 | *size = 0x20000; /* cover C000:0 through E000:0 */ |
786 | *size = 0x20000; /* cover C000:0 through E000:0 */ |
815 | } else { |
787 | } else { |
816 | if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) { |
788 | if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) { |
817 | *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); |
789 | *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); |
818 | return (void __iomem *)(unsigned long) |
790 | return (void __iomem *)(unsigned long) |
819 | pci_resource_start(pdev, PCI_ROM_RESOURCE); |
791 | pci_resource_start(pdev, PCI_ROM_RESOURCE); |
820 | } else { |
792 | } else { |
821 | /* assign the ROM an address if it doesn't have one */ |
793 | /* assign the ROM an address if it doesn't have one */ |
822 | // if (res->parent == NULL && |
794 | // if (res->parent == NULL && |
823 | // pci_assign_resource(pdev,PCI_ROM_RESOURCE)) |
795 | // pci_assign_resource(pdev,PCI_ROM_RESOURCE)) |
824 | return NULL; |
796 | return NULL; |
825 | // start = pci_resource_start(pdev, PCI_ROM_RESOURCE); |
797 | // start = pci_resource_start(pdev, PCI_ROM_RESOURCE); |
826 | // *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); |
798 | // *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); |
827 | // if (*size == 0) |
799 | // if (*size == 0) |
828 | // return NULL; |
800 | // return NULL; |
829 | 801 | ||
830 | /* Enable ROM space decodes */ |
802 | /* Enable ROM space decodes */ |
831 | // if (pci_enable_rom(pdev)) |
803 | // if (pci_enable_rom(pdev)) |
832 | // return NULL; |
804 | // return NULL; |
833 | } |
805 | } |
834 | } |
806 | } |
835 | #endif |
807 | #endif |
836 | 808 | ||
837 | rom = ioremap(start, *size); |
809 | rom = ioremap(start, *size); |
838 | if (!rom) { |
810 | if (!rom) { |
839 | /* restore enable if ioremap fails */ |
811 | /* restore enable if ioremap fails */ |
840 | if (!(res->flags & (IORESOURCE_ROM_ENABLE | |
812 | if (!(res->flags & (IORESOURCE_ROM_ENABLE | |
841 | IORESOURCE_ROM_SHADOW | |
813 | IORESOURCE_ROM_SHADOW | |
842 | IORESOURCE_ROM_COPY))) |
814 | IORESOURCE_ROM_COPY))) |
843 | pci_disable_rom(pdev); |
815 | pci_disable_rom(pdev); |
844 | return NULL; |
816 | return NULL; |
845 | } |
817 | } |
846 | 818 | ||
847 | /* |
819 | /* |
848 | * Try to find the true size of the ROM since sometimes the PCI window |
820 | * Try to find the true size of the ROM since sometimes the PCI window |
849 | * size is much larger than the actual size of the ROM. |
821 | * size is much larger than the actual size of the ROM. |
850 | * True size is important if the ROM is going to be copied. |
822 | * True size is important if the ROM is going to be copied. |
851 | */ |
823 | */ |
852 | *size = pci_get_rom_size(pdev, rom, *size); |
824 | *size = pci_get_rom_size(pdev, rom, *size); |
853 | // LEAVE(); |
825 | // LEAVE(); |
854 | return rom; |
826 | return rom; |
855 | } |
827 | } |
856 | 828 | ||
857 | void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom) |
829 | void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom) |
858 | { |
830 | { |
859 | struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; |
831 | struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; |
860 | 832 | ||
861 | if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) |
833 | if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) |
862 | return; |
834 | return; |
863 | 835 | ||
864 | iounmap(rom); |
836 | iounmap(rom); |
865 | 837 | ||
866 | /* Disable again before continuing, leave enabled if pci=rom */ |
838 | /* Disable again before continuing, leave enabled if pci=rom */ |
867 | if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW))) |
839 | if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW))) |
868 | pci_disable_rom(pdev); |
840 | pci_disable_rom(pdev); |
869 | } |
841 | } |
870 | 842 | ||
871 | int pci_set_dma_mask(struct pci_dev *dev, u64 mask) |
843 | int pci_set_dma_mask(struct pci_dev *dev, u64 mask) |
872 | { |
844 | { |
873 | dev->dma_mask = mask; |
845 | dev->dma_mask = mask; |
874 | 846 | ||
875 | return 0; |
847 | return 0; |
876 | } |
848 | } |
877 | 849 | ||
878 | 850 | ||
879 | 851 | ||
880 | static void __pci_set_master(struct pci_dev *dev, bool enable) |
852 | static void __pci_set_master(struct pci_dev *dev, bool enable) |
881 | { |
853 | { |
882 | u16 old_cmd, cmd; |
854 | u16 old_cmd, cmd; |
883 | 855 | ||
884 | pci_read_config_word(dev, PCI_COMMAND, &old_cmd); |
856 | pci_read_config_word(dev, PCI_COMMAND, &old_cmd); |
885 | if (enable) |
857 | if (enable) |
886 | cmd = old_cmd | PCI_COMMAND_MASTER; |
858 | cmd = old_cmd | PCI_COMMAND_MASTER; |
887 | else |
859 | else |
888 | cmd = old_cmd & ~PCI_COMMAND_MASTER; |
860 | cmd = old_cmd & ~PCI_COMMAND_MASTER; |
889 | if (cmd != old_cmd) { |
861 | if (cmd != old_cmd) { |
890 | pci_write_config_word(dev, PCI_COMMAND, cmd); |
862 | pci_write_config_word(dev, PCI_COMMAND, cmd); |
891 | } |
863 | } |
892 | dev->is_busmaster = enable; |
864 | dev->is_busmaster = enable; |
893 | } |
865 | } |
894 | 866 | ||
895 | 867 | ||
896 | /* pci_set_master - enables bus-mastering for device dev |
868 | /* pci_set_master - enables bus-mastering for device dev |
897 | * @dev: the PCI device to enable |
869 | * @dev: the PCI device to enable |
898 | * |
870 | * |
899 | * Enables bus-mastering on the device and calls pcibios_set_master() |
871 | * Enables bus-mastering on the device and calls pcibios_set_master() |
900 | * to do the needed arch specific settings. |
872 | * to do the needed arch specific settings. |
901 | */ |
873 | */ |
902 | void pci_set_master(struct pci_dev *dev) |
874 | void pci_set_master(struct pci_dev *dev) |
903 | { |
875 | { |
904 | __pci_set_master(dev, true); |
876 | __pci_set_master(dev, true); |
905 | // pcibios_set_master(dev); |
877 | // pcibios_set_master(dev); |
906 | } |
878 | } |
907 | 879 | ||
908 | /** |
880 | /** |
909 | * pci_clear_master - disables bus-mastering for device dev |
881 | * pci_clear_master - disables bus-mastering for device dev |
910 | * @dev: the PCI device to disable |
882 | * @dev: the PCI device to disable |
911 | */ |
883 | */ |
912 | void pci_clear_master(struct pci_dev *dev) |
884 | void pci_clear_master(struct pci_dev *dev) |
913 | { |
885 | { |
914 | __pci_set_master(dev, false); |
886 | __pci_set_master(dev, false); |
915 | } |
887 | } |
916 | 888 | ||
917 | 889 | ||
918 | static inline int pcie_cap_version(const struct pci_dev *dev) |
890 | static inline int pcie_cap_version(const struct pci_dev *dev) |
919 | { |
891 | { |
920 | return dev->pcie_flags_reg & PCI_EXP_FLAGS_VERS; |
892 | return dev->pcie_flags_reg & PCI_EXP_FLAGS_VERS; |
921 | } |
893 | } |
922 | 894 | ||
923 | static inline bool pcie_cap_has_devctl(const struct pci_dev *dev) |
895 | static inline bool pcie_cap_has_devctl(const struct pci_dev *dev) |
924 | { |
896 | { |
925 | return true; |
897 | return true; |
926 | } |
898 | } |
927 | 899 | ||
928 | static inline bool pcie_cap_has_lnkctl(const struct pci_dev *dev) |
900 | static inline bool pcie_cap_has_lnkctl(const struct pci_dev *dev) |
929 | { |
901 | { |
930 | int type = pci_pcie_type(dev); |
902 | int type = pci_pcie_type(dev); |
931 | 903 | ||
932 | return pcie_cap_version(dev) > 1 || |
904 | return pcie_cap_version(dev) > 1 || |
933 | type == PCI_EXP_TYPE_ROOT_PORT || |
905 | type == PCI_EXP_TYPE_ROOT_PORT || |
934 | type == PCI_EXP_TYPE_ENDPOINT || |
906 | type == PCI_EXP_TYPE_ENDPOINT || |
935 | type == PCI_EXP_TYPE_LEG_END; |
907 | type == PCI_EXP_TYPE_LEG_END; |
936 | } |
908 | } |
937 | 909 | ||
938 | static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev) |
910 | static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev) |
939 | { |
911 | { |
940 | int type = pci_pcie_type(dev); |
912 | int type = pci_pcie_type(dev); |
941 | 913 | ||
942 | return pcie_cap_version(dev) > 1 || |
914 | return pcie_cap_version(dev) > 1 || |
943 | type == PCI_EXP_TYPE_ROOT_PORT || |
915 | type == PCI_EXP_TYPE_ROOT_PORT || |
944 | (type == PCI_EXP_TYPE_DOWNSTREAM && |
916 | (type == PCI_EXP_TYPE_DOWNSTREAM && |
945 | dev->pcie_flags_reg & PCI_EXP_FLAGS_SLOT); |
917 | dev->pcie_flags_reg & PCI_EXP_FLAGS_SLOT); |
946 | } |
918 | } |
947 | 919 | ||
948 | static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev) |
920 | static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev) |
949 | { |
921 | { |
950 | int type = pci_pcie_type(dev); |
922 | int type = pci_pcie_type(dev); |
951 | 923 | ||
952 | return pcie_cap_version(dev) > 1 || |
924 | return pcie_cap_version(dev) > 1 || |
953 | type == PCI_EXP_TYPE_ROOT_PORT || |
925 | type == PCI_EXP_TYPE_ROOT_PORT || |
954 | type == PCI_EXP_TYPE_RC_EC; |
926 | type == PCI_EXP_TYPE_RC_EC; |
955 | } |
927 | } |
956 | 928 | ||
957 | static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos) |
929 | static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos) |
958 | { |
930 | { |
959 | if (!pci_is_pcie(dev)) |
931 | if (!pci_is_pcie(dev)) |
960 | return false; |
932 | return false; |
961 | 933 | ||
962 | switch (pos) { |
934 | switch (pos) { |
963 | case PCI_EXP_FLAGS_TYPE: |
935 | case PCI_EXP_FLAGS_TYPE: |
964 | return true; |
936 | return true; |
965 | case PCI_EXP_DEVCAP: |
937 | case PCI_EXP_DEVCAP: |
966 | case PCI_EXP_DEVCTL: |
938 | case PCI_EXP_DEVCTL: |
967 | case PCI_EXP_DEVSTA: |
939 | case PCI_EXP_DEVSTA: |
968 | return pcie_cap_has_devctl(dev); |
940 | return pcie_cap_has_devctl(dev); |
969 | case PCI_EXP_LNKCAP: |
941 | case PCI_EXP_LNKCAP: |
970 | case PCI_EXP_LNKCTL: |
942 | case PCI_EXP_LNKCTL: |
971 | case PCI_EXP_LNKSTA: |
943 | case PCI_EXP_LNKSTA: |
972 | return pcie_cap_has_lnkctl(dev); |
944 | return pcie_cap_has_lnkctl(dev); |
973 | case PCI_EXP_SLTCAP: |
945 | case PCI_EXP_SLTCAP: |
974 | case PCI_EXP_SLTCTL: |
946 | case PCI_EXP_SLTCTL: |
975 | case PCI_EXP_SLTSTA: |
947 | case PCI_EXP_SLTSTA: |
976 | return pcie_cap_has_sltctl(dev); |
948 | return pcie_cap_has_sltctl(dev); |
977 | case PCI_EXP_RTCTL: |
949 | case PCI_EXP_RTCTL: |
978 | case PCI_EXP_RTCAP: |
950 | case PCI_EXP_RTCAP: |
979 | case PCI_EXP_RTSTA: |
951 | case PCI_EXP_RTSTA: |
980 | return pcie_cap_has_rtctl(dev); |
952 | return pcie_cap_has_rtctl(dev); |
981 | case PCI_EXP_DEVCAP2: |
953 | case PCI_EXP_DEVCAP2: |
982 | case PCI_EXP_DEVCTL2: |
954 | case PCI_EXP_DEVCTL2: |
983 | case PCI_EXP_LNKCAP2: |
955 | case PCI_EXP_LNKCAP2: |
984 | case PCI_EXP_LNKCTL2: |
956 | case PCI_EXP_LNKCTL2: |
985 | case PCI_EXP_LNKSTA2: |
957 | case PCI_EXP_LNKSTA2: |
986 | return pcie_cap_version(dev) > 1; |
958 | return pcie_cap_version(dev) > 1; |
987 | default: |
959 | default: |
988 | return false; |
960 | return false; |
989 | } |
961 | } |
990 | } |
962 | } |
991 | 963 | ||
992 | /* |
964 | /* |
993 | * Note that these accessor functions are only for the "PCI Express |
965 | * Note that these accessor functions are only for the "PCI Express |
994 | * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the |
966 | * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the |
995 | * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.) |
967 | * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.) |
996 | */ |
968 | */ |
997 | int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val) |
969 | int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val) |
998 | { |
970 | { |
999 | int ret; |
971 | int ret; |
1000 | 972 | ||
1001 | *val = 0; |
973 | *val = 0; |
1002 | if (pos & 1) |
974 | if (pos & 1) |
1003 | return -EINVAL; |
975 | return -EINVAL; |
1004 | 976 | ||
1005 | if (pcie_capability_reg_implemented(dev, pos)) { |
977 | if (pcie_capability_reg_implemented(dev, pos)) { |
1006 | ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val); |
978 | ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val); |
1007 | /* |
979 | /* |
1008 | * Reset *val to 0 if pci_read_config_word() fails, it may |
980 | * Reset *val to 0 if pci_read_config_word() fails, it may |
1009 | * have been written as 0xFFFF if hardware error happens |
981 | * have been written as 0xFFFF if hardware error happens |
1010 | * during pci_read_config_word(). |
982 | * during pci_read_config_word(). |
1011 | */ |
983 | */ |
1012 | if (ret) |
984 | if (ret) |
1013 | *val = 0; |
985 | *val = 0; |
1014 | return ret; |
986 | return ret; |
1015 | } |
987 | } |
1016 | 988 | ||
1017 | /* |
989 | /* |
1018 | * For Functions that do not implement the Slot Capabilities, |
990 | * For Functions that do not implement the Slot Capabilities, |
1019 | * Slot Status, and Slot Control registers, these spaces must |
991 | * Slot Status, and Slot Control registers, these spaces must |
1020 | * be hardwired to 0b, with the exception of the Presence Detect |
992 | * be hardwired to 0b, with the exception of the Presence Detect |
1021 | * State bit in the Slot Status register of Downstream Ports, |
993 | * State bit in the Slot Status register of Downstream Ports, |
1022 | * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8) |
994 | * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8) |
1023 | */ |
995 | */ |
1024 | if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && |
996 | if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && |
1025 | pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { |
997 | pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { |
1026 | *val = PCI_EXP_SLTSTA_PDS; |
998 | *val = PCI_EXP_SLTSTA_PDS; |
1027 | } |
999 | } |
1028 | 1000 | ||
1029 | return 0; |
1001 | return 0; |
1030 | } |
1002 | } |
1031 | EXPORT_SYMBOL(pcie_capability_read_word); |
1003 | EXPORT_SYMBOL(pcie_capability_read_word); |
1032 | 1004 | ||
1033 | int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val) |
1005 | int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val) |
1034 | { |
1006 | { |
1035 | int ret; |
1007 | int ret; |
1036 | 1008 | ||
1037 | *val = 0; |
1009 | *val = 0; |
1038 | if (pos & 3) |
1010 | if (pos & 3) |
1039 | return -EINVAL; |
1011 | return -EINVAL; |
1040 | 1012 | ||
1041 | if (pcie_capability_reg_implemented(dev, pos)) { |
1013 | if (pcie_capability_reg_implemented(dev, pos)) { |
1042 | ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val); |
1014 | ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val); |
1043 | /* |
1015 | /* |
1044 | * Reset *val to 0 if pci_read_config_dword() fails, it may |
1016 | * Reset *val to 0 if pci_read_config_dword() fails, it may |
1045 | * have been written as 0xFFFFFFFF if hardware error happens |
1017 | * have been written as 0xFFFFFFFF if hardware error happens |
1046 | * during pci_read_config_dword(). |
1018 | * during pci_read_config_dword(). |
1047 | */ |
1019 | */ |
1048 | if (ret) |
1020 | if (ret) |
1049 | *val = 0; |
1021 | *val = 0; |
1050 | return ret; |
1022 | return ret; |
1051 | } |
1023 | } |
1052 | 1024 | ||
1053 | if (pci_is_pcie(dev) && pos == PCI_EXP_SLTCTL && |
1025 | if (pci_is_pcie(dev) && pos == PCI_EXP_SLTCTL && |
1054 | pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { |
1026 | pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { |
1055 | *val = PCI_EXP_SLTSTA_PDS; |
1027 | *val = PCI_EXP_SLTSTA_PDS; |
1056 | } |
1028 | } |
1057 | 1029 | ||
1058 | return 0; |
1030 | return 0; |
1059 | } |
1031 | } |
1060 | EXPORT_SYMBOL(pcie_capability_read_dword); |
1032 | EXPORT_SYMBOL(pcie_capability_read_dword); |
1061 | 1033 | ||
1062 | int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) |
1034 | int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) |
1063 | { |
1035 | { |
1064 | if (pos & 1) |
1036 | if (pos & 1) |
1065 | return -EINVAL; |
1037 | return -EINVAL; |
1066 | 1038 | ||
1067 | if (!pcie_capability_reg_implemented(dev, pos)) |
1039 | if (!pcie_capability_reg_implemented(dev, pos)) |
1068 | return 0; |
1040 | return 0; |
1069 | 1041 | ||
1070 | return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val); |
1042 | return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val); |
1071 | } |
1043 | } |
1072 | EXPORT_SYMBOL(pcie_capability_write_word); |
1044 | EXPORT_SYMBOL(pcie_capability_write_word); |
1073 | 1045 | ||
1074 | int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val) |
1046 | int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val) |
1075 | { |
1047 | { |
1076 | if (pos & 3) |
1048 | if (pos & 3) |
1077 | return -EINVAL; |
1049 | return -EINVAL; |
1078 | 1050 | ||
1079 | if (!pcie_capability_reg_implemented(dev, pos)) |
1051 | if (!pcie_capability_reg_implemented(dev, pos)) |
1080 | return 0; |
1052 | return 0; |
1081 | 1053 | ||
1082 | return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val); |
1054 | return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val); |
1083 | } |
1055 | } |
1084 | EXPORT_SYMBOL(pcie_capability_write_dword);>=>>>><>><>><>>4)><4)> |
1056 | EXPORT_SYMBOL(pcie_capability_write_dword);>=>>>><>><>><>>4)><4)> |