Rev 3031 | Rev 5078 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 3031 | Rev 3764 | ||
---|---|---|---|
1 | #include |
1 | #include |
2 | #include |
2 | #include |
- | 3 | #include |
|
3 | #include |
4 | #include |
4 | #include |
5 | #include |
5 | #include |
6 | #include |
6 | #include |
7 | #include |
7 | #include |
8 | #include |
8 | 9 | ||
/* Provided by the platform layer: returns non-zero when a discovered
 * device (packed vendor/device 'id' at bus 'busnr', slot/function
 * 'devfn') should be kept during the bus scan. */
extern int pci_scan_filter(u32_t id, u32_t busnr, u32_t devfn);

/* Global list head: every device found by pci_scan_slot() is linked here. */
static LIST_HEAD(devices);

/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */
#define IORESOURCE_PCI_FIXED            (1<<4)  /* Do not move resource */

/* Flags used for the fixed legacy-mode IDE port ranges (0x1F0 etc.). */
#define LEGACY_IO_RESOURCE      (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
17 | 18 | ||
18 | /* |
19 | /* |
19 | * Translate the low bits of the PCI base |
20 | * Translate the low bits of the PCI base |
20 | * to the resource type |
21 | * to the resource type |
21 | */ |
22 | */ |
22 | static inline unsigned int pci_calc_resource_flags(unsigned int flags) |
23 | static inline unsigned int pci_calc_resource_flags(unsigned int flags) |
23 | { |
24 | { |
24 | if (flags & PCI_BASE_ADDRESS_SPACE_IO) |
25 | if (flags & PCI_BASE_ADDRESS_SPACE_IO) |
25 | return IORESOURCE_IO; |
26 | return IORESOURCE_IO; |
26 | 27 | ||
27 | if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH) |
28 | if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH) |
28 | return IORESOURCE_MEM | IORESOURCE_PREFETCH; |
29 | return IORESOURCE_MEM | IORESOURCE_PREFETCH; |
29 | 30 | ||
30 | return IORESOURCE_MEM; |
31 | return IORESOURCE_MEM; |
31 | } |
32 | } |
32 | 33 | ||
33 | 34 | ||
34 | static u32_t pci_size(u32_t base, u32_t maxbase, u32_t mask) |
35 | static u32_t pci_size(u32_t base, u32_t maxbase, u32_t mask) |
35 | { |
36 | { |
36 | u32_t size = mask & maxbase; /* Find the significant bits */ |
37 | u32_t size = mask & maxbase; /* Find the significant bits */ |
37 | 38 | ||
38 | if (!size) |
39 | if (!size) |
39 | return 0; |
40 | return 0; |
40 | 41 | ||
41 | /* Get the lowest of them to find the decode size, and |
42 | /* Get the lowest of them to find the decode size, and |
42 | from that the extent. */ |
43 | from that the extent. */ |
43 | size = (size & ~(size-1)) - 1; |
44 | size = (size & ~(size-1)) - 1; |
44 | 45 | ||
45 | /* base == maxbase can be valid only if the BAR has |
46 | /* base == maxbase can be valid only if the BAR has |
46 | already been programmed with all 1s. */ |
47 | already been programmed with all 1s. */ |
47 | if (base == maxbase && ((base | size) & mask) != mask) |
48 | if (base == maxbase && ((base | size) & mask) != mask) |
48 | return 0; |
49 | return 0; |
49 | 50 | ||
50 | return size; |
51 | return size; |
51 | } |
52 | } |
52 | 53 | ||
53 | static u64_t pci_size64(u64_t base, u64_t maxbase, u64_t mask) |
54 | static u64_t pci_size64(u64_t base, u64_t maxbase, u64_t mask) |
54 | { |
55 | { |
55 | u64_t size = mask & maxbase; /* Find the significant bits */ |
56 | u64_t size = mask & maxbase; /* Find the significant bits */ |
56 | 57 | ||
57 | if (!size) |
58 | if (!size) |
58 | return 0; |
59 | return 0; |
59 | 60 | ||
60 | /* Get the lowest of them to find the decode size, and |
61 | /* Get the lowest of them to find the decode size, and |
61 | from that the extent. */ |
62 | from that the extent. */ |
62 | size = (size & ~(size-1)) - 1; |
63 | size = (size & ~(size-1)) - 1; |
63 | 64 | ||
64 | /* base == maxbase can be valid only if the BAR has |
65 | /* base == maxbase can be valid only if the BAR has |
65 | already been programmed with all 1s. */ |
66 | already been programmed with all 1s. */ |
66 | if (base == maxbase && ((base | size) & mask) != mask) |
67 | if (base == maxbase && ((base | size) & mask) != mask) |
67 | return 0; |
68 | return 0; |
68 | 69 | ||
69 | return size; |
70 | return size; |
70 | } |
71 | } |
71 | 72 | ||
72 | static inline int is_64bit_memory(u32_t mask) |
73 | static inline int is_64bit_memory(u32_t mask) |
73 | { |
74 | { |
74 | if ((mask & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) == |
75 | if ((mask & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) == |
75 | (PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64)) |
76 | (PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64)) |
76 | return 1; |
77 | return 1; |
77 | return 0; |
78 | return 0; |
78 | } |
79 | } |
79 | 80 | ||
/*
 * Probe the first 'howmany' base address registers of 'dev' and fill in
 * dev->resource[] with the decoded start/end/flags.  When 'rom' is a
 * non-zero config-space offset, the expansion ROM BAR at that offset is
 * probed as well and stored in dev->resource[PCI_ROM_RESOURCE].
 *
 * Sizing uses the classic write-all-ones probe: save the BAR, write ~0,
 * read back the size mask, then restore the original value.  The
 * read/write ordering here is therefore load-bearing — do not reorder.
 */
static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
    u32_t pos, reg, next;
    u32_t l, sz;
    struct resource *res;

    for(pos=0; pos < howmany; pos = next)
    {
        u64_t l64;
        u64_t sz64;
        u32_t raw_sz;

        next = pos + 1;

        res = &dev->resource[pos];

        /* BARs are consecutive dwords starting at PCI_BASE_ADDRESS_0. */
        reg = PCI_BASE_ADDRESS_0 + (pos << 2);
        l = PciRead32(dev->busnr, dev->devfn, reg);
        PciWrite32(dev->busnr, dev->devfn, reg, ~0);
        sz = PciRead32(dev->busnr, dev->devfn, reg);
        PciWrite32(dev->busnr, dev->devfn, reg, l);

        /* All zeros or all ones: BAR not implemented. */
        if (!sz || sz == 0xffffffff)
            continue;

        if (l == 0xffffffff)
            l = 0;

        /* Keep the raw size mask: needed for the 64-bit combine below. */
        raw_sz = sz;
        if ((l & PCI_BASE_ADDRESS_SPACE) ==
            PCI_BASE_ADDRESS_SPACE_MEMORY)
        {
            sz = pci_size(l, sz, (u32_t)PCI_BASE_ADDRESS_MEM_MASK);
            /*
             * For 64bit prefetchable memory sz could be 0, if the
             * real size is bigger than 4G, so we need to check
             * szhi for that.
             */
            if (!is_64bit_memory(l) && !sz)
                continue;
            res->start = l & PCI_BASE_ADDRESS_MEM_MASK;
            res->flags |= l & ~PCI_BASE_ADDRESS_MEM_MASK;
        }
        else {
            /* I/O BAR: only the low 16 bits of the mask are meaningful. */
            sz = pci_size(l, sz, PCI_BASE_ADDRESS_IO_MASK & 0xffff);
            if (!sz)
                continue;
            res->start = l & PCI_BASE_ADDRESS_IO_MASK;
            res->flags |= l & ~PCI_BASE_ADDRESS_IO_MASK;
        }
        res->end = res->start + (unsigned long) sz;
        res->flags |= pci_calc_resource_flags(l);
        if (is_64bit_memory(l))
        {
            u32_t szhi, lhi;

            /* Probe the high dword of the 64-bit BAR the same way. */
            lhi = PciRead32(dev->busnr, dev->devfn, reg+4);
            PciWrite32(dev->busnr, dev->devfn, reg+4, ~0);
            szhi = PciRead32(dev->busnr, dev->devfn, reg+4);
            PciWrite32(dev->busnr, dev->devfn, reg+4, lhi);
            sz64 = ((u64_t)szhi << 32) | raw_sz;
            l64 = ((u64_t)lhi << 32) | l;
            sz64 = pci_size64(l64, sz64, PCI_BASE_ADDRESS_MEM_MASK);
            /* A 64-bit BAR consumes two register slots. */
            next++;

#if BITS_PER_LONG == 64
            if (!sz64) {
                res->start = 0;
                res->end = 0;
                res->flags = 0;
                continue;
            }
            res->start = l64 & PCI_BASE_ADDRESS_MEM_MASK;
            res->end = res->start + sz64;
#else
            if (sz64 > 0x100000000ULL) {
                /* 32-bit kernel cannot address a >4G BAR at all. */
                printk(KERN_ERR "PCI: Unable to handle 64-bit "
                       "BAR for device %s\n", pci_name(dev));
                res->start = 0;
                res->flags = 0;
            }
            else if (lhi)
            {
                /* 64-bit wide address, treat as disabled */
                PciWrite32(dev->busnr, dev->devfn, reg,
                           l & ~(u32_t)PCI_BASE_ADDRESS_MEM_MASK);
                PciWrite32(dev->busnr, dev->devfn, reg+4, 0);
                res->start = 0;
                res->end = sz;
            }
#endif
        }
    }

    if ( rom )
    {
        dev->rom_base_reg = rom;
        res = &dev->resource[PCI_ROM_RESOURCE];

        /* Same all-ones probe, but keep the ROM decode disabled while
         * sizing (write ~PCI_ROM_ADDRESS_ENABLE instead of ~0). */
        l = PciRead32(dev->busnr, dev->devfn, rom);
        PciWrite32(dev->busnr, dev->devfn, rom, ~PCI_ROM_ADDRESS_ENABLE);
        sz = PciRead32(dev->busnr, dev->devfn, rom);
        PciWrite32(dev->busnr, dev->devfn, rom, l);

        if (l == 0xffffffff)
            l = 0;

        if (sz && sz != 0xffffffff)
        {
            sz = pci_size(l, sz, (u32_t)PCI_ROM_ADDRESS_MASK);

            if (sz)
            {
                res->flags = (l & IORESOURCE_ROM_ENABLE) |
                             IORESOURCE_MEM | IORESOURCE_PREFETCH |
                             IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
                res->start = l & PCI_ROM_ADDRESS_MASK;
                res->end = res->start + (unsigned long) sz;
            }
        }
    }
}
202 | 203 | ||
203 | static void pci_read_irq(struct pci_dev *dev) |
204 | static void pci_read_irq(struct pci_dev *dev) |
204 | { |
205 | { |
205 | u8_t irq; |
206 | u8_t irq; |
206 | 207 | ||
207 | irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_PIN); |
208 | irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_PIN); |
208 | dev->pin = irq; |
209 | dev->pin = irq; |
209 | if (irq) |
210 | if (irq) |
210 | irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_LINE); |
211 | irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_LINE); |
211 | dev->irq = irq; |
212 | dev->irq = irq; |
212 | }; |
213 | }; |
213 | 214 | ||
214 | 215 | ||
/*
 * Fill in class/revision, IRQ routing, BARs and subsystem ids for an
 * already-identified device, dispatching on its header type.
 * Returns 0 on success, -1 for an unknown header type.  A class code
 * that contradicts the header type is reported and cleared
 * (PCI_CLASS_NOT_DEFINED) but the device is still accepted.
 */
int pci_setup_device(struct pci_dev *dev)
{
    u32_t class;

    class = PciRead32(dev->busnr, dev->devfn, PCI_CLASS_REVISION);
    dev->revision = class & 0xff;
    class >>= 8;                /* upper 3 bytes */
    dev->class = class;

    /* "Unknown power state" */
    // dev->current_state = PCI_UNKNOWN;

    /* Early fixups, before probing the BARs */
    // pci_fixup_device(pci_fixup_early, dev);
    /* Drop the programming-interface byte: compare base class + subclass. */
    class = dev->class >> 8;

    switch (dev->hdr_type)
    {
        case PCI_HEADER_TYPE_NORMAL:        /* standard header */
            if (class == PCI_CLASS_BRIDGE_PCI)
                goto bad;
            pci_read_irq(dev);
            /* Type 0 headers have 6 BARs plus a ROM BAR. */
            pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
            dev->subsystem_vendor = PciRead16(dev->busnr, dev->devfn,PCI_SUBSYSTEM_VENDOR_ID);
            dev->subsystem_device = PciRead16(dev->busnr, dev->devfn, PCI_SUBSYSTEM_ID);

            /*
             * Do the ugly legacy mode stuff here rather than broken chip
             * quirk code. Legacy mode ATA controllers have fixed
             * addresses. These are not always echoed in BAR0-3, and
             * BAR0-3 in a few cases contain junk!
             */
            if (class == PCI_CLASS_STORAGE_IDE)
            {
                u8_t progif;

                progif = PciRead8(dev->busnr, dev->devfn,PCI_CLASS_PROG);
                /* Bit 0 clear: primary channel in legacy mode. */
                if ((progif & 1) == 0)
                {
                    dev->resource[0].start = 0x1F0;
                    dev->resource[0].end = 0x1F7;
                    dev->resource[0].flags = LEGACY_IO_RESOURCE;
                    dev->resource[1].start = 0x3F6;
                    dev->resource[1].end = 0x3F6;
                    dev->resource[1].flags = LEGACY_IO_RESOURCE;
                }
                /* Bit 2 clear: secondary channel in legacy mode. */
                if ((progif & 4) == 0)
                {
                    dev->resource[2].start = 0x170;
                    dev->resource[2].end = 0x177;
                    dev->resource[2].flags = LEGACY_IO_RESOURCE;
                    dev->resource[3].start = 0x376;
                    dev->resource[3].end = 0x376;
                    dev->resource[3].flags = LEGACY_IO_RESOURCE;
                };
            }
            break;

        case PCI_HEADER_TYPE_BRIDGE:        /* bridge header */
            if (class != PCI_CLASS_BRIDGE_PCI)
                goto bad;
            /* The PCI-to-PCI bridge spec requires that subtractive
               decoding (i.e. transparent) bridge must have programming
               interface code of 0x01. */
            pci_read_irq(dev);
            dev->transparent = ((dev->class & 0xff) == 1);
            /* Type 1 headers have only 2 BARs and a different ROM offset. */
            pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
            break;

        case PCI_HEADER_TYPE_CARDBUS:       /* CardBus bridge header */
            if (class != PCI_CLASS_BRIDGE_CARDBUS)
                goto bad;
            pci_read_irq(dev);
            /* Type 2 headers have a single BAR and no ROM. */
            pci_read_bases(dev, 1, 0);
            dev->subsystem_vendor = PciRead16(dev->busnr,
                                              dev->devfn,
                                              PCI_CB_SUBSYSTEM_VENDOR_ID);

            dev->subsystem_device = PciRead16(dev->busnr,
                                              dev->devfn,
                                              PCI_CB_SUBSYSTEM_ID);
            break;

        default:                            /* unknown header */
            printk(KERN_ERR "PCI: device %s has unknown header type %02x, ignoring.\n",
                   pci_name(dev), dev->hdr_type);
            return -1;

        bad:
            printk(KERN_ERR "PCI: %s: class %x doesn't match header type %02x. Ignoring class.\n",
                   pci_name(dev), class, dev->hdr_type);
            dev->class = PCI_CLASS_NOT_DEFINED;
    }

    /* We found a fine healthy device, go go go... */

    return 0;
};
313 | 314 | ||
314 | static pci_dev_t* pci_scan_device(u32_t busnr, int devfn) |
315 | static pci_dev_t* pci_scan_device(u32_t busnr, int devfn) |
315 | { |
316 | { |
316 | pci_dev_t *dev; |
317 | pci_dev_t *dev; |
317 | 318 | ||
318 | u32_t id; |
319 | u32_t id; |
319 | u8_t hdr; |
320 | u8_t hdr; |
320 | 321 | ||
321 | int timeout = 10; |
322 | int timeout = 10; |
322 | 323 | ||
323 | id = PciRead32(busnr, devfn, PCI_VENDOR_ID); |
324 | id = PciRead32(busnr, devfn, PCI_VENDOR_ID); |
324 | 325 | ||
325 | /* some broken boards return 0 or ~0 if a slot is empty: */ |
326 | /* some broken boards return 0 or ~0 if a slot is empty: */ |
326 | if (id == 0xffffffff || id == 0x00000000 || |
327 | if (id == 0xffffffff || id == 0x00000000 || |
327 | id == 0x0000ffff || id == 0xffff0000) |
328 | id == 0x0000ffff || id == 0xffff0000) |
328 | return NULL; |
329 | return NULL; |
329 | 330 | ||
330 | while (id == 0xffff0001) |
331 | while (id == 0xffff0001) |
331 | { |
332 | { |
332 | 333 | ||
333 | delay(timeout/10); |
334 | delay(timeout/10); |
334 | timeout *= 2; |
335 | timeout *= 2; |
335 | 336 | ||
336 | id = PciRead32(busnr, devfn, PCI_VENDOR_ID); |
337 | id = PciRead32(busnr, devfn, PCI_VENDOR_ID); |
337 | 338 | ||
338 | /* Card hasn't responded in 60 seconds? Must be stuck. */ |
339 | /* Card hasn't responded in 60 seconds? Must be stuck. */ |
339 | if (timeout > 60 * 100) |
340 | if (timeout > 60 * 100) |
340 | { |
341 | { |
341 | printk(KERN_WARNING "Device %04x:%02x:%02x.%d not " |
342 | printk(KERN_WARNING "Device %04x:%02x:%02x.%d not " |
342 | "responding\n", busnr,PCI_SLOT(devfn),PCI_FUNC(devfn)); |
343 | "responding\n", busnr,PCI_SLOT(devfn),PCI_FUNC(devfn)); |
343 | return NULL; |
344 | return NULL; |
344 | } |
345 | } |
345 | }; |
346 | }; |
346 | 347 | ||
347 | if( pci_scan_filter(id, busnr, devfn) == 0) |
348 | if( pci_scan_filter(id, busnr, devfn) == 0) |
348 | return NULL; |
349 | return NULL; |
349 | 350 | ||
350 | hdr = PciRead8(busnr, devfn, PCI_HEADER_TYPE); |
351 | hdr = PciRead8(busnr, devfn, PCI_HEADER_TYPE); |
351 | 352 | ||
352 | dev = (pci_dev_t*)kzalloc(sizeof(pci_dev_t), 0); |
353 | dev = (pci_dev_t*)kzalloc(sizeof(pci_dev_t), 0); |
353 | if(unlikely(dev == NULL)) |
354 | if(unlikely(dev == NULL)) |
354 | return NULL; |
355 | return NULL; |
355 | 356 | ||
356 | INIT_LIST_HEAD(&dev->link); |
357 | INIT_LIST_HEAD(&dev->link); |
357 | 358 | ||
358 | 359 | ||
359 | dev->pci_dev.busnr = busnr; |
360 | dev->pci_dev.busnr = busnr; |
360 | dev->pci_dev.devfn = devfn; |
361 | dev->pci_dev.devfn = devfn; |
361 | dev->pci_dev.hdr_type = hdr & 0x7f; |
362 | dev->pci_dev.hdr_type = hdr & 0x7f; |
362 | dev->pci_dev.multifunction = !!(hdr & 0x80); |
363 | dev->pci_dev.multifunction = !!(hdr & 0x80); |
363 | dev->pci_dev.vendor = id & 0xffff; |
364 | dev->pci_dev.vendor = id & 0xffff; |
364 | dev->pci_dev.device = (id >> 16) & 0xffff; |
365 | dev->pci_dev.device = (id >> 16) & 0xffff; |
365 | 366 | ||
366 | pci_setup_device(&dev->pci_dev); |
367 | pci_setup_device(&dev->pci_dev); |
367 | 368 | ||
368 | return dev; |
369 | return dev; |
369 | 370 | ||
370 | }; |
371 | }; |
371 | 372 | ||
372 | 373 | ||
373 | 374 | ||
374 | 375 | ||
375 | int pci_scan_slot(u32_t bus, int devfn) |
376 | int pci_scan_slot(u32_t bus, int devfn) |
376 | { |
377 | { |
377 | int func, nr = 0; |
378 | int func, nr = 0; |
378 | 379 | ||
379 | for (func = 0; func < 8; func++, devfn++) |
380 | for (func = 0; func < 8; func++, devfn++) |
380 | { |
381 | { |
381 | pci_dev_t *dev; |
382 | pci_dev_t *dev; |
382 | 383 | ||
383 | dev = pci_scan_device(bus, devfn); |
384 | dev = pci_scan_device(bus, devfn); |
384 | if( dev ) |
385 | if( dev ) |
385 | { |
386 | { |
386 | list_add(&dev->link, &devices); |
387 | list_add(&dev->link, &devices); |
387 | 388 | ||
388 | nr++; |
389 | nr++; |
389 | 390 | ||
390 | /* |
391 | /* |
391 | * If this is a single function device, |
392 | * If this is a single function device, |
392 | * don't scan past the first function. |
393 | * don't scan past the first function. |
393 | */ |
394 | */ |
394 | if (!dev->pci_dev.multifunction) |
395 | if (!dev->pci_dev.multifunction) |
395 | { |
396 | { |
396 | if (func > 0) { |
397 | if (func > 0) { |
397 | dev->pci_dev.multifunction = 1; |
398 | dev->pci_dev.multifunction = 1; |
398 | } |
399 | } |
399 | else { |
400 | else { |
400 | break; |
401 | break; |
401 | } |
402 | } |
402 | } |
403 | } |
403 | } |
404 | } |
404 | else { |
405 | else { |
405 | if (func == 0) |
406 | if (func == 0) |
406 | break; |
407 | break; |
407 | } |
408 | } |
408 | }; |
409 | }; |
409 | 410 | ||
410 | return nr; |
411 | return nr; |
411 | }; |
412 | }; |
412 | 413 | ||
/* Upper bound on capability-list hops, to survive malformed/looped lists. */
#define PCI_FIND_CAP_TTL 48

/*
 * Walk the capability linked list starting at the next-pointer held in
 * config register 'pos', looking for capability id 'cap'.  '*ttl' is
 * decremented per hop and bounds the walk.  Returns the config-space
 * offset of the matching capability, or 0 if not found.
 */
static int __pci_find_next_cap_ttl(unsigned int bus, unsigned int devfn,
                                   u8 pos, int cap, int *ttl)
{
    u8 id;

    while ((*ttl)--) {
        /* Follow the next pointer stored at 'pos'. */
        pos = PciRead8(bus, devfn, pos);
        /* Offsets below 0x40 are inside the standard header: end of list. */
        if (pos < 0x40)
            break;
        pos &= ~3;      /* capability entries are dword aligned */
        id = PciRead8(bus, devfn, pos + PCI_CAP_LIST_ID);
        if (id == 0xff) /* device returned all-ones: give up */
            break;
        if (id == cap)
            return pos;
        /* Advance to this entry's next-pointer field. */
        pos += PCI_CAP_LIST_NEXT;
    }
    return 0;
}
434 | 435 | ||
435 | static int __pci_find_next_cap(unsigned int bus, unsigned int devfn, |
436 | static int __pci_find_next_cap(unsigned int bus, unsigned int devfn, |
436 | u8 pos, int cap) |
437 | u8 pos, int cap) |
437 | { |
438 | { |
438 | int ttl = PCI_FIND_CAP_TTL; |
439 | int ttl = PCI_FIND_CAP_TTL; |
439 | 440 | ||
440 | return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl); |
441 | return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl); |
441 | } |
442 | } |
442 | 443 | ||
443 | static int __pci_bus_find_cap_start(unsigned int bus, |
444 | static int __pci_bus_find_cap_start(unsigned int bus, |
444 | unsigned int devfn, u8 hdr_type) |
445 | unsigned int devfn, u8 hdr_type) |
445 | { |
446 | { |
446 | u16 status; |
447 | u16 status; |
447 | 448 | ||
448 | status = PciRead16(bus, devfn, PCI_STATUS); |
449 | status = PciRead16(bus, devfn, PCI_STATUS); |
449 | if (!(status & PCI_STATUS_CAP_LIST)) |
450 | if (!(status & PCI_STATUS_CAP_LIST)) |
450 | return 0; |
451 | return 0; |
451 | 452 | ||
452 | switch (hdr_type) { |
453 | switch (hdr_type) { |
453 | case PCI_HEADER_TYPE_NORMAL: |
454 | case PCI_HEADER_TYPE_NORMAL: |
454 | case PCI_HEADER_TYPE_BRIDGE: |
455 | case PCI_HEADER_TYPE_BRIDGE: |
455 | return PCI_CAPABILITY_LIST; |
456 | return PCI_CAPABILITY_LIST; |
456 | case PCI_HEADER_TYPE_CARDBUS: |
457 | case PCI_HEADER_TYPE_CARDBUS: |
457 | return PCI_CB_CAPABILITY_LIST; |
458 | return PCI_CB_CAPABILITY_LIST; |
458 | default: |
459 | default: |
459 | return 0; |
460 | return 0; |
460 | } |
461 | } |
461 | 462 | ||
462 | return 0; |
463 | return 0; |
463 | } |
464 | } |
464 | 465 | ||
465 | 466 | ||
466 | int pci_find_capability(struct pci_dev *dev, int cap) |
467 | int pci_find_capability(struct pci_dev *dev, int cap) |
467 | { |
468 | { |
468 | int pos; |
469 | int pos; |
469 | 470 | ||
470 | pos = __pci_bus_find_cap_start(dev->busnr, dev->devfn, dev->hdr_type); |
471 | pos = __pci_bus_find_cap_start(dev->busnr, dev->devfn, dev->hdr_type); |
471 | if (pos) |
472 | if (pos) |
472 | pos = __pci_find_next_cap(dev->busnr, dev->devfn, pos, cap); |
473 | pos = __pci_find_next_cap(dev->busnr, dev->devfn, pos, cap); |
473 | 474 | ||
474 | return pos; |
475 | return pos; |
475 | } |
476 | } |
476 | 477 | ||
477 | 478 | ||
478 | 479 | ||
479 | 480 | ||
480 | int enum_pci_devices() |
481 | int enum_pci_devices() |
481 | { |
482 | { |
482 | pci_dev_t *dev; |
483 | pci_dev_t *dev; |
483 | u32_t last_bus; |
484 | u32_t last_bus; |
484 | u32_t bus = 0 , devfn = 0; |
485 | u32_t bus = 0 , devfn = 0; |
485 | 486 | ||
486 | 487 | ||
487 | last_bus = PciApi(1); |
488 | last_bus = PciApi(1); |
488 | 489 | ||
489 | 490 | ||
490 | if( unlikely(last_bus == -1)) |
491 | if( unlikely(last_bus == -1)) |
491 | return -1; |
492 | return -1; |
492 | 493 | ||
493 | for(;bus <= last_bus; bus++) |
494 | for(;bus <= last_bus; bus++) |
494 | { |
495 | { |
495 | for (devfn = 0; devfn < 0x100; devfn += 8) |
496 | for (devfn = 0; devfn < 0x100; devfn += 8) |
496 | pci_scan_slot(bus, devfn); |
497 | pci_scan_slot(bus, devfn); |
497 | 498 | ||
498 | 499 | ||
499 | } |
500 | } |
500 | for(dev = (pci_dev_t*)devices.next; |
501 | for(dev = (pci_dev_t*)devices.next; |
501 | &dev->link != &devices; |
502 | &dev->link != &devices; |
502 | dev = (pci_dev_t*)dev->link.next) |
503 | dev = (pci_dev_t*)dev->link.next) |
503 | { |
504 | { |
504 | dbgprintf("PCI device %x:%x bus:%x devfn:%x\n", |
505 | dbgprintf("PCI device %x:%x bus:%x devfn:%x\n", |
505 | dev->pci_dev.vendor, |
506 | dev->pci_dev.vendor, |
506 | dev->pci_dev.device, |
507 | dev->pci_dev.device, |
507 | dev->pci_dev.busnr, |
508 | dev->pci_dev.busnr, |
508 | dev->pci_dev.devfn); |
509 | dev->pci_dev.devfn); |
509 | 510 | ||
510 | } |
511 | } |
511 | return 0; |
512 | return 0; |
512 | } |
513 | } |
513 | 514 | ||
514 | const struct pci_device_id* find_pci_device(pci_dev_t* pdev, const struct pci_device_id *idlist) |
515 | const struct pci_device_id* find_pci_device(pci_dev_t* pdev, const struct pci_device_id *idlist) |
515 | { |
516 | { |
516 | pci_dev_t *dev; |
517 | pci_dev_t *dev; |
517 | const struct pci_device_id *ent; |
518 | const struct pci_device_id *ent; |
518 | 519 | ||
519 | for(dev = (pci_dev_t*)devices.next; |
520 | for(dev = (pci_dev_t*)devices.next; |
520 | &dev->link != &devices; |
521 | &dev->link != &devices; |
521 | dev = (pci_dev_t*)dev->link.next) |
522 | dev = (pci_dev_t*)dev->link.next) |
522 | { |
523 | { |
523 | if( dev->pci_dev.vendor != idlist->vendor ) |
524 | if( dev->pci_dev.vendor != idlist->vendor ) |
524 | continue; |
525 | continue; |
525 | 526 | ||
526 | for(ent = idlist; ent->vendor != 0; ent++) |
527 | for(ent = idlist; ent->vendor != 0; ent++) |
527 | { |
528 | { |
528 | if(unlikely(ent->device == dev->pci_dev.device)) |
529 | if(unlikely(ent->device == dev->pci_dev.device)) |
529 | { |
530 | { |
530 | pdev->pci_dev = dev->pci_dev; |
531 | pdev->pci_dev = dev->pci_dev; |
531 | return ent; |
532 | return ent; |
532 | } |
533 | } |
533 | }; |
534 | }; |
534 | } |
535 | } |
535 | 536 | ||
536 | return NULL; |
537 | return NULL; |
537 | }; |
538 | }; |
538 | 539 | ||
539 | struct pci_dev * |
540 | struct pci_dev * |
540 | pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from) |
541 | pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from) |
541 | { |
542 | { |
542 | pci_dev_t *dev; |
543 | pci_dev_t *dev; |
543 | 544 | ||
544 | dev = (pci_dev_t*)devices.next; |
545 | dev = (pci_dev_t*)devices.next; |
545 | 546 | ||
546 | if(from != NULL) |
547 | if(from != NULL) |
547 | { |
548 | { |
548 | for(; &dev->link != &devices; |
549 | for(; &dev->link != &devices; |
549 | dev = (pci_dev_t*)dev->link.next) |
550 | dev = (pci_dev_t*)dev->link.next) |
550 | { |
551 | { |
551 | if( &dev->pci_dev == from) |
552 | if( &dev->pci_dev == from) |
552 | { |
553 | { |
553 | dev = (pci_dev_t*)dev->link.next; |
554 | dev = (pci_dev_t*)dev->link.next; |
554 | break; |
555 | break; |
555 | }; |
556 | }; |
556 | } |
557 | } |
557 | }; |
558 | }; |
558 | 559 | ||
559 | for(; &dev->link != &devices; |
560 | for(; &dev->link != &devices; |
560 | dev = (pci_dev_t*)dev->link.next) |
561 | dev = (pci_dev_t*)dev->link.next) |
561 | { |
562 | { |
562 | if( dev->pci_dev.vendor != vendor ) |
563 | if( dev->pci_dev.vendor != vendor ) |
563 | continue; |
564 | continue; |
564 | 565 | ||
565 | if(dev->pci_dev.device == device) |
566 | if(dev->pci_dev.device == device) |
566 | { |
567 | { |
567 | return &dev->pci_dev; |
568 | return &dev->pci_dev; |
568 | } |
569 | } |
569 | } |
570 | } |
570 | return NULL; |
571 | return NULL; |
571 | }; |
572 | }; |
572 | 573 | ||
573 | 574 | ||
574 | struct pci_dev * pci_get_bus_and_slot(unsigned int bus, unsigned int devfn) |
575 | struct pci_dev * pci_get_bus_and_slot(unsigned int bus, unsigned int devfn) |
575 | { |
576 | { |
576 | pci_dev_t *dev; |
577 | pci_dev_t *dev; |
577 | 578 | ||
578 | for(dev = (pci_dev_t*)devices.next; |
579 | for(dev = (pci_dev_t*)devices.next; |
579 | &dev->link != &devices; |
580 | &dev->link != &devices; |
580 | dev = (pci_dev_t*)dev->link.next) |
581 | dev = (pci_dev_t*)dev->link.next) |
581 | { |
582 | { |
582 | if ( dev->pci_dev.busnr == bus && dev->pci_dev.devfn == devfn) |
583 | if ( dev->pci_dev.busnr == bus && dev->pci_dev.devfn == devfn) |
583 | return &dev->pci_dev; |
584 | return &dev->pci_dev; |
584 | } |
585 | } |
585 | return NULL; |
586 | return NULL; |
586 | } |
587 | } |
587 | 588 | ||
588 | struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from) |
589 | struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from) |
589 | { |
590 | { |
590 | pci_dev_t *dev; |
591 | pci_dev_t *dev; |
591 | 592 | ||
592 | dev = (pci_dev_t*)devices.next; |
593 | dev = (pci_dev_t*)devices.next; |
593 | 594 | ||
594 | if(from != NULL) |
595 | if(from != NULL) |
595 | { |
596 | { |
596 | for(; &dev->link != &devices; |
597 | for(; &dev->link != &devices; |
597 | dev = (pci_dev_t*)dev->link.next) |
598 | dev = (pci_dev_t*)dev->link.next) |
598 | { |
599 | { |
599 | if( &dev->pci_dev == from) |
600 | if( &dev->pci_dev == from) |
600 | { |
601 | { |
601 | dev = (pci_dev_t*)dev->link.next; |
602 | dev = (pci_dev_t*)dev->link.next; |
602 | break; |
603 | break; |
603 | }; |
604 | }; |
604 | } |
605 | } |
605 | }; |
606 | }; |
606 | 607 | ||
607 | for(; &dev->link != &devices; |
608 | for(; &dev->link != &devices; |
608 | dev = (pci_dev_t*)dev->link.next) |
609 | dev = (pci_dev_t*)dev->link.next) |
609 | { |
610 | { |
610 | if( dev->pci_dev.class == class) |
611 | if( dev->pci_dev.class == class) |
611 | { |
612 | { |
612 | return &dev->pci_dev; |
613 | return &dev->pci_dev; |
613 | } |
614 | } |
614 | } |
615 | } |
615 | 616 | ||
616 | return NULL; |
617 | return NULL; |
617 | } |
618 | } |
618 | 619 | ||
619 | 620 | ||
/*
 * Cookie encoding for ioport_map(): I/O ports are returned as small
 * "fake" pointers in [PIO_OFFSET, PIO_OFFSET + PIO_MASK]; anything at
 * or above PIO_RESERVED is treated as a real MMIO virtual address.
 */
#define PIO_OFFSET      0x10000UL
#define PIO_MASK        0x0ffffUL
#define PIO_RESERVED    0x40000UL
623 | 624 | ||
/*
 * Dispatch on an ioport_map()/ioremap() cookie: run @is_pio with the
 * decoded port number in `port` for port cookies, @is_mmio for real
 * MMIO addresses.  Cookies at or below PIO_OFFSET fall through and
 * execute neither branch.
 */
#define IO_COND(addr, is_pio, is_mmio) do {            \
    unsigned long port = (unsigned long __force)addr;  \
    if (port >= PIO_RESERVED) {                        \
        is_mmio;                                       \
    } else if (port > PIO_OFFSET) {                    \
        port &= PIO_MASK;                              \
        is_pio;                                        \
    }                                                  \
} while (0)
633 | 634 | ||
634 | /* Create a virtual mapping cookie for an IO port range */ |
635 | /* Create a virtual mapping cookie for an IO port range */ |
635 | void __iomem *ioport_map(unsigned long port, unsigned int nr) |
636 | void __iomem *ioport_map(unsigned long port, unsigned int nr) |
636 | { |
637 | { |
637 | return (void __iomem *) port; |
638 | if (port > PIO_MASK) |
- | 639 | return NULL; |
|
- | 640 | return (void __iomem *) (unsigned long) (port + PIO_OFFSET); |
|
638 | } |
641 | } |
639 | 642 | ||
640 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) |
643 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) |
641 | { |
644 | { |
642 | resource_size_t start = pci_resource_start(dev, bar); |
645 | resource_size_t start = pci_resource_start(dev, bar); |
643 | resource_size_t len = pci_resource_len(dev, bar); |
646 | resource_size_t len = pci_resource_len(dev, bar); |
644 | unsigned long flags = pci_resource_flags(dev, bar); |
647 | unsigned long flags = pci_resource_flags(dev, bar); |
645 | 648 | ||
646 | if (!len || !start) |
649 | if (!len || !start) |
647 | return NULL; |
650 | return NULL; |
648 | if (maxlen && len > maxlen) |
651 | if (maxlen && len > maxlen) |
649 | len = maxlen; |
652 | len = maxlen; |
650 | if (flags & IORESOURCE_IO) |
653 | if (flags & IORESOURCE_IO) |
651 | return ioport_map(start, len); |
654 | return ioport_map(start, len); |
652 | if (flags & IORESOURCE_MEM) { |
655 | if (flags & IORESOURCE_MEM) { |
653 | return ioremap(start, len); |
656 | return ioremap(start, len); |
654 | } |
657 | } |
655 | /* What? */ |
658 | /* What? */ |
656 | return NULL; |
659 | return NULL; |
657 | } |
660 | } |
658 | 661 | ||
659 | void pci_iounmap(struct pci_dev *dev, void __iomem * addr) |
662 | void pci_iounmap(struct pci_dev *dev, void __iomem * addr) |
660 | { |
663 | { |
661 | IO_COND(addr, /* nothing */, iounmap(addr)); |
664 | IO_COND(addr, /* nothing */, iounmap(addr)); |
662 | } |
665 | } |
663 | 666 | ||
664 | 667 | ||
665 | struct pci_bus_region { |
668 | struct pci_bus_region { |
666 | resource_size_t start; |
669 | resource_size_t start; |
667 | resource_size_t end; |
670 | resource_size_t end; |
668 | }; |
671 | }; |
669 | 672 | ||
670 | static inline void |
673 | static inline void |
671 | pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, |
674 | pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, |
672 | struct resource *res) |
675 | struct resource *res) |
673 | { |
676 | { |
674 | region->start = res->start; |
677 | region->start = res->start; |
675 | region->end = res->end; |
678 | region->end = res->end; |
676 | } |
679 | } |
677 | 680 | ||
678 | 681 | ||
679 | int pci_enable_rom(struct pci_dev *pdev) |
682 | int pci_enable_rom(struct pci_dev *pdev) |
680 | { |
683 | { |
681 | struct resource *res = pdev->resource + PCI_ROM_RESOURCE; |
684 | struct resource *res = pdev->resource + PCI_ROM_RESOURCE; |
682 | struct pci_bus_region region; |
685 | struct pci_bus_region region; |
683 | u32 rom_addr; |
686 | u32 rom_addr; |
684 | 687 | ||
685 | if (!res->flags) |
688 | if (!res->flags) |
686 | return -1; |
689 | return -1; |
687 | 690 | ||
688 | pcibios_resource_to_bus(pdev, ®ion, res); |
691 | pcibios_resource_to_bus(pdev, ®ion, res); |
689 | pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr); |
692 | pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr); |
690 | rom_addr &= ~PCI_ROM_ADDRESS_MASK; |
693 | rom_addr &= ~PCI_ROM_ADDRESS_MASK; |
691 | rom_addr |= region.start | PCI_ROM_ADDRESS_ENABLE; |
694 | rom_addr |= region.start | PCI_ROM_ADDRESS_ENABLE; |
692 | pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr); |
695 | pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr); |
693 | return 0; |
696 | return 0; |
694 | } |
697 | } |
695 | 698 | ||
696 | void pci_disable_rom(struct pci_dev *pdev) |
699 | void pci_disable_rom(struct pci_dev *pdev) |
697 | { |
700 | { |
698 | u32 rom_addr; |
701 | u32 rom_addr; |
699 | pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr); |
702 | pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr); |
700 | rom_addr &= ~PCI_ROM_ADDRESS_ENABLE; |
703 | rom_addr &= ~PCI_ROM_ADDRESS_ENABLE; |
701 | pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr); |
704 | pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr); |
702 | } |
705 | } |
703 | 706 | ||
704 | /** |
707 | /** |
705 | * pci_get_rom_size - obtain the actual size of the ROM image |
708 | * pci_get_rom_size - obtain the actual size of the ROM image |
706 | * @pdev: target PCI device |
709 | * @pdev: target PCI device |
707 | * @rom: kernel virtual pointer to image of ROM |
710 | * @rom: kernel virtual pointer to image of ROM |
708 | * @size: size of PCI window |
711 | * @size: size of PCI window |
709 | * return: size of actual ROM image |
712 | * return: size of actual ROM image |
710 | * |
713 | * |
711 | * Determine the actual length of the ROM image. |
714 | * Determine the actual length of the ROM image. |
712 | * The PCI window size could be much larger than the |
715 | * The PCI window size could be much larger than the |
713 | * actual image size. |
716 | * actual image size. |
714 | */ |
717 | */ |
715 | size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size) |
718 | size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size) |
716 | { |
719 | { |
717 | void __iomem *image; |
720 | void __iomem *image; |
718 | int last_image; |
721 | int last_image; |
719 | 722 | ||
720 | image = rom; |
723 | image = rom; |
721 | do { |
724 | do { |
722 | void __iomem *pds; |
725 | void __iomem *pds; |
723 | /* Standard PCI ROMs start out with these bytes 55 AA */ |
726 | /* Standard PCI ROMs start out with these bytes 55 AA */ |
724 | if (readb(image) != 0x55) { |
727 | if (readb(image) != 0x55) { |
725 | dev_err(&pdev->dev, "Invalid ROM contents\n"); |
728 | dev_err(&pdev->dev, "Invalid ROM contents\n"); |
726 | break; |
729 | break; |
727 | } |
730 | } |
728 | if (readb(image + 1) != 0xAA) |
731 | if (readb(image + 1) != 0xAA) |
729 | break; |
732 | break; |
730 | /* get the PCI data structure and check its signature */ |
733 | /* get the PCI data structure and check its signature */ |
731 | pds = image + readw(image + 24); |
734 | pds = image + readw(image + 24); |
732 | if (readb(pds) != 'P') |
735 | if (readb(pds) != 'P') |
733 | break; |
736 | break; |
734 | if (readb(pds + 1) != 'C') |
737 | if (readb(pds + 1) != 'C') |
735 | break; |
738 | break; |
736 | if (readb(pds + 2) != 'I') |
739 | if (readb(pds + 2) != 'I') |
737 | break; |
740 | break; |
738 | if (readb(pds + 3) != 'R') |
741 | if (readb(pds + 3) != 'R') |
739 | break; |
742 | break; |
740 | last_image = readb(pds + 21) & 0x80; |
743 | last_image = readb(pds + 21) & 0x80; |
741 | /* this length is reliable */ |
744 | /* this length is reliable */ |
742 | image += readw(pds + 16) * 512; |
745 | image += readw(pds + 16) * 512; |
743 | } while (!last_image); |
746 | } while (!last_image); |
744 | 747 | ||
745 | /* never return a size larger than the PCI resource window */ |
748 | /* never return a size larger than the PCI resource window */ |
746 | /* there are known ROMs that get the size wrong */ |
749 | /* there are known ROMs that get the size wrong */ |
747 | return min((size_t)(image - rom), size); |
750 | return min((size_t)(image - rom), size); |
748 | } |
751 | } |
749 | 752 | ||
750 | 753 | ||
751 | /** |
754 | /** |
752 | * pci_map_rom - map a PCI ROM to kernel space |
755 | * pci_map_rom - map a PCI ROM to kernel space |
753 | * @pdev: pointer to pci device struct |
756 | * @pdev: pointer to pci device struct |
754 | * @size: pointer to receive size of pci window over ROM |
757 | * @size: pointer to receive size of pci window over ROM |
755 | * |
758 | * |
756 | * Return: kernel virtual pointer to image of ROM |
759 | * Return: kernel virtual pointer to image of ROM |
757 | * |
760 | * |
758 | * Map a PCI ROM into kernel space. If ROM is boot video ROM, |
761 | * Map a PCI ROM into kernel space. If ROM is boot video ROM, |
759 | * the shadow BIOS copy will be returned instead of the |
762 | * the shadow BIOS copy will be returned instead of the |
760 | * actual ROM. |
763 | * actual ROM. |
761 | */ |
764 | */ |
762 | void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size) |
765 | void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size) |
763 | { |
766 | { |
764 | struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; |
767 | struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; |
765 | loff_t start; |
768 | loff_t start; |
766 | void __iomem *rom; |
769 | void __iomem *rom; |
767 | 770 | ||
768 | // ENTER(); |
771 | // ENTER(); |
769 | 772 | ||
770 | // dbgprintf("resource start %x end %x flags %x\n", |
773 | // dbgprintf("resource start %x end %x flags %x\n", |
771 | // res->start, res->end, res->flags); |
774 | // res->start, res->end, res->flags); |
772 | /* |
775 | /* |
773 | * IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy |
776 | * IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy |
774 | * memory map if the VGA enable bit of the Bridge Control register is |
777 | * memory map if the VGA enable bit of the Bridge Control register is |
775 | * set for embedded VGA. |
778 | * set for embedded VGA. |
776 | */ |
779 | */ |
777 | 780 | ||
778 | start = (loff_t)0xC0000; |
781 | start = (loff_t)0xC0000; |
779 | *size = 0x20000; /* cover C000:0 through E000:0 */ |
782 | *size = 0x20000; /* cover C000:0 through E000:0 */ |
780 | 783 | ||
781 | #if 0 |
784 | #if 0 |
782 | 785 | ||
783 | if (res->flags & IORESOURCE_ROM_SHADOW) { |
786 | if (res->flags & IORESOURCE_ROM_SHADOW) { |
784 | /* primary video rom always starts here */ |
787 | /* primary video rom always starts here */ |
785 | start = (loff_t)0xC0000; |
788 | start = (loff_t)0xC0000; |
786 | *size = 0x20000; /* cover C000:0 through E000:0 */ |
789 | *size = 0x20000; /* cover C000:0 through E000:0 */ |
787 | } else { |
790 | } else { |
788 | if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) { |
791 | if (res->flags & |
- | 792 | (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) { |
|
789 | *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); |
793 | *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); |
790 | return (void __iomem *)(unsigned long) |
794 | return (void __iomem *)(unsigned long) |
791 | pci_resource_start(pdev, PCI_ROM_RESOURCE); |
795 | pci_resource_start(pdev, PCI_ROM_RESOURCE); |
792 | } else { |
796 | } else { |
793 | /* assign the ROM an address if it doesn't have one */ |
797 | /* assign the ROM an address if it doesn't have one */ |
794 | // if (res->parent == NULL && |
798 | // if (res->parent == NULL && |
795 | // pci_assign_resource(pdev,PCI_ROM_RESOURCE)) |
799 | // pci_assign_resource(pdev,PCI_ROM_RESOURCE)) |
796 | return NULL; |
800 | return NULL; |
797 | // start = pci_resource_start(pdev, PCI_ROM_RESOURCE); |
801 | // start = pci_resource_start(pdev, PCI_ROM_RESOURCE); |
798 | // *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); |
802 | // *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); |
799 | // if (*size == 0) |
803 | // if (*size == 0) |
800 | // return NULL; |
804 | // return NULL; |
801 | 805 | ||
802 | /* Enable ROM space decodes */ |
806 | /* Enable ROM space decodes */ |
803 | // if (pci_enable_rom(pdev)) |
807 | // if (pci_enable_rom(pdev)) |
804 | // return NULL; |
808 | // return NULL; |
805 | } |
809 | } |
806 | } |
810 | } |
807 | #endif |
811 | #endif |
808 | 812 | ||
809 | rom = ioremap(start, *size); |
813 | rom = ioremap(start, *size); |
810 | if (!rom) { |
814 | if (!rom) { |
811 | /* restore enable if ioremap fails */ |
815 | /* restore enable if ioremap fails */ |
812 | if (!(res->flags & (IORESOURCE_ROM_ENABLE | |
816 | if (!(res->flags & (IORESOURCE_ROM_ENABLE | |
813 | IORESOURCE_ROM_SHADOW | |
817 | IORESOURCE_ROM_SHADOW | |
814 | IORESOURCE_ROM_COPY))) |
818 | IORESOURCE_ROM_COPY))) |
815 | pci_disable_rom(pdev); |
819 | pci_disable_rom(pdev); |
816 | return NULL; |
820 | return NULL; |
817 | } |
821 | } |
818 | 822 | ||
819 | /* |
823 | /* |
820 | * Try to find the true size of the ROM since sometimes the PCI window |
824 | * Try to find the true size of the ROM since sometimes the PCI window |
821 | * size is much larger than the actual size of the ROM. |
825 | * size is much larger than the actual size of the ROM. |
822 | * True size is important if the ROM is going to be copied. |
826 | * True size is important if the ROM is going to be copied. |
823 | */ |
827 | */ |
824 | *size = pci_get_rom_size(pdev, rom, *size); |
828 | *size = pci_get_rom_size(pdev, rom, *size); |
825 | // LEAVE(); |
829 | // LEAVE(); |
826 | return rom; |
830 | return rom; |
827 | } |
831 | } |
828 | 832 | ||
829 | void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom) |
833 | void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom) |
830 | { |
834 | { |
831 | struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; |
835 | struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; |
832 | 836 | ||
833 | if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) |
837 | if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) |
834 | return; |
838 | return; |
835 | 839 | ||
836 | iounmap(rom); |
840 | iounmap(rom); |
837 | 841 | ||
838 | /* Disable again before continuing, leave enabled if pci=rom */ |
842 | /* Disable again before continuing, leave enabled if pci=rom */ |
839 | if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW))) |
843 | if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW))) |
840 | pci_disable_rom(pdev); |
844 | pci_disable_rom(pdev); |
841 | } |
845 | } |
842 | 846 | ||
843 | int pci_set_dma_mask(struct pci_dev *dev, u64 mask) |
- | |
844 | { |
- | |
845 | dev->dma_mask = mask; |
- | |
846 | - | ||
847 | return 0; |
- | |
848 | } |
- | |
849 | - | ||
850 | - | ||
851 | - | ||
852 | static void __pci_set_master(struct pci_dev *dev, bool enable) |
847 | static void __pci_set_master(struct pci_dev *dev, bool enable) |
853 | { |
848 | { |
854 | u16 old_cmd, cmd; |
849 | u16 old_cmd, cmd; |
855 | 850 | ||
856 | pci_read_config_word(dev, PCI_COMMAND, &old_cmd); |
851 | pci_read_config_word(dev, PCI_COMMAND, &old_cmd); |
857 | if (enable) |
852 | if (enable) |
858 | cmd = old_cmd | PCI_COMMAND_MASTER; |
853 | cmd = old_cmd | PCI_COMMAND_MASTER; |
859 | else |
854 | else |
860 | cmd = old_cmd & ~PCI_COMMAND_MASTER; |
855 | cmd = old_cmd & ~PCI_COMMAND_MASTER; |
861 | if (cmd != old_cmd) { |
856 | if (cmd != old_cmd) { |
862 | pci_write_config_word(dev, PCI_COMMAND, cmd); |
857 | pci_write_config_word(dev, PCI_COMMAND, cmd); |
863 | } |
858 | } |
864 | dev->is_busmaster = enable; |
859 | dev->is_busmaster = enable; |
865 | } |
860 | } |
866 | 861 | ||
867 | 862 | ||
868 | /* pci_set_master - enables bus-mastering for device dev |
863 | /* pci_set_master - enables bus-mastering for device dev |
869 | * @dev: the PCI device to enable |
864 | * @dev: the PCI device to enable |
870 | * |
865 | * |
871 | * Enables bus-mastering on the device and calls pcibios_set_master() |
866 | * Enables bus-mastering on the device and calls pcibios_set_master() |
872 | * to do the needed arch specific settings. |
867 | * to do the needed arch specific settings. |
873 | */ |
868 | */ |
874 | void pci_set_master(struct pci_dev *dev) |
869 | void pci_set_master(struct pci_dev *dev) |
875 | { |
870 | { |
876 | __pci_set_master(dev, true); |
871 | __pci_set_master(dev, true); |
877 | // pcibios_set_master(dev); |
872 | // pcibios_set_master(dev); |
878 | } |
873 | } |
879 | 874 | ||
880 | /** |
875 | /** |
881 | * pci_clear_master - disables bus-mastering for device dev |
876 | * pci_clear_master - disables bus-mastering for device dev |
882 | * @dev: the PCI device to disable |
877 | * @dev: the PCI device to disable |
883 | */ |
878 | */ |
884 | void pci_clear_master(struct pci_dev *dev) |
879 | void pci_clear_master(struct pci_dev *dev) |
885 | { |
880 | { |
886 | __pci_set_master(dev, false); |
881 | __pci_set_master(dev, false); |
887 | } |
882 | } |
888 | 883 | ||
889 | 884 | ||
890 | static inline int pcie_cap_version(const struct pci_dev *dev) |
885 | static inline int pcie_cap_version(const struct pci_dev *dev) |
891 | { |
886 | { |
892 | return dev->pcie_flags_reg & PCI_EXP_FLAGS_VERS; |
887 | return dev->pcie_flags_reg & PCI_EXP_FLAGS_VERS; |
893 | } |
888 | } |
894 | 889 | ||
895 | static inline bool pcie_cap_has_devctl(const struct pci_dev *dev) |
890 | static inline bool pcie_cap_has_devctl(const struct pci_dev *dev) |
896 | { |
891 | { |
897 | return true; |
892 | return true; |
898 | } |
893 | } |
899 | 894 | ||
900 | static inline bool pcie_cap_has_lnkctl(const struct pci_dev *dev) |
895 | static inline bool pcie_cap_has_lnkctl(const struct pci_dev *dev) |
901 | { |
896 | { |
902 | int type = pci_pcie_type(dev); |
897 | int type = pci_pcie_type(dev); |
903 | 898 | ||
904 | return pcie_cap_version(dev) > 1 || |
899 | return pcie_cap_version(dev) > 1 || |
905 | type == PCI_EXP_TYPE_ROOT_PORT || |
900 | type == PCI_EXP_TYPE_ROOT_PORT || |
906 | type == PCI_EXP_TYPE_ENDPOINT || |
901 | type == PCI_EXP_TYPE_ENDPOINT || |
907 | type == PCI_EXP_TYPE_LEG_END; |
902 | type == PCI_EXP_TYPE_LEG_END; |
908 | } |
903 | } |
909 | 904 | ||
910 | static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev) |
905 | static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev) |
911 | { |
906 | { |
912 | int type = pci_pcie_type(dev); |
907 | int type = pci_pcie_type(dev); |
913 | 908 | ||
914 | return pcie_cap_version(dev) > 1 || |
909 | return pcie_cap_version(dev) > 1 || |
915 | type == PCI_EXP_TYPE_ROOT_PORT || |
910 | type == PCI_EXP_TYPE_ROOT_PORT || |
916 | (type == PCI_EXP_TYPE_DOWNSTREAM && |
911 | (type == PCI_EXP_TYPE_DOWNSTREAM && |
917 | dev->pcie_flags_reg & PCI_EXP_FLAGS_SLOT); |
912 | dev->pcie_flags_reg & PCI_EXP_FLAGS_SLOT); |
918 | } |
913 | } |
919 | 914 | ||
920 | static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev) |
915 | static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev) |
921 | { |
916 | { |
922 | int type = pci_pcie_type(dev); |
917 | int type = pci_pcie_type(dev); |
923 | 918 | ||
924 | return pcie_cap_version(dev) > 1 || |
919 | return pcie_cap_version(dev) > 1 || |
925 | type == PCI_EXP_TYPE_ROOT_PORT || |
920 | type == PCI_EXP_TYPE_ROOT_PORT || |
926 | type == PCI_EXP_TYPE_RC_EC; |
921 | type == PCI_EXP_TYPE_RC_EC; |
927 | } |
922 | } |
928 | 923 | ||
929 | static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos) |
924 | static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos) |
930 | { |
925 | { |
931 | if (!pci_is_pcie(dev)) |
926 | if (!pci_is_pcie(dev)) |
932 | return false; |
927 | return false; |
933 | 928 | ||
934 | switch (pos) { |
929 | switch (pos) { |
935 | case PCI_EXP_FLAGS_TYPE: |
930 | case PCI_EXP_FLAGS_TYPE: |
936 | return true; |
931 | return true; |
937 | case PCI_EXP_DEVCAP: |
932 | case PCI_EXP_DEVCAP: |
938 | case PCI_EXP_DEVCTL: |
933 | case PCI_EXP_DEVCTL: |
939 | case PCI_EXP_DEVSTA: |
934 | case PCI_EXP_DEVSTA: |
940 | return pcie_cap_has_devctl(dev); |
935 | return pcie_cap_has_devctl(dev); |
941 | case PCI_EXP_LNKCAP: |
936 | case PCI_EXP_LNKCAP: |
942 | case PCI_EXP_LNKCTL: |
937 | case PCI_EXP_LNKCTL: |
943 | case PCI_EXP_LNKSTA: |
938 | case PCI_EXP_LNKSTA: |
944 | return pcie_cap_has_lnkctl(dev); |
939 | return pcie_cap_has_lnkctl(dev); |
945 | case PCI_EXP_SLTCAP: |
940 | case PCI_EXP_SLTCAP: |
946 | case PCI_EXP_SLTCTL: |
941 | case PCI_EXP_SLTCTL: |
947 | case PCI_EXP_SLTSTA: |
942 | case PCI_EXP_SLTSTA: |
948 | return pcie_cap_has_sltctl(dev); |
943 | return pcie_cap_has_sltctl(dev); |
949 | case PCI_EXP_RTCTL: |
944 | case PCI_EXP_RTCTL: |
950 | case PCI_EXP_RTCAP: |
945 | case PCI_EXP_RTCAP: |
951 | case PCI_EXP_RTSTA: |
946 | case PCI_EXP_RTSTA: |
952 | return pcie_cap_has_rtctl(dev); |
947 | return pcie_cap_has_rtctl(dev); |
953 | case PCI_EXP_DEVCAP2: |
948 | case PCI_EXP_DEVCAP2: |
954 | case PCI_EXP_DEVCTL2: |
949 | case PCI_EXP_DEVCTL2: |
955 | case PCI_EXP_LNKCAP2: |
950 | case PCI_EXP_LNKCAP2: |
956 | case PCI_EXP_LNKCTL2: |
951 | case PCI_EXP_LNKCTL2: |
957 | case PCI_EXP_LNKSTA2: |
952 | case PCI_EXP_LNKSTA2: |
958 | return pcie_cap_version(dev) > 1; |
953 | return pcie_cap_version(dev) > 1; |
959 | default: |
954 | default: |
960 | return false; |
955 | return false; |
961 | } |
956 | } |
962 | } |
957 | } |
963 | 958 | ||
964 | /* |
959 | /* |
965 | * Note that these accessor functions are only for the "PCI Express |
960 | * Note that these accessor functions are only for the "PCI Express |
966 | * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the |
961 | * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the |
967 | * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.) |
962 | * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.) |
968 | */ |
963 | */ |
969 | int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val) |
964 | int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val) |
970 | { |
965 | { |
971 | int ret; |
966 | int ret; |
972 | 967 | ||
973 | *val = 0; |
968 | *val = 0; |
974 | if (pos & 1) |
969 | if (pos & 1) |
975 | return -EINVAL; |
970 | return -EINVAL; |
976 | 971 | ||
977 | if (pcie_capability_reg_implemented(dev, pos)) { |
972 | if (pcie_capability_reg_implemented(dev, pos)) { |
978 | ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val); |
973 | ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val); |
979 | /* |
974 | /* |
980 | * Reset *val to 0 if pci_read_config_word() fails, it may |
975 | * Reset *val to 0 if pci_read_config_word() fails, it may |
981 | * have been written as 0xFFFF if hardware error happens |
976 | * have been written as 0xFFFF if hardware error happens |
982 | * during pci_read_config_word(). |
977 | * during pci_read_config_word(). |
983 | */ |
978 | */ |
984 | if (ret) |
979 | if (ret) |
985 | *val = 0; |
980 | *val = 0; |
986 | return ret; |
981 | return ret; |
987 | } |
982 | } |
988 | 983 | ||
989 | /* |
984 | /* |
990 | * For Functions that do not implement the Slot Capabilities, |
985 | * For Functions that do not implement the Slot Capabilities, |
991 | * Slot Status, and Slot Control registers, these spaces must |
986 | * Slot Status, and Slot Control registers, these spaces must |
992 | * be hardwired to 0b, with the exception of the Presence Detect |
987 | * be hardwired to 0b, with the exception of the Presence Detect |
993 | * State bit in the Slot Status register of Downstream Ports, |
988 | * State bit in the Slot Status register of Downstream Ports, |
994 | * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8) |
989 | * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8) |
995 | */ |
990 | */ |
996 | if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && |
991 | if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && |
997 | pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { |
992 | pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { |
998 | *val = PCI_EXP_SLTSTA_PDS; |
993 | *val = PCI_EXP_SLTSTA_PDS; |
999 | } |
994 | } |
1000 | 995 | ||
1001 | return 0; |
996 | return 0; |
1002 | } |
997 | } |
1003 | EXPORT_SYMBOL(pcie_capability_read_word); |
998 | EXPORT_SYMBOL(pcie_capability_read_word); |
1004 | 999 | ||
1005 | int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val) |
1000 | int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val) |
1006 | { |
1001 | { |
1007 | int ret; |
1002 | int ret; |
1008 | 1003 | ||
1009 | *val = 0; |
1004 | *val = 0; |
1010 | if (pos & 3) |
1005 | if (pos & 3) |
1011 | return -EINVAL; |
1006 | return -EINVAL; |
1012 | 1007 | ||
1013 | if (pcie_capability_reg_implemented(dev, pos)) { |
1008 | if (pcie_capability_reg_implemented(dev, pos)) { |
1014 | ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val); |
1009 | ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val); |
1015 | /* |
1010 | /* |
1016 | * Reset *val to 0 if pci_read_config_dword() fails, it may |
1011 | * Reset *val to 0 if pci_read_config_dword() fails, it may |
1017 | * have been written as 0xFFFFFFFF if hardware error happens |
1012 | * have been written as 0xFFFFFFFF if hardware error happens |
1018 | * during pci_read_config_dword(). |
1013 | * during pci_read_config_dword(). |
1019 | */ |
1014 | */ |
1020 | if (ret) |
1015 | if (ret) |
1021 | *val = 0; |
1016 | *val = 0; |
1022 | return ret; |
1017 | return ret; |
1023 | } |
1018 | } |
1024 | 1019 | ||
1025 | if (pci_is_pcie(dev) && pos == PCI_EXP_SLTCTL && |
1020 | if (pci_is_pcie(dev) && pos == PCI_EXP_SLTCTL && |
1026 | pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { |
1021 | pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { |
1027 | *val = PCI_EXP_SLTSTA_PDS; |
1022 | *val = PCI_EXP_SLTSTA_PDS; |
1028 | } |
1023 | } |
1029 | 1024 | ||
1030 | return 0; |
1025 | return 0; |
1031 | } |
1026 | } |
1032 | EXPORT_SYMBOL(pcie_capability_read_dword); |
1027 | EXPORT_SYMBOL(pcie_capability_read_dword); |
1033 | 1028 | ||
1034 | int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) |
1029 | int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) |
1035 | { |
1030 | { |
1036 | if (pos & 1) |
1031 | if (pos & 1) |
1037 | return -EINVAL; |
1032 | return -EINVAL; |
1038 | 1033 | ||
1039 | if (!pcie_capability_reg_implemented(dev, pos)) |
1034 | if (!pcie_capability_reg_implemented(dev, pos)) |
1040 | return 0; |
1035 | return 0; |
1041 | 1036 | ||
1042 | return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val); |
1037 | return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val); |
1043 | } |
1038 | } |
1044 | EXPORT_SYMBOL(pcie_capability_write_word); |
1039 | EXPORT_SYMBOL(pcie_capability_write_word); |
1045 | 1040 | ||
1046 | int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val) |
1041 | int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val) |
1047 | { |
1042 | { |
1048 | if (pos & 3) |
1043 | if (pos & 3) |
1049 | return -EINVAL; |
1044 | return -EINVAL; |
1050 | 1045 | ||
1051 | if (!pcie_capability_reg_implemented(dev, pos)) |
1046 | if (!pcie_capability_reg_implemented(dev, pos)) |
1052 | return 0; |
1047 | return 0; |
1053 | 1048 | ||
1054 | return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val); |
1049 | return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val); |
1055 | } |
1050 | } |
1056 | EXPORT_SYMBOL(pcie_capability_write_dword);>=>>>><>><>><>>4)><4)> |
1051 | EXPORT_SYMBOL(pcie_capability_write_dword);>=>>>><>><>><>>4)><4)> |