Rev 6938 | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 6938 | Rev 7146 | ||
---|---|---|---|
1 | #include |
1 | #include |
2 | 2 | ||
3 | #include |
3 | #include |
4 | #include |
4 | #include |
5 | #include |
5 | #include |
6 | #include |
6 | #include |
7 | #include |
7 | #include |
8 | 8 | ||
9 | #include |
9 | #include |
10 | 10 | ||
11 | extern int pci_scan_filter(u32 id, u32 busnr, u32 devfn); |
11 | extern int pci_scan_filter(u32 id, u32 busnr, u32 devfn); |
12 | 12 | ||
13 | static LIST_HEAD(devices); |
13 | static LIST_HEAD(devices); |
14 | 14 | ||
15 | /* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */ |
15 | /* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */ |
16 | #define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */ |
16 | #define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */ |
17 | 17 | ||
18 | #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) |
18 | #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) |
- | 19 | ||
- | 20 | #define IORESOURCE_ROM_COPY (1<<2) /* ROM is alloc'd copy, resource field overlaid */ |
|
- | 21 | #define IORESOURCE_ROM_BIOS_COPY (1<<3) /* ROM is BIOS copy, resource field overlaid */ |
|
19 | 22 | ||
20 | /* |
23 | /* |
21 | * Translate the low bits of the PCI base |
24 | * Translate the low bits of the PCI base |
22 | * to the resource type |
25 | * to the resource type |
23 | */ |
26 | */ |
24 | static inline unsigned int pci_calc_resource_flags(unsigned int flags) |
27 | static inline unsigned int pci_calc_resource_flags(unsigned int flags) |
25 | { |
28 | { |
26 | if (flags & PCI_BASE_ADDRESS_SPACE_IO) |
29 | if (flags & PCI_BASE_ADDRESS_SPACE_IO) |
27 | return IORESOURCE_IO; |
30 | return IORESOURCE_IO; |
28 | 31 | ||
29 | if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH) |
32 | if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH) |
30 | return IORESOURCE_MEM | IORESOURCE_PREFETCH; |
33 | return IORESOURCE_MEM | IORESOURCE_PREFETCH; |
31 | 34 | ||
32 | return IORESOURCE_MEM; |
35 | return IORESOURCE_MEM; |
33 | } |
36 | } |
34 | 37 | ||
35 | 38 | ||
36 | static u32 pci_size(u32 base, u32 maxbase, u32 mask) |
39 | static u32 pci_size(u32 base, u32 maxbase, u32 mask) |
37 | { |
40 | { |
38 | u32 size = mask & maxbase; /* Find the significant bits */ |
41 | u32 size = mask & maxbase; /* Find the significant bits */ |
39 | 42 | ||
40 | if (!size) |
43 | if (!size) |
41 | return 0; |
44 | return 0; |
42 | 45 | ||
43 | /* Get the lowest of them to find the decode size, and |
46 | /* Get the lowest of them to find the decode size, and |
44 | from that the extent. */ |
47 | from that the extent. */ |
45 | size = (size & ~(size-1)) - 1; |
48 | size = (size & ~(size-1)) - 1; |
46 | 49 | ||
47 | /* base == maxbase can be valid only if the BAR has |
50 | /* base == maxbase can be valid only if the BAR has |
48 | already been programmed with all 1s. */ |
51 | already been programmed with all 1s. */ |
49 | if (base == maxbase && ((base | size) & mask) != mask) |
52 | if (base == maxbase && ((base | size) & mask) != mask) |
50 | return 0; |
53 | return 0; |
51 | 54 | ||
52 | return size; |
55 | return size; |
53 | } |
56 | } |
54 | 57 | ||
55 | static u64 pci_size64(u64 base, u64 maxbase, u64 mask) |
58 | static u64 pci_size64(u64 base, u64 maxbase, u64 mask) |
56 | { |
59 | { |
57 | u64 size = mask & maxbase; /* Find the significant bits */ |
60 | u64 size = mask & maxbase; /* Find the significant bits */ |
58 | 61 | ||
59 | if (!size) |
62 | if (!size) |
60 | return 0; |
63 | return 0; |
61 | 64 | ||
62 | /* Get the lowest of them to find the decode size, and |
65 | /* Get the lowest of them to find the decode size, and |
63 | from that the extent. */ |
66 | from that the extent. */ |
64 | size = (size & ~(size-1)) - 1; |
67 | size = (size & ~(size-1)) - 1; |
65 | 68 | ||
66 | /* base == maxbase can be valid only if the BAR has |
69 | /* base == maxbase can be valid only if the BAR has |
67 | already been programmed with all 1s. */ |
70 | already been programmed with all 1s. */ |
68 | if (base == maxbase && ((base | size) & mask) != mask) |
71 | if (base == maxbase && ((base | size) & mask) != mask) |
69 | return 0; |
72 | return 0; |
70 | 73 | ||
71 | return size; |
74 | return size; |
72 | } |
75 | } |
73 | 76 | ||
74 | static inline int is_64bit_memory(u32 mask) |
77 | static inline int is_64bit_memory(u32 mask) |
75 | { |
78 | { |
76 | if ((mask & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) == |
79 | if ((mask & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) == |
77 | (PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64)) |
80 | (PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64)) |
78 | return 1; |
81 | return 1; |
79 | return 0; |
82 | return 0; |
80 | } |
83 | } |
81 | 84 | ||
82 | static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) |
85 | static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) |
83 | { |
86 | { |
84 | u32 pos, reg, next; |
87 | u32 pos, reg, next; |
85 | u32 l, sz; |
88 | u32 l, sz; |
86 | struct resource *res; |
89 | struct resource *res; |
87 | 90 | ||
88 | for(pos=0; pos < howmany; pos = next) |
91 | for(pos=0; pos < howmany; pos = next) |
89 | { |
92 | { |
90 | u64 l64; |
93 | u64 l64; |
91 | u64 sz64; |
94 | u64 sz64; |
92 | u32 raw_sz; |
95 | u32 raw_sz; |
93 | 96 | ||
94 | next = pos + 1; |
97 | next = pos + 1; |
95 | 98 | ||
96 | res = &dev->resource[pos]; |
99 | res = &dev->resource[pos]; |
97 | 100 | ||
98 | reg = PCI_BASE_ADDRESS_0 + (pos << 2); |
101 | reg = PCI_BASE_ADDRESS_0 + (pos << 2); |
99 | l = PciRead32(dev->busnr, dev->devfn, reg); |
102 | l = PciRead32(dev->busnr, dev->devfn, reg); |
100 | PciWrite32(dev->busnr, dev->devfn, reg, ~0); |
103 | PciWrite32(dev->busnr, dev->devfn, reg, ~0); |
101 | sz = PciRead32(dev->busnr, dev->devfn, reg); |
104 | sz = PciRead32(dev->busnr, dev->devfn, reg); |
102 | PciWrite32(dev->busnr, dev->devfn, reg, l); |
105 | PciWrite32(dev->busnr, dev->devfn, reg, l); |
103 | 106 | ||
104 | if (!sz || sz == 0xffffffff) |
107 | if (!sz || sz == 0xffffffff) |
105 | continue; |
108 | continue; |
106 | 109 | ||
107 | if (l == 0xffffffff) |
110 | if (l == 0xffffffff) |
108 | l = 0; |
111 | l = 0; |
109 | 112 | ||
110 | raw_sz = sz; |
113 | raw_sz = sz; |
111 | if ((l & PCI_BASE_ADDRESS_SPACE) == |
114 | if ((l & PCI_BASE_ADDRESS_SPACE) == |
112 | PCI_BASE_ADDRESS_SPACE_MEMORY) |
115 | PCI_BASE_ADDRESS_SPACE_MEMORY) |
113 | { |
116 | { |
114 | sz = pci_size(l, sz, (u32)PCI_BASE_ADDRESS_MEM_MASK); |
117 | sz = pci_size(l, sz, (u32)PCI_BASE_ADDRESS_MEM_MASK); |
115 | /* |
118 | /* |
116 | * For 64bit prefetchable memory sz could be 0, if the |
119 | * For 64bit prefetchable memory sz could be 0, if the |
117 | * real size is bigger than 4G, so we need to check |
120 | * real size is bigger than 4G, so we need to check |
118 | * szhi for that. |
121 | * szhi for that. |
119 | */ |
122 | */ |
120 | if (!is_64bit_memory(l) && !sz) |
123 | if (!is_64bit_memory(l) && !sz) |
121 | continue; |
124 | continue; |
122 | res->start = l & PCI_BASE_ADDRESS_MEM_MASK; |
125 | res->start = l & PCI_BASE_ADDRESS_MEM_MASK; |
123 | res->flags |= l & ~PCI_BASE_ADDRESS_MEM_MASK; |
126 | res->flags |= l & ~PCI_BASE_ADDRESS_MEM_MASK; |
124 | } |
127 | } |
125 | else { |
128 | else { |
126 | sz = pci_size(l, sz, PCI_BASE_ADDRESS_IO_MASK & 0xffff); |
129 | sz = pci_size(l, sz, PCI_BASE_ADDRESS_IO_MASK & 0xffff); |
127 | if (!sz) |
130 | if (!sz) |
128 | continue; |
131 | continue; |
129 | res->start = l & PCI_BASE_ADDRESS_IO_MASK; |
132 | res->start = l & PCI_BASE_ADDRESS_IO_MASK; |
130 | res->flags |= l & ~PCI_BASE_ADDRESS_IO_MASK; |
133 | res->flags |= l & ~PCI_BASE_ADDRESS_IO_MASK; |
131 | } |
134 | } |
132 | res->end = res->start + (unsigned long) sz; |
135 | res->end = res->start + (unsigned long) sz; |
133 | res->flags |= pci_calc_resource_flags(l); |
136 | res->flags |= pci_calc_resource_flags(l); |
134 | if (is_64bit_memory(l)) |
137 | if (is_64bit_memory(l)) |
135 | { |
138 | { |
136 | u32 szhi, lhi; |
139 | u32 szhi, lhi; |
137 | 140 | ||
138 | lhi = PciRead32(dev->busnr, dev->devfn, reg+4); |
141 | lhi = PciRead32(dev->busnr, dev->devfn, reg+4); |
139 | PciWrite32(dev->busnr, dev->devfn, reg+4, ~0); |
142 | PciWrite32(dev->busnr, dev->devfn, reg+4, ~0); |
140 | szhi = PciRead32(dev->busnr, dev->devfn, reg+4); |
143 | szhi = PciRead32(dev->busnr, dev->devfn, reg+4); |
141 | PciWrite32(dev->busnr, dev->devfn, reg+4, lhi); |
144 | PciWrite32(dev->busnr, dev->devfn, reg+4, lhi); |
142 | sz64 = ((u64)szhi << 32) | raw_sz; |
145 | sz64 = ((u64)szhi << 32) | raw_sz; |
143 | l64 = ((u64)lhi << 32) | l; |
146 | l64 = ((u64)lhi << 32) | l; |
144 | sz64 = pci_size64(l64, sz64, PCI_BASE_ADDRESS_MEM_MASK); |
147 | sz64 = pci_size64(l64, sz64, PCI_BASE_ADDRESS_MEM_MASK); |
145 | next++; |
148 | next++; |
146 | 149 | ||
147 | #if BITS_PER_LONG == 64 |
150 | #if BITS_PER_LONG == 64 |
148 | if (!sz64) { |
151 | if (!sz64) { |
149 | res->start = 0; |
152 | res->start = 0; |
150 | res->end = 0; |
153 | res->end = 0; |
151 | res->flags = 0; |
154 | res->flags = 0; |
152 | continue; |
155 | continue; |
153 | } |
156 | } |
154 | res->start = l64 & PCI_BASE_ADDRESS_MEM_MASK; |
157 | res->start = l64 & PCI_BASE_ADDRESS_MEM_MASK; |
155 | res->end = res->start + sz64; |
158 | res->end = res->start + sz64; |
156 | #else |
159 | #else |
157 | if (sz64 > 0x100000000ULL) { |
160 | if (sz64 > 0x100000000ULL) { |
158 | printk(KERN_ERR "PCI: Unable to handle 64-bit " |
161 | printk(KERN_ERR "PCI: Unable to handle 64-bit " |
159 | "BAR for device %s\n", pci_name(dev)); |
162 | "BAR for device %s\n", pci_name(dev)); |
160 | res->start = 0; |
163 | res->start = 0; |
161 | res->flags = 0; |
164 | res->flags = 0; |
162 | } |
165 | } |
163 | else if (lhi) |
166 | else if (lhi) |
164 | { |
167 | { |
165 | /* 64-bit wide address, treat as disabled */ |
168 | /* 64-bit wide address, treat as disabled */ |
166 | PciWrite32(dev->busnr, dev->devfn, reg, |
169 | PciWrite32(dev->busnr, dev->devfn, reg, |
167 | l & ~(u32)PCI_BASE_ADDRESS_MEM_MASK); |
170 | l & ~(u32)PCI_BASE_ADDRESS_MEM_MASK); |
168 | PciWrite32(dev->busnr, dev->devfn, reg+4, 0); |
171 | PciWrite32(dev->busnr, dev->devfn, reg+4, 0); |
169 | res->start = 0; |
172 | res->start = 0; |
170 | res->end = sz; |
173 | res->end = sz; |
171 | } |
174 | } |
172 | #endif |
175 | #endif |
173 | } |
176 | } |
174 | } |
177 | } |
175 | 178 | ||
176 | if ( rom ) |
179 | if ( rom ) |
177 | { |
180 | { |
178 | dev->rom_base_reg = rom; |
181 | dev->rom_base_reg = rom; |
179 | res = &dev->resource[PCI_ROM_RESOURCE]; |
182 | res = &dev->resource[PCI_ROM_RESOURCE]; |
180 | 183 | ||
181 | l = PciRead32(dev->busnr, dev->devfn, rom); |
184 | l = PciRead32(dev->busnr, dev->devfn, rom); |
182 | PciWrite32(dev->busnr, dev->devfn, rom, ~PCI_ROM_ADDRESS_ENABLE); |
185 | PciWrite32(dev->busnr, dev->devfn, rom, ~PCI_ROM_ADDRESS_ENABLE); |
183 | sz = PciRead32(dev->busnr, dev->devfn, rom); |
186 | sz = PciRead32(dev->busnr, dev->devfn, rom); |
184 | PciWrite32(dev->busnr, dev->devfn, rom, l); |
187 | PciWrite32(dev->busnr, dev->devfn, rom, l); |
185 | 188 | ||
186 | if (l == 0xffffffff) |
189 | if (l == 0xffffffff) |
187 | l = 0; |
190 | l = 0; |
188 | 191 | ||
189 | if (sz && sz != 0xffffffff) |
192 | if (sz && sz != 0xffffffff) |
190 | { |
193 | { |
191 | sz = pci_size(l, sz, (u32)PCI_ROM_ADDRESS_MASK); |
194 | sz = pci_size(l, sz, (u32)PCI_ROM_ADDRESS_MASK); |
192 | 195 | ||
193 | if (sz) |
196 | if (sz) |
194 | { |
197 | { |
195 | res->flags = (l & IORESOURCE_ROM_ENABLE) | |
198 | res->flags = (l & IORESOURCE_ROM_ENABLE) | |
196 | IORESOURCE_MEM | IORESOURCE_PREFETCH | |
199 | IORESOURCE_MEM | IORESOURCE_PREFETCH | |
197 | IORESOURCE_READONLY | IORESOURCE_CACHEABLE; |
200 | IORESOURCE_READONLY | IORESOURCE_CACHEABLE; |
198 | res->start = l & PCI_ROM_ADDRESS_MASK; |
201 | res->start = l & PCI_ROM_ADDRESS_MASK; |
199 | res->end = res->start + (unsigned long) sz; |
202 | res->end = res->start + (unsigned long) sz; |
200 | } |
203 | } |
201 | } |
204 | } |
202 | } |
205 | } |
203 | } |
206 | } |
204 | 207 | ||
205 | static void pci_read_irq(struct pci_dev *dev) |
208 | static void pci_read_irq(struct pci_dev *dev) |
206 | { |
209 | { |
207 | u8 irq; |
210 | u8 irq; |
208 | 211 | ||
209 | irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_PIN); |
212 | irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_PIN); |
210 | dev->pin = irq; |
213 | dev->pin = irq; |
211 | if (irq) |
214 | if (irq) |
212 | irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_LINE); |
215 | irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_LINE); |
213 | dev->irq = irq; |
216 | dev->irq = irq; |
214 | }; |
217 | }; |
215 | 218 | ||
216 | 219 | ||
217 | int pci_setup_device(struct pci_dev *dev) |
220 | int pci_setup_device(struct pci_dev *dev) |
218 | { |
221 | { |
219 | u32 class; |
222 | u32 class; |
220 | 223 | ||
221 | class = PciRead32(dev->busnr, dev->devfn, PCI_CLASS_REVISION); |
224 | class = PciRead32(dev->busnr, dev->devfn, PCI_CLASS_REVISION); |
222 | dev->revision = class & 0xff; |
225 | dev->revision = class & 0xff; |
223 | class >>= 8; /* upper 3 bytes */ |
226 | class >>= 8; /* upper 3 bytes */ |
224 | dev->class = class; |
227 | dev->class = class; |
225 | 228 | ||
226 | /* "Unknown power state" */ |
229 | /* "Unknown power state" */ |
227 | // dev->current_state = PCI_UNKNOWN; |
230 | // dev->current_state = PCI_UNKNOWN; |
228 | 231 | ||
229 | /* Early fixups, before probing the BARs */ |
232 | /* Early fixups, before probing the BARs */ |
230 | // pci_fixup_device(pci_fixup_early, dev); |
233 | // pci_fixup_device(pci_fixup_early, dev); |
231 | class = dev->class >> 8; |
234 | class = dev->class >> 8; |
232 | 235 | ||
233 | switch (dev->hdr_type) |
236 | switch (dev->hdr_type) |
234 | { |
237 | { |
235 | case PCI_HEADER_TYPE_NORMAL: /* standard header */ |
238 | case PCI_HEADER_TYPE_NORMAL: /* standard header */ |
236 | if (class == PCI_CLASS_BRIDGE_PCI) |
239 | if (class == PCI_CLASS_BRIDGE_PCI) |
237 | goto bad; |
240 | goto bad; |
238 | pci_read_irq(dev); |
241 | pci_read_irq(dev); |
239 | pci_read_bases(dev, 6, PCI_ROM_ADDRESS); |
242 | pci_read_bases(dev, 6, PCI_ROM_ADDRESS); |
240 | dev->subsystem_vendor = PciRead16(dev->busnr, dev->devfn,PCI_SUBSYSTEM_VENDOR_ID); |
243 | dev->subsystem_vendor = PciRead16(dev->busnr, dev->devfn,PCI_SUBSYSTEM_VENDOR_ID); |
241 | dev->subsystem_device = PciRead16(dev->busnr, dev->devfn, PCI_SUBSYSTEM_ID); |
244 | dev->subsystem_device = PciRead16(dev->busnr, dev->devfn, PCI_SUBSYSTEM_ID); |
242 | 245 | ||
243 | /* |
246 | /* |
244 | * Do the ugly legacy mode stuff here rather than broken chip |
247 | * Do the ugly legacy mode stuff here rather than broken chip |
245 | * quirk code. Legacy mode ATA controllers have fixed |
248 | * quirk code. Legacy mode ATA controllers have fixed |
246 | * addresses. These are not always echoed in BAR0-3, and |
249 | * addresses. These are not always echoed in BAR0-3, and |
247 | * BAR0-3 in a few cases contain junk! |
250 | * BAR0-3 in a few cases contain junk! |
248 | */ |
251 | */ |
249 | if (class == PCI_CLASS_STORAGE_IDE) |
252 | if (class == PCI_CLASS_STORAGE_IDE) |
250 | { |
253 | { |
251 | u8 progif; |
254 | u8 progif; |
252 | 255 | ||
253 | progif = PciRead8(dev->busnr, dev->devfn,PCI_CLASS_PROG); |
256 | progif = PciRead8(dev->busnr, dev->devfn,PCI_CLASS_PROG); |
254 | if ((progif & 1) == 0) |
257 | if ((progif & 1) == 0) |
255 | { |
258 | { |
256 | dev->resource[0].start = 0x1F0; |
259 | dev->resource[0].start = 0x1F0; |
257 | dev->resource[0].end = 0x1F7; |
260 | dev->resource[0].end = 0x1F7; |
258 | dev->resource[0].flags = LEGACY_IO_RESOURCE; |
261 | dev->resource[0].flags = LEGACY_IO_RESOURCE; |
259 | dev->resource[1].start = 0x3F6; |
262 | dev->resource[1].start = 0x3F6; |
260 | dev->resource[1].end = 0x3F6; |
263 | dev->resource[1].end = 0x3F6; |
261 | dev->resource[1].flags = LEGACY_IO_RESOURCE; |
264 | dev->resource[1].flags = LEGACY_IO_RESOURCE; |
262 | } |
265 | } |
263 | if ((progif & 4) == 0) |
266 | if ((progif & 4) == 0) |
264 | { |
267 | { |
265 | dev->resource[2].start = 0x170; |
268 | dev->resource[2].start = 0x170; |
266 | dev->resource[2].end = 0x177; |
269 | dev->resource[2].end = 0x177; |
267 | dev->resource[2].flags = LEGACY_IO_RESOURCE; |
270 | dev->resource[2].flags = LEGACY_IO_RESOURCE; |
268 | dev->resource[3].start = 0x376; |
271 | dev->resource[3].start = 0x376; |
269 | dev->resource[3].end = 0x376; |
272 | dev->resource[3].end = 0x376; |
270 | dev->resource[3].flags = LEGACY_IO_RESOURCE; |
273 | dev->resource[3].flags = LEGACY_IO_RESOURCE; |
271 | }; |
274 | }; |
272 | } |
275 | } |
273 | break; |
276 | break; |
274 | 277 | ||
275 | case PCI_HEADER_TYPE_BRIDGE: /* bridge header */ |
278 | case PCI_HEADER_TYPE_BRIDGE: /* bridge header */ |
276 | if (class != PCI_CLASS_BRIDGE_PCI) |
279 | if (class != PCI_CLASS_BRIDGE_PCI) |
277 | goto bad; |
280 | goto bad; |
278 | /* The PCI-to-PCI bridge spec requires that subtractive |
281 | /* The PCI-to-PCI bridge spec requires that subtractive |
279 | decoding (i.e. transparent) bridge must have programming |
282 | decoding (i.e. transparent) bridge must have programming |
280 | interface code of 0x01. */ |
283 | interface code of 0x01. */ |
281 | pci_read_irq(dev); |
284 | pci_read_irq(dev); |
282 | dev->transparent = ((dev->class & 0xff) == 1); |
285 | dev->transparent = ((dev->class & 0xff) == 1); |
283 | pci_read_bases(dev, 2, PCI_ROM_ADDRESS1); |
286 | pci_read_bases(dev, 2, PCI_ROM_ADDRESS1); |
284 | break; |
287 | break; |
285 | 288 | ||
286 | case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */ |
289 | case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */ |
287 | if (class != PCI_CLASS_BRIDGE_CARDBUS) |
290 | if (class != PCI_CLASS_BRIDGE_CARDBUS) |
288 | goto bad; |
291 | goto bad; |
289 | pci_read_irq(dev); |
292 | pci_read_irq(dev); |
290 | pci_read_bases(dev, 1, 0); |
293 | pci_read_bases(dev, 1, 0); |
291 | dev->subsystem_vendor = PciRead16(dev->busnr, |
294 | dev->subsystem_vendor = PciRead16(dev->busnr, |
292 | dev->devfn, |
295 | dev->devfn, |
293 | PCI_CB_SUBSYSTEM_VENDOR_ID); |
296 | PCI_CB_SUBSYSTEM_VENDOR_ID); |
294 | 297 | ||
295 | dev->subsystem_device = PciRead16(dev->busnr, |
298 | dev->subsystem_device = PciRead16(dev->busnr, |
296 | dev->devfn, |
299 | dev->devfn, |
297 | PCI_CB_SUBSYSTEM_ID); |
300 | PCI_CB_SUBSYSTEM_ID); |
298 | break; |
301 | break; |
299 | 302 | ||
300 | default: /* unknown header */ |
303 | default: /* unknown header */ |
301 | printk(KERN_ERR "PCI: device %s has unknown header type %02x, ignoring.\n", |
304 | printk(KERN_ERR "PCI: device %s has unknown header type %02x, ignoring.\n", |
302 | pci_name(dev), dev->hdr_type); |
305 | pci_name(dev), dev->hdr_type); |
303 | return -1; |
306 | return -1; |
304 | 307 | ||
305 | bad: |
308 | bad: |
306 | printk(KERN_ERR "PCI: %s: class %x doesn't match header type %02x. Ignoring class.\n", |
309 | printk(KERN_ERR "PCI: %s: class %x doesn't match header type %02x. Ignoring class.\n", |
307 | pci_name(dev), class, dev->hdr_type); |
310 | pci_name(dev), class, dev->hdr_type); |
308 | dev->class = PCI_CLASS_NOT_DEFINED; |
311 | dev->class = PCI_CLASS_NOT_DEFINED; |
309 | } |
312 | } |
310 | 313 | ||
311 | /* We found a fine healthy device, go go go... */ |
314 | /* We found a fine healthy device, go go go... */ |
312 | 315 | ||
313 | return 0; |
316 | return 0; |
314 | }; |
317 | }; |
315 | 318 | ||
316 | static pci_dev_t* pci_scan_device(u32 busnr, int devfn) |
319 | static pci_dev_t* pci_scan_device(u32 busnr, int devfn) |
317 | { |
320 | { |
318 | pci_dev_t *dev; |
321 | pci_dev_t *dev; |
319 | 322 | ||
320 | u32 id; |
323 | u32 id; |
321 | u8 hdr; |
324 | u8 hdr; |
322 | 325 | ||
323 | int timeout = 10; |
326 | int timeout = 10; |
324 | 327 | ||
325 | id = PciRead32(busnr, devfn, PCI_VENDOR_ID); |
328 | id = PciRead32(busnr, devfn, PCI_VENDOR_ID); |
326 | 329 | ||
327 | /* some broken boards return 0 or ~0 if a slot is empty: */ |
330 | /* some broken boards return 0 or ~0 if a slot is empty: */ |
328 | if (id == 0xffffffff || id == 0x00000000 || |
331 | if (id == 0xffffffff || id == 0x00000000 || |
329 | id == 0x0000ffff || id == 0xffff0000) |
332 | id == 0x0000ffff || id == 0xffff0000) |
330 | return NULL; |
333 | return NULL; |
331 | 334 | ||
332 | while (id == 0xffff0001) |
335 | while (id == 0xffff0001) |
333 | { |
336 | { |
334 | 337 | ||
335 | delay(timeout/10); |
338 | delay(timeout/10); |
336 | timeout *= 2; |
339 | timeout *= 2; |
337 | 340 | ||
338 | id = PciRead32(busnr, devfn, PCI_VENDOR_ID); |
341 | id = PciRead32(busnr, devfn, PCI_VENDOR_ID); |
339 | 342 | ||
340 | /* Card hasn't responded in 60 seconds? Must be stuck. */ |
343 | /* Card hasn't responded in 60 seconds? Must be stuck. */ |
341 | if (timeout > 60 * 100) |
344 | if (timeout > 60 * 100) |
342 | { |
345 | { |
343 | printk(KERN_WARNING "Device %04x:%02x:%02x.%d not " |
346 | printk(KERN_WARNING "Device %04x:%02x:%02x.%d not " |
344 | "responding\n", busnr,PCI_SLOT(devfn),PCI_FUNC(devfn)); |
347 | "responding\n", busnr,PCI_SLOT(devfn),PCI_FUNC(devfn)); |
345 | return NULL; |
348 | return NULL; |
346 | } |
349 | } |
347 | }; |
350 | }; |
348 | 351 | ||
349 | if( pci_scan_filter(id, busnr, devfn) == 0) |
352 | if( pci_scan_filter(id, busnr, devfn) == 0) |
350 | return NULL; |
353 | return NULL; |
351 | 354 | ||
352 | hdr = PciRead8(busnr, devfn, PCI_HEADER_TYPE); |
355 | hdr = PciRead8(busnr, devfn, PCI_HEADER_TYPE); |
353 | 356 | ||
354 | dev = (pci_dev_t*)kzalloc(sizeof(pci_dev_t), 0); |
357 | dev = (pci_dev_t*)kzalloc(sizeof(pci_dev_t), 0); |
355 | if(unlikely(dev == NULL)) |
358 | if(unlikely(dev == NULL)) |
356 | return NULL; |
359 | return NULL; |
357 | 360 | ||
358 | INIT_LIST_HEAD(&dev->link); |
361 | INIT_LIST_HEAD(&dev->link); |
359 | 362 | ||
360 | 363 | ||
361 | dev->pci_dev.busnr = busnr; |
364 | dev->pci_dev.busnr = busnr; |
362 | dev->pci_dev.devfn = devfn; |
365 | dev->pci_dev.devfn = devfn; |
363 | dev->pci_dev.hdr_type = hdr & 0x7f; |
366 | dev->pci_dev.hdr_type = hdr & 0x7f; |
364 | dev->pci_dev.multifunction = !!(hdr & 0x80); |
367 | dev->pci_dev.multifunction = !!(hdr & 0x80); |
365 | dev->pci_dev.vendor = id & 0xffff; |
368 | dev->pci_dev.vendor = id & 0xffff; |
366 | dev->pci_dev.device = (id >> 16) & 0xffff; |
369 | dev->pci_dev.device = (id >> 16) & 0xffff; |
367 | 370 | ||
368 | pci_setup_device(&dev->pci_dev); |
371 | pci_setup_device(&dev->pci_dev); |
369 | 372 | ||
370 | return dev; |
373 | return dev; |
371 | 374 | ||
372 | }; |
375 | }; |
373 | 376 | ||
374 | 377 | ||
375 | 378 | ||
376 | 379 | ||
377 | int _pci_scan_slot(u32 bus, int devfn) |
380 | int _pci_scan_slot(u32 bus, int devfn) |
378 | { |
381 | { |
379 | int func, nr = 0; |
382 | int func, nr = 0; |
380 | 383 | ||
381 | for (func = 0; func < 8; func++, devfn++) |
384 | for (func = 0; func < 8; func++, devfn++) |
382 | { |
385 | { |
383 | pci_dev_t *dev; |
386 | pci_dev_t *dev; |
384 | 387 | ||
385 | dev = pci_scan_device(bus, devfn); |
388 | dev = pci_scan_device(bus, devfn); |
386 | if( dev ) |
389 | if( dev ) |
387 | { |
390 | { |
388 | list_add(&dev->link, &devices); |
391 | list_add(&dev->link, &devices); |
389 | 392 | ||
390 | nr++; |
393 | nr++; |
391 | 394 | ||
392 | /* |
395 | /* |
393 | * If this is a single function device, |
396 | * If this is a single function device, |
394 | * don't scan past the first function. |
397 | * don't scan past the first function. |
395 | */ |
398 | */ |
396 | if (!dev->pci_dev.multifunction) |
399 | if (!dev->pci_dev.multifunction) |
397 | { |
400 | { |
398 | if (func > 0) { |
401 | if (func > 0) { |
399 | dev->pci_dev.multifunction = 1; |
402 | dev->pci_dev.multifunction = 1; |
400 | } |
403 | } |
401 | else { |
404 | else { |
402 | break; |
405 | break; |
403 | } |
406 | } |
404 | } |
407 | } |
405 | } |
408 | } |
406 | else { |
409 | else { |
407 | if (func == 0) |
410 | if (func == 0) |
408 | break; |
411 | break; |
409 | } |
412 | } |
410 | }; |
413 | }; |
411 | 414 | ||
412 | return nr; |
415 | return nr; |
413 | }; |
416 | }; |
414 | 417 | ||
415 | #define PCI_FIND_CAP_TTL 48 |
418 | #define PCI_FIND_CAP_TTL 48 |
416 | 419 | ||
417 | static int __pci_find_next_cap_ttl(unsigned int bus, unsigned int devfn, |
420 | static int __pci_find_next_cap_ttl(unsigned int bus, unsigned int devfn, |
418 | u8 pos, int cap, int *ttl) |
421 | u8 pos, int cap, int *ttl) |
419 | { |
422 | { |
420 | u8 id; |
423 | u8 id; |
421 | 424 | ||
422 | while ((*ttl)--) { |
425 | while ((*ttl)--) { |
423 | pos = PciRead8(bus, devfn, pos); |
426 | pos = PciRead8(bus, devfn, pos); |
424 | if (pos < 0x40) |
427 | if (pos < 0x40) |
425 | break; |
428 | break; |
426 | pos &= ~3; |
429 | pos &= ~3; |
427 | id = PciRead8(bus, devfn, pos + PCI_CAP_LIST_ID); |
430 | id = PciRead8(bus, devfn, pos + PCI_CAP_LIST_ID); |
428 | if (id == 0xff) |
431 | if (id == 0xff) |
429 | break; |
432 | break; |
430 | if (id == cap) |
433 | if (id == cap) |
431 | return pos; |
434 | return pos; |
432 | pos += PCI_CAP_LIST_NEXT; |
435 | pos += PCI_CAP_LIST_NEXT; |
433 | } |
436 | } |
434 | return 0; |
437 | return 0; |
435 | } |
438 | } |
436 | 439 | ||
437 | static int __pci_find_next_cap(unsigned int bus, unsigned int devfn, |
440 | static int __pci_find_next_cap(unsigned int bus, unsigned int devfn, |
438 | u8 pos, int cap) |
441 | u8 pos, int cap) |
439 | { |
442 | { |
440 | int ttl = PCI_FIND_CAP_TTL; |
443 | int ttl = PCI_FIND_CAP_TTL; |
441 | 444 | ||
442 | return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl); |
445 | return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl); |
443 | } |
446 | } |
444 | 447 | ||
445 | static int __pci_bus_find_cap_start(unsigned int bus, |
448 | static int __pci_bus_find_cap_start(unsigned int bus, |
446 | unsigned int devfn, u8 hdr_type) |
449 | unsigned int devfn, u8 hdr_type) |
447 | { |
450 | { |
448 | u16 status; |
451 | u16 status; |
449 | 452 | ||
450 | status = PciRead16(bus, devfn, PCI_STATUS); |
453 | status = PciRead16(bus, devfn, PCI_STATUS); |
451 | if (!(status & PCI_STATUS_CAP_LIST)) |
454 | if (!(status & PCI_STATUS_CAP_LIST)) |
452 | return 0; |
455 | return 0; |
453 | 456 | ||
454 | switch (hdr_type) { |
457 | switch (hdr_type) { |
455 | case PCI_HEADER_TYPE_NORMAL: |
458 | case PCI_HEADER_TYPE_NORMAL: |
456 | case PCI_HEADER_TYPE_BRIDGE: |
459 | case PCI_HEADER_TYPE_BRIDGE: |
457 | return PCI_CAPABILITY_LIST; |
460 | return PCI_CAPABILITY_LIST; |
458 | case PCI_HEADER_TYPE_CARDBUS: |
461 | case PCI_HEADER_TYPE_CARDBUS: |
459 | return PCI_CB_CAPABILITY_LIST; |
462 | return PCI_CB_CAPABILITY_LIST; |
460 | default: |
463 | default: |
461 | return 0; |
464 | return 0; |
462 | } |
465 | } |
463 | 466 | ||
464 | return 0; |
467 | return 0; |
465 | } |
468 | } |
466 | 469 | ||
467 | 470 | ||
468 | int pci_find_capability(struct pci_dev *dev, int cap) |
471 | int pci_find_capability(struct pci_dev *dev, int cap) |
469 | { |
472 | { |
470 | int pos; |
473 | int pos; |
471 | 474 | ||
472 | pos = __pci_bus_find_cap_start(dev->busnr, dev->devfn, dev->hdr_type); |
475 | pos = __pci_bus_find_cap_start(dev->busnr, dev->devfn, dev->hdr_type); |
473 | if (pos) |
476 | if (pos) |
474 | pos = __pci_find_next_cap(dev->busnr, dev->devfn, pos, cap); |
477 | pos = __pci_find_next_cap(dev->busnr, dev->devfn, pos, cap); |
475 | 478 | ||
476 | return pos; |
479 | return pos; |
477 | } |
480 | } |
478 | 481 | ||
479 | 482 | ||
480 | 483 | ||
481 | 484 | ||
482 | int enum_pci_devices() |
485 | int enum_pci_devices() |
483 | { |
486 | { |
484 | pci_dev_t *dev; |
487 | pci_dev_t *dev; |
485 | u32 last_bus; |
488 | u32 last_bus; |
486 | u32 bus = 0 , devfn = 0; |
489 | u32 bus = 0 , devfn = 0; |
487 | 490 | ||
488 | 491 | ||
489 | last_bus = PciApi(1); |
492 | last_bus = PciApi(1); |
490 | 493 | ||
491 | 494 | ||
492 | if( unlikely(last_bus == -1)) |
495 | if( unlikely(last_bus == -1)) |
493 | return -1; |
496 | return -1; |
494 | 497 | ||
495 | for(;bus <= last_bus; bus++) |
498 | for(;bus <= last_bus; bus++) |
496 | { |
499 | { |
497 | for (devfn = 0; devfn < 0x100; devfn += 8) |
500 | for (devfn = 0; devfn < 0x100; devfn += 8) |
498 | _pci_scan_slot(bus, devfn); |
501 | _pci_scan_slot(bus, devfn); |
499 | 502 | ||
500 | 503 | ||
501 | } |
504 | } |
502 | for(dev = (pci_dev_t*)devices.next; |
505 | for(dev = (pci_dev_t*)devices.next; |
503 | &dev->link != &devices; |
506 | &dev->link != &devices; |
504 | dev = (pci_dev_t*)dev->link.next) |
507 | dev = (pci_dev_t*)dev->link.next) |
505 | { |
508 | { |
506 | dbgprintf("PCI device %x:%x bus:%x devfn:%x\n", |
509 | dbgprintf("PCI device %x:%x bus:%x devfn:%x\n", |
507 | dev->pci_dev.vendor, |
510 | dev->pci_dev.vendor, |
508 | dev->pci_dev.device, |
511 | dev->pci_dev.device, |
509 | dev->pci_dev.busnr, |
512 | dev->pci_dev.busnr, |
510 | dev->pci_dev.devfn); |
513 | dev->pci_dev.devfn); |
511 | 514 | ||
512 | } |
515 | } |
513 | return 0; |
516 | return 0; |
514 | } |
517 | } |
515 | 518 | ||
516 | const struct pci_device_id* find_pci_device(pci_dev_t* pdev, const struct pci_device_id *idlist) |
519 | const struct pci_device_id* find_pci_device(pci_dev_t* pdev, const struct pci_device_id *idlist) |
517 | { |
520 | { |
518 | pci_dev_t *dev; |
521 | pci_dev_t *dev; |
519 | const struct pci_device_id *ent; |
522 | const struct pci_device_id *ent; |
520 | 523 | ||
521 | for(dev = (pci_dev_t*)devices.next; |
524 | for(dev = (pci_dev_t*)devices.next; |
522 | &dev->link != &devices; |
525 | &dev->link != &devices; |
523 | dev = (pci_dev_t*)dev->link.next) |
526 | dev = (pci_dev_t*)dev->link.next) |
524 | { |
527 | { |
525 | if( dev->pci_dev.vendor != idlist->vendor ) |
528 | if( dev->pci_dev.vendor != idlist->vendor ) |
526 | continue; |
529 | continue; |
527 | 530 | ||
528 | for(ent = idlist; ent->vendor != 0; ent++) |
531 | for(ent = idlist; ent->vendor != 0; ent++) |
529 | { |
532 | { |
530 | if(unlikely(ent->device == dev->pci_dev.device)) |
533 | if(unlikely(ent->device == dev->pci_dev.device)) |
531 | { |
534 | { |
532 | pdev->pci_dev = dev->pci_dev; |
535 | pdev->pci_dev = dev->pci_dev; |
533 | return ent; |
536 | return ent; |
534 | } |
537 | } |
535 | }; |
538 | }; |
536 | } |
539 | } |
537 | 540 | ||
538 | return NULL; |
541 | return NULL; |
539 | }; |
542 | }; |
540 | 543 | ||
541 | struct pci_dev * |
544 | struct pci_dev * |
542 | pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from) |
545 | pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from) |
543 | { |
546 | { |
544 | pci_dev_t *dev; |
547 | pci_dev_t *dev; |
545 | 548 | ||
546 | dev = (pci_dev_t*)devices.next; |
549 | dev = (pci_dev_t*)devices.next; |
547 | 550 | ||
548 | if(from != NULL) |
551 | if(from != NULL) |
549 | { |
552 | { |
550 | for(; &dev->link != &devices; |
553 | for(; &dev->link != &devices; |
551 | dev = (pci_dev_t*)dev->link.next) |
554 | dev = (pci_dev_t*)dev->link.next) |
552 | { |
555 | { |
553 | if( &dev->pci_dev == from) |
556 | if( &dev->pci_dev == from) |
554 | { |
557 | { |
555 | dev = (pci_dev_t*)dev->link.next; |
558 | dev = (pci_dev_t*)dev->link.next; |
556 | break; |
559 | break; |
557 | }; |
560 | }; |
558 | } |
561 | } |
559 | }; |
562 | }; |
560 | 563 | ||
561 | for(; &dev->link != &devices; |
564 | for(; &dev->link != &devices; |
562 | dev = (pci_dev_t*)dev->link.next) |
565 | dev = (pci_dev_t*)dev->link.next) |
563 | { |
566 | { |
564 | if( dev->pci_dev.vendor != vendor ) |
567 | if( dev->pci_dev.vendor != vendor ) |
565 | continue; |
568 | continue; |
566 | 569 | ||
567 | if(dev->pci_dev.device == device) |
570 | if(dev->pci_dev.device == device) |
568 | { |
571 | { |
569 | return &dev->pci_dev; |
572 | return &dev->pci_dev; |
570 | } |
573 | } |
571 | } |
574 | } |
572 | return NULL; |
575 | return NULL; |
573 | }; |
576 | }; |
574 | 577 | ||
575 | 578 | ||
576 | struct pci_dev * _pci_get_bus_and_slot(unsigned int bus, unsigned int devfn) |
579 | struct pci_dev * _pci_get_bus_and_slot(unsigned int bus, unsigned int devfn) |
577 | { |
580 | { |
578 | pci_dev_t *dev; |
581 | pci_dev_t *dev; |
579 | 582 | ||
580 | for(dev = (pci_dev_t*)devices.next; |
583 | for(dev = (pci_dev_t*)devices.next; |
581 | &dev->link != &devices; |
584 | &dev->link != &devices; |
582 | dev = (pci_dev_t*)dev->link.next) |
585 | dev = (pci_dev_t*)dev->link.next) |
583 | { |
586 | { |
584 | if ( dev->pci_dev.busnr == bus && dev->pci_dev.devfn == devfn) |
587 | if ( dev->pci_dev.busnr == bus && dev->pci_dev.devfn == devfn) |
585 | return &dev->pci_dev; |
588 | return &dev->pci_dev; |
586 | } |
589 | } |
587 | return NULL; |
590 | return NULL; |
588 | } |
591 | } |
589 | 592 | ||
590 | struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from) |
593 | struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from) |
591 | { |
594 | { |
592 | pci_dev_t *dev; |
595 | pci_dev_t *dev; |
593 | 596 | ||
594 | dev = (pci_dev_t*)devices.next; |
597 | dev = (pci_dev_t*)devices.next; |
595 | 598 | ||
596 | if(from != NULL) |
599 | if(from != NULL) |
597 | { |
600 | { |
598 | for(; &dev->link != &devices; |
601 | for(; &dev->link != &devices; |
599 | dev = (pci_dev_t*)dev->link.next) |
602 | dev = (pci_dev_t*)dev->link.next) |
600 | { |
603 | { |
601 | if( &dev->pci_dev == from) |
604 | if( &dev->pci_dev == from) |
602 | { |
605 | { |
603 | dev = (pci_dev_t*)dev->link.next; |
606 | dev = (pci_dev_t*)dev->link.next; |
604 | break; |
607 | break; |
605 | }; |
608 | }; |
606 | } |
609 | } |
607 | }; |
610 | }; |
608 | 611 | ||
609 | for(; &dev->link != &devices; |
612 | for(; &dev->link != &devices; |
610 | dev = (pci_dev_t*)dev->link.next) |
613 | dev = (pci_dev_t*)dev->link.next) |
611 | { |
614 | { |
612 | if( dev->pci_dev.class == class) |
615 | if( dev->pci_dev.class == class) |
613 | { |
616 | { |
614 | return &dev->pci_dev; |
617 | return &dev->pci_dev; |
615 | } |
618 | } |
616 | } |
619 | } |
617 | 620 | ||
618 | return NULL; |
621 | return NULL; |
619 | } |
622 | } |
620 | 623 | ||
621 | 624 | ||
/*
 * Cookie encoding for ioport_map(): port numbers are biased into a
 * small reserved range of the "virtual address" space so that a single
 * void __iomem * can carry either an I/O port or a real MMIO address.
 */
#define PIO_OFFSET 0x10000UL    /* bias added to raw port numbers */
#define PIO_MASK 0x0ffffUL      /* strips the bias back off */
#define PIO_RESERVED 0x40000UL  /* cookies >= this are genuine MMIO */

/*
 * Dispatch on a cookie: run 'is_mmio' for ioremap()ed addresses,
 * 'is_pio' (with local 'port' holding the raw port number) for
 * ioport_map() cookies.  Values at or below PIO_OFFSET match neither
 * branch and are silently ignored.
 */
#define IO_COND(addr, is_pio, is_mmio) do { \
    unsigned long port = (unsigned long __force)addr; \
    if (port >= PIO_RESERVED) { \
        is_mmio; \
    } else if (port > PIO_OFFSET) { \
        port &= PIO_MASK; \
        is_pio; \
    }; \
} while (0)
635 | 638 | ||
636 | /* Create a virtual mapping cookie for an IO port range */ |
639 | /* Create a virtual mapping cookie for an IO port range */ |
637 | void __iomem *ioport_map(unsigned long port, unsigned int nr) |
640 | void __iomem *ioport_map(unsigned long port, unsigned int nr) |
638 | { |
641 | { |
639 | if (port > PIO_MASK) |
642 | if (port > PIO_MASK) |
640 | return NULL; |
643 | return NULL; |
641 | return (void __iomem *) (unsigned long) (port + PIO_OFFSET); |
644 | return (void __iomem *) (unsigned long) (port + PIO_OFFSET); |
642 | } |
645 | } |
643 | 646 | ||
644 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) |
647 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) |
645 | { |
648 | { |
646 | resource_size_t start = pci_resource_start(dev, bar); |
649 | resource_size_t start = pci_resource_start(dev, bar); |
647 | resource_size_t len = pci_resource_len(dev, bar); |
650 | resource_size_t len = pci_resource_len(dev, bar); |
648 | unsigned long flags = pci_resource_flags(dev, bar); |
651 | unsigned long flags = pci_resource_flags(dev, bar); |
649 | 652 | ||
650 | if (!len || !start) |
653 | if (!len || !start) |
651 | return NULL; |
654 | return NULL; |
652 | if (maxlen && len > maxlen) |
655 | if (maxlen && len > maxlen) |
653 | len = maxlen; |
656 | len = maxlen; |
654 | if (flags & IORESOURCE_IO) |
657 | if (flags & IORESOURCE_IO) |
655 | return ioport_map(start, len); |
658 | return ioport_map(start, len); |
656 | if (flags & IORESOURCE_MEM) { |
659 | if (flags & IORESOURCE_MEM) { |
657 | return ioremap(start, len); |
660 | return ioremap(start, len); |
658 | } |
661 | } |
659 | /* What? */ |
662 | /* What? */ |
660 | return NULL; |
663 | return NULL; |
661 | } |
664 | } |
662 | 665 | ||
/*
 * pci_iounmap - release a mapping obtained from pci_iomap()
 * @dev: PCI device the mapping belongs to (unused here)
 * @addr: cookie returned by pci_iomap()/ioport_map()
 *
 * Port cookies need no teardown; real MMIO mappings go to iounmap().
 */
void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
    IO_COND(addr, /* nothing */, iounmap(addr));
}
667 | 670 | ||
668 | 671 | ||
669 | 672 | ||
670 | 673 | ||
/*
 * pci_enable_rom - enable ROM decoding for a PCI device
 * @pdev: PCI device to operate on
 *
 * Programs the expansion ROM base address register with the bus
 * address of the ROM resource and sets the decode-enable bit.
 * Returns 0 on success, -1 when no ROM resource is populated.
 */
int pci_enable_rom(struct pci_dev *pdev)
{
    struct resource *res = pdev->resource + PCI_ROM_RESOURCE;
    struct pci_bus_region region;
    u32 rom_addr;

    /* No ROM resource was discovered for this device. */
    if (!res->flags)
        return -1;

    /* Convert the CPU-view resource back into a bus address. */
    _pcibios_resource_to_bus(pdev, &region, res);
    pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
    /* Keep the low control bits, replace the address, set enable. */
    rom_addr &= ~PCI_ROM_ADDRESS_MASK;
    rom_addr |= region.start | PCI_ROM_ADDRESS_ENABLE;
    pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr);
    return 0;
}
687 | 690 | ||
688 | void pci_disable_rom(struct pci_dev *pdev) |
691 | void pci_disable_rom(struct pci_dev *pdev) |
689 | { |
692 | { |
690 | u32 rom_addr; |
693 | u32 rom_addr; |
691 | pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr); |
694 | pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr); |
692 | rom_addr &= ~PCI_ROM_ADDRESS_ENABLE; |
695 | rom_addr &= ~PCI_ROM_ADDRESS_ENABLE; |
693 | pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr); |
696 | pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr); |
694 | } |
697 | } |
695 | 698 | ||
696 | /** |
699 | /** |
697 | * pci_get_rom_size - obtain the actual size of the ROM image |
700 | * pci_get_rom_size - obtain the actual size of the ROM image |
698 | * @pdev: target PCI device |
701 | * @pdev: target PCI device |
699 | * @rom: kernel virtual pointer to image of ROM |
702 | * @rom: kernel virtual pointer to image of ROM |
700 | * @size: size of PCI window |
703 | * @size: size of PCI window |
701 | * return: size of actual ROM image |
704 | * return: size of actual ROM image |
702 | * |
705 | * |
703 | * Determine the actual length of the ROM image. |
706 | * Determine the actual length of the ROM image. |
704 | * The PCI window size could be much larger than the |
707 | * The PCI window size could be much larger than the |
705 | * actual image size. |
708 | * actual image size. |
706 | */ |
709 | */ |
707 | size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size) |
710 | size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size) |
708 | { |
711 | { |
709 | void __iomem *image; |
712 | void __iomem *image; |
710 | int last_image; |
713 | int last_image; |
711 | 714 | ||
712 | image = rom; |
715 | image = rom; |
713 | do { |
716 | do { |
714 | void __iomem *pds; |
717 | void __iomem *pds; |
715 | /* Standard PCI ROMs start out with these bytes 55 AA */ |
718 | /* Standard PCI ROMs start out with these bytes 55 AA */ |
716 | if (readb(image) != 0x55) { |
719 | if (readb(image) != 0x55) { |
717 | dev_err(&pdev->dev, "Invalid ROM contents\n"); |
720 | dev_err(&pdev->dev, "Invalid ROM contents\n"); |
718 | break; |
721 | break; |
719 | } |
722 | } |
720 | if (readb(image + 1) != 0xAA) |
723 | if (readb(image + 1) != 0xAA) |
721 | break; |
724 | break; |
722 | /* get the PCI data structure and check its signature */ |
725 | /* get the PCI data structure and check its signature */ |
723 | pds = image + readw(image + 24); |
726 | pds = image + readw(image + 24); |
724 | if (readb(pds) != 'P') |
727 | if (readb(pds) != 'P') |
725 | break; |
728 | break; |
726 | if (readb(pds + 1) != 'C') |
729 | if (readb(pds + 1) != 'C') |
727 | break; |
730 | break; |
728 | if (readb(pds + 2) != 'I') |
731 | if (readb(pds + 2) != 'I') |
729 | break; |
732 | break; |
730 | if (readb(pds + 3) != 'R') |
733 | if (readb(pds + 3) != 'R') |
731 | break; |
734 | break; |
732 | last_image = readb(pds + 21) & 0x80; |
735 | last_image = readb(pds + 21) & 0x80; |
733 | /* this length is reliable */ |
736 | /* this length is reliable */ |
734 | image += readw(pds + 16) * 512; |
737 | image += readw(pds + 16) * 512; |
735 | } while (!last_image); |
738 | } while (!last_image); |
736 | 739 | ||
737 | /* never return a size larger than the PCI resource window */ |
740 | /* never return a size larger than the PCI resource window */ |
738 | /* there are known ROMs that get the size wrong */ |
741 | /* there are known ROMs that get the size wrong */ |
739 | return min((size_t)(image - rom), size); |
742 | return min((size_t)(image - rom), size); |
740 | } |
743 | } |
741 | 744 | ||
742 | 745 | ||
/**
 * pci_map_rom - map a PCI ROM to kernel space
 * @pdev: pointer to pci device struct
 * @size: pointer to receive size of pci window over ROM
 *
 * Return: kernel virtual pointer to image of ROM
 *
 * Map a PCI ROM into kernel space. If ROM is boot video ROM,
 * the shadow BIOS copy will be returned instead of the
 * actual ROM.
 */
void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
{
    struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
    loff_t start;
    void __iomem *rom;

    /*
     * IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy
     * memory map if the VGA enable bit of the Bridge Control register is
     * set for embedded VGA.
     */
    if (res->flags & IORESOURCE_ROM_SHADOW) {
        /* primary video rom always starts here */
        start = (loff_t)0xC0000;
        *size = 0x20000; /* cover C000:0 through E000:0 */
    } else {
        if (res->flags &
            (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) {
            /* The ROM was copied earlier: the resource fields hold
             * the copy's address/length directly, so no ioremap()
             * is needed - return the pointer as-is. */
            *size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
            return (void __iomem *)(unsigned long)
                pci_resource_start(pdev, PCI_ROM_RESOURCE);
        } else {
            /* NOTE(review): unlike the Linux original, this port does
             * not enable/map the device's own ROM BAR here - it falls
             * back to the legacy video ROM window. Confirm this is the
             * intended behavior for non-shadowed, non-copied ROMs. */
            start = (loff_t)0xC0000;
            *size = 0x20000; /* cover C000:0 through E000:0 */

        }
    }

    rom = ioremap(start, *size);
    if (!rom) {
        /* restore enable if ioremap fails */
        if (!(res->flags & (IORESOURCE_ROM_ENABLE |
                            IORESOURCE_ROM_SHADOW |
                            IORESOURCE_ROM_COPY)))
            pci_disable_rom(pdev);
        return NULL;
    }

    /*
     * Try to find the true size of the ROM since sometimes the PCI window
     * size is much larger than the actual size of the ROM.
     * True size is important if the ROM is going to be copied.
     */
    *size = pci_get_rom_size(pdev, rom, *size);
    return rom;
}
800 | 803 | ||
801 | void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom) |
804 | void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom) |
802 | { |
805 | { |
803 | struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; |
806 | struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; |
804 | 807 | ||
805 | if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) |
808 | if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) |
806 | return; |
809 | return; |
807 | 810 | ||
808 | iounmap(rom); |
811 | iounmap(rom); |
809 | 812 | ||
810 | /* Disable again before continuing, leave enabled if pci=rom */ |
813 | /* Disable again before continuing, leave enabled if pci=rom */ |
811 | if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW))) |
814 | if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW))) |
812 | pci_disable_rom(pdev); |
815 | pci_disable_rom(pdev); |
813 | } |
816 | } |
814 | 817 | ||
815 | static void __pci_set_master(struct pci_dev *dev, bool enable) |
818 | static void __pci_set_master(struct pci_dev *dev, bool enable) |
816 | { |
819 | { |
817 | u16 old_cmd, cmd; |
820 | u16 old_cmd, cmd; |
818 | 821 | ||
819 | pci_read_config_word(dev, PCI_COMMAND, &old_cmd); |
822 | pci_read_config_word(dev, PCI_COMMAND, &old_cmd); |
820 | if (enable) |
823 | if (enable) |
821 | cmd = old_cmd | PCI_COMMAND_MASTER; |
824 | cmd = old_cmd | PCI_COMMAND_MASTER; |
822 | else |
825 | else |
823 | cmd = old_cmd & ~PCI_COMMAND_MASTER; |
826 | cmd = old_cmd & ~PCI_COMMAND_MASTER; |
824 | if (cmd != old_cmd) { |
827 | if (cmd != old_cmd) { |
825 | dbgprintf("%s bus mastering\n", |
828 | dbgprintf("%s bus mastering\n", |
826 | enable ? "enabling" : "disabling"); |
829 | enable ? "enabling" : "disabling"); |
827 | pci_write_config_word(dev, PCI_COMMAND, cmd); |
830 | pci_write_config_word(dev, PCI_COMMAND, cmd); |
828 | } |
831 | } |
829 | dev->is_busmaster = enable; |
832 | dev->is_busmaster = enable; |
830 | } |
833 | } |
831 | 834 | ||
832 | 835 | ||
/* pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
void pci_set_master(struct pci_dev *dev)
{
    __pci_set_master(dev, true);
    /* NOTE(review): arch hook deliberately not wired up in this port */
//    pcibios_set_master(dev);
}
844 | 847 | ||
/**
 * pci_clear_master - disables bus-mastering for device dev
 * @dev: the PCI device to disable
 *
 * Clears PCI_COMMAND_MASTER via __pci_set_master() and updates
 * dev->is_busmaster accordingly.
 */
void pci_clear_master(struct pci_dev *dev)
{
    __pci_set_master(dev, false);
}
853 | 856 | ||
854 | 857 | ||
/* Version field of the cached PCIe Capabilities register (1 or 2). */
static inline int pcie_cap_version(const struct pci_dev *dev)
{
    return dev->pcie_flags_reg & PCI_EXP_FLAGS_VERS;
}
859 | 862 | ||
/* Device Capabilities/Control/Status exist in every capability version. */
static inline bool pcie_cap_has_devctl(const struct pci_dev *dev)
{
    return true;
}
864 | 867 | ||
865 | static inline bool pcie_cap_has_lnkctl(const struct pci_dev *dev) |
868 | static inline bool pcie_cap_has_lnkctl(const struct pci_dev *dev) |
866 | { |
869 | { |
867 | int type = pci_pcie_type(dev); |
870 | int type = pci_pcie_type(dev); |
868 | 871 | ||
869 | return pcie_cap_version(dev) > 1 || |
872 | return pcie_cap_version(dev) > 1 || |
870 | type == PCI_EXP_TYPE_ROOT_PORT || |
873 | type == PCI_EXP_TYPE_ROOT_PORT || |
871 | type == PCI_EXP_TYPE_ENDPOINT || |
874 | type == PCI_EXP_TYPE_ENDPOINT || |
872 | type == PCI_EXP_TYPE_LEG_END; |
875 | type == PCI_EXP_TYPE_LEG_END; |
873 | } |
876 | } |
874 | 877 | ||
875 | static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev) |
878 | static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev) |
876 | { |
879 | { |
877 | int type = pci_pcie_type(dev); |
880 | int type = pci_pcie_type(dev); |
878 | 881 | ||
879 | return pcie_cap_version(dev) > 1 || |
882 | return pcie_cap_version(dev) > 1 || |
880 | type == PCI_EXP_TYPE_ROOT_PORT || |
883 | type == PCI_EXP_TYPE_ROOT_PORT || |
881 | (type == PCI_EXP_TYPE_DOWNSTREAM && |
884 | (type == PCI_EXP_TYPE_DOWNSTREAM && |
882 | dev->pcie_flags_reg & PCI_EXP_FLAGS_SLOT); |
885 | dev->pcie_flags_reg & PCI_EXP_FLAGS_SLOT); |
883 | } |
886 | } |
884 | 887 | ||
885 | static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev) |
888 | static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev) |
886 | { |
889 | { |
887 | int type = pci_pcie_type(dev); |
890 | int type = pci_pcie_type(dev); |
888 | 891 | ||
889 | return pcie_cap_version(dev) > 1 || |
892 | return pcie_cap_version(dev) > 1 || |
890 | type == PCI_EXP_TYPE_ROOT_PORT || |
893 | type == PCI_EXP_TYPE_ROOT_PORT || |
891 | type == PCI_EXP_TYPE_RC_EC; |
894 | type == PCI_EXP_TYPE_RC_EC; |
892 | } |
895 | } |
893 | 896 | ||
/*
 * pcie_capability_reg_implemented - is a PCIe capability register present?
 * @dev: PCI device
 * @pos: register offset within the PCI Express Capability structure
 *
 * Returns true when @dev (if it is PCIe at all) actually implements the
 * register at @pos; v1 capability structures omit whole register groups
 * depending on the port type (see the pcie_cap_has_*() helpers above).
 */
static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
{
    if (!pci_is_pcie(dev))
        return false;

    switch (pos) {
    case PCI_EXP_FLAGS_TYPE:
        /* The flags register exists in every capability version. */
        return true;
    case PCI_EXP_DEVCAP:
    case PCI_EXP_DEVCTL:
    case PCI_EXP_DEVSTA:
        return pcie_cap_has_devctl(dev);
    case PCI_EXP_LNKCAP:
    case PCI_EXP_LNKCTL:
    case PCI_EXP_LNKSTA:
        return pcie_cap_has_lnkctl(dev);
    case PCI_EXP_SLTCAP:
    case PCI_EXP_SLTCTL:
    case PCI_EXP_SLTSTA:
        return pcie_cap_has_sltctl(dev);
    case PCI_EXP_RTCTL:
    case PCI_EXP_RTCAP:
    case PCI_EXP_RTSTA:
        return pcie_cap_has_rtctl(dev);
    case PCI_EXP_DEVCAP2:
    case PCI_EXP_DEVCTL2:
    case PCI_EXP_LNKCAP2:
    case PCI_EXP_LNKCTL2:
    case PCI_EXP_LNKSTA2:
        /* The "...2" register set only exists in v2+ capabilities. */
        return pcie_cap_version(dev) > 1;
    default:
        return false;
    }
}
928 | 931 | ||
/*
 * Note that these accessor functions are only for the "PCI Express
 * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the
 * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
 */

/*
 * pcie_capability_read_word - read a 16-bit PCIe capability register
 * @dev: PCI device
 * @pos: register offset within the PCI Express Capability structure
 * @val: receives the register value; 0 for unimplemented registers
 *
 * Returns 0 on success (including emulated reads of unimplemented
 * registers) or -EINVAL for a misaligned offset.
 */
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
{
    int ret;

    *val = 0;
    /* Registers are 16-bit aligned within the capability. */
    if (pos & 1)
        return -EINVAL;

    if (pcie_capability_reg_implemented(dev, pos)) {
        ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
        /*
         * Reset *val to 0 if pci_read_config_word() fails, it may
         * have been written as 0xFFFF if hardware error happens
         * during pci_read_config_word().
         */
        if (ret)
            *val = 0;
        return ret;
    }

    /*
     * For Functions that do not implement the Slot Capabilities,
     * Slot Status, and Slot Control registers, these spaces must
     * be hardwired to 0b, with the exception of the Presence Detect
     * State bit in the Slot Status register of Downstream Ports,
     * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8)
     */
    if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA &&
        pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
        *val = PCI_EXP_SLTSTA_PDS;
    }

    return 0;
}
EXPORT_SYMBOL(pcie_capability_read_word);
969 | 972 | ||
970 | int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val) |
973 | int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val) |
971 | { |
974 | { |
972 | int ret; |
975 | int ret; |
973 | 976 | ||
974 | *val = 0; |
977 | *val = 0; |
975 | if (pos & 3) |
978 | if (pos & 3) |
976 | return -EINVAL; |
979 | return -EINVAL; |
977 | 980 | ||
978 | if (pcie_capability_reg_implemented(dev, pos)) { |
981 | if (pcie_capability_reg_implemented(dev, pos)) { |
979 | ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val); |
982 | ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val); |
980 | /* |
983 | /* |
981 | * Reset *val to 0 if pci_read_config_dword() fails, it may |
984 | * Reset *val to 0 if pci_read_config_dword() fails, it may |
982 | * have been written as 0xFFFFFFFF if hardware error happens |
985 | * have been written as 0xFFFFFFFF if hardware error happens |
983 | * during pci_read_config_dword(). |
986 | * during pci_read_config_dword(). |
984 | */ |
987 | */ |
985 | if (ret) |
988 | if (ret) |
986 | *val = 0; |
989 | *val = 0; |
987 | return ret; |
990 | return ret; |
988 | } |
991 | } |
989 | 992 | ||
990 | if (pci_is_pcie(dev) && pos == PCI_EXP_SLTCTL && |
993 | if (pci_is_pcie(dev) && pos == PCI_EXP_SLTCTL && |
991 | pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { |
994 | pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { |
992 | *val = PCI_EXP_SLTSTA_PDS; |
995 | *val = PCI_EXP_SLTSTA_PDS; |
993 | } |
996 | } |
994 | 997 | ||
995 | return 0; |
998 | return 0; |
996 | } |
999 | } |
997 | EXPORT_SYMBOL(pcie_capability_read_dword); |
1000 | EXPORT_SYMBOL(pcie_capability_read_dword); |
998 | 1001 | ||
999 | int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) |
1002 | int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) |
1000 | { |
1003 | { |
1001 | if (pos & 1) |
1004 | if (pos & 1) |
1002 | return -EINVAL; |
1005 | return -EINVAL; |
1003 | 1006 | ||
1004 | if (!pcie_capability_reg_implemented(dev, pos)) |
1007 | if (!pcie_capability_reg_implemented(dev, pos)) |
1005 | return 0; |
1008 | return 0; |
1006 | 1009 | ||
1007 | return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val); |
1010 | return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val); |
1008 | } |
1011 | } |
1009 | EXPORT_SYMBOL(pcie_capability_write_word); |
1012 | EXPORT_SYMBOL(pcie_capability_write_word); |
1010 | 1013 | ||
1011 | int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val) |
1014 | int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val) |
1012 | { |
1015 | { |
1013 | if (pos & 3) |
1016 | if (pos & 3) |
1014 | return -EINVAL; |
1017 | return -EINVAL; |
1015 | 1018 | ||
1016 | if (!pcie_capability_reg_implemented(dev, pos)) |
1019 | if (!pcie_capability_reg_implemented(dev, pos)) |
1017 | return 0; |
1020 | return 0; |
1018 | 1021 | ||
1019 | return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val); |
1022 | return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val); |
1020 | } |
1023 | } |
1021 | EXPORT_SYMBOL(pcie_capability_write_dword); |
1024 | EXPORT_SYMBOL(pcie_capability_write_dword); |
1022 | 1025 | ||
1023 | int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, |
1026 | int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, |
1024 | u16 clear, u16 set) |
1027 | u16 clear, u16 set) |
1025 | { |
1028 | { |
1026 | int ret; |
1029 | int ret; |
1027 | u16 val; |
1030 | u16 val; |
1028 | 1031 | ||
1029 | ret = pcie_capability_read_word(dev, pos, &val); |
1032 | ret = pcie_capability_read_word(dev, pos, &val); |
1030 | if (!ret) { |
1033 | if (!ret) { |
1031 | val &= ~clear; |
1034 | val &= ~clear; |
1032 | val |= set; |
1035 | val |= set; |
1033 | ret = pcie_capability_write_word(dev, pos, val); |
1036 | ret = pcie_capability_write_word(dev, pos, val); |
1034 | } |
1037 | } |
1035 | 1038 | ||
1036 | return ret; |
1039 | return ret; |
1037 | } |
1040 | } |
1038 | 1041 | ||
1039 | 1042 | ||
1040 | 1043 | ||
1041 | int pcie_get_readrq(struct pci_dev *dev) |
1044 | int pcie_get_readrq(struct pci_dev *dev) |
1042 | { |
1045 | { |
1043 | u16 ctl; |
1046 | u16 ctl; |
1044 | 1047 | ||
1045 | pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl); |
1048 | pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl); |
1046 | 1049 | ||
1047 | return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12); |
1050 | return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12); |
1048 | } |
1051 | } |
1049 | EXPORT_SYMBOL(pcie_get_readrq); |
1052 | EXPORT_SYMBOL(pcie_get_readrq); |
1050 | 1053 | ||
1051 | /** |
1054 | /** |
1052 | * pcie_set_readrq - set PCI Express maximum memory read request |
1055 | * pcie_set_readrq - set PCI Express maximum memory read request |
1053 | * @dev: PCI device to query |
1056 | * @dev: PCI device to query |
1054 | * @rq: maximum memory read count in bytes |
1057 | * @rq: maximum memory read count in bytes |
1055 | * valid values are 128, 256, 512, 1024, 2048, 4096 |
1058 | * valid values are 128, 256, 512, 1024, 2048, 4096 |
1056 | * |
1059 | * |
1057 | * If possible sets maximum memory read request in bytes |
1060 | * If possible sets maximum memory read request in bytes |
1058 | */ |
1061 | */ |
1059 | int pcie_set_readrq(struct pci_dev *dev, int rq) |
1062 | int pcie_set_readrq(struct pci_dev *dev, int rq) |
1060 | { |
1063 | { |
1061 | u16 v; |
1064 | u16 v; |
1062 | 1065 | ||
1063 | if (rq < 128 || rq > 4096 || !is_power_of_2(rq)) |
1066 | if (rq < 128 || rq > 4096 || !is_power_of_2(rq)) |
1064 | return -EINVAL; |
1067 | return -EINVAL; |
1065 | 1068 | ||
1066 | v = (ffs(rq) - 8) << 12; |
1069 | v = (ffs(rq) - 8) << 12; |
1067 | 1070 | ||
1068 | return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, |
1071 | return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, |
1069 | PCI_EXP_DEVCTL_READRQ, v); |
1072 | PCI_EXP_DEVCTL_READRQ, v); |
1070 | }><>>><>>=>>>><>><>><>>4)><4)> |
1073 | }><>>><>>=>>>><>><>><>>3)><3)>2)><2)>4)><4)> |