/*
 * PCI bus enumeration and resource probing (KolibriOS kernel).
 * Imported from SVN (revisions up to 6321); blame-table markup removed.
 */
6104 | serge | 1 | #define CONFIG_PCI |
1117 | serge | 2 | |
6104 | serge | 3 | #include |
4 | |||
1403 | serge | 5 | #include |
1630 | serge | 6 | #include |
1963 | serge | 7 | #include |
5271 | serge | 8 | #include |
6104 | serge | 9 | #include |
10 | |||
5271 | serge | 11 | #include |
1117 | serge | 12 | |
6104 | serge | 13 | |
5271 | serge | 14 | extern int pci_scan_filter(u32 id, u32 busnr, u32 devfn); |
5078 | serge | 15 | |
1120 | serge | 16 | static LIST_HEAD(devices); |
1117 | serge | 17 | |
/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */
#define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */

/* Flags used for the hard-wired legacy-mode IDE port ranges. */
#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
23 | /* |
||
24 | * Translate the low bits of the PCI base |
||
25 | * to the resource type |
||
26 | */ |
||
27 | static inline unsigned int pci_calc_resource_flags(unsigned int flags) |
||
28 | { |
||
29 | if (flags & PCI_BASE_ADDRESS_SPACE_IO) |
||
30 | return IORESOURCE_IO; |
||
31 | |||
32 | if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH) |
||
33 | return IORESOURCE_MEM | IORESOURCE_PREFETCH; |
||
34 | |||
35 | return IORESOURCE_MEM; |
||
36 | } |
||
37 | |||
38 | |||
5271 | serge | 39 | static u32 pci_size(u32 base, u32 maxbase, u32 mask) |
1117 | serge | 40 | { |
5271 | serge | 41 | u32 size = mask & maxbase; /* Find the significant bits */ |
1117 | serge | 42 | |
43 | if (!size) |
||
44 | return 0; |
||
45 | |||
46 | /* Get the lowest of them to find the decode size, and |
||
47 | from that the extent. */ |
||
48 | size = (size & ~(size-1)) - 1; |
||
49 | |||
50 | /* base == maxbase can be valid only if the BAR has |
||
51 | already been programmed with all 1s. */ |
||
52 | if (base == maxbase && ((base | size) & mask) != mask) |
||
53 | return 0; |
||
54 | |||
55 | return size; |
||
56 | } |
||
57 | |||
5271 | serge | 58 | static u64 pci_size64(u64 base, u64 maxbase, u64 mask) |
1117 | serge | 59 | { |
5271 | serge | 60 | u64 size = mask & maxbase; /* Find the significant bits */ |
1117 | serge | 61 | |
62 | if (!size) |
||
63 | return 0; |
||
64 | |||
65 | /* Get the lowest of them to find the decode size, and |
||
66 | from that the extent. */ |
||
67 | size = (size & ~(size-1)) - 1; |
||
68 | |||
69 | /* base == maxbase can be valid only if the BAR has |
||
70 | already been programmed with all 1s. */ |
||
71 | if (base == maxbase && ((base | size) & mask) != mask) |
||
72 | return 0; |
||
73 | |||
74 | return size; |
||
75 | } |
||
76 | |||
5271 | serge | 77 | static inline int is_64bit_memory(u32 mask) |
1117 | serge | 78 | { |
79 | if ((mask & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) == |
||
80 | (PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64)) |
||
81 | return 1; |
||
82 | return 0; |
||
83 | } |
||
84 | |||
85 | static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) |
||
86 | { |
||
5271 | serge | 87 | u32 pos, reg, next; |
88 | u32 l, sz; |
||
1117 | serge | 89 | struct resource *res; |
90 | |||
91 | for(pos=0; pos < howmany; pos = next) |
||
92 | { |
||
5271 | serge | 93 | u64 l64; |
94 | u64 sz64; |
||
95 | u32 raw_sz; |
||
1117 | serge | 96 | |
97 | next = pos + 1; |
||
98 | |||
99 | res = &dev->resource[pos]; |
||
100 | |||
101 | reg = PCI_BASE_ADDRESS_0 + (pos << 2); |
||
2160 | serge | 102 | l = PciRead32(dev->busnr, dev->devfn, reg); |
103 | PciWrite32(dev->busnr, dev->devfn, reg, ~0); |
||
104 | sz = PciRead32(dev->busnr, dev->devfn, reg); |
||
105 | PciWrite32(dev->busnr, dev->devfn, reg, l); |
||
1117 | serge | 106 | |
107 | if (!sz || sz == 0xffffffff) |
||
108 | continue; |
||
109 | |||
110 | if (l == 0xffffffff) |
||
111 | l = 0; |
||
112 | |||
113 | raw_sz = sz; |
||
114 | if ((l & PCI_BASE_ADDRESS_SPACE) == |
||
115 | PCI_BASE_ADDRESS_SPACE_MEMORY) |
||
116 | { |
||
5271 | serge | 117 | sz = pci_size(l, sz, (u32)PCI_BASE_ADDRESS_MEM_MASK); |
1117 | serge | 118 | /* |
119 | * For 64bit prefetchable memory sz could be 0, if the |
||
120 | * real size is bigger than 4G, so we need to check |
||
121 | * szhi for that. |
||
122 | */ |
||
123 | if (!is_64bit_memory(l) && !sz) |
||
124 | continue; |
||
125 | res->start = l & PCI_BASE_ADDRESS_MEM_MASK; |
||
126 | res->flags |= l & ~PCI_BASE_ADDRESS_MEM_MASK; |
||
127 | } |
||
128 | else { |
||
129 | sz = pci_size(l, sz, PCI_BASE_ADDRESS_IO_MASK & 0xffff); |
||
130 | if (!sz) |
||
131 | continue; |
||
132 | res->start = l & PCI_BASE_ADDRESS_IO_MASK; |
||
133 | res->flags |= l & ~PCI_BASE_ADDRESS_IO_MASK; |
||
134 | } |
||
135 | res->end = res->start + (unsigned long) sz; |
||
136 | res->flags |= pci_calc_resource_flags(l); |
||
137 | if (is_64bit_memory(l)) |
||
138 | { |
||
5271 | serge | 139 | u32 szhi, lhi; |
1117 | serge | 140 | |
2160 | serge | 141 | lhi = PciRead32(dev->busnr, dev->devfn, reg+4); |
142 | PciWrite32(dev->busnr, dev->devfn, reg+4, ~0); |
||
143 | szhi = PciRead32(dev->busnr, dev->devfn, reg+4); |
||
144 | PciWrite32(dev->busnr, dev->devfn, reg+4, lhi); |
||
5271 | serge | 145 | sz64 = ((u64)szhi << 32) | raw_sz; |
146 | l64 = ((u64)lhi << 32) | l; |
||
1117 | serge | 147 | sz64 = pci_size64(l64, sz64, PCI_BASE_ADDRESS_MEM_MASK); |
148 | next++; |
||
149 | |||
150 | #if BITS_PER_LONG == 64 |
||
151 | if (!sz64) { |
||
152 | res->start = 0; |
||
153 | res->end = 0; |
||
154 | res->flags = 0; |
||
155 | continue; |
||
156 | } |
||
157 | res->start = l64 & PCI_BASE_ADDRESS_MEM_MASK; |
||
158 | res->end = res->start + sz64; |
||
159 | #else |
||
160 | if (sz64 > 0x100000000ULL) { |
||
161 | printk(KERN_ERR "PCI: Unable to handle 64-bit " |
||
162 | "BAR for device %s\n", pci_name(dev)); |
||
163 | res->start = 0; |
||
164 | res->flags = 0; |
||
165 | } |
||
166 | else if (lhi) |
||
167 | { |
||
168 | /* 64-bit wide address, treat as disabled */ |
||
2160 | serge | 169 | PciWrite32(dev->busnr, dev->devfn, reg, |
5271 | serge | 170 | l & ~(u32)PCI_BASE_ADDRESS_MEM_MASK); |
2160 | serge | 171 | PciWrite32(dev->busnr, dev->devfn, reg+4, 0); |
1117 | serge | 172 | res->start = 0; |
173 | res->end = sz; |
||
174 | } |
||
175 | #endif |
||
176 | } |
||
177 | } |
||
178 | |||
179 | if ( rom ) |
||
180 | { |
||
181 | dev->rom_base_reg = rom; |
||
182 | res = &dev->resource[PCI_ROM_RESOURCE]; |
||
183 | |||
2160 | serge | 184 | l = PciRead32(dev->busnr, dev->devfn, rom); |
185 | PciWrite32(dev->busnr, dev->devfn, rom, ~PCI_ROM_ADDRESS_ENABLE); |
||
186 | sz = PciRead32(dev->busnr, dev->devfn, rom); |
||
187 | PciWrite32(dev->busnr, dev->devfn, rom, l); |
||
1117 | serge | 188 | |
189 | if (l == 0xffffffff) |
||
190 | l = 0; |
||
191 | |||
192 | if (sz && sz != 0xffffffff) |
||
193 | { |
||
5271 | serge | 194 | sz = pci_size(l, sz, (u32)PCI_ROM_ADDRESS_MASK); |
1117 | serge | 195 | |
196 | if (sz) |
||
197 | { |
||
198 | res->flags = (l & IORESOURCE_ROM_ENABLE) | |
||
199 | IORESOURCE_MEM | IORESOURCE_PREFETCH | |
||
200 | IORESOURCE_READONLY | IORESOURCE_CACHEABLE; |
||
201 | res->start = l & PCI_ROM_ADDRESS_MASK; |
||
202 | res->end = res->start + (unsigned long) sz; |
||
203 | } |
||
204 | } |
||
205 | } |
||
206 | } |
||
207 | |||
208 | static void pci_read_irq(struct pci_dev *dev) |
||
209 | { |
||
5271 | serge | 210 | u8 irq; |
1117 | serge | 211 | |
2160 | serge | 212 | irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_PIN); |
1117 | serge | 213 | dev->pin = irq; |
214 | if (irq) |
||
2160 | serge | 215 | irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_LINE); |
1117 | serge | 216 | dev->irq = irq; |
217 | }; |
||
218 | |||
219 | |||
2160 | serge | 220 | int pci_setup_device(struct pci_dev *dev) |
1117 | serge | 221 | { |
5271 | serge | 222 | u32 class; |
1117 | serge | 223 | |
2160 | serge | 224 | class = PciRead32(dev->busnr, dev->devfn, PCI_CLASS_REVISION); |
1117 | serge | 225 | dev->revision = class & 0xff; |
226 | class >>= 8; /* upper 3 bytes */ |
||
227 | dev->class = class; |
||
228 | |||
229 | /* "Unknown power state" */ |
||
230 | // dev->current_state = PCI_UNKNOWN; |
||
231 | |||
232 | /* Early fixups, before probing the BARs */ |
||
233 | // pci_fixup_device(pci_fixup_early, dev); |
||
234 | class = dev->class >> 8; |
||
235 | |||
236 | switch (dev->hdr_type) |
||
237 | { |
||
238 | case PCI_HEADER_TYPE_NORMAL: /* standard header */ |
||
239 | if (class == PCI_CLASS_BRIDGE_PCI) |
||
240 | goto bad; |
||
241 | pci_read_irq(dev); |
||
242 | pci_read_bases(dev, 6, PCI_ROM_ADDRESS); |
||
2160 | serge | 243 | dev->subsystem_vendor = PciRead16(dev->busnr, dev->devfn,PCI_SUBSYSTEM_VENDOR_ID); |
244 | dev->subsystem_device = PciRead16(dev->busnr, dev->devfn, PCI_SUBSYSTEM_ID); |
||
1117 | serge | 245 | |
246 | /* |
||
247 | * Do the ugly legacy mode stuff here rather than broken chip |
||
248 | * quirk code. Legacy mode ATA controllers have fixed |
||
249 | * addresses. These are not always echoed in BAR0-3, and |
||
250 | * BAR0-3 in a few cases contain junk! |
||
251 | */ |
||
252 | if (class == PCI_CLASS_STORAGE_IDE) |
||
253 | { |
||
5271 | serge | 254 | u8 progif; |
1117 | serge | 255 | |
2160 | serge | 256 | progif = PciRead8(dev->busnr, dev->devfn,PCI_CLASS_PROG); |
1117 | serge | 257 | if ((progif & 1) == 0) |
258 | { |
||
259 | dev->resource[0].start = 0x1F0; |
||
260 | dev->resource[0].end = 0x1F7; |
||
261 | dev->resource[0].flags = LEGACY_IO_RESOURCE; |
||
262 | dev->resource[1].start = 0x3F6; |
||
263 | dev->resource[1].end = 0x3F6; |
||
264 | dev->resource[1].flags = LEGACY_IO_RESOURCE; |
||
265 | } |
||
266 | if ((progif & 4) == 0) |
||
267 | { |
||
268 | dev->resource[2].start = 0x170; |
||
269 | dev->resource[2].end = 0x177; |
||
270 | dev->resource[2].flags = LEGACY_IO_RESOURCE; |
||
271 | dev->resource[3].start = 0x376; |
||
272 | dev->resource[3].end = 0x376; |
||
273 | dev->resource[3].flags = LEGACY_IO_RESOURCE; |
||
274 | }; |
||
275 | } |
||
276 | break; |
||
277 | |||
278 | case PCI_HEADER_TYPE_BRIDGE: /* bridge header */ |
||
279 | if (class != PCI_CLASS_BRIDGE_PCI) |
||
280 | goto bad; |
||
281 | /* The PCI-to-PCI bridge spec requires that subtractive |
||
282 | decoding (i.e. transparent) bridge must have programming |
||
283 | interface code of 0x01. */ |
||
284 | pci_read_irq(dev); |
||
285 | dev->transparent = ((dev->class & 0xff) == 1); |
||
286 | pci_read_bases(dev, 2, PCI_ROM_ADDRESS1); |
||
287 | break; |
||
288 | |||
289 | case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */ |
||
290 | if (class != PCI_CLASS_BRIDGE_CARDBUS) |
||
291 | goto bad; |
||
292 | pci_read_irq(dev); |
||
293 | pci_read_bases(dev, 1, 0); |
||
2160 | serge | 294 | dev->subsystem_vendor = PciRead16(dev->busnr, |
1117 | serge | 295 | dev->devfn, |
296 | PCI_CB_SUBSYSTEM_VENDOR_ID); |
||
297 | |||
2160 | serge | 298 | dev->subsystem_device = PciRead16(dev->busnr, |
1117 | serge | 299 | dev->devfn, |
300 | PCI_CB_SUBSYSTEM_ID); |
||
301 | break; |
||
302 | |||
303 | default: /* unknown header */ |
||
304 | printk(KERN_ERR "PCI: device %s has unknown header type %02x, ignoring.\n", |
||
305 | pci_name(dev), dev->hdr_type); |
||
306 | return -1; |
||
307 | |||
308 | bad: |
||
309 | printk(KERN_ERR "PCI: %s: class %x doesn't match header type %02x. Ignoring class.\n", |
||
310 | pci_name(dev), class, dev->hdr_type); |
||
311 | dev->class = PCI_CLASS_NOT_DEFINED; |
||
312 | } |
||
313 | |||
314 | /* We found a fine healthy device, go go go... */ |
||
315 | |||
316 | return 0; |
||
317 | }; |
||
318 | |||
5271 | serge | 319 | static pci_dev_t* pci_scan_device(u32 busnr, int devfn) |
1117 | serge | 320 | { |
1403 | serge | 321 | pci_dev_t *dev; |
1117 | serge | 322 | |
5271 | serge | 323 | u32 id; |
324 | u8 hdr; |
||
1117 | serge | 325 | |
326 | int timeout = 10; |
||
327 | |||
2160 | serge | 328 | id = PciRead32(busnr, devfn, PCI_VENDOR_ID); |
1117 | serge | 329 | |
330 | /* some broken boards return 0 or ~0 if a slot is empty: */ |
||
331 | if (id == 0xffffffff || id == 0x00000000 || |
||
332 | id == 0x0000ffff || id == 0xffff0000) |
||
333 | return NULL; |
||
334 | |||
335 | while (id == 0xffff0001) |
||
336 | { |
||
337 | |||
338 | delay(timeout/10); |
||
339 | timeout *= 2; |
||
340 | |||
2160 | serge | 341 | id = PciRead32(busnr, devfn, PCI_VENDOR_ID); |
1117 | serge | 342 | |
343 | /* Card hasn't responded in 60 seconds? Must be stuck. */ |
||
344 | if (timeout > 60 * 100) |
||
345 | { |
||
346 | printk(KERN_WARNING "Device %04x:%02x:%02x.%d not " |
||
2160 | serge | 347 | "responding\n", busnr,PCI_SLOT(devfn),PCI_FUNC(devfn)); |
1117 | serge | 348 | return NULL; |
349 | } |
||
350 | }; |
||
351 | |||
2997 | Serge | 352 | if( pci_scan_filter(id, busnr, devfn) == 0) |
353 | return NULL; |
||
354 | |||
2160 | serge | 355 | hdr = PciRead8(busnr, devfn, PCI_HEADER_TYPE); |
1117 | serge | 356 | |
1404 | serge | 357 | dev = (pci_dev_t*)kzalloc(sizeof(pci_dev_t), 0); |
2997 | Serge | 358 | if(unlikely(dev == NULL)) |
359 | return NULL; |
||
1117 | serge | 360 | |
1120 | serge | 361 | INIT_LIST_HEAD(&dev->link); |
1117 | serge | 362 | |
363 | |||
2160 | serge | 364 | dev->pci_dev.busnr = busnr; |
1117 | serge | 365 | dev->pci_dev.devfn = devfn; |
366 | dev->pci_dev.hdr_type = hdr & 0x7f; |
||
367 | dev->pci_dev.multifunction = !!(hdr & 0x80); |
||
368 | dev->pci_dev.vendor = id & 0xffff; |
||
369 | dev->pci_dev.device = (id >> 16) & 0xffff; |
||
370 | |||
371 | pci_setup_device(&dev->pci_dev); |
||
372 | |||
373 | return dev; |
||
374 | |||
375 | }; |
||
376 | |||
2997 | Serge | 377 | |
378 | |||
379 | |||
6104 | serge | 380 | int _pci_scan_slot(u32 bus, int devfn) |
1117 | serge | 381 | { |
382 | int func, nr = 0; |
||
383 | |||
384 | for (func = 0; func < 8; func++, devfn++) |
||
385 | { |
||
1403 | serge | 386 | pci_dev_t *dev; |
1117 | serge | 387 | |
388 | dev = pci_scan_device(bus, devfn); |
||
389 | if( dev ) |
||
390 | { |
||
1120 | serge | 391 | list_add(&dev->link, &devices); |
1117 | serge | 392 | |
393 | nr++; |
||
394 | |||
395 | /* |
||
396 | * If this is a single function device, |
||
397 | * don't scan past the first function. |
||
398 | */ |
||
399 | if (!dev->pci_dev.multifunction) |
||
400 | { |
||
401 | if (func > 0) { |
||
402 | dev->pci_dev.multifunction = 1; |
||
403 | } |
||
404 | else { |
||
405 | break; |
||
406 | } |
||
407 | } |
||
408 | } |
||
409 | else { |
||
410 | if (func == 0) |
||
411 | break; |
||
412 | } |
||
413 | }; |
||
414 | |||
415 | return nr; |
||
416 | }; |
||
417 | |||
1239 | serge | 418 | #define PCI_FIND_CAP_TTL 48 |
419 | |||
420 | static int __pci_find_next_cap_ttl(unsigned int bus, unsigned int devfn, |
||
421 | u8 pos, int cap, int *ttl) |
||
422 | { |
||
423 | u8 id; |
||
424 | |||
425 | while ((*ttl)--) { |
||
426 | pos = PciRead8(bus, devfn, pos); |
||
427 | if (pos < 0x40) |
||
428 | break; |
||
429 | pos &= ~3; |
||
430 | id = PciRead8(bus, devfn, pos + PCI_CAP_LIST_ID); |
||
431 | if (id == 0xff) |
||
432 | break; |
||
433 | if (id == cap) |
||
434 | return pos; |
||
435 | pos += PCI_CAP_LIST_NEXT; |
||
436 | } |
||
437 | return 0; |
||
438 | } |
||
439 | |||
440 | static int __pci_find_next_cap(unsigned int bus, unsigned int devfn, |
||
441 | u8 pos, int cap) |
||
442 | { |
||
443 | int ttl = PCI_FIND_CAP_TTL; |
||
444 | |||
445 | return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl); |
||
446 | } |
||
447 | |||
448 | static int __pci_bus_find_cap_start(unsigned int bus, |
||
449 | unsigned int devfn, u8 hdr_type) |
||
450 | { |
||
451 | u16 status; |
||
452 | |||
453 | status = PciRead16(bus, devfn, PCI_STATUS); |
||
454 | if (!(status & PCI_STATUS_CAP_LIST)) |
||
455 | return 0; |
||
456 | |||
457 | switch (hdr_type) { |
||
458 | case PCI_HEADER_TYPE_NORMAL: |
||
459 | case PCI_HEADER_TYPE_BRIDGE: |
||
460 | return PCI_CAPABILITY_LIST; |
||
461 | case PCI_HEADER_TYPE_CARDBUS: |
||
462 | return PCI_CB_CAPABILITY_LIST; |
||
463 | default: |
||
464 | return 0; |
||
465 | } |
||
466 | |||
467 | return 0; |
||
468 | } |
||
469 | |||
470 | |||
471 | int pci_find_capability(struct pci_dev *dev, int cap) |
||
472 | { |
||
473 | int pos; |
||
474 | |||
2160 | serge | 475 | pos = __pci_bus_find_cap_start(dev->busnr, dev->devfn, dev->hdr_type); |
1239 | serge | 476 | if (pos) |
2160 | serge | 477 | pos = __pci_find_next_cap(dev->busnr, dev->devfn, pos, cap); |
1239 | serge | 478 | |
479 | return pos; |
||
480 | } |
||
481 | |||
482 | |||
2997 | Serge | 483 | |
484 | |||
485 | int enum_pci_devices() |
||
1963 | serge | 486 | { |
2997 | Serge | 487 | pci_dev_t *dev; |
5271 | serge | 488 | u32 last_bus; |
489 | u32 bus = 0 , devfn = 0; |
||
1117 | serge | 490 | |
1963 | serge | 491 | |
2997 | Serge | 492 | last_bus = PciApi(1); |
1963 | serge | 493 | |
494 | |||
2997 | Serge | 495 | if( unlikely(last_bus == -1)) |
496 | return -1; |
||
1963 | serge | 497 | |
2997 | Serge | 498 | for(;bus <= last_bus; bus++) |
499 | { |
||
500 | for (devfn = 0; devfn < 0x100; devfn += 8) |
||
6104 | serge | 501 | _pci_scan_slot(bus, devfn); |
1963 | serge | 502 | |
503 | |||
2997 | Serge | 504 | } |
505 | for(dev = (pci_dev_t*)devices.next; |
||
506 | &dev->link != &devices; |
||
507 | dev = (pci_dev_t*)dev->link.next) |
||
508 | { |
||
509 | dbgprintf("PCI device %x:%x bus:%x devfn:%x\n", |
||
510 | dev->pci_dev.vendor, |
||
511 | dev->pci_dev.device, |
||
512 | dev->pci_dev.busnr, |
||
513 | dev->pci_dev.devfn); |
||
1963 | serge | 514 | |
2997 | Serge | 515 | } |
516 | return 0; |
||
517 | } |
||
1963 | serge | 518 | |
2997 | Serge | 519 | const struct pci_device_id* find_pci_device(pci_dev_t* pdev, const struct pci_device_id *idlist) |
520 | { |
||
521 | pci_dev_t *dev; |
||
522 | const struct pci_device_id *ent; |
||
1963 | serge | 523 | |
2997 | Serge | 524 | for(dev = (pci_dev_t*)devices.next; |
525 | &dev->link != &devices; |
||
526 | dev = (pci_dev_t*)dev->link.next) |
||
527 | { |
||
528 | if( dev->pci_dev.vendor != idlist->vendor ) |
||
529 | continue; |
||
530 | |||
531 | for(ent = idlist; ent->vendor != 0; ent++) |
||
532 | { |
||
533 | if(unlikely(ent->device == dev->pci_dev.device)) |
||
534 | { |
||
535 | pdev->pci_dev = dev->pci_dev; |
||
536 | return ent; |
||
537 | } |
||
538 | }; |
||
539 | } |
||
540 | |||
541 | return NULL; |
||
542 | }; |
||
543 | |||
544 | struct pci_dev * |
||
545 | pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from) |
||
546 | { |
||
547 | pci_dev_t *dev; |
||
548 | |||
549 | dev = (pci_dev_t*)devices.next; |
||
550 | |||
551 | if(from != NULL) |
||
552 | { |
||
553 | for(; &dev->link != &devices; |
||
554 | dev = (pci_dev_t*)dev->link.next) |
||
555 | { |
||
556 | if( &dev->pci_dev == from) |
||
557 | { |
||
558 | dev = (pci_dev_t*)dev->link.next; |
||
1963 | serge | 559 | break; |
2997 | Serge | 560 | }; |
1963 | serge | 561 | } |
2997 | Serge | 562 | }; |
1963 | serge | 563 | |
2997 | Serge | 564 | for(; &dev->link != &devices; |
565 | dev = (pci_dev_t*)dev->link.next) |
||
566 | { |
||
567 | if( dev->pci_dev.vendor != vendor ) |
||
568 | continue; |
||
1963 | serge | 569 | |
2997 | Serge | 570 | if(dev->pci_dev.device == device) |
571 | { |
||
572 | return &dev->pci_dev; |
||
573 | } |
||
574 | } |
||
575 | return NULL; |
||
576 | }; |
||
1963 | serge | 577 | |
578 | |||
6104 | serge | 579 | struct pci_dev * _pci_get_bus_and_slot(unsigned int bus, unsigned int devfn) |
2997 | Serge | 580 | { |
581 | pci_dev_t *dev; |
||
1963 | serge | 582 | |
2997 | Serge | 583 | for(dev = (pci_dev_t*)devices.next; |
584 | &dev->link != &devices; |
||
585 | dev = (pci_dev_t*)dev->link.next) |
||
586 | { |
||
587 | if ( dev->pci_dev.busnr == bus && dev->pci_dev.devfn == devfn) |
||
588 | return &dev->pci_dev; |
||
589 | } |
||
590 | return NULL; |
||
1963 | serge | 591 | } |
592 | |||
2997 | Serge | 593 | struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from) |
1117 | serge | 594 | { |
2997 | Serge | 595 | pci_dev_t *dev; |
1117 | serge | 596 | |
2997 | Serge | 597 | dev = (pci_dev_t*)devices.next; |
598 | |||
599 | if(from != NULL) |
||
1117 | serge | 600 | { |
2997 | Serge | 601 | for(; &dev->link != &devices; |
602 | dev = (pci_dev_t*)dev->link.next) |
||
603 | { |
||
604 | if( &dev->pci_dev == from) |
||
605 | { |
||
606 | dev = (pci_dev_t*)dev->link.next; |
||
607 | break; |
||
608 | }; |
||
609 | } |
||
610 | }; |
||
1117 | serge | 611 | |
2997 | Serge | 612 | for(; &dev->link != &devices; |
613 | dev = (pci_dev_t*)dev->link.next) |
||
614 | { |
||
615 | if( dev->pci_dev.class == class) |
||
616 | { |
||
617 | return &dev->pci_dev; |
||
1117 | serge | 618 | } |
619 | } |
||
2997 | Serge | 620 | |
621 | return NULL; |
||
1117 | serge | 622 | } |
623 | |||
624 | |||
/* Cookie encoding for ioport_map(): I/O ports are returned as
 * (port + PIO_OFFSET); anything at or above PIO_RESERVED is a real
 * MMIO mapping. */
#define PIO_OFFSET   0x10000UL
#define PIO_MASK     0x0ffffUL
#define PIO_RESERVED 0x40000UL

/* Dispatch on an ioport_map()/ioremap() cookie: run 'is_pio' with 'port'
 * holding the decoded port number, or 'is_mmio' for a real mapping. */
#define IO_COND(addr, is_pio, is_mmio) do {            \
    unsigned long port = (unsigned long __force)addr;  \
    if (port >= PIO_RESERVED) {                        \
        is_mmio;                                       \
    } else if (port > PIO_OFFSET) {                    \
        port &= PIO_MASK;                              \
        is_pio;                                        \
    };                                                 \
} while (0)
639 | /* Create a virtual mapping cookie for an IO port range */ |
||
640 | void __iomem *ioport_map(unsigned long port, unsigned int nr) |
||
1117 | serge | 641 | { |
3764 | Serge | 642 | if (port > PIO_MASK) |
643 | return NULL; |
||
644 | return (void __iomem *) (unsigned long) (port + PIO_OFFSET); |
||
2997 | Serge | 645 | } |
1117 | serge | 646 | |
2997 | Serge | 647 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) |
648 | { |
||
649 | resource_size_t start = pci_resource_start(dev, bar); |
||
650 | resource_size_t len = pci_resource_len(dev, bar); |
||
651 | unsigned long flags = pci_resource_flags(dev, bar); |
||
1117 | serge | 652 | |
2997 | Serge | 653 | if (!len || !start) |
654 | return NULL; |
||
655 | if (maxlen && len > maxlen) |
||
656 | len = maxlen; |
||
657 | if (flags & IORESOURCE_IO) |
||
658 | return ioport_map(start, len); |
||
659 | if (flags & IORESOURCE_MEM) { |
||
660 | return ioremap(start, len); |
||
661 | } |
||
662 | /* What? */ |
||
663 | return NULL; |
||
1117 | serge | 664 | } |
665 | |||
2997 | Serge | 666 | void pci_iounmap(struct pci_dev *dev, void __iomem * addr) |
667 | { |
||
668 | IO_COND(addr, /* nothing */, iounmap(addr)); |
||
669 | } |
||
1117 | serge | 670 | |
2997 | Serge | 671 | |
672 | static inline void |
||
6104 | serge | 673 | _pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, |
2997 | Serge | 674 | struct resource *res) |
1117 | serge | 675 | { |
2997 | Serge | 676 | region->start = res->start; |
677 | region->end = res->end; |
||
678 | } |
||
1117 | serge | 679 | |
680 | |||
2997 | Serge | 681 | int pci_enable_rom(struct pci_dev *pdev) |
682 | { |
||
683 | struct resource *res = pdev->resource + PCI_ROM_RESOURCE; |
||
684 | struct pci_bus_region region; |
||
685 | u32 rom_addr; |
||
1117 | serge | 686 | |
2997 | Serge | 687 | if (!res->flags) |
688 | return -1; |
||
689 | |||
6104 | serge | 690 | _pcibios_resource_to_bus(pdev, ®ion, res); |
2997 | Serge | 691 | pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr); |
692 | rom_addr &= ~PCI_ROM_ADDRESS_MASK; |
||
693 | rom_addr |= region.start | PCI_ROM_ADDRESS_ENABLE; |
||
694 | pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr); |
||
695 | return 0; |
||
1117 | serge | 696 | } |
697 | |||
2997 | Serge | 698 | void pci_disable_rom(struct pci_dev *pdev) |
699 | { |
||
700 | u32 rom_addr; |
||
701 | pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr); |
||
702 | rom_addr &= ~PCI_ROM_ADDRESS_ENABLE; |
||
703 | pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr); |
||
704 | } |
||
1117 | serge | 705 | |
706 | /** |
||
2997 | Serge | 707 | * pci_get_rom_size - obtain the actual size of the ROM image |
708 | * @pdev: target PCI device |
||
709 | * @rom: kernel virtual pointer to image of ROM |
||
710 | * @size: size of PCI window |
||
711 | * return: size of actual ROM image |
||
1117 | serge | 712 | * |
2997 | Serge | 713 | * Determine the actual length of the ROM image. |
714 | * The PCI window size could be much larger than the |
||
715 | * actual image size. |
||
1117 | serge | 716 | */ |
2997 | Serge | 717 | size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size) |
1117 | serge | 718 | { |
2997 | Serge | 719 | void __iomem *image; |
720 | int last_image; |
||
1117 | serge | 721 | |
2997 | Serge | 722 | image = rom; |
723 | do { |
||
724 | void __iomem *pds; |
||
725 | /* Standard PCI ROMs start out with these bytes 55 AA */ |
||
726 | if (readb(image) != 0x55) { |
||
727 | dev_err(&pdev->dev, "Invalid ROM contents\n"); |
||
728 | break; |
||
1117 | serge | 729 | } |
2997 | Serge | 730 | if (readb(image + 1) != 0xAA) |
731 | break; |
||
732 | /* get the PCI data structure and check its signature */ |
||
733 | pds = image + readw(image + 24); |
||
734 | if (readb(pds) != 'P') |
||
735 | break; |
||
736 | if (readb(pds + 1) != 'C') |
||
737 | break; |
||
738 | if (readb(pds + 2) != 'I') |
||
739 | break; |
||
740 | if (readb(pds + 3) != 'R') |
||
741 | break; |
||
742 | last_image = readb(pds + 21) & 0x80; |
||
743 | /* this length is reliable */ |
||
744 | image += readw(pds + 16) * 512; |
||
745 | } while (!last_image); |
||
1963 | serge | 746 | |
2997 | Serge | 747 | /* never return a size larger than the PCI resource window */ |
748 | /* there are known ROMs that get the size wrong */ |
||
749 | return min((size_t)(image - rom), size); |
||
750 | } |
||
1117 | serge | 751 | |
752 | |||
753 | /** |
||
754 | * pci_map_rom - map a PCI ROM to kernel space |
||
755 | * @pdev: pointer to pci device struct |
||
756 | * @size: pointer to receive size of pci window over ROM |
||
757 | * |
||
2997 | Serge | 758 | * Return: kernel virtual pointer to image of ROM |
759 | * |
||
1117 | serge | 760 | * Map a PCI ROM into kernel space. If ROM is boot video ROM, |
761 | * the shadow BIOS copy will be returned instead of the |
||
762 | * actual ROM. |
||
763 | */ |
||
2997 | Serge | 764 | void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size) |
1117 | serge | 765 | { |
766 | struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; |
||
2997 | Serge | 767 | loff_t start; |
768 | void __iomem *rom; |
||
1117 | serge | 769 | |
1963 | serge | 770 | /* |
771 | * IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy |
||
772 | * memory map if the VGA enable bit of the Bridge Control register is |
||
773 | * set for embedded VGA. |
||
774 | */ |
||
775 | if (res->flags & IORESOURCE_ROM_SHADOW) { |
||
776 | /* primary video rom always starts here */ |
||
2997 | Serge | 777 | start = (loff_t)0xC0000; |
1963 | serge | 778 | *size = 0x20000; /* cover C000:0 through E000:0 */ |
779 | } else { |
||
3764 | Serge | 780 | if (res->flags & |
781 | (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) { |
||
1963 | serge | 782 | *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); |
2997 | Serge | 783 | return (void __iomem *)(unsigned long) |
784 | pci_resource_start(pdev, PCI_ROM_RESOURCE); |
||
1963 | serge | 785 | } else { |
5271 | serge | 786 | start = (loff_t)0xC0000; |
787 | *size = 0x20000; /* cover C000:0 through E000:0 */ |
||
1963 | serge | 788 | |
789 | } |
||
790 | } |
||
791 | |||
792 | rom = ioremap(start, *size); |
||
793 | if (!rom) { |
||
794 | /* restore enable if ioremap fails */ |
||
795 | if (!(res->flags & (IORESOURCE_ROM_ENABLE | |
||
796 | IORESOURCE_ROM_SHADOW | |
||
797 | IORESOURCE_ROM_COPY))) |
||
798 | pci_disable_rom(pdev); |
||
799 | return NULL; |
||
800 | } |
||
801 | |||
802 | /* |
||
803 | * Try to find the true size of the ROM since sometimes the PCI window |
||
804 | * size is much larger than the actual size of the ROM. |
||
805 | * True size is important if the ROM is going to be copied. |
||
806 | */ |
||
2997 | Serge | 807 | *size = pci_get_rom_size(pdev, rom, *size); |
808 | return rom; |
||
809 | } |
||
1963 | serge | 810 | |
2997 | Serge | 811 | void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom) |
812 | { |
||
813 | struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; |
||
1117 | serge | 814 | |
2997 | Serge | 815 | if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) |
816 | return; |
||
1117 | serge | 817 | |
2997 | Serge | 818 | iounmap(rom); |
819 | |||
820 | /* Disable again before continuing, leave enabled if pci=rom */ |
||
821 | if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW))) |
||
822 | pci_disable_rom(pdev); |
||
823 | } |
||
824 | |||
825 | static void __pci_set_master(struct pci_dev *dev, bool enable) |
||
826 | { |
||
827 | u16 old_cmd, cmd; |
||
828 | |||
829 | pci_read_config_word(dev, PCI_COMMAND, &old_cmd); |
||
830 | if (enable) |
||
831 | cmd = old_cmd | PCI_COMMAND_MASTER; |
||
1117 | serge | 832 | else |
2997 | Serge | 833 | cmd = old_cmd & ~PCI_COMMAND_MASTER; |
834 | if (cmd != old_cmd) { |
||
5271 | serge | 835 | dbgprintf("%s bus mastering\n", |
836 | enable ? "enabling" : "disabling"); |
||
2997 | Serge | 837 | pci_write_config_word(dev, PCI_COMMAND, cmd); |
838 | } |
||
839 | dev->is_busmaster = enable; |
||
840 | } |
||
1117 | serge | 841 | |
2997 | Serge | 842 | |
843 | /* pci_set_master - enables bus-mastering for device dev |
||
844 | * @dev: the PCI device to enable |
||
845 | * |
||
846 | * Enables bus-mastering on the device and calls pcibios_set_master() |
||
847 | * to do the needed arch specific settings. |
||
848 | */ |
||
849 | void pci_set_master(struct pci_dev *dev) |
||
850 | { |
||
851 | __pci_set_master(dev, true); |
||
852 | // pcibios_set_master(dev); |
||
1117 | serge | 853 | } |
854 | |||
2997 | Serge | 855 | /** |
856 | * pci_clear_master - disables bus-mastering for device dev |
||
857 | * @dev: the PCI device to disable |
||
858 | */ |
||
859 | void pci_clear_master(struct pci_dev *dev) |
||
860 | { |
||
861 | __pci_set_master(dev, false); |
||
862 | } |
||
1119 | serge | 863 | |
2997 | Serge | 864 | |
865 | static inline int pcie_cap_version(const struct pci_dev *dev) |
||
1119 | serge | 866 | { |
2997 | Serge | 867 | return dev->pcie_flags_reg & PCI_EXP_FLAGS_VERS; |
868 | } |
||
1119 | serge | 869 | |
2997 | Serge | 870 | static inline bool pcie_cap_has_devctl(const struct pci_dev *dev) |
871 | { |
||
872 | return true; |
||
873 | } |
||
1119 | serge | 874 | |
2997 | Serge | 875 | static inline bool pcie_cap_has_lnkctl(const struct pci_dev *dev) |
876 | { |
||
877 | int type = pci_pcie_type(dev); |
||
878 | |||
879 | return pcie_cap_version(dev) > 1 || |
||
880 | type == PCI_EXP_TYPE_ROOT_PORT || |
||
881 | type == PCI_EXP_TYPE_ENDPOINT || |
||
882 | type == PCI_EXP_TYPE_LEG_END; |
||
883 | } |
||
884 | |||
885 | static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev) |
||
886 | { |
||
887 | int type = pci_pcie_type(dev); |
||
888 | |||
889 | return pcie_cap_version(dev) > 1 || |
||
890 | type == PCI_EXP_TYPE_ROOT_PORT || |
||
891 | (type == PCI_EXP_TYPE_DOWNSTREAM && |
||
892 | dev->pcie_flags_reg & PCI_EXP_FLAGS_SLOT); |
||
893 | } |
||
894 | |||
895 | static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev) |
||
896 | { |
||
897 | int type = pci_pcie_type(dev); |
||
898 | |||
899 | return pcie_cap_version(dev) > 1 || |
||
900 | type == PCI_EXP_TYPE_ROOT_PORT || |
||
901 | type == PCI_EXP_TYPE_RC_EC; |
||
902 | } |
||
903 | |||
904 | static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos) |
||
905 | { |
||
906 | if (!pci_is_pcie(dev)) |
||
907 | return false; |
||
908 | |||
909 | switch (pos) { |
||
910 | case PCI_EXP_FLAGS_TYPE: |
||
911 | return true; |
||
912 | case PCI_EXP_DEVCAP: |
||
913 | case PCI_EXP_DEVCTL: |
||
914 | case PCI_EXP_DEVSTA: |
||
915 | return pcie_cap_has_devctl(dev); |
||
916 | case PCI_EXP_LNKCAP: |
||
917 | case PCI_EXP_LNKCTL: |
||
918 | case PCI_EXP_LNKSTA: |
||
919 | return pcie_cap_has_lnkctl(dev); |
||
920 | case PCI_EXP_SLTCAP: |
||
921 | case PCI_EXP_SLTCTL: |
||
922 | case PCI_EXP_SLTSTA: |
||
923 | return pcie_cap_has_sltctl(dev); |
||
924 | case PCI_EXP_RTCTL: |
||
925 | case PCI_EXP_RTCAP: |
||
926 | case PCI_EXP_RTSTA: |
||
927 | return pcie_cap_has_rtctl(dev); |
||
928 | case PCI_EXP_DEVCAP2: |
||
929 | case PCI_EXP_DEVCTL2: |
||
930 | case PCI_EXP_LNKCAP2: |
||
931 | case PCI_EXP_LNKCTL2: |
||
932 | case PCI_EXP_LNKSTA2: |
||
933 | return pcie_cap_version(dev) > 1; |
||
934 | default: |
||
935 | return false; |
||
936 | } |
||
937 | } |
||
938 | |||
939 | /* |
||
940 | * Note that these accessor functions are only for the "PCI Express |
||
941 | * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the |
||
942 | * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.) |
||
943 | */ |
||
944 | int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val) |
||
945 | { |
||
946 | int ret; |
||
947 | |||
948 | *val = 0; |
||
949 | if (pos & 1) |
||
950 | return -EINVAL; |
||
951 | |||
952 | if (pcie_capability_reg_implemented(dev, pos)) { |
||
953 | ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val); |
||
954 | /* |
||
955 | * Reset *val to 0 if pci_read_config_word() fails, it may |
||
956 | * have been written as 0xFFFF if hardware error happens |
||
957 | * during pci_read_config_word(). |
||
958 | */ |
||
959 | if (ret) |
||
960 | *val = 0; |
||
961 | return ret; |
||
962 | } |
||
963 | |||
964 | /* |
||
965 | * For Functions that do not implement the Slot Capabilities, |
||
966 | * Slot Status, and Slot Control registers, these spaces must |
||
967 | * be hardwired to 0b, with the exception of the Presence Detect |
||
968 | * State bit in the Slot Status register of Downstream Ports, |
||
969 | * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8) |
||
970 | */ |
||
971 | if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && |
||
972 | pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { |
||
973 | *val = PCI_EXP_SLTSTA_PDS; |
||
974 | } |
||
975 | |||
976 | return 0; |
||
977 | } |
||
978 | EXPORT_SYMBOL(pcie_capability_read_word); |
||
979 | |||
980 | int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val) |
||
981 | { |
||
982 | int ret; |
||
983 | |||
984 | *val = 0; |
||
985 | if (pos & 3) |
||
986 | return -EINVAL; |
||
987 | |||
988 | if (pcie_capability_reg_implemented(dev, pos)) { |
||
989 | ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val); |
||
990 | /* |
||
991 | * Reset *val to 0 if pci_read_config_dword() fails, it may |
||
992 | * have been written as 0xFFFFFFFF if hardware error happens |
||
993 | * during pci_read_config_dword(). |
||
994 | */ |
||
995 | if (ret) |
||
996 | *val = 0; |
||
997 | return ret; |
||
998 | } |
||
999 | |||
1000 | if (pci_is_pcie(dev) && pos == PCI_EXP_SLTCTL && |
||
1001 | pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { |
||
1002 | *val = PCI_EXP_SLTSTA_PDS; |
||
1003 | } |
||
1004 | |||
1005 | return 0; |
||
1006 | } |
||
1007 | EXPORT_SYMBOL(pcie_capability_read_dword); |
||
1008 | |||
1009 | int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) |
||
1010 | { |
||
1011 | if (pos & 1) |
||
1012 | return -EINVAL; |
||
1013 | |||
1014 | if (!pcie_capability_reg_implemented(dev, pos)) |
||
1119 | serge | 1015 | return 0; |
2997 | Serge | 1016 | |
1017 | return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val); |
||
1119 | serge | 1018 | } |
2997 | Serge | 1019 | EXPORT_SYMBOL(pcie_capability_write_word); |
1119 | serge | 1020 | |
2997 | Serge | 1021 | int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val) |
1022 | { |
||
1023 | if (pos & 3) |
||
1024 | return -EINVAL; |
||
1963 | serge | 1025 | |
2997 | Serge | 1026 | if (!pcie_capability_reg_implemented(dev, pos)) |
1027 | return 0; |
||
1028 | |||
1029 | return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val); |
||
1030 | } |
||
1031 | EXPORT_SYMBOL(pcie_capability_write_dword); |
||
1032 | |||
5078 | serge | 1033 | int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, |
1034 | u16 clear, u16 set) |
||
1035 | { |
||
1036 | int ret; |
||
1037 | u16 val; |
||
1038 | |||
1039 | ret = pcie_capability_read_word(dev, pos, &val); |
||
1040 | if (!ret) { |
||
1041 | val &= ~clear; |
||
1042 | val |= set; |
||
1043 | ret = pcie_capability_write_word(dev, pos, val); |
||
1044 | } |
||
1045 | |||
1046 | return ret; |
||
1047 | } |
||
1048 | |||
1049 | |||
1050 | |||
1051 | int pcie_get_readrq(struct pci_dev *dev) |
||
1052 | { |
||
1053 | u16 ctl; |
||
1054 | |||
1055 | pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl); |
||
1056 | |||
1057 | return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12); |
||
1058 | } |
||
1059 | EXPORT_SYMBOL(pcie_get_readrq); |
||
1060 | |||
1061 | /** |
||
1062 | * pcie_set_readrq - set PCI Express maximum memory read request |
||
1063 | * @dev: PCI device to query |
||
1064 | * @rq: maximum memory read count in bytes |
||
1065 | * valid values are 128, 256, 512, 1024, 2048, 4096 |
||
1066 | * |
||
1067 | * If possible sets maximum memory read request in bytes |
||
1068 | */ |
||
1069 | int pcie_set_readrq(struct pci_dev *dev, int rq) |
||
1070 | { |
||
1071 | u16 v; |
||
1072 | |||
1073 | if (rq < 128 || rq > 4096 || !is_power_of_2(rq)) |
||
1074 | return -EINVAL; |
||
1075 | |||
1076 | v = (ffs(rq) - 8) << 12; |
||
1077 | |||
1078 | return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, |
||
1079 | PCI_EXP_DEVCTL_READRQ, v); |
||
1080 | }><>>><>>=>>>><>><>><>>4)><4)> |
||
1081 |