/* arch/x86 DMA mapping header — recovered from an SVN web-viewer export (rev 6514). */
6514 | serge | 1 | #ifndef _ASM_X86_DMA_MAPPING_H |
2 | #define _ASM_X86_DMA_MAPPING_H |
||
3 | |||
4 | /* |
||
5 | * IOMMU interface. See Documentation/DMA-API-HOWTO.txt and |
||
6 | * Documentation/DMA-API.txt for documentation. |
||
7 | */ |
||
8 | |||
/* NOTE(review): the include targets were stripped by the HTML export;
 * restored from the upstream kernel header of this era — verify against
 * the exact source revision. */
#include <linux/kmemcheck.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <linux/dma-contiguous.h>
||
16 | |||
17 | #ifdef CONFIG_ISA |
||
18 | # define ISA_DMA_BIT_MASK DMA_BIT_MASK(24) |
||
19 | #else |
||
20 | # define ISA_DMA_BIT_MASK DMA_BIT_MASK(32) |
||
21 | #endif |
||
22 | |||
23 | #define DMA_ERROR_CODE 0 |
||
24 | |||
25 | extern int iommu_merge; |
||
26 | extern struct device x86_dma_fallback_dev; |
||
27 | extern int panic_on_overflow; |
||
28 | |||
29 | extern struct dma_map_ops *dma_ops; |
||
30 | |||
31 | static inline struct dma_map_ops *get_dma_ops(struct device *dev) |
||
32 | { |
||
33 | #ifndef CONFIG_X86_DEV_DMA_OPS |
||
34 | return dma_ops; |
||
35 | #else |
||
36 | if (unlikely(!dev) || !dev->archdata.dma_ops) |
||
37 | return dma_ops; |
||
38 | else |
||
39 | return dev->archdata.dma_ops; |
||
40 | #endif |
||
41 | } |
||
42 | |||
43 | bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp); |
||
44 | #define arch_dma_alloc_attrs arch_dma_alloc_attrs |
||
45 | |||
46 | #define HAVE_ARCH_DMA_SUPPORTED 1 |
||
47 | extern int dma_supported(struct device *hwdev, u64 mask); |
||
48 | |||
/* NOTE(review): include target stripped by the HTML export; presumably
 * <asm-generic/dma-mapping-common.h> — verify against the source revision. */
#include <asm-generic/dma-mapping-common.h>
||
50 | |||
51 | extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, |
||
52 | dma_addr_t *dma_addr, gfp_t flag, |
||
53 | struct dma_attrs *attrs); |
||
54 | |||
55 | extern void dma_generic_free_coherent(struct device *dev, size_t size, |
||
56 | void *vaddr, dma_addr_t dma_addr, |
||
57 | struct dma_attrs *attrs); |
||
58 | |||
59 | #ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */ |
||
60 | extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size); |
||
61 | extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr); |
||
62 | extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr); |
||
63 | #else |
||
64 | |||
65 | static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) |
||
66 | { |
||
67 | if (!dev->dma_mask) |
||
68 | return 0; |
||
69 | |||
70 | return addr + size - 1 <= *dev->dma_mask; |
||
71 | } |
||
72 | |||
73 | static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) |
||
74 | { |
||
75 | return paddr; |
||
76 | } |
||
77 | |||
78 | static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) |
||
79 | { |
||
80 | return daddr; |
||
81 | } |
||
82 | #endif /* CONFIG_X86_DMA_REMAP */ |
||
83 | |||
84 | static inline void |
||
85 | dma_cache_sync(struct device *dev, void *vaddr, size_t size, |
||
86 | enum dma_data_direction dir) |
||
87 | { |
||
88 | flush_write_buffers(); |
||
89 | } |
||
90 | |||
91 | static inline unsigned long dma_alloc_coherent_mask(struct device *dev, |
||
92 | gfp_t gfp) |
||
93 | { |
||
94 | unsigned long dma_mask = 0; |
||
95 | |||
96 | dma_mask = dev->coherent_dma_mask; |
||
97 | if (!dma_mask) |
||
98 | dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32); |
||
99 | |||
100 | return dma_mask; |
||
101 | } |
||
102 | |||
103 | static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp) |
||
104 | { |
||
105 | unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp); |
||
106 | |||
107 | if (dma_mask <= DMA_BIT_MASK(24)) |
||
108 | gfp |= GFP_DMA; |
||
109 | #ifdef CONFIG_X86_64 |
||
110 | if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA)) |
||
111 | gfp |= GFP_DMA32; |
||
112 | #endif |
||
113 | return gfp; |
||
114 | } |
||
115 | |||
#endif /* _ASM_X86_DMA_MAPPING_H */