/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
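/*
 * Typical usage, sketched (the device pointer, pool name, and the 64-byte
 * block size below are illustrative assumptions, not part of this file):
 *
 *	struct dma_pool *pool;
 *	dma_addr_t dma;
 *	void *vaddr;
 *
 *	pool = dma_pool_create("descriptors", dev, 64, 16, 4096);
 *	if (!pool)
 *		return -ENOMEM;
 *	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (!vaddr)
 *		return -ENOMEM;
 *	...	hand 'dma' to the device, access the block through 'vaddr'
 *	dma_pool_free(pool, vaddr, dma);
 *	dma_pool_destroy(pool);
 */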


#include
#include
#include
#include
#include
#include
#include


struct dma_pool {		/* the pool */
	struct list_head page_list;
	struct mutex lock;
	size_t size;
	size_t allocation;
	size_t boundary;
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};


static DEFINE_MUTEX(pools_lock);


/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;

	if (align == 0) {
		align = 1;
	} else if (align & (align - 1)) {
		return NULL;
	}

	if (size == 0) {
		return NULL;
	} else if (size < 4) {
		size = 4;
	}

	if ((size % align) != 0)
		size = ALIGN(size, align);

	allocation = max_t(size_t, size, PAGE_SIZE);

	/* round the per-page allocation up to a 32 KiB granule */
	allocation = (allocation + 0x7FFF) & ~0x7FFF;

	if (!boundary) {
		boundary = allocation;
	} else if ((boundary < size) || (boundary & (boundary - 1))) {
		return NULL;
	}

	retval = kmalloc(sizeof(*retval), GFP_KERNEL);

	if (!retval)
		return retval;

	INIT_LIST_HEAD(&retval->page_list);

//	spin_lock_init(&retval->lock);

	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;

	INIT_LIST_HEAD(&retval->pools);

	return retval;
}
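/*
 * Worked example (illustrative numbers; assumes PAGE_SIZE is 4096):
 * size = 24, align = 16, boundary = 0 gives size = ALIGN(24, 16) = 32,
 * allocation = max(32, 4096) rounded up to 0x8000 = 32768 bytes, and
 * boundary = allocation, so no block can cross the 32 KiB allocation.
 */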

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	/*
	 * Chain all free blocks through the page: the first bytes of each
	 * free block store the offset of the next one.
	 */
	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}
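/*
 * Illustrative chain (assuming size = 96 and boundary = 4096): the free
 * list runs 0 -> 96 -> ... -> 3936, then jumps straight to 4096, wasting
 * the 64 bytes at 4032 so that no block straddles a 4 KiB boundary.
 */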


static struct dma_page *pool_alloc_page(struct dma_pool *pool)
{
	struct dma_page *page;

	page = __builtin_malloc(sizeof(*page));
	if (!page)
		return NULL;
	page->vaddr = (void *)KernelAlloc(pool->allocation);

	dbgprintf("%s 0x%0x ", __FUNCTION__, page->vaddr);

	if (page->vaddr) {
		page->dma = GetPgAddr(page->vaddr);

		dbgprintf("dma 0x%0x\n", page->dma);

		pool_initialise_page(pool, page);
		list_add(&page->page_list, &pool->page_list);
		page->in_use = 0;
		page->offset = 0;
	} else {
		free(page);
		page = NULL;
	}
	return page;
}
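/*
 * In this port, KernelAlloc()/GetPgAddr() take the place of the upstream
 * dma_alloc_coherent() call mentioned in the header comment: KernelAlloc()
 * supplies the kernel virtual mapping and GetPgAddr() its physical address
 * (an assumption drawn from how they are used above).
 */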

static inline int is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}


static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

	KernelFree(page->vaddr);
	list_del(&page->page_list);
	free(page);
}


/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			printk(KERN_ERR "dma_pool_destroy %p busy\n",
			       page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}


/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	u32 efl;
	struct dma_page *page;
	size_t offset;
	void *retval;

	/* disable interrupts while we touch the pool's page list */
	efl = safe_cli();
restart:
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}
	page = pool_alloc_page(pool);
	if (!page) {
		retval = NULL;
		goto done;
	}

ready:
	page->in_use++;
	/* pop the first free block: its first bytes hold the next offset */
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
done:
	safe_sti(efl);
	return retval;
}
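/*
 * Free-list pop, illustrated (assuming a fresh page with size = 96):
 * before the call page->offset == 0 and *(int *)page->vaddr == 96; after
 * it the caller owns bytes 0..95 and page->offset == 96 names the next
 * free block.
 */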



static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;
	u32 efl;

	efl = safe_cli();

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		/* dma lies inside this page's [dma, dma + allocation) range */
		if (dma < (page->dma + pool->allocation))
			goto done;
	}
	page = NULL;
done:
	safe_sti(efl);

	return page;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned int offset;
	u32 efl;

	page = pool_find_page(pool, dma);
	if (!page) {
		printk(KERN_ERR "dma_pool_free %p/%lx (bad dma)\n",
		       vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;

	efl = safe_cli();
	{
		page->in_use--;
		/* push the block onto the head of the page's free list */
		*(int *)vaddr = page->offset;
		page->offset = offset;
		/*
		 * Resist a temptation to do
		 *    if (!is_page_busy(page)) pool_free_page(pool, page);
		 * Better have a few empty pages hang around.
		 */
	}
	safe_sti(efl);
}