/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 * Author: Matthew Wilcox
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
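/*
 * Typical lifecycle, as a minimal sketch (the device pointer and the
 * size/align/boundary values below are illustrative, not taken from any
 * caller of this file):
 *
 *      struct dma_pool *pool;
 *      dma_addr_t dma;
 *      void *cpu;
 *
 *      pool = dma_pool_create("td", dev, 64, 32, 4096);
 *      if (pool) {
 *          cpu = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *          if (cpu) {
 *              // program the device with 'dma', access the block via 'cpu'
 *              dma_pool_free(pool, cpu, dma);
 *          }
 *          dma_pool_destroy(pool);
 *      }
 */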

#include
#include
#include
#include
#include

struct dma_pool {       /* the pool */
    struct list_head page_list;
    struct mutex lock;
    size_t size;
    size_t allocation;
    size_t boundary;
    struct list_head pools;
};

struct dma_page {       /* cacheable header for 'allocation' bytes */
    struct list_head page_list;
    void *vaddr;
    dma_addr_t dma;
    unsigned int in_use;
    unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);
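/*
 * Locking in this port: pools_lock serialises the global pool list walked
 * by dma_pool_destroy(); per-pool state is protected by disabling
 * interrupts (safe_cli()/safe_sti()) rather than by the 'lock' mutex
 * declared above, which is left uninitialised and unused.
 */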

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
                                 size_t size, size_t align, size_t boundary)
{
    struct dma_pool *retval;
    size_t allocation;

    if (align == 0) {
        align = 1;
    } else if (align & (align - 1)) {
        return NULL;
    }

    if (size == 0) {
        return NULL;
    } else if (size < 4) {
        size = 4;
    }

    if ((size % align) != 0)
        size = ALIGN(size, align);

    allocation = max_t(size_t, size, PAGE_SIZE);

    /* round the per-page allocation up to a 32 KiB multiple */
    allocation = (allocation + 0x7FFF) & ~0x7FFF;

    if (!boundary) {
        boundary = allocation;
    } else if ((boundary < size) || (boundary & (boundary - 1))) {
        return NULL;
    }

    retval = kmalloc(sizeof(*retval), GFP_KERNEL);
    if (!retval)
        return retval;

    INIT_LIST_HEAD(&retval->page_list);

//  spin_lock_init(&retval->lock);

    retval->size = size;
    retval->boundary = boundary;
    retval->allocation = allocation;

    INIT_LIST_HEAD(&retval->pools);

    return retval;
}
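/*
 * Example (values are illustrative, not from this driver): a pool of
 * 64-byte blocks, 32-byte aligned, where no block may cross a 4 KiB
 * boundary -- e.g. for a controller that cannot transfer across 4K pages:
 *
 *      pool = dma_pool_create("qtd", dev, 64, 32, 4096);
 */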

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
    unsigned int offset = 0;
    unsigned int next_boundary = pool->boundary;

    do {
        unsigned int next = offset + pool->size;
        if (unlikely((next + pool->size) >= next_boundary)) {
            next = next_boundary;
            next_boundary += pool->boundary;
        }
        *(int *)(page->vaddr + offset) = next;
        offset = next;
    } while (offset < pool->allocation);
}
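/*
 * The free list is kept in-band: the first 4 bytes of each free block hold
 * the offset of the next free block.  Worked example (illustrative numbers):
 * with size = 64, boundary = 4096 and allocation = 32768, the chain built
 * above runs 0 -> 64 -> ... -> 3968, then jumps to 4096, 4160, and so on;
 * the '>=' test conservatively gives up the last 64-byte slot before each
 * boundary, so no returned block spans a 4 KiB boundary.
 */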

static struct dma_page *pool_alloc_page(struct dma_pool *pool)
{
    struct dma_page *page;

    page = malloc(sizeof(*page));
    if (!page)
        return NULL;

    page->vaddr = (void *)KernelAlloc(pool->allocation);

    dbgprintf("%s 0x%0x ", __FUNCTION__, page->vaddr);

    if (page->vaddr) {
        page->dma = GetPgAddr(page->vaddr);

        dbgprintf("dma 0x%0x\n", page->dma);

        pool_initialise_page(pool, page);
        list_add(&page->page_list, &pool->page_list);
        page->in_use = 0;
        page->offset = 0;
    } else {
        free(page);
        page = NULL;
    }
    return page;
}

static inline int is_page_busy(struct dma_page *page)
{
    return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
    KernelFree(page->vaddr);
    list_del(&page->page_list);
    free(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
    mutex_lock(&pools_lock);
    list_del(&pool->pools);
    mutex_unlock(&pools_lock);

    while (!list_empty(&pool->page_list)) {
        struct dma_page *page;
        page = list_entry(pool->page_list.next,
                          struct dma_page, page_list);
        if (is_page_busy(page)) {
            printk(KERN_ERR "dma_pool_destroy %p busy\n",
                   page->vaddr);
            /* leak the still-in-use consistent memory */
            list_del(&page->page_list);
            kfree(page);
        } else
            pool_free_page(pool, page);
    }

    kfree(pool);
}

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
                     dma_addr_t *handle)
{
    u32_t efl;
    struct dma_page *page;
    size_t offset;
    void *retval;

    efl = safe_cli();
restart:
    list_for_each_entry(page, &pool->page_list, page_list) {
        if (page->offset < pool->allocation)
            goto ready;
    }
    page = pool_alloc_page(pool);
    if (!page) {
        retval = NULL;
        goto done;
    }

ready:
    page->in_use++;
    offset = page->offset;
    /* pop this block: its first word holds the next free offset */
    page->offset = *(int *)(page->vaddr + offset);
    retval = offset + page->vaddr;
    *handle = offset + page->dma;
done:
    safe_sti(efl);
    return retval;
}
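/*
 * Minimal call sketch (the variable names and error value are illustrative):
 *
 *      dma_addr_t phys;
 *      void *buf = dma_pool_alloc(pool, GFP_KERNEL, &phys);
 *      if (!buf)
 *          return -ENOMEM;
 *      // hand 'phys' to the device, read/write the block through 'buf'
 */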

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
    struct dma_page *page;
    u32_t efl;

    efl = safe_cli();

    list_for_each_entry(page, &pool->page_list, page_list) {
        if (dma < page->dma)
            continue;
        if (dma < (page->dma + pool->allocation))
            goto done;
    }
    page = NULL;
done:
    safe_sti(efl);

    return page;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
    struct dma_page *page;
    unsigned int offset;
    u32_t efl;

    page = pool_find_page(pool, dma);
    if (!page) {
        printk(KERN_ERR "dma_pool_free %p/%lx (bad dma)\n",
               vaddr, (unsigned long)dma);
        return;
    }

    offset = vaddr - page->vaddr;

    efl = safe_cli();
    {
        page->in_use--;
        /* push the block onto the page's in-band free list */
        *(int *)vaddr = page->offset;
        page->offset = offset;
        /*
         * Resist a temptation to do
         *    if (!is_page_busy(page)) pool_free_page(pool, page);
         * Better have a few empty pages hang around.
         */
    }
    safe_sti(efl);
}