/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages (KernelAlloc() in this KolibriOS port), then splits them up into
 * blocks of the required size.  Many older drivers still have their own code
 * to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
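/*
 * A minimal usage sketch (illustrative only; the device pointer, block
 * size, alignment and error handling below are hypothetical):
 *
 *    struct dma_pool *pool;
 *    dma_addr_t dma;
 *    void *buf;
 *
 *    pool = dma_pool_create("demo", dev, 64, 16, 4096);
 *    if (!pool)
 *        return NULL;
 *    buf = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *    if (buf) {
 *        // ... program the device with 'dma', use 'buf' from the CPU ...
 *        dma_pool_free(pool, buf, dma);
 *    }
 *    dma_pool_destroy(pool);    // only after every block has been freed
 */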

/*
 * The original header names were stripped by the repository viewer;
 * the list below is a reconstruction covering what this file uses.
 */
#include <linux/kernel.h>   /* printk(), KERN_ERR, ALIGN(), max_t(), PAGE_SIZE */
#include <linux/types.h>    /* size_t, u32, dma_addr_t */
#include <linux/gfp.h>      /* gfp_t, GFP_KERNEL */
#include <linux/list.h>     /* struct list_head and the list_*() helpers */
#include <linux/mutex.h>    /* DEFINE_MUTEX(), mutex_lock()/mutex_unlock() */
#include <linux/slab.h>     /* kmalloc(), kfree() */
#include <syscall.h>        /* KernelAlloc(), KernelFree(), GetPgAddr(), safe_cli()/safe_sti() */

struct dma_pool {       /* the pool */
    struct list_head page_list;     /* pages backing this pool */
    struct mutex lock;              /* unused in this port; see dma_pool_create() */
    size_t size;                    /* block size, after alignment */
    size_t allocation;              /* bytes grabbed for each page */
    size_t boundary;                /* blocks never cross this boundary */
    struct list_head pools;         /* entry in the global pools list */
};

struct dma_page {       /* cacheable header for 'allocation' bytes */
    struct list_head page_list;     /* entry in dma_pool.page_list */
    void *vaddr;                    /* kernel virtual address of the page */
    dma_addr_t dma;                 /* bus address of the same memory */
    unsigned int in_use;            /* blocks currently allocated from it */
    unsigned int offset;            /* offset of the first free block */
};


static DEFINE_MUTEX(pools_lock);

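/*
 * Relationship sketch (illustrative): each dma_page is a small malloc'd
 * header describing one KernelAlloc'd buffer of 'allocation' bytes:
 *
 *   struct dma_pool             struct dma_page              DMA buffer
 *   page_list <-------------->  page_list
 *   size / allocation /         vaddr ---------------------> [blk][blk]...[blk]
 *   boundary                    dma    (bus address of that same buffer)
 *                               offset (first free block; == allocation when full)
 *                               in_use (count of live blocks)
 */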
/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
                 size_t size, size_t align, size_t boundary)
{
    struct dma_pool *retval;
    size_t allocation;

    if (align == 0) {
        align = 1;
    } else if (align & (align - 1)) {
        return NULL;                /* not a power of two */
    }

    if (size == 0) {
        return NULL;
    } else if (size < 4) {
        size = 4;                   /* room for the in-band free-list link */
    }

    if ((size % align) != 0)
        size = ALIGN(size, align);

    allocation = max_t(size_t, size, PAGE_SIZE);

    /* round each page up to a 32 KiB (0x8000) multiple */
    allocation = (allocation + 0x7FFF) & ~0x7FFF;

    if (!boundary) {
        boundary = allocation;
    } else if ((boundary < size) || (boundary & (boundary - 1))) {
        return NULL;
    }

    retval = kmalloc(sizeof(*retval), GFP_KERNEL);
    if (!retval)
        return retval;

    INIT_LIST_HEAD(&retval->page_list);

//    spin_lock_init(&retval->lock);

    retval->size = size;
    retval->boundary = boundary;
    retval->allocation = allocation;

    INIT_LIST_HEAD(&retval->pools);

    return retval;
}
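/*
 * Worked example of the sizing above: dma_pool_create("demo", dev, 100, 64, 0)
 * rounds size up to 128 (the next multiple of align), then picks
 * allocation = max(128, PAGE_SIZE) rounded up to 0x8000 (32 KiB); since
 * boundary was 0 it defaults to the whole 32 KiB allocation.
 */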

/* Chain every block of a fresh page into a singly-linked free list: the
 * first word of each free block holds the offset of the next free block.
 * Whenever the following block would reach past a boundary, the link jumps
 * to the start of the next boundary window instead. */
static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
    unsigned int offset = 0;
    unsigned int next_boundary = pool->boundary;

    do {
        unsigned int next = offset + pool->size;
        if (unlikely((next + pool->size) >= next_boundary)) {
            next = next_boundary;
            next_boundary += pool->boundary;
        }
        *(int *)(page->vaddr + offset) = next;
        offset = next;
    } while (offset < pool->allocation);
}
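/*
 * Example chain, assuming size = 0x1800 and boundary = allocation = 0x8000:
 *    0x0000 -> 0x1800 -> 0x3000 -> 0x4800 -> 0x6000 -> 0x8000 (end of page)
 * The block at 0x6000 ends at 0x7800; the 0x7800..0x8000 remainder is left
 * out of the list so that no block straddles the boundary.
 */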
 

/* Grab a fresh page of 'allocation' bytes plus its bookkeeping header. */
static struct dma_page *pool_alloc_page(struct dma_pool *pool)
{
    struct dma_page *page;

    page = __builtin_malloc(sizeof(*page));
    if (!page)
        return NULL;

    page->vaddr = (void *)KernelAlloc(pool->allocation);

    dbgprintf("%s 0x%0x ", __FUNCTION__, page->vaddr);

    if (page->vaddr)
    {
        page->dma = GetPgAddr(page->vaddr);

        dbgprintf("dma 0x%0x\n", page->dma);

        pool_initialise_page(pool, page);
        list_add(&page->page_list, &pool->page_list);
        page->in_use = 0;
        page->offset = 0;
    } else {
        free(page);
        page = NULL;
    }
    return page;
}

static inline int is_page_busy(struct dma_page *page)
{
    return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
    KernelFree(page->vaddr);
    list_del(&page->page_list);
    free(page);
}
 

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
    mutex_lock(&pools_lock);
    list_del(&pool->pools);
    mutex_unlock(&pools_lock);

    while (!list_empty(&pool->page_list)) {
        struct dma_page *page;
        page = list_entry(pool->page_list.next,
                  struct dma_page, page_list);
        if (is_page_busy(page))
        {
            printk(KERN_ERR "dma_pool_destroy %p busy\n",
                   page->vaddr);
            /* leak the still-in-use consistent memory */
            list_del(&page->page_list);
            kfree(page);
        } else
            pool_free_page(pool, page);
    }

    kfree(pool);
}
 

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask (unused in this port)
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
             dma_addr_t *handle)
{
    u32     efl;
    struct dma_page *page;
    size_t  offset;
    void   *retval;

    /* this port serialises by disabling interrupts instead of taking
     * the per-pool spinlock used upstream */
    efl = safe_cli();

    list_for_each_entry(page, &pool->page_list, page_list) {
        if (page->offset < pool->allocation)
            goto ready;
    }
    page = pool_alloc_page(pool);
    if (!page)
    {
        retval = NULL;
        goto done;
    }

 ready:
    page->in_use++;
    offset = page->offset;
    /* pop the first free block: its first word holds the next offset */
    page->offset = *(int *)(page->vaddr + offset);
    retval = offset + page->vaddr;
    *handle = offset + page->dma;
 done:
    safe_sti(efl);
    return retval;
}
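/*
 * Worked example with hypothetical addresses: for a page at
 * vaddr 0xC0100000 / dma 0x00100000 whose first free block sits at
 * offset 0x40, the caller receives retval = 0xC0100040 and
 * *handle = 0x00100040, and page->offset advances to the link stored
 * in that block's first word.
 */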
 

/* Map a dma address back to the page it belongs to, or NULL. */
static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
    struct dma_page *page;
    u32  efl;

    efl = safe_cli();

    list_for_each_entry(page, &pool->page_list, page_list) {
        if (dma < page->dma)
            continue;
        if (dma < (page->dma + pool->allocation))
            goto done;
    }
    page = NULL;
 done:
    safe_sti(efl);

    return page;
}
 
284
/**
285
 * dma_pool_free - put block back into dma pool
286
 * @pool: the dma pool holding the block
287
 * @vaddr: virtual address of block
288
 * @dma: dma address of block
289
 *
290
 * Caller promises neither device nor driver will again touch this block
291
 * unless it is first re-allocated.
292
 */
293
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
294
{
295
    struct dma_page *page;
296
    unsigned long flags;
297
    unsigned int offset;
298
 
5270 serge 299
    u32 efl;
1616 serge 300
 
301
    page = pool_find_page(pool, dma);
302
    if (!page) {
303
        printk(KERN_ERR "dma_pool_free %p/%lx (bad dma)\n",
304
               vaddr, (unsigned long)dma);
305
        return;
306
    }
307
 
308
    offset = vaddr - page->vaddr;
309
 
310
    efl = safe_cli();
311
    {
312
        page->in_use--;
313
        *(int *)vaddr = page->offset;
314
        page->offset = offset;
315
    /*
316
     * Resist a temptation to do
317
     *    if (!is_page_busy(page)) pool_free_page(pool, page);
318
     * Better have a few empty pages hang around.
319
     */
320
    }safe_sti(efl);
321
}
322
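/*
 * Example of the push above: freeing the block at offset 0x40 while
 * page->offset == 0x80 writes 0x80 into that block's first word and sets
 * page->offset = 0x40, so the most recently freed block is reused first
 * (LIFO order).
 */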