/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
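
/*
 * Free-list sketch (illustrative numbers only, not taken from any caller):
 * each free block stores the offset of the next free block in its first
 * int, page->offset holds the offset of the first free block, and an
 * offset >= pool->allocation terminates the chain.  With size = 64 the
 * chain of a freshly initialised page starts out as
 *
 *	page->offset = 0,  0 -> 64 -> 128 -> 192 -> ...
 *
 * dma_pool_alloc() pops the head of this chain; dma_pool_free() pushes the
 * freed block back as the new head.
 */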

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>

#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/string.h>
#include <linux/types.h>
#include <syscall.h>		/* KolibriOS DDK: KernelAlloc(), KernelFree(), GetPgAddr() */


struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0)
		return NULL;
	else if (size < 4)
		size = 4;

	if ((size % align) != 0)
		size = ALIGN(size, align);

	allocation = max_t(size_t, size, PAGE_SIZE);

	/* round each pool page allocation up to a 32 KiB multiple */
	allocation = (allocation + 0x7FFF) & ~0x7FFF;

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	retval = kmalloc(sizeof(*retval), GFP_KERNEL);
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;

	INIT_LIST_HEAD(&retval->pools);

	return retval;
}
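
/*
 * Usage sketch (illustrative only; "mydev", the pool name and the sizes
 * below are hypothetical, not part of this file): a driver typically
 * creates one pool per fixed-size DMA object at initialisation time.
 *
 *	struct dma_pool *desc_pool;
 *
 *	desc_pool = dma_pool_create("mydev-desc", mydev, 64, 16, 4096);
 *	if (!desc_pool)
 *		return -ENOMEM;
 *
 * Every block returned from this pool is then 64 bytes long, 16-byte
 * aligned, and never crosses a 4 KiB boundary.
 */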

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}
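
/*
 * Worked example of the boundary handling above (numbers are illustrative,
 * not taken from any caller in this tree): with size = 192 and
 * boundary = 1024 the free chain built here begins
 *
 *	0 -> 192 -> 384 -> 576 -> 768 -> 1024 -> 1216 -> ...
 *
 * At offset 768 the next natural block (960) would end at 1152 and cross
 * the 1024 boundary, so the chain skips straight to 1024 and the bytes
 * 960..1023 of the page are simply left unused.  The chain continues this
 * way until it reaches pool->allocation.
 */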

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;

	/* KolibriOS port: the pool page is backed by KernelAlloc() rather than
	 * dma_alloc_coherent(); GetPgAddr() supplies its physical (DMA) address. */
	page->vaddr = (void *)KernelAlloc(pool->allocation);

	dbgprintf("%s 0x%0x ", __FUNCTION__, page->vaddr);

	if (page->vaddr) {
#ifdef	DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		page->dma = GetPgAddr(page->vaddr);

		dbgprintf("dma 0x%0x\n", page->dma);

		pool_initialise_page(pool, page);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline bool is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
#ifdef	DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	/* KolibriOS port: the page came from KernelAlloc(), so release it
	 * with KernelFree() rather than dma_free_coherent(). */
	KernelFree(page->vaddr);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	if (unlikely(!pool))
		return;

	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	mutex_unlock(&pools_lock);
	mutex_unlock(&pools_reg_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			printk(KERN_ERR "dma_pool_destroy %p busy\n",
			       page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);
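
/*
 * Teardown sketch (illustrative; "desc_pool" is the hypothetical pool from
 * the dma_pool_create() example above): every block must have been returned
 * with dma_pool_free() first, otherwise the loop above deliberately leaks
 * the still-busy pages.
 *
 *	dma_pool_destroy(desc_pool);
 *	desc_pool = NULL;
 */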

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}

	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
	spin_unlock_irqrestore(&pool->lock, flags);

	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
	if (!page)
		return NULL;

	spin_lock_irqsave(&pool->lock, flags);

	list_add(&page->page_list, &pool->page_list);
 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	DMAPOOL_DEBUG
	{
		int i;
		u8 *data = retval;
		/* page->offset is stored in first 4 bytes */
		for (i = sizeof(page->offset); i < pool->size; i++) {
			if (data[i] == POOL_POISON_FREED)
				continue;
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_alloc %s, %p (corrupted)\n",
					pool->name, retval);
			else
				pr_err("dma_pool_alloc %s, %p (corrupted)\n",
					pool->name, retval);

			/*
			 * Dump the first 4 bytes even if they are not
			 * POOL_POISON_FREED
			 */
			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
					data, pool->size, 1);
			break;
		}
	}
	if (!(mem_flags & __GFP_ZERO))
		memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
	spin_unlock_irqrestore(&pool->lock, flags);

	if (mem_flags & __GFP_ZERO)
		memset(retval, 0, pool->size);

	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
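
/*
 * Allocation sketch (illustrative; "desc_pool", "desc" and "desc_dma" are
 * the hypothetical names used in the dma_pool_create() example above):
 *
 *	dma_addr_t desc_dma;
 *	void *desc;
 *
 *	desc = dma_pool_alloc(desc_pool, GFP_KERNEL, &desc_dma);
 *	if (!desc)
 *		return -ENOMEM;
 *
 * "desc" is the CPU-visible address of the block; "desc_dma" is the bus
 * address the device should be programmed with.
 */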

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if ((dma - page->dma) < pool->allocation)
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
		       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
#ifdef	DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			else
				printk(KERN_ERR "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
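
/*
 * Release sketch (illustrative; continues the hypothetical example above):
 * the block must be returned with both the CPU address and the DMA handle
 * that dma_pool_alloc() reported for it.
 *
 *	dma_pool_free(desc_pool, desc, desc_dma);
 */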

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}