* keep a count of how many are currently allocated from each page. |
*/ |
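/*
 * Illustrative usage sketch (not part of this driver; my_dev and the sizes
 * are hypothetical):
 *
 *      struct dma_pool *pool;
 *      dma_addr_t bus_addr;
 *      void *cpu_addr;
 *
 *      pool     = dma_pool_create("rq_pool", my_dev, 64, 16, 0);
 *      cpu_addr = dma_pool_alloc(pool, GFP_KERNEL, &bus_addr);
 *      ...
 *      dma_pool_free(pool, cpu_addr, bus_addr);
 *      dma_pool_destroy(pool);
 */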
|
#include <linux/device.h> |
#include <linux/dmapool.h> |
#include <linux/kernel.h> |
#include <linux/list.h> |
#include <linux/mutex.h> |
|
#include <ddk.h> |
#include <linux/slab.h> |
#include <linux/spinlock.h> |
#include <linux/types.h> |
|
#include <linux/errno.h> |
#include <linux/pci.h> |
#include <linux/gfp.h> |
#include <syscall.h> |
|
|
struct dma_pool { /* the pool */ |
struct list_head page_list; |
    spinlock_t lock;
size_t size; |
struct device *dev; |
size_t allocation; |
size_t boundary; |
char name[32]; |
struct list_head pools; |
}; |
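/*
 * Each dma_pool keeps a list of dma_page descriptors.  Every descriptor
 * covers one 'allocation'-sized chunk of kernel memory, counts how many
 * blocks from that chunk are handed out (in_use) and remembers the offset
 * of the first free block; the remaining free blocks are chained through
 * their own first word.
 */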
|
struct dma_page {       /* cacheable header for 'allocation' bytes */
    struct list_head page_list;
    void *vaddr;
    dma_addr_t dma;
    unsigned int in_use;
    unsigned int offset;
};
|
|
static DEFINE_MUTEX(pools_lock); |
static DEFINE_MUTEX(pools_reg_lock); |
|
|
|
|
/** |
* dma_pool_create - Creates a pool of consistent memory blocks, for dma. |
* @name: name of pool, for diagnostics |
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * NULL if one can't be created.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
                                 size_t size, size_t align, size_t boundary)
{ |
struct dma_pool *retval; |
size_t allocation; |
|
    if (align == 0) {
        align = 1;
    } else if (align & (align - 1)) {
        return NULL;
    }
|
    if (size == 0) {
        return NULL;
    } else if (size < 4) {
        size = 4;
    }
|
if ((size % align) != 0) |
size = ALIGN(size, align); |

    allocation = max_t(size_t, size, PAGE_SIZE);
|
allocation = (allocation+0x7FFF) & ~0x7FFF; |
|
    if (!boundary) {
        boundary = allocation;
    } else if ((boundary < size) || (boundary & (boundary - 1))) {
        return NULL;
    }
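    /*
     * Illustrative note: with size = 512 and boundary = 4096 every 4 KiB
     * stretch of a page holds eight blocks and no block straddles a 4 KiB
     * line; boundary = 0 falls back to the whole allocation, i.e. no extra
     * constraint beyond the page itself.
     */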
|
retval = kmalloc(sizeof(*retval), GFP_KERNEL); |
|
if (!retval) |
return retval; |
|
    strlcpy(retval->name, name, sizeof(retval->name));

    retval->dev = dev;

    INIT_LIST_HEAD(&retval->page_list);
    spin_lock_init(&retval->lock);
retval->size = size; |
retval->boundary = boundary; |
retval->allocation = allocation; |

    INIT_LIST_HEAD(&retval->pools);

    return retval;
}

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
    unsigned int offset = 0;
    unsigned int next_boundary = pool->boundary;

    do {
        unsigned int next = offset + pool->size;
        if (unlikely((next + pool->size) >= next_boundary)) {
            next = next_boundary;
            next_boundary += pool->boundary;
        }
        *(int *)(page->vaddr + offset) = next;
        offset = next;
} while (offset < pool->allocation); |
} |
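/*
 * pool_initialise_page() threads the free blocks through the page itself:
 * the first word of each free block stores the offset of the next one and
 * page->offset names the head of the list, so dma_pool_alloc() and
 * dma_pool_free() are O(1) pops and pushes on that list.
 */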
|
static struct dma_page *pool_alloc_page(struct dma_pool *pool)
{
    struct dma_page *page;

    page = __builtin_malloc(sizeof(*page));
if (!page) |
return NULL; |
page->vaddr = (void*)KernelAlloc(pool->allocation); |
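    /*
     * KernelAlloc()/GetPgAddr() come from the native kernel (ddk.h,
     * syscall.h); the assumption here is that KernelAlloc() returns mapped,
     * page-aligned memory of at least 'allocation' bytes and GetPgAddr()
     * reports its physical (bus) address.
     */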
|
dbgprintf("%s 0x%0x ",__FUNCTION__, page->vaddr); |
|
    if (page->vaddr) {
page->dma = GetPgAddr(page->vaddr); |
|
dbgprintf("dma 0x%0x\n", page->dma); |
|
pool_initialise_page(pool, page); |
list_add(&page->page_list, &pool->page_list); |
page->in_use = 0; |
page->offset = 0; |
} else { |
        free(page);
page = NULL; |
} |
return page; |
} |
|
static inline int is_page_busy(struct dma_page *page)
{ |
return page->in_use != 0; |
} |
|
|
static void pool_free_page(struct dma_pool *pool, struct dma_page *page) |
{ |
dma_addr_t dma = page->dma; |
|
KernelFree(page->vaddr); |
list_del(&page->page_list); |
    free(page);
} |
|
|
/** |
* dma_pool_destroy - destroys a pool of dma memory blocks. |
* @pool: dma pool that will be destroyed |
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
*/ |
void dma_pool_destroy(struct dma_pool *pool) |
{ |
if (unlikely(!pool)) |
return; |
|
mutex_lock(&pools_reg_lock); |
mutex_lock(&pools_lock); |
list_del(&pool->pools); |
mutex_unlock(&pools_lock); |
|
mutex_unlock(&pools_reg_lock); |
|
while (!list_empty(&pool->page_list)) { |
struct dma_page *page; |
page = list_entry(pool->page_list.next, |
struct dma_page, page_list); |
        if (is_page_busy(page)) {
printk(KERN_ERR "dma_pool_destroy %p busy\n", |
page->vaddr); |
/* leak the still-in-use consistent memory */ |
            list_del(&page->page_list);
            free(page);
        } else
            pool_free_page(pool, page);
    }
|
kfree(pool); |
} |
EXPORT_SYMBOL(dma_pool_destroy); |
|
|
/** |
* dma_pool_alloc - get a block of consistent memory |
* @pool: dma pool that will produce the block |
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a block can't be allocated, NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, |
dma_addr_t *handle) |
{ |
    u32 efl;
struct dma_page *page; |
size_t offset; |
    void *retval;

    efl = safe_cli();
restart: |
list_for_each_entry(page, &pool->page_list, page_list) { |
if (page->offset < pool->allocation) |
goto ready; |
} |
|
page = pool_alloc_page(pool); |
    if (!page) {
        retval = NULL;
        goto done;
    }
|
ready: |
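    /*
     * The first word of a free block holds the offset of the next free
     * block, so handing out the head block is a single list pop.
     */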
page->in_use++; |
offset = page->offset; |
page->offset = *(int *)(page->vaddr + offset); |
retval = offset + page->vaddr; |
*handle = offset + page->dma; |

    if (mem_flags & __GFP_ZERO)
        memset(retval, 0, pool->size);

done:
    safe_sti(efl);
    return retval;
}
EXPORT_SYMBOL(dma_pool_alloc); |
|
static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma) |
{ |
struct dma_page *page; |
u32 efl; |
|
efl = safe_cli(); |
|
list_for_each_entry(page, &pool->page_list, page_list) { |
if (dma < page->dma) |
continue; |
        if (dma < (page->dma + pool->allocation))
goto done; |
} |
page = NULL; |
done: |
safe_sti(efl); |
|
return page; |
} |
|
/** |
* dma_pool_free - put block back into dma pool |
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
    struct dma_page *page;
    unsigned int offset;
    u32 efl;
|
page = pool_find_page(pool, dma); |
if (!page) { |
printk(KERN_ERR "dma_pool_free %p/%lx (bad dma)\n", |
vaddr, (unsigned long)dma); |
return; |
} |
|
offset = vaddr - page->vaddr; |

    efl = safe_cli();
    {
page->in_use--; |
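    /*
     * Push the block back on the front of the page's free list: its first
     * word now stores the offset of the previous head block.
     */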
*(int *)vaddr = page->offset; |
page->offset = offset; |
    /*
     * Resist a temptation to do
     *    if (!is_page_busy(page)) pool_free_page(pool, page);
     * Better have a few empty pages hang around.
     */
    }
    safe_sti(efl);
} |
EXPORT_SYMBOL(dma_pool_free); |
|
/* |
* Managed DMA pool |
*/ |
static void dmam_pool_release(struct device *dev, void *res) |
{ |
struct dma_pool *pool = *(struct dma_pool **)res; |
|
dma_pool_destroy(pool); |
} |
|
static int dmam_pool_match(struct device *dev, void *res, void *match_data) |
{ |
return *(struct dma_pool **)res == match_data; |
} |
|