/drivers/ddk/Makefile |
---|
25,6 → 25,8 |
NAME_SRCS:= \ |
debug/dbglog.c \ |
debug/chkstk.S \ |
dma/dma_alloc.c \ |
dma/fence.c \ |
io/create.c \ |
io/finfo.c \ |
io/ssize.c \ |
33,6 → 35,7 |
linux/ctype.c \ |
linux/dmapool.c \ |
linux/dmi.c \ |
linux/fbsysfs.c \ |
linux/find_next_bit.c \ |
linux/firmware.c \ |
linux/gcd.c \ |
/drivers/ddk/dma/dma_alloc.c |
---|
0,0 → 1,22 |
#include <linux/types.h> |
#include <linux/gfp.h> |
#include <linux/spinlock.h> |
#include <linux/dma-mapping.h> |
#include <linux/scatterlist.h> |
void *dma_alloc_coherent(struct device *dev, size_t size, |
dma_addr_t *dma_handle, gfp_t gfp) |
{ |
void *ret; |
/* round the request up to a 32 KiB multiple before allocating */ |
size = ALIGN(size, 32768); |
ret = (void *)KernelAlloc(size); |
if (ret) { |
__builtin_memset(ret, 0, size); |
*dma_handle = GetPgAddr(ret); |
} |
return ret; |
} |
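For illustration, a minimal usage sketch (the helper name, device pointer, and 4 KiB size are hypothetical, not part of this changeset):

	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>
	#include <linux/errno.h>

	/* Hypothetical sketch: allocate one coherent buffer for a ring; 'dma'
	 * receives the bus address that gets programmed into the hardware. */
	static int example_alloc_ring(struct device *dev, void **ring, dma_addr_t *dma)
	{
		*ring = dma_alloc_coherent(dev, 4096, dma, GFP_KERNEL);
		if (!*ring)
			return -ENOMEM;
		/* dma_alloc_coherent() already zeroed the buffer */
		return 0;
	}

Note that the implementation above rounds every request up to 32 KiB, so many small allocations are better served by the dma_pool API added further down in this changeset.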
/drivers/ddk/dma/fence.c |
---|
0,0 → 1,370 |
/* |
* Fence mechanism for dma-buf and to allow for asynchronous dma access |
* |
* Copyright (C) 2012 Canonical Ltd |
* Copyright (C) 2012 Texas Instruments |
* |
* Authors: |
* Rob Clark <robdclark@gmail.com> |
* Maarten Lankhorst <maarten.lankhorst@canonical.com> |
* |
* This program is free software; you can redistribute it and/or modify it |
* under the terms of the GNU General Public License version 2 as published by |
* the Free Software Foundation. |
* |
* This program is distributed in the hope that it will be useful, but WITHOUT |
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
* more details. |
*/ |
#include <linux/slab.h> |
#include <linux/export.h> |
#include <linux/atomic.h> |
#include <linux/fence.h> |
/* |
* fence context counter: each execution context should have its own |
* fence context, this allows checking if fences belong to the same |
* context or not. One device can have multiple separate contexts, |
* and they're used if some engine can run independently of another. |
*/ |
static atomic_t fence_context_counter = ATOMIC_INIT(0); |
/** |
* fence_context_alloc - allocate an array of fence contexts |
* @num: [in] number of contexts to allocate |
* |
* This function returns the first index of the newly allocated context range. |
* The fence context is used for setting fence->context to a unique number. |
*/ |
unsigned fence_context_alloc(unsigned num) |
{ |
BUG_ON(!num); |
return atomic_add_return(num, &fence_context_counter) - num; |
} |
EXPORT_SYMBOL(fence_context_alloc); |
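As an illustration (the device structure and engine split are hypothetical), a driver with two independent engines would reserve one context per engine, so fences from different engines are never compared as if they were ordered:

	struct my_device {
		unsigned gfx_context;	/* fences from the render engine */
		unsigned copy_context;	/* fences from the copy engine */
	};

	static void my_device_init_contexts(struct my_device *mdev)
	{
		unsigned base = fence_context_alloc(2);	/* first index of the range */

		mdev->gfx_context = base;
		mdev->copy_context = base + 1;
	}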
/** |
* fence_signal_locked - signal completion of a fence |
* @fence: the fence to signal |
* |
* Signal completion for software callbacks on a fence, this will unblock |
* fence_wait() calls and run all the callbacks added with |
* fence_add_callback(). Can be called multiple times, but since a fence |
* can only go from unsignaled to signaled state, it will only be effective |
* the first time. |
* |
* Unlike fence_signal, this function must be called with fence->lock held. |
*/ |
int fence_signal_locked(struct fence *fence) |
{ |
struct fence_cb *cur, *tmp; |
int ret = 0; |
if (WARN_ON(!fence)) |
return -EINVAL; |
if (!ktime_to_ns(fence->timestamp)) { |
fence->timestamp = ktime_get(); |
smp_mb__before_atomic(); |
} |
if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { |
ret = -EINVAL; |
/* |
* we might have raced with the unlocked fence_signal, |
* still run through all callbacks |
*/ |
} |
list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) { |
list_del_init(&cur->node); |
cur->func(fence, cur); |
} |
return ret; |
} |
EXPORT_SYMBOL(fence_signal_locked); |
/** |
* fence_signal - signal completion of a fence |
* @fence: the fence to signal |
* |
* Signal completion for software callbacks on a fence, this will unblock |
* fence_wait() calls and run all the callbacks added with |
* fence_add_callback(). Can be called multiple times, but since a fence |
* can only go from unsignaled to signaled state, it will only be effective |
* the first time. |
*/ |
int fence_signal(struct fence *fence) |
{ |
unsigned long flags; |
if (!fence) |
return -EINVAL; |
if (!ktime_to_ns(fence->timestamp)) { |
fence->timestamp = ktime_get(); |
smp_mb__before_atomic(); |
} |
if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) |
return -EINVAL; |
if (test_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) { |
struct fence_cb *cur, *tmp; |
spin_lock_irqsave(fence->lock, flags); |
list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) { |
list_del_init(&cur->node); |
cur->func(fence, cur); |
} |
spin_unlock_irqrestore(fence->lock, flags); |
} |
return 0; |
} |
EXPORT_SYMBOL(fence_signal); |
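A sketch of the usual producer side (my_pop_completed_fence() is an assumed driver-specific helper, not part of this file):

	/* Hypothetical sketch: called from the engine's completion interrupt;
	 * signals the oldest finished fence and drops the reference the
	 * interrupt path was holding. */
	static void my_engine_isr(struct my_device *mdev)
	{
		struct fence *fence = my_pop_completed_fence(mdev);

		if (fence) {
			fence_signal(fence);	/* runs registered callbacks */
			fence_put(fence);
		}
	}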
/** |
* fence_wait_timeout - sleep until the fence gets signaled |
* or until timeout elapses |
* @fence: [in] the fence to wait on |
* @intr: [in] if true, do an interruptible wait |
* @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT |
* |
* Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the |
* remaining timeout in jiffies on success. Other error values may be |
* returned on custom implementations. |
* |
* Performs a synchronous wait on this fence. It is assumed the caller |
* directly or indirectly (buf-mgr between reservation and committing) |
* holds a reference to the fence, otherwise the fence might be |
* freed before return, resulting in undefined behavior. |
*/ |
signed long |
fence_wait_timeout(struct fence *fence, bool intr, signed long timeout) |
{ |
signed long ret; |
if (WARN_ON(timeout < 0)) |
return -EINVAL; |
if (timeout == 0) |
return fence_is_signaled(fence); |
ret = fence->ops->wait(fence, intr, timeout); |
return ret; |
} |
EXPORT_SYMBOL(fence_wait_timeout); |
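A consumer-side sketch under the same assumptions (the one-second timeout is chosen arbitrarily):

	#include <linux/errno.h>
	#include <linux/jiffies.h>

	/* Hypothetical sketch: wait interruptibly for up to one second before
	 * touching the buffer the fence protects. */
	static int my_wait_for_buffer(struct fence *fence)
	{
		signed long ret = fence_wait_timeout(fence, true, HZ);

		if (ret == 0)
			return -ETIMEDOUT;	/* wait timed out */
		if (ret < 0)
			return ret;		/* e.g. -ERESTARTSYS */
		return 0;			/* signaled; 'ret' jiffies remained */
	}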
void fence_release(struct kref *kref) |
{ |
struct fence *fence = |
container_of(kref, struct fence, refcount); |
BUG_ON(!list_empty(&fence->cb_list)); |
if (fence->ops->release) |
fence->ops->release(fence); |
else |
fence_free(fence); |
} |
EXPORT_SYMBOL(fence_release); |
void fence_free(struct fence *fence) |
{ |
kfree_rcu(fence, rcu); |
} |
EXPORT_SYMBOL(fence_free); |
/** |
* fence_enable_sw_signaling - enable signaling on fence |
* @fence: [in] the fence to enable |
* |
* This will request that software signaling be enabled, to make the fence |
* complete as soon as possible. |
*/ |
void fence_enable_sw_signaling(struct fence *fence) |
{ |
unsigned long flags; |
if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) && |
!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { |
spin_lock_irqsave(fence->lock, flags); |
if (!fence->ops->enable_signaling(fence)) |
fence_signal_locked(fence); |
spin_unlock_irqrestore(fence->lock, flags); |
} |
} |
EXPORT_SYMBOL(fence_enable_sw_signaling); |
/** |
* fence_add_callback - add a callback to be called when the fence |
* is signaled |
* @fence: [in] the fence to wait on |
* @cb: [in] the callback to register |
* @func: [in] the function to call |
* |
* cb will be initialized by fence_add_callback, no initialization |
* by the caller is required. Any number of callbacks can be registered |
* to a fence, but a callback can only be registered to one fence at a time. |
* |
* Note that the callback can be called from an atomic context. If the |
* fence is already signaled, this function will return -ENOENT (and |
* *not* call the callback). |
* |
* Add a software callback to the fence. The same refcount restrictions |
* apply as for fence_wait; however, the caller doesn't need to keep a |
* refcount to the fence afterwards: when software access is enabled, |
* the creator of the fence is required to keep the fence alive until |
* after it signals with fence_signal. The callback itself can be called |
* from irq context. |
* |
*/ |
int fence_add_callback(struct fence *fence, struct fence_cb *cb, |
fence_func_t func) |
{ |
unsigned long flags; |
int ret = 0; |
bool was_set; |
if (WARN_ON(!fence || !func)) |
return -EINVAL; |
if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { |
INIT_LIST_HEAD(&cb->node); |
return -ENOENT; |
} |
spin_lock_irqsave(fence->lock, flags); |
was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags); |
if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) |
ret = -ENOENT; |
else if (!was_set) { |
if (!fence->ops->enable_signaling(fence)) { |
fence_signal_locked(fence); |
ret = -ENOENT; |
} |
} |
if (!ret) { |
cb->func = func; |
list_add_tail(&cb->node, &fence->cb_list); |
} else |
INIT_LIST_HEAD(&cb->node); |
spin_unlock_irqrestore(fence->lock, flags); |
return ret; |
} |
EXPORT_SYMBOL(fence_add_callback); |
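A minimal sketch of the callback pattern, assuming a workqueue is available; the -ENOENT branch covers the already-signaled case described above:

	struct my_waiter {
		struct fence_cb cb;
		struct work_struct work;
	};

	/* May run in irq context, so it only schedules deferred work. */
	static void my_fence_cb(struct fence *fence, struct fence_cb *cb)
	{
		struct my_waiter *w = container_of(cb, struct my_waiter, cb);

		schedule_work(&w->work);
	}

	static void my_register_waiter(struct fence *fence, struct my_waiter *w)
	{
		if (fence_add_callback(fence, &w->cb, my_fence_cb) == -ENOENT)
			schedule_work(&w->work);	/* fence already signaled */
	}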
/** |
* fence_remove_callback - remove a callback from the signaling list |
* @fence: [in] the fence to wait on |
* @cb: [in] the callback to remove |
* |
* Remove a previously queued callback from the fence. This function returns |
* true if the callback is successfully removed, or false if the fence has |
* already been signaled. |
* |
* *WARNING*: |
* Cancelling a callback should only be done if you really know what you're |
* doing, since deadlocks and race conditions could occur all too easily. For |
* this reason, it should only ever be done on hardware lockup recovery, |
* with a reference held to the fence. |
*/ |
bool |
fence_remove_callback(struct fence *fence, struct fence_cb *cb) |
{ |
unsigned long flags; |
bool ret; |
spin_lock_irqsave(fence->lock, flags); |
ret = !list_empty(&cb->node); |
if (ret) |
list_del_init(&cb->node); |
spin_unlock_irqrestore(fence->lock, flags); |
return ret; |
} |
EXPORT_SYMBOL(fence_remove_callback); |
struct default_wait_cb { |
struct fence_cb base; |
struct task_struct *task; |
}; |
static bool |
fence_test_signaled_any(struct fence **fences, uint32_t count) |
{ |
int i; |
for (i = 0; i < count; ++i) { |
struct fence *fence = fences[i]; |
if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) |
return true; |
} |
return false; |
} |
/** |
* fence_wait_any_timeout - sleep until any fence gets signaled |
* or until timeout elapses |
* @fences: [in] array of fences to wait on |
* @count: [in] number of fences to wait on |
* @intr: [in] if true, do an interruptible wait |
* @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT |
* |
* Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if |
* interrupted, 0 if the wait timed out, or the remaining timeout in jiffies |
* on success. |
* |
* Synchronously waits for the first fence in the array to be signaled. The |
* caller needs to hold a reference to all fences in the array, otherwise a |
* fence might be freed before return, resulting in undefined behavior. |
*/ |
/** |
* fence_init - Initialize a custom fence. |
* @fence: [in] the fence to initialize |
* @ops: [in] the fence_ops for operations on this fence |
* @lock: [in] the irqsafe spinlock to use for locking this fence |
* @context: [in] the execution context this fence is run on |
* @seqno: [in] a linear increasing sequence number for this context |
* |
* Initializes an allocated fence. The caller doesn't have to keep its |
* refcount after committing with this fence, but it will need to hold a |
* refcount again if fence_ops.enable_signaling gets called. This can |
* be used for implementing other types of fences. |
* |
* context and seqno are used for easy comparison between fences, allowing |
* one to check which fence is later by simply using fence_later. |
*/ |
void |
fence_init(struct fence *fence, const struct fence_ops *ops, |
spinlock_t *lock, unsigned context, unsigned seqno) |
{ |
BUG_ON(!lock); |
BUG_ON(!ops || !ops->wait || !ops->enable_signaling || |
!ops->get_driver_name || !ops->get_timeline_name); |
kref_init(&fence->refcount); |
fence->ops = ops; |
INIT_LIST_HEAD(&fence->cb_list); |
fence->lock = lock; |
fence->context = context; |
fence->seqno = seqno; |
fence->flags = 0UL; |
} |
EXPORT_SYMBOL(fence_init); |
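To tie the pieces together, a sketch of a minimal custom fence; every my_* name is hypothetical, and it assumes the file's default wait implementation (the default_wait_cb machinery above) is exported as fence_default_wait:

	static const char *my_get_driver_name(struct fence *fence)
	{
		return "my_driver";
	}

	static const char *my_get_timeline_name(struct fence *fence)
	{
		return "my_engine";
	}

	static bool my_enable_signaling(struct fence *fence)
	{
		/* a real driver would arm a completion interrupt here */
		return true;
	}

	static const struct fence_ops my_fence_ops = {
		.get_driver_name = my_get_driver_name,
		.get_timeline_name = my_get_timeline_name,
		.enable_signaling = my_enable_signaling,
		.wait = fence_default_wait,
	};

	static DEFINE_SPINLOCK(my_fence_lock);

	/* Hypothetical sketch: allocate and initialize one fence on a context. */
	static struct fence *my_fence_create(unsigned context, unsigned seqno)
	{
		struct fence *fence = kzalloc(sizeof(*fence), GFP_KERNEL);

		if (fence)
			fence_init(fence, &my_fence_ops, &my_fence_lock,
				   context, seqno);
		return fence;
	}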
/drivers/ddk/linux/dmapool.c |
---|
22,12 → 22,17 |
* keep a count of how many are currently allocated from each page. |
*/ |
#include <linux/device.h> |
#include <linux/dmapool.h> |
#include <linux/kernel.h> |
#include <linux/list.h> |
#include <ddk.h> |
#include <linux/slab.h> |
#include <linux/errno.h> |
#include <linux/spinlock.h> |
#include <linux/types.h> |
#include <linux/mutex.h> |
#include <linux/pci.h> |
#include <linux/gfp.h> |
#include <syscall.h> |
34,10 → 39,12 |
struct dma_pool { /* the pool */ |
struct list_head page_list; |
spinlock_t lock; |
size_t size; |
struct device *dev; |
size_t allocation; |
size_t boundary; |
char name[32]; |
struct list_head pools; |
}; |
49,10 → 56,12 |
unsigned int offset; |
}; |
static DEFINE_MUTEX(pools_lock); |
static DEFINE_MUTEX(pools_reg_lock); |
/** |
* dma_pool_create - Creates a pool of consistent memory blocks, for dma. |
* @name: name of pool, for diagnostics |
79,18 → 88,17 |
{ |
struct dma_pool *retval; |
size_t allocation; |
bool empty = false; |
if (align == 0) |
align = 1; |
else if (align & (align - 1)) |
return NULL; |
if (size == 0) |
return NULL; |
else if (size < 4) |
size = 4; |
if ((size % align) != 0) |
size = ALIGN(size, align); |
99,11 → 107,10 |
allocation = (allocation+0x7FFF) & ~0x7FFF; |
if (!boundary) |
boundary = allocation; |
else if ((boundary < size) || (boundary & (boundary - 1))) |
return NULL; |
retval = kmalloc(sizeof(*retval), GFP_KERNEL); |
110,10 → 117,12 |
if (!retval) |
return retval; |
strlcpy(retval->name, name, sizeof(retval->name)); |
retval->dev = dev; |
INIT_LIST_HEAD(&retval->page_list); |
spin_lock_init(&retval->lock); |
retval->size = size; |
retval->boundary = boundary; |
retval->allocation = allocation; |
139,12 → 148,11 |
} while (offset < pool->allocation); |
} |
static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags) |
{ |
struct dma_page *page; |
page = kmalloc(sizeof(*page), mem_flags); |
if (!page) |
return NULL; |
page->vaddr = (void*)KernelAlloc(pool->allocation); |
151,39 → 159,43 |
dbgprintf("%s 0x%0x ",__FUNCTION__, page->vaddr); |
if (page->vaddr) { |
#ifdef DMAPOOL_DEBUG |
memset(page->vaddr, POOL_POISON_FREED, pool->allocation); |
#endif |
page->dma = GetPgAddr(page->vaddr); |
dbgprintf("dma 0x%0x\n", page->dma); |
pool_initialise_page(pool, page); |
page->in_use = 0; |
page->offset = 0; |
} else { |
kfree(page); |
page = NULL; |
} |
return page; |
} |
static inline bool is_page_busy(struct dma_page *page) |
{ |
return page->in_use != 0; |
} |
static void pool_free_page(struct dma_pool *pool, struct dma_page *page) |
{ |
dma_addr_t dma = page->dma; |
#ifdef DMAPOOL_DEBUG |
memset(page->vaddr, POOL_POISON_FREED, pool->allocation); |
#endif |
KernelFree(page->vaddr); |
list_del(&page->page_list); |
kfree(page); |
} |
/** |
* dma_pool_destroy - destroys a pool of dma memory blocks. |
* @pool: dma pool that will be destroyed |
194,16 → 206,23 |
*/ |
void dma_pool_destroy(struct dma_pool *pool) |
{ |
bool empty = false; |
if (unlikely(!pool)) |
return; |
mutex_lock(&pools_reg_lock); |
mutex_lock(&pools_lock); |
list_del(&pool->pools); |
mutex_unlock(&pools_lock); |
mutex_unlock(&pools_reg_lock); |
while (!list_empty(&pool->page_list)) { |
struct dma_page *page; |
page = list_entry(pool->page_list.next, |
struct dma_page, page_list); |
if (is_page_busy(page)) { |
printk(KERN_ERR "dma_pool_destroy %p busy\n", |
page->vaddr); |
/* leak the still-in-use consistent memory */ |
215,8 → 234,8 |
kfree(pool); |
} |
EXPORT_SYMBOL(dma_pool_destroy); |
/** |
* dma_pool_alloc - get a block of consistent memory |
* @pool: dma pool that will produce the block |
230,24 → 249,28 |
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, |
dma_addr_t *handle) |
{ |
unsigned long flags; |
struct dma_page *page; |
size_t offset; |
void *retval; |
restart: |
spin_lock_irqsave(&pool->lock, flags); |
list_for_each_entry(page, &pool->page_list, page_list) { |
if (page->offset < pool->allocation) |
goto ready; |
} |
/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */ |
spin_unlock_irqrestore(&pool->lock, flags); |
page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO)); |
if (!page) |
return NULL; |
spin_lock_irqsave(&pool->lock, flags); |
list_add(&page->page_list, &pool->page_list); |
ready: |
page->in_use++; |
offset = page->offset; |
254,32 → 277,55 |
page->offset = *(int *)(page->vaddr + offset); |
retval = offset + page->vaddr; |
*handle = offset + page->dma; |
#ifdef DMAPOOL_DEBUG |
{ |
int i; |
u8 *data = retval; |
/* page->offset is stored in first 4 bytes */ |
for (i = sizeof(page->offset); i < pool->size; i++) { |
if (data[i] == POOL_POISON_FREED) |
continue; |
if (pool->dev) |
dev_err(pool->dev, |
"dma_pool_alloc %s, %p (corrupted)\n", |
pool->name, retval); |
else |
pr_err("dma_pool_alloc %s, %p (corrupted)\n", |
pool->name, retval); |
/* |
* Dump the first 4 bytes even if they are not |
* POOL_POISON_FREED |
*/ |
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, |
data, pool->size, 1); |
break; |
} |
} |
if (!(mem_flags & __GFP_ZERO)) |
memset(retval, POOL_POISON_ALLOCATED, pool->size); |
#endif |
spin_unlock_irqrestore(&pool->lock, flags); |
if (mem_flags & __GFP_ZERO) |
memset(retval, 0, pool->size); |
return retval; |
} |
EXPORT_SYMBOL(dma_pool_alloc); |
static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma) |
{ |
struct dma_page *page; |
list_for_each_entry(page, &pool->page_list, page_list) { |
if (dma < page->dma) |
continue; |
if ((dma - page->dma) < pool->allocation) |
return page; |
} |
return NULL; |
} |
/** |
* dma_pool_free - put block back into dma pool |
296,19 → 342,51 |
unsigned long flags; |
unsigned int offset; |
spin_lock_irqsave(&pool->lock, flags); |
page = pool_find_page(pool, dma); |
if (!page) { |
spin_unlock_irqrestore(&pool->lock, flags); |
printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n", |
pool->name, vaddr, (unsigned long)dma); |
return; |
} |
offset = vaddr - page->vaddr; |
#ifdef DMAPOOL_DEBUG |
if ((dma - page->dma) != offset) { |
spin_unlock_irqrestore(&pool->lock, flags); |
if (pool->dev) |
dev_err(pool->dev, |
"dma_pool_free %s, %p (bad vaddr)/%Lx\n", |
pool->name, vaddr, (unsigned long long)dma); |
else |
printk(KERN_ERR |
"dma_pool_free %s, %p (bad vaddr)/%Lx\n", |
pool->name, vaddr, (unsigned long long)dma); |
return; |
} |
{ |
unsigned int chain = page->offset; |
while (chain < pool->allocation) { |
if (chain != offset) { |
chain = *(int *)(page->vaddr + chain); |
continue; |
} |
spin_unlock_irqrestore(&pool->lock, flags); |
if (pool->dev) |
dev_err(pool->dev, "dma_pool_free %s, dma %Lx " |
"already free\n", pool->name, |
(unsigned long long)dma); |
else |
printk(KERN_ERR "dma_pool_free %s, dma %Lx " |
"already free\n", pool->name, |
(unsigned long long)dma); |
return; |
} |
} |
memset(vaddr, POOL_POISON_FREED, pool->size); |
#endif |
page->in_use--; |
*(int *)vaddr = page->offset; |
page->offset = offset; |
317,6 → 395,22 |
* if (!is_page_busy(page)) pool_free_page(pool, page); |
* Better have a few empty pages hang around. |
*/ |
spin_unlock_irqrestore(&pool->lock, flags); |
} |
EXPORT_SYMBOL(dma_pool_free); |
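Taken together, the pool API is used like this sketch (names and sizes are hypothetical: 64-byte descriptors, 16-byte aligned, never crossing a 4 KiB boundary):

	static struct dma_pool *desc_pool;

	static int example_pool_setup(struct device *dev)
	{
		desc_pool = dma_pool_create("descs", dev, 64, 16, 4096);
		return desc_pool ? 0 : -ENOMEM;
	}

	static void example_pool_roundtrip(void)
	{
		dma_addr_t dma;
		void *desc = dma_pool_alloc(desc_pool, GFP_KERNEL, &dma);

		if (desc) {
			/* fill the descriptor, hand 'dma' to the hardware ... */
			dma_pool_free(desc_pool, desc, dma);
		}
	}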
/* |
* Managed DMA pool |
*/ |
static void dmam_pool_release(struct device *dev, void *res) |
{ |
struct dma_pool *pool = *(struct dma_pool **)res; |
dma_pool_destroy(pool); |
} |
static int dmam_pool_match(struct device *dev, void *res, void *match_data) |
{ |
return *(struct dma_pool **)res == match_data; |
} |
/drivers/ddk/linux/fbsysfs.c |
---|
0,0 → 1,87 |
/* |
* fbsysfs.c - framebuffer device class and attributes |
* |
* Copyright (c) 2004 James Simmons <jsimmons@infradead.org> |
* |
* This program is free software; you can redistribute it and/or |
* modify it under the terms of the GNU General Public License |
* as published by the Free Software Foundation; either version |
* 2 of the License, or (at your option) any later version. |
*/ |
/* |
* Note: currently there are only stubs for framebuffer_alloc and |
* framebuffer_release here. The reason for that is that until all drivers |
* are converted to use it, sysfsification will open OOPSable races. |
*/ |
#include <linux/kernel.h> |
#include <linux/slab.h> |
#include <linux/fb.h> |
#include <linux/module.h> |
#define FB_SYSFS_FLAG_ATTR 1 |
/** |
* framebuffer_alloc - creates a new frame buffer info structure |
* |
* @size: size of driver private data, can be zero |
* @dev: pointer to the device for this fb, this can be NULL |
* |
* Creates a new frame buffer info structure. Also reserves @size bytes |
* for driver private data (info->par). info->par (if any) will be |
* aligned to sizeof(long). |
* |
* Returns the new structure, or NULL if an error occurred. |
* |
*/ |
struct fb_info *framebuffer_alloc(size_t size, struct device *dev) |
{ |
#define BYTES_PER_LONG (BITS_PER_LONG/8) |
#define PADDING (BYTES_PER_LONG - (sizeof(struct fb_info) % BYTES_PER_LONG)) |
int fb_info_size = sizeof(struct fb_info); |
struct fb_info *info; |
char *p; |
if (size) |
fb_info_size += PADDING; |
p = kzalloc(fb_info_size + size, GFP_KERNEL); |
if (!p) |
return NULL; |
info = (struct fb_info *) p; |
if (size) |
info->par = p + fb_info_size; |
info->device = dev; |
#ifdef CONFIG_FB_BACKLIGHT |
mutex_init(&info->bl_curve_mutex); |
#endif |
return info; |
#undef PADDING |
#undef BYTES_PER_LONG |
} |
EXPORT_SYMBOL(framebuffer_alloc); |
/** |
* framebuffer_release - marks the structure available for freeing |
* |
* @info: frame buffer info structure |
* |
* Drop the reference count of the device embedded in the |
* framebuffer info structure. |
* |
*/ |
void framebuffer_release(struct fb_info *info) |
{ |
if (!info) |
return; |
kfree(info->apertures); |
kfree(info); |
} |
EXPORT_SYMBOL(framebuffer_release); |
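A usage sketch (the par layout and probe flow are hypothetical):

	struct my_fb_par {
		void __iomem *mmio;	/* driver-private state lives in info->par */
	};

	static struct fb_info *example_fb_probe(struct device *dev)
	{
		struct fb_info *info = framebuffer_alloc(sizeof(struct my_fb_par), dev);

		if (!info)
			return NULL;
		/* info->par points at a sizeof(long)-aligned private area */
		/* ... fill info, map registers, register the framebuffer ... */
		return info;
	}

On any later failure the driver would call framebuffer_release(info) to free both the fb_info and the private area in one step.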