/drivers/ddk/dma/fence.c |
---|
File deleted |
/drivers/ddk/dma/dma_alloc.c |
---|
File deleted |
/drivers/ddk/Makefile |
---|
25,8 → 25,6 |
NAME_SRCS:= \ |
debug/dbglog.c \ |
debug/chkstk.S \ |
dma/dma_alloc.c \ |
dma/fence.c \ |
io/create.c \ |
io/finfo.c \ |
io/ssize.c \ |
35,7 → 33,6 |
linux/ctype.c \ |
linux/dmapool.c \ |
linux/dmi.c \ |
linux/fbsysfs.c \ |
linux/find_next_bit.c \ |
linux/firmware.c \ |
linux/gcd.c \ |
/drivers/ddk/linux/fbsysfs.c |
---|
File deleted |
/drivers/ddk/linux/dmapool.c |
---|
22,17 → 22,12 |
* keep a count of how many are currently allocated from each page. |
*/ |
#include <linux/device.h> |
#include <linux/dmapool.h> |
#include <linux/kernel.h> |
#include <linux/list.h> |
#include <linux/mutex.h> |
#include <ddk.h> |
#include <linux/slab.h> |
#include <linux/spinlock.h> |
#include <linux/types.h> |
#include <linux/errno.h> |
#include <linux/mutex.h> |
#include <linux/pci.h> |
#include <linux/gfp.h> |
#include <syscall.h> |
39,12 → 34,10 |
struct dma_pool { /* the pool */ |
struct list_head page_list; |
spinlock_t lock; |
struct mutex lock; |
size_t size; |
struct device *dev; |
size_t allocation; |
size_t boundary; |
char name[32]; |
struct list_head pools; |
}; |
56,12 → 49,10 |
unsigned int offset; |
}; |
static DEFINE_MUTEX(pools_lock); |
static DEFINE_MUTEX(pools_reg_lock); |
/** |
* dma_pool_create - Creates a pool of consistent memory blocks, for dma. |
* @name: name of pool, for diagnostics |
88,17 → 79,18 |
{ |
struct dma_pool *retval; |
size_t allocation; |
bool empty = false; |
if (align == 0) |
if (align == 0) { |
align = 1; |
else if (align & (align - 1)) |
} else if (align & (align - 1)) { |
return NULL; |
} |
if (size == 0) |
if (size == 0) { |
return NULL; |
else if (size < 4) |
} else if (size < 4) { |
size = 4; |
} |
if ((size % align) != 0) |
size = ALIGN(size, align); |
107,10 → 99,11 |
allocation = (allocation+0x7FFF) & ~0x7FFF; |
if (!boundary) |
if (!boundary) { |
boundary = allocation; |
else if ((boundary < size) || (boundary & (boundary - 1))) |
} else if ((boundary < size) || (boundary & (boundary - 1))) { |
return NULL; |
} |
retval = kmalloc(sizeof(*retval), GFP_KERNEL); |
117,12 → 110,10 |
if (!retval) |
return retval; |
strlcpy(retval->name, name, sizeof(retval->name)); |
INIT_LIST_HEAD(&retval->page_list); |
retval->dev = dev; |
// spin_lock_init(&retval->lock); |
INIT_LIST_HEAD(&retval->page_list); |
spin_lock_init(&retval->lock); |
retval->size = size; |
retval->boundary = boundary; |
retval->allocation = allocation; |
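For orientation, this is how a driver consumes the API that dmapool.c implements. A minimal usage sketch, not taken from this tree: the function name, device pointer, block size, and alignment are all illustrative.

```c
#include <linux/dmapool.h>

/* Hypothetical driver init: pool name, sizes and alignment are examples. */
static int example_init(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t bus_addr;
	void *block;

	/* 64-byte blocks, 16-byte aligned, no boundary restriction */
	pool = dma_pool_create("ring-desc", dev, 64, 16, 0);
	if (!pool)
		return -ENOMEM;

	block = dma_pool_alloc(pool, GFP_KERNEL, &bus_addr);
	if (!block) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... hand bus_addr to the device, access block from the CPU ... */

	dma_pool_free(pool, block, bus_addr);
	dma_pool_destroy(pool);
	return 0;
}
```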
148,11 → 139,12 |
} while (offset < pool->allocation); |
} |
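The `page->offset = *(int *)(page->vaddr + offset)` step in dma_pool_alloc below only makes sense given the free list that pool_initialise_page threads through the page itself. A simplified sketch of that initialisation, with the real function's `boundary` handling omitted:

```c
/* Each free block stores the offset of the next free block in its own
 * first 4 bytes; pool->allocation serves as the end-of-list sentinel.
 * Simplified sketch: the real code also breaks chains at pool->boundary. */
static void pool_initialise_page_sketch(struct dma_pool *pool,
					struct dma_page *page)
{
	unsigned int offset = 0;

	do {
		unsigned int next = offset + pool->size;

		*(int *)(page->vaddr + offset) = next; /* link to next block */
		offset = next;
	} while (offset < pool->allocation);
}
```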
static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags) |
static struct dma_page *pool_alloc_page(struct dma_pool *pool) |
{ |
struct dma_page *page; |
page = kmalloc(sizeof(*page), mem_flags); |
page = __builtin_malloc(sizeof(*page)); |
if (!page) |
return NULL; |
page->vaddr = (void*)KernelAlloc(pool->allocation); |
159,43 → 151,39 |
dbgprintf("%s 0x%0x ",__FUNCTION__, page->vaddr); |
if (page->vaddr) { |
#ifdef DMAPOOL_DEBUG |
memset(page->vaddr, POOL_POISON_FREED, pool->allocation); |
#endif |
if (page->vaddr) |
{ |
page->dma = GetPgAddr(page->vaddr); |
dbgprintf("dma 0x%0x\n", page->dma); |
pool_initialise_page(pool, page); |
list_add(&page->page_list, &pool->page_list); |
page->in_use = 0; |
page->offset = 0; |
} else { |
kfree(page); |
free(page); |
page = NULL; |
} |
return page; |
} |
static inline bool is_page_busy(struct dma_page *page) |
static inline int is_page_busy(struct dma_page *page) |
{ |
return page->in_use != 0; |
} |
static void pool_free_page(struct dma_pool *pool, struct dma_page *page) |
{ |
dma_addr_t dma = page->dma; |
#ifdef DMAPOOL_DEBUG |
memset(page->vaddr, POOL_POISON_FREED, pool->allocation); |
#endif |
KernelFree(page->vaddr); |
list_del(&page->page_list); |
kfree(page); |
free(page); |
} |
/** |
* dma_pool_destroy - destroys a pool of dma memory blocks. |
* @pool: dma pool that will be destroyed |
206,23 → 194,16 |
*/ |
void dma_pool_destroy(struct dma_pool *pool) |
{ |
bool empty = false; |
if (unlikely(!pool)) |
return; |
mutex_lock(&pools_reg_lock); |
mutex_lock(&pools_lock); |
list_del(&pool->pools); |
mutex_unlock(&pools_lock); |
mutex_unlock(&pools_reg_lock); |
while (!list_empty(&pool->page_list)) { |
struct dma_page *page; |
page = list_entry(pool->page_list.next, |
struct dma_page, page_list); |
if (is_page_busy(page)) { |
if (is_page_busy(page)) |
{ |
printk(KERN_ERR "dma_pool_destroy %p busy\n", |
page->vaddr); |
/* leak the still-in-use consistent memory */ |
234,8 → 215,8 |
kfree(pool); |
} |
EXPORT_SYMBOL(dma_pool_destroy); |
/** |
* dma_pool_alloc - get a block of consistent memory |
* @pool: dma pool that will produce the block |
249,28 → 230,24 |
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, |
dma_addr_t *handle) |
{ |
unsigned long flags; |
u32 efl; |
struct dma_page *page; |
size_t offset; |
void *retval; |
spin_lock_irqsave(&pool->lock, flags); |
efl = safe_cli(); |
restart: |
list_for_each_entry(page, &pool->page_list, page_list) { |
if (page->offset < pool->allocation) |
goto ready; |
} |
/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */ |
spin_unlock_irqrestore(&pool->lock, flags); |
page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO)); |
page = pool_alloc_page(pool); |
if (!page) |
return NULL; |
{ |
retval = NULL; |
goto done; |
} |
spin_lock_irqsave(&pool->lock, flags); |
list_add(&page->page_list, &pool->page_list); |
ready: |
page->in_use++; |
offset = page->offset; |
277,55 → 254,32 |
page->offset = *(int *)(page->vaddr + offset); |
retval = offset + page->vaddr; |
*handle = offset + page->dma; |
#ifdef DMAPOOL_DEBUG |
{ |
int i; |
u8 *data = retval; |
/* page->offset is stored in first 4 bytes */ |
for (i = sizeof(page->offset); i < pool->size; i++) { |
if (data[i] == POOL_POISON_FREED) |
continue; |
if (pool->dev) |
dev_err(pool->dev, |
"dma_pool_alloc %s, %p (corrupted)\n", |
pool->name, retval); |
else |
pr_err("dma_pool_alloc %s, %p (corrupted)\n", |
pool->name, retval); |
/* |
* Dump the first 4 bytes even if they are not |
* POOL_POISON_FREED |
*/ |
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, |
data, pool->size, 1); |
break; |
done: |
safe_sti(efl); |
return retval; |
} |
} |
if (!(mem_flags & __GFP_ZERO)) |
memset(retval, POOL_POISON_ALLOCATED, pool->size); |
#endif |
spin_unlock_irqrestore(&pool->lock, flags); |
if (mem_flags & __GFP_ZERO) |
memset(retval, 0, pool->size); |
return retval; |
} |
EXPORT_SYMBOL(dma_pool_alloc); |
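The substitution visible throughout this rewrite: Linux spinlock calls become interrupt-flag save/restore, which is a sufficient critical section on this uniprocessor target. A sketch of the pattern, assuming the `safe_cli`/`safe_sti` helpers declared in syscall.h:

```c
#include <syscall.h>

/* Locking pattern of the port (sketch): safe_cli() disables interrupts and
 * returns the prior EFLAGS; safe_sti() restores them.  On a uniprocessor
 * kernel this stands in for spin_lock_irqsave()/spin_unlock_irqrestore(). */
static void with_pool_locked(struct dma_pool *pool)
{
	u32 efl;

	efl = safe_cli();  /* was: spin_lock_irqsave(&pool->lock, flags); */
	/* ... walk or modify pool->page_list ... */
	safe_sti(efl);     /* was: spin_unlock_irqrestore(&pool->lock, flags); */
}
```

One behavioural difference worth noting: upstream drops the lock around pool_alloc_page() because allocation may sleep, while this version keeps interrupts disabled across it; that trade-off is only safe if KernelAlloc() never blocks.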
static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma) |
{ |
struct dma_page *page; |
u32 efl; |
efl = safe_cli(); |
list_for_each_entry(page, &pool->page_list, page_list) { |
if (dma < page->dma) |
continue; |
if ((dma - page->dma) < pool->allocation) |
if (dma < (page->dma + pool->allocation)) |
goto done; |
} |
page = NULL; |
done: |
safe_sti(efl); |
return page; |
} |
return NULL; |
} |
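The rewritten bounds check in pool_find_page is the same predicate in another shape. An illustration with hypothetical helper names, ours rather than the source's:

```c
/* Both forms test "dma lies in [base, base + allocation)".  They agree
 * whenever dma >= base, which the `continue` above guarantees, and the
 * sum base + allocation does not wrap. */
static inline int in_page_old(dma_addr_t dma, dma_addr_t base, size_t alloc)
{
	return (dma - base) < alloc;	/* old form: offset inside the page */
}

static inline int in_page_new(dma_addr_t dma, dma_addr_t base, size_t alloc)
{
	return dma < (base + alloc);	/* new form: below the page's end */
}
```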
/** |
* dma_pool_free - put block back into dma pool |
342,51 → 296,19 |
unsigned long flags; |
unsigned int offset; |
spin_lock_irqsave(&pool->lock, flags); |
u32 efl; |
page = pool_find_page(pool, dma); |
if (!page) { |
spin_unlock_irqrestore(&pool->lock, flags); |
printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n", |
pool->name, vaddr, (unsigned long)dma); |
printk(KERN_ERR "dma_pool_free %p/%lx (bad dma)\n", |
vaddr, (unsigned long)dma); |
return; |
} |
offset = vaddr - page->vaddr; |
#ifdef DMAPOOL_DEBUG |
if ((dma - page->dma) != offset) { |
spin_unlock_irqrestore(&pool->lock, flags); |
if (pool->dev) |
dev_err(pool->dev, |
"dma_pool_free %s, %p (bad vaddr)/%Lx\n", |
pool->name, vaddr, (unsigned long long)dma); |
else |
printk(KERN_ERR |
"dma_pool_free %s, %p (bad vaddr)/%Lx\n", |
pool->name, vaddr, (unsigned long long)dma); |
return; |
} |
efl = safe_cli(); |
{ |
unsigned int chain = page->offset; |
while (chain < pool->allocation) { |
if (chain != offset) { |
chain = *(int *)(page->vaddr + chain); |
continue; |
} |
spin_unlock_irqrestore(&pool->lock, flags); |
if (pool->dev) |
dev_err(pool->dev, "dma_pool_free %s, dma %Lx " |
"already free\n", pool->name, |
(unsigned long long)dma); |
else |
printk(KERN_ERR "dma_pool_free %s, dma %Lx " |
"already free\n", pool->name, |
(unsigned long long)dma); |
return; |
} |
} |
memset(vaddr, POOL_POISON_FREED, pool->size); |
#endif |
page->in_use--; |
*(int *)vaddr = page->offset; |
page->offset = offset; |
395,22 → 317,6 |
* if (!is_page_busy(page)) pool_free_page(pool, page); |
* Better have a few empty pages hang around. |
*/ |
spin_unlock_irqrestore(&pool->lock, flags); |
} |
safe_sti(efl); |
} |
EXPORT_SYMBOL(dma_pool_free); |
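Once pool_find_page succeeds, the free path above is an O(1) push onto the page's embedded free list. The same two stores, gathered into an annotated sketch (helper name is ours):

```c
/* What dma_pool_free amounts to after the page is found (sketch):
 * push the block back onto the page-embedded free list. */
static void pool_push_block(struct dma_page *page, void *vaddr)
{
	unsigned int offset = vaddr - page->vaddr;

	*(int *)vaddr = page->offset;	/* freed block points at old head */
	page->offset = offset;		/* head now points at freed block */
	page->in_use--;
}
```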
/* |
* Managed DMA pool |
*/ |
static void dmam_pool_release(struct device *dev, void *res) |
{ |
struct dma_pool *pool = *(struct dma_pool **)res; |
dma_pool_destroy(pool); |
} |
static int dmam_pool_match(struct device *dev, void *res, void *match_data) |
{ |
return *(struct dma_pool **)res == match_data; |
} |
/drivers/include/linux/compiler.h |
---|
144,7 → 144,7 |
*/ |
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) ) |
#define __trace_if(cond) \ |
if (__builtin_constant_p(!!(cond)) ? !!(cond) : \ |
if (__builtin_constant_p((cond)) ? !!(cond) : \ |
({ \ |
int ______r; \ |
static struct ftrace_branch_data \ |
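For reference, the hunk above only changes which expression `__builtin_constant_p` sees: upstream wraps the condition in `!!` to normalise it to 0/1 first, and this tree drops that wrapper. A standalone illustration of the builtin, not taken from the source:

```c
/* __builtin_constant_p(x) folds to 1 only when GCC can prove that x is a
 * compile-time constant; the branch profiler uses it to avoid
 * instrumenting constant conditions. */
static int constant_p_demo(int n)
{
	int a = __builtin_constant_p(4 > 2);	/* 1: folded at compile time */
	int b = __builtin_constant_p(n);	/* 0: n is a runtime value   */

	return a + b;
}
```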
/drivers/include/linux/rcupdate.h |
---|
458,10 → 458,46 |
* If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an |
* RCU-sched read-side critical section. In absence of |
* CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side |
* critical section unless it can prove otherwise. |
* critical section unless it can prove otherwise. Note that disabling |
* of preemption (including disabling irqs) counts as an RCU-sched |
* read-side critical section. This is useful for debug checks in functions |
* that require that they be called within an RCU-sched read-side |
* critical section. |
* |
* Check debug_lockdep_rcu_enabled() to prevent false positives during boot |
* and while lockdep is disabled. |
* |
* Note that if the CPU is in the idle loop from an RCU point of |
* view (ie: that we are in the section between rcu_idle_enter() and |
* rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU |
* did an rcu_read_lock(). The reason for this is that RCU ignores CPUs |
* that are in such a section, considering these as in extended quiescent |
* state, so such a CPU is effectively never in an RCU read-side critical |
* section regardless of what RCU primitives it invokes. This state of |
* affairs is required --- we need to keep an RCU-free window in idle |
* where the CPU may possibly enter into low power mode. This way we can |
* report an extended quiescent state to other CPUs that started a grace |
* period. Otherwise we would delay any grace period as long as we run in |
* the idle task. |
* |
* Similarly, we avoid claiming an SRCU read lock held if the current |
* CPU is offline. |
*/ |
#ifdef CONFIG_PREEMPT_COUNT |
int rcu_read_lock_sched_held(void); |
static inline int rcu_read_lock_sched_held(void) |
{ |
int lockdep_opinion = 0; |
if (!debug_lockdep_rcu_enabled()) |
return 1; |
if (!rcu_is_watching()) |
return 0; |
if (!rcu_lockdep_current_cpu_online()) |
return 0; |
if (debug_locks) |
lockdep_opinion = lock_is_held(&rcu_sched_lock_map); |
return lockdep_opinion || preempt_count() != 0 || irqs_disabled(); |
} |
#else /* #ifdef CONFIG_PREEMPT_COUNT */ |
static inline int rcu_read_lock_sched_held(void) |
{ |
501,14 → 537,14 |
#ifdef CONFIG_PROVE_RCU |
/** |
* RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met |
* rcu_lockdep_assert - emit lockdep splat if specified condition not met |
* @c: condition to check |
* @s: informative message |
*/ |
#define RCU_LOCKDEP_WARN(c, s) \ |
#define rcu_lockdep_assert(c, s) \ |
do { \ |
static bool __section(.data.unlikely) __warned; \ |
if (debug_lockdep_rcu_enabled() && !__warned && (c)) { \ |
if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \ |
__warned = true; \ |
lockdep_rcu_suspicious(__FILE__, __LINE__, s); \ |
} \ |
517,7 → 553,7 |
#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU) |
static inline void rcu_preempt_sleep_check(void) |
{ |
RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map), |
rcu_lockdep_assert(!lock_is_held(&rcu_lock_map), |
"Illegal context switch in RCU read-side critical section"); |
} |
#else /* #ifdef CONFIG_PROVE_RCU */ |
529,15 → 565,15 |
#define rcu_sleep_check() \ |
do { \ |
rcu_preempt_sleep_check(); \ |
RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \ |
rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map), \ |
"Illegal context switch in RCU-bh read-side critical section"); \ |
RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map), \ |
rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map), \ |
"Illegal context switch in RCU-sched read-side critical section"); \ |
} while (0) |
#else /* #ifdef CONFIG_PROVE_RCU */ |
#define RCU_LOCKDEP_WARN(c, s) do { } while (0) |
#define rcu_lockdep_assert(c, s) do { } while (0) |
#define rcu_sleep_check() do { } while (0) |
#endif /* #else #ifdef CONFIG_PROVE_RCU */ |
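The rename running through this file inverts polarity: RCU_LOCKDEP_WARN complains when its condition holds, while rcu_lockdep_assert complained when its condition failed. Every call site below therefore negates its argument; the relationship, written as a hypothetical macro of ours:

```c
/* Warn-if versus assert-that: the two macros are each other's negation. */
#define RCU_LOCKDEP_WARN_VIA_ASSERT(c, s) rcu_lockdep_assert(!(c), s)
```

This is exactly the flip visible in rcu_sleep_check() and the rcu_read_lock*() helpers in the hunks that follow.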
568,13 → 604,13 |
({ \ |
/* Dependency order vs. p above. */ \ |
typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \ |
RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \ |
rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \ |
rcu_dereference_sparse(p, space); \ |
((typeof(*p) __force __kernel *)(________p1)); \ |
}) |
#define __rcu_dereference_protected(p, c, space) \ |
({ \ |
RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \ |
rcu_lockdep_assert(c, "suspicious rcu_dereference_protected() usage"); \ |
rcu_dereference_sparse(p, space); \ |
((typeof(*p) __force __kernel *)(p)); \ |
}) |
798,7 → 834,7 |
__rcu_read_lock(); |
__acquire(RCU); |
rcu_lock_acquire(&rcu_lock_map); |
RCU_LOCKDEP_WARN(!rcu_is_watching(), |
rcu_lockdep_assert(rcu_is_watching(), |
"rcu_read_lock() used illegally while idle"); |
} |
849,7 → 885,7 |
*/ |
static inline void rcu_read_unlock(void) |
{ |
RCU_LOCKDEP_WARN(!rcu_is_watching(), |
rcu_lockdep_assert(rcu_is_watching(), |
"rcu_read_unlock() used illegally while idle"); |
__release(RCU); |
__rcu_read_unlock(); |
878,7 → 914,7 |
local_bh_disable(); |
__acquire(RCU_BH); |
rcu_lock_acquire(&rcu_bh_lock_map); |
RCU_LOCKDEP_WARN(!rcu_is_watching(), |
rcu_lockdep_assert(rcu_is_watching(), |
"rcu_read_lock_bh() used illegally while idle"); |
} |
889,7 → 925,7 |
*/ |
static inline void rcu_read_unlock_bh(void) |
{ |
RCU_LOCKDEP_WARN(!rcu_is_watching(), |
rcu_lockdep_assert(rcu_is_watching(), |
"rcu_read_unlock_bh() used illegally while idle"); |
rcu_lock_release(&rcu_bh_lock_map); |
__release(RCU_BH); |
914,7 → 950,7 |
preempt_disable(); |
__acquire(RCU_SCHED); |
rcu_lock_acquire(&rcu_sched_lock_map); |
RCU_LOCKDEP_WARN(!rcu_is_watching(), |
rcu_lockdep_assert(rcu_is_watching(), |
"rcu_read_lock_sched() used illegally while idle"); |
} |
932,7 → 968,7 |
*/ |
static inline void rcu_read_unlock_sched(void) |
{ |
RCU_LOCKDEP_WARN(!rcu_is_watching(), |
rcu_lockdep_assert(rcu_is_watching(), |
"rcu_read_unlock_sched() used illegally while idle"); |
rcu_lock_release(&rcu_sched_lock_map); |
__release(RCU_SCHED); |
/drivers/include/linux/mm.h |
---|
62,8 → 62,4 |
}; |
#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK) |
static inline int set_page_dirty(struct page *page) |
{ return 0; }; |
#endif |
/drivers/include/linux/vmalloc.h |
---|
1,35 → 1,3 |
#ifndef _LINUX_VMALLOC_H |
#define _LINUX_VMALLOC_H |
#include <linux/spinlock.h> |
#include <linux/init.h> |
#include <linux/list.h> |
struct vm_area_struct; /* vma defining user mapping in mm_types.h */ |
/* bits in flags of vmalloc's vm_struct below */ |
#define VM_IOREMAP 0x00000001 /* ioremap() and friends */ |
#define VM_ALLOC 0x00000002 /* vmalloc() */ |
#define VM_MAP 0x00000004 /* vmap()ed pages */ |
#define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */ |
#define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */ |
#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */ |
#define VM_NO_GUARD 0x00000040 /* don't add guard page */ |
#define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */ |
/* bits [20..32] reserved for arch specific ioremap internals */ |
/* |
* Maximum alignment for ioremap() regions. |
* Can be overridden by arch-specific value. |
*/ |
#ifndef IOREMAP_MAX_ORDER |
#define IOREMAP_MAX_ORDER (7 + PAGE_SHIFT) /* 128 pages */ |
#endif |
extern void *vmalloc(unsigned long size); |
extern void *vzalloc(unsigned long size); |
extern void vfree(const void *addr); |
extern void *vmap(struct page **pages, unsigned int count, |
unsigned long flags, pgprot_t prot); |
extern void vunmap(const void *addr); |
#endif /* _LINUX_VMALLOC_H */ |
/drivers/include/drm/ttm/ttm_bo_driver.h |
---|
465,7 → 465,7 |
* Constant after init. |
*/ |
struct kobject kobj; |
// struct kobject kobj; |
struct ttm_mem_global *mem_glob; |
struct page *dummy_read_page; |
struct ttm_mem_shrink shrink; |
/drivers/include/drm/ttm/ttm_object.h |
---|
40,7 → 40,7 |
#include <linux/list.h> |
#include <drm/drm_hashtab.h> |
#include <linux/kref.h> |
#include <linux/rcupdate.h> |
//#include <linux/rcupdate.h> |
#include <linux/dma-buf.h> |
#include <ttm/ttm_memory.h> |
345,6 → 345,6 |
uint32_t handle, uint32_t flags, |
int *prime_fd); |
#define ttm_prime_object_kfree(__obj, __prime) \ |
kfree_rcu(__obj, __prime.base.rhead) |
//#define ttm_prime_object_kfree(__obj, __prime) \ |
// kfree_rcu(__obj, __prime.base.rhead) |
#endif |
/drivers/include/syscall.h |
---|
49,7 → 49,7 |
void STDCALL FreeKernelSpace(void *mem)__asm__("FreeKernelSpace"); |
addr_t STDCALL MapIoMem(addr_t base, size_t size, u32 flags)__asm__("MapIoMem"); |
void* STDCALL KernelAlloc(size_t size)__asm__("KernelAlloc"); |
void* STDCALL KernelFree(const void *mem)__asm__("KernelFree"); |
void* STDCALL KernelFree(void *mem)__asm__("KernelFree"); |
void* STDCALL UserAlloc(size_t size)__asm__("UserAlloc"); |
int STDCALL UserFree(void *mem)__asm__("UserFree"); |
527,6 → 527,10 |
return mem; |
}; |
static inline void vfree(void *addr) |
{ |
KernelFree(addr); |
} |
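With linux/vmalloc.h deleted, the surviving vmalloc-family entry points live in this header as thin wrappers over the native allocator, as the new vfree() above shows. A plausible zeroing companion in the same style, a sketch of ours rather than code from this tree:

```c
/* Sketch: vzalloc-style wrapper matching the vfree() above.  Assumes
 * memory returned by KernelAlloc() may contain stale data. */
static inline void *vzalloc(unsigned long size)
{
	void *mem = KernelAlloc(size);

	if (mem)
		memset(mem, 0, size);
	return mem;
}
```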
static inline int power_supply_is_system_supplied(void) { return -1; }; |