Subversion Repositories Kolibri OS

Compare Revisions

Rev 5269 → Rev 5270

/drivers/include/asm-generic/atomic-long.h
0,0 → 1,258
#ifndef _ASM_GENERIC_ATOMIC_LONG_H
#define _ASM_GENERIC_ATOMIC_LONG_H
/*
 * Copyright (C) 2005 Silicon Graphics, Inc.
 * Christoph Lameter
 *
 * Allows arch-independent atomic definitions to be provided without the
 * need to edit every arch-specific atomic.h file.
 */
 
#include <asm/types.h>
 
/*
 * Support for atomic_long_t
 *
 * Casts for parameters are avoided for existing atomic functions in order to
 * avoid issues with cast-as-lval under gcc 4.x and other limitations that the
 * macros of a platform may have.
 */
 
#if BITS_PER_LONG == 64
 
typedef atomic64_t atomic_long_t;
 
#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
 
static inline long atomic_long_read(atomic_long_t *l)
{
    atomic64_t *v = (atomic64_t *)l;

    return (long)atomic64_read(v);
}

static inline void atomic_long_set(atomic_long_t *l, long i)
{
    atomic64_t *v = (atomic64_t *)l;

    atomic64_set(v, i);
}

static inline void atomic_long_inc(atomic_long_t *l)
{
    atomic64_t *v = (atomic64_t *)l;

    atomic64_inc(v);
}

static inline void atomic_long_dec(atomic_long_t *l)
{
    atomic64_t *v = (atomic64_t *)l;

    atomic64_dec(v);
}

static inline void atomic_long_add(long i, atomic_long_t *l)
{
    atomic64_t *v = (atomic64_t *)l;

    atomic64_add(i, v);
}

static inline void atomic_long_sub(long i, atomic_long_t *l)
{
    atomic64_t *v = (atomic64_t *)l;

    atomic64_sub(i, v);
}

static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
{
    atomic64_t *v = (atomic64_t *)l;

    return atomic64_sub_and_test(i, v);
}

static inline int atomic_long_dec_and_test(atomic_long_t *l)
{
    atomic64_t *v = (atomic64_t *)l;

    return atomic64_dec_and_test(v);
}

static inline int atomic_long_inc_and_test(atomic_long_t *l)
{
    atomic64_t *v = (atomic64_t *)l;

    return atomic64_inc_and_test(v);
}

static inline int atomic_long_add_negative(long i, atomic_long_t *l)
{
    atomic64_t *v = (atomic64_t *)l;

    return atomic64_add_negative(i, v);
}

static inline long atomic_long_add_return(long i, atomic_long_t *l)
{
    atomic64_t *v = (atomic64_t *)l;

    return (long)atomic64_add_return(i, v);
}

static inline long atomic_long_sub_return(long i, atomic_long_t *l)
{
    atomic64_t *v = (atomic64_t *)l;

    return (long)atomic64_sub_return(i, v);
}

static inline long atomic_long_inc_return(atomic_long_t *l)
{
    atomic64_t *v = (atomic64_t *)l;

    return (long)atomic64_inc_return(v);
}

static inline long atomic_long_dec_return(atomic_long_t *l)
{
    atomic64_t *v = (atomic64_t *)l;

    return (long)atomic64_dec_return(v);
}

static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
{
    atomic64_t *v = (atomic64_t *)l;

    return (long)atomic64_add_unless(v, a, u);
}

#define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l))

#define atomic_long_cmpxchg(l, old, new) \
    (atomic64_cmpxchg((atomic64_t *)(l), (old), (new)))
#define atomic_long_xchg(v, new) \
    (atomic64_xchg((atomic64_t *)(v), (new)))
 
#else /* BITS_PER_LONG == 64 */
 
typedef atomic_t atomic_long_t;
 
#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)

static inline long atomic_long_read(atomic_long_t *l)
{
    atomic_t *v = (atomic_t *)l;

    return (long)atomic_read(v);
}

static inline void atomic_long_set(atomic_long_t *l, long i)
{
    atomic_t *v = (atomic_t *)l;

    atomic_set(v, i);
}

static inline void atomic_long_inc(atomic_long_t *l)
{
    atomic_t *v = (atomic_t *)l;

    atomic_inc(v);
}

static inline void atomic_long_dec(atomic_long_t *l)
{
    atomic_t *v = (atomic_t *)l;

    atomic_dec(v);
}

static inline void atomic_long_add(long i, atomic_long_t *l)
{
    atomic_t *v = (atomic_t *)l;

    atomic_add(i, v);
}

static inline void atomic_long_sub(long i, atomic_long_t *l)
{
    atomic_t *v = (atomic_t *)l;

    atomic_sub(i, v);
}

static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
{
    atomic_t *v = (atomic_t *)l;

    return atomic_sub_and_test(i, v);
}

static inline int atomic_long_dec_and_test(atomic_long_t *l)
{
    atomic_t *v = (atomic_t *)l;

    return atomic_dec_and_test(v);
}

static inline int atomic_long_inc_and_test(atomic_long_t *l)
{
    atomic_t *v = (atomic_t *)l;

    return atomic_inc_and_test(v);
}

static inline int atomic_long_add_negative(long i, atomic_long_t *l)
{
    atomic_t *v = (atomic_t *)l;

    return atomic_add_negative(i, v);
}

static inline long atomic_long_add_return(long i, atomic_long_t *l)
{
    atomic_t *v = (atomic_t *)l;

    return (long)atomic_add_return(i, v);
}

static inline long atomic_long_sub_return(long i, atomic_long_t *l)
{
    atomic_t *v = (atomic_t *)l;

    return (long)atomic_sub_return(i, v);
}

static inline long atomic_long_inc_return(atomic_long_t *l)
{
    atomic_t *v = (atomic_t *)l;

    return (long)atomic_inc_return(v);
}

static inline long atomic_long_dec_return(atomic_long_t *l)
{
    atomic_t *v = (atomic_t *)l;

    return (long)atomic_dec_return(v);
}

static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
{
    atomic_t *v = (atomic_t *)l;

    return (long)atomic_add_unless(v, a, u);
}

#define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l))

#define atomic_long_cmpxchg(l, old, new) \
    (atomic_cmpxchg((atomic_t *)(l), (old), (new)))
#define atomic_long_xchg(v, new) \
    (atomic_xchg((atomic_t *)(v), (new)))
 
#endif /* BITS_PER_LONG == 64 */
 
#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
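
A minimal usage sketch of the API above (illustrative, not part of this commit; it assumes a kernel build environment where this header is reached via <linux/atomic.h>, and the obj_refs counter is hypothetical):

/* Reference counting with a native-width atomic counter. */
static atomic_long_t obj_refs = ATOMIC_LONG_INIT(1);

static void obj_get(void)
{
    atomic_long_inc(&obj_refs);
}

/* Returns nonzero when the last reference has been dropped. */
static int obj_put(void)
{
    return atomic_long_dec_and_test(&obj_refs);
}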
/drivers/include/asm-generic/bitops/const_hweight.h
0,0 → 1,43
#ifndef _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_
#define _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_
 
/*
 * Compile-time versions of __arch_hweightN()
 */
#define __const_hweight8(w) \
    ((unsigned int) \
     ((!!((w) & (1ULL << 0))) + \
      (!!((w) & (1ULL << 1))) + \
      (!!((w) & (1ULL << 2))) + \
      (!!((w) & (1ULL << 3))) + \
      (!!((w) & (1ULL << 4))) + \
      (!!((w) & (1ULL << 5))) + \
      (!!((w) & (1ULL << 6))) + \
      (!!((w) & (1ULL << 7)))))
 
#define __const_hweight16(w) (__const_hweight8(w) + __const_hweight8((w) >> 8 ))
#define __const_hweight32(w) (__const_hweight16(w) + __const_hweight16((w) >> 16))
#define __const_hweight64(w) (__const_hweight32(w) + __const_hweight32((w) >> 32))
 
/*
 * Generic interface.
 */
#define hweight8(w)  (__builtin_constant_p(w) ? __const_hweight8(w)  : __arch_hweight8(w))
#define hweight16(w) (__builtin_constant_p(w) ? __const_hweight16(w) : __arch_hweight16(w))
#define hweight32(w) (__builtin_constant_p(w) ? __const_hweight32(w) : __arch_hweight32(w))
#define hweight64(w) (__builtin_constant_p(w) ? __const_hweight64(w) : __arch_hweight64(w))

/*
 * Interface for known constant arguments
 */
#define HWEIGHT8(w)  (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight8(w))
#define HWEIGHT16(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight16(w))
#define HWEIGHT32(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight32(w))
#define HWEIGHT64(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight64(w))

/*
 * Type invariant interface to the compile-time constant hweight functions.
 */
#define HWEIGHT(w)   HWEIGHT64((u64)w)
 
#endif /* _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ */
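
A standalone sketch of what the constant path computes (illustrative only; __arch_hweightN and BUILD_BUG_ON_ZERO live in other headers, so this demo re-creates just the 8-bit constant macro under a DEMO_ prefix):

#include <stdio.h>

/* Re-created constant-path popcount from above, demo only. */
#define DEMO_HWEIGHT8(w) \
    ((unsigned int) \
     ((!!((w) & (1ULL << 0))) + \
      (!!((w) & (1ULL << 1))) + \
      (!!((w) & (1ULL << 2))) + \
      (!!((w) & (1ULL << 3))) + \
      (!!((w) & (1ULL << 4))) + \
      (!!((w) & (1ULL << 5))) + \
      (!!((w) & (1ULL << 6))) + \
      (!!((w) & (1ULL << 7)))))

int main(void)
{
    /* 0xF0 has four set bits; the macro folds to the constant 4. */
    printf("%u\n", DEMO_HWEIGHT8(0xF0));        /* prints 4 */
    printf("%d\n", __builtin_popcount(0xF0));   /* same result at runtime */
    return 0;
}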
/drivers/include/asm-generic/bitops/ext2-atomic-setbit.h
0,0 → 1,11
#ifndef _ASM_GENERIC_BITOPS_EXT2_ATOMIC_SETBIT_H_
#define _ASM_GENERIC_BITOPS_EXT2_ATOMIC_SETBIT_H_
 
/*
 * Atomic-bitops-based version of the ext2 atomic bitops
 */
 
#define ext2_set_bit_atomic(l, nr, addr) test_and_set_bit_le(nr, addr)
#define ext2_clear_bit_atomic(l, nr, addr) test_and_clear_bit_le(nr, addr)
 
#endif /* _ASM_GENERIC_BITOPS_EXT2_ATOMIC_SETBIT_H_ */
/drivers/include/asm-generic/bitops/ext2-non-atomic.h
0,0 → 1,20
#ifndef _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_
 
#include <asm-generic/bitops/le.h>
 
#define ext2_set_bit(nr,addr) \
    generic___test_and_set_le_bit((nr),(unsigned long *)(addr))
#define ext2_clear_bit(nr,addr) \
    generic___test_and_clear_le_bit((nr),(unsigned long *)(addr))

#define ext2_test_bit(nr,addr) \
    generic_test_le_bit((nr),(unsigned long *)(addr))
#define ext2_find_first_zero_bit(addr, size) \
    generic_find_first_zero_le_bit((unsigned long *)(addr), (size))
#define ext2_find_next_zero_bit(addr, size, off) \
    generic_find_next_zero_le_bit((unsigned long *)(addr), (size), (off))
#define ext2_find_next_bit(addr, size, off) \
    generic_find_next_le_bit((unsigned long *)(addr), (size), (off))
 
#endif /* _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ */
/drivers/include/asm-generic/bitops/find.h
0,0 → 1,62
#ifndef _ASM_GENERIC_BITOPS_FIND_H_
#define _ASM_GENERIC_BITOPS_FIND_H_
 
#ifndef find_next_bit
/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The bitmap size in bits
 *
 * Returns the bit number of the next set bit.
 * If no bits are set, returns @size.
 */
extern unsigned long find_next_bit(const unsigned long *addr,
    unsigned long size, unsigned long offset);
#endif

#ifndef find_next_zero_bit
/**
 * find_next_zero_bit - find the next cleared bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The bitmap size in bits
 *
 * Returns the bit number of the next zero bit.
 * If no bits are zero, returns @size.
 */
extern unsigned long find_next_zero_bit(const unsigned long *addr,
    unsigned long size, unsigned long offset);
#endif

#ifdef CONFIG_GENERIC_FIND_FIRST_BIT

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum number of bits to search
 *
 * Returns the bit number of the first set bit.
 * If no bits are set, returns @size.
 */
extern unsigned long find_first_bit(const unsigned long *addr,
    unsigned long size);

/**
 * find_first_zero_bit - find the first cleared bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum number of bits to search
 *
 * Returns the bit number of the first cleared bit.
 * If no bits are zero, returns @size.
 */
extern unsigned long find_first_zero_bit(const unsigned long *addr,
    unsigned long size);
#else /* CONFIG_GENERIC_FIND_FIRST_BIT */

#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)

#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */

#endif /* _ASM_GENERIC_BITOPS_FIND_H_ */
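
These declarations support the usual "walk every set bit" loop. A sketch (illustrative; assumes a kernel build environment where DECLARE_BITMAP and __set_bit come from <linux/types.h> and <linux/bitops.h>):

static void demo_walk_bits(void)
{
    DECLARE_BITMAP(map, 128) = { 0 };
    unsigned long bit;

    __set_bit(3, map);
    __set_bit(97, map);

    for (bit = find_first_bit(map, 128);
         bit < 128;
         bit = find_next_bit(map, 128, bit + 1)) {
        /* Visits 3, then 97; a return value of 128 ends the loop. */
    }
}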
/drivers/include/asm-generic/bitops/fls64.h
0,0 → 1,36
#ifndef _ASM_GENERIC_BITOPS_FLS64_H_
#define _ASM_GENERIC_BITOPS_FLS64_H_
 
#include <asm/types.h>
 
/**
 * fls64 - find last set bit in a 64-bit word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 64.
 */
#if BITS_PER_LONG == 32
static __always_inline int fls64(__u64 x)
{
    __u32 h = x >> 32;
    if (h)
        return fls(h) + 32;
    return fls(x);
}
#elif BITS_PER_LONG == 64
static __always_inline int fls64(__u64 x)
{
    if (x == 0)
        return 0;
    return __fls(x) + 1;
}
#else
#error BITS_PER_LONG not 32 or 64
#endif
 
#endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */
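
A few worked values pin down the convention. Standalone sketch (the real fls/__fls come from arch headers, so this demo substitutes GCC's __builtin_clzll):

#include <stdio.h>

/* Demo stand-in for fls64(), using a GCC/Clang builtin. */
static int demo_fls64(unsigned long long x)
{
    return x ? 64 - __builtin_clzll(x) : 0;
}

int main(void)
{
    printf("%d\n", demo_fls64(0));           /* 0: no bit set        */
    printf("%d\n", demo_fls64(1));           /* 1: bit 0 is the MSB  */
    printf("%d\n", demo_fls64(1ULL << 63));  /* 64: topmost position */
    return 0;
}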
/drivers/include/asm-generic/bitops/hweight.h
0,0 → 1,7
#ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_
#define _ASM_GENERIC_BITOPS_HWEIGHT_H_
 
#include <asm-generic/bitops/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>
 
#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */
/drivers/include/asm-generic/bitops/le.h
0,0 → 1,97
#ifndef _ASM_GENERIC_BITOPS_LE_H_
#define _ASM_GENERIC_BITOPS_LE_H_
 
#include <asm/types.h>
#include <asm/byteorder.h>
 
#if defined(__LITTLE_ENDIAN)
 
#define BITOP_LE_SWIZZLE 0
 
static inline unsigned long find_next_zero_bit_le(const void *addr,
    unsigned long size, unsigned long offset)
{
    return find_next_zero_bit(addr, size, offset);
}

static inline unsigned long find_next_bit_le(const void *addr,
    unsigned long size, unsigned long offset)
{
    return find_next_bit(addr, size, offset);
}

static inline unsigned long find_first_zero_bit_le(const void *addr,
    unsigned long size)
{
    return find_first_zero_bit(addr, size);
}
 
#elif defined(__BIG_ENDIAN)
 
#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
 
#ifndef find_next_zero_bit_le
extern unsigned long find_next_zero_bit_le(const void *addr,
    unsigned long size, unsigned long offset);
#endif

#ifndef find_next_bit_le
extern unsigned long find_next_bit_le(const void *addr,
    unsigned long size, unsigned long offset);
#endif

#ifndef find_first_zero_bit_le
#define find_first_zero_bit_le(addr, size) \
    find_next_zero_bit_le((addr), (size), 0)
#endif
 
#else
#error "Please fix <asm/byteorder.h>"
#endif
 
static inline int test_bit_le(int nr, const void *addr)
{
    return test_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}

static inline void set_bit_le(int nr, void *addr)
{
    set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}

static inline void clear_bit_le(int nr, void *addr)
{
    clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}

static inline void __set_bit_le(int nr, void *addr)
{
    __set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}

static inline void __clear_bit_le(int nr, void *addr)
{
    __clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}

static inline int test_and_set_bit_le(int nr, void *addr)
{
    return test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}

static inline int test_and_clear_bit_le(int nr, void *addr)
{
    return test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}

static inline int __test_and_set_bit_le(int nr, void *addr)
{
    return __test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}

static inline int __test_and_clear_bit_le(int nr, void *addr)
{
    return __test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}
 
#endif /* _ASM_GENERIC_BITOPS_LE_H_ */
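
On big-endian machines, BITOP_LE_SWIZZLE re-maps little-endian bit numbers onto the native long. A standalone sketch of the index math for BITS_PER_LONG == 64 (illustrative only):

#include <stdio.h>

#define DEMO_BITS_PER_LONG 64
#define DEMO_LE_SWIZZLE ((DEMO_BITS_PER_LONG - 1) & ~0x7)   /* == 56 */

int main(void)
{
    /* LE bit 0 lives in byte 0, which a 64-bit big-endian long
     * stores in its most significant byte: index 0 ^ 56 = 56. */
    printf("%d\n", 0 ^ DEMO_LE_SWIZZLE);   /* 56 */
    printf("%d\n", 7 ^ DEMO_LE_SWIZZLE);   /* 63: last bit of byte 0 */
    printf("%d\n", 8 ^ DEMO_LE_SWIZZLE);   /* 48: first bit of byte 1 */
    return 0;
}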
/drivers/include/asm-generic/bitops/minix.h
0,0 → 1,15
#ifndef _ASM_GENERIC_BITOPS_MINIX_H_
#define _ASM_GENERIC_BITOPS_MINIX_H_
 
#define minix_test_and_set_bit(nr,addr) \
    __test_and_set_bit((nr),(unsigned long *)(addr))
#define minix_set_bit(nr,addr) \
    __set_bit((nr),(unsigned long *)(addr))
#define minix_test_and_clear_bit(nr,addr) \
    __test_and_clear_bit((nr),(unsigned long *)(addr))
#define minix_test_bit(nr,addr) \
    test_bit((nr),(unsigned long *)(addr))
#define minix_find_first_zero_bit(addr,size) \
    find_first_zero_bit((unsigned long *)(addr),(size))
 
#endif /* _ASM_GENERIC_BITOPS_MINIX_H_ */
/drivers/include/asm-generic/bitops/sched.h
0,0 → 1,31
#ifndef _ASM_GENERIC_BITOPS_SCHED_H_
#define _ASM_GENERIC_BITOPS_SCHED_H_
 
#include <linux/compiler.h> /* unlikely() */
#include <asm/types.h>
 
/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 100-bit bitmap. It's guaranteed that at least
 * one of the 100 bits is set.
 */
static inline int sched_find_first_bit(const unsigned long *b)
{
#if BITS_PER_LONG == 64
    if (b[0])
        return __ffs(b[0]);
    return __ffs(b[1]) + 64;
#elif BITS_PER_LONG == 32
    if (b[0])
        return __ffs(b[0]);
    if (b[1])
        return __ffs(b[1]) + 32;
    if (b[2])
        return __ffs(b[2]) + 64;
    return __ffs(b[3]) + 96;
#else
#error BITS_PER_LONG not defined
#endif
}
 
#endif /* _ASM_GENERIC_BITOPS_SCHED_H_ */
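
A sketch of the intended use, modeled on an O(1)-scheduler-style priority bitmap (illustrative; assumes a kernel environment providing DECLARE_BITMAP, __set_bit, and __ffs):

static int demo_best_prio(void)
{
    /* Hypothetical 100-priority run-queue bitmap. */
    DECLARE_BITMAP(prio_bitmap, 100) = { 0 };

    __set_bit(42, prio_bitmap);
    /* Precondition: at least one bit must be set. */
    return sched_find_first_bit(prio_bitmap);   /* 42 */
}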
/drivers/include/asm-generic/bitsperlong.h
0,0 → 1,25
#ifndef __ASM_GENERIC_BITS_PER_LONG
#define __ASM_GENERIC_BITS_PER_LONG
 
#include <uapi/asm-generic/bitsperlong.h>
 
 
#ifdef CONFIG_64BIT
#define BITS_PER_LONG 64
#else
#define BITS_PER_LONG 32
#endif /* CONFIG_64BIT */
 
/*
 * FIXME: The check currently breaks the x86-64 build, so it's
 * temporarily disabled. Please fix x86-64 and re-enable it.
 */
#if 0 && BITS_PER_LONG != __BITS_PER_LONG
#error Inconsistent word size. Check asm/bitsperlong.h
#endif
 
#ifndef BITS_PER_LONG_LONG
#define BITS_PER_LONG_LONG 64
#endif
 
#endif /* __ASM_GENERIC_BITS_PER_LONG */
/drivers/include/asm-generic/cacheflush.h
0,0 → 1,34
#ifndef __ASM_CACHEFLUSH_H
#define __ASM_CACHEFLUSH_H
 
/* Keep includes the same across arches. */
#include <linux/mm.h>
 
/*
 * The cache doesn't need to be flushed when TLB entries change because
 * the cache is mapped to physical memory, not virtual memory.
 */
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_dup_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma,pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
 
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
    do { \
        memcpy(dst, src, len); \
        flush_icache_user_range(vma, page, vaddr, len); \
    } while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
    memcpy(dst, src, len)
 
#endif /* __ASM_CACHEFLUSH_H */
/drivers/include/asm-generic/delay.h
0,0 → 1,44
#ifndef __ASM_GENERIC_DELAY_H
#define __ASM_GENERIC_DELAY_H
 
/* Deliberately undefined functions: referencing them turns invalid constant arguments into link-time errors */
extern void __bad_udelay(void);
extern void __bad_ndelay(void);
 
extern void __udelay(unsigned long usecs);
extern void __ndelay(unsigned long nsecs);
extern void __const_udelay(unsigned long xloops);
extern void __delay(unsigned long loops);
 
/*
 * The weird n/20000 thing suppresses a "comparison is always false due to
 * limited range of data type" warning with non-const 8-bit arguments.
 */

/* 0x10c7 is 2**32 / 1000000 (rounded up) */
#define udelay(n) \
    ({ \
        if (__builtin_constant_p(n)) { \
            if ((n) / 20000 >= 1) \
                __bad_udelay(); \
            else \
                __const_udelay((n) * 0x10c7ul); \
        } else { \
            __udelay(n); \
        } \
    })

/* 0x5 is 2**32 / 1000000000 (rounded up) */
#define ndelay(n) \
    ({ \
        if (__builtin_constant_p(n)) { \
            if ((n) / 20000 >= 1) \
                __bad_ndelay(); \
            else \
                __const_udelay((n) * 5ul); \
        } else { \
            __ndelay(n); \
        } \
    })
 
#endif /* __ASM_GENERIC_DELAY_H */
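
The magic constants fall out of fixed-point arithmetic: 2^32 / 10^6 = 4294.97, which rounds up to 4295 = 0x10c7, so multiplying microseconds by 0x10c7 yields loop counts in 1/2^32 units; likewise 2^32 / 10^9 rounds up to 5 for ndelay(). A usage sketch (illustrative; assumes a kernel build environment):

static void demo_delays(void)
{
    udelay(100);   /* constant arg: becomes __const_udelay(100 * 0x10c7ul) */
    ndelay(500);   /* constant arg: becomes __const_udelay(500 * 5ul)      */
    /* udelay(20000) would instead reference the undefined __bad_udelay()
     * and fail at link time. */
}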
/drivers/include/asm-generic/getorder.h
0,0 → 1,61
#ifndef __ASM_GENERIC_GETORDER_H
#define __ASM_GENERIC_GETORDER_H
 
#ifndef __ASSEMBLY__
 
#include <linux/compiler.h>
#include <linux/log2.h>
 
/*
 * Runtime evaluation of get_order()
 */
static inline __attribute_const__
int __get_order(unsigned long size)
{
    int order;

    size--;
    size >>= PAGE_SHIFT;
#if BITS_PER_LONG == 32
    order = fls(size);
#else
    order = fls64(size);
#endif
    return order;
}

/**
 * get_order - Determine the allocation order of a memory size
 * @size: The size for which to get the order
 *
 * Determine the allocation order of a particular sized block of memory. This
 * is on a logarithmic scale, where:
 *
 *  0 -> 2^0 * PAGE_SIZE and below
 *  1 -> 2^1 * PAGE_SIZE to 2^0 * PAGE_SIZE + 1
 *  2 -> 2^2 * PAGE_SIZE to 2^1 * PAGE_SIZE + 1
 *  3 -> 2^3 * PAGE_SIZE to 2^2 * PAGE_SIZE + 1
 *  4 -> 2^4 * PAGE_SIZE to 2^3 * PAGE_SIZE + 1
 *  ...
 *
 * The order returned is used to find the smallest allocation granule required
 * to hold an object of the specified size.
 *
 * The result is undefined if the size is 0.
 *
 * This function may be used to initialise variables with compile time
 * evaluations of constants.
 */
#define get_order(n) \
( \
    __builtin_constant_p(n) ? ( \
        ((n) == 0UL) ? BITS_PER_LONG - PAGE_SHIFT : \
        (((n) < (1UL << PAGE_SHIFT)) ? 0 : \
         ilog2((n) - 1) - PAGE_SHIFT + 1) \
    ) : \
    __get_order(n) \
)
 
#endif /* __ASSEMBLY__ */
 
#endif /* __ASM_GENERIC_GETORDER_H */
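
A quick check with 4 KiB pages. Standalone sketch that mirrors __get_order(), with a builtin standing in for fls()/fls64() and a hard-coded DEMO_PAGE_SHIFT (both assumptions of this demo):

#include <stdio.h>

#define DEMO_PAGE_SHIFT 12   /* assume 4 KiB pages */

/* Mirrors __get_order() above; demo only. */
static int demo_get_order(unsigned long size)
{
    size--;
    size >>= DEMO_PAGE_SHIFT;
    /* Portable fls(): position of the most significant set bit. */
    return size ? (int)(8 * sizeof(long)) - __builtin_clzl(size) : 0;
}

int main(void)
{
    printf("%d\n", demo_get_order(4096));    /* 0: fits in one page  */
    printf("%d\n", demo_get_order(4097));    /* 1: needs two pages   */
    printf("%d\n", demo_get_order(16384));   /* 2: needs four pages  */
    return 0;
}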
/drivers/include/asm-generic/int-ll64.h
0,0 → 1,49
/*
 * asm-generic/int-ll64.h
 *
 * Integer declarations for architectures which use "long long"
 * for 64-bit types.
 */
#ifndef _ASM_GENERIC_INT_LL64_H
#define _ASM_GENERIC_INT_LL64_H
 
#include <uapi/asm-generic/int-ll64.h>
 
 
#ifndef __ASSEMBLY__
 
typedef signed char s8;
typedef unsigned char u8;
 
typedef signed short s16;
typedef unsigned short u16;
 
typedef signed int s32;
typedef unsigned int u32;
 
typedef signed long long s64;
typedef unsigned long long u64;
 
#define S8_C(x) x
#define U8_C(x) x ## U
#define S16_C(x) x
#define U16_C(x) x ## U
#define S32_C(x) x
#define U32_C(x) x ## U
#define S64_C(x) x ## LL
#define U64_C(x) x ## ULL
 
#else /* __ASSEMBLY__ */
 
#define S8_C(x) x
#define U8_C(x) x
#define S16_C(x) x
#define U16_C(x) x
#define S32_C(x) x
#define U32_C(x) x
#define S64_C(x) x
#define U64_C(x) x
 
#endif /* __ASSEMBLY__ */
 
#endif /* _ASM_GENERIC_INT_LL64_H */
/drivers/include/asm-generic/memory_model.h
0,0 → 1,77
#ifndef __ASM_MEMORY_MODEL_H
#define __ASM_MEMORY_MODEL_H
 
#ifndef __ASSEMBLY__
 
#if defined(CONFIG_FLATMEM)
 
#ifndef ARCH_PFN_OFFSET
#define ARCH_PFN_OFFSET (0UL)
#endif
 
#elif defined(CONFIG_DISCONTIGMEM)
 
#ifndef arch_pfn_to_nid
#define arch_pfn_to_nid(pfn) pfn_to_nid(pfn)
#endif

#ifndef arch_local_page_offset
#define arch_local_page_offset(pfn, nid) \
    ((pfn) - NODE_DATA(nid)->node_start_pfn)
#endif
 
#endif /* CONFIG_DISCONTIGMEM */
 
/*
 * supports 3 memory models.
 */
#if defined(CONFIG_FLATMEM)

#define __pfn_to_page(pfn) (mem_map + ((pfn) - ARCH_PFN_OFFSET))
#define __page_to_pfn(page) ((unsigned long)((page) - mem_map) + \
    ARCH_PFN_OFFSET)
#elif defined(CONFIG_DISCONTIGMEM)

#define __pfn_to_page(pfn) \
    ({ unsigned long __pfn = (pfn); \
       unsigned long __nid = arch_pfn_to_nid(__pfn); \
       NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid); \
    })

#define __page_to_pfn(pg) \
    ({ const struct page *__pg = (pg); \
       struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg)); \
       (unsigned long)(__pg - __pgdat->node_mem_map) + \
       __pgdat->node_start_pfn; \
    })

#elif defined(CONFIG_SPARSEMEM_VMEMMAP)

/* memmap is virtually contiguous. */
#define __pfn_to_page(pfn) (vmemmap + (pfn))
#define __page_to_pfn(page) (unsigned long)((page) - vmemmap)

#elif defined(CONFIG_SPARSEMEM)
/*
 * Note: section's mem_map is encoded to reflect its start_pfn.
 * section[i].section_mem_map == mem_map's address - start_pfn;
 */
#define __page_to_pfn(pg) \
    ({ const struct page *__pg = (pg); \
       int __sec = page_to_section(__pg); \
       (unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec))); \
    })

#define __pfn_to_page(pfn) \
    ({ unsigned long __pfn = (pfn); \
       struct mem_section *__sec = __pfn_to_section(__pfn); \
       __section_mem_map_addr(__sec) + __pfn; \
    })
#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */
 
#define page_to_pfn __page_to_pfn
#define pfn_to_page __pfn_to_page
 
#endif /* __ASSEMBLY__ */
 
#endif
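
Under CONFIG_FLATMEM the two conversions are plain pointer arithmetic over one global mem_map array. A standalone model of that case (all DEMO_ names are inventions of this sketch, not kernel API):

#include <stdio.h>

struct page { int dummy; };

#define DEMO_ARCH_PFN_OFFSET 0x100UL
static struct page demo_mem_map[16];

#define demo_pfn_to_page(pfn)  (demo_mem_map + ((pfn) - DEMO_ARCH_PFN_OFFSET))
#define demo_page_to_pfn(page) \
    ((unsigned long)((page) - demo_mem_map) + DEMO_ARCH_PFN_OFFSET)

int main(void)
{
    struct page *pg = demo_pfn_to_page(0x105UL);
    /* Round-trips back to the original frame number, 0x105 == 261. */
    printf("%lu\n", demo_page_to_pfn(pg));
    return 0;
}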
/drivers/include/asm-generic/percpu.h
0,0 → 1,420
#ifndef _ASM_GENERIC_PERCPU_H_
#define _ASM_GENERIC_PERCPU_H_
 
#include <linux/compiler.h>
#include <linux/threads.h>
#include <linux/percpu-defs.h>
 
#ifdef CONFIG_SMP
 
/*
 * per_cpu_offset() is the offset that has to be added to a
 * percpu variable to get to the instance for a certain processor.
 *
 * Most arches use the __per_cpu_offset array for those offsets but
 * some arches have their own ways of determining the offset (x86_64, s390).
 */
#ifndef __per_cpu_offset
extern unsigned long __per_cpu_offset[NR_CPUS];
 
#define per_cpu_offset(x) (__per_cpu_offset[x])
#endif
 
/*
 * Determine the offset for the currently active processor.
 * An arch may define __my_cpu_offset to provide a more effective
 * means of obtaining the offset to the per cpu variables of the
 * current processor.
 */
#ifndef __my_cpu_offset
#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id())
#endif
#ifdef CONFIG_DEBUG_PREEMPT
#define my_cpu_offset per_cpu_offset(smp_processor_id())
#else
#define my_cpu_offset __my_cpu_offset
#endif
 
/*
 * Arch may define arch_raw_cpu_ptr() to provide more efficient address
 * translations for raw_cpu_ptr().
 */
#ifndef arch_raw_cpu_ptr
#define arch_raw_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
#endif
 
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
extern void setup_per_cpu_areas(void);
#endif
 
#endif /* SMP */
 
#ifndef PER_CPU_BASE_SECTION
#ifdef CONFIG_SMP
#define PER_CPU_BASE_SECTION ".data..percpu"
#else
#define PER_CPU_BASE_SECTION ".data"
#endif
#endif
 
#ifndef PER_CPU_ATTRIBUTES
#define PER_CPU_ATTRIBUTES
#endif
 
#ifndef PER_CPU_DEF_ATTRIBUTES
#define PER_CPU_DEF_ATTRIBUTES
#endif
 
#define raw_cpu_generic_to_op(pcp, val, op) \
do { \
    *raw_cpu_ptr(&(pcp)) op val; \
} while (0)

#define raw_cpu_generic_add_return(pcp, val) \
({ \
    raw_cpu_add(pcp, val); \
    raw_cpu_read(pcp); \
})

#define raw_cpu_generic_xchg(pcp, nval) \
({ \
    typeof(pcp) __ret; \
    __ret = raw_cpu_read(pcp); \
    raw_cpu_write(pcp, nval); \
    __ret; \
})

#define raw_cpu_generic_cmpxchg(pcp, oval, nval) \
({ \
    typeof(pcp) __ret; \
    __ret = raw_cpu_read(pcp); \
    if (__ret == (oval)) \
        raw_cpu_write(pcp, nval); \
    __ret; \
})

#define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
({ \
    int __ret = 0; \
    if (raw_cpu_read(pcp1) == (oval1) && \
        raw_cpu_read(pcp2) == (oval2)) { \
        raw_cpu_write(pcp1, nval1); \
        raw_cpu_write(pcp2, nval2); \
        __ret = 1; \
    } \
    (__ret); \
})

#define this_cpu_generic_read(pcp) \
({ \
    typeof(pcp) __ret; \
    preempt_disable(); \
    __ret = *this_cpu_ptr(&(pcp)); \
    preempt_enable(); \
    __ret; \
})

#define this_cpu_generic_to_op(pcp, val, op) \
do { \
    unsigned long __flags; \
    raw_local_irq_save(__flags); \
    *raw_cpu_ptr(&(pcp)) op val; \
    raw_local_irq_restore(__flags); \
} while (0)

#define this_cpu_generic_add_return(pcp, val) \
({ \
    typeof(pcp) __ret; \
    unsigned long __flags; \
    raw_local_irq_save(__flags); \
    raw_cpu_add(pcp, val); \
    __ret = raw_cpu_read(pcp); \
    raw_local_irq_restore(__flags); \
    __ret; \
})

#define this_cpu_generic_xchg(pcp, nval) \
({ \
    typeof(pcp) __ret; \
    unsigned long __flags; \
    raw_local_irq_save(__flags); \
    __ret = raw_cpu_read(pcp); \
    raw_cpu_write(pcp, nval); \
    raw_local_irq_restore(__flags); \
    __ret; \
})

#define this_cpu_generic_cmpxchg(pcp, oval, nval) \
({ \
    typeof(pcp) __ret; \
    unsigned long __flags; \
    raw_local_irq_save(__flags); \
    __ret = raw_cpu_read(pcp); \
    if (__ret == (oval)) \
        raw_cpu_write(pcp, nval); \
    raw_local_irq_restore(__flags); \
    __ret; \
})

#define this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
({ \
    int __ret; \
    unsigned long __flags; \
    raw_local_irq_save(__flags); \
    __ret = raw_cpu_generic_cmpxchg_double(pcp1, pcp2, \
        oval1, oval2, nval1, nval2); \
    raw_local_irq_restore(__flags); \
    __ret; \
})
 
#ifndef raw_cpu_read_1
#define raw_cpu_read_1(pcp) (*raw_cpu_ptr(&(pcp)))
#endif
#ifndef raw_cpu_read_2
#define raw_cpu_read_2(pcp) (*raw_cpu_ptr(&(pcp)))
#endif
#ifndef raw_cpu_read_4
#define raw_cpu_read_4(pcp) (*raw_cpu_ptr(&(pcp)))
#endif
#ifndef raw_cpu_read_8
#define raw_cpu_read_8(pcp) (*raw_cpu_ptr(&(pcp)))
#endif
 
#ifndef raw_cpu_write_1
#define raw_cpu_write_1(pcp, val) raw_cpu_generic_to_op(pcp, val, =)
#endif
#ifndef raw_cpu_write_2
#define raw_cpu_write_2(pcp, val) raw_cpu_generic_to_op(pcp, val, =)
#endif
#ifndef raw_cpu_write_4
#define raw_cpu_write_4(pcp, val) raw_cpu_generic_to_op(pcp, val, =)
#endif
#ifndef raw_cpu_write_8
#define raw_cpu_write_8(pcp, val) raw_cpu_generic_to_op(pcp, val, =)
#endif
 
#ifndef raw_cpu_add_1
#define raw_cpu_add_1(pcp, val) raw_cpu_generic_to_op(pcp, val, +=)
#endif
#ifndef raw_cpu_add_2
#define raw_cpu_add_2(pcp, val) raw_cpu_generic_to_op(pcp, val, +=)
#endif
#ifndef raw_cpu_add_4
#define raw_cpu_add_4(pcp, val) raw_cpu_generic_to_op(pcp, val, +=)
#endif
#ifndef raw_cpu_add_8
#define raw_cpu_add_8(pcp, val) raw_cpu_generic_to_op(pcp, val, +=)
#endif
 
#ifndef raw_cpu_and_1
#define raw_cpu_and_1(pcp, val) raw_cpu_generic_to_op(pcp, val, &=)
#endif
#ifndef raw_cpu_and_2
#define raw_cpu_and_2(pcp, val) raw_cpu_generic_to_op(pcp, val, &=)
#endif
#ifndef raw_cpu_and_4
#define raw_cpu_and_4(pcp, val) raw_cpu_generic_to_op(pcp, val, &=)
#endif
#ifndef raw_cpu_and_8
#define raw_cpu_and_8(pcp, val) raw_cpu_generic_to_op(pcp, val, &=)
#endif
 
#ifndef raw_cpu_or_1
#define raw_cpu_or_1(pcp, val) raw_cpu_generic_to_op(pcp, val, |=)
#endif
#ifndef raw_cpu_or_2
#define raw_cpu_or_2(pcp, val) raw_cpu_generic_to_op(pcp, val, |=)
#endif
#ifndef raw_cpu_or_4
#define raw_cpu_or_4(pcp, val) raw_cpu_generic_to_op(pcp, val, |=)
#endif
#ifndef raw_cpu_or_8
#define raw_cpu_or_8(pcp, val) raw_cpu_generic_to_op(pcp, val, |=)
#endif
 
#ifndef raw_cpu_add_return_1
#define raw_cpu_add_return_1(pcp, val) raw_cpu_generic_add_return(pcp, val)
#endif
#ifndef raw_cpu_add_return_2
#define raw_cpu_add_return_2(pcp, val) raw_cpu_generic_add_return(pcp, val)
#endif
#ifndef raw_cpu_add_return_4
#define raw_cpu_add_return_4(pcp, val) raw_cpu_generic_add_return(pcp, val)
#endif
#ifndef raw_cpu_add_return_8
#define raw_cpu_add_return_8(pcp, val) raw_cpu_generic_add_return(pcp, val)
#endif
 
#ifndef raw_cpu_xchg_1
#define raw_cpu_xchg_1(pcp, nval) raw_cpu_generic_xchg(pcp, nval)
#endif
#ifndef raw_cpu_xchg_2
#define raw_cpu_xchg_2(pcp, nval) raw_cpu_generic_xchg(pcp, nval)
#endif
#ifndef raw_cpu_xchg_4
#define raw_cpu_xchg_4(pcp, nval) raw_cpu_generic_xchg(pcp, nval)
#endif
#ifndef raw_cpu_xchg_8
#define raw_cpu_xchg_8(pcp, nval) raw_cpu_generic_xchg(pcp, nval)
#endif
 
#ifndef raw_cpu_cmpxchg_1
#define raw_cpu_cmpxchg_1(pcp, oval, nval) \
    raw_cpu_generic_cmpxchg(pcp, oval, nval)
#endif
#ifndef raw_cpu_cmpxchg_2
#define raw_cpu_cmpxchg_2(pcp, oval, nval) \
    raw_cpu_generic_cmpxchg(pcp, oval, nval)
#endif
#ifndef raw_cpu_cmpxchg_4
#define raw_cpu_cmpxchg_4(pcp, oval, nval) \
    raw_cpu_generic_cmpxchg(pcp, oval, nval)
#endif
#ifndef raw_cpu_cmpxchg_8
#define raw_cpu_cmpxchg_8(pcp, oval, nval) \
    raw_cpu_generic_cmpxchg(pcp, oval, nval)
#endif

#ifndef raw_cpu_cmpxchg_double_1
#define raw_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
    raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
#endif
#ifndef raw_cpu_cmpxchg_double_2
#define raw_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
    raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
#endif
#ifndef raw_cpu_cmpxchg_double_4
#define raw_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
    raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
#endif
#ifndef raw_cpu_cmpxchg_double_8
#define raw_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
    raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
#endif
 
#ifndef this_cpu_read_1
#define this_cpu_read_1(pcp) this_cpu_generic_read(pcp)
#endif
#ifndef this_cpu_read_2
#define this_cpu_read_2(pcp) this_cpu_generic_read(pcp)
#endif
#ifndef this_cpu_read_4
#define this_cpu_read_4(pcp) this_cpu_generic_read(pcp)
#endif
#ifndef this_cpu_read_8
#define this_cpu_read_8(pcp) this_cpu_generic_read(pcp)
#endif
 
#ifndef this_cpu_write_1
#define this_cpu_write_1(pcp, val) this_cpu_generic_to_op(pcp, val, =)
#endif
#ifndef this_cpu_write_2
#define this_cpu_write_2(pcp, val) this_cpu_generic_to_op(pcp, val, =)
#endif
#ifndef this_cpu_write_4
#define this_cpu_write_4(pcp, val) this_cpu_generic_to_op(pcp, val, =)
#endif
#ifndef this_cpu_write_8
#define this_cpu_write_8(pcp, val) this_cpu_generic_to_op(pcp, val, =)
#endif
 
#ifndef this_cpu_add_1
#define this_cpu_add_1(pcp, val) this_cpu_generic_to_op(pcp, val, +=)
#endif
#ifndef this_cpu_add_2
#define this_cpu_add_2(pcp, val) this_cpu_generic_to_op(pcp, val, +=)
#endif
#ifndef this_cpu_add_4
#define this_cpu_add_4(pcp, val) this_cpu_generic_to_op(pcp, val, +=)
#endif
#ifndef this_cpu_add_8
#define this_cpu_add_8(pcp, val) this_cpu_generic_to_op(pcp, val, +=)
#endif
 
#ifndef this_cpu_and_1
#define this_cpu_and_1(pcp, val) this_cpu_generic_to_op(pcp, val, &=)
#endif
#ifndef this_cpu_and_2
#define this_cpu_and_2(pcp, val) this_cpu_generic_to_op(pcp, val, &=)
#endif
#ifndef this_cpu_and_4
#define this_cpu_and_4(pcp, val) this_cpu_generic_to_op(pcp, val, &=)
#endif
#ifndef this_cpu_and_8
#define this_cpu_and_8(pcp, val) this_cpu_generic_to_op(pcp, val, &=)
#endif
 
#ifndef this_cpu_or_1
#define this_cpu_or_1(pcp, val) this_cpu_generic_to_op(pcp, val, |=)
#endif
#ifndef this_cpu_or_2
#define this_cpu_or_2(pcp, val) this_cpu_generic_to_op(pcp, val, |=)
#endif
#ifndef this_cpu_or_4
#define this_cpu_or_4(pcp, val) this_cpu_generic_to_op(pcp, val, |=)
#endif
#ifndef this_cpu_or_8
#define this_cpu_or_8(pcp, val) this_cpu_generic_to_op(pcp, val, |=)
#endif
 
#ifndef this_cpu_add_return_1
#define this_cpu_add_return_1(pcp, val) this_cpu_generic_add_return(pcp, val)
#endif
#ifndef this_cpu_add_return_2
#define this_cpu_add_return_2(pcp, val) this_cpu_generic_add_return(pcp, val)
#endif
#ifndef this_cpu_add_return_4
#define this_cpu_add_return_4(pcp, val) this_cpu_generic_add_return(pcp, val)
#endif
#ifndef this_cpu_add_return_8
#define this_cpu_add_return_8(pcp, val) this_cpu_generic_add_return(pcp, val)
#endif
 
#ifndef this_cpu_xchg_1
#define this_cpu_xchg_1(pcp, nval) this_cpu_generic_xchg(pcp, nval)
#endif
#ifndef this_cpu_xchg_2
#define this_cpu_xchg_2(pcp, nval) this_cpu_generic_xchg(pcp, nval)
#endif
#ifndef this_cpu_xchg_4
#define this_cpu_xchg_4(pcp, nval) this_cpu_generic_xchg(pcp, nval)
#endif
#ifndef this_cpu_xchg_8
#define this_cpu_xchg_8(pcp, nval) this_cpu_generic_xchg(pcp, nval)
#endif
 
#ifndef this_cpu_cmpxchg_1
#define this_cpu_cmpxchg_1(pcp, oval, nval) \
    this_cpu_generic_cmpxchg(pcp, oval, nval)
#endif
#ifndef this_cpu_cmpxchg_2
#define this_cpu_cmpxchg_2(pcp, oval, nval) \
    this_cpu_generic_cmpxchg(pcp, oval, nval)
#endif
#ifndef this_cpu_cmpxchg_4
#define this_cpu_cmpxchg_4(pcp, oval, nval) \
    this_cpu_generic_cmpxchg(pcp, oval, nval)
#endif
#ifndef this_cpu_cmpxchg_8
#define this_cpu_cmpxchg_8(pcp, oval, nval) \
    this_cpu_generic_cmpxchg(pcp, oval, nval)
#endif

#ifndef this_cpu_cmpxchg_double_1
#define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
    this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
#endif
#ifndef this_cpu_cmpxchg_double_2
#define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
    this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
#endif
#ifndef this_cpu_cmpxchg_double_4
#define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
    this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
#endif
#ifndef this_cpu_cmpxchg_double_8
#define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
    this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
#endif
 
#endif /* _ASM_GENERIC_PERCPU_H_ */
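
These size-suffixed fallbacks back the public this_cpu_*() operations. A usage sketch (illustrative; assumes a kernel context where DEFINE_PER_CPU comes from <linux/percpu-defs.h> and the this_cpu_add/this_cpu_read wrappers from <linux/percpu.h>; demo_events is hypothetical):

/* Hypothetical per-CPU event counter. */
static DEFINE_PER_CPU(unsigned long, demo_events);

static void demo_event(void)
{
    /* IRQ-safe on the generic fallback: IRQs are disabled around +=. */
    this_cpu_add(demo_events, 1);
}

static unsigned long demo_read_local(void)
{
    return this_cpu_read(demo_events);
}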
/drivers/include/asm-generic/pgtable-nopmd.h
0,0 → 1,69
#ifndef _PGTABLE_NOPMD_H
#define _PGTABLE_NOPMD_H
 
#ifndef __ASSEMBLY__
 
#include <asm-generic/pgtable-nopud.h>
 
struct mm_struct;
 
#define __PAGETABLE_PMD_FOLDED
 
/*
 * Having the pmd type consist of a pud gets the size right, and allows
 * us to conceptually access the pud entry that this pmd is folded into
 * without casting.
 */
typedef struct { pud_t pud; } pmd_t;
 
#define PMD_SHIFT PUD_SHIFT
#define PTRS_PER_PMD 1
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
 
/*
 * The "pud_xxx()" functions here are trivial for a folded two-level
 * setup: the pmd is never bad, and a pmd always exists (as it's folded
 * into the pud entry)
 */
static inline int pud_none(pud_t pud)    { return 0; }
static inline int pud_bad(pud_t pud)     { return 0; }
static inline int pud_present(pud_t pud) { return 1; }
static inline void pud_clear(pud_t *pud) { }
#define pmd_ERROR(pmd) (pud_ERROR((pmd).pud))
 
#define pud_populate(mm, pmd, pte) do { } while (0)
 
/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) set_pmd((pmd_t *)(pudptr), (pmd_t) { pudval })

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
    return (pmd_t *)pud;
}
 
#define pmd_val(x) (pud_val((x).pud))
#define __pmd(x) ((pmd_t) { __pud(x) } )
 
#define pud_page(pud) (pmd_page((pmd_t){ pud }))
#define pud_page_vaddr(pud) (pmd_page_vaddr((pmd_t){ pud }))
 
/*
 * Allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pud, so it has no extra memory associated with it.
 */
#define pmd_alloc_one(mm, address) NULL
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
}
#define __pmd_free_tlb(tlb, x, a) do { } while (0)
 
#undef pmd_addr_end
#define pmd_addr_end(addr, end) (end)
 
#endif /* __ASSEMBLY__ */
 
#endif /* _PGTABLE_NOPMD_H */
/drivers/include/asm-generic/pgtable-nopud.h
0,0 → 1,61
#ifndef _PGTABLE_NOPUD_H
#define _PGTABLE_NOPUD_H
 
#ifndef __ASSEMBLY__
 
#define __PAGETABLE_PUD_FOLDED
 
/*
 * Having the pud type consist of a pgd gets the size right, and allows
 * us to conceptually access the pgd entry that this pud is folded into
 * without casting.
 */
typedef struct { pgd_t pgd; } pud_t;
 
#define PUD_SHIFT PGDIR_SHIFT
#define PTRS_PER_PUD 1
#define PUD_SIZE (1UL << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE-1))
 
/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pud is never bad, and a pud always exists (as it's folded
 * into the pgd entry)
 */
static inline int pgd_none(pgd_t pgd)    { return 0; }
static inline int pgd_bad(pgd_t pgd)     { return 0; }
static inline int pgd_present(pgd_t pgd) { return 1; }
static inline void pgd_clear(pgd_t *pgd) { }
#define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
 
#define pgd_populate(mm, pgd, pud) do { } while (0)
/*
 * (puds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pgd(pgdptr, pgdval) set_pud((pud_t *)(pgdptr), (pud_t) { pgdval })

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
    return (pud_t *)pgd;
}
 
#define pud_val(x) (pgd_val((x).pgd))
#define __pud(x) ((pud_t) { __pgd(x) } )
 
#define pgd_page(pgd) (pud_page((pud_t){ pgd }))
#define pgd_page_vaddr(pgd) (pud_page_vaddr((pud_t){ pgd }))
 
/*
 * Allocating and freeing a pud is trivial: the 1-entry pud is
 * inside the pgd, so it has no extra memory associated with it.
 */
#define pud_alloc_one(mm, address) NULL
#define pud_free(mm, x) do { } while (0)
#define __pud_free_tlb(tlb, x, a) do { } while (0)
 
#undef pud_addr_end
#define pud_addr_end(addr, end) (end)
 
#endif /* __ASSEMBLY__ */
#endif /* _PGTABLE_NOPUD_H */
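
With both levels folded, a full page-table walk degenerates into pointer casts: pud_offset() and pmd_offset() just hand back the pgd slot. A sketch of the canonical walk (illustrative; assumes a kernel context where pgd_offset() is provided by the arch's pgtable.h):

static pmd_t *demo_walk(struct mm_struct *mm, unsigned long addr)
{
    pgd_t *pgd = pgd_offset(mm, addr);
    pud_t *pud = pud_offset(pgd, addr);   /* == (pud_t *)pgd when folded */

    return pmd_offset(pud, addr);         /* == (pmd_t *)pud when folded */
}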
/drivers/include/asm-generic/ptrace.h
0,0 → 1,74
/*
 * Common low-level (register) ptrace helpers
 *
 * Copyright 2004-2011 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */
 
#ifndef __ASM_GENERIC_PTRACE_H__
#define __ASM_GENERIC_PTRACE_H__
 
#ifndef __ASSEMBLY__
 
/* Helpers for working with the instruction pointer */
#ifndef GET_IP
#define GET_IP(regs) ((regs)->pc)
#endif
#ifndef SET_IP
#define SET_IP(regs, val) (GET_IP(regs) = (val))
#endif
 
static inline unsigned long instruction_pointer(struct pt_regs *regs)
{
    return GET_IP(regs);
}
static inline void instruction_pointer_set(struct pt_regs *regs,
    unsigned long val)
{
    SET_IP(regs, val);
}
 
#ifndef profile_pc
#define profile_pc(regs) instruction_pointer(regs)
#endif
 
/* Helpers for working with the user stack pointer */
#ifndef GET_USP
#define GET_USP(regs) ((regs)->usp)
#endif
#ifndef SET_USP
#define SET_USP(regs, val) (GET_USP(regs) = (val))
#endif
 
static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
    return GET_USP(regs);
}
static inline void user_stack_pointer_set(struct pt_regs *regs,
    unsigned long val)
{
    SET_USP(regs, val);
}
 
/* Helpers for working with the frame pointer */
#ifndef GET_FP
#define GET_FP(regs) ((regs)->fp)
#endif
#ifndef SET_FP
#define SET_FP(regs, val) (GET_FP(regs) = (val))
#endif
 
static inline unsigned long frame_pointer(struct pt_regs *regs)
{
    return GET_FP(regs);
}
static inline void frame_pointer_set(struct pt_regs *regs,
    unsigned long val)
{
    SET_FP(regs, val);
}
 
#endif /* __ASSEMBLY__ */
 
#endif
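
A sketch of how these accessors are typically consumed (illustrative; assumes a kernel context, e.g. inside an exception or profiling hook, and a hypothetical demo_report helper):

static void demo_report(struct pt_regs *regs)
{
    /* Arch-neutral register access via the helpers above. */
    unsigned long ip = instruction_pointer(regs);
    unsigned long sp = user_stack_pointer(regs);

    printk(KERN_INFO "ip=%lx sp=%lx\n", ip, sp);
}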
/drivers/include/asm-generic/types.h
0,0 → 1,42
#ifndef _ASM_GENERIC_TYPES_H
#define _ASM_GENERIC_TYPES_H
/*
 * int-ll64 is used practically everywhere now,
 * so use it as a reasonable default.
 */
#include <asm-generic/int-ll64.h>
 
#ifndef __ASSEMBLY__
 
typedef unsigned short umode_t;
 
#endif /* __ASSEMBLY__ */
 
/*
 * These aren't exported outside the kernel to avoid name space clashes
 */
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
/*
 * DMA addresses may be very different from physical addresses
 * and pointers. i386 and powerpc may have 64-bit DMA on 32-bit
 * systems, while sparc64 uses 32-bit DMA addresses for 64-bit
 * physical addresses.
 * This default defines dma_addr_t to have the same size as
 * phys_addr_t, which is the most common way.
 * Do not define the dma64_addr_t type, which never really
 * worked.
 */
#ifndef dma_addr_t
#ifdef CONFIG_PHYS_ADDR_T_64BIT
typedef u64 dma_addr_t;
#else
typedef u32 dma_addr_t;
#endif /* CONFIG_PHYS_ADDR_T_64BIT */
#endif /* dma_addr_t */
 
#endif /* __ASSEMBLY__ */
 
#endif /* __KERNEL__ */
 
#endif /* _ASM_GENERIC_TYPES_H */