/* ==== file: drivers/include/linux/asm/alternative.h (new file, 164 lines) ==== */
#ifndef _ASM_X86_ALTERNATIVE_H |
#define _ASM_X86_ALTERNATIVE_H |
#include <linux/types.h> |
#include <linux/stddef.h> |
#include <linux/stringify.h> |
#include <asm/asm.h> |
/* |
* Alternative inline assembly for SMP. |
* |
* The LOCK_PREFIX macro defined here replaces the LOCK and |
* LOCK_PREFIX macros used everywhere in the source tree. |
* |
* SMP alternatives use the same data structures as the other |
* alternatives and the X86_FEATURE_UP flag to indicate the case of a |
* UP system running a SMP kernel. The existing apply_alternatives() |
* works fine for patching a SMP kernel for UP. |
* |
* The SMP alternative tables can be kept after boot and contain both |
* UP and SMP versions of the instructions to allow switching back to |
* SMP at runtime, when hotplugging in a new CPU, which is especially |
* useful in virtualized environments. |
* |
* The very common lock prefix is handled as special case in a |
* separate table which is a pure address list without replacement ptr |
* and size information. That keeps the table sizes small. |
*/ |
#ifdef CONFIG_SMP |
#define LOCK_PREFIX \ |
".section .smp_locks,\"a\"\n" \ |
_ASM_ALIGN "\n" \ |
_ASM_PTR "661f\n" /* address */ \ |
".previous\n" \ |
"661:\n\tlock; " |
#else /* ! CONFIG_SMP */ |
#define LOCK_PREFIX "" |
#endif |
/* This must be included *after* the definition of LOCK_PREFIX */ |
#include <asm/cpufeature.h> |
/* One entry of the .altinstructions table consumed by apply_alternatives(). */
struct alt_instr {
	u8 *instr;		/* original instruction */
	u8 *replacement;	/* replacement instruction sequence */
	u8  cpuid;		/* cpuid bit set for replacement */
	u8  instrlen;		/* length of original instruction */
	u8  replacementlen;	/* length of new instruction, <= instrlen */
	u8  pad1;		/* explicit padding, keeps layout/size stable */
#ifdef CONFIG_X86_64
	u32 pad2;		/* pad entry to an 8-byte multiple on 64-bit */
#endif
};
/* Patch all recorded alternative sites once at boot. */
extern void alternative_instructions(void);
/* Patch the [start, end) slice of the .altinstructions table. */
extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);

struct module;

#ifdef CONFIG_SMP
/* Register a module's .smp_locks range so its LOCK prefixes can be
 * patched when switching between UP and SMP operation at runtime. */
extern void alternatives_smp_module_add(struct module *mod, char *name,
					void *locks, void *locks_end,
					void *text, void *text_end);
extern void alternatives_smp_module_del(struct module *mod);
extern void alternatives_smp_switch(int smp);
#else
/* UP-only build: SMP lock patching is compiled out; these are no-ops. */
static inline void alternatives_smp_module_add(struct module *mod, char *name,
					       void *locks, void *locks_end,
					       void *text, void *text_end) {}
static inline void alternatives_smp_module_del(struct module *mod) {}
static inline void alternatives_smp_switch(int smp) {}
#endif	/* CONFIG_SMP */
/*
 * alternative assembly primitive:
 *
 * Emits "oldinstr" inline plus a .altinstructions record telling the
 * boot-time patcher to overwrite it with "newinstr" when the CPU has
 * "feature".  The "0xff + ..." .byte line is a build-time assertion:
 * if the replacement is longer than the original, the expression
 * exceeds a byte and the assembler errors out (rlen <= slen).
 */
#define ALTERNATIVE(oldinstr, newinstr, feature)			\
									\
      "661:\n\t" oldinstr "\n662:\n"					\
      ".section .altinstructions,\"a\"\n"				\
      _ASM_ALIGN "\n"							\
      _ASM_PTR "661b\n"				/* label */		\
      _ASM_PTR "663f\n"				/* new instruction */	\
      " .byte " __stringify(feature) "\n"	/* feature bit */	\
      " .byte 662b-661b\n"			/* sourcelen */		\
      " .byte 664f-663f\n"			/* replacementlen */	\
      " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */	\
      ".previous\n"							\
      ".section .altinstr_replacement, \"ax\"\n"			\
      "663:\n\t" newinstr "\n664:\n"		/* replacement */	\
      ".previous"
/*
 * Alternative instructions for different CPU types or capabilities.
 *
 * This allows to use optimized instructions even on generic binary
 * kernels.
 *
 * length of oldinstr must be longer or equal the length of newinstr
 * It can be padded with nops as needed.
 *
 * For non barrier like inlines please define new variants
 * without volatile and memory clobber.
 */
#define alternative(oldinstr, newinstr, feature)			\
	asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) : : : "memory")

/*
 * Alternative inline assembly with input.
 *
 * Peculiarities:
 * No memory clobber here.
 * Argument numbers start with 1.
 * Best is to use constraints that are fixed size (like (%1) ... "r")
 * If you use variable sized constraints like "m" or "g" in the
 * replacement make sure to pad to the worst case length.
 * Leaving an unused argument 0 to keep API compatibility.
 */
#define alternative_input(oldinstr, newinstr, feature, input...)	\
	asm volatile (ALTERNATIVE(oldinstr, newinstr, feature)		\
		: : "i" (0), ## input)

/* Like alternative_input, but with a single output argument */
#define alternative_io(oldinstr, newinstr, feature, output, input...)	\
	asm volatile (ALTERNATIVE(oldinstr, newinstr, feature)		\
		: output : "i" (0), ## input)

/*
 * use this macro(s) if you need more than one output parameter
 * in alternative_io
 */
#define ASM_OUTPUT2(a, b) a, b
struct paravirt_patch_site;
#ifdef CONFIG_PARAVIRT
/* Patch the paravirt call sites in [start, end) for the running hypervisor. */
void apply_paravirt(struct paravirt_patch_site *start,
		    struct paravirt_patch_site *end);
#else
/* No paravirt support: nothing to patch, empty section bounds. */
static inline void apply_paravirt(struct paravirt_patch_site *start,
				  struct paravirt_patch_site *end)
{}
#define __parainstructions	NULL
#define __parainstructions_end	NULL
#endif

/*
 * Clear and restore the kernel write-protection flag on the local CPU.
 * Allows the kernel to edit read-only pages.
 * Side-effect: any interrupt handler running between save and restore will have
 * the ability to write to read-only pages.
 *
 * Warning:
 * Code patching in the UP case is safe if NMIs and MCE handlers are stopped and
 * no thread can be preempted in the instructions being modified (no iret to an
 * invalid instruction possible) or if the instructions are changed from a
 * consistent state to another consistent state atomically.
 * More care must be taken when modifying code in the SMP case because of
 * Intel's errata.
 * On the local CPU you need to be protected again NMI or MCE handlers seeing an
 * inconsistent instruction while you patch.
 */
/* Copy @len opcode bytes to kernel text at @addr; returns @addr. */
extern void *text_poke(void *addr, const void *opcode, size_t len);
#endif /* _ASM_X86_ALTERNATIVE_H */ |
/* ==== file: drivers/include/linux/asm/asm.h (new file, 55 lines) ==== */
#ifndef _ASM_X86_ASM_H |
#define _ASM_X86_ASM_H |
/* __ASM_FORM() wraps an assembler fragment so the same macro body works
 * both in .S files (emitted raw) and in C inline-asm (quoted string). */
#ifdef __ASSEMBLY__
# define __ASM_FORM(x)	x
# define __ASM_EX_SEC	.section __ex_table, "a"
#else
# define __ASM_FORM(x)	" " #x " "
# define __ASM_EX_SEC	" .section __ex_table,\"a\"\n"
#endif

/* __ASM_SEL(a, b): select the 32-bit form (a) or the 64-bit form (b). */
#ifdef CONFIG_X86_32
# define __ASM_SEL(a,b) __ASM_FORM(a)
#else
# define __ASM_SEL(a,b) __ASM_FORM(b)
#endif

/* Width-suffixed opcode (addl/addq, ...) and natural-width register names. */
#define __ASM_SIZE(inst)	__ASM_SEL(inst##l, inst##q)
#define __ASM_REG(reg)		__ASM_SEL(e##reg, r##reg)

#define _ASM_PTR	__ASM_SEL(.long, .quad)		/* pointer-sized data directive */
#define _ASM_ALIGN	__ASM_SEL(.balign 4, .balign 8)	/* pointer-sized alignment */
#define _ASM_MOV	__ASM_SIZE(mov)
#define _ASM_INC	__ASM_SIZE(inc)
#define _ASM_DEC	__ASM_SIZE(dec)
#define _ASM_ADD	__ASM_SIZE(add)
#define _ASM_SUB	__ASM_SIZE(sub)
#define _ASM_XADD	__ASM_SIZE(xadd)

#define _ASM_AX		__ASM_REG(ax)
#define _ASM_BX		__ASM_REG(bx)
#define _ASM_CX		__ASM_REG(cx)
#define _ASM_DX		__ASM_REG(dx)
#define _ASM_SP		__ASM_REG(sp)
#define _ASM_BP		__ASM_REG(bp)
#define _ASM_SI		__ASM_REG(si)
#define _ASM_DI		__ASM_REG(di)

/* Exception table entry: records a (possibly-faulting insn, fixup) pair
 * in __ex_table so the page-fault handler can resume at the fixup. */
#ifdef __ASSEMBLY__
# define _ASM_EXTABLE(from,to)	    \
	__ASM_EX_SEC ;		    \
	_ASM_ALIGN ;		    \
	_ASM_PTR from , to ;	    \
	.previous
#else
# define _ASM_EXTABLE(from,to) \
	__ASM_EX_SEC	\
	_ASM_ALIGN "\n" \
	_ASM_PTR #from "," #to "\n" \
	" .previous\n"
#endif
#endif /* _ASM_X86_ASM_H */ |
/* ==== file: drivers/include/linux/asm/atomic.h (new file, 5 lines) ==== */
#ifdef CONFIG_X86_32 |
# include "atomic_32.h" |
#else |
# include "atomic_64.h" |
#endif |
/* ==== file: drivers/include/linux/asm/atomic_32.h (new file, 415 lines) ==== */
#ifndef _ASM_X86_ATOMIC_32_H |
#define _ASM_X86_ATOMIC_32_H |
#include <linux/compiler.h> |
#include <linux/types.h> |
//#include <asm/processor.h> |
#include <asm/cmpxchg.h> |
/* |
* Atomic operations that C can't guarantee us. Useful for |
* resource counting etc.. |
*/ |
#define ATOMIC_INIT(i)	{ (i) }

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 *
 * NOTE(review): this plain load assumes atomic_t's counter field is
 * declared volatile in <linux/types.h> so the compiler cannot cache
 * or hoist the access — confirm against that header.
 */
static inline int atomic_read(const atomic_t *v)
{
	return v->counter;
}

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 * (A naturally aligned 32-bit store is atomic on x86; no lock needed.)
 */
static inline void atomic_set(atomic_t *v, int i)
{
	v->counter = i;
}
/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	/* "+m" makes this an in-place RMW; LOCK_PREFIX makes it atomic on SMP */
	asm volatile(LOCK_PREFIX "addl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i));
}

/**
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static inline void atomic_sub(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "subl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i));
}

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static inline int atomic_sub_and_test(int i, atomic_t *v)
{
	unsigned char c;

	/* sete captures ZF from the locked subl: c = (new value == 0) */
	asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
		     : "+m" (v->counter), "=qm" (c)
		     : "ir" (i) : "memory");
	return c;
}

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static inline void atomic_inc(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "incl %0"
		     : "+m" (v->counter));
}

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static inline void atomic_dec(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "decl %0"
		     : "+m" (v->counter));
}

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static inline int atomic_dec_and_test(atomic_t *v)
{
	unsigned char c;

	asm volatile(LOCK_PREFIX "decl %0; sete %1"
		     : "+m" (v->counter), "=qm" (c)
		     : : "memory");
	return c != 0;
}

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static inline int atomic_inc_and_test(atomic_t *v)
{
	unsigned char c;

	asm volatile(LOCK_PREFIX "incl %0; sete %1"
		     : "+m" (v->counter), "=qm" (c)
		     : : "memory");
	return c != 0;
}

/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static inline int atomic_add_negative(int i, atomic_t *v)
{
	unsigned char c;

	/* sets captures SF from the locked addl: c = (new value < 0) */
	asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
		     : "+m" (v->counter), "=qm" (c)
		     : "ir" (i) : "memory");
	return c;
}
/**
 * atomic_add_return - add integer and return
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	int __i;
#ifdef CONFIG_M386
	unsigned long flags;
	/* Plain 386 has no xadd instruction; take the irq-off path below. */
	if (unlikely(boot_cpu_data.x86 <= 3))
		goto no_xadd;
#endif
	/* Modern 486+ processor */
	__i = i;
	/* xaddl leaves the counter's OLD value in i, so old + delta results */
	asm volatile(LOCK_PREFIX "xaddl %0, %1"
		     : "+r" (i), "+m" (v->counter)
		     : : "memory");
	return i + __i;

#ifdef CONFIG_M386
no_xadd: /* Legacy 386 processor: emulate with interrupts disabled (UP only) */
	local_irq_save(flags);
	__i = atomic_read(v);
	atomic_set(v, i + __i);
	local_irq_restore(flags);
	return i + __i;
#endif
}
/**
 * atomic_sub_return - subtract integer and return
 * @v: pointer of type atomic_t
 * @i: integer value to subtract
 *
 * Atomically subtracts @i from @v and returns @v - @i
 */
static inline int atomic_sub_return(int i, atomic_t *v)
{
	return atomic_add_return(-i, v);
}

/* Compare-and-swap on the counter; returns the value previously in *v
 * (equal to @old on success). */
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return cmpxchg(&v->counter, old, new);
}

/* Unconditionally exchange the counter; returns the previous value. */
static inline int atomic_xchg(atomic_t *v, int new)
{
	return xchg(&v->counter, new);
}
/** |
* atomic_add_unless - add unless the number is already a given value |
* @v: pointer of type atomic_t |
* @a: the amount to add to v... |
* @u: ...unless v is equal to u. |
* |
* Atomically adds @a to @v, so long as @v was not already @u. |
* Returns non-zero if @v was not @u, and zero otherwise. |
*/ |
static inline int atomic_add_unless(atomic_t *v, int a, int u) |
{ |
int c, old; |
c = atomic_read(v); |
for (;;) { |
if (unlikely(c == (u))) |
break; |
old = atomic_cmpxchg((v), c, c + (a)); |
if (likely(old == c)) |
break; |
c = old; |
} |
return c != (u); |
} |
/* Increment unless the counter is zero; non-zero return means it was done. */
#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)

#define atomic_inc_return(v)	(atomic_add_return(1, v))
#define atomic_dec_return(v)	(atomic_sub_return(1, v))

/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr)				\
	asm volatile(LOCK_PREFIX "andl %0,%1"			\
		     : : "r" (~(mask)), "m" (*(addr)) : "memory")

#define atomic_set_mask(mask, addr)				\
	asm volatile(LOCK_PREFIX "orl %0,%1"			\
		     : : "r" (mask), "m" (*(addr)) : "memory")

/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

/* A 64bit atomic type; 8-byte alignment is what cmpxchg8b-based
 * accessors operate on. */
typedef struct {
	u64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(val)	{ (val) }
extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val); |
/** |
* atomic64_xchg - xchg atomic64 variable |
* @ptr: pointer to type atomic64_t |
* @new_val: value to assign |
* |
* Atomically xchgs the value of @ptr to @new_val and returns |
* the old value. |
*/ |
extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val); |
/** |
* atomic64_set - set atomic64 variable |
* @ptr: pointer to type atomic64_t |
* @new_val: value to assign |
* |
* Atomically sets the value of @ptr to @new_val. |
*/ |
extern void atomic64_set(atomic64_t *ptr, u64 new_val); |
/** |
* atomic64_read - read atomic64 variable |
* @ptr: pointer to type atomic64_t |
* |
* Atomically reads the value of @ptr and returns it. |
*/ |
static inline u64 atomic64_read(atomic64_t *ptr) |
{ |
u64 res; |
/* |
* Note, we inline this atomic64_t primitive because |
* it only clobbers EAX/EDX and leaves the others |
* untouched. We also (somewhat subtly) rely on the |
* fact that cmpxchg8b returns the current 64-bit value |
* of the memory location we are touching: |
*/ |
asm volatile( |
"mov %%ebx, %%eax\n\t" |
"mov %%ecx, %%edx\n\t" |
LOCK_PREFIX "cmpxchg8b %1\n" |
: "=&A" (res) |
: "m" (*ptr) |
); |
return res; |
} |
extern u64 atomic64_read(atomic64_t *ptr); |
/** |
* atomic64_add_return - add and return |
* @delta: integer value to add |
* @ptr: pointer to type atomic64_t |
* |
* Atomically adds @delta to @ptr and returns @delta + *@ptr |
*/ |
extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr); |
/* |
* Other variants with different arithmetic operators: |
*/ |
extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr); |
extern u64 atomic64_inc_return(atomic64_t *ptr); |
extern u64 atomic64_dec_return(atomic64_t *ptr); |
/** |
* atomic64_add - add integer to atomic64 variable |
* @delta: integer value to add |
* @ptr: pointer to type atomic64_t |
* |
* Atomically adds @delta to @ptr. |
*/ |
extern void atomic64_add(u64 delta, atomic64_t *ptr); |
/** |
* atomic64_sub - subtract the atomic64 variable |
* @delta: integer value to subtract |
* @ptr: pointer to type atomic64_t |
* |
* Atomically subtracts @delta from @ptr. |
*/ |
extern void atomic64_sub(u64 delta, atomic64_t *ptr); |
/** |
* atomic64_sub_and_test - subtract value from variable and test result |
* @delta: integer value to subtract |
* @ptr: pointer to type atomic64_t |
* |
* Atomically subtracts @delta from @ptr and returns |
* true if the result is zero, or false for all |
* other cases. |
*/ |
extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr); |
/** |
* atomic64_inc - increment atomic64 variable |
* @ptr: pointer to type atomic64_t |
* |
* Atomically increments @ptr by 1. |
*/ |
extern void atomic64_inc(atomic64_t *ptr); |
/** |
* atomic64_dec - decrement atomic64 variable |
* @ptr: pointer to type atomic64_t |
* |
* Atomically decrements @ptr by 1. |
*/ |
extern void atomic64_dec(atomic64_t *ptr); |
/** |
* atomic64_dec_and_test - decrement and test |
* @ptr: pointer to type atomic64_t |
* |
* Atomically decrements @ptr by 1 and |
* returns true if the result is 0, or false for all other |
* cases. |
*/ |
extern int atomic64_dec_and_test(atomic64_t *ptr); |
/** |
* atomic64_inc_and_test - increment and test |
* @ptr: pointer to type atomic64_t |
* |
* Atomically increments @ptr by 1 |
* and returns true if the result is zero, or false for all |
* other cases. |
*/ |
extern int atomic64_inc_and_test(atomic64_t *ptr); |
/** |
* atomic64_add_negative - add and test if negative |
* @delta: integer value to add |
* @ptr: pointer to type atomic64_t |
* |
* Atomically adds @delta to @ptr and returns true |
* if the result is negative, or false when |
* result is greater than or equal to zero. |
*/ |
extern int atomic64_add_negative(u64 delta, atomic64_t *ptr); |
#include <asm-generic/atomic-long.h> |
#endif /* _ASM_X86_ATOMIC_32_H */ |
/* ==== file: drivers/include/linux/asm/bitops.h (new file, 465 lines) ==== */
#ifndef _ASM_X86_BITOPS_H |
#define _ASM_X86_BITOPS_H |
/* |
* Copyright 1992, Linus Torvalds. |
* |
* Note: inlines with more than a single statement should be marked |
* __always_inline to avoid problems with older gcc's inlining heuristics. |
*/ |
#ifndef _LINUX_BITOPS_H |
#error only <linux/bitops.h> can be included directly |
#endif |
#include <linux/compiler.h> |
#include <asm/alternative.h> |
/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
   versions. */
#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
#else
/* "+m": the word is both read and written in place by the asm. */
#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
#endif

/* Shorthand used by the bitops below, which all name their pointer "addr". */
#define ADDR				BITOP_ADDR(addr)

/*
 * We do the locked ops that don't return the old value as
 * a mask operation on a byte.
 */
#define IS_IMMEDIATE(nr)		(__builtin_constant_p(nr))
/* Byte containing bit nr, and the bit's mask within that byte. */
#define CONST_MASK_ADDR(nr, addr)	BITOP_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK(nr)			(1 << ((nr) & 7))
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __always_inline void
set_bit(unsigned int nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		/* Compile-time-constant bit: a locked byte-wide "or" avoids
		 * computing the bit offset at runtime. */
		asm volatile(LOCK_PREFIX "orb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr))
			: "memory");
	} else {
		asm volatile(LOCK_PREFIX "bts %1,%0"
			: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
	}
}

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(int nr, volatile unsigned long *addr)
{
	asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __always_inline void
clear_bit(int nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		/* Constant bit: locked byte-wide "and" with the inverted mask */
		asm volatile(LOCK_PREFIX "andb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)~CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX "btr %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and implies release semantics before the memory
 * operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
{
	barrier();	/* compiler barrier provides the release ordering on x86 */
	clear_bit(nr, addr);
}

/* Non-atomic variant of clear_bit(). */
static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
}

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit() is non-atomic and implies release semantics before the memory
 * operation. It can be used for an unlock if no other CPUs can concurrently
 * modify other bits in the word.
 *
 * No memory barrier is required here, because x86 cannot reorder stores past
 * older loads. Same principle as spin_unlock.
 */
static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
{
	barrier();
	__clear_bit(nr, addr);
}

/* x86 atomics are already serializing: pure compiler barriers suffice. */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(int nr, volatile unsigned long *addr)
{
	asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		/* Constant bit: locked byte-wide "xor" toggles it directly */
		asm volatile(LOCK_PREFIX "xorb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX "btc %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}
/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	/* bts puts the old bit in CF; "sbb %0,%0" expands CF to 0 or -1 */
	asm volatile(LOCK_PREFIX "bts %2,%1\n\t"
		     "sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");

	return oldbit;
}

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on x86.
 */
static __always_inline int
test_and_set_bit_lock(int nr, volatile unsigned long *addr)
{
	return test_and_set_bit(nr, addr);
}

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	asm("bts %2,%1\n\t"
	    "sbb %0,%0"
	    : "=r" (oldbit), ADDR
	    : "Ir" (nr));
	return oldbit;
}
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	/* btr puts the old bit in CF; "sbb %0,%0" expands CF to 0 or -1 */
	asm volatile(LOCK_PREFIX "btr %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");

	return oldbit;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	asm volatile("btr %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR
		     : "Ir" (nr));
	return oldbit;
}

/* WARNING: non atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	asm volatile("btc %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR
		     : "Ir" (nr) : "memory");

	return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	asm volatile(LOCK_PREFIX "btc %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");

	return oldbit;
}
static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr) |
{ |
return ((1UL << (nr % BITS_PER_LONG)) & |
(((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0; |
} |
/* test_bit() for a runtime @nr: bt sets CF to the bit value and
 * "sbb %0,%0" expands CF to 0 or -1. */
static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
{
	int oldbit;

	asm volatile("bt %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit)
		     : "m" (*(unsigned long *)addr), "Ir" (nr));

	return oldbit;
}

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile unsigned long *addr);
#endif

/* Dispatch to the constant-folding or the bt-based variant. */
#define test_bit(nr, addr)			\
	(__builtin_constant_p((nr))		\
	 ? constant_test_bit((nr), (addr))	\
	 : variable_test_bit((nr), (addr)))
/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 * (bsf leaves the destination undefined when the source is zero.)
 */
static inline unsigned long __ffs(unsigned long word)
{
	asm("bsf %1,%0"
		: "=r" (word)
		: "rm" (word));
	return word;
}

/**
 * ffz - find first zero bit in word
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 * (Implemented as __ffs of the complemented word.)
 */
static inline unsigned long ffz(unsigned long word)
{
	asm("bsf %1,%0"
		: "=r" (word)
		: "r" (~word));
	return word;
}

/*
 * __fls: find last set bit in word
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static inline unsigned long __fls(unsigned long word)
{
	asm("bsr %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
}
#ifdef __KERNEL__ |
/** |
* ffs - find first set bit in word |
* @x: the word to search |
* |
* This is defined the same way as the libc and compiler builtin ffs |
* routines, therefore differs in spirit from the other bitops. |
* |
* ffs(value) returns 0 if value is 0 or the position of the first |
* set bit if value is nonzero. The first (least significant) bit |
* is at position 1. |
*/ |
static inline int ffs(int x) |
{ |
int r; |
#ifdef CONFIG_X86_CMOV |
asm("bsfl %1,%0\n\t" |
"cmovzl %2,%0" |
: "=r" (r) : "rm" (x), "r" (-1)); |
#else |
asm("bsfl %1,%0\n\t" |
"jnz 1f\n\t" |
"movl $-1,%0\n" |
"1:" : "=r" (r) : "rm" (x)); |
#endif |
return r + 1; |
} |
/**
 * fls - find last set bit in word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffs, but returns the position of the most significant set bit.
 *
 * fls(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 32.
 */
static inline int fls(int x)
{
	int r;
#ifdef CONFIG_X86_CMOV
	/* "=&r" early-clobber: bsrl writes %0 before cmovzl reads %2. */
	asm("bsrl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "rm" (-1));
#else
	/* No cmov: branch around a movl $-1 when bsrl found a bit (ZF=0). */
	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}
#endif /* __KERNEL__ */ |
#undef ADDR |
#ifdef __KERNEL__ |
#include <asm-generic/bitops/sched.h> |
#define ARCH_HAS_FAST_MULTIPLIER 1 |
#include <asm-generic/bitops/hweight.h> |
#endif /* __KERNEL__ */ |
#include <asm-generic/bitops/fls64.h> |
#ifdef __KERNEL__ |
#include <asm-generic/bitops/ext2-non-atomic.h> |
#define ext2_set_bit_atomic(lock, nr, addr) \ |
test_and_set_bit((nr), (unsigned long *)(addr)) |
#define ext2_clear_bit_atomic(lock, nr, addr) \ |
test_and_clear_bit((nr), (unsigned long *)(addr)) |
#include <asm-generic/bitops/minix.h> |
#endif /* __KERNEL__ */ |
#endif /* _ASM_X86_BITOPS_H */ |
/* ==== file: drivers/include/linux/asm/bitsperlong.h (new file, 13 lines) ==== */
#ifndef __ASM_X86_BITSPERLONG_H |
#define __ASM_X86_BITSPERLONG_H |
#ifdef __x86_64__ |
# define __BITS_PER_LONG 64 |
#else |
# define __BITS_PER_LONG 32 |
#endif |
#include <asm-generic/bitsperlong.h> |
#endif /* __ASM_X86_BITSPERLONG_H */ |
/* ==== file: drivers/include/linux/asm/byteorder.h (new file, 6 lines) ==== */
#ifndef _ASM_X86_BYTEORDER_H |
#define _ASM_X86_BYTEORDER_H |
#include <linux/byteorder/little_endian.h> |
#endif /* _ASM_X86_BYTEORDER_H */ |
/* ==== file: drivers/include/linux/asm/cmpxchg.h (new file, 5 lines) ==== */
#ifdef CONFIG_X86_32 |
# include "cmpxchg_32.h" |
#else |
# include "cmpxchg_64.h" |
#endif |
/* ==== file: drivers/include/linux/asm/cmpxchg_32.h (new file, 274 lines) ==== */
#ifndef _ASM_X86_CMPXCHG_32_H |
#define _ASM_X86_CMPXCHG_32_H |
#include <linux/bitops.h> /* for LOCK_PREFIX */ |
/* |
* Note: if you use set64_bit(), __cmpxchg64(), or their variants, you |
* you need to test for the feature in boot_cpu_data. |
*/ |
/* Link-time diagnostic: declared but never defined, so using __xchg()
 * on an unsupported operand size turns into a link error. */
extern void __xchg_wrong_size(void);

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 *	  but generally the primitive is invalid, *ptr is output argument. --ANK
 */
struct __xchg_dummy {
	unsigned long a[100];
};
/* __xg() retypes the pointer so the "m" constraint is seen as covering a
 * wide region, keeping the compiler from caching *ptr around the asm. */
#define __xg(x) ((struct __xchg_dummy *)(x))

#define __xchg(x, ptr, size)						\
({									\
	__typeof(*(ptr)) __x = (x);					\
	switch (size) {							\
	case 1:								\
		asm volatile("xchgb %b0,%1"				\
			     : "=q" (__x)				\
			     : "m" (*__xg(ptr)), "0" (__x)		\
			     : "memory");				\
		break;							\
	case 2:								\
		asm volatile("xchgw %w0,%1"				\
			     : "=r" (__x)				\
			     : "m" (*__xg(ptr)), "0" (__x)		\
			     : "memory");				\
		break;							\
	case 4:								\
		asm volatile("xchgl %0,%1"				\
			     : "=r" (__x)				\
			     : "m" (*__xg(ptr)), "0" (__x)		\
			     : "memory");				\
		break;							\
	default:							\
		__xchg_wrong_size();					\
	}								\
	__x;								\
})

/* Atomically store @v into *@ptr and return the previous value. */
#define xchg(ptr, v)							\
	__xchg((v), (ptr), sizeof(*ptr))
/* |
* The semantics of XCHGCMP8B are a bit strange, this is why |
* there is a loop and the loading of %%eax and %%edx has to |
* be inside. This inlines well in most cases, the cached |
* cost is around ~38 cycles. (in the future we might want |
* to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that |
* might have an implicit FPU-save as a cost, so it's not |
* clear which path to go.) |
* |
* cmpxchg8b must be used with the lock prefix here to allow |
* the instruction to be executed atomically, see page 3-102 |
* of the instruction set reference 24319102.pdf. We need |
* the reader side to see the coherent 64bit value. |
*/ |
/*
 * Atomically store the 64-bit value high:low at *ptr on a 32-bit CPU.
 * Loads the current value into edx:eax and retries cmpxchg8b until it
 * succeeds; the lock prefix makes the 64-bit store appear atomic to
 * readers (see the comment block above about cmpxchg8b).
 * Clobbers eax/edx, which hold the discarded old value.
 */
static inline void __set_64bit(unsigned long long *ptr,
unsigned int low, unsigned int high)
{
asm volatile("\n1:\t"
"movl (%0), %%eax\n\t"
"movl 4(%0), %%edx\n\t"
LOCK_PREFIX "cmpxchg8b (%0)\n\t"
"jnz 1b"
: /* no outputs */
: "D"(ptr),
"b"(low),
"c"(high)
: "ax", "dx", "memory");
}
/*
 * Atomically store a compile-time-constant 64-bit value at *ptr by
 * splitting it into its low and high 32-bit halves for __set_64bit().
 */
static inline void __set_64bit_constant(unsigned long long *ptr,
unsigned long long value)
{
unsigned int lo = (unsigned int)value;
unsigned int hi = (unsigned int)(value >> 32);
__set_64bit(ptr, lo, hi);
}
/*
 * Access the low/high 32-bit halves of a 64-bit lvalue in place.
 * NOTE: relies on little-endian layout, which holds on x86.
 */
#define ll_low(x) *(((unsigned int *)&(x)) + 0)
#define ll_high(x) *(((unsigned int *)&(x)) + 1)
/* Atomically store a runtime (non-constant) 64-bit value at *ptr. */
static inline void __set_64bit_var(unsigned long long *ptr,
unsigned long long value)
{
__set_64bit(ptr, ll_low(value), ll_high(value));
}
/*
 * set_64bit(ptr, value) - atomically store a 64-bit value; dispatches at
 * compile time to the constant or variable helper.  Both end in the same
 * cmpxchg8b loop, so the split is purely a code-generation aid.
 */
#define set_64bit(ptr, value) \
(__builtin_constant_p((value)) \
? __set_64bit_constant((ptr), (value)) \
: __set_64bit_var((ptr), (value)))
/* Older spelling of set_64bit(); calls __set_64bit() directly. */
#define _set_64bit(ptr, value) \
(__builtin_constant_p(value) \
? __set_64bit(ptr, (unsigned int)(value), \
(unsigned int)((value) >> 32)) \
: __set_64bit(ptr, ll_low((value)), ll_high((value))))
extern void __cmpxchg_wrong_size(void); |
/* |
* Atomic compare and exchange. Compare OLD with MEM, if identical, |
* store NEW in MEM. Return the initial value in MEM. Success is |
* indicated by comparing RETURN with OLD. |
*/ |
/*
 * __raw_cmpxchg(ptr, old, new, size, lock) - compare-and-swap primitive.
 * If *ptr == old, store new; in all cases evaluate to the value that was
 * in *ptr (cmpxchg leaves it in eax/"=a").  'lock' is the textual lock
 * prefix to prepend ("" for the CPU-local variant).  Unsupported sizes
 * reach the deliberately-undefined __cmpxchg_wrong_size() and fail at
 * link time.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) __old = (old); \
__typeof__(*(ptr)) __new = (new); \
switch (size) { \
case 1: \
asm volatile(lock "cmpxchgb %b1,%2" \
: "=a"(__ret) \
: "q"(__new), "m"(*__xg(ptr)), "0"(__old) \
: "memory"); \
break; \
case 2: \
asm volatile(lock "cmpxchgw %w1,%2" \
: "=a"(__ret) \
: "r"(__new), "m"(*__xg(ptr)), "0"(__old) \
: "memory"); \
break; \
case 4: \
asm volatile(lock "cmpxchgl %1,%2" \
: "=a"(__ret) \
: "r"(__new), "m"(*__xg(ptr)), "0"(__old) \
: "memory"); \
break; \
default: \
__cmpxchg_wrong_size(); \
} \
__ret; \
})
/* SMP-safe cmpxchg: LOCK_PREFIX is patched away on UP kernels. */
#define __cmpxchg(ptr, old, new, size) \
__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
/* Always-locked variant: literal "lock;", never patched out. */
#define __sync_cmpxchg(ptr, old, new, size) \
__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")
/* CPU-local variant: no lock prefix at all. */
#define __cmpxchg_local(ptr, old, new, size) \
__raw_cmpxchg((ptr), (old), (new), (size), "")
#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
/*
 * cmpxchg(ptr, old, new) - if *ptr == old, store new; return the value
 * previously in *ptr.  Success is detected by comparing the result with
 * old.  The operand size is deduced from the pointed-to type.
 *
 * Fix: (ptr) is parenthesized inside sizeof so that argument expressions
 * such as "p + i" expand with the intended precedence; the previous
 * sizeof(*ptr) would expand to sizeof(*p + i).
 */
#define cmpxchg(ptr, old, new) \
__cmpxchg((ptr), (old), (new), sizeof(*(ptr)))
#define sync_cmpxchg(ptr, old, new) \
__sync_cmpxchg((ptr), (old), (new), sizeof(*(ptr)))
#define cmpxchg_local(ptr, old, new) \
__cmpxchg_local((ptr), (old), (new), sizeof(*(ptr)))
#endif
#ifdef CONFIG_X86_CMPXCHG64 |
#define cmpxchg64(ptr, o, n) \ |
((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \ |
(unsigned long long)(n))) |
#define cmpxchg64_local(ptr, o, n) \ |
((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \ |
(unsigned long long)(n))) |
#endif |
/*
 * 64-bit compare-and-swap via locked cmpxchg8b: if *ptr == old, store
 * new; return the 64-bit value previously at *ptr (edx:eax, "=A").
 * new is supplied in ecx:ebx as the instruction requires.
 */
static inline unsigned long long __cmpxchg64(volatile void *ptr,
unsigned long long old,
unsigned long long new)
{
unsigned long long prev;
asm volatile(LOCK_PREFIX "cmpxchg8b %3"
: "=A"(prev)
: "b"((unsigned long)new),
"c"((unsigned long)(new >> 32)),
"m"(*__xg(ptr)),
"0"(old)
: "memory");
return prev;
}
/*
 * CPU-local 64-bit compare-and-swap: identical to __cmpxchg64() but
 * without the lock prefix, so it is atomic only against code running on
 * the same CPU (e.g. per-CPU data).
 */
static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
unsigned long long old,
unsigned long long new)
{
unsigned long long prev;
asm volatile("cmpxchg8b %3"
: "=A"(prev)
: "b"((unsigned long)new),
"c"((unsigned long)(new >> 32)),
"m"(*__xg(ptr)),
"0"(old)
: "memory");
return prev;
}
#ifndef CONFIG_X86_CMPXCHG |
/* |
* Building a kernel capable running on 80386. It may be necessary to |
* simulate the cmpxchg on the 80386 CPU. For that purpose we define |
* a function for each of the sizes we support. |
*/ |
/*
 * 386-capable kernels: route through __cmpxchg()/__cmpxchg_local() with
 * the arguments widened to unsigned long and the result cast back to the
 * pointed-to type.  (On a real 386 the cmpxchg instruction is absent;
 * presumably an emulation path handles that case elsewhere — not visible
 * in this file.)
 */
#define cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
__ret = (__typeof__(*(ptr)))__cmpxchg((ptr), \
(unsigned long)(o), (unsigned long)(n), \
sizeof(*(ptr))); \
__ret; \
})
#define cmpxchg_local(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
__ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr), \
(unsigned long)(o), (unsigned long)(n), \
sizeof(*(ptr))); \
__ret; \
})
#endif |
#ifndef CONFIG_X86_CMPXCHG64 |
/* |
* Building a kernel capable running on 80386 and 80486. It may be necessary |
* to simulate the cmpxchg8b on the 80386 and 80486 CPU. |
*/ |
extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64); |
/*
 * cmpxchg64 on kernels that may run on CPUs without cmpxchg8b: the
 * alternative_io() patch site executes "lock; cmpxchg8b" when
 * X86_FEATURE_CX8 is present, otherwise calls the cmpxchg8b_emu
 * software fallback.  The pointer travels in esi, old in edx:eax,
 * new in ecx:ebx; the previous value comes back in edx:eax ("=A").
 */
#define cmpxchg64(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) __old = (o); \
__typeof__(*(ptr)) __new = (n); \
alternative_io("call cmpxchg8b_emu", \
"lock; cmpxchg8b (%%esi)" , \
X86_FEATURE_CX8, \
"=A" (__ret), \
"S" ((ptr)), "0" (__old), \
"b" ((unsigned int)__new), \
"c" ((unsigned int)(__new>>32)) \
: "memory"); \
__ret; })
/*
 * Local variant: runtime family check instead of instruction patching;
 * family > 4 has cmpxchg8b, otherwise use the 486 software emulation.
 */
#define cmpxchg64_local(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
if (likely(boot_cpu_data.x86 > 4)) \
__ret = (__typeof__(*(ptr)))__cmpxchg64_local((ptr), \
(unsigned long long)(o), \
(unsigned long long)(n)); \
else \
__ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr), \
(unsigned long long)(o), \
(unsigned long long)(n)); \
__ret; \
})
#endif |
#endif /* _ASM_X86_CMPXCHG_32_H */ |
/drivers/include/linux/asm/cpufeature.h |
---|
0,0 → 1,283 |
/* |
* Defines x86 CPU feature bits |
*/ |
#ifndef _ASM_X86_CPUFEATURE_H |
#define _ASM_X86_CPUFEATURE_H |
#include <asm/required-features.h> |
#define NCAPINTS 9 /* N 32-bit words worth of info */ |
/* |
* Note: If the comment begins with a quoted string, that string is used |
* in /proc/cpuinfo instead of the macro name. If the string is "", |
* this feature bit is not displayed in /proc/cpuinfo at all. |
*/ |
/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ |
#define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */ |
#define X86_FEATURE_VME (0*32+ 1) /* Virtual Mode Extensions */ |
#define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */ |
#define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */ |
#define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */ |
#define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers */ |
#define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */ |
#define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Exception */ |
#define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */ |
#define X86_FEATURE_APIC (0*32+ 9) /* Onboard APIC */ |
#define X86_FEATURE_SEP (0*32+11) /* SYSENTER/SYSEXIT */ |
#define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */ |
#define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */ |
#define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */ |
#define X86_FEATURE_CMOV (0*32+15) /* CMOV instructions */ |
/* (plus FCMOVcc, FCOMI with FPU) */ |
#define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */ |
#define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */ |
#define X86_FEATURE_PN (0*32+18) /* Processor serial number */ |
#define X86_FEATURE_CLFLSH (0*32+19) /* "clflush" CLFLUSH instruction */ |
#define X86_FEATURE_DS (0*32+21) /* "dts" Debug Store */ |
#define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */ |
#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ |
#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */ |
#define X86_FEATURE_XMM (0*32+25) /* "sse" */ |
#define X86_FEATURE_XMM2 (0*32+26) /* "sse2" */ |
#define X86_FEATURE_SELFSNOOP (0*32+27) /* "ss" CPU self snoop */ |
#define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */ |
#define X86_FEATURE_ACC (0*32+29) /* "tm" Automatic clock control */ |
#define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */ |
#define X86_FEATURE_PBE (0*32+31) /* Pending Break Enable */ |
/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ |
/* Don't duplicate feature flags which are redundant with Intel! */ |
#define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */ |
#define X86_FEATURE_MP (1*32+19) /* MP Capable. */ |
#define X86_FEATURE_NX (1*32+20) /* Execute Disable */ |
#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */ |
#define X86_FEATURE_FXSR_OPT (1*32+25) /* FXSAVE/FXRSTOR optimizations */ |
#define X86_FEATURE_GBPAGES (1*32+26) /* "pdpe1gb" GB pages */ |
#define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */ |
#define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */ |
#define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */ |
#define X86_FEATURE_3DNOW (1*32+31) /* 3DNow! */ |
/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */ |
#define X86_FEATURE_RECOVERY (2*32+ 0) /* CPU in recovery mode */ |
#define X86_FEATURE_LONGRUN (2*32+ 1) /* Longrun power control */ |
#define X86_FEATURE_LRTI (2*32+ 3) /* LongRun table interface */ |
/* Other features, Linux-defined mapping, word 3 */ |
/* This range is used for feature bits which conflict or are synthesized */ |
#define X86_FEATURE_CXMMX (3*32+ 0) /* Cyrix MMX extensions */ |
#define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */ |
#define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */ |
#define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */ |
/* cpu types for specific tunings: */ |
#define X86_FEATURE_K8 (3*32+ 4) /* "" Opteron, Athlon64 */ |
#define X86_FEATURE_K7 (3*32+ 5) /* "" Athlon */ |
#define X86_FEATURE_P3 (3*32+ 6) /* "" P3 */ |
#define X86_FEATURE_P4 (3*32+ 7) /* "" P4 */ |
#define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */ |
#define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ |
#define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* "" FXSAVE leaks FOP/FIP/FOP */ |
#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */ |
#define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */ |
#define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */ |
#define X86_FEATURE_SYSCALL32 (3*32+14) /* "" syscall in ia32 userspace */ |
#define X86_FEATURE_SYSENTER32 (3*32+15) /* "" sysenter in ia32 userspace */ |
#define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well */ |
#define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* "" Mfence synchronizes RDTSC */ |
#define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* "" Lfence synchronizes RDTSC */ |
#define X86_FEATURE_11AP (3*32+19) /* "" Bad local APIC aka 11AP */ |
#define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */ |
#define X86_FEATURE_AMDC1E (3*32+21) /* AMD C1E detected */ |
#define X86_FEATURE_XTOPOLOGY (3*32+22) /* cpu topology enum extensions */ |
#define X86_FEATURE_TSC_RELIABLE (3*32+23) /* TSC is known to be reliable */ |
#define X86_FEATURE_NONSTOP_TSC (3*32+24) /* TSC does not stop in C states */ |
#define X86_FEATURE_CLFLUSH_MONITOR (3*32+25) /* "" clflush reqd with monitor */ |
#define X86_FEATURE_EXTD_APICID (3*32+26) /* has extended APICID (8 bits) */ |
#define X86_FEATURE_AMD_DCM (3*32+27) /* multi-node processor */ |
#define X86_FEATURE_APERFMPERF (3*32+28) /* APERFMPERF */ |
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ |
#define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */ |
#define X86_FEATURE_PCLMULQDQ (4*32+ 1) /* PCLMULQDQ instruction */ |
#define X86_FEATURE_DTES64 (4*32+ 2) /* 64-bit Debug Store */ |
#define X86_FEATURE_MWAIT (4*32+ 3) /* "monitor" Monitor/Mwait support */ |
#define X86_FEATURE_DSCPL (4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */ |
#define X86_FEATURE_VMX (4*32+ 5) /* Hardware virtualization */ |
#define X86_FEATURE_SMX (4*32+ 6) /* Safer mode */ |
#define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */ |
#define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */ |
#define X86_FEATURE_SSSE3 (4*32+ 9) /* Supplemental SSE-3 */ |
#define X86_FEATURE_CID (4*32+10) /* Context ID */ |
#define X86_FEATURE_FMA (4*32+12) /* Fused multiply-add */ |
#define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */ |
#define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */ |
#define X86_FEATURE_PDCM (4*32+15) /* Performance Capabilities */ |
#define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */ |
#define X86_FEATURE_XMM4_1 (4*32+19) /* "sse4_1" SSE-4.1 */ |
#define X86_FEATURE_XMM4_2 (4*32+20) /* "sse4_2" SSE-4.2 */ |
#define X86_FEATURE_X2APIC (4*32+21) /* x2APIC */ |
#define X86_FEATURE_MOVBE (4*32+22) /* MOVBE instruction */ |
#define X86_FEATURE_POPCNT (4*32+23) /* POPCNT instruction */ |
#define X86_FEATURE_AES (4*32+25) /* AES instructions */ |
#define X86_FEATURE_XSAVE (4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */ |
#define X86_FEATURE_OSXSAVE (4*32+27) /* "" XSAVE enabled in the OS */ |
#define X86_FEATURE_AVX (4*32+28) /* Advanced Vector Extensions */ |
#define X86_FEATURE_HYPERVISOR (4*32+31) /* Running on a hypervisor */ |
/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ |
#define X86_FEATURE_XSTORE (5*32+ 2) /* "rng" RNG present (xstore) */ |
#define X86_FEATURE_XSTORE_EN (5*32+ 3) /* "rng_en" RNG enabled */ |
#define X86_FEATURE_XCRYPT (5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ |
#define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* "ace_en" on-CPU crypto enabled */ |
#define X86_FEATURE_ACE2 (5*32+ 8) /* Advanced Cryptography Engine v2 */ |
#define X86_FEATURE_ACE2_EN (5*32+ 9) /* ACE v2 enabled */ |
#define X86_FEATURE_PHE (5*32+10) /* PadLock Hash Engine */ |
#define X86_FEATURE_PHE_EN (5*32+11) /* PHE enabled */ |
#define X86_FEATURE_PMM (5*32+12) /* PadLock Montgomery Multiplier */ |
#define X86_FEATURE_PMM_EN (5*32+13) /* PMM enabled */ |
/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ |
#define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */ |
#define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */ |
#define X86_FEATURE_SVM (6*32+ 2) /* Secure virtual machine */ |
#define X86_FEATURE_EXTAPIC (6*32+ 3) /* Extended APIC space */ |
#define X86_FEATURE_CR8_LEGACY (6*32+ 4) /* CR8 in 32-bit mode */ |
#define X86_FEATURE_ABM (6*32+ 5) /* Advanced bit manipulation */ |
#define X86_FEATURE_SSE4A (6*32+ 6) /* SSE-4A */ |
#define X86_FEATURE_MISALIGNSSE (6*32+ 7) /* Misaligned SSE mode */ |
#define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */ |
#define X86_FEATURE_OSVW (6*32+ 9) /* OS Visible Workaround */ |
#define X86_FEATURE_IBS (6*32+10) /* Instruction Based Sampling */ |
#define X86_FEATURE_SSE5 (6*32+11) /* SSE-5 */ |
#define X86_FEATURE_SKINIT (6*32+12) /* SKINIT/STGI instructions */ |
#define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */ |
/* |
* Auxiliary flags: Linux defined - For features scattered in various |
* CPUID levels like 0x6, 0xA etc |
*/ |
#define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */ |
#define X86_FEATURE_ARAT (7*32+ 1) /* Always Running APIC Timer */ |
/* Virtualization flags: Linux defined */ |
#define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */ |
#define X86_FEATURE_VNMI (8*32+ 1) /* Intel Virtual NMI */ |
#define X86_FEATURE_FLEXPRIORITY (8*32+ 2) /* Intel FlexPriority */ |
#define X86_FEATURE_EPT (8*32+ 3) /* Intel Extended Page Table */ |
#define X86_FEATURE_VPID (8*32+ 4) /* Intel Virtual Processor ID */ |
#if defined(__KERNEL__) && !defined(__ASSEMBLY__) |
#include <linux/bitops.h> |
extern const char * const x86_cap_flags[NCAPINTS*32]; |
extern const char * const x86_power_flags[32]; |
/* Raw test of a capability bit in a cpuinfo structure's bitmap. */
#define test_cpu_cap(c, bit) \
test_bit(bit, (unsigned long *)((c)->x86_capability))
/*
 * cpu_has(c, bit) - test a CPU feature.  When 'bit' is a compile-time
 * constant that the kernel's REQUIRED_MASKn (required-features.h) already
 * guarantees, this folds to constant 1 with no runtime test; otherwise it
 * falls back to test_cpu_cap().  bit>>5 selects the 32-bit capability
 * word, bit&31 the position within it.
 */
#define cpu_has(c, bit) \
(__builtin_constant_p(bit) && \
( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \
(((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) || \
(((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) || \
(((bit)>>5)==3 && (1UL<<((bit)&31) & REQUIRED_MASK3)) || \
(((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4)) || \
(((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5)) || \
(((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \
(((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) ) \
? 1 : \
test_cpu_cap(c, bit))
/* Convenience: test/modify bits on the boot CPU's capability bitmap. */
#define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit)
#define set_cpu_cap(c, bit) set_bit(bit, (unsigned long *)((c)->x86_capability))
#define clear_cpu_cap(c, bit) clear_bit(bit, (unsigned long *)((c)->x86_capability))
/* Clear a cap on the boot CPU and record it in cpu_caps_cleared so the
 * clearing is reapplied to CPUs brought up later (per the set_bit call). */
#define setup_clear_cpu_cap(bit) do { \
clear_cpu_cap(&boot_cpu_data, bit); \
set_bit(bit, (unsigned long *)cpu_caps_cleared); \
} while (0)
/* Force a cap on, recording it in cpu_caps_set likewise. */
#define setup_force_cpu_cap(bit) do { \
set_cpu_cap(&boot_cpu_data, bit); \
set_bit(bit, (unsigned long *)cpu_caps_set); \
} while (0)
#define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU) |
#define cpu_has_vme boot_cpu_has(X86_FEATURE_VME) |
#define cpu_has_de boot_cpu_has(X86_FEATURE_DE) |
#define cpu_has_pse boot_cpu_has(X86_FEATURE_PSE) |
#define cpu_has_tsc boot_cpu_has(X86_FEATURE_TSC) |
#define cpu_has_pae boot_cpu_has(X86_FEATURE_PAE) |
#define cpu_has_pge boot_cpu_has(X86_FEATURE_PGE) |
#define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC) |
#define cpu_has_sep boot_cpu_has(X86_FEATURE_SEP) |
#define cpu_has_mtrr boot_cpu_has(X86_FEATURE_MTRR) |
#define cpu_has_mmx boot_cpu_has(X86_FEATURE_MMX) |
#define cpu_has_fxsr boot_cpu_has(X86_FEATURE_FXSR) |
#define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM) |
#define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2) |
#define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3) |
#define cpu_has_aes boot_cpu_has(X86_FEATURE_AES) |
#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT) |
#define cpu_has_mp boot_cpu_has(X86_FEATURE_MP) |
#define cpu_has_nx boot_cpu_has(X86_FEATURE_NX) |
#define cpu_has_k6_mtrr boot_cpu_has(X86_FEATURE_K6_MTRR) |
#define cpu_has_cyrix_arr boot_cpu_has(X86_FEATURE_CYRIX_ARR) |
#define cpu_has_centaur_mcr boot_cpu_has(X86_FEATURE_CENTAUR_MCR) |
#define cpu_has_xstore boot_cpu_has(X86_FEATURE_XSTORE) |
#define cpu_has_xstore_enabled boot_cpu_has(X86_FEATURE_XSTORE_EN) |
#define cpu_has_xcrypt boot_cpu_has(X86_FEATURE_XCRYPT) |
#define cpu_has_xcrypt_enabled boot_cpu_has(X86_FEATURE_XCRYPT_EN) |
#define cpu_has_ace2 boot_cpu_has(X86_FEATURE_ACE2) |
#define cpu_has_ace2_enabled boot_cpu_has(X86_FEATURE_ACE2_EN) |
#define cpu_has_phe boot_cpu_has(X86_FEATURE_PHE) |
#define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN) |
#define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM) |
#define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN) |
#define cpu_has_ds boot_cpu_has(X86_FEATURE_DS) |
#define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS) |
#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH) |
#define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS) |
#define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES) |
#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON) |
#define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT) |
#define cpu_has_xmm4_1 boot_cpu_has(X86_FEATURE_XMM4_1) |
#define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2) |
#define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC) |
#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE) |
#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR) |
#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ) |
#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64) |
# define cpu_has_invlpg 1 |
#else |
# define cpu_has_invlpg (boot_cpu_data.x86 > 3) |
#endif |
#ifdef CONFIG_X86_64 |
#undef cpu_has_vme |
#define cpu_has_vme 0 |
#undef cpu_has_pae |
#define cpu_has_pae ___BUG___ |
#undef cpu_has_mp |
#define cpu_has_mp 1 |
#undef cpu_has_k6_mtrr |
#define cpu_has_k6_mtrr 0 |
#undef cpu_has_cyrix_arr |
#define cpu_has_cyrix_arr 0 |
#undef cpu_has_centaur_mcr |
#define cpu_has_centaur_mcr 0 |
#endif /* CONFIG_X86_64 */ |
#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */ |
#endif /* _ASM_X86_CPUFEATURE_H */ |
/drivers/include/linux/asm/posix_types.h |
---|
0,0 → 1,13 |
#ifdef __KERNEL__ |
# ifdef CONFIG_X86_32 |
# include "posix_types_32.h" |
# else |
# include "posix_types_64.h" |
# endif |
#else |
# ifdef __i386__ |
# include "posix_types_32.h" |
# else |
# include "posix_types_64.h" |
# endif |
#endif |
/drivers/include/linux/asm/posix_types_32.h |
---|
0,0 → 1,85 |
#ifndef _ASM_X86_POSIX_TYPES_32_H |
#define _ASM_X86_POSIX_TYPES_32_H |
/* |
* This file is generally used by user-level software, so you need to |
* be a little careful about namespace pollution etc. Also, we cannot |
* assume GCC is being used. |
*/ |
typedef unsigned long __kernel_ino_t; |
typedef unsigned short __kernel_mode_t; |
typedef unsigned short __kernel_nlink_t; |
typedef long __kernel_off_t; |
typedef int __kernel_pid_t; |
typedef unsigned short __kernel_ipc_pid_t; |
typedef unsigned short __kernel_uid_t; |
typedef unsigned short __kernel_gid_t; |
typedef unsigned int __kernel_size_t; |
typedef int __kernel_ssize_t; |
typedef int __kernel_ptrdiff_t; |
typedef long __kernel_time_t; |
typedef long __kernel_suseconds_t; |
typedef long __kernel_clock_t; |
typedef int __kernel_timer_t; |
typedef int __kernel_clockid_t; |
typedef int __kernel_daddr_t; |
typedef char * __kernel_caddr_t; |
typedef unsigned short __kernel_uid16_t; |
typedef unsigned short __kernel_gid16_t; |
typedef unsigned int __kernel_uid32_t; |
typedef unsigned int __kernel_gid32_t; |
typedef unsigned short __kernel_old_uid_t; |
typedef unsigned short __kernel_old_gid_t; |
typedef unsigned short __kernel_old_dev_t; |
#ifdef __GNUC__ |
typedef long long __kernel_loff_t; |
#endif |
typedef struct { |
int val[2]; |
} __kernel_fsid_t; |
#if defined(__KERNEL__) |
/* Set bit 'fd' in the fd_set using a single bts instruction. */
#undef __FD_SET
#define __FD_SET(fd,fdsetp) \
asm volatile("btsl %1,%0": \
"+m" (*(__kernel_fd_set *)(fdsetp)) \
: "r" ((int)(fd)))
/* Clear bit 'fd' in the fd_set using a single btr instruction. */
#undef __FD_CLR
#define __FD_CLR(fd,fdsetp) \
asm volatile("btrl %1,%0": \
"+m" (*(__kernel_fd_set *)(fdsetp)) \
: "r" ((int) (fd)))
/* Test bit 'fd': bt sets the carry flag, setb materializes it as 0/1. */
#undef __FD_ISSET
#define __FD_ISSET(fd,fdsetp) \
(__extension__ \
({ \
unsigned char __result; \
asm volatile("btl %1,%2 ; setb %0" \
: "=q" (__result) \
: "r" ((int)(fd)), \
"m" (*(__kernel_fd_set *)(fdsetp))); \
__result; \
})) 
/* Zero the whole fd_set: rep stosl over __FDSET_LONGS 32-bit words. */
#undef __FD_ZERO
#define __FD_ZERO(fdsetp) \
do { \
int __d0, __d1; \
asm volatile("cld ; rep ; stosl" \
: "=m" (*(__kernel_fd_set *)(fdsetp)), \
"=&c" (__d0), "=&D" (__d1) \
: "a" (0), "1" (__FDSET_LONGS), \
"2" ((__kernel_fd_set *)(fdsetp)) \
: "memory"); \
} while (0)
#endif /* defined(__KERNEL__) */ |
#endif /* _ASM_X86_POSIX_TYPES_32_H */ |
/drivers/include/linux/asm/required-features.h |
---|
0,0 → 1,88 |
#ifndef _ASM_X86_REQUIRED_FEATURES_H |
#define _ASM_X86_REQUIRED_FEATURES_H |
/* Define minimum CPUID feature set for kernel These bits are checked |
really early to actually display a visible error message before the |
kernel dies. Make sure to assign features to the proper mask! |
Some requirements that are not in CPUID yet are also in the |
CONFIG_X86_MINIMUM_CPU_FAMILY which is checked too. |
The real information is in arch/x86/Kconfig.cpu, this just converts |
the CONFIGs into a bitmask */ |
#ifndef CONFIG_MATH_EMULATION |
# define NEED_FPU (1<<(X86_FEATURE_FPU & 31)) |
#else |
# define NEED_FPU 0 |
#endif |
#if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64) |
# define NEED_PAE (1<<(X86_FEATURE_PAE & 31)) |
#else |
# define NEED_PAE 0 |
#endif |
#ifdef CONFIG_X86_CMPXCHG64 |
# define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31)) |
#else |
# define NEED_CX8 0 |
#endif |
#if defined(CONFIG_X86_CMOV) || defined(CONFIG_X86_64) |
# define NEED_CMOV (1<<(X86_FEATURE_CMOV & 31)) |
#else |
# define NEED_CMOV 0 |
#endif |
#ifdef CONFIG_X86_USE_3DNOW |
# define NEED_3DNOW (1<<(X86_FEATURE_3DNOW & 31)) |
#else |
# define NEED_3DNOW 0 |
#endif |
#if defined(CONFIG_X86_P6_NOP) || defined(CONFIG_X86_64) |
# define NEED_NOPL (1<<(X86_FEATURE_NOPL & 31)) |
#else |
# define NEED_NOPL 0 |
#endif |
#ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT
/* Paravirtualized systems may not have PSE or PGE available */
#define NEED_PSE 0
#define NEED_PGE 0
#else
/*
 * Fix: mask the feature *number* to its position within a 32-bit word
 * first, then shift — matching every other NEED_* definition in this
 * file.  The previous form, (1<<(X86_FEATURE_PGE) & 31), shifted first
 * and then ANDed the resulting mask with 31, which evaluates to 0 for
 * PGE (bit 13) and silently dropped the PGE requirement.  (PSE, bit 3,
 * happened to produce the right value by accident.)
 */
#define NEED_PSE (1<<(X86_FEATURE_PSE & 31))
#define NEED_PGE (1<<(X86_FEATURE_PGE & 31))
#endif
#define NEED_MSR (1<<(X86_FEATURE_MSR & 31))
#define NEED_FXSR (1<<(X86_FEATURE_FXSR & 31))
#define NEED_XMM (1<<(X86_FEATURE_XMM & 31))
#define NEED_XMM2 (1<<(X86_FEATURE_XMM2 & 31))
#define NEED_LM (1<<(X86_FEATURE_LM & 31))
#else
#define NEED_PSE 0
#define NEED_MSR 0
#define NEED_PGE 0
#define NEED_FXSR 0
#define NEED_XMM 0
#define NEED_XMM2 0
#define NEED_LM 0
#endif
#define REQUIRED_MASK0 (NEED_FPU|NEED_PSE|NEED_MSR|NEED_PAE|\ |
NEED_CX8|NEED_PGE|NEED_FXSR|NEED_CMOV|\ |
NEED_XMM|NEED_XMM2) |
#define SSE_MASK (NEED_XMM|NEED_XMM2) |
#define REQUIRED_MASK1 (NEED_LM|NEED_3DNOW) |
#define REQUIRED_MASK2 0 |
#define REQUIRED_MASK3 (NEED_NOPL) |
#define REQUIRED_MASK4 0 |
#define REQUIRED_MASK5 0 |
#define REQUIRED_MASK6 0 |
#define REQUIRED_MASK7 0 |
#endif /* _ASM_X86_REQUIRED_FEATURES_H */ |
/drivers/include/linux/asm/spinlock_types.h |
---|
0,0 → 1,20 |
#ifndef _ASM_X86_SPINLOCK_TYPES_H |
#define _ASM_X86_SPINLOCK_TYPES_H |
#ifndef __LINUX_SPINLOCK_TYPES_H |
# error "please don't include this file directly" |
#endif |
/* Core spinlock word; 0 is the unlocked state (see initializer below). */
typedef struct raw_spinlock {
unsigned int slock;
} raw_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
/* Reader/writer lock word; the unlocked value RW_LOCK_BIAS is defined
 * elsewhere (not visible in this header). */
typedef struct {
unsigned int lock;
} raw_rwlock_t;
#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
#endif /* _ASM_X86_SPINLOCK_TYPES_H */ |
/drivers/include/linux/asm/string.h |
---|
0,0 → 1,5 |
#ifdef CONFIG_X86_32 |
# include "string_32.h" |
#else |
# include "string_64.h" |
#endif |
/drivers/include/linux/asm/string_32.h |
---|
0,0 → 1,342 |
#ifndef _ASM_X86_STRING_32_H |
#define _ASM_X86_STRING_32_H |
#ifdef __KERNEL__ |
/* Let gcc decide whether to inline or use the out of line functions */ |
#define __HAVE_ARCH_STRCPY |
extern char *strcpy(char *dest, const char *src); |
#define __HAVE_ARCH_STRNCPY |
extern char *strncpy(char *dest, const char *src, size_t count); |
#define __HAVE_ARCH_STRCAT |
extern char *strcat(char *dest, const char *src); |
#define __HAVE_ARCH_STRNCAT |
extern char *strncat(char *dest, const char *src, size_t count); |
#define __HAVE_ARCH_STRCMP |
extern int strcmp(const char *cs, const char *ct); |
#define __HAVE_ARCH_STRNCMP |
extern int strncmp(const char *cs, const char *ct, size_t count); |
#define __HAVE_ARCH_STRCHR |
extern char *strchr(const char *s, int c); |
#define __HAVE_ARCH_STRLEN |
extern size_t strlen(const char *s); |
/*
 * Copy n bytes from 'from' to 'to' (non-overlapping): rep movsl moves
 * n/4 words, then the 0-3 remaining bytes are moved with rep movsb
 * (skipped via jz when n is a multiple of 4).  Returns 'to'.
 * d0/d1/d2 only absorb the clobbered ecx/edi/esi.
 */
static __always_inline void *__memcpy(void *to, const void *from, size_t n)
{
int d0, d1, d2;
asm volatile("rep ; movsl\n\t"
"movl %4,%%ecx\n\t"
"andl $3,%%ecx\n\t"
"jz 1f\n\t"
"rep ; movsb\n\t"
"1:"
: "=&c" (d0), "=&D" (d1), "=&S" (d2)
: "0" (n / 4), "g" (n), "1" ((long)to), "2" ((long)from)
: "memory");
return to;
}
/* |
* This looks ugly, but the compiler can optimize it totally, |
* as the count is constant. |
*/ |
/*
 * memcpy for a compile-time-constant n.  Because n is constant the
 * compiler drops every branch but one: tiny sizes (0-6, 8) become plain
 * scalar stores, >= 20 bytes uses rep movsl, 4-19 bytes uses up to four
 * unrolled movsl, and the 0-3 byte tail is finished with movsw/movsb.
 * Returns 'to'.
 */
static __always_inline void *__constant_memcpy(void *to, const void *from,
size_t n)
{
long esi, edi;
if (!n)
return to;
/* Tiny constant sizes: ordinary stores, no string instructions. */
switch (n) {
case 1:
*(char *)to = *(char *)from;
return to;
case 2:
*(short *)to = *(short *)from;
return to;
case 4:
*(int *)to = *(int *)from;
return to;
case 3:
*(short *)to = *(short *)from;
*((char *)to + 2) = *((char *)from + 2);
return to;
case 5:
*(int *)to = *(int *)from;
*((char *)to + 4) = *((char *)from + 4);
return to;
case 6:
*(int *)to = *(int *)from;
*((short *)to + 2) = *((short *)from + 2);
return to;
case 8:
*(int *)to = *(int *)from;
*((int *)to + 1) = *((int *)from + 1);
return to;
}
esi = (long)from;
edi = (long)to;
if (n >= 5 * 4) {
/* large block: use rep prefix */
int ecx;
asm volatile("rep ; movsl"
: "=&c" (ecx), "=&D" (edi), "=&S" (esi)
: "0" (n / 4), "1" (edi), "2" (esi)
: "memory"
);
} else {
/* small block: don't clobber ecx + smaller code */
if (n >= 4 * 4)
asm volatile("movsl"
: "=&D"(edi), "=&S"(esi)
: "0"(edi), "1"(esi)
: "memory");
if (n >= 3 * 4)
asm volatile("movsl"
: "=&D"(edi), "=&S"(esi)
: "0"(edi), "1"(esi)
: "memory");
if (n >= 2 * 4)
asm volatile("movsl"
: "=&D"(edi), "=&S"(esi)
: "0"(edi), "1"(esi)
: "memory");
if (n >= 1 * 4)
asm volatile("movsl"
: "=&D"(edi), "=&S"(esi)
: "0"(edi), "1"(esi)
: "memory");
}
/* 0-3 remaining bytes; n is constant so only one case survives. */
switch (n % 4) {
/* tail */
case 0:
return to;
case 1:
asm volatile("movsb"
: "=&D"(edi), "=&S"(esi)
: "0"(edi), "1"(esi)
: "memory");
return to;
case 2:
asm volatile("movsw"
: "=&D"(edi), "=&S"(esi)
: "0"(edi), "1"(esi)
: "memory");
return to;
default:
asm volatile("movsw\n\tmovsb"
: "=&D"(edi), "=&S"(esi)
: "0"(edi), "1"(esi)
: "memory");
return to;
}
}
#define __HAVE_ARCH_MEMCPY |
#ifdef CONFIG_X86_USE_3DNOW |
#include <asm/mmx.h> |
/* |
* This CPU favours 3DNow strongly (eg AMD Athlon) |
*/ |
/*
 * Constant-size memcpy on a 3DNow!-capable CPU: short copies stay on the
 * inlined scalar path, 512 bytes and up go through the MMX copy routine.
 */
static inline void *__constant_memcpy3d(void *to, const void *from, size_t len)
{
return (len < 512) ? __constant_memcpy(to, from, len)
: _mmx_memcpy(to, from, len);
}
/*
 * Variable-size memcpy on a 3DNow!-capable CPU: short copies use the
 * rep-movs implementation, 512 bytes and up use the MMX copy routine.
 */
static inline void *__memcpy3d(void *to, const void *from, size_t len)
{
return (len < 512) ? __memcpy(to, from, len)
: _mmx_memcpy(to, from, len);
}
#define memcpy(t, f, n) \ |
(__builtin_constant_p((n)) \ |
? __constant_memcpy3d((t), (f), (n)) \ |
: __memcpy3d((t), (f), (n))) |
#else |
/* |
* No 3D Now! |
*/ |
#ifndef CONFIG_KMEMCHECK |
#if (__GNUC__ >= 4) |
#define memcpy(t, f, n) __builtin_memcpy(t, f, n) |
#else |
#define memcpy(t, f, n) \ |
(__builtin_constant_p((n)) \ |
? __constant_memcpy((t), (f), (n)) \ |
: __memcpy((t), (f), (n))) |
#endif |
#else |
/* |
* kmemcheck becomes very happy if we use the REP instructions unconditionally, |
* because it means that we know both memory operands in advance. |
*/ |
#define memcpy(t, f, n) __memcpy((t), (f), (n)) |
#endif |
#endif |
#define __HAVE_ARCH_MEMMOVE |
void *memmove(void *dest, const void *src, size_t n); |
#define memcmp __builtin_memcmp |
#define __HAVE_ARCH_MEMCHR |
extern void *memchr(const void *cs, int c, size_t count); |
/*
 * Byte-at-a-time memset via rep stosb: fill count bytes at s with c.
 * Returns s.  d0/d1 only absorb the clobbered ecx/edi.
 */
static inline void *__memset_generic(void *s, char c, size_t count)
{
int d0, d1;
asm volatile("rep\n\t"
"stosb"
: "=&c" (d0), "=&D" (d1)
: "a" (c), "1" (s), "0" (count)
: "memory");
return s;
}
/* we might want to write optimized versions of these later */ |
#define __constant_count_memset(s, c, count) __memset_generic((s), (c), (count)) |
/* |
* memset(x, 0, y) is a reasonably common thing to do, so we want to fill |
* things 32 bits at a time even when we don't know the size of the |
* area at compile-time.. |
*/ |
/*
 * memset with a constant byte pattern replicated into a 32-bit word
 * (c == 0x01010101 * byte): rep stosl fills count/4 words, then the
 * testb/stosw/stosb sequence writes the remaining 2- and 1-byte tails
 * as indicated by the low two bits of count.  Returns s.
 */
static __always_inline
void *__constant_c_memset(void *s, unsigned long c, size_t count)
{
int d0, d1;
asm volatile("rep ; stosl\n\t"
"testb $2,%b3\n\t"
"je 1f\n\t"
"stosw\n"
"1:\ttestb $1,%b3\n\t"
"je 2f\n\t"
"stosb\n"
"2:"
: "=&c" (d0), "=&D" (d1)
: "a" (c), "q" (count), "0" (count/4), "1" ((long)s)
: "memory");
return s;
}
/* Added by Gertjan van Wingerde to make minix and sysv module work */ |
#define __HAVE_ARCH_STRNLEN |
extern size_t strnlen(const char *s, size_t count); |
/* end of additional stuff */ |
#define __HAVE_ARCH_STRSTR |
extern char *strstr(const char *cs, const char *ct); |
/* |
* This looks horribly ugly, but the compiler can optimize it totally, |
* as we by now know that both pattern and count is constant.. |
*/ |
/*
 * memset where both the replicated 32-bit pattern and the count are
 * compile-time constants.  Counts 0-4 collapse to direct stores; larger
 * counts use rep stosl for the word part, with the COMMON() helper
 * appending a constant stosw/stosb tail chosen by count % 4.  Returns s.
 */
static __always_inline
void *__constant_c_and_count_memset(void *s, unsigned long pattern,
size_t count)
{
/* Constant counts 0-4: plain stores, truncating the pattern to fit. */
switch (count) {
case 0:
return s;
case 1:
*(unsigned char *)s = pattern & 0xff;
return s;
case 2:
*(unsigned short *)s = pattern & 0xffff;
return s;
case 3:
*(unsigned short *)s = pattern & 0xffff;
*((unsigned char *)s + 2) = pattern & 0xff;
return s;
case 4:
*(unsigned long *)s = pattern;
return s;
}
/* Emit "rep ; stosl" plus the extra tail instructions passed in x. */
#define COMMON(x) \
asm volatile("rep ; stosl" \
x \
: "=&c" (d0), "=&D" (d1) \
: "a" (eax), "0" (count/4), "1" ((long)s) \
: "memory")
{
int d0, d1;
#if __GNUC__ == 4 && __GNUC_MINOR__ == 0
/* Workaround for broken gcc 4.0 */
register unsigned long eax asm("%eax") = pattern;
#else
unsigned long eax = pattern;
#endif
/* count is constant, so only one tail variant is ever emitted. */
switch (count % 4) {
case 0:
COMMON("");
return s;
case 1:
COMMON("\n\tstosb");
return s;
case 2:
COMMON("\n\tstosw");
return s;
default:
COMMON("\n\tstosw\n\tstosb");
return s;
}
}
#undef COMMON
}
/*
 * Constant fill byte: pick the fully-constant expansion when the count
 * is also a compile-time constant, otherwise the constant-pattern loop.
 */
#define __constant_c_x_memset(s, c, count) \
	(__builtin_constant_p(count) \
	 ? __constant_c_and_count_memset((s), (c), (count)) \
	 : __constant_c_memset((s), (c), (count)))
/*
 * Non-constant fill byte: only the count can still be exploited at
 * compile time (currently both paths end up in __memset_generic).
 */
#define __memset(s, c, count) \
	(__builtin_constant_p(count) \
	 ? __constant_count_memset((s), (c), (count)) \
	 : __memset_generic((s), (c), (count)))
#define __HAVE_ARCH_MEMSET
#if (__GNUC__ >= 4)
/* gcc >= 4 expands __builtin_memset at least as well as the hand-rolled
 * variants above, so defer to the compiler. */
#define memset(s, c, count) __builtin_memset(s, c, count)
#else
/* Replicate the fill byte into all four bytes of a long so the helpers
 * can store 32 bits at a time. */
#define memset(s, c, count) \
	(__builtin_constant_p(c) \
	 ? __constant_c_x_memset((s), (0x01010101UL * (unsigned char)(c)), \
				 (count)) \
	 : __memset((s), (c), (count)))
#endif
/* |
* find the first occurrence of byte 'c', or 1 past the area if none |
*/ |
#define __HAVE_ARCH_MEMSCAN |
extern void *memscan(void *addr, int c, size_t size); |
#endif /* __KERNEL__ */ |
#endif /* _ASM_X86_STRING_32_H */ |
/drivers/include/linux/asm/swab.h |
---|
0,0 → 1,61 |
#ifndef _ASM_X86_SWAB_H |
#define _ASM_X86_SWAB_H |
#include <linux/types.h> |
#include <linux/compiler.h> |
/*
 * Byte-swap a 32-bit value (endianness conversion).
 */
static inline __attribute_const__ __u32 __arch_swab32(__u32 val)
{
#ifdef __i386__
# ifdef CONFIG_X86_BSWAP
	/* i486 and later have the single-instruction BSWAP. */
	asm("bswap %0" : "=r" (val) : "0" (val));
# else
	/* Plain i386 lacks BSWAP: swap the bytes of each half, rotate
	 * the halves past each other, then swap bytes again.  The "q"
	 * constraint is required so %b0/%h0 (AL/AH-style subregisters)
	 * exist for the chosen register. */
	asm("xchgb %b0,%h0\n\t"	/* swap lower bytes */
	    "rorl $16,%0\n\t"	/* swap words */
	    "xchgb %b0,%h0"	/* swap higher bytes */
	    : "=q" (val)
	    : "0" (val));
# endif
#else /* __i386__ */
	asm("bswapl %0"
	    : "=r" (val)
	    : "0" (val));
#endif
	return val;
}
#define __arch_swab32 __arch_swab32 |
/*
 * Byte-swap a 64-bit value.
 */
static inline __attribute_const__ __u64 __arch_swab64(__u64 val)
{
#ifdef __i386__
	/* 32-bit: work on the two 32-bit halves through a union. */
	union {
		struct {
			__u32 a;
			__u32 b;
		} s;
		__u64 u;
	} v;
	v.u = val;
# ifdef CONFIG_X86_BSWAP
	/* Byte-swap both halves, then exchange them. */
	asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
	    : "=r" (v.s.a), "=r" (v.s.b)
	    : "0" (v.s.a), "1" (v.s.b));
# else
	v.s.a = __arch_swab32(v.s.a);
	v.s.b = __arch_swab32(v.s.b);
	/* The halves are already swapped internally; this xchgl just
	 * exchanges the two registers to swap the halves themselves. */
	asm("xchgl %0,%1"
	    : "=r" (v.s.a), "=r" (v.s.b)
	    : "0" (v.s.a), "1" (v.s.b));
# endif
	return v.u;
#else /* __i386__ */
	asm("bswapq %0"
	    : "=r" (val)
	    : "0" (val));
	return val;
#endif
}
#define __arch_swab64 __arch_swab64 |
#endif /* _ASM_X86_SWAB_H */ |
/drivers/include/linux/asm/types.h |
---|
0,0 → 1,22 |
#ifndef _ASM_X86_TYPES_H |
#define _ASM_X86_TYPES_H |
#define dma_addr_t dma_addr_t |
#include <asm-generic/types.h> |
#ifdef __KERNEL__ |
#ifndef __ASSEMBLY__ |
typedef u64 dma64_addr_t; |
#if defined(CONFIG_X86_64) || defined(CONFIG_HIGHMEM64G) |
/* DMA addresses come in 32-bit and 64-bit flavours. */ |
typedef u64 dma_addr_t; |
#else |
typedef u32 dma_addr_t; |
#endif |
#endif /* __ASSEMBLY__ */ |
#endif /* __KERNEL__ */ |
#endif /* _ASM_X86_TYPES_H */ |
/drivers/include/linux/asm-generic/atomic-long.h |
---|
0,0 → 1,258 |
#ifndef _ASM_GENERIC_ATOMIC_LONG_H |
#define _ASM_GENERIC_ATOMIC_LONG_H |
/* |
* Copyright (C) 2005 Silicon Graphics, Inc. |
* Christoph Lameter |
* |
* Allows to provide arch independent atomic definitions without the need to |
* edit all arch specific atomic.h files. |
*/ |
#include <asm/types.h> |
/* |
* Suppport for atomic_long_t |
* |
* Casts for parameters are avoided for existing atomic functions in order to |
* avoid issues with cast-as-lval under gcc 4.x and other limitations that the |
* macros of a platform may have. |
*/ |
#if BITS_PER_LONG == 64 |
typedef atomic64_t atomic_long_t; |
#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) |
/* Basic accessors, implemented on top of atomic64_t (BITS_PER_LONG == 64). */
static inline long atomic_long_read(atomic_long_t *l)
{
	return (long)atomic64_read((atomic64_t *)l);
}
static inline void atomic_long_set(atomic_long_t *l, long i)
{
	atomic64_set((atomic64_t *)l, i);
}
static inline void atomic_long_inc(atomic_long_t *l)
{
	atomic64_inc((atomic64_t *)l);
}
static inline void atomic_long_dec(atomic_long_t *l)
{
	atomic64_dec((atomic64_t *)l);
}
static inline void atomic_long_add(long i, atomic_long_t *l)
{
	atomic64_add(i, (atomic64_t *)l);
}
static inline void atomic_long_sub(long i, atomic_long_t *l)
{
	atomic64_sub(i, (atomic64_t *)l);
}
/* Test variants: forward to atomic64 and return its truth value. */
static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
{
	return atomic64_sub_and_test(i, (atomic64_t *)l);
}
static inline int atomic_long_dec_and_test(atomic_long_t *l)
{
	return atomic64_dec_and_test((atomic64_t *)l);
}
static inline int atomic_long_inc_and_test(atomic_long_t *l)
{
	return atomic64_inc_and_test((atomic64_t *)l);
}
static inline int atomic_long_add_negative(long i, atomic_long_t *l)
{
	return atomic64_add_negative(i, (atomic64_t *)l);
}
/* Value-returning variants: the atomic64 result is narrowed to long. */
static inline long atomic_long_add_return(long i, atomic_long_t *l)
{
	return (long)atomic64_add_return(i, (atomic64_t *)l);
}
static inline long atomic_long_sub_return(long i, atomic_long_t *l)
{
	return (long)atomic64_sub_return(i, (atomic64_t *)l);
}
static inline long atomic_long_inc_return(atomic_long_t *l)
{
	return (long)atomic64_inc_return((atomic64_t *)l);
}
static inline long atomic_long_dec_return(atomic_long_t *l)
{
	return (long)atomic64_dec_return((atomic64_t *)l);
}
static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
{
	return (long)atomic64_add_unless((atomic64_t *)l, a, u);
}
#define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l)) |
#define atomic_long_cmpxchg(l, old, new) \ |
(atomic64_cmpxchg((atomic64_t *)(l), (old), (new))) |
#define atomic_long_xchg(v, new) \ |
(atomic64_xchg((atomic64_t *)(v), (new))) |
#else /* BITS_PER_LONG == 64 */ |
typedef atomic_t atomic_long_t; |
#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) |
/* Basic accessors, implemented on top of atomic_t (BITS_PER_LONG == 32). */
static inline long atomic_long_read(atomic_long_t *l)
{
	return (long)atomic_read((atomic_t *)l);
}
static inline void atomic_long_set(atomic_long_t *l, long i)
{
	atomic_set((atomic_t *)l, i);
}
static inline void atomic_long_inc(atomic_long_t *l)
{
	atomic_inc((atomic_t *)l);
}
static inline void atomic_long_dec(atomic_long_t *l)
{
	atomic_dec((atomic_t *)l);
}
static inline void atomic_long_add(long i, atomic_long_t *l)
{
	atomic_add(i, (atomic_t *)l);
}
static inline void atomic_long_sub(long i, atomic_long_t *l)
{
	atomic_sub(i, (atomic_t *)l);
}
/* Test variants: forward to atomic_t and return its truth value. */
static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
{
	return atomic_sub_and_test(i, (atomic_t *)l);
}
static inline int atomic_long_dec_and_test(atomic_long_t *l)
{
	return atomic_dec_and_test((atomic_t *)l);
}
static inline int atomic_long_inc_and_test(atomic_long_t *l)
{
	return atomic_inc_and_test((atomic_t *)l);
}
static inline int atomic_long_add_negative(long i, atomic_long_t *l)
{
	return atomic_add_negative(i, (atomic_t *)l);
}
/* Value-returning variants: the atomic_t result is widened to long. */
static inline long atomic_long_add_return(long i, atomic_long_t *l)
{
	return (long)atomic_add_return(i, (atomic_t *)l);
}
static inline long atomic_long_sub_return(long i, atomic_long_t *l)
{
	return (long)atomic_sub_return(i, (atomic_t *)l);
}
static inline long atomic_long_inc_return(atomic_long_t *l)
{
	return (long)atomic_inc_return((atomic_t *)l);
}
static inline long atomic_long_dec_return(atomic_long_t *l)
{
	return (long)atomic_dec_return((atomic_t *)l);
}
static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
{
	return (long)atomic_add_unless((atomic_t *)l, a, u);
}
#define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l)) |
#define atomic_long_cmpxchg(l, old, new) \ |
(atomic_cmpxchg((atomic_t *)(l), (old), (new))) |
#define atomic_long_xchg(v, new) \ |
(atomic_xchg((atomic_t *)(v), (new))) |
#endif /* BITS_PER_LONG == 64 */ |
#endif /* _ASM_GENERIC_ATOMIC_LONG_H */ |
/drivers/include/linux/asm-generic/bitops/ext2-non-atomic.h |
---|
0,0 → 1,20 |
#ifndef _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ |
#define _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ |
#include <asm-generic/bitops/le.h> |
#define ext2_set_bit(nr,addr) \ |
generic___test_and_set_le_bit((nr),(unsigned long *)(addr)) |
#define ext2_clear_bit(nr,addr) \ |
generic___test_and_clear_le_bit((nr),(unsigned long *)(addr)) |
#define ext2_test_bit(nr,addr) \ |
generic_test_le_bit((nr),(unsigned long *)(addr)) |
#define ext2_find_first_zero_bit(addr, size) \ |
generic_find_first_zero_le_bit((unsigned long *)(addr), (size)) |
#define ext2_find_next_zero_bit(addr, size, off) \ |
generic_find_next_zero_le_bit((unsigned long *)(addr), (size), (off)) |
#define ext2_find_next_bit(addr, size, off) \ |
generic_find_next_le_bit((unsigned long *)(addr), (size), (off)) |
#endif /* _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ */ |
/drivers/include/linux/asm-generic/bitops/fls64.h |
---|
0,0 → 1,36 |
#ifndef _ASM_GENERIC_BITOPS_FLS64_H_ |
#define _ASM_GENERIC_BITOPS_FLS64_H_ |
#include <asm/types.h> |
/** |
* fls64 - find last set bit in a 64-bit word |
* @x: the word to search |
* |
* This is defined in a similar way as the libc and compiler builtin |
* ffsll, but returns the position of the most significant set bit. |
* |
* fls64(value) returns 0 if value is 0 or the position of the last |
* set bit if value is nonzero. The last (most significant) bit is |
* at position 64. |
*/ |
#if BITS_PER_LONG == 32
static __always_inline int fls64(__u64 x)
{
	/* 32-bit: look at the high word first; fls() itself maps 0 -> 0,
	 * so a zero input falls through to fls(low) == 0. */
	__u32 h = x >> 32;
	if (h)
		return fls(h) + 32;
	return fls(x);
}
#elif BITS_PER_LONG == 64
static __always_inline int fls64(__u64 x)
{
	/* __fls() is undefined for 0, so screen that out first. */
	if (x == 0)
		return 0;
	return __fls(x) + 1;
}
#else
#error BITS_PER_LONG not 32 or 64
#endif
#endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */ |
/drivers/include/linux/asm-generic/bitops/hweight.h |
---|
0,0 → 1,11 |
#ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_ |
#define _ASM_GENERIC_BITOPS_HWEIGHT_H_ |
#include <asm/types.h> |
extern unsigned int hweight32(unsigned int w); |
extern unsigned int hweight16(unsigned int w); |
extern unsigned int hweight8(unsigned int w); |
extern unsigned long hweight64(__u64 w); |
#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */ |
/drivers/include/linux/asm-generic/bitops/le.h |
---|
0,0 → 1,57 |
#ifndef _ASM_GENERIC_BITOPS_LE_H_ |
#define _ASM_GENERIC_BITOPS_LE_H_ |
#include <asm/types.h> |
#include <asm/byteorder.h> |
#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) |
#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) |
#if defined(__LITTLE_ENDIAN) |
#define generic_test_le_bit(nr, addr) test_bit(nr, addr) |
#define generic___set_le_bit(nr, addr) __set_bit(nr, addr) |
#define generic___clear_le_bit(nr, addr) __clear_bit(nr, addr) |
#define generic_test_and_set_le_bit(nr, addr) test_and_set_bit(nr, addr) |
#define generic_test_and_clear_le_bit(nr, addr) test_and_clear_bit(nr, addr) |
#define generic___test_and_set_le_bit(nr, addr) __test_and_set_bit(nr, addr) |
#define generic___test_and_clear_le_bit(nr, addr) __test_and_clear_bit(nr, addr) |
#define generic_find_next_zero_le_bit(addr, size, offset) find_next_zero_bit(addr, size, offset) |
#define generic_find_next_le_bit(addr, size, offset) \ |
find_next_bit(addr, size, offset) |
#elif defined(__BIG_ENDIAN) |
#define generic_test_le_bit(nr, addr) \ |
test_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) |
#define generic___set_le_bit(nr, addr) \ |
__set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) |
#define generic___clear_le_bit(nr, addr) \ |
__clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) |
#define generic_test_and_set_le_bit(nr, addr) \ |
test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) |
#define generic_test_and_clear_le_bit(nr, addr) \ |
test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) |
#define generic___test_and_set_le_bit(nr, addr) \ |
__test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) |
#define generic___test_and_clear_le_bit(nr, addr) \ |
__test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) |
extern unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, |
unsigned long size, unsigned long offset); |
extern unsigned long generic_find_next_le_bit(const unsigned long *addr, |
unsigned long size, unsigned long offset); |
#else |
#error "Please fix <asm/byteorder.h>" |
#endif |
#define generic_find_first_zero_le_bit(addr, size) \ |
generic_find_next_zero_le_bit((addr), (size), 0) |
#endif /* _ASM_GENERIC_BITOPS_LE_H_ */ |
/drivers/include/linux/asm-generic/bitops/minix.h |
---|
0,0 → 1,15 |
#ifndef _ASM_GENERIC_BITOPS_MINIX_H_ |
#define _ASM_GENERIC_BITOPS_MINIX_H_ |
#define minix_test_and_set_bit(nr,addr) \ |
__test_and_set_bit((nr),(unsigned long *)(addr)) |
#define minix_set_bit(nr,addr) \ |
__set_bit((nr),(unsigned long *)(addr)) |
#define minix_test_and_clear_bit(nr,addr) \ |
__test_and_clear_bit((nr),(unsigned long *)(addr)) |
#define minix_test_bit(nr,addr) \ |
test_bit((nr),(unsigned long *)(addr)) |
#define minix_find_first_zero_bit(addr,size) \ |
find_first_zero_bit((unsigned long *)(addr),(size)) |
#endif /* _ASM_GENERIC_BITOPS_MINIX_H_ */ |
/drivers/include/linux/asm-generic/bitops/sched.h |
---|
0,0 → 1,31 |
#ifndef _ASM_GENERIC_BITOPS_SCHED_H_ |
#define _ASM_GENERIC_BITOPS_SCHED_H_ |
#include <linux/compiler.h> /* unlikely() */ |
#include <asm/types.h> |
/* |
* Every architecture must define this function. It's the fastest |
* way of searching a 100-bit bitmap. It's guaranteed that at least |
* one of the 100 bits is cleared. |
*/ |
static inline int sched_find_first_bit(const unsigned long *b)
{
#if BITS_PER_LONG == 64
	/* 128-bit priority bitmap held in two longs.  __ffs(0) is
	 * undefined, so this relies on the caller's guarantee that at
	 * least one bit in the map is set. */
	if (b[0])
		return __ffs(b[0]);
	return __ffs(b[1]) + 64;
#elif BITS_PER_LONG == 32
	/* Same bitmap held in four 32-bit longs. */
	if (b[0])
		return __ffs(b[0]);
	if (b[1])
		return __ffs(b[1]) + 32;
	if (b[2])
		return __ffs(b[2]) + 64;
	return __ffs(b[3]) + 96;
#else
#error BITS_PER_LONG not defined
#endif
}
#endif /* _ASM_GENERIC_BITOPS_SCHED_H_ */ |
/drivers/include/linux/asm-generic/bitsperlong.h |
---|
0,0 → 1,32 |
#ifndef __ASM_GENERIC_BITS_PER_LONG |
#define __ASM_GENERIC_BITS_PER_LONG |
/* |
* There seems to be no way of detecting this automatically from user |
* space, so 64 bit architectures should override this in their |
* bitsperlong.h. In particular, an architecture that supports |
* both 32 and 64 bit user space must not rely on CONFIG_64BIT |
* to decide it, but rather check a compiler provided macro. |
*/ |
#ifndef __BITS_PER_LONG |
#define __BITS_PER_LONG 32 |
#endif |
#ifdef __KERNEL__ |
#ifdef CONFIG_64BIT |
#define BITS_PER_LONG 64 |
#else |
#define BITS_PER_LONG 32 |
#endif /* CONFIG_64BIT */ |
/* |
* FIXME: The check currently breaks x86-64 build, so it's |
* temporarily disabled. Please fix x86-64 and reenable |
*/ |
#if 0 && BITS_PER_LONG != __BITS_PER_LONG |
#error Inconsistent word size. Check asm/bitsperlong.h |
#endif |
#endif /* __KERNEL__ */ |
#endif /* __ASM_GENERIC_BITS_PER_LONG */ |
/drivers/include/linux/asm-generic/int-ll64.h |
---|
0,0 → 1,78 |
/* |
* asm-generic/int-ll64.h |
* |
* Integer declarations for architectures which use "long long" |
* for 64-bit types. |
*/ |
#ifndef _ASM_GENERIC_INT_LL64_H |
#define _ASM_GENERIC_INT_LL64_H |
#include <asm/bitsperlong.h> |
#ifndef __ASSEMBLY__ |
/* |
* __xx is ok: it doesn't pollute the POSIX namespace. Use these in the |
* header files exported to user space |
*/ |
typedef __signed__ char __s8; |
typedef unsigned char __u8; |
typedef __signed__ short __s16; |
typedef unsigned short __u16; |
typedef __signed__ int __s32; |
typedef unsigned int __u32; |
#ifdef __GNUC__ |
__extension__ typedef __signed__ long long __s64; |
__extension__ typedef unsigned long long __u64; |
#else |
typedef __signed__ long long __s64; |
typedef unsigned long long __u64; |
#endif |
#endif /* __ASSEMBLY__ */ |
#ifdef __KERNEL__ |
#ifndef __ASSEMBLY__ |
typedef signed char s8; |
typedef unsigned char u8; |
typedef signed short s16; |
typedef unsigned short u16; |
typedef signed int s32; |
typedef unsigned int u32; |
typedef signed long long s64; |
typedef unsigned long long u64; |
#define S8_C(x) x |
#define U8_C(x) x ## U |
#define S16_C(x) x |
#define U16_C(x) x ## U |
#define S32_C(x) x |
#define U32_C(x) x ## U |
#define S64_C(x) x ## LL |
#define U64_C(x) x ## ULL |
#else /* __ASSEMBLY__ */ |
#define S8_C(x) x |
#define U8_C(x) x |
#define S16_C(x) x |
#define U16_C(x) x |
#define S32_C(x) x |
#define U32_C(x) x |
#define S64_C(x) x |
#define U64_C(x) x |
#endif /* __ASSEMBLY__ */ |
#endif /* __KERNEL__ */ |
#endif /* _ASM_GENERIC_INT_LL64_H */ |
/drivers/include/linux/asm-generic/types.h |
---|
0,0 → 1,42 |
#ifndef _ASM_GENERIC_TYPES_H |
#define _ASM_GENERIC_TYPES_H |
/* |
* int-ll64 is used practically everywhere now, |
* so use it as a reasonable default. |
*/ |
#include <asm-generic/int-ll64.h> |
#ifndef __ASSEMBLY__ |
typedef unsigned short umode_t; |
#endif /* __ASSEMBLY__ */ |
/* |
* These aren't exported outside the kernel to avoid name space clashes |
*/ |
#ifdef __KERNEL__ |
#ifndef __ASSEMBLY__ |
/* |
* DMA addresses may be very different from physical addresses |
* and pointers. i386 and powerpc may have 64 bit DMA on 32 bit |
* systems, while sparc64 uses 32 bit DMA addresses for 64 bit |
* physical addresses. |
* This default defines dma_addr_t to have the same size as |
* phys_addr_t, which is the most common way. |
* Do not define the dma64_addr_t type, which never really |
* worked. |
*/ |
#ifndef dma_addr_t |
#ifdef CONFIG_PHYS_ADDR_T_64BIT |
typedef u64 dma_addr_t; |
#else |
typedef u32 dma_addr_t; |
#endif /* CONFIG_PHYS_ADDR_T_64BIT */ |
#endif /* dma_addr_t */ |
#endif /* __ASSEMBLY__ */ |
#endif /* __KERNEL__ */ |
#endif /* _ASM_GENERIC_TYPES_H */ |
/drivers/include/linux/bitmap.h |
---|
0,0 → 1,293 |
#ifndef __LINUX_BITMAP_H |
#define __LINUX_BITMAP_H |
#ifndef __ASSEMBLY__ |
#include <linux/types.h> |
#include <linux/bitops.h> |
#include <linux/string.h> |
#include <linux/kernel.h> |
/* |
* bitmaps provide bit arrays that consume one or more unsigned |
* longs. The bitmap interface and available operations are listed |
* here, in bitmap.h |
* |
* Function implementations generic to all architectures are in |
* lib/bitmap.c. Functions implementations that are architecture |
* specific are in various include/asm-<arch>/bitops.h headers |
* and other arch/<arch> specific files. |
* |
* See lib/bitmap.c for more details. |
*/ |
/* |
* The available bitmap operations and their rough meaning in the |
* case that the bitmap is a single unsigned long are thus: |
* |
* Note that nbits should be always a compile time evaluable constant. |
* Otherwise many inlines will generate horrible code. |
* |
* bitmap_zero(dst, nbits) *dst = 0UL |
* bitmap_fill(dst, nbits) *dst = ~0UL |
* bitmap_copy(dst, src, nbits) *dst = *src |
* bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2 |
* bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2 |
* bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2 |
* bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2) |
* bitmap_complement(dst, src, nbits) *dst = ~(*src) |
* bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal? |
* bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap? |
* bitmap_subset(src1, src2, nbits) Is *src1 a subset of *src2? |
* bitmap_empty(src, nbits) Are all bits zero in *src? |
* bitmap_full(src, nbits) Are all bits set in *src? |
* bitmap_weight(src, nbits) Hamming Weight: number set bits |
* bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n |
* bitmap_shift_left(dst, src, n, nbits) *dst = *src << n |
* bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src) |
* bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit) |
* bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap |
* bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz |
* bitmap_scnprintf(buf, len, src, nbits) Print bitmap src to buf |
* bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf |
* bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf |
* bitmap_scnlistprintf(buf, len, src, nbits) Print bitmap src as list to buf |
* bitmap_parselist(buf, dst, nbits) Parse bitmap dst from list |
* bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region |
* bitmap_release_region(bitmap, pos, order) Free specified bit region |
* bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region |
*/ |
/* |
* Also the following operations in asm/bitops.h apply to bitmaps. |
* |
* set_bit(bit, addr) *addr |= bit |
* clear_bit(bit, addr) *addr &= ~bit |
* change_bit(bit, addr) *addr ^= bit |
* test_bit(bit, addr) Is bit set in *addr? |
* test_and_set_bit(bit, addr) Set bit and return old value |
* test_and_clear_bit(bit, addr) Clear bit and return old value |
* test_and_change_bit(bit, addr) Change bit and return old value |
* find_first_zero_bit(addr, nbits) Position first zero bit in *addr |
* find_first_bit(addr, nbits) Position first set bit in *addr |
* find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit |
* find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit |
*/ |
/* |
* The DECLARE_BITMAP(name,bits) macro, in linux/types.h, can be used |
* to declare an array named 'name' of just enough unsigned longs to |
* contain all bit positions from 0 to 'bits' - 1. |
*/ |
/* |
* lib/bitmap.c provides these functions: |
*/ |
extern int __bitmap_empty(const unsigned long *bitmap, int bits); |
extern int __bitmap_full(const unsigned long *bitmap, int bits); |
extern int __bitmap_equal(const unsigned long *bitmap1, |
const unsigned long *bitmap2, int bits); |
extern void __bitmap_complement(unsigned long *dst, const unsigned long *src, |
int bits); |
extern void __bitmap_shift_right(unsigned long *dst, |
const unsigned long *src, int shift, int bits); |
extern void __bitmap_shift_left(unsigned long *dst, |
const unsigned long *src, int shift, int bits); |
extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, |
const unsigned long *bitmap2, int bits); |
extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, |
const unsigned long *bitmap2, int bits); |
extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, |
const unsigned long *bitmap2, int bits); |
extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, |
const unsigned long *bitmap2, int bits); |
extern int __bitmap_intersects(const unsigned long *bitmap1, |
const unsigned long *bitmap2, int bits); |
extern int __bitmap_subset(const unsigned long *bitmap1, |
const unsigned long *bitmap2, int bits); |
extern int __bitmap_weight(const unsigned long *bitmap, int bits); |
extern int bitmap_scnprintf(char *buf, unsigned int len, |
const unsigned long *src, int nbits); |
extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user, |
unsigned long *dst, int nbits); |
extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, |
unsigned long *dst, int nbits); |
extern int bitmap_scnlistprintf(char *buf, unsigned int len, |
const unsigned long *src, int nbits); |
extern int bitmap_parselist(const char *buf, unsigned long *maskp, |
int nmaskbits); |
extern void bitmap_remap(unsigned long *dst, const unsigned long *src, |
const unsigned long *old, const unsigned long *new, int bits); |
extern int bitmap_bitremap(int oldbit, |
const unsigned long *old, const unsigned long *new, int bits); |
extern void bitmap_onto(unsigned long *dst, const unsigned long *orig, |
const unsigned long *relmap, int bits); |
extern void bitmap_fold(unsigned long *dst, const unsigned long *orig, |
int sz, int bits); |
extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order); |
extern void bitmap_release_region(unsigned long *bitmap, int pos, int order); |
extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order); |
extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits); |
/*
 * Mask covering the bits actually used in the final word of an
 * nbits-wide bitmap: all ones when nbits is a multiple of
 * BITS_PER_LONG, otherwise just the low (nbits % BITS_PER_LONG) bits.
 */
#define BITMAP_LAST_WORD_MASK(nbits)					\
(									\
	((nbits) % BITS_PER_LONG) ?					\
		(1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL		\
)
/*
 * True when nbits is a compile-time constant that fits in one word,
 * letting the inlines below collapse to single-word operations.
 */
#define small_const_nbits(nbits) \
	(__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
/* Clear all nbits bits of the bitmap at dst. */
static inline void bitmap_zero(unsigned long *dst, int nbits)
{
	if (small_const_nbits(nbits)) {
		*dst = 0UL;
		return;
	}
	memset(dst, 0, BITS_TO_LONGS(nbits) * sizeof(unsigned long));
}
/* Set all nbits bits of the bitmap at dst; bits past nbits in the
 * final word are left clear via BITMAP_LAST_WORD_MASK. */
static inline void bitmap_fill(unsigned long *dst, int nbits)
{
	size_t nlongs = BITS_TO_LONGS(nbits);
	if (!small_const_nbits(nbits))
		memset(dst, 0xff, (nlongs - 1) * sizeof(unsigned long));
	dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
}
/* Copy an nbits-wide bitmap from src to dst. */
static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
			int nbits)
{
	if (small_const_nbits(nbits)) {
		*dst = *src;
		return;
	}
	memcpy(dst, src, BITS_TO_LONGS(nbits) * sizeof(unsigned long));
}
/* dst = src1 & src2; returns nonzero iff the result has any set bit. */
static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
			const unsigned long *src2, int nbits)
{
	if (!small_const_nbits(nbits))
		return __bitmap_and(dst, src1, src2, nbits);
	*dst = *src1 & *src2;
	return *dst != 0;
}
/* dst = src1 | src2 */
static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
			const unsigned long *src2, int nbits)
{
	if (!small_const_nbits(nbits)) {
		__bitmap_or(dst, src1, src2, nbits);
		return;
	}
	*dst = *src1 | *src2;
}
/* dst = src1 ^ src2 */
static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
			const unsigned long *src2, int nbits)
{
	if (!small_const_nbits(nbits)) {
		__bitmap_xor(dst, src1, src2, nbits);
		return;
	}
	*dst = *src1 ^ *src2;
}
/* dst = src1 & ~src2; returns nonzero iff the result has any set bit. */
static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1,
			const unsigned long *src2, int nbits)
{
	if (!small_const_nbits(nbits))
		return __bitmap_andnot(dst, src1, src2, nbits);
	*dst = *src1 & ~(*src2);
	return *dst != 0;
}
/* dst = ~src, with bits past nbits masked off in the single-word case. */
static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
			int nbits)
{
	if (!small_const_nbits(nbits)) {
		__bitmap_complement(dst, src, nbits);
		return;
	}
	*dst = ~(*src) & BITMAP_LAST_WORD_MASK(nbits);
}
/* Nonzero iff the two bitmaps agree on all nbits bits. */
static inline int bitmap_equal(const unsigned long *src1,
			const unsigned long *src2, int nbits)
{
	if (!small_const_nbits(nbits))
		return __bitmap_equal(src1, src2, nbits);
	return ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits)) == 0;
}
/* Nonzero iff the two bitmaps share at least one set bit. */
static inline int bitmap_intersects(const unsigned long *src1,
			const unsigned long *src2, int nbits)
{
	if (!small_const_nbits(nbits))
		return __bitmap_intersects(src1, src2, nbits);
	return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
}
/* Nonzero iff every set bit of src1 is also set in src2. */
static inline int bitmap_subset(const unsigned long *src1,
			const unsigned long *src2, int nbits)
{
	if (!small_const_nbits(nbits))
		return __bitmap_subset(src1, src2, nbits);
	return ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits)) == 0;
}
/* Nonzero iff no bit of the bitmap is set. */
static inline int bitmap_empty(const unsigned long *src, int nbits)
{
	if (!small_const_nbits(nbits))
		return __bitmap_empty(src, nbits);
	return (*src & BITMAP_LAST_WORD_MASK(nbits)) == 0;
}
/* Nonzero iff every bit of the bitmap is set. */
static inline int bitmap_full(const unsigned long *src, int nbits)
{
	if (!small_const_nbits(nbits))
		return __bitmap_full(src, nbits);
	return (~(*src) & BITMAP_LAST_WORD_MASK(nbits)) == 0;
}
/* Number of set bits (Hamming weight) in the bitmap. */
static inline int bitmap_weight(const unsigned long *src, int nbits)
{
	if (!small_const_nbits(nbits))
		return __bitmap_weight(src, nbits);
	return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
}
/* dst = src >> n (bitmap-wide logical shift toward bit 0). */
static inline void bitmap_shift_right(unsigned long *dst,
			const unsigned long *src, int n, int nbits)
{
	if (!small_const_nbits(nbits)) {
		__bitmap_shift_right(dst, src, n, nbits);
		return;
	}
	*dst = *src >> n;
}
/* dst = src << n; overflow past nbits is discarded. */
static inline void bitmap_shift_left(unsigned long *dst,
			const unsigned long *src, int n, int nbits)
{
	if (!small_const_nbits(nbits)) {
		__bitmap_shift_left(dst, src, n, nbits);
		return;
	}
	*dst = (*src << n) & BITMAP_LAST_WORD_MASK(nbits);
}
/* Parse a bitmap from a kernel-space buffer (is_user == 0). */
static inline int bitmap_parse(const char *buf, unsigned int buflen,
			unsigned long *maskp, int nmaskbits)
{
	return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits);
}
#endif /* __ASSEMBLY__ */ |
#endif /* __LINUX_BITMAP_H */ |
/drivers/include/linux/bitops.h |
---|
0,0 → 1,191 |
#ifndef _LINUX_BITOPS_H |
#define _LINUX_BITOPS_H |
#define BIT(nr) (1UL << (nr)) |
#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) |
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG) |
#define BITS_PER_BYTE 8 |
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) |
/* |
* Include this here because some architectures need generic_ffs/fls in |
* scope |
*/ |
#include <asm/bitops.h> |
#define for_each_bit(bit, addr, size) \ |
for ((bit) = find_first_bit((addr), (size)); \ |
(bit) < (size); \ |
(bit) = find_next_bit((addr), (size), (bit) + 1)) |
/* Number of bits needed to hold values up to count. */
static __inline__ int get_bitmask_order(unsigned int count)
{
	/* We could be slightly more clever with -1 here... */
	return fls(count);
}
/* log2 of count, rounded up for non-powers-of-two. */
static __inline__ int get_count_order(unsigned int count)
{
	int order = fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}
/* Hamming weight of a long, dispatching on the word size. */
static inline unsigned long hweight_long(unsigned long w)
{
	if (sizeof(w) == 4)
		return hweight32(w);
	return hweight64(w);
}
/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 *
 * The shift counts in all the rotate helpers below are masked to the
 * operand width, so a rotate by 0 (or any multiple of the width)
 * returns the value unchanged instead of invoking undefined behaviour
 * through an over-wide shift (the old "word >> (32 - shift)" form was
 * UB for shift == 0).  Results for 1..width-1 are unchanged.
 */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
	return (word << (shift & 31)) | (word >> ((-shift) & 31));
}
/**
 * ror32 - rotate a 32-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 ror32(__u32 word, unsigned int shift)
{
	return (word >> (shift & 31)) | (word << ((-shift) & 31));
}
/**
 * rol16 - rotate a 16-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 rol16(__u16 word, unsigned int shift)
{
	return (word << (shift & 15)) | (word >> ((-shift) & 15));
}
/**
 * ror16 - rotate a 16-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 ror16(__u16 word, unsigned int shift)
{
	return (word >> (shift & 15)) | (word << ((-shift) & 15));
}
/**
 * rol8 - rotate an 8-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 rol8(__u8 word, unsigned int shift)
{
	return (word << (shift & 7)) | (word >> ((-shift) & 7));
}
/**
 * ror8 - rotate an 8-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 ror8(__u8 word, unsigned int shift)
{
	return (word >> (shift & 7)) | (word << ((-shift) & 7));
}
/* Find-last-set for a long, dispatching on the word size at
 * compile time (sizeof comparison folds away). */
static inline unsigned fls_long(unsigned long l)
{
	return (sizeof(l) == 4) ? fls(l) : fls64(l);
}
/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word
 *
 * On 64 bit arches this is a synonym for __ffs
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
	/* Low word empty: the first set bit must live in the high word. */
	if (((u32)word) == 0UL)
		return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
	/* 64-bit (or non-empty low word on 32-bit): plain __ffs. */
	return __ffs((unsigned long)word);
}
#ifdef __KERNEL__ |
#ifdef CONFIG_GENERIC_FIND_FIRST_BIT |
/** |
* find_first_bit - find the first set bit in a memory region |
* @addr: The address to start the search at |
* @size: The maximum size to search |
* |
* Returns the bit number of the first set bit. |
*/ |
extern unsigned long find_first_bit(const unsigned long *addr, |
unsigned long size); |
/** |
* find_first_zero_bit - find the first cleared bit in a memory region |
* @addr: The address to start the search at |
* @size: The maximum size to search |
* |
* Returns the bit number of the first cleared bit. |
*/ |
extern unsigned long find_first_zero_bit(const unsigned long *addr, |
unsigned long size); |
#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ |
#ifdef CONFIG_GENERIC_FIND_LAST_BIT |
/** |
* find_last_bit - find the last set bit in a memory region |
* @addr: The address to start the search at |
* @size: The maximum size to search |
* |
 * Returns the bit number of the last set bit, or size if no bits are set.
*/ |
extern unsigned long find_last_bit(const unsigned long *addr, |
unsigned long size); |
#endif /* CONFIG_GENERIC_FIND_LAST_BIT */ |
#ifdef CONFIG_GENERIC_FIND_NEXT_BIT |
/** |
* find_next_bit - find the next set bit in a memory region |
* @addr: The address to base the search on |
 * @size: The bitmap size in bits
 * @offset: The bit number to start searching at
*/ |
extern unsigned long find_next_bit(const unsigned long *addr, |
unsigned long size, unsigned long offset); |
/** |
* find_next_zero_bit - find the next cleared bit in a memory region |
* @addr: The address to base the search on |
 * @size: The bitmap size in bits
 * @offset: The bit number to start searching at
*/ |
extern unsigned long find_next_zero_bit(const unsigned long *addr, |
unsigned long size, |
unsigned long offset); |
#endif /* CONFIG_GENERIC_FIND_NEXT_BIT */ |
#endif /* __KERNEL__ */ |
#endif |
/drivers/include/linux/byteorder/generic.h |
---|
0,0 → 1,173 |
#ifndef _LINUX_BYTEORDER_GENERIC_H |
#define _LINUX_BYTEORDER_GENERIC_H |
/* |
* linux/byteorder_generic.h |
* Generic Byte-reordering support |
* |
* The "... p" macros, like le64_to_cpup, can be used with pointers |
* to unaligned data, but there will be a performance penalty on |
* some architectures. Use get_unaligned for unaligned data. |
* |
* Francois-Rene Rideau <fare@tunes.org> 19970707 |
* gathered all the good ideas from all asm-foo/byteorder.h into one file, |
* cleaned them up. |
* I hope it is compliant with non-GCC compilers. |
* I decided to put __BYTEORDER_HAS_U64__ in byteorder.h, |
* because I wasn't sure it would be ok to put it in types.h |
* Upgraded it to 2.1.43 |
* Francois-Rene Rideau <fare@tunes.org> 19971012 |
* Upgraded it to 2.1.57 |
* to please Linus T., replaced huge #ifdef's between little/big endian |
* by nestedly #include'd files. |
* Francois-Rene Rideau <fare@tunes.org> 19971205 |
* Made it to 2.1.71; now a facelift: |
* Put files under include/linux/byteorder/ |
* Split swab from generic support. |
* |
* TODO: |
* = Regular kernel maintainers could also replace all these manual |
* byteswap macros that remain, disseminated among drivers, |
* after some grep or the sources... |
* = Linus might want to rename all these macros and files to fit his taste, |
* to fit his personal naming scheme. |
* = it seems that a few drivers would also appreciate |
* nybble swapping support... |
* = every architecture could add their byteswap macro in asm/byteorder.h |
* see how some architectures already do (i386, alpha, ppc, etc) |
* = cpu_to_beXX and beXX_to_cpu might some day need to be well |
* distinguished throughout the kernel. This is not the case currently, |
* since little endian, big endian, and pdp endian machines needn't it. |
* But this might be the case for, say, a port of Linux to 20/21 bit |
* architectures (and F21 Linux addict around?). |
*/ |
/* |
* The following macros are to be defined by <asm/byteorder.h>: |
* |
* Conversion of long and short int between network and host format |
* ntohl(__u32 x) |
* ntohs(__u16 x) |
* htonl(__u32 x) |
* htons(__u16 x) |
* It seems that some programs (which? where? or perhaps a standard? POSIX?) |
* might like the above to be functions, not macros (why?). |
* if that's true, then detect them, and take measures. |
* Anyway, the measure is: define only ___ntohl as a macro instead, |
* and in a separate file, have |
* unsigned long inline ntohl(x){return ___ntohl(x);} |
* |
* The same for constant arguments |
* __constant_ntohl(__u32 x) |
* __constant_ntohs(__u16 x) |
* __constant_htonl(__u32 x) |
* __constant_htons(__u16 x) |
* |
* Conversion of XX-bit integers (16- 32- or 64-) |
* between native CPU format and little/big endian format |
* 64-bit stuff only defined for proper architectures |
* cpu_to_[bl]eXX(__uXX x) |
* [bl]eXX_to_cpu(__uXX x) |
* |
* The same, but takes a pointer to the value to convert |
* cpu_to_[bl]eXXp(__uXX x) |
* [bl]eXX_to_cpup(__uXX x) |
* |
* The same, but change in situ |
* cpu_to_[bl]eXXs(__uXX x) |
* [bl]eXX_to_cpus(__uXX x) |
* |
* See asm-foo/byteorder.h for examples of how to provide |
* architecture-optimized versions |
* |
*/ |
#define cpu_to_le64 __cpu_to_le64 |
#define le64_to_cpu __le64_to_cpu |
#define cpu_to_le32 __cpu_to_le32 |
#define le32_to_cpu __le32_to_cpu |
#define cpu_to_le16 __cpu_to_le16 |
#define le16_to_cpu __le16_to_cpu |
#define cpu_to_be64 __cpu_to_be64 |
#define be64_to_cpu __be64_to_cpu |
#define cpu_to_be32 __cpu_to_be32 |
#define be32_to_cpu __be32_to_cpu |
#define cpu_to_be16 __cpu_to_be16 |
#define be16_to_cpu __be16_to_cpu |
#define cpu_to_le64p __cpu_to_le64p |
#define le64_to_cpup __le64_to_cpup |
#define cpu_to_le32p __cpu_to_le32p |
#define le32_to_cpup __le32_to_cpup |
#define cpu_to_le16p __cpu_to_le16p |
#define le16_to_cpup __le16_to_cpup |
#define cpu_to_be64p __cpu_to_be64p |
#define be64_to_cpup __be64_to_cpup |
#define cpu_to_be32p __cpu_to_be32p |
#define be32_to_cpup __be32_to_cpup |
#define cpu_to_be16p __cpu_to_be16p |
#define be16_to_cpup __be16_to_cpup |
#define cpu_to_le64s __cpu_to_le64s |
#define le64_to_cpus __le64_to_cpus |
#define cpu_to_le32s __cpu_to_le32s |
#define le32_to_cpus __le32_to_cpus |
#define cpu_to_le16s __cpu_to_le16s |
#define le16_to_cpus __le16_to_cpus |
#define cpu_to_be64s __cpu_to_be64s |
#define be64_to_cpus __be64_to_cpus |
#define cpu_to_be32s __cpu_to_be32s |
#define be32_to_cpus __be32_to_cpus |
#define cpu_to_be16s __cpu_to_be16s |
#define be16_to_cpus __be16_to_cpus |
/* |
* They have to be macros in order to do the constant folding |
 * correctly - if the argument is passed into an inline function
* it is no longer constant according to gcc.. |
*/ |
#undef ntohl |
#undef ntohs |
#undef htonl |
#undef htons |
#define ___htonl(x) __cpu_to_be32(x) |
#define ___htons(x) __cpu_to_be16(x) |
#define ___ntohl(x) __be32_to_cpu(x) |
#define ___ntohs(x) __be16_to_cpu(x) |
#define htonl(x) ___htonl(x) |
#define ntohl(x) ___ntohl(x) |
#define htons(x) ___htons(x) |
#define ntohs(x) ___ntohs(x) |
/* Add @val (CPU order) to the little-endian 16-bit value at @var, in place. */
static inline void le16_add_cpu(__le16 *var, u16 val)
{
	u16 host = le16_to_cpu(*var);
	host += val;
	*var = cpu_to_le16(host);
}
/* Add @val (CPU order) to the little-endian 32-bit value at @var, in place. */
static inline void le32_add_cpu(__le32 *var, u32 val)
{
	u32 host = le32_to_cpu(*var);
	host += val;
	*var = cpu_to_le32(host);
}
/* Add @val (CPU order) to the little-endian 64-bit value at @var, in place. */
static inline void le64_add_cpu(__le64 *var, u64 val)
{
	u64 host = le64_to_cpu(*var);
	host += val;
	*var = cpu_to_le64(host);
}
/* Add @val (CPU order) to the big-endian 16-bit value at @var, in place. */
static inline void be16_add_cpu(__be16 *var, u16 val)
{
	u16 host = be16_to_cpu(*var);
	host += val;
	*var = cpu_to_be16(host);
}
/* Add @val (CPU order) to the big-endian 32-bit value at @var, in place. */
static inline void be32_add_cpu(__be32 *var, u32 val)
{
	u32 host = be32_to_cpu(*var);
	host += val;
	*var = cpu_to_be32(host);
}
/* Add @val (CPU order) to the big-endian 64-bit value at @var, in place. */
static inline void be64_add_cpu(__be64 *var, u64 val)
{
	u64 host = be64_to_cpu(*var);
	host += val;
	*var = cpu_to_be64(host);
}
#endif /* _LINUX_BYTEORDER_GENERIC_H */ |
/drivers/include/linux/byteorder/little_endian.h |
---|
0,0 → 1,108 |
#ifndef _LINUX_BYTEORDER_LITTLE_ENDIAN_H |
#define _LINUX_BYTEORDER_LITTLE_ENDIAN_H |
#ifndef __LITTLE_ENDIAN |
#define __LITTLE_ENDIAN 1234 |
#endif |
#ifndef __LITTLE_ENDIAN_BITFIELD |
#define __LITTLE_ENDIAN_BITFIELD |
#endif |
#include <linux/types.h> |
#include <linux/swab.h> |
#define __constant_htonl(x) ((__force __be32)___constant_swab32((x))) |
#define __constant_ntohl(x) ___constant_swab32((__force __be32)(x)) |
#define __constant_htons(x) ((__force __be16)___constant_swab16((x))) |
#define __constant_ntohs(x) ___constant_swab16((__force __be16)(x)) |
#define __constant_cpu_to_le64(x) ((__force __le64)(__u64)(x)) |
#define __constant_le64_to_cpu(x) ((__force __u64)(__le64)(x)) |
#define __constant_cpu_to_le32(x) ((__force __le32)(__u32)(x)) |
#define __constant_le32_to_cpu(x) ((__force __u32)(__le32)(x)) |
#define __constant_cpu_to_le16(x) ((__force __le16)(__u16)(x)) |
#define __constant_le16_to_cpu(x) ((__force __u16)(__le16)(x)) |
#define __constant_cpu_to_be64(x) ((__force __be64)___constant_swab64((x))) |
#define __constant_be64_to_cpu(x) ___constant_swab64((__force __u64)(__be64)(x)) |
#define __constant_cpu_to_be32(x) ((__force __be32)___constant_swab32((x))) |
#define __constant_be32_to_cpu(x) ___constant_swab32((__force __u32)(__be32)(x)) |
#define __constant_cpu_to_be16(x) ((__force __be16)___constant_swab16((x))) |
#define __constant_be16_to_cpu(x) ___constant_swab16((__force __u16)(__be16)(x)) |
#define __cpu_to_le64(x) ((__force __le64)(__u64)(x)) |
#define __le64_to_cpu(x) ((__force __u64)(__le64)(x)) |
#define __cpu_to_le32(x) ((__force __le32)(__u32)(x)) |
#define __le32_to_cpu(x) ((__force __u32)(__le32)(x)) |
#define __cpu_to_le16(x) ((__force __le16)(__u16)(x)) |
#define __le16_to_cpu(x) ((__force __u16)(__le16)(x)) |
#define __cpu_to_be64(x) ((__force __be64)__swab64((x))) |
#define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x)) |
#define __cpu_to_be32(x) ((__force __be32)__swab32((x))) |
#define __be32_to_cpu(x) __swab32((__force __u32)(__be32)(x)) |
#define __cpu_to_be16(x) ((__force __be16)__swab16((x))) |
#define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x)) |
static inline __le64 __cpu_to_le64p(const __u64 *p) |
{ |
return (__force __le64)*p; |
} |
static inline __u64 __le64_to_cpup(const __le64 *p) |
{ |
return (__force __u64)*p; |
} |
static inline __le32 __cpu_to_le32p(const __u32 *p) |
{ |
return (__force __le32)*p; |
} |
static inline __u32 __le32_to_cpup(const __le32 *p) |
{ |
return (__force __u32)*p; |
} |
static inline __le16 __cpu_to_le16p(const __u16 *p) |
{ |
return (__force __le16)*p; |
} |
static inline __u16 __le16_to_cpup(const __le16 *p) |
{ |
return (__force __u16)*p; |
} |
static inline __be64 __cpu_to_be64p(const __u64 *p) |
{ |
return (__force __be64)__swab64p(p); |
} |
static inline __u64 __be64_to_cpup(const __be64 *p) |
{ |
return __swab64p((__u64 *)p); |
} |
static inline __be32 __cpu_to_be32p(const __u32 *p) |
{ |
return (__force __be32)__swab32p(p); |
} |
static inline __u32 __be32_to_cpup(const __be32 *p) |
{ |
return __swab32p((__u32 *)p); |
} |
static inline __be16 __cpu_to_be16p(const __u16 *p) |
{ |
return (__force __be16)__swab16p(p); |
} |
static inline __u16 __be16_to_cpup(const __be16 *p) |
{ |
return __swab16p((__u16 *)p); |
} |
#define __cpu_to_le64s(x) do { (void)(x); } while (0) |
#define __le64_to_cpus(x) do { (void)(x); } while (0) |
#define __cpu_to_le32s(x) do { (void)(x); } while (0) |
#define __le32_to_cpus(x) do { (void)(x); } while (0) |
#define __cpu_to_le16s(x) do { (void)(x); } while (0) |
#define __le16_to_cpus(x) do { (void)(x); } while (0) |
#define __cpu_to_be64s(x) __swab64s((x)) |
#define __be64_to_cpus(x) __swab64s((x)) |
#define __cpu_to_be32s(x) __swab32s((x)) |
#define __be32_to_cpus(x) __swab32s((x)) |
#define __cpu_to_be16s(x) __swab16s((x)) |
#define __be16_to_cpus(x) __swab16s((x)) |
#ifdef __KERNEL__ |
#include <linux/byteorder/generic.h> |
#endif |
#endif /* _LINUX_BYTEORDER_LITTLE_ENDIAN_H */ |
/drivers/include/linux/compiler-gcc.h |
---|
0,0 → 1,87 |
#ifndef __LINUX_COMPILER_H |
#error "Please don't include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead." |
#endif |
/* |
* Common definitions for all gcc versions go here. |
*/ |
/* Optimization barrier */ |
/* The "volatile" is due to gcc bugs */ |
#define barrier() __asm__ __volatile__("": : :"memory") |
/* |
* This macro obfuscates arithmetic on a variable address so that gcc |
* shouldn't recognize the original var, and make assumptions about it. |
* |
* This is needed because the C standard makes it undefined to do |
* pointer arithmetic on "objects" outside their boundaries and the |
* gcc optimizers assume this is the case. In particular they |
* assume such arithmetic does not wrap. |
* |
* A miscompilation has been observed because of this on PPC. |
* To work around it we hide the relationship of the pointer and the object |
* using this macro. |
* |
* Versions of the ppc64 compiler before 4.1 had a bug where use of |
* RELOC_HIDE could trash r30. The bug can be worked around by changing |
* the inline assembly constraint from =g to =r, in this particular |
* case either is valid. |
*/ |
#define RELOC_HIDE(ptr, off) \ |
({ unsigned long __ptr; \ |
__asm__ ("" : "=r"(__ptr) : "0"(ptr)); \ |
(typeof(ptr)) (__ptr + (off)); }) |
/* &a[0] degrades to a pointer: a different type from an array */ |
#define __must_be_array(a) \ |
BUILD_BUG_ON_ZERO(__builtin_types_compatible_p(typeof(a), typeof(&a[0]))) |
/* |
* Force always-inline if the user requests it so via the .config, |
* or if gcc is too old: |
*/ |
#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \ |
!defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4) |
# define inline inline __attribute__((always_inline)) |
# define __inline__ __inline__ __attribute__((always_inline)) |
# define __inline __inline __attribute__((always_inline)) |
#endif |
#define __deprecated __attribute__((deprecated)) |
#define __packed __attribute__((packed)) |
#define __weak __attribute__((weak)) |
/* |
* it doesn't make sense on ARM (currently the only user of __naked) to trace |
* naked functions because then mcount is called without stack and frame pointer |
* being set up and there is no chance to restore the lr register to the value |
* before mcount was called. |
*/ |
#define __naked __attribute__((naked)) notrace |
#define __noreturn __attribute__((noreturn)) |
/* |
* From the GCC manual: |
* |
* Many functions have no effects except the return value and their |
* return value depends only on the parameters and/or global |
* variables. Such a function can be subject to common subexpression |
* elimination and loop optimization just as an arithmetic operator |
* would be. |
* [...] |
*/ |
#define __pure __attribute__((pure)) |
#define __aligned(x) __attribute__((aligned(x))) |
#define __printf(a,b) __attribute__((format(printf,a,b))) |
#define noinline __attribute__((noinline)) |
#define __attribute_const__ __attribute__((__const__)) |
#define __maybe_unused __attribute__((unused)) |
#define __always_unused __attribute__((unused)) |
#define __gcc_header(x) #x |
#define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h) |
#define gcc_header(x) _gcc_header(x) |
#include gcc_header(__GNUC__) |
/drivers/include/linux/compiler-gcc4.h |
---|
0,0 → 1,61 |
#ifndef __LINUX_COMPILER_H |
#error "Please don't include <linux/compiler-gcc4.h> directly, include <linux/compiler.h> instead." |
#endif |
/* GCC 4.1.[01] miscompiles __weak */ |
#ifdef __KERNEL__ |
# if __GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ <= 1 |
# error Your version of gcc miscompiles the __weak directive |
# endif |
#endif |
#define __used __attribute__((__used__)) |
#define __must_check __attribute__((warn_unused_result)) |
#define __compiler_offsetof(a,b) __builtin_offsetof(a,b) |
#define __always_inline inline __attribute__((always_inline)) |
/* |
* A trick to suppress uninitialized variable warning without generating any |
* code |
*/ |
#define uninitialized_var(x) x = x |
#if __GNUC_MINOR__ >= 3 |
/* Mark functions as cold. gcc will assume any path leading to a call |
to them will be unlikely. This means a lot of manual unlikely()s |
are unnecessary now for any paths leading to the usual suspects |
like BUG(), printk(), panic() etc. [but let's keep them for now for |
older compilers] |
Early snapshots of gcc 4.3 don't support this and we can't detect this |
in the preprocessor, but we can live with this because they're unreleased. |
Maketime probing would be overkill here. |
gcc also has a __attribute__((__hot__)) to move hot functions into |
a special section, but I don't see any sense in this right now in |
the kernel context */ |
#define __cold __attribute__((__cold__)) |
#if __GNUC_MINOR__ >= 5 |
/* |
* Mark a position in code as unreachable. This can be used to |
* suppress control flow warnings after asm blocks that transfer |
* control elsewhere. |
* |
* Early snapshots of gcc 4.5 don't support this and we can't detect |
* this in the preprocessor, but we can live with this because they're |
* unreleased. Really, we need to have autoconf for the kernel. |
*/ |
#define unreachable() __builtin_unreachable() |
#endif |
#endif |
#if __GNUC_MINOR__ > 0 |
#define __compiletime_object_size(obj) __builtin_object_size(obj, 0) |
#endif |
#if __GNUC_MINOR__ >= 4 |
#define __compiletime_warning(message) __attribute__((warning(message))) |
#define __compiletime_error(message) __attribute__((error(message))) |
#endif |
/drivers/include/linux/compiler.h |
---|
0,0 → 1,303 |
#ifndef __LINUX_COMPILER_H |
#define __LINUX_COMPILER_H |
#ifndef __ASSEMBLY__ |
#ifdef __CHECKER__ |
# define __user __attribute__((noderef, address_space(1))) |
# define __kernel /* default address space */ |
# define __safe __attribute__((safe)) |
# define __force __attribute__((force)) |
# define __nocast __attribute__((nocast)) |
# define __iomem __attribute__((noderef, address_space(2))) |
# define __acquires(x) __attribute__((context(x,0,1))) |
# define __releases(x) __attribute__((context(x,1,0))) |
# define __acquire(x) __context__(x,1) |
# define __release(x) __context__(x,-1) |
# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) |
extern void __chk_user_ptr(const volatile void __user *); |
extern void __chk_io_ptr(const volatile void __iomem *); |
#else |
# define __user |
# define __kernel |
# define __safe |
# define __force |
# define __nocast |
# define __iomem |
# define __chk_user_ptr(x) (void)0 |
# define __chk_io_ptr(x) (void)0 |
# define __builtin_warning(x, y...) (1) |
# define __acquires(x) |
# define __releases(x) |
# define __acquire(x) (void)0 |
# define __release(x) (void)0 |
# define __cond_lock(x,c) (c) |
#endif |
#ifdef __KERNEL__ |
#ifdef __GNUC__ |
#include <linux/compiler-gcc.h> |
#endif |
#define notrace __attribute__((no_instrument_function)) |
/* Intel compiler defines __GNUC__. So we will overwrite implementations |
* coming from above header files here |
*/ |
#ifdef __INTEL_COMPILER |
# include <linux/compiler-intel.h> |
#endif |
/* |
* Generic compiler-dependent macros required for kernel |
* build go below this comment. Actual compiler/compiler version |
* specific implementations come from the above header files |
*/ |
struct ftrace_branch_data { |
const char *func; |
const char *file; |
unsigned line; |
union { |
struct { |
unsigned long correct; |
unsigned long incorrect; |
}; |
struct { |
unsigned long miss; |
unsigned long hit; |
}; |
unsigned long miss_hit[2]; |
}; |
}; |
/* |
* Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code |
* to disable branch tracing on a per file basis. |
*/ |
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \ |
&& !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__) |
void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); |
#define likely_notrace(x) __builtin_expect(!!(x), 1) |
#define unlikely_notrace(x) __builtin_expect(!!(x), 0) |
#define __branch_check__(x, expect) ({ \ |
int ______r; \ |
static struct ftrace_branch_data \ |
__attribute__((__aligned__(4))) \ |
__attribute__((section("_ftrace_annotated_branch"))) \ |
______f = { \ |
.func = __func__, \ |
.file = __FILE__, \ |
.line = __LINE__, \ |
}; \ |
______r = likely_notrace(x); \ |
ftrace_likely_update(&______f, ______r, expect); \ |
______r; \ |
}) |
/* |
* Using __builtin_constant_p(x) to ignore cases where the return |
* value is always the same. This idea is taken from a similar patch |
* written by Daniel Walker. |
*/ |
# ifndef likely |
# define likely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1)) |
# endif |
# ifndef unlikely |
# define unlikely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0)) |
# endif |
#ifdef CONFIG_PROFILE_ALL_BRANCHES |
/* |
* "Define 'is'", Bill Clinton |
* "Define 'if'", Steven Rostedt |
*/ |
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) ) |
#define __trace_if(cond) \ |
if (__builtin_constant_p((cond)) ? !!(cond) : \ |
({ \ |
int ______r; \ |
static struct ftrace_branch_data \ |
__attribute__((__aligned__(4))) \ |
__attribute__((section("_ftrace_branch"))) \ |
______f = { \ |
.func = __func__, \ |
.file = __FILE__, \ |
.line = __LINE__, \ |
}; \ |
______r = !!(cond); \ |
______f.miss_hit[______r]++; \ |
______r; \ |
})) |
#endif /* CONFIG_PROFILE_ALL_BRANCHES */ |
#else |
# define likely(x) __builtin_expect(!!(x), 1) |
# define unlikely(x) __builtin_expect(!!(x), 0) |
#endif |
/* Optimization barrier */ |
#ifndef barrier |
# define barrier() __memory_barrier() |
#endif |
/* Unreachable code */ |
#ifndef unreachable |
# define unreachable() do { } while (1) |
#endif |
#ifndef RELOC_HIDE |
# define RELOC_HIDE(ptr, off) \ |
({ unsigned long __ptr; \ |
__ptr = (unsigned long) (ptr); \ |
(typeof(ptr)) (__ptr + (off)); }) |
#endif |
#endif /* __KERNEL__ */ |
#endif /* __ASSEMBLY__ */ |
#ifdef __KERNEL__ |
/* |
* Allow us to mark functions as 'deprecated' and have gcc emit a nice |
* warning for each use, in hopes of speeding the functions removal. |
* Usage is: |
* int __deprecated foo(void) |
*/ |
#ifndef __deprecated |
# define __deprecated /* unimplemented */ |
#endif |
#ifdef MODULE |
#define __deprecated_for_modules __deprecated |
#else |
#define __deprecated_for_modules |
#endif |
#ifndef __must_check |
#define __must_check |
#endif |
#ifndef CONFIG_ENABLE_MUST_CHECK |
#undef __must_check |
#define __must_check |
#endif |
#ifndef CONFIG_ENABLE_WARN_DEPRECATED |
#undef __deprecated |
#undef __deprecated_for_modules |
#define __deprecated |
#define __deprecated_for_modules |
#endif |
/* |
* Allow us to avoid 'defined but not used' warnings on functions and data, |
* as well as force them to be emitted to the assembly file. |
* |
* As of gcc 3.4, static functions that are not marked with attribute((used)) |
* may be elided from the assembly file. As of gcc 3.4, static data not so |
* marked will not be elided, but this may change in a future gcc version. |
* |
* NOTE: Because distributions shipped with a backported unit-at-a-time |
* compiler in gcc 3.3, we must define __used to be __attribute__((used)) |
* for gcc >=3.3 instead of 3.4. |
* |
* In prior versions of gcc, such functions and data would be emitted, but |
* would be warned about except with attribute((unused)). |
* |
* Mark functions that are referenced only in inline assembly as __used so |
* the code is emitted even though it appears to be unreferenced. |
*/ |
#ifndef __used |
# define __used /* unimplemented */ |
#endif |
#ifndef __maybe_unused |
# define __maybe_unused /* unimplemented */ |
#endif |
#ifndef __always_unused |
# define __always_unused /* unimplemented */ |
#endif |
#ifndef noinline |
#define noinline |
#endif |
/* |
 * Rather than using noinline to prevent stack consumption, use
 * noinline_for_stack instead. For documentation reasons.
*/ |
#define noinline_for_stack noinline |
#ifndef __always_inline |
#define __always_inline inline |
#endif |
#endif /* __KERNEL__ */ |
/* |
* From the GCC manual: |
* |
* Many functions do not examine any values except their arguments, |
* and have no effects except the return value. Basically this is |
* just slightly more strict class than the `pure' attribute above, |
* since function is not allowed to read global memory. |
* |
* Note that a function that has pointer arguments and examines the |
* data pointed to must _not_ be declared `const'. Likewise, a |
* function that calls a non-`const' function usually must not be |
* `const'. It does not make sense for a `const' function to return |
* `void'. |
*/ |
#ifndef __attribute_const__ |
# define __attribute_const__ /* unimplemented */ |
#endif |
/* |
* Tell gcc if a function is cold. The compiler will assume any path |
* directly leading to the call is unlikely. |
*/ |
#ifndef __cold |
#define __cold |
#endif |
/* Simple shorthand for a section definition */ |
#ifndef __section |
# define __section(S) __attribute__ ((__section__(#S))) |
#endif |
/* Are two types/vars the same type (ignoring qualifiers)? */ |
#ifndef __same_type |
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) |
#endif |
/* Compile time object size, -1 for unknown */ |
#ifndef __compiletime_object_size |
# define __compiletime_object_size(obj) -1 |
#endif |
#ifndef __compiletime_warning |
# define __compiletime_warning(message) |
#endif |
#ifndef __compiletime_error |
# define __compiletime_error(message) |
#endif |
/* |
* Prevent the compiler from merging or refetching accesses. The compiler |
* is also forbidden from reordering successive instances of ACCESS_ONCE(), |
* but only when the compiler is aware of some particular ordering. One way |
* to make the compiler aware of ordering is to put the two invocations of |
* ACCESS_ONCE() in different C statements. |
* |
* This macro does absolutely -nothing- to prevent the CPU from reordering, |
* merging, or refetching absolutely anything at any time. Its main intended |
* use is to mediate communication between process-level code and irq/NMI |
* handlers, all running on the same CPU. |
*/ |
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) |
#endif /* __LINUX_COMPILER_H */ |
/drivers/include/linux/errno.h |
---|
0,0 → 1,114 |
#ifndef _ASM_GENERIC_ERRNO_H |
#define _ASM_GENERIC_ERRNO_H |
#include <errno-base.h> |
#define ERESTARTSYS 512 |
#define EDEADLK 35 /* Resource deadlock would occur */ |
#define ENAMETOOLONG 36 /* File name too long */ |
#define ENOLCK 37 /* No record locks available */ |
#define ENOSYS 38 /* Function not implemented */ |
#define ENOTEMPTY 39 /* Directory not empty */ |
#define ELOOP 40 /* Too many symbolic links encountered */ |
#define EWOULDBLOCK EAGAIN /* Operation would block */ |
#define ENOMSG 42 /* No message of desired type */ |
#define EIDRM 43 /* Identifier removed */ |
#define ECHRNG 44 /* Channel number out of range */ |
#define EL2NSYNC 45 /* Level 2 not synchronized */ |
#define EL3HLT 46 /* Level 3 halted */ |
#define EL3RST 47 /* Level 3 reset */ |
#define ELNRNG 48 /* Link number out of range */ |
#define EUNATCH 49 /* Protocol driver not attached */ |
#define ENOCSI 50 /* No CSI structure available */ |
#define EL2HLT 51 /* Level 2 halted */ |
#define EBADE 52 /* Invalid exchange */ |
#define EBADR 53 /* Invalid request descriptor */ |
#define EXFULL 54 /* Exchange full */ |
#define ENOANO 55 /* No anode */ |
#define EBADRQC 56 /* Invalid request code */ |
#define EBADSLT 57 /* Invalid slot */ |
#define EDEADLOCK EDEADLK |
#define EBFONT 59 /* Bad font file format */ |
#define ENOSTR 60 /* Device not a stream */ |
#define ENODATA 61 /* No data available */ |
#define ETIME 62 /* Timer expired */ |
#define ENOSR 63 /* Out of streams resources */ |
#define ENONET 64 /* Machine is not on the network */ |
#define ENOPKG 65 /* Package not installed */ |
#define EREMOTE 66 /* Object is remote */ |
#define ENOLINK 67 /* Link has been severed */ |
#define EADV 68 /* Advertise error */ |
#define ESRMNT 69 /* Srmount error */ |
#define ECOMM 70 /* Communication error on send */ |
#define EPROTO 71 /* Protocol error */ |
#define EMULTIHOP 72 /* Multihop attempted */ |
#define EDOTDOT 73 /* RFS specific error */ |
#define EBADMSG 74 /* Not a data message */ |
#define EOVERFLOW 75 /* Value too large for defined data type */ |
#define ENOTUNIQ 76 /* Name not unique on network */ |
#define EBADFD 77 /* File descriptor in bad state */ |
#define EREMCHG 78 /* Remote address changed */ |
#define ELIBACC 79 /* Can not access a needed shared library */ |
#define ELIBBAD 80 /* Accessing a corrupted shared library */ |
#define ELIBSCN 81 /* .lib section in a.out corrupted */ |
#define ELIBMAX 82 /* Attempting to link in too many shared libraries */ |
#define ELIBEXEC 83 /* Cannot exec a shared library directly */ |
#define EILSEQ 84 /* Illegal byte sequence */ |
#define ERESTART 85 /* Interrupted system call should be restarted */ |
#define ESTRPIPE 86 /* Streams pipe error */ |
#define EUSERS 87 /* Too many users */ |
#define ENOTSOCK 88 /* Socket operation on non-socket */ |
#define EDESTADDRREQ 89 /* Destination address required */ |
#define EMSGSIZE 90 /* Message too long */ |
#define EPROTOTYPE 91 /* Protocol wrong type for socket */ |
#define ENOPROTOOPT 92 /* Protocol not available */ |
#define EPROTONOSUPPORT 93 /* Protocol not supported */ |
#define ESOCKTNOSUPPORT 94 /* Socket type not supported */ |
#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */ |
#define EPFNOSUPPORT 96 /* Protocol family not supported */ |
#define EAFNOSUPPORT 97 /* Address family not supported by protocol */ |
#define EADDRINUSE 98 /* Address already in use */ |
#define EADDRNOTAVAIL 99 /* Cannot assign requested address */ |
#define ENETDOWN 100 /* Network is down */ |
#define ENETUNREACH 101 /* Network is unreachable */ |
#define ENETRESET 102 /* Network dropped connection because of reset */ |
#define ECONNABORTED 103 /* Software caused connection abort */ |
#define ECONNRESET 104 /* Connection reset by peer */ |
#define ENOBUFS 105 /* No buffer space available */ |
#define EISCONN 106 /* Transport endpoint is already connected */ |
#define ENOTCONN 107 /* Transport endpoint is not connected */ |
#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */ |
#define ETOOMANYREFS 109 /* Too many references: cannot splice */ |
#define ETIMEDOUT 110 /* Connection timed out */ |
#define ECONNREFUSED 111 /* Connection refused */ |
#define EHOSTDOWN 112 /* Host is down */ |
#define EHOSTUNREACH 113 /* No route to host */ |
#define EALREADY 114 /* Operation already in progress */ |
#define EINPROGRESS 115 /* Operation now in progress */ |
#define ESTALE 116 /* Stale NFS file handle */ |
#define EUCLEAN 117 /* Structure needs cleaning */ |
#define ENOTNAM 118 /* Not a XENIX named type file */ |
#define ENAVAIL 119 /* No XENIX semaphores available */ |
#define EISNAM 120 /* Is a named type file */ |
#define EREMOTEIO 121 /* Remote I/O error */ |
#define EDQUOT 122 /* Quota exceeded */ |
#define ENOMEDIUM 123 /* No medium found */ |
#define EMEDIUMTYPE 124 /* Wrong medium type */ |
#define ECANCELED 125 /* Operation Canceled */ |
#define ENOKEY 126 /* Required key not available */ |
#define EKEYEXPIRED 127 /* Key has expired */ |
#define EKEYREVOKED 128 /* Key has been revoked */ |
#define EKEYREJECTED 129 /* Key was rejected by service */ |
/* for robust mutexes */ |
#define EOWNERDEAD 130 /* Owner died */ |
#define ENOTRECOVERABLE 131 /* State not recoverable */ |
#define ERFKILL 132 /* Operation not possible due to RF-kill */ |
#endif |
/drivers/include/linux/fb.h |
---|
0,0 → 1,1055 |
#ifndef _LINUX_FB_H |
#define _LINUX_FB_H |
#include <linux/types.h> |
#include <list.h> |
#include <linux/i2c.h> |
struct dentry; |
/* Definitions of frame buffers */ |
#define FB_MAX 32 /* sufficient for now */ |
#define FB_TYPE_PACKED_PIXELS 0 /* Packed Pixels */ |
#define FB_TYPE_PLANES 1 /* Non interleaved planes */ |
#define FB_TYPE_INTERLEAVED_PLANES 2 /* Interleaved planes */ |
#define FB_TYPE_TEXT 3 /* Text/attributes */ |
#define FB_TYPE_VGA_PLANES 4 /* EGA/VGA planes */ |
#define FB_AUX_TEXT_MDA 0 /* Monochrome text */ |
#define FB_AUX_TEXT_CGA 1 /* CGA/EGA/VGA Color text */ |
#define FB_AUX_TEXT_S3_MMIO 2 /* S3 MMIO fasttext */ |
#define FB_AUX_TEXT_MGA_STEP16 3 /* MGA Millenium I: text, attr, 14 reserved bytes */ |
#define FB_AUX_TEXT_MGA_STEP8 4 /* other MGAs: text, attr, 6 reserved bytes */ |
#define FB_AUX_TEXT_SVGA_GROUP 8 /* 8-15: SVGA tileblit compatible modes */ |
#define FB_AUX_TEXT_SVGA_MASK 7 /* lower three bits says step */ |
#define FB_AUX_TEXT_SVGA_STEP2 8 /* SVGA text mode: text, attr */ |
#define FB_AUX_TEXT_SVGA_STEP4 9 /* SVGA text mode: text, attr, 2 reserved bytes */ |
#define FB_AUX_TEXT_SVGA_STEP8 10 /* SVGA text mode: text, attr, 6 reserved bytes */ |
#define FB_AUX_TEXT_SVGA_STEP16 11 /* SVGA text mode: text, attr, 14 reserved bytes */ |
#define FB_AUX_TEXT_SVGA_LAST 15 /* reserved up to 15 */ |
#define FB_AUX_VGA_PLANES_VGA4 0 /* 16 color planes (EGA/VGA) */ |
#define FB_AUX_VGA_PLANES_CFB4 1 /* CFB4 in planes (VGA) */ |
#define FB_AUX_VGA_PLANES_CFB8 2 /* CFB8 in planes (VGA) */ |
#define FB_VISUAL_MONO01 0 /* Monochr. 1=Black 0=White */ |
#define FB_VISUAL_MONO10 1 /* Monochr. 1=White 0=Black */ |
#define FB_VISUAL_TRUECOLOR 2 /* True color */ |
#define FB_VISUAL_PSEUDOCOLOR 3 /* Pseudo color (like atari) */ |
#define FB_VISUAL_DIRECTCOLOR 4 /* Direct color */ |
#define FB_VISUAL_STATIC_PSEUDOCOLOR 5 /* Pseudo color readonly */ |
#define FB_ACCEL_NONE 0 /* no hardware accelerator */ |
#define FB_ACCEL_ATARIBLITT 1 /* Atari Blitter */ |
#define FB_ACCEL_AMIGABLITT 2 /* Amiga Blitter */ |
#define FB_ACCEL_S3_TRIO64 3 /* Cybervision64 (S3 Trio64) */ |
#define FB_ACCEL_NCR_77C32BLT 4 /* RetinaZ3 (NCR 77C32BLT) */ |
#define FB_ACCEL_S3_VIRGE 5 /* Cybervision64/3D (S3 ViRGE) */ |
#define FB_ACCEL_ATI_MACH64GX 6 /* ATI Mach 64GX family */ |
#define FB_ACCEL_DEC_TGA 7 /* DEC 21030 TGA */ |
#define FB_ACCEL_ATI_MACH64CT 8 /* ATI Mach 64CT family */ |
#define FB_ACCEL_ATI_MACH64VT 9 /* ATI Mach 64CT family VT class */ |
#define FB_ACCEL_ATI_MACH64GT 10 /* ATI Mach 64CT family GT class */ |
#define FB_ACCEL_SUN_CREATOR 11 /* Sun Creator/Creator3D */ |
#define FB_ACCEL_SUN_CGSIX 12 /* Sun cg6 */ |
#define FB_ACCEL_SUN_LEO 13 /* Sun leo/zx */ |
#define FB_ACCEL_IMS_TWINTURBO 14 /* IMS Twin Turbo */ |
#define FB_ACCEL_3DLABS_PERMEDIA2 15 /* 3Dlabs Permedia 2 */ |
#define FB_ACCEL_MATROX_MGA2064W 16 /* Matrox MGA2064W (Millenium) */ |
#define FB_ACCEL_MATROX_MGA1064SG 17 /* Matrox MGA1064SG (Mystique) */ |
#define FB_ACCEL_MATROX_MGA2164W 18 /* Matrox MGA2164W (Millenium II) */ |
#define FB_ACCEL_MATROX_MGA2164W_AGP 19 /* Matrox MGA2164W (Millenium II) */ |
#define FB_ACCEL_MATROX_MGAG100 20 /* Matrox G100 (Productiva G100) */ |
#define FB_ACCEL_MATROX_MGAG200 21 /* Matrox G200 (Myst, Mill, ...) */ |
#define FB_ACCEL_SUN_CG14 22 /* Sun cgfourteen */ |
#define FB_ACCEL_SUN_BWTWO 23 /* Sun bwtwo */ |
#define FB_ACCEL_SUN_CGTHREE 24 /* Sun cgthree */ |
#define FB_ACCEL_SUN_TCX 25 /* Sun tcx */ |
#define FB_ACCEL_MATROX_MGAG400 26 /* Matrox G400 */ |
#define FB_ACCEL_NV3 27 /* nVidia RIVA 128 */ |
#define FB_ACCEL_NV4 28 /* nVidia RIVA TNT */ |
#define FB_ACCEL_NV5 29 /* nVidia RIVA TNT2 */ |
#define FB_ACCEL_CT_6555x 30 /* C&T 6555x */ |
#define FB_ACCEL_3DFX_BANSHEE 31 /* 3Dfx Banshee */ |
#define FB_ACCEL_ATI_RAGE128 32 /* ATI Rage128 family */ |
#define FB_ACCEL_IGS_CYBER2000 33 /* CyberPro 2000 */ |
#define FB_ACCEL_IGS_CYBER2010 34 /* CyberPro 2010 */ |
#define FB_ACCEL_IGS_CYBER5000 35 /* CyberPro 5000 */ |
#define FB_ACCEL_SIS_GLAMOUR 36 /* SiS 300/630/540 */ |
#define FB_ACCEL_3DLABS_PERMEDIA3 37 /* 3Dlabs Permedia 3 */ |
#define FB_ACCEL_ATI_RADEON 38 /* ATI Radeon family */ |
#define FB_ACCEL_I810 39 /* Intel 810/815 */ |
#define FB_ACCEL_SIS_GLAMOUR_2 40 /* SiS 315, 650, 740 */ |
#define FB_ACCEL_SIS_XABRE 41 /* SiS 330 ("Xabre") */ |
#define FB_ACCEL_I830 42 /* Intel 830M/845G/85x/865G */ |
#define FB_ACCEL_NV_10 43 /* nVidia Arch 10 */ |
#define FB_ACCEL_NV_20 44 /* nVidia Arch 20 */ |
#define FB_ACCEL_NV_30 45 /* nVidia Arch 30 */ |
#define FB_ACCEL_NV_40 46 /* nVidia Arch 40 */ |
#define FB_ACCEL_XGI_VOLARI_V 47 /* XGI Volari V3XT, V5, V8 */ |
#define FB_ACCEL_XGI_VOLARI_Z 48 /* XGI Volari Z7 */ |
#define FB_ACCEL_OMAP1610 49 /* TI OMAP16xx */ |
#define FB_ACCEL_TRIDENT_TGUI 50 /* Trident TGUI */ |
#define FB_ACCEL_TRIDENT_3DIMAGE 51 /* Trident 3DImage */ |
#define FB_ACCEL_TRIDENT_BLADE3D 52 /* Trident Blade3D */ |
#define FB_ACCEL_TRIDENT_BLADEXP 53 /* Trident BladeXP */ |
#define FB_ACCEL_CIRRUS_ALPINE 53 /* Cirrus Logic 543x/544x/5480 */ |
#define FB_ACCEL_NEOMAGIC_NM2070 90 /* NeoMagic NM2070 */ |
#define FB_ACCEL_NEOMAGIC_NM2090 91 /* NeoMagic NM2090 */ |
#define FB_ACCEL_NEOMAGIC_NM2093 92 /* NeoMagic NM2093 */ |
#define FB_ACCEL_NEOMAGIC_NM2097 93 /* NeoMagic NM2097 */ |
#define FB_ACCEL_NEOMAGIC_NM2160 94 /* NeoMagic NM2160 */ |
#define FB_ACCEL_NEOMAGIC_NM2200 95 /* NeoMagic NM2200 */ |
#define FB_ACCEL_NEOMAGIC_NM2230 96 /* NeoMagic NM2230 */ |
#define FB_ACCEL_NEOMAGIC_NM2360 97 /* NeoMagic NM2360 */ |
#define FB_ACCEL_NEOMAGIC_NM2380 98 /* NeoMagic NM2380 */ |
#define FB_ACCEL_PXA3XX 99 /* PXA3xx */ |
#define FB_ACCEL_SAVAGE4 0x80 /* S3 Savage4 */ |
#define FB_ACCEL_SAVAGE3D 0x81 /* S3 Savage3D */ |
#define FB_ACCEL_SAVAGE3D_MV 0x82 /* S3 Savage3D-MV */ |
#define FB_ACCEL_SAVAGE2000 0x83 /* S3 Savage2000 */ |
#define FB_ACCEL_SAVAGE_MX_MV 0x84 /* S3 Savage/MX-MV */ |
#define FB_ACCEL_SAVAGE_MX 0x85 /* S3 Savage/MX */ |
#define FB_ACCEL_SAVAGE_IX_MV 0x86 /* S3 Savage/IX-MV */ |
#define FB_ACCEL_SAVAGE_IX 0x87 /* S3 Savage/IX */ |
#define FB_ACCEL_PROSAVAGE_PM 0x88 /* S3 ProSavage PM133 */ |
#define FB_ACCEL_PROSAVAGE_KM 0x89 /* S3 ProSavage KM133 */ |
#define FB_ACCEL_S3TWISTER_P 0x8a /* S3 Twister */ |
#define FB_ACCEL_S3TWISTER_K 0x8b /* S3 TwisterK */ |
#define FB_ACCEL_SUPERSAVAGE 0x8c /* S3 Supersavage */ |
#define FB_ACCEL_PROSAVAGE_DDR 0x8d /* S3 ProSavage DDR */ |
#define FB_ACCEL_PROSAVAGE_DDRK 0x8e /* S3 ProSavage DDR-K */ |
/*
 * Fixed framebuffer properties: values that do not change while a video
 * mode is set (memory layout, panning granularity, accel chip id).
 * Returned to userspace by FBIOGET_FSCREENINFO; layout is ABI.
 */
struct fb_fix_screeninfo {
	char id[16];			/* identification string eg "TT Builtin" */
	unsigned long smem_start;	/* Start of frame buffer mem */
					/* (physical address) */
	__u32 smem_len;			/* Length of frame buffer mem */
	__u32 type;			/* see FB_TYPE_* */
	__u32 type_aux;			/* Interleave for interleaved Planes */
	__u32 visual;			/* see FB_VISUAL_* */
	__u16 xpanstep;			/* zero if no hardware panning */
	__u16 ypanstep;			/* zero if no hardware panning */
	__u16 ywrapstep;		/* zero if no hardware ywrap */
	__u32 line_length;		/* length of a line in bytes */
	unsigned long mmio_start;	/* Start of Memory Mapped I/O */
					/* (physical address) */
	__u32 mmio_len;			/* Length of Memory Mapped I/O */
	__u32 accel;			/* Indicate to driver which */
					/* specific chip/card we have */
	__u16 reserved[3];		/* Reserved for future compatibility */
};
/* Interpretation of offset for color fields: All offsets are from the right,
 * inside a "pixel" value, which is exactly 'bits_per_pixel' wide (means: you
 * can use the offset as right argument to <<). A pixel afterwards is a bit
 * stream and is written to video memory as that unmodified.
 *
 * For pseudocolor: offset and length should be the same for all color
 * components. Offset specifies the position of the least significant bit
 * of the palette index in a pixel value. Length indicates the number
 * of available palette entries (i.e. # of entries = 1 << length).
 */
/* Describes where one color component lives inside a pixel value. */
struct fb_bitfield {
	__u32 offset;			/* beginning of bitfield */
	__u32 length;			/* length of bitfield */
	__u32 msb_right;		/* != 0 : Most significant bit is */
					/* right */
};
#define FB_NONSTD_HAM 1 /* Hold-And-Modify (HAM) */ |
#define FB_NONSTD_REV_PIX_IN_B 2 /* order of pixels in each byte is reversed */ |
#define FB_ACTIVATE_NOW 0 /* set values immediately (or vbl)*/ |
#define FB_ACTIVATE_NXTOPEN 1 /* activate on next open */ |
#define FB_ACTIVATE_TEST 2 /* don't set, round up impossible */ |
#define FB_ACTIVATE_MASK 15 |
/* values */ |
#define FB_ACTIVATE_VBL 16 /* activate values on next vbl */ |
#define FB_CHANGE_CMAP_VBL 32 /* change colormap on vbl */ |
#define FB_ACTIVATE_ALL 64 /* change all VCs on this fb */ |
#define FB_ACTIVATE_FORCE 128 /* force apply even when no change*/ |
#define FB_ACTIVATE_INV_MODE 256 /* invalidate videomode */ |
#define FB_ACCELF_TEXT 1 /* (OBSOLETE) see fb_info.flags and vc_mode */ |
#define FB_SYNC_HOR_HIGH_ACT 1 /* horizontal sync high active */ |
#define FB_SYNC_VERT_HIGH_ACT 2 /* vertical sync high active */ |
#define FB_SYNC_EXT 4 /* external sync */ |
#define FB_SYNC_COMP_HIGH_ACT 8 /* composite sync high active */ |
#define FB_SYNC_BROADCAST 16 /* broadcast video timings */ |
/* vtotal = 144d/288n/576i => PAL */ |
/* vtotal = 121d/242n/484i => NTSC */ |
#define FB_SYNC_ON_GREEN 32 /* sync on green */ |
#define FB_VMODE_NONINTERLACED 0 /* non interlaced */ |
#define FB_VMODE_INTERLACED 1 /* interlaced */ |
#define FB_VMODE_DOUBLE 2 /* double scan */ |
#define FB_VMODE_ODD_FLD_FIRST 4 /* interlaced: top line first */ |
#define FB_VMODE_MASK 255 |
#define FB_VMODE_YWRAP 256 /* ywrap instead of panning */ |
#define FB_VMODE_SMOOTH_XPAN 512 /* smooth xpan possible (internally used) */ |
#define FB_VMODE_CONUPDATE 512 /* don't update x/yoffset */ |
/* |
* Display rotation support |
*/ |
#define FB_ROTATE_UR 0 |
#define FB_ROTATE_CW 1 |
#define FB_ROTATE_UD 2 |
#define FB_ROTATE_CCW 3 |
#define PICOS2KHZ(a) (1000000000UL/(a)) |
#define KHZ2PICOS(a) (1000000000UL/(a)) |
/*
 * Variable framebuffer properties: the user-settable video mode
 * (resolution, pixel format, timings). Set/queried via
 * FBIOPUT/FBIOGET_VSCREENINFO; layout is ABI.
 */
struct fb_var_screeninfo {
	__u32 xres;			/* visible resolution */
	__u32 yres;
	__u32 xres_virtual;		/* virtual resolution */
	__u32 yres_virtual;
	__u32 xoffset;			/* offset from virtual to visible */
	__u32 yoffset;			/* resolution */

	__u32 bits_per_pixel;		/* guess what */
	__u32 grayscale;		/* != 0 Graylevels instead of colors */

	struct fb_bitfield red;		/* bitfield in fb mem if true color, */
	struct fb_bitfield green;	/* else only length is significant */
	struct fb_bitfield blue;
	struct fb_bitfield transp;	/* transparency */

	__u32 nonstd;			/* != 0 Non standard pixel format */

	__u32 activate;			/* see FB_ACTIVATE_* */

	__u32 height;			/* height of picture in mm */
	__u32 width;			/* width of picture in mm */

	__u32 accel_flags;		/* (OBSOLETE) see fb_info.flags */

	/* Timing: All values in pixclocks, except pixclock (of course) */
	__u32 pixclock;			/* pixel clock in ps (pico seconds) */
	__u32 left_margin;		/* time from sync to picture */
	__u32 right_margin;		/* time from picture to sync */
	__u32 upper_margin;		/* time from sync to picture */
	__u32 lower_margin;
	__u32 hsync_len;		/* length of horizontal sync */
	__u32 vsync_len;		/* length of vertical sync */
	__u32 sync;			/* see FB_SYNC_* */
	__u32 vmode;			/* see FB_VMODE_* */
	__u32 rotate;			/* angle we rotate counter clockwise */
	__u32 reserved[5];		/* Reserved for future compatibility */
};

/* A color map: per-entry 16-bit component tables starting at 'start'. */
struct fb_cmap {
	__u32 start;			/* First entry */
	__u32 len;			/* Number of entries */
	__u16 *red;			/* Red values */
	__u16 *green;
	__u16 *blue;
	__u16 *transp;			/* transparency, can be NULL */
};

/* Maps a virtual console number to a framebuffer device number. */
struct fb_con2fbmap {
	__u32 console;
	__u32 framebuffer;
};
/* VESA Blanking Levels */ |
#define VESA_NO_BLANKING 0 |
#define VESA_VSYNC_SUSPEND 1 |
#define VESA_HSYNC_SUSPEND 2 |
#define VESA_POWERDOWN 3 |
/*
 * Blanking levels passed to fb_blank()/FBIOBLANK. Each level maps onto a
 * VESA DPMS state, offset by one so that FB_BLANK_UNBLANK can stay 0.
 */
enum {
	/* screen: unblanked, hsync: on,  vsync: on */
	FB_BLANK_UNBLANK       = VESA_NO_BLANKING,

	/* screen: blanked,   hsync: on,  vsync: on */
	FB_BLANK_NORMAL        = VESA_NO_BLANKING + 1,

	/* screen: blanked,   hsync: on,  vsync: off */
	FB_BLANK_VSYNC_SUSPEND = VESA_VSYNC_SUSPEND + 1,

	/* screen: blanked,   hsync: off, vsync: on */
	FB_BLANK_HSYNC_SUSPEND = VESA_HSYNC_SUSPEND + 1,

	/* screen: blanked,   hsync: off, vsync: off */
	FB_BLANK_POWERDOWN     = VESA_POWERDOWN + 1
};
/* Flags for fb_vblank.flags (vertical/horizontal retrace status). */
#define FB_VBLANK_VBLANKING	0x001	/* currently in a vertical blank */
#define FB_VBLANK_HBLANKING	0x002	/* currently in a horizontal blank */
#define FB_VBLANK_HAVE_VBLANK	0x004	/* vertical blanks can be detected */
#define FB_VBLANK_HAVE_HBLANK	0x008	/* horizontal blanks can be detected */
#define FB_VBLANK_HAVE_COUNT	0x010	/* global retrace counter is available */
#define FB_VBLANK_HAVE_VCOUNT	0x020	/* the vcount field is valid */
#define FB_VBLANK_HAVE_HCOUNT	0x040	/* the hcount field is valid */
#define FB_VBLANK_VSYNCING	0x080	/* currently in a vsync */
#define FB_VBLANK_HAVE_VSYNC	0x100	/* vertical syncs can be detected */

/* Snapshot of the display's retrace state; validity of each field is
 * indicated by the FB_VBLANK_HAVE_* bits in 'flags'. */
struct fb_vblank {
	__u32 flags;			/* FB_VBLANK flags */
	__u32 count;			/* counter of retraces since boot */
	__u32 vcount;			/* current scanline position */
	__u32 hcount;			/* current scandot position */
	__u32 reserved[4];		/* reserved for future compatibility */
};
/* Internal HW accel */ |
#define ROP_COPY 0 |
#define ROP_XOR 1 |
/* Parameters for a screen-to-screen copy (see fb_ops.fb_copyarea). */
struct fb_copyarea {
	__u32 dx;			/* destination origin */
	__u32 dy;
	__u32 width;
	__u32 height;
	__u32 sx;			/* source origin */
	__u32 sy;
};

/* Parameters for a solid-fill of a rectangle (see fb_ops.fb_fillrect).
 * 'rop' is ROP_COPY or ROP_XOR. */
struct fb_fillrect {
	__u32 dx;			/* screen-relative */
	__u32 dy;
	__u32 width;
	__u32 height;
	__u32 color;
	__u32 rop;
};

/* A bitmap/pixmap to be blitted to the display (see fb_ops.fb_imageblit). */
struct fb_image {
	__u32 dx;			/* Where to place image */
	__u32 dy;
	__u32 width;			/* Size of image */
	__u32 height;
	__u32 fg_color;			/* Only used when a mono bitmap */
	__u32 bg_color;
	__u8  depth;			/* Depth of the image */
	const char *data;		/* Pointer to image data */
	struct fb_cmap cmap;		/* color map info */
};
/* |
* hardware cursor control |
*/ |
#define FB_CUR_SETIMAGE 0x01 |
#define FB_CUR_SETPOS 0x02 |
#define FB_CUR_SETHOT 0x04 |
#define FB_CUR_SETCMAP 0x08 |
#define FB_CUR_SETSHAPE 0x10 |
#define FB_CUR_SETSIZE 0x20 |
#define FB_CUR_SETALL 0xFF |
/* A 2D position, used for the hardware cursor. */
struct fbcurpos {
	__u16 x, y;
};

/* Hardware cursor state; 'set' is a mask of FB_CUR_SET* bits telling the
 * driver which of the following fields to apply. */
struct fb_cursor {
	__u16 set;			/* what to set */
	__u16 enable;			/* cursor on/off */
	__u16 rop;			/* bitop operation */
	const char *mask;		/* cursor mask bits */
	struct fbcurpos hot;		/* cursor hot spot */
	struct fb_image image;		/* Cursor image */
};
#ifdef CONFIG_FB_BACKLIGHT |
/* Settings for the generic backlight code */ |
#define FB_BACKLIGHT_LEVELS 128 |
#define FB_BACKLIGHT_MAX 0xFF |
#endif |
//#ifdef __KERNEL__ |
//#include <linux/fs.h> |
//#include <linux/init.h> |
//#include <linux/device.h> |
//#include <linux/workqueue.h> |
//#include <linux/notifier.h> |
#include <linux/list.h> |
//#include <linux/backlight.h> |
//#include <asm/io.h> |
//struct vm_area_struct; |
//struct fb_info; |
//struct device; |
//struct file; |
/* Definitions below are used in the parsed monitor specs */ |
#define FB_DPMS_ACTIVE_OFF 1 |
#define FB_DPMS_SUSPEND 2 |
#define FB_DPMS_STANDBY 4 |
#define FB_DISP_DDI 1 |
#define FB_DISP_ANA_700_300 2 |
#define FB_DISP_ANA_714_286 4 |
#define FB_DISP_ANA_1000_400 8 |
#define FB_DISP_ANA_700_000 16 |
#define FB_DISP_MONO 32 |
#define FB_DISP_RGB 64 |
#define FB_DISP_MULTI 128 |
#define FB_DISP_UNKNOWN 256 |
#define FB_SIGNAL_NONE 0 |
#define FB_SIGNAL_BLANK_BLANK 1 |
#define FB_SIGNAL_SEPARATE 2 |
#define FB_SIGNAL_COMPOSITE 4 |
#define FB_SIGNAL_SYNC_ON_GREEN 8 |
#define FB_SIGNAL_SERRATION_ON 16 |
#define FB_MISC_PRIM_COLOR 1 |
#define FB_MISC_1ST_DETAIL 2 /* First Detailed Timing is preferred */ |
/* CIE chromaticity coordinates of the monitor's primaries and white
 * point, expressed as fractions of 1024 (parsed from EDID). */
struct fb_chroma {
	__u32 redx;			/* in fraction of 1024 */
	__u32 greenx;
	__u32 bluex;
	__u32 whitex;
	__u32 redy;
	__u32 greeny;
	__u32 bluey;
	__u32 whitey;
};

/* Monitor capabilities, typically parsed from the display's EDID block. */
struct fb_monspecs {
	struct fb_chroma chroma;
	struct fb_videomode *modedb;	/* mode database */
	__u8  manufacturer[4];		/* Manufacturer */
	__u8  monitor[14];		/* Monitor String */
	__u8  serial_no[14];		/* Serial Number */
	__u8  ascii[14];		/* ? */
	__u32 modedb_len;		/* mode database length */
	__u32 model;			/* Monitor Model */
	__u32 serial;			/* Serial Number - Integer */
	__u32 year;			/* Year manufactured */
	__u32 week;			/* Week Manufactured */
	__u32 hfmin;			/* hfreq lower limit (Hz) */
	__u32 hfmax;			/* hfreq upper limit (Hz) */
	__u32 dclkmin;			/* pixelclock lower limit (Hz) */
	__u32 dclkmax;			/* pixelclock upper limit (Hz) */
	__u16 input;			/* display type - see FB_DISP_* */
	__u16 dpms;			/* DPMS support - see FB_DPMS_ */
	__u16 signal;			/* Signal Type - see FB_SIGNAL_* */
	__u16 vfmin;			/* vfreq lower limit (Hz) */
	__u16 vfmax;			/* vfreq upper limit (Hz) */
	__u16 gamma;			/* Gamma - in fractions of 100 */
	__u16 gtf	: 1;		/* supports GTF */
	__u16 misc;			/* Misc flags - see FB_MISC_* */
	__u8  version;			/* EDID version... */
	__u8  revision;			/* ...and revision */
	__u8  max_x;			/* Maximum horizontal size (cm) */
	__u8  max_y;			/* Maximum vertical size (cm) */
};
/* Userspace-pointer variant of struct fb_cmap (tables live in user memory). */
struct fb_cmap_user {
	__u32 start;			/* First entry */
	__u32 len;			/* Number of entries */
	__u16 __user *red;		/* Red values */
	__u16 __user *green;
	__u16 __user *blue;
	__u16 __user *transp;		/* transparency, can be NULL */
};

/* Userspace-pointer variant of struct fb_image. */
struct fb_image_user {
	__u32 dx;			/* Where to place image */
	__u32 dy;
	__u32 width;			/* Size of image */
	__u32 height;
	__u32 fg_color;			/* Only used when a mono bitmap */
	__u32 bg_color;
	__u8  depth;			/* Depth of the image */
	const char __user *data;	/* Pointer to image data */
	struct fb_cmap_user cmap;	/* color map info */
};

/* Userspace-pointer variant of struct fb_cursor. */
struct fb_cursor_user {
	__u16 set;			/* what to set */
	__u16 enable;			/* cursor on/off */
	__u16 rop;			/* bitop operation */
	const char __user *mask;	/* cursor mask bits */
	struct fbcurpos hot;		/* cursor hot spot */
	struct fb_image_user image;	/* Cursor image */
};
/*
 *	Register/unregister for framebuffer events
 */

/* The resolution of the passed in fb_info about to change */
#define FB_EVENT_MODE_CHANGE		0x01
/* The display on this fb_info is being suspended, no access to the
 * framebuffer is allowed any more after that call returns
 */
#define FB_EVENT_SUSPEND		0x02
/* The display on this fb_info was resumed, you can restore the display
 * if you own it
 */
#define FB_EVENT_RESUME			0x03
/* An entry from the modelist was removed */
#define FB_EVENT_MODE_DELETE            0x04
/* A driver registered itself */
#define FB_EVENT_FB_REGISTERED          0x05
/* A driver unregistered itself */
#define FB_EVENT_FB_UNREGISTERED        0x06
/* CONSOLE-SPECIFIC: get console to framebuffer mapping */
#define FB_EVENT_GET_CONSOLE_MAP        0x07
/* CONSOLE-SPECIFIC: set console to framebuffer mapping */
#define FB_EVENT_SET_CONSOLE_MAP        0x08
/* A hardware display blank change occurred */
#define FB_EVENT_BLANK                  0x09
/* Private modelist is to be replaced */
#define FB_EVENT_NEW_MODELIST           0x0A
/* The resolution of the passed in fb_info about to change and
   all vc's should be changed */
#define FB_EVENT_MODE_CHANGE_ALL	0x0B
/* A software display blank change occurred */
#define FB_EVENT_CONBLANK               0x0C
/* Get drawing requirements */
#define FB_EVENT_GET_REQ                0x0D
/* Unbind from the console if possible */
#define FB_EVENT_FB_UNBIND              0x0E

/* Payload delivered to framebuffer event notifier callbacks; 'data' is
 * event-specific. */
struct fb_event {
	struct fb_info *info;
	void *data;
};

/* Blit capabilities/requirements exchanged via fb_ops.fb_get_caps and
 * FB_EVENT_GET_REQ. */
struct fb_blit_caps {
	u32 x;
	u32 y;
	u32 len;
	u32 flags;
};
/* |
* Pixmap structure definition |
* |
* The purpose of this structure is to translate data |
* from the hardware independent format of fbdev to what |
* format the hardware needs. |
*/ |
#define FB_PIXMAP_DEFAULT 1 /* used internally by fbcon */ |
#define FB_PIXMAP_SYSTEM 2 /* memory is in system RAM */ |
#define FB_PIXMAP_IO 4 /* memory is iomapped */ |
#define FB_PIXMAP_SYNC 256 /* set if GPU can DMA */ |
/* Staging buffer used to convert drawing data from fbdev's hardware-
 * independent format into what the hardware needs; see FB_PIXMAP_*. */
struct fb_pixmap {
	u8  *addr;		/* pointer to memory			*/
	u32 size;		/* size of buffer in bytes		*/
	u32 offset;		/* current offset to buffer		*/
	u32 buf_align;		/* byte alignment of each bitmap	*/
	u32 scan_align;		/* alignment per scanline		*/
	u32 access_align;	/* alignment per read/write (bits)	*/
	u32 flags;		/* see FB_PIXMAP_*			*/
	u32 blit_x;		/* supported bit block dimensions (1-32)*/
	u32 blit_y;		/* Format: blit_x = 1 << (width - 1)	*/
				/*         blit_y = 1 << (height - 1)	*/
				/* if 0, will be set to 0xffffffff (all)*/
	/* access methods */
	void (*writeio)(struct fb_info *info, void __iomem *dst, void *src, unsigned int size);
	void (*readio) (struct fb_info *info, void *dst, void __iomem *src, unsigned int size);
};

#ifdef CONFIG_FB_DEFERRED_IO
/* State for deferred (batched) framebuffer I/O: touched pages are
 * collected and flushed by the deferred_io callback after 'delay'. */
struct fb_deferred_io {
	/* delay between mkwrite and deferred handler */
	unsigned long delay;
	struct mutex lock;		/* mutex that protects the page list */
	struct list_head pagelist;	/* list of touched pages */
	/* callback */
	void (*deferred_io)(struct fb_info *info, struct list_head *pagelist);
};
#endif
/* |
* Frame buffer operations |
* |
* LOCKING NOTE: those functions must _ALL_ be called with the console |
* semaphore held, this is the only suitable locking mechanism we have |
* in 2.6. Some may be called at interrupt time at this point though. |
*/ |
/*
 * Driver operations table for a framebuffer device. Only fb_check_var,
 * fb_set_par and the drawing ops are commonly mandatory; most entries may
 * be NULL, in which case generic fallbacks are used (see the LOCKING NOTE
 * above: all of these are called with the console semaphore held).
 */
struct fb_ops {
	/* open/release and usage marking */
	struct module *owner;
	int (*fb_open)(struct fb_info *info, int user);
	int (*fb_release)(struct fb_info *info, int user);

	/* For framebuffers with strange non linear layouts or that do not
	 * work with normal memory mapped access
	 */
	ssize_t (*fb_read)(struct fb_info *info, char __user *buf,
			   size_t count, loff_t *ppos);
	ssize_t (*fb_write)(struct fb_info *info, const char __user *buf,
			    size_t count, loff_t *ppos);

	/* checks var and eventually tweaks it to something supported,
	 * DO NOT MODIFY PAR */
	int (*fb_check_var)(struct fb_var_screeninfo *var, struct fb_info *info);

	/* set the video mode according to info->var */
	int (*fb_set_par)(struct fb_info *info);

	/* set color register */
	int (*fb_setcolreg)(unsigned regno, unsigned red, unsigned green,
			    unsigned blue, unsigned transp, struct fb_info *info);

	/* set color registers in batch */
	int (*fb_setcmap)(struct fb_cmap *cmap, struct fb_info *info);

	/* blank display */
	int (*fb_blank)(int blank, struct fb_info *info);

	/* pan display */
	int (*fb_pan_display)(struct fb_var_screeninfo *var, struct fb_info *info);

	/* Draws a rectangle */
	void (*fb_fillrect) (struct fb_info *info, const struct fb_fillrect *rect);
	/* Copy data from area to another */
	void (*fb_copyarea) (struct fb_info *info, const struct fb_copyarea *region);
	/* Draws a image to the display */
	void (*fb_imageblit) (struct fb_info *info, const struct fb_image *image);

	/* Draws cursor */
	int (*fb_cursor) (struct fb_info *info, struct fb_cursor *cursor);

	/* Rotates the display */
	void (*fb_rotate)(struct fb_info *info, int angle);

	/* wait for blit idle, optional */
	int (*fb_sync)(struct fb_info *info);

	/* perform fb specific ioctl (optional) */
	int (*fb_ioctl)(struct fb_info *info, unsigned int cmd,
			unsigned long arg);

	/* Handle 32bit compat ioctl (optional) */
	int (*fb_compat_ioctl)(struct fb_info *info, unsigned cmd,
			unsigned long arg);

	/* perform fb specific mmap */
	/* NOTE(review): fb_mmap is compiled out in this port. */
//	int (*fb_mmap)(struct fb_info *info, struct vm_area_struct *vma);

	/* get capability given var */
	void (*fb_get_caps)(struct fb_info *info, struct fb_blit_caps *caps,
			    struct fb_var_screeninfo *var);

	/* teardown any resources to do with this framebuffer */
	void (*fb_destroy)(struct fb_info *info);
};
#ifdef CONFIG_FB_TILEBLITTING |
#define FB_TILE_CURSOR_NONE 0 |
#define FB_TILE_CURSOR_UNDERLINE 1 |
#define FB_TILE_CURSOR_LOWER_THIRD 2 |
#define FB_TILE_CURSOR_LOWER_HALF 3 |
#define FB_TILE_CURSOR_TWO_THIRDS 4 |
#define FB_TILE_CURSOR_BLOCK 5 |
/* The set of tiles (character-cell bitmaps) available for tile blitting. */
struct fb_tilemap {
	__u32 width;			/* width of each tile in pixels */
	__u32 height;			/* height of each tile in scanlines */
	__u32 depth;			/* color depth of each tile */
	__u32 length;			/* number of tiles in the map */
	const __u8 *data;		/* actual tile map: a bitmap array, packed
					   to the nearest byte */
};

/* Fill a rectangle of tiles with one tile index. */
struct fb_tilerect {
	__u32 sx;			/* origin in the x-axis */
	__u32 sy;			/* origin in the y-axis */
	__u32 width;			/* number of tiles in the x-axis */
	__u32 height;			/* number of tiles in the y-axis */
	__u32 index;			/* what tile to use: index to tile map */
	__u32 fg;			/* foreground color */
	__u32 bg;			/* background color */
	__u32 rop;			/* raster operation */
};

/* Copy a rectangular region of tiles from one area to another. */
struct fb_tilearea {
	__u32 sx;			/* source origin in the x-axis */
	__u32 sy;			/* source origin in the y-axis */
	__u32 dx;			/* destination origin in the x-axis */
	__u32 dy;			/* destination origin in the y-axis */
	__u32 width;			/* number of tiles in the x-axis */
	__u32 height;			/* number of tiles in the y-axis */
};

/* Blit an array of tile indices into a rectangle of tiles. */
struct fb_tileblit {
	__u32 sx;			/* origin in the x-axis */
	__u32 sy;			/* origin in the y-axis */
	__u32 width;			/* number of tiles in the x-axis */
	__u32 height;			/* number of tiles in the y-axis */
	__u32 fg;			/* foreground color */
	__u32 bg;			/* background color */
	__u32 length;			/* number of tiles to draw */
	__u32 *indices;			/* array of indices to tile map */
};

/* Draw or erase the tile-mode cursor. */
struct fb_tilecursor {
	__u32 sx;			/* cursor position in the x-axis */
	__u32 sy;			/* cursor position in the y-axis */
	__u32 mode;			/* 0 = erase, 1 = draw */
	__u32 shape;			/* see FB_TILE_CURSOR_* */
	__u32 fg;			/* foreground color */
	__u32 bg;			/* background color */
};

/* Driver operations for tile blitting (fbcon text acceleration). */
struct fb_tile_ops {
	/* set tile characteristics */
	void (*fb_settile)(struct fb_info *info, struct fb_tilemap *map);

	/* all dimensions from hereon are in terms of tiles */

	/* move a rectangular region of tiles from one area to another*/
	void (*fb_tilecopy)(struct fb_info *info, struct fb_tilearea *area);
	/* fill a rectangular region with a tile */
	void (*fb_tilefill)(struct fb_info *info, struct fb_tilerect *rect);
	/* copy an array of tiles */
	void (*fb_tileblit)(struct fb_info *info, struct fb_tileblit *blit);
	/* cursor */
	void (*fb_tilecursor)(struct fb_info *info,
			      struct fb_tilecursor *cursor);
	/* get maximum length of the tile map */
	int (*fb_get_tilemax)(struct fb_info *info);
};
#endif /* CONFIG_FB_TILEBLITTING */ |
/* FBINFO_* = fb_info.flags bit flags */ |
#define FBINFO_MODULE 0x0001 /* Low-level driver is a module */ |
#define FBINFO_HWACCEL_DISABLED 0x0002 |
/* When FBINFO_HWACCEL_DISABLED is set: |
* Hardware acceleration is turned off. Software implementations |
* of required functions (copyarea(), fillrect(), and imageblit()) |
* takes over; acceleration engine should be in a quiescent state */ |
/* hints */ |
#define FBINFO_PARTIAL_PAN_OK 0x0040 /* otw use pan only for double-buffering */ |
#define FBINFO_READS_FAST 0x0080 /* soft-copy faster than rendering */ |
/* hardware supported ops */ |
/* semantics: when a bit is set, it indicates that the operation is |
* accelerated by hardware. |
* required functions will still work even if the bit is not set. |
* optional functions may not even exist if the flag bit is not set. |
*/ |
#define FBINFO_HWACCEL_NONE 0x0000 |
#define FBINFO_HWACCEL_COPYAREA 0x0100 /* required */ |
#define FBINFO_HWACCEL_FILLRECT 0x0200 /* required */ |
#define FBINFO_HWACCEL_IMAGEBLIT 0x0400 /* required */ |
#define FBINFO_HWACCEL_ROTATE 0x0800 /* optional */ |
#define FBINFO_HWACCEL_XPAN 0x1000 /* optional */ |
#define FBINFO_HWACCEL_YPAN 0x2000 /* optional */ |
#define FBINFO_HWACCEL_YWRAP 0x4000 /* optional */ |
#define FBINFO_MISC_USEREVENT 0x10000 /* event request |
from userspace */ |
#define FBINFO_MISC_TILEBLITTING 0x20000 /* use tile blitting */ |
#define FBINFO_MISC_FIRMWARE 0x40000 /* a replaceable firmware |
inited framebuffer */ |
/* A driver may set this flag to indicate that it does want a set_par to be |
* called every time when fbcon_switch is executed. The advantage is that with |
* this flag set you can really be sure that set_par is always called before |
* any of the functions dependant on the correct hardware state or altering |
* that state, even if you are using some broken X releases. The disadvantage |
* is that it introduces unwanted delays to every console switch if set_par |
* is slow. It is a good idea to try this flag in the drivers initialization |
* code whenever there is a bug report related to switching between X and the |
* framebuffer console. |
*/ |
#define FBINFO_MISC_ALWAYS_SETPAR 0x40000 |
/* |
* Host and GPU endianness differ. |
*/ |
#define FBINFO_FOREIGN_ENDIAN 0x100000 |
/* |
* Big endian math. This is the same flags as above, but with different |
* meaning, it is set by the fb subsystem depending FOREIGN_ENDIAN flag |
* and host endianness. Drivers should not use this flag. |
*/ |
#define FBINFO_BE_MATH 0x100000 |
/*
 * Per-device framebuffer state: current mode (var/fix), driver ops, and
 * mapped video memory. NOTE(review): several fields of the upstream Linux
 * struct (locks, cmap, pixmaps, device pointers) are compiled out in this
 * port — the commented-out members are kept to document the divergence.
 */
struct fb_info {
	int node;			/* minor number / index in registered_fb */
	int flags;			/* FBINFO_* bit flags */
//	struct mutex lock;		/* Lock for open/release/ioctl funcs */
//	struct mutex mm_lock;		/* Lock for fb_mmap and smem_* fields */
	struct fb_var_screeninfo var;	/* Current var */
	struct fb_fix_screeninfo fix;	/* Current fix */
	struct fb_monspecs monspecs;	/* Current Monitor specs */
//	struct work_struct queue;	/* Framebuffer event queue */
//	struct fb_pixmap pixmap;	/* Image hardware mapper */
//	struct fb_pixmap sprite;	/* Cursor hardware mapper */
//	struct fb_cmap cmap;		/* Current cmap */
	struct list_head modelist;      /* mode list */
	struct fb_videomode *mode;	/* current mode */

#ifdef CONFIG_FB_BACKLIGHT
	/* assigned backlight device */
	/* set before framebuffer registration,
	   remove after unregister */
	struct backlight_device *bl_dev;

	/* Backlight level curve */
	struct mutex bl_curve_mutex;
	u8 bl_curve[FB_BACKLIGHT_LEVELS];
#endif
#ifdef CONFIG_FB_DEFERRED_IO
	struct delayed_work deferred_work;
	struct fb_deferred_io *fbdefio;
#endif

	struct fb_ops *fbops;
//	struct device *device;		/* This is the parent */
//	struct device *dev;		/* This is this fb device */
	int class_flag;                 /* private sysfs flags */
#ifdef CONFIG_FB_TILEBLITTING
	struct fb_tile_ops *tileops;    /* Tile Blitting */
#endif
	char __iomem *screen_base;	/* Virtual address */
	unsigned long screen_size;	/* Amount of ioremapped VRAM or 0 */
	void *pseudo_palette;		/* Fake palette of 16 colors */
#define FBINFO_STATE_RUNNING	0
#define FBINFO_STATE_SUSPENDED	1
	u32 state;			/* Hardware state i.e suspend */
	void *fbcon_par;                /* fbcon use-only private area */
	/* From here on everything is device dependent */
	void *par;
	/* we need the PCI or similar aperture base/size not
	   smem_start/size as smem_start may just be an object
	   allocated inside the aperture so may not actually overlap */
	resource_size_t aperture_base;
	resource_size_t aperture_size;
};
#ifdef MODULE |
#define FBINFO_DEFAULT FBINFO_MODULE |
#else |
#define FBINFO_DEFAULT 0 |
#endif |
// This will go away |
#define FBINFO_FLAG_MODULE FBINFO_MODULE |
#define FBINFO_FLAG_DEFAULT FBINFO_DEFAULT |
/* This will go away |
* fbset currently hacks in FB_ACCELF_TEXT into var.accel_flags |
* when it wants to turn the acceleration engine on. This is |
* really a separate operation, and should be modified via sysfs. |
* But for now, we leave it broken with the following define |
*/ |
#define STUPID_ACCELF_TEXT_SHIT |
#define fb_readb(addr) (*(volatile u8 *) (addr)) |
#define fb_readw(addr) (*(volatile u16 *) (addr)) |
#define fb_readl(addr) (*(volatile u32 *) (addr)) |
#define fb_readq(addr) (*(volatile u64 *) (addr)) |
#define fb_writeb(b,addr) (*(volatile u8 *) (addr) = (b)) |
#define fb_writew(b,addr) (*(volatile u16 *) (addr) = (b)) |
#define fb_writel(b,addr) (*(volatile u32 *) (addr) = (b)) |
#define fb_writeq(b,addr) (*(volatile u64 *) (addr) = (b)) |
#define fb_memset memset |
#define FB_LEFT_POS(p, bpp) (fb_be_math(p) ? (32 - (bpp)) : 0) |
#define FB_SHIFT_HIGH(p, val, bits) (fb_be_math(p) ? (val) >> (bits) : \ |
(val) << (bits)) |
#define FB_SHIFT_LOW(p, val, bits) (fb_be_math(p) ? (val) << (bits) : \ |
(val) >> (bits)) |
/* |
* `Generic' versions of the frame buffer device operations |
*/ |
extern int fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var); |
extern int fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var); |
extern int fb_blank(struct fb_info *info, int blank); |
extern void cfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect); |
extern void cfb_copyarea(struct fb_info *info, const struct fb_copyarea *area); |
extern void cfb_imageblit(struct fb_info *info, const struct fb_image *image); |
/* |
* Drawing operations where framebuffer is in system RAM |
*/ |
extern void sys_fillrect(struct fb_info *info, const struct fb_fillrect *rect); |
extern void sys_copyarea(struct fb_info *info, const struct fb_copyarea *area); |
extern void sys_imageblit(struct fb_info *info, const struct fb_image *image); |
extern ssize_t fb_sys_read(struct fb_info *info, char __user *buf, |
size_t count, loff_t *ppos); |
extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf, |
size_t count, loff_t *ppos); |
/* drivers/video/fbmem.c */ |
extern int register_framebuffer(struct fb_info *fb_info); |
extern int unregister_framebuffer(struct fb_info *fb_info); |
extern int fb_prepare_logo(struct fb_info *fb_info, int rotate); |
extern int fb_show_logo(struct fb_info *fb_info, int rotate); |
extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size); |
extern void fb_pad_unaligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 idx, |
u32 height, u32 shift_high, u32 shift_low, u32 mod); |
extern void fb_pad_aligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 s_pitch, u32 height); |
extern void fb_set_suspend(struct fb_info *info, int state); |
extern int fb_get_color_depth(struct fb_var_screeninfo *var, |
struct fb_fix_screeninfo *fix); |
extern int fb_get_options(char *name, char **option); |
extern int fb_new_modelist(struct fb_info *info); |
extern struct fb_info *registered_fb[FB_MAX]; |
extern int num_registered_fb; |
extern struct class *fb_class; |
extern int lock_fb_info(struct fb_info *info); |
/*
 * unlock_fb_info - release the per-fb_info lock taken by lock_fb_info().
 *
 * In this port the locking is stubbed out: the mutex_unlock() call is
 * commented away, so this is intentionally a no-op kept only so callers
 * of lock_fb_info()/unlock_fb_info() still compile unchanged.
 */
static inline void unlock_fb_info(struct fb_info *info)
{
	// mutex_unlock(&info->lock);
}
/*
 * __fb_pad_aligned_buffer - copy a tightly packed buffer into one whose
 * rows are @d_pitch bytes apart.
 *
 * For each of @height rows, @s_pitch bytes are copied and the destination
 * pointer is then advanced past the per-row padding. The copy is done
 * byte-by-byte because s_pitch is a few bytes at the most, so a memcpy()
 * call would cost more than it saves.
 */
static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
					   u8 *src, u32 s_pitch, u32 height)
{
	u32 row, col;
	u32 pad = d_pitch - s_pitch;	/* trailing bytes per destination row */

	for (row = 0; row < height; row++) {
		for (col = 0; col < s_pitch; col++)
			*dst++ = *src++;
		dst += pad;
	}
}
/* drivers/video/fb_defio.c */ |
/*
 * fb_be_math - should drawing helpers use big-endian bit math for @info?
 *
 * Feeds the FB_LEFT_POS/FB_SHIFT_HIGH/FB_SHIFT_LOW macros above. With
 * CONFIG_FB_FOREIGN_ENDIAN the answer is either fixed at build time
 * (BIG/LITTLE) or chosen per device via the FBINFO_BE_MATH flag (BOTH);
 * otherwise it simply follows the CPU byte order.
 *
 * NOTE(review): if CONFIG_FB_FOREIGN_ENDIAN is defined but none of the
 * BOTH/BIG/LITTLE sub-options is, control falls off the end with no
 * return value - presumably the build config guarantees one is set;
 * confirm against the Kconfig that defines these symbols.
 */
static inline bool fb_be_math(struct fb_info *info)
{
#ifdef CONFIG_FB_FOREIGN_ENDIAN
#if defined(CONFIG_FB_BOTH_ENDIAN)
	/* Per-device choice: honour the FBINFO_BE_MATH flag. */
	return info->flags & FBINFO_BE_MATH;
#elif defined(CONFIG_FB_BIG_ENDIAN)
	return true;
#elif defined(CONFIG_FB_LITTLE_ENDIAN)
	return false;
#endif /* CONFIG_FB_BOTH_ENDIAN */
#else
#ifdef __BIG_ENDIAN
	return true;
#else
	return false;
#endif /* __BIG_ENDIAN */
#endif /* CONFIG_FB_FOREIGN_ENDIAN */
}
/* drivers/video/fbsysfs.c */ |
//extern struct fb_info *framebuffer_alloc(size_t size, struct device *dev); |
//extern void framebuffer_release(struct fb_info *info); |
//extern int fb_init_device(struct fb_info *fb_info); |
//extern void fb_cleanup_device(struct fb_info *head); |
//extern void fb_bl_default_curve(struct fb_info *fb_info, u8 off, u8 min, u8 max); |
/* drivers/video/fbmon.c */ |
#define FB_MAXTIMINGS 0 |
#define FB_VSYNCTIMINGS 1 |
#define FB_HSYNCTIMINGS 2 |
#define FB_DCLKTIMINGS 3 |
#define FB_IGNOREMON 0x100 |
#define FB_MODE_IS_UNKNOWN 0 |
#define FB_MODE_IS_DETAILED 1 |
#define FB_MODE_IS_STANDARD 2 |
#define FB_MODE_IS_VESA 4 |
#define FB_MODE_IS_CALCULATED 8 |
#define FB_MODE_IS_FIRST 16 |
#define FB_MODE_IS_FROM_VAR 32 |
extern int fbmon_dpms(const struct fb_info *fb_info); |
extern int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var, |
struct fb_info *info); |
extern int fb_validate_mode(const struct fb_var_screeninfo *var, |
struct fb_info *info); |
extern int fb_parse_edid(unsigned char *edid, struct fb_var_screeninfo *var); |
//extern const unsigned char *fb_firmware_edid(struct device *device); |
extern void fb_edid_to_monspecs(unsigned char *edid, |
struct fb_monspecs *specs); |
extern void fb_destroy_modedb(struct fb_videomode *modedb); |
extern int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb); |
extern unsigned char *fb_ddc_read(struct i2c_adapter *adapter); |
/* drivers/video/modedb.c */ |
#define VESA_MODEDB_SIZE 34 |
extern void fb_var_to_videomode(struct fb_videomode *mode, |
const struct fb_var_screeninfo *var); |
extern void fb_videomode_to_var(struct fb_var_screeninfo *var, |
const struct fb_videomode *mode); |
extern int fb_mode_is_equal(const struct fb_videomode *mode1, |
const struct fb_videomode *mode2); |
extern int fb_add_videomode(const struct fb_videomode *mode, |
struct list_head *head); |
extern void fb_delete_videomode(const struct fb_videomode *mode, |
struct list_head *head); |
extern const struct fb_videomode *fb_match_mode(const struct fb_var_screeninfo *var, |
struct list_head *head); |
extern const struct fb_videomode *fb_find_best_mode(const struct fb_var_screeninfo *var, |
struct list_head *head); |
extern const struct fb_videomode *fb_find_nearest_mode(const struct fb_videomode *mode, |
struct list_head *head); |
extern void fb_destroy_modelist(struct list_head *head); |
extern void fb_videomode_to_modelist(const struct fb_videomode *modedb, int num, |
struct list_head *head); |
extern const struct fb_videomode *fb_find_best_display(const struct fb_monspecs *specs, |
struct list_head *head); |
/* drivers/video/fbcmap.c */ |
extern int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp); |
extern void fb_dealloc_cmap(struct fb_cmap *cmap); |
extern int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to); |
extern int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to); |
extern int fb_set_cmap(struct fb_cmap *cmap, struct fb_info *fb_info); |
extern int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *fb_info); |
extern const struct fb_cmap *fb_default_cmap(int len); |
extern void fb_invert_cmaps(void); |
/*
 * struct fb_videomode - one video mode/timing entry.
 *
 * Converted to and from struct fb_var_screeninfo by fb_var_to_videomode()
 * and fb_videomode_to_var() above, and linked into mode lists via
 * struct fb_modelist below. Field units presumably match the
 * corresponding fb_var_screeninfo timing fields - confirm there.
 */
struct fb_videomode {
	const char *name;	/* optional */
	u32 refresh;		/* optional */
	u32 xres;
	u32 yres;
	u32 pixclock;
	u32 left_margin;
	u32 right_margin;
	u32 upper_margin;
	u32 lower_margin;
	u32 hsync_len;
	u32 vsync_len;
	u32 sync;
	u32 vmode;
	u32 flag;		/* FB_MODE_IS_* bits defined above */
};
extern const char *fb_mode_option; |
extern const struct fb_videomode vesa_modes[]; |
/*
 * struct fb_modelist - list node embedding one fb_videomode, used by the
 * modelist helpers (fb_videomode_to_modelist(), fb_destroy_modelist(), ...).
 */
struct fb_modelist {
	struct list_head list;		/* node in the per-info mode list */
	struct fb_videomode mode;
};
extern int fb_find_mode(struct fb_var_screeninfo *var, |
struct fb_info *info, const char *mode_option, |
const struct fb_videomode *db, |
unsigned int dbsize, |
const struct fb_videomode *default_mode, |
unsigned int default_bpp); |
#endif /* _LINUX_FB_H */ |
/drivers/include/linux/firmware.h |
---|
0,0 → 1,65 |
#ifndef _LINUX_FIRMWARE_H |
#define _LINUX_FIRMWARE_H |
#include <linux/module.h> |
#include <linux/types.h> |
//#include <linux/compiler.h> |
#define FW_ACTION_NOHOTPLUG 0 |
#define FW_ACTION_HOTPLUG 1 |
/* In-memory firmware image handed back by request_firmware(). */
struct firmware {
	size_t size;		/* number of bytes at @data */
	const u8 *data;
};
struct device; |
/*
 * Descriptor for a firmware blob linked into the image itself; entries
 * are placed in the .builtin_fw section by DECLARE_BUILTIN_FIRMWARE()
 * below.
 */
struct builtin_fw {
	char *name;		/* lookup key matched against request name */
	void *data;
	unsigned long size;
};
/* We have to play tricks here much like stringify() to get the |
__COUNTER__ macro to be expanded as we want it */ |
#define __fw_concat1(x, y) x##y |
#define __fw_concat(x, y) __fw_concat1(x, y) |
#define DECLARE_BUILTIN_FIRMWARE(name, blob) \ |
DECLARE_BUILTIN_FIRMWARE_SIZE(name, &(blob), sizeof(blob)) |
#define DECLARE_BUILTIN_FIRMWARE_SIZE(name, blob, size) \ |
static const struct builtin_fw __fw_concat(__builtin_fw,__COUNTER__) \ |
__used __section(.builtin_fw) = { name, blob, size } |
#if defined(CONFIG_FW_LOADER) || (defined(CONFIG_FW_LOADER_MODULE) && defined(MODULE)) |
int request_firmware(const struct firmware **fw, const char *name, |
struct device *device); |
int request_firmware_nowait( |
struct module *module, int uevent, |
const char *name, struct device *device, void *context, |
void (*cont)(const struct firmware *fw, void *context)); |
void release_firmware(const struct firmware *fw); |
#else |
/*
 * Fallback when the firmware loader is not available (CONFIG_FW_LOADER
 * unset, or built as a module while this code is built-in): always
 * fails with -EINVAL and never touches *@fw.
 */
static inline int request_firmware(const struct firmware **fw,
				   const char *name,
				   struct device *device)
{
	return -EINVAL;
}
/* Fallback: asynchronous load unsupported; fails without calling @cont. */
static inline int request_firmware_nowait(
	struct module *module, int uevent,
	const char *name, struct device *device, void *context,
	void (*cont)(const struct firmware *fw, void *context))
{
	return -EINVAL;
}
/* Fallback: nothing to free, since request_firmware() never succeeds. */
static inline void release_firmware(const struct firmware *fw)
{
}
#endif |
#endif |
/drivers/include/linux/i2c-algo-bit.h |
---|
0,0 → 1,51 |
/* ------------------------------------------------------------------------- */ |
/* i2c-algo-bit.h i2c driver algorithms for bit-shift adapters */ |
/* ------------------------------------------------------------------------- */ |
/* Copyright (C) 1995-99 Simon G. Vogl |
This program is free software; you can redistribute it and/or modify |
it under the terms of the GNU General Public License as published by |
the Free Software Foundation; either version 2 of the License, or |
(at your option) any later version. |
This program is distributed in the hope that it will be useful, |
but WITHOUT ANY WARRANTY; without even the implied warranty of |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
GNU General Public License for more details. |
You should have received a copy of the GNU General Public License |
along with this program; if not, write to the Free Software |
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ |
/* ------------------------------------------------------------------------- */ |
/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and even |
Frodo Looijaard <frodol@dds.nl> */ |
#ifndef _LINUX_I2C_ALGO_BIT_H |
#define _LINUX_I2C_ALGO_BIT_H |
/* --- Defines for bit-adapters --------------------------------------- */ |
/* |
* This struct contains the hw-dependent functions of bit-style adapters to |
* manipulate the line states, and to init any hw-specific features. This is |
* only used if you have more than one hw-type of adapter running. |
*/ |
/* Hardware hooks for one bit-banged i2c bus (see the comment above). */
struct i2c_algo_bit_data {
	void *data;		/* private data for lowlevel routines */
	void (*setsda) (void *data, int state);	/* drive SDA line */
	void (*setscl) (void *data, int state);	/* drive SCL line */
	int  (*getsda) (void *data);		/* sample SDA line */
	int  (*getscl) (void *data);		/* sample SCL line */

	/* local settings */
	int udelay;		/* half clock cycle time in us,
				   minimum 2 us for fast-mode I2C,
				   minimum 5 us for standard-mode I2C and SMBus,
				   maximum 50 us for SMBus */
	int timeout;		/* in jiffies */
};
int i2c_bit_add_bus(struct i2c_adapter *); |
int i2c_bit_add_numbered_bus(struct i2c_adapter *); |
#endif /* _LINUX_I2C_ALGO_BIT_H */ |
/drivers/include/linux/i2c-id.h |
---|
0,0 → 1,59 |
/* ------------------------------------------------------------------------- */ |
/* */ |
/* i2c-id.h - identifier values for i2c drivers and adapters */ |
/* */ |
/* ------------------------------------------------------------------------- */ |
/* Copyright (C) 1995-1999 Simon G. Vogl |
This program is free software; you can redistribute it and/or modify |
it under the terms of the GNU General Public License as published by |
the Free Software Foundation; either version 2 of the License, or |
(at your option) any later version. |
This program is distributed in the hope that it will be useful, |
but WITHOUT ANY WARRANTY; without even the implied warranty of |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
GNU General Public License for more details. |
You should have received a copy of the GNU General Public License |
along with this program; if not, write to the Free Software |
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ |
/* ------------------------------------------------------------------------- */ |
#ifndef LINUX_I2C_ID_H |
#define LINUX_I2C_ID_H |
/* Please note that I2C driver IDs are optional. They are only needed if a |
legacy chip driver needs to identify a bus or a bus driver needs to |
identify a legacy client. If you don't need them, just don't set them. */ |
/* |
* ---- Adapter types ---------------------------------------------------- |
*/ |
/* --- Bit algorithm adapters */ |
#define I2C_HW_B_BT848 0x010005 /* BT848 video boards */ |
#define I2C_HW_B_RIVA 0x010010 /* Riva based graphics cards */ |
#define I2C_HW_B_ZR36067 0x010019 /* Zoran-36057/36067 based boards */ |
#define I2C_HW_B_CX2388x 0x01001b /* connexant 2388x based tv cards */ |
#define I2C_HW_B_EM28XX 0x01001f /* em28xx video capture cards */ |
#define I2C_HW_B_CX2341X 0x010020 /* Conexant CX2341X MPEG encoder cards */ |
#define I2C_HW_B_CX23885 0x010022 /* conexant 23885 based tv cards (bus1) */ |
#define I2C_HW_B_AU0828 0x010023 /* auvitek au0828 usb bridge */ |
#define I2C_HW_B_CX231XX 0x010024 /* Conexant CX231XX USB based cards */ |
#define I2C_HW_B_HDPVR 0x010025 /* Hauppauge HD PVR */ |
/* --- SGI adapters */ |
#define I2C_HW_SGI_VINO 0x160000 |
/* --- SMBus only adapters */ |
#define I2C_HW_SMBUS_W9968CF 0x04000d |
#define I2C_HW_SMBUS_OV511 0x04000e /* OV511(+) USB 1.1 webcam ICs */ |
#define I2C_HW_SMBUS_OV518 0x04000f /* OV518(+) USB 1.1 webcam ICs */ |
#define I2C_HW_SMBUS_CAFE 0x040012 /* Marvell 88ALP01 "CAFE" cam */ |
/* --- Miscellaneous adapters */ |
#define I2C_HW_SAA7146 0x060000 /* SAA7146 video decoder bus */ |
#define I2C_HW_SAA7134 0x090000 /* SAA7134 video decoder bus */ |
#endif /* LINUX_I2C_ID_H */ |
/drivers/include/linux/i2c.h |
---|
0,0 → 1,299 |
/* ------------------------------------------------------------------------- */ |
/* */ |
/* i2c.h - definitions for the i2c-bus interface */ |
/* */ |
/* ------------------------------------------------------------------------- */ |
/* Copyright (C) 1995-2000 Simon G. Vogl |
This program is free software; you can redistribute it and/or modify |
it under the terms of the GNU General Public License as published by |
the Free Software Foundation; either version 2 of the License, or |
(at your option) any later version. |
This program is distributed in the hope that it will be useful, |
but WITHOUT ANY WARRANTY; without even the implied warranty of |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
GNU General Public License for more details. |
You should have received a copy of the GNU General Public License |
along with this program; if not, write to the Free Software |
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ |
/* ------------------------------------------------------------------------- */ |
/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and |
Frodo Looijaard <frodol@dds.nl> */ |
#ifndef _LINUX_I2C_H |
#define _LINUX_I2C_H |
#include <types.h> |
#include <list.h> |
#define I2C_NAME_SIZE 20 |
struct i2c_msg; |
struct i2c_algorithm; |
struct i2c_adapter; |
struct i2c_client; |
union i2c_smbus_data; |
/* Transfer num messages. |
*/ |
extern int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, |
int num); |
/** |
* struct i2c_client - represent an I2C slave device |
* @flags: I2C_CLIENT_TEN indicates the device uses a ten bit chip address; |
* I2C_CLIENT_PEC indicates it uses SMBus Packet Error Checking |
* @addr: Address used on the I2C bus connected to the parent adapter. |
* @name: Indicates the type of the device, usually a chip name that's |
* generic enough to hide second-sourcing and compatible revisions. |
* @adapter: manages the bus segment hosting this I2C device |
* @driver: device's driver, hence pointer to access routines |
* @dev: Driver model device node for the slave. |
* @irq: indicates the IRQ generated by this device (if any) |
* @detected: member of an i2c_driver.clients list or i2c-core's |
* userspace_devices list |
* |
* An i2c_client identifies a single device (i.e. chip) connected to an |
* i2c bus. The behaviour exposed to Linux is defined by the driver |
* managing the device. |
*/ |
/* See the kernel-doc block above for the full field descriptions. */
struct i2c_client {
	unsigned short flags;		/* div., see below */
	unsigned short addr;		/* chip address - NOTE: 7bit */
					/* addresses are stored in the */
					/* _LOWER_ 7 bits */
	char name[I2C_NAME_SIZE];	/* device type name */
	struct i2c_adapter *adapter;	/* the adapter we sit on */
//	struct i2c_driver *driver;	/* and our access routines */
//	struct device dev;		/* the device structure */
	int irq;			/* irq issued by device (or -1) */
	struct list_head detected;	/* node in a driver's clients list */
};
#define to_i2c_client(d) container_of(d, struct i2c_client, dev) |
/* |
* The following structs are for those who like to implement new bus drivers: |
* i2c_algorithm is the interface to a class of hardware solutions which can |
* be addressed using the same bus algorithms - i.e. bit-banging or the PCF8584 |
* to name two of the most common. |
*/ |
/* Operations implementing one class of i2c/SMBus hardware (see above). */
struct i2c_algorithm {
	/* If an adapter algorithm can't do I2C-level access, set master_xfer
	   to NULL. If an adapter algorithm can do SMBus access, set
	   smbus_xfer. If set to NULL, the SMBus protocol is simulated
	   using common I2C messages */
	/* master_xfer should return the number of messages successfully
	   processed, or a negative value on error */
	int (*master_xfer)(struct i2c_adapter *adap, struct i2c_msg *msgs,
			   int num);
	int (*smbus_xfer) (struct i2c_adapter *adap, u16 addr,
			   unsigned short flags, char read_write,
			   u8 command, int size, union i2c_smbus_data *data);

	/* To determine what the adapter supports */
	u32 (*functionality) (struct i2c_adapter *);
};
/* |
* i2c_adapter is the structure used to identify a physical i2c bus along |
* with the access algorithms necessary to access it. |
*/ |
/*
 * One physical i2c bus plus the algorithm used to drive it (see the
 * comment above). The struct device member present in Linux is stubbed
 * out in this port.
 */
struct i2c_adapter {
	unsigned int id;	/* legacy hw id, see i2c-id.h (optional) */
	unsigned int class;		  /* classes to allow probing for */
	const struct i2c_algorithm *algo; /* the algorithm to access the bus */
	void *algo_data;	/* algorithm-private data (e.g. bit-bang hooks) */

	/* data fields that are valid for all devices	*/
	u8 level; 			/* nesting level for lockdep */
	int timeout;			/* in jiffies */
	int retries;
//	struct device dev;		/* the adapter device */

	int nr;				/* bus number, see i2c_bit_add_numbered_bus() */
	char name[48];
};
#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev) |
/*flags for the client struct: */ |
#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */ |
#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */ |
/* Must equal I2C_M_TEN below */ |
#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */ |
/* i2c adapter classes (bitmask) */ |
#define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */ |
#define I2C_CLASS_TV_ANALOG (1<<1) /* bttv + friends */ |
#define I2C_CLASS_TV_DIGITAL (1<<2) /* dvb cards */ |
#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */ |
#define I2C_CLASS_SPD (1<<7) /* SPD EEPROMs and similar */ |
/* i2c_client_address_data is the struct for holding default client |
* addresses for a driver and for the parameters supplied on the |
* command line |
*/ |
/* All address lists below are I2C_CLIENT_END-terminated arrays. */
struct i2c_client_address_data {
	const unsigned short *normal_i2c;
	const unsigned short *probe;
	const unsigned short *ignore;
	const unsigned short * const *forces;
};
/* Internal numbers to terminate lists */ |
#define I2C_CLIENT_END 0xfffeU |
/* The numbers to use to set I2C bus address */ |
#define ANY_I2C_BUS 0xffff |
/* Construct an I2C_CLIENT_END-terminated array of i2c addresses */ |
#define I2C_ADDRS(addr, addrs...) \ |
((const unsigned short []){ addr, ## addrs, I2C_CLIENT_END }) |
/** |
* struct i2c_msg - an I2C transaction segment beginning with START |
* @addr: Slave address, either seven or ten bits. When this is a ten |
* bit address, I2C_M_TEN must be set in @flags and the adapter |
* must support I2C_FUNC_10BIT_ADDR. |
* @flags: I2C_M_RD is handled by all adapters. No other flags may be |
* provided unless the adapter exported the relevant I2C_FUNC_* |
* flags through i2c_check_functionality(). |
* @len: Number of data bytes in @buf being read from or written to the |
* I2C slave address. For read transactions where I2C_M_RECV_LEN |
* is set, the caller guarantees that this buffer can hold up to |
* 32 bytes in addition to the initial length byte sent by the |
* slave (plus, if used, the SMBus PEC); and this value will be |
* incremented by the number of block data bytes received. |
* @buf: The buffer into which data is read, or from which it's written. |
* |
* An i2c_msg is the low level representation of one segment of an I2C |
* transaction. It is visible to drivers in the @i2c_transfer() procedure, |
* to userspace from i2c-dev, and to I2C adapter drivers through the |
* @i2c_adapter.@master_xfer() method. |
* |
* Except when I2C "protocol mangling" is used, all I2C adapters implement |
* the standard rules for I2C transactions. Each transaction begins with a |
* START. That is followed by the slave address, and a bit encoding read |
* versus write. Then follow all the data bytes, possibly including a byte |
* with SMBus PEC. The transfer terminates with a NAK, or when all those |
* bytes have been transferred and ACKed. If this is the last message in a |
* group, it is followed by a STOP. Otherwise it is followed by the next |
* @i2c_msg transaction segment, beginning with a (repeated) START. |
* |
* Alternatively, when the adapter supports I2C_FUNC_PROTOCOL_MANGLING then |
* passing certain @flags may have changed those standard protocol behaviors. |
* Those flags are only for use with broken/nonconforming slaves, and with |
* adapters which are known to support the specific mangling options they |
* need (one or more of IGNORE_NAK, NO_RD_ACK, NOSTART, and REV_DIR_ADDR). |
*/ |
/*
 * One I2C transaction segment; the full contract is documented in the
 * kernel-doc block above. The I2C_M_* flag bits are defined inline so
 * they stay next to the @flags field they describe.
 */
struct i2c_msg {
	u16 addr;	/* slave address			*/
	u16 flags;
#define I2C_M_TEN		0x0010	/* this is a ten bit chip address */
#define I2C_M_RD		0x0001	/* read data, from slave to master */
#define I2C_M_NOSTART		0x4000	/* if I2C_FUNC_PROTOCOL_MANGLING */
#define I2C_M_REV_DIR_ADDR	0x2000	/* if I2C_FUNC_PROTOCOL_MANGLING */
#define I2C_M_IGNORE_NAK	0x1000	/* if I2C_FUNC_PROTOCOL_MANGLING */
#define I2C_M_NO_RD_ACK		0x0800	/* if I2C_FUNC_PROTOCOL_MANGLING */
#define I2C_M_RECV_LEN		0x0400	/* length will be first received byte */
	u16 len;	/* msg length				*/
	u8 *buf;	/* pointer to msg data			*/
};
/* To determine what functionality is present */ |
#define I2C_FUNC_I2C 0x00000001 |
#define I2C_FUNC_10BIT_ADDR 0x00000002 |
#define I2C_FUNC_PROTOCOL_MANGLING 0x00000004 /* I2C_M_NOSTART etc. */ |
#define I2C_FUNC_SMBUS_PEC 0x00000008 |
#define I2C_FUNC_SMBUS_BLOCK_PROC_CALL 0x00008000 /* SMBus 2.0 */ |
#define I2C_FUNC_SMBUS_QUICK 0x00010000 |
#define I2C_FUNC_SMBUS_READ_BYTE 0x00020000 |
#define I2C_FUNC_SMBUS_WRITE_BYTE 0x00040000 |
#define I2C_FUNC_SMBUS_READ_BYTE_DATA 0x00080000 |
#define I2C_FUNC_SMBUS_WRITE_BYTE_DATA 0x00100000 |
#define I2C_FUNC_SMBUS_READ_WORD_DATA 0x00200000 |
#define I2C_FUNC_SMBUS_WRITE_WORD_DATA 0x00400000 |
#define I2C_FUNC_SMBUS_PROC_CALL 0x00800000 |
#define I2C_FUNC_SMBUS_READ_BLOCK_DATA 0x01000000 |
#define I2C_FUNC_SMBUS_WRITE_BLOCK_DATA 0x02000000 |
#define I2C_FUNC_SMBUS_READ_I2C_BLOCK 0x04000000 /* I2C-like block xfer */ |
#define I2C_FUNC_SMBUS_WRITE_I2C_BLOCK 0x08000000 /* w/ 1-byte reg. addr. */ |
#define I2C_FUNC_SMBUS_BYTE (I2C_FUNC_SMBUS_READ_BYTE | \ |
I2C_FUNC_SMBUS_WRITE_BYTE) |
#define I2C_FUNC_SMBUS_BYTE_DATA (I2C_FUNC_SMBUS_READ_BYTE_DATA | \ |
I2C_FUNC_SMBUS_WRITE_BYTE_DATA) |
#define I2C_FUNC_SMBUS_WORD_DATA (I2C_FUNC_SMBUS_READ_WORD_DATA | \ |
I2C_FUNC_SMBUS_WRITE_WORD_DATA) |
#define I2C_FUNC_SMBUS_BLOCK_DATA (I2C_FUNC_SMBUS_READ_BLOCK_DATA | \ |
I2C_FUNC_SMBUS_WRITE_BLOCK_DATA) |
#define I2C_FUNC_SMBUS_I2C_BLOCK (I2C_FUNC_SMBUS_READ_I2C_BLOCK | \ |
I2C_FUNC_SMBUS_WRITE_I2C_BLOCK) |
#define I2C_FUNC_SMBUS_EMUL (I2C_FUNC_SMBUS_QUICK | \ |
I2C_FUNC_SMBUS_BYTE | \ |
I2C_FUNC_SMBUS_BYTE_DATA | \ |
I2C_FUNC_SMBUS_WORD_DATA | \ |
I2C_FUNC_SMBUS_PROC_CALL | \ |
I2C_FUNC_SMBUS_WRITE_BLOCK_DATA | \ |
I2C_FUNC_SMBUS_I2C_BLOCK | \ |
I2C_FUNC_SMBUS_PEC) |
/* |
* Data for SMBus Messages |
*/ |
#define I2C_SMBUS_BLOCK_MAX 32 /* As specified in SMBus standard */ |
/* Payload buffer for one SMBus transfer; which member is valid depends
   on the transaction size code (I2C_SMBUS_* below). */
union i2c_smbus_data {
	__u8 byte;
	__u16 word;
	__u8 block[I2C_SMBUS_BLOCK_MAX + 2]; /* block[0] is used for length */
			       /* and one more for user-space compatibility */
};
/* i2c_smbus_xfer read or write markers */ |
#define I2C_SMBUS_READ 1 |
#define I2C_SMBUS_WRITE 0 |
/* SMBus transaction types (size parameter in the above functions) |
Note: these no longer correspond to the (arbitrary) PIIX4 internal codes! */ |
#define I2C_SMBUS_QUICK 0 |
#define I2C_SMBUS_BYTE 1 |
#define I2C_SMBUS_BYTE_DATA 2 |
#define I2C_SMBUS_WORD_DATA 3 |
#define I2C_SMBUS_PROC_CALL 4 |
#define I2C_SMBUS_BLOCK_DATA 5 |
#define I2C_SMBUS_I2C_BLOCK_BROKEN 6 |
#define I2C_SMBUS_BLOCK_PROC_CALL 7 /* SMBus 2.0 */ |
#define I2C_SMBUS_I2C_BLOCK_DATA 8 |
#endif /* _LINUX_I2C_H */ |
/drivers/include/linux/idr.h |
---|
0,0 → 1,144 |
/* |
* include/linux/idr.h |
* |
* 2002-10-18 written by Jim Houston jim.houston@ccur.com |
* Copyright (C) 2002 by Concurrent Computer Corporation |
* Distributed under the GNU GPL license version 2. |
* |
* Small id to pointer translation service avoiding fixed sized |
* tables. |
*/ |
#ifndef __IDR_H__ |
#define __IDR_H__ |
#include <types.h> |
#include <errno-base.h> |
//#include <linux/bitops.h> |
//#include <linux/init.h> |
//#include <linux/rcupdate.h> |
/*
 * Local stand-in for the kernel's RCU callback head - the real
 * <linux/rcupdate.h> include is commented out above, so this port
 * supplies just enough for struct idr_layer to embed one.
 */
struct rcu_head {
	struct rcu_head *next;			/* next queued callback */
	void (*func)(struct rcu_head *head);	/* callback to invoke */
};
# define IDR_BITS 5 |
# define IDR_FULL 0xfffffffful |
/* We can only use two of the bits in the top level because there is |
only one possible bit in the top level (5 bits * 7 levels = 35 |
bits, but you only use 31 bits in the id). */ |
# define TOP_LEVEL_FULL (IDR_FULL >> 30) |
#define IDR_SIZE (1 << IDR_BITS) |
#define IDR_MASK ((1 << IDR_BITS)-1) |
#define MAX_ID_SHIFT (sizeof(int)*8 - 1) |
#define MAX_ID_BIT (1U << MAX_ID_SHIFT) |
#define MAX_ID_MASK (MAX_ID_BIT - 1) |
/* Leave the possibility of an incomplete final layer */ |
#define MAX_LEVEL (MAX_ID_SHIFT + IDR_BITS - 1) / IDR_BITS |
/* Number of id_layer structs to leave in free list */ |
#define IDR_FREE_MAX MAX_LEVEL + MAX_LEVEL |
/* One radix-tree node of the idr: 1<<IDR_BITS child slots plus a
   fullness bitmap. */
struct idr_layer {
	unsigned long		 bitmap; /* A zero bit means "space here" */
	struct idr_layer	*ary[1<<IDR_BITS];
	int			 count;	 /* When zero, we can release it */
	int			 layer;	 /* distance from leaf */
	struct rcu_head		 rcu_head;	/* for deferred freeing */
};
/* Root of an id -> pointer translation tree; see the idr_* API below. */
struct idr {
	struct idr_layer *top;		/* root node of the radix tree */
	struct idr_layer *id_free;	/* free-list of spare layers */
	int		  layers; /* only valid without concurrent changes */
	int		  id_free_cnt;	/* number of layers on id_free */
//	spinlock_t	  lock;
};
#define IDR_INIT(name) \ |
{ \ |
.top = NULL, \ |
.id_free = NULL, \ |
.layers = 0, \ |
.id_free_cnt = 0, \ |
// .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ |
} |
#define DEFINE_IDR(name) struct idr name = IDR_INIT(name) |
/* Actions to be taken after a call to _idr_sub_alloc */ |
#define IDR_NEED_TO_GROW -2 |
#define IDR_NOMORE_SPACE -3 |
#define _idr_rc_to_errno(rc) ((rc) == -1 ? -EAGAIN : -ENOSPC) |
/** |
* idr synchronization (stolen from radix-tree.h) |
* |
* idr_find() is able to be called locklessly, using RCU. The caller must |
* ensure calls to this function are made within rcu_read_lock() regions. |
* Other readers (lock-free or otherwise) and modifications may be running |
* concurrently. |
* |
* It is still required that the caller manage the synchronization and |
* lifetimes of the items. So if RCU lock-free lookups are used, typically |
* this would mean that the items have their own locks, or are amenable to |
* lock-free access; and that the items are freed by RCU (or only freed after |
* having been deleted from the idr tree *and* a synchronize_rcu() grace |
* period). |
*/ |
/* |
* This is what we export. |
*/ |
void *idr_find(struct idr *idp, int id); |
int idr_pre_get(struct idr *idp, u32_t gfp_mask); |
int idr_get_new(struct idr *idp, void *ptr, int *id); |
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id); |
int idr_for_each(struct idr *idp, |
int (*fn)(int id, void *p, void *data), void *data); |
void *idr_get_next(struct idr *idp, int *nextid); |
void *idr_replace(struct idr *idp, void *ptr, int id); |
void idr_remove(struct idr *idp, int id); |
void idr_remove_all(struct idr *idp); |
void idr_destroy(struct idr *idp); |
void idr_init(struct idr *idp); |
/* |
* IDA - IDR based id allocator, use when translation from id to |
* pointer isn't necessary. |
*/ |
#define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */ |
#define IDA_BITMAP_LONGS (128 / sizeof(long) - 1) |
#define IDA_BITMAP_BITS (IDA_BITMAP_LONGS * sizeof(long) * 8) |
/* One IDA allocation chunk: IDA_BITMAP_BITS ids, @nr_busy of them in use. */
struct ida_bitmap {
	long			nr_busy;
	unsigned long		bitmap[IDA_BITMAP_LONGS];
};
/* Plain id allocator built on struct idr; see the ida_* API below. */
struct ida {
	struct idr		idr;
	struct ida_bitmap	*free_bitmap;
};
#define IDA_INIT(name) { .idr = IDR_INIT(name), .free_bitmap = NULL, } |
#define DEFINE_IDA(name) struct ida name = IDA_INIT(name) |
int ida_pre_get(struct ida *ida, u32_t gfp_mask); |
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id); |
int ida_get_new(struct ida *ida, int *p_id); |
void ida_remove(struct ida *ida, int id); |
void ida_destroy(struct ida *ida); |
void ida_init(struct ida *ida); |
void idr_init_cache(void); |
#endif /* __IDR_H__ */ |
/drivers/include/linux/kernel.h |
---|
0,0 → 1,140 |
#ifndef _LINUX_KERNEL_H |
#define _LINUX_KERNEL_H |
/* |
* 'kernel.h' contains some often-used function prototypes etc |
*/ |
#ifdef __KERNEL__ |
#include <stdarg.h> |
#include <linux/stddef.h> |
#include <linux/types.h> |
#include <linux/compiler.h> |
#define USHORT_MAX ((u16)(~0U)) |
#define SHORT_MAX ((s16)(USHORT_MAX>>1)) |
#define SHORT_MIN (-SHORT_MAX - 1) |
#define INT_MAX ((int)(~0U>>1)) |
#define INT_MIN (-INT_MAX - 1) |
#define UINT_MAX (~0U) |
#define LONG_MAX ((long)(~0UL>>1)) |
#define LONG_MIN (-LONG_MAX - 1) |
#define ULONG_MAX (~0UL) |
#define LLONG_MAX ((long long)(~0ULL>>1)) |
#define LLONG_MIN (-LLONG_MAX - 1) |
#define ULLONG_MAX (~0ULL) |
#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1) |
#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask)) |
#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) |
#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0) |
/** |
* upper_32_bits - return bits 32-63 of a number |
* @n: the number we're accessing |
* |
* A basic shift-right of a 64- or 32-bit quantity. Use this to suppress |
* the "right shift count >= width of type" warning when that quantity is |
* 32-bits. |
*/ |
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16)) |
/** |
* lower_32_bits - return bits 0-31 of a number |
* @n: the number we're accessing |
*/ |
#define lower_32_bits(n) ((u32)(n)) |
#define KERN_EMERG "<0>" /* system is unusable */ |
#define KERN_ALERT "<1>" /* action must be taken immediately */ |
#define KERN_CRIT "<2>" /* critical conditions */ |
#define KERN_ERR "<3>" /* error conditions */ |
#define KERN_WARNING "<4>" /* warning conditions */ |
#define KERN_NOTICE "<5>" /* normal but significant condition */ |
#define KERN_INFO "<6>" /* informational */ |
#define KERN_DEBUG "<7>" /* debug-level messages */ |
//int printk(const char *fmt, ...); |
#define printk(fmt, arg...) dbgprintf(fmt , ##arg) |
/* |
* min()/max()/clamp() macros that also do |
* strict type-checking.. See the |
* "unnecessary" pointer comparison. |
*/ |
#define min(x, y) ({ \ |
typeof(x) _min1 = (x); \ |
typeof(y) _min2 = (y); \ |
(void) (&_min1 == &_min2); \ |
_min1 < _min2 ? _min1 : _min2; }) |
#define max(x, y) ({ \ |
typeof(x) _max1 = (x); \ |
typeof(y) _max2 = (y); \ |
(void) (&_max1 == &_max2); \ |
_max1 > _max2 ? _max1 : _max2; }) |
/* |
* ..and if you can't take the strict |
* types, you can specify one yourself. |
* |
* Or not use min/max/clamp at all, of course. |
*/ |
#define min_t(type, x, y) ({ \ |
type __min1 = (x); \ |
type __min2 = (y); \ |
__min1 < __min2 ? __min1: __min2; }) |
#define max_t(type, x, y) ({ \ |
type __max1 = (x); \ |
type __max2 = (y); \ |
__max1 > __max2 ? __max1: __max2; }) |
/** |
* container_of - cast a member of a structure out to the containing structure |
* @ptr: the pointer to the member. |
* @type: the type of the container struct this is embedded in. |
* @member: the name of the member within the struct. |
* |
*/ |
#define container_of(ptr, type, member) ({ \ |
const typeof( ((type *)0)->member ) *__mptr = (ptr); \ |
(type *)( (char *)__mptr - offsetof(type,member) );}) |
/*
 * kcalloc - allocate zeroed storage for an array of @n elements of
 * @size bytes each via kzalloc(); returns NULL when n*size would
 * overflow an unsigned long.
 *
 * NOTE(review): @flags is accepted for Linux API compatibility but is
 * not forwarded - kzalloc() is always called with 0 here. Confirm this
 * is intended before relying on allocation flags.
 */
static inline void *kcalloc(size_t n, size_t size, uint32_t flags)
{
	if (n != 0 && size > ULONG_MAX / n)
		return NULL;
	return kzalloc(n * size, 0);
}
#endif /* __KERNEL__ */ |
typedef unsigned long pgprotval_t; |
typedef struct pgprot { pgprotval_t pgprot; } pgprot_t; |
/* Empty stubs standing in for Linux mm/vfs types this port does not use. */
struct file {};
struct vm_area_struct {};
struct address_space {};
#define preempt_disable() do { } while (0) |
#define preempt_enable_no_resched() do { } while (0) |
#define preempt_enable() do { } while (0) |
#define preempt_check_resched() do { } while (0) |
#define preempt_disable_notrace() do { } while (0) |
#define preempt_enable_no_resched_notrace() do { } while (0) |
#define preempt_enable_notrace() do { } while (0) |
void free (void *ptr); |
#endif |
/drivers/include/linux/kref.h |
---|
0,0 → 1,29 |
/* |
* kref.c - library routines for handling generic reference counted objects |
* |
* Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com> |
* Copyright (C) 2004 IBM Corp. |
* |
* based on kobject.h which was: |
* Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org> |
* Copyright (C) 2002-2003 Open Source Development Labs |
* |
* This file is released under the GPLv2. |
* |
*/ |
#ifndef _KREF_H_ |
#define _KREF_H_ |
#include <linux/types.h> |
/* Generic reference counter; embed in objects and use the kref_* API below. */
struct kref {
	atomic_t refcount;	/* current number of references held */
};
void kref_set(struct kref *kref, int num); |
void kref_init(struct kref *kref); |
void kref_get(struct kref *kref); |
int kref_put(struct kref *kref, void (*release) (struct kref *kref)); |
#endif /* _KREF_H_ */ |
/drivers/include/linux/list.h |
---|
0,0 → 1,700 |
#ifndef _LINUX_LIST_H |
#define _LINUX_LIST_H |
#include <linux/stddef.h> |
//#include <linux/poison.h> |
//#include <linux/prefetch.h> |
//#include <asm/system.h> |
/* |
* Simple doubly linked list implementation. |
* |
* Some of the internal functions ("__xxx") are useful when |
* manipulating whole lists rather than single entries, as |
* sometimes we already know the next/prev entries and we can |
* generate better code by using them directly rather than |
* using the generic single-entry routines. |
*/ |
#define LIST_POISON1 ((struct list_head*)0xFFFF0100) |
#define LIST_POISON2 ((struct list_head*)0xFFFF0200) |
#define prefetch(x) __builtin_prefetch(x) |
struct list_head { |
struct list_head *next, *prev; |
}; |
#define LIST_HEAD_INIT(name) { &(name), &(name) } |
#define LIST_HEAD(name) \ |
struct list_head name = LIST_HEAD_INIT(name) |
/* Initialize @list as an empty list: both links point at itself. */
static inline void INIT_LIST_HEAD(struct list_head *list)
{
    list->next = list;
    list->prev = list;
}
/* |
* Insert a new entry between two known consecutive entries. |
* |
* This is only for internal list manipulation where we know |
* the prev/next entries already! |
*/ |
#ifndef CONFIG_DEBUG_LIST
/* Link @new between @prev and @next. Internal helper: the caller
 * guarantees prev and next are adjacent (prev->next == next). */
static inline void __list_add(struct list_head *new,
                  struct list_head *prev,
                  struct list_head *next)
{
    next->prev = new;
    new->next = next;
    new->prev = prev;
    prev->next = new;
}
#else
/* Debug build: out-of-line variant defined elsewhere. */
extern void __list_add(struct list_head *new,
                  struct list_head *prev,
                  struct list_head *next);
#endif
/**
 * list_add - add a new entry
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks (LIFO: last added, first seen).
 */
static inline void list_add(struct list_head *new, struct list_head *head)
{
    __list_add(new, head, head->next);
}
/**
 * list_add_tail - add a new entry
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues (FIFO: first added, first seen).
 */
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
    __list_add(new, head->prev, head);
}
/*
 * Delete a list entry by making the prev/next entries
 * point to each other.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already! The removed entry's own links
 * are left untouched here; callers poison or reinit them.
 */
static inline void __list_del(struct list_head * prev, struct list_head * next)
{
    next->prev = prev;
    prev->next = next;
}
/**
 * list_del - deletes entry from list.
 * @entry: the element to delete from the list.
 * Note: list_empty() on entry does not return true after this, the entry is
 * in an undefined state.
 */
#ifndef CONFIG_DEBUG_LIST
static inline void list_del(struct list_head *entry)
{
    __list_del(entry->prev, entry->next);
    /* Poison the links so a use-after-delete faults noisily. */
    entry->next = LIST_POISON1;
    entry->prev = LIST_POISON2;
}
#else
/* Debug build: out-of-line variant defined elsewhere. */
extern void list_del(struct list_head *entry);
#endif
/**
 * list_replace - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * If @old was empty, it will be overwritten.
 * @old's own links are left pointing into the list afterwards.
 */
static inline void list_replace(struct list_head *old,
                struct list_head *new)
{
    new->next = old->next;
    new->next->prev = new;
    new->prev = old->prev;
    new->prev->next = new;
}
/* Same as list_replace(), but additionally reinitializes @old so it
 * reads as an empty list afterwards. */
static inline void list_replace_init(struct list_head *old,
                    struct list_head *new)
{
    list_replace(old, new);
    INIT_LIST_HEAD(old);
}
/**
 * list_del_init - deletes entry from list and reinitialize it.
 * @entry: the element to delete from the list.
 *
 * Unlike list_del(), the entry is left in a valid (empty) state,
 * so list_empty(entry) returns true and it can be re-added.
 */
static inline void list_del_init(struct list_head *entry)
{
    __list_del(entry->prev, entry->next);
    INIT_LIST_HEAD(entry);
}
/**
 * list_move - delete from one list and add as another's head
 * @list: the entry to move
 * @head: the head that will precede our entry
 */
static inline void list_move(struct list_head *list, struct list_head *head)
{
    /* Unlink from current position, then insert right after @head. */
    __list_del(list->prev, list->next);
    list_add(list, head);
}
/**
 * list_move_tail - delete from one list and add as another's tail
 * @list: the entry to move
 * @head: the head that will follow our entry
 */
static inline void list_move_tail(struct list_head *list,
                  struct list_head *head)
{
    /* Unlink from current position, then insert right before @head. */
    __list_del(list->prev, list->next);
    list_add_tail(list, head);
}
/** |
* list_is_last - tests whether @list is the last entry in list @head |
* @list: the entry to test |
* @head: the head of the list |
*/ |
static inline int list_is_last(const struct list_head *list, |
const struct list_head *head) |
{ |
return list->next == head; |
} |
/** |
* list_empty - tests whether a list is empty |
* @head: the list to test. |
*/ |
static inline int list_empty(const struct list_head *head) |
{ |
return head->next == head; |
} |
/** |
* list_empty_careful - tests whether a list is empty and not being modified |
* @head: the list to test |
* |
* Description: |
* tests whether a list is empty _and_ checks that no other CPU might be |
* in the process of modifying either member (next or prev) |
* |
* NOTE: using list_empty_careful() without synchronization |
* can only be safe if the only activity that can happen |
* to the list entry is list_del_init(). Eg. it cannot be used |
* if another CPU could re-list_add() it. |
*/ |
static inline void *__list_empty_careful_doc_anchor__; /* (removed) */
/** |
* list_is_singular - tests whether a list has just one entry. |
* @head: the list to test. |
*/ |
static inline int list_is_singular(const struct list_head *head) |
{ |
return !list_empty(head) && (head->next == head->prev); |
} |
/* Internal helper: move the chain head->next .. @entry onto @list.
 * The caller guarantees @head is non-empty and @entry is on @head. */
static inline void __list_cut_position(struct list_head *list,
        struct list_head *head, struct list_head *entry)
{
    struct list_head *new_first = entry->next;
    list->next = head->next;
    list->next->prev = list;
    list->prev = entry;
    entry->next = list;
    head->next = new_first;
    new_first->prev = head;
}
/**
 * list_cut_position - cut a list into two
 * @list: a new list to add all removed entries
 * @head: a list with entries
 * @entry: an entry within head, could be the head itself
 *	and if so we won't cut the list
 *
 * This helper moves the initial part of @head, up to and
 * including @entry, from @head to @list. You should
 * pass on @entry an element you know is on @head. @list
 * should be an empty list or a list you do not care about
 * losing its data.
 *
 */
static inline void list_cut_position(struct list_head *list,
        struct list_head *head, struct list_head *entry)
{
    if (list_empty(head))
        return;
    /* Singular list: the cut only makes sense when @entry is that
     * single element or the head itself. */
    if (list_is_singular(head) &&
        (head->next != entry && head != entry))
        return;
    if (entry == head)
        INIT_LIST_HEAD(list);   /* cut nothing; @list becomes empty */
    else
        __list_cut_position(list, head, entry);
}
/* Internal helper: link the (non-empty) chain of @list between @prev
 * and @next. @list's own head is not re-linked; callers reinit it if
 * they need it usable afterwards. */
static inline void __list_splice(const struct list_head *list,
                 struct list_head *prev,
                 struct list_head *next)
{
    struct list_head *first = list->next;
    struct list_head *last = list->prev;
    first->prev = prev;
    prev->next = first;
    last->next = next;
    next->prev = last;
}
/** |
* list_splice - join two lists, this is designed for stacks |
* @list: the new list to add. |
* @head: the place to add it in the first list. |
*/ |
static inline void list_splice(const struct list_head *list, |
struct list_head *head) |
{ |
if (!list_empty(list)) |
__list_splice(list, head, head->next); |
} |
/** |
* list_splice_tail - join two lists, each list being a queue |
* @list: the new list to add. |
* @head: the place to add it in the first list. |
*/ |
static inline void list_splice_tail(struct list_head *list, |
struct list_head *head) |
{ |
if (!list_empty(list)) |
__list_splice(list, head->prev, head); |
} |
/** |
* list_splice_init - join two lists and reinitialise the emptied list. |
* @list: the new list to add. |
* @head: the place to add it in the first list. |
* |
* The list at @list is reinitialised |
*/ |
static inline void list_splice_init(struct list_head *list, |
struct list_head *head) |
{ |
if (!list_empty(list)) { |
__list_splice(list, head, head->next); |
INIT_LIST_HEAD(list); |
} |
} |
/** |
* list_splice_tail_init - join two lists and reinitialise the emptied list |
* @list: the new list to add. |
* @head: the place to add it in the first list. |
* |
* Each of the lists is a queue. |
* The list at @list is reinitialised |
*/ |
static inline void list_splice_tail_init(struct list_head *list, |
struct list_head *head) |
{ |
if (!list_empty(list)) { |
__list_splice(list, head->prev, head); |
INIT_LIST_HEAD(list); |
} |
} |
/** |
* list_entry - get the struct for this entry |
* @ptr: the &struct list_head pointer. |
* @type: the type of the struct this is embedded in. |
* @member: the name of the list_struct within the struct. |
*/ |
#define list_entry(ptr, type, member) \ |
container_of(ptr, type, member) |
/** |
* list_first_entry - get the first element from a list |
* @ptr: the list head to take the element from. |
* @type: the type of the struct this is embedded in. |
* @member: the name of the list_struct within the struct. |
* |
* Note, that list is expected to be not empty. |
*/ |
#define list_first_entry(ptr, type, member) \ |
list_entry((ptr)->next, type, member) |
/** |
* list_for_each - iterate over a list |
* @pos: the &struct list_head to use as a loop cursor. |
* @head: the head for your list. |
*/ |
#define list_for_each(pos, head) \ |
for (pos = (head)->next; prefetch(pos->next), pos != (head); \ |
pos = pos->next) |
/** |
* __list_for_each - iterate over a list |
* @pos: the &struct list_head to use as a loop cursor. |
* @head: the head for your list. |
* |
* This variant differs from list_for_each() in that it's the |
* simplest possible list iteration code, no prefetching is done. |
* Use this for code that knows the list to be very short (empty |
* or 1 entry) most of the time. |
*/ |
#define __list_for_each(pos, head) \ |
for (pos = (head)->next; pos != (head); pos = pos->next) |
/** |
* list_for_each_prev - iterate over a list backwards |
* @pos: the &struct list_head to use as a loop cursor. |
* @head: the head for your list. |
*/ |
#define list_for_each_prev(pos, head) \ |
for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \ |
pos = pos->prev) |
/**
 * list_for_each_safe - iterate over a list safe against removal of list entry
 * @pos: the &struct list_head to use as a loop cursor.
 * @n: another &struct list_head to use as temporary storage
 * @head: the head for your list.
 *
 * Only removal of @pos itself is safe; deleting @n (the lookahead
 * node) from within the loop body still breaks the iteration.
 */
#define list_for_each_safe(pos, n, head) \
    for (pos = (head)->next, n = pos->next; pos != (head); \
        pos = n, n = pos->next)
/** |
* list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry |
* @pos: the &struct list_head to use as a loop cursor. |
* @n: another &struct list_head to use as temporary storage |
* @head: the head for your list. |
*/ |
#define list_for_each_prev_safe(pos, n, head) \ |
for (pos = (head)->prev, n = pos->prev; \ |
prefetch(pos->prev), pos != (head); \ |
pos = n, n = pos->prev) |
/** |
* list_for_each_entry - iterate over list of given type |
* @pos: the type * to use as a loop cursor. |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
*/ |
#define list_for_each_entry(pos, head, member) \ |
for (pos = list_entry((head)->next, typeof(*pos), member); \ |
prefetch(pos->member.next), &pos->member != (head); \ |
pos = list_entry(pos->member.next, typeof(*pos), member)) |
/** |
* list_for_each_entry_reverse - iterate backwards over list of given type. |
* @pos: the type * to use as a loop cursor. |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
*/ |
#define list_for_each_entry_reverse(pos, head, member) \ |
for (pos = list_entry((head)->prev, typeof(*pos), member); \ |
prefetch(pos->member.prev), &pos->member != (head); \ |
pos = list_entry(pos->member.prev, typeof(*pos), member)) |
/** |
* list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue() |
* @pos: the type * to use as a start point |
* @head: the head of the list |
* @member: the name of the list_struct within the struct. |
* |
* Prepares a pos entry for use as a start point in list_for_each_entry_continue(). |
*/ |
#define list_prepare_entry(pos, head, member) \ |
((pos) ? : list_entry(head, typeof(*pos), member)) |
/** |
* list_for_each_entry_continue - continue iteration over list of given type |
* @pos: the type * to use as a loop cursor. |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
* |
* Continue to iterate over list of given type, continuing after |
* the current position. |
*/ |
#define list_for_each_entry_continue(pos, head, member) \ |
for (pos = list_entry(pos->member.next, typeof(*pos), member); \ |
prefetch(pos->member.next), &pos->member != (head); \ |
pos = list_entry(pos->member.next, typeof(*pos), member)) |
/** |
* list_for_each_entry_continue_reverse - iterate backwards from the given point |
* @pos: the type * to use as a loop cursor. |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
* |
* Start to iterate over list of given type backwards, continuing after |
* the current position. |
*/ |
#define list_for_each_entry_continue_reverse(pos, head, member) \ |
for (pos = list_entry(pos->member.prev, typeof(*pos), member); \ |
prefetch(pos->member.prev), &pos->member != (head); \ |
pos = list_entry(pos->member.prev, typeof(*pos), member)) |
/** |
* list_for_each_entry_from - iterate over list of given type from the current point |
* @pos: the type * to use as a loop cursor. |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
* |
* Iterate over list of given type, continuing from current position. |
*/ |
#define list_for_each_entry_from(pos, head, member) \ |
for (; prefetch(pos->member.next), &pos->member != (head); \ |
pos = list_entry(pos->member.next, typeof(*pos), member)) |
/** |
* list_for_each_entry_safe - iterate over list of given type safe against removal of list entry |
* @pos: the type * to use as a loop cursor. |
* @n: another type * to use as temporary storage |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
*/ |
#define list_for_each_entry_safe(pos, n, head, member) \ |
for (pos = list_entry((head)->next, typeof(*pos), member), \ |
n = list_entry(pos->member.next, typeof(*pos), member); \ |
&pos->member != (head); \ |
pos = n, n = list_entry(n->member.next, typeof(*n), member)) |
/** |
* list_for_each_entry_safe_continue |
* @pos: the type * to use as a loop cursor. |
* @n: another type * to use as temporary storage |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
* |
* Iterate over list of given type, continuing after current point, |
* safe against removal of list entry. |
*/ |
#define list_for_each_entry_safe_continue(pos, n, head, member) \ |
for (pos = list_entry(pos->member.next, typeof(*pos), member), \ |
n = list_entry(pos->member.next, typeof(*pos), member); \ |
&pos->member != (head); \ |
pos = n, n = list_entry(n->member.next, typeof(*n), member)) |
/** |
* list_for_each_entry_safe_from |
* @pos: the type * to use as a loop cursor. |
* @n: another type * to use as temporary storage |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
* |
* Iterate over list of given type from current point, safe against |
* removal of list entry. |
*/ |
#define list_for_each_entry_safe_from(pos, n, head, member) \ |
for (n = list_entry(pos->member.next, typeof(*pos), member); \ |
&pos->member != (head); \ |
pos = n, n = list_entry(n->member.next, typeof(*n), member)) |
/** |
* list_for_each_entry_safe_reverse |
* @pos: the type * to use as a loop cursor. |
* @n: another type * to use as temporary storage |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
* |
* Iterate backwards over list of given type, safe against removal |
* of list entry. |
*/ |
#define list_for_each_entry_safe_reverse(pos, n, head, member) \ |
for (pos = list_entry((head)->prev, typeof(*pos), member), \ |
n = list_entry(pos->member.prev, typeof(*pos), member); \ |
&pos->member != (head); \ |
pos = n, n = list_entry(n->member.prev, typeof(*n), member)) |
/* |
* Double linked lists with a single pointer list head. |
* Mostly useful for hash tables where the two pointer list head is |
* too wasteful. |
* You lose the ability to access the tail in O(1). |
*/ |
struct hlist_head { |
struct hlist_node *first; |
}; |
struct hlist_node { |
struct hlist_node *next, **pprev; |
}; |
#define HLIST_HEAD_INIT { .first = NULL } |
#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL } |
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) |
/* Mark @h as unhashed: both links cleared (see hlist_unhashed()). */
static inline void INIT_HLIST_NODE(struct hlist_node *h)
{
    h->next = NULL;
    h->pprev = NULL;
}
static inline int hlist_unhashed(const struct hlist_node *h) |
{ |
return !h->pprev; |
} |
static inline int hlist_empty(const struct hlist_head *h) |
{ |
return !h->first; |
} |
/* Unlink @n from its hlist. Internal helper: @n's own links are left
 * untouched; callers poison or reinit them. */
static inline void __hlist_del(struct hlist_node *n)
{
    struct hlist_node *next = n->next;
    struct hlist_node **pprev = n->pprev;
    *pprev = next;
    if (next)
        next->pprev = pprev;
}
/* Unlink @n and poison its links so a use-after-delete faults noisily.
 * Note: hlist_unhashed() does not return true afterwards. */
static inline void hlist_del(struct hlist_node *n)
{
    __hlist_del(n);
    n->next = (struct hlist_node*)LIST_POISON1;
    n->pprev = (struct hlist_node**)LIST_POISON2;
}
/* Unlink @n (if hashed) and leave it in the valid unhashed state,
 * so it can be re-added later. No-op on an unhashed node. */
static inline void hlist_del_init(struct hlist_node *n)
{
    if (hlist_unhashed(n))
        return;
    __hlist_del(n);
    INIT_HLIST_NODE(n);
}
/* Insert @n at the front of the hlist headed by @h. */
static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
    struct hlist_node *first = h->first;
    n->next = first;
    if (first)
        first->pprev = &n->next;
    h->first = n;
    n->pprev = &h->first;
}
/* Insert @n immediately before @next. @next must be != NULL (i.e. it
 * must already be on a list). */
static inline void hlist_add_before(struct hlist_node *n,
                    struct hlist_node *next)
{
    n->pprev = next->pprev;
    n->next = next;
    next->pprev = &n->next;
    *(n->pprev) = n;
}
/* Insert @next immediately after @n. Note the argument order: despite
 * the parameter names, it is @next that gets inserted, after the
 * already-listed node @n. */
static inline void hlist_add_after(struct hlist_node *n,
                    struct hlist_node *next)
{
    next->next = n->next;
    n->next = next;
    next->pprev = &n->next;
    if(next->next)
        next->next->pprev = &next->next;
}
/*
 * Move a list from one list head to another. Fixup the pprev
 * reference of the first entry if it exists.
 * @old is left empty afterwards.
 */
static inline void hlist_move_list(struct hlist_head *old,
                   struct hlist_head *new)
{
    new->first = old->first;
    if (new->first)
        new->first->pprev = &new->first;
    old->first = NULL;
}
#define hlist_entry(ptr, type, member) container_of(ptr,type,member) |
#define hlist_for_each(pos, head) \ |
for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \ |
pos = pos->next) |
#define hlist_for_each_safe(pos, n, head) \ |
for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ |
pos = n) |
/** |
* hlist_for_each_entry - iterate over list of given type |
* @tpos: the type * to use as a loop cursor. |
* @pos: the &struct hlist_node to use as a loop cursor. |
* @head: the head for your list. |
* @member: the name of the hlist_node within the struct. |
*/ |
#define hlist_for_each_entry(tpos, pos, head, member) \ |
for (pos = (head)->first; \ |
pos && ({ prefetch(pos->next); 1;}) && \ |
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ |
pos = pos->next) |
/** |
* hlist_for_each_entry_continue - iterate over a hlist continuing after current point |
* @tpos: the type * to use as a loop cursor. |
* @pos: the &struct hlist_node to use as a loop cursor. |
* @member: the name of the hlist_node within the struct. |
*/ |
#define hlist_for_each_entry_continue(tpos, pos, member) \ |
for (pos = (pos)->next; \ |
pos && ({ prefetch(pos->next); 1;}) && \ |
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ |
pos = pos->next) |
/** |
* hlist_for_each_entry_from - iterate over a hlist continuing from current point |
* @tpos: the type * to use as a loop cursor. |
* @pos: the &struct hlist_node to use as a loop cursor. |
* @member: the name of the hlist_node within the struct. |
*/ |
#define hlist_for_each_entry_from(tpos, pos, member) \ |
for (; pos && ({ prefetch(pos->next); 1;}) && \ |
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ |
pos = pos->next) |
/** |
* hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry |
* @tpos: the type * to use as a loop cursor. |
* @pos: the &struct hlist_node to use as a loop cursor. |
* @n: another &struct hlist_node to use as temporary storage |
* @head: the head for your list. |
* @member: the name of the hlist_node within the struct. |
*/ |
#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \ |
for (pos = (head)->first; \ |
pos && ({ n = pos->next; 1; }) && \ |
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ |
pos = n) |
#endif |
/drivers/include/linux/list_sort.h |
---|
0,0 → 1,11 |
#ifndef _LINUX_LIST_SORT_H |
#define _LINUX_LIST_SORT_H |
#include <linux/types.h> |
struct list_head; |
void list_sort(void *priv, struct list_head *head, |
int (*cmp)(void *priv, struct list_head *a, |
struct list_head *b)); |
#endif |
/drivers/include/linux/lockdep.h |
---|
0,0 → 1,537 |
/* |
* Runtime locking correctness validator |
* |
* Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> |
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> |
* |
* see Documentation/lockdep-design.txt for more details. |
*/ |
#ifndef __LINUX_LOCKDEP_H |
#define __LINUX_LOCKDEP_H |
struct task_struct; |
struct lockdep_map; |
#ifdef CONFIG_LOCKDEP |
#include <linux/linkage.h> |
#include <linux/list.h> |
#include <linux/debug_locks.h> |
#include <linux/stacktrace.h> |
/* |
* We'd rather not expose kernel/lockdep_states.h this wide, but we do need |
* the total number of states... :-( |
*/ |
#define XXX_LOCK_USAGE_STATES (1+3*4) |
#define MAX_LOCKDEP_SUBCLASSES 8UL |
/* |
* Lock-classes are keyed via unique addresses, by embedding the |
* lockclass-key into the kernel (or module) .data section. (For |
* static locks we use the lock address itself as the key.) |
*/ |
struct lockdep_subclass_key { |
char __one_byte; |
} __attribute__ ((__packed__)); |
struct lock_class_key { |
struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES]; |
}; |
#define LOCKSTAT_POINTS 4 |
/* |
* The lock-class itself: |
*/ |
struct lock_class { |
/* |
* class-hash: |
*/ |
struct list_head hash_entry; |
/* |
* global list of all lock-classes: |
*/ |
struct list_head lock_entry; |
struct lockdep_subclass_key *key; |
unsigned int subclass; |
unsigned int dep_gen_id; |
/* |
* IRQ/softirq usage tracking bits: |
*/ |
unsigned long usage_mask; |
struct stack_trace usage_traces[XXX_LOCK_USAGE_STATES]; |
/* |
* These fields represent a directed graph of lock dependencies, |
* to every node we attach a list of "forward" and a list of |
* "backward" graph nodes. |
*/ |
struct list_head locks_after, locks_before; |
/* |
* Generation counter, when doing certain classes of graph walking, |
* to ensure that we check one node only once: |
*/ |
unsigned int version; |
/* |
* Statistics counter: |
*/ |
unsigned long ops; |
const char *name; |
int name_version; |
#ifdef CONFIG_LOCK_STAT |
unsigned long contention_point[LOCKSTAT_POINTS]; |
unsigned long contending_point[LOCKSTAT_POINTS]; |
#endif |
}; |
#ifdef CONFIG_LOCK_STAT |
struct lock_time { |
s64 min; |
s64 max; |
s64 total; |
unsigned long nr; |
}; |
enum bounce_type { |
bounce_acquired_write, |
bounce_acquired_read, |
bounce_contended_write, |
bounce_contended_read, |
nr_bounce_types, |
bounce_acquired = bounce_acquired_write, |
bounce_contended = bounce_contended_write, |
}; |
struct lock_class_stats { |
unsigned long contention_point[4]; |
unsigned long contending_point[4]; |
struct lock_time read_waittime; |
struct lock_time write_waittime; |
struct lock_time read_holdtime; |
struct lock_time write_holdtime; |
unsigned long bounces[nr_bounce_types]; |
}; |
struct lock_class_stats lock_stats(struct lock_class *class); |
void clear_lock_stats(struct lock_class *class); |
#endif |
/* |
* Map the lock object (the lock instance) to the lock-class object. |
* This is embedded into specific lock instances: |
*/ |
struct lockdep_map { |
struct lock_class_key *key; |
struct lock_class *class_cache; |
const char *name; |
#ifdef CONFIG_LOCK_STAT |
int cpu; |
unsigned long ip; |
#endif |
}; |
/* |
* Every lock has a list of other locks that were taken after it. |
* We only grow the list, never remove from it: |
*/ |
struct lock_list { |
struct list_head entry; |
struct lock_class *class; |
struct stack_trace trace; |
int distance; |
/* |
* The parent field is used to implement breadth-first search, and the |
* bit 0 is reused to indicate if the lock has been accessed in BFS. |
*/ |
struct lock_list *parent; |
}; |
/* |
* We record lock dependency chains, so that we can cache them: |
*/ |
struct lock_chain { |
u8 irq_context; |
u8 depth; |
u16 base; |
struct list_head entry; |
u64 chain_key; |
}; |
#define MAX_LOCKDEP_KEYS_BITS 13 |
/* |
* Subtract one because we offset hlock->class_idx by 1 in order |
* to make 0 mean no class. This avoids overflowing the class_idx |
* bitfield and hitting the BUG in hlock_class(). |
*/ |
#define MAX_LOCKDEP_KEYS ((1UL << MAX_LOCKDEP_KEYS_BITS) - 1) |
struct held_lock { |
/* |
* One-way hash of the dependency chain up to this point. We |
* hash the hashes step by step as the dependency chain grows. |
* |
* We use it for dependency-caching and we skip detection |
* passes and dependency-updates if there is a cache-hit, so |
* it is absolutely critical for 100% coverage of the validator |
* to have a unique key value for every unique dependency path |
* that can occur in the system, to make a unique hash value |
* as likely as possible - hence the 64-bit width. |
* |
* The task struct holds the current hash value (initialized |
* with zero), here we store the previous hash value: |
*/ |
u64 prev_chain_key; |
unsigned long acquire_ip; |
struct lockdep_map *instance; |
struct lockdep_map *nest_lock; |
#ifdef CONFIG_LOCK_STAT |
u64 waittime_stamp; |
u64 holdtime_stamp; |
#endif |
unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS; |
/* |
* The lock-stack is unified in that the lock chains of interrupt |
* contexts nest ontop of process context chains, but we 'separate' |
* the hashes by starting with 0 if we cross into an interrupt |
* context, and we also keep do not add cross-context lock |
* dependencies - the lock usage graph walking covers that area |
* anyway, and we'd just unnecessarily increase the number of |
* dependencies otherwise. [Note: hardirq and softirq contexts |
* are separated from each other too.] |
* |
* The following field is used to detect when we cross into an |
* interrupt context: |
*/ |
unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */ |
unsigned int trylock:1; /* 16 bits */ |
unsigned int read:2; /* see lock_acquire() comment */ |
unsigned int check:2; /* see lock_acquire() comment */ |
unsigned int hardirqs_off:1; |
unsigned int references:11; /* 32 bits */ |
}; |
/* |
* Initialization, self-test and debugging-output methods: |
*/ |
extern void lockdep_init(void); |
extern void lockdep_info(void); |
extern void lockdep_reset(void); |
extern void lockdep_reset_lock(struct lockdep_map *lock); |
extern void lockdep_free_key_range(void *start, unsigned long size); |
extern void lockdep_sys_exit(void); |
extern void lockdep_off(void); |
extern void lockdep_on(void); |
/* |
* These methods are used by specific locking variants (spinlocks, |
* rwlocks, mutexes and rwsems) to pass init/acquire/release events |
* to lockdep: |
*/ |
extern void lockdep_init_map(struct lockdep_map *lock, const char *name, |
struct lock_class_key *key, int subclass); |
/* |
* To initialize a lockdep_map statically use this macro. |
* Note that _name must not be NULL. |
*/ |
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \ |
{ .name = (_name), .key = (void *)(_key), } |
/* |
* Reinitialize a lock key - for cases where there is special locking or |
* special initialization of locks so that the validator gets the scope |
* of dependencies wrong: they are either too broad (they need a class-split) |
* or they are too narrow (they suffer from a false class-split): |
*/ |
#define lockdep_set_class(lock, key) \ |
lockdep_init_map(&(lock)->dep_map, #key, key, 0) |
#define lockdep_set_class_and_name(lock, key, name) \ |
lockdep_init_map(&(lock)->dep_map, name, key, 0) |
#define lockdep_set_class_and_subclass(lock, key, sub) \ |
lockdep_init_map(&(lock)->dep_map, #key, key, sub) |
#define lockdep_set_subclass(lock, sub) \ |
lockdep_init_map(&(lock)->dep_map, #lock, \ |
(lock)->dep_map.key, sub) |
/* |
* Compare locking classes |
*/ |
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key) |
static inline int lockdep_match_key(struct lockdep_map *lock, |
struct lock_class_key *key) |
{ |
return lock->key == key; |
} |
/* |
* Acquire a lock. |
* |
* Values for "read": |
* |
* 0: exclusive (write) acquire |
* 1: read-acquire (no recursion allowed) |
* 2: read-acquire with same-instance recursion allowed |
* |
* Values for check: |
* |
* 0: disabled |
* 1: simple checks (freeing, held-at-exit-time, etc.) |
* 2: full validation |
*/ |
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass, |
int trylock, int read, int check, |
struct lockdep_map *nest_lock, unsigned long ip); |
extern void lock_release(struct lockdep_map *lock, int nested, |
unsigned long ip); |
#define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map) |
extern int lock_is_held(struct lockdep_map *lock); |
extern void lock_set_class(struct lockdep_map *lock, const char *name, |
struct lock_class_key *key, unsigned int subclass, |
unsigned long ip); |
/* Re-classify @lock with a new @subclass, keeping its current name
 * and key. Thin wrapper around lock_set_class(). */
static inline void lock_set_subclass(struct lockdep_map *lock,
        unsigned int subclass, unsigned long ip)
{
    lock_set_class(lock, lock->name, lock->key, subclass, ip);
}
extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask); |
extern void lockdep_clear_current_reclaim_state(void); |
extern void lockdep_trace_alloc(gfp_t mask); |
# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0, |
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0) |
#define lockdep_assert_held(l) WARN_ON(debug_locks && !lockdep_is_held(l)) |
#else /* !LOCKDEP */ |
/* !CONFIG_LOCKDEP: lock-dependency tracking is compiled out, so
 * enabling/disabling it are no-ops. */
static inline void lockdep_off(void)
{
}
static inline void lockdep_on(void)
{
}
# define lock_acquire(l, s, t, r, c, n, i) do { } while (0) |
# define lock_release(l, n, i) do { } while (0) |
# define lock_set_class(l, n, k, s, i) do { } while (0) |
# define lock_set_subclass(l, s, i) do { } while (0) |
# define lockdep_set_current_reclaim_state(g) do { } while (0) |
# define lockdep_clear_current_reclaim_state() do { } while (0) |
# define lockdep_trace_alloc(g) do { } while (0) |
# define lockdep_init() do { } while (0) |
# define lockdep_info() do { } while (0) |
# define lockdep_init_map(lock, name, key, sub) \ |
do { (void)(name); (void)(key); } while (0) |
# define lockdep_set_class(lock, key) do { (void)(key); } while (0) |
# define lockdep_set_class_and_name(lock, key, name) \ |
do { (void)(key); (void)(name); } while (0) |
#define lockdep_set_class_and_subclass(lock, key, sub) \ |
do { (void)(key); } while (0) |
#define lockdep_set_subclass(lock, sub) do { } while (0) |
/* |
* We don't define lockdep_match_class() and lockdep_match_key() for !LOCKDEP |
* case since the result is not well defined and the caller should rather |
* #ifdef the call himself. |
*/ |
# define INIT_LOCKDEP |
# define lockdep_reset() do { debug_locks = 1; } while (0) |
# define lockdep_free_key_range(start, size) do { } while (0) |
# define lockdep_sys_exit() do { } while (0) |
/* |
* The class key takes no space if lockdep is disabled: |
*/ |
struct lock_class_key { }; |
#define lockdep_depth(tsk) (0) |
#define lockdep_assert_held(l) do { } while (0) |
#endif /* !LOCKDEP */ |
#ifdef CONFIG_LOCK_STAT |
extern void lock_contended(struct lockdep_map *lock, unsigned long ip); |
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip); |
#define LOCK_CONTENDED(_lock, try, lock) \ |
do { \ |
if (!try(_lock)) { \ |
lock_contended(&(_lock)->dep_map, _RET_IP_); \ |
lock(_lock); \ |
} \ |
lock_acquired(&(_lock)->dep_map, _RET_IP_); \ |
} while (0) |
#else /* CONFIG_LOCK_STAT */ |
#define lock_contended(lockdep_map, ip) do {} while (0) |
#define lock_acquired(lockdep_map, ip) do {} while (0) |
#define LOCK_CONTENDED(_lock, try, lock) \ |
lock(_lock) |
#endif /* CONFIG_LOCK_STAT */ |
#ifdef CONFIG_LOCKDEP |
/* |
 * On lockdep we don't want the hand-coded irq-enable of
* _raw_*_lock_flags() code, because lockdep assumes |
* that interrupts are not re-enabled during lock-acquire: |
*/ |
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \ |
LOCK_CONTENDED((_lock), (try), (lock)) |
#else /* CONFIG_LOCKDEP */ |
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \ |
lockfl((_lock), (flags)) |
#endif /* CONFIG_LOCKDEP */ |
#ifdef CONFIG_GENERIC_HARDIRQS |
extern void early_init_irq_lock_class(void); |
#else |
/* No-op stub when CONFIG_GENERIC_HARDIRQS is not set. */
static inline void early_init_irq_lock_class(void)
{
}
#endif |
#ifdef CONFIG_TRACE_IRQFLAGS |
extern void early_boot_irqs_off(void); |
extern void early_boot_irqs_on(void); |
extern void print_irqtrace_events(struct task_struct *curr); |
#else |
/* No-op stubs when CONFIG_TRACE_IRQFLAGS is not set. */
static inline void early_boot_irqs_off(void)
{
}
static inline void early_boot_irqs_on(void)
{
}
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif |
/* |
* For trivial one-depth nesting of a lock-class, the following |
* global define can be used. (Subsystems with multiple levels |
* of nesting should define their own lock-nesting subclasses.) |
*/ |
#define SINGLE_DEPTH_NESTING 1 |
/* |
* Map the dependency ops to NOP or to real lockdep ops, depending |
* on the per lock-class debug mode: |
*/ |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# ifdef CONFIG_PROVE_LOCKING |
# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) |
# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i) |
# else |
# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) |
# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, NULL, i) |
# endif |
# define spin_release(l, n, i) lock_release(l, n, i) |
#else |
# define spin_acquire(l, s, t, i) do { } while (0) |
# define spin_release(l, n, i) do { } while (0) |
#endif |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# ifdef CONFIG_PROVE_LOCKING |
# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) |
# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, NULL, i) |
# else |
# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) |
# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, NULL, i) |
# endif |
# define rwlock_release(l, n, i) lock_release(l, n, i) |
#else |
# define rwlock_acquire(l, s, t, i) do { } while (0) |
# define rwlock_acquire_read(l, s, t, i) do { } while (0) |
# define rwlock_release(l, n, i) do { } while (0) |
#endif |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# ifdef CONFIG_PROVE_LOCKING |
# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) |
# else |
# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) |
# endif |
# define mutex_release(l, n, i) lock_release(l, n, i) |
#else |
# define mutex_acquire(l, s, t, i) do { } while (0) |
# define mutex_release(l, n, i) do { } while (0) |
#endif |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# ifdef CONFIG_PROVE_LOCKING |
# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) |
# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, NULL, i) |
# else |
# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) |
# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, NULL, i) |
# endif |
# define rwsem_release(l, n, i) lock_release(l, n, i) |
#else |
# define rwsem_acquire(l, s, t, i) do { } while (0) |
# define rwsem_acquire_read(l, s, t, i) do { } while (0) |
# define rwsem_release(l, n, i) do { } while (0) |
#endif |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# ifdef CONFIG_PROVE_LOCKING |
# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_) |
# else |
# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_) |
# endif |
# define lock_map_release(l) lock_release(l, 1, _THIS_IP_) |
#else |
# define lock_map_acquire(l) do { } while (0) |
# define lock_map_release(l) do { } while (0) |
#endif |
#ifdef CONFIG_PROVE_LOCKING |
# define might_lock(lock) \ |
do { \ |
typecheck(struct lockdep_map *, &(lock)->dep_map); \ |
lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_); \ |
lock_release(&(lock)->dep_map, 0, _THIS_IP_); \ |
} while (0) |
# define might_lock_read(lock) \ |
do { \ |
typecheck(struct lockdep_map *, &(lock)->dep_map); \ |
lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_); \ |
lock_release(&(lock)->dep_map, 0, _THIS_IP_); \ |
} while (0) |
#else |
# define might_lock(lock) do { } while (0) |
# define might_lock_read(lock) do { } while (0) |
#endif |
#endif /* __LINUX_LOCKDEP_H */ |
/drivers/include/linux/module.h |
---|
0,0 → 1,15 |
#ifndef _LINUX_MODULE_H |
#define _LINUX_MODULE_H |
#include <linux/list.h> |
#include <linux/compiler.h> |
#define EXPORT_SYMBOL(x) |
#define MODULE_FIRMWARE(x) |
#endif /* _LINUX_MODULE_H */ |
/drivers/include/linux/pci.h |
---|
0,0 → 1,566 |
#include <types.h> |
#include <list.h> |
#ifndef __PCI_H__ |
#define __PCI_H__ |
#define PCI_ANY_ID (~0) |
#define PCI_CLASS_NOT_DEFINED 0x0000 |
#define PCI_CLASS_NOT_DEFINED_VGA 0x0001 |
#define PCI_BASE_CLASS_STORAGE 0x01 |
#define PCI_CLASS_STORAGE_SCSI 0x0100 |
#define PCI_CLASS_STORAGE_IDE 0x0101 |
#define PCI_CLASS_STORAGE_FLOPPY 0x0102 |
#define PCI_CLASS_STORAGE_IPI 0x0103 |
#define PCI_CLASS_STORAGE_RAID 0x0104 |
#define PCI_CLASS_STORAGE_SATA 0x0106 |
#define PCI_CLASS_STORAGE_SATA_AHCI 0x010601 |
#define PCI_CLASS_STORAGE_SAS 0x0107 |
#define PCI_CLASS_STORAGE_OTHER 0x0180 |
#define PCI_BASE_CLASS_NETWORK 0x02 |
#define PCI_CLASS_NETWORK_ETHERNET 0x0200 |
#define PCI_CLASS_NETWORK_TOKEN_RING 0x0201 |
#define PCI_CLASS_NETWORK_FDDI 0x0202 |
#define PCI_CLASS_NETWORK_ATM 0x0203 |
#define PCI_CLASS_NETWORK_OTHER 0x0280 |
#define PCI_BASE_CLASS_DISPLAY 0x03 |
#define PCI_CLASS_DISPLAY_VGA 0x0300 |
#define PCI_CLASS_DISPLAY_XGA 0x0301 |
#define PCI_CLASS_DISPLAY_3D 0x0302 |
#define PCI_CLASS_DISPLAY_OTHER 0x0380 |
#define PCI_BASE_CLASS_MULTIMEDIA 0x04 |
#define PCI_CLASS_MULTIMEDIA_VIDEO 0x0400 |
#define PCI_CLASS_MULTIMEDIA_AUDIO 0x0401 |
#define PCI_CLASS_MULTIMEDIA_PHONE 0x0402 |
#define PCI_CLASS_MULTIMEDIA_OTHER 0x0480 |
#define PCI_BASE_CLASS_MEMORY 0x05 |
#define PCI_CLASS_MEMORY_RAM 0x0500 |
#define PCI_CLASS_MEMORY_FLASH 0x0501 |
#define PCI_CLASS_MEMORY_OTHER 0x0580 |
#define PCI_BASE_CLASS_BRIDGE 0x06 |
#define PCI_CLASS_BRIDGE_HOST 0x0600 |
#define PCI_CLASS_BRIDGE_ISA 0x0601 |
#define PCI_CLASS_BRIDGE_EISA 0x0602 |
#define PCI_CLASS_BRIDGE_MC 0x0603 |
#define PCI_CLASS_BRIDGE_PCI 0x0604 |
#define PCI_CLASS_BRIDGE_PCMCIA 0x0605 |
#define PCI_CLASS_BRIDGE_NUBUS 0x0606 |
#define PCI_CLASS_BRIDGE_CARDBUS 0x0607 |
#define PCI_CLASS_BRIDGE_RACEWAY 0x0608 |
#define PCI_CLASS_BRIDGE_OTHER 0x0680 |
#define PCI_BASE_CLASS_COMMUNICATION 0x07 |
#define PCI_CLASS_COMMUNICATION_SERIAL 0x0700 |
#define PCI_CLASS_COMMUNICATION_PARALLEL 0x0701 |
#define PCI_CLASS_COMMUNICATION_MULTISERIAL 0x0702 |
#define PCI_CLASS_COMMUNICATION_MODEM 0x0703 |
#define PCI_CLASS_COMMUNICATION_OTHER 0x0780 |
#define PCI_BASE_CLASS_SYSTEM 0x08 |
#define PCI_CLASS_SYSTEM_PIC 0x0800 |
#define PCI_CLASS_SYSTEM_PIC_IOAPIC 0x080010 |
#define PCI_CLASS_SYSTEM_PIC_IOXAPIC 0x080020 |
#define PCI_CLASS_SYSTEM_DMA 0x0801 |
#define PCI_CLASS_SYSTEM_TIMER 0x0802 |
#define PCI_CLASS_SYSTEM_RTC 0x0803 |
#define PCI_CLASS_SYSTEM_PCI_HOTPLUG 0x0804 |
#define PCI_CLASS_SYSTEM_SDHCI 0x0805 |
#define PCI_CLASS_SYSTEM_OTHER 0x0880 |
#define PCI_BASE_CLASS_INPUT 0x09 |
#define PCI_CLASS_INPUT_KEYBOARD 0x0900 |
#define PCI_CLASS_INPUT_PEN 0x0901 |
#define PCI_CLASS_INPUT_MOUSE 0x0902 |
#define PCI_CLASS_INPUT_SCANNER 0x0903 |
#define PCI_CLASS_INPUT_GAMEPORT 0x0904 |
#define PCI_CLASS_INPUT_OTHER 0x0980 |
#define PCI_BASE_CLASS_DOCKING 0x0a |
#define PCI_CLASS_DOCKING_GENERIC 0x0a00 |
#define PCI_CLASS_DOCKING_OTHER 0x0a80 |
#define PCI_BASE_CLASS_PROCESSOR 0x0b |
#define PCI_CLASS_PROCESSOR_386 0x0b00 |
#define PCI_CLASS_PROCESSOR_486 0x0b01 |
#define PCI_CLASS_PROCESSOR_PENTIUM 0x0b02 |
#define PCI_CLASS_PROCESSOR_ALPHA 0x0b10 |
#define PCI_CLASS_PROCESSOR_POWERPC 0x0b20 |
#define PCI_CLASS_PROCESSOR_MIPS 0x0b30 |
#define PCI_CLASS_PROCESSOR_CO 0x0b40 |
#define PCI_BASE_CLASS_SERIAL 0x0c |
#define PCI_CLASS_SERIAL_FIREWIRE 0x0c00 |
#define PCI_CLASS_SERIAL_FIREWIRE_OHCI 0x0c0010 |
#define PCI_CLASS_SERIAL_ACCESS 0x0c01 |
#define PCI_CLASS_SERIAL_SSA 0x0c02 |
#define PCI_CLASS_SERIAL_USB 0x0c03 |
#define PCI_CLASS_SERIAL_USB_UHCI 0x0c0300 |
#define PCI_CLASS_SERIAL_USB_OHCI 0x0c0310 |
#define PCI_CLASS_SERIAL_USB_EHCI 0x0c0320 |
#define PCI_CLASS_SERIAL_FIBER 0x0c04 |
#define PCI_CLASS_SERIAL_SMBUS 0x0c05 |
#define PCI_BASE_CLASS_WIRELESS 0x0d |
#define PCI_CLASS_WIRELESS_RF_CONTROLLER 0x0d10 |
#define PCI_CLASS_WIRELESS_WHCI 0x0d1010 |
#define PCI_BASE_CLASS_INTELLIGENT 0x0e |
#define PCI_CLASS_INTELLIGENT_I2O 0x0e00 |
#define PCI_BASE_CLASS_SATELLITE 0x0f |
#define PCI_CLASS_SATELLITE_TV 0x0f00 |
#define PCI_CLASS_SATELLITE_AUDIO 0x0f01 |
#define PCI_CLASS_SATELLITE_VOICE 0x0f03 |
#define PCI_CLASS_SATELLITE_DATA 0x0f04 |
#define PCI_BASE_CLASS_CRYPT 0x10 |
#define PCI_CLASS_CRYPT_NETWORK 0x1000 |
#define PCI_CLASS_CRYPT_ENTERTAINMENT 0x1001 |
#define PCI_CLASS_CRYPT_OTHER 0x1080 |
#define PCI_BASE_CLASS_SIGNAL_PROCESSING 0x11 |
#define PCI_CLASS_SP_DPIO 0x1100 |
#define PCI_CLASS_SP_OTHER 0x1180 |
#define PCI_CLASS_OTHERS 0xff |
/* |
* Under PCI, each device has 256 bytes of configuration address space, |
* of which the first 64 bytes are standardized as follows: |
*/ |
#define PCI_VENDOR_ID 0x000 /* 16 bits */ |
#define PCI_DEVICE_ID 0x002 /* 16 bits */ |
#define PCI_COMMAND 0x004 /* 16 bits */ |
#define PCI_COMMAND_IO 0x001 /* Enable response in I/O space */ |
#define PCI_COMMAND_MEMORY 0x002 /* Enable response in Memory space */ |
#define PCI_COMMAND_MASTER 0x004 /* Enable bus mastering */ |
#define PCI_COMMAND_SPECIAL 0x008 /* Enable response to special cycles */ |
#define PCI_COMMAND_INVALIDATE 0x010 /* Use memory write and invalidate */ |
#define PCI_COMMAND_VGA_PALETTE 0x020 /* Enable palette snooping */ |
#define PCI_COMMAND_PARITY 0x040 /* Enable parity checking */ |
#define PCI_COMMAND_WAIT 0x080 /* Enable address/data stepping */ |
#define PCI_COMMAND_SERR 0x100 /* Enable SERR */ |
#define PCI_COMMAND_FAST_BACK 0x200 /* Enable back-to-back writes */ |
#define PCI_COMMAND_INTX_DISABLE 0x400 /* INTx Emulation Disable */ |
#define PCI_STATUS 0x006 /* 16 bits */ |
#define PCI_STATUS_CAP_LIST 0x010 /* Support Capability List */ |
#define PCI_STATUS_66MHZ 0x020 /* Support 66 Mhz PCI 2.1 bus */ |
#define PCI_STATUS_UDF 0x040 /* Support User Definable Features [obsolete] */ |
#define PCI_STATUS_FAST_BACK 0x080 /* Accept fast-back to back */ |
#define PCI_STATUS_PARITY 0x100 /* Detected parity error */ |
#define PCI_STATUS_DEVSEL_MASK 0x600 /* DEVSEL timing */ |
#define PCI_STATUS_DEVSEL_FAST 0x000 |
#define PCI_STATUS_DEVSEL_MEDIUM 0x200 |
#define PCI_STATUS_DEVSEL_SLOW 0x400 |
#define PCI_STATUS_SIG_TARGET_ABORT 0x800 /* Set on target abort */ |
#define PCI_STATUS_REC_TARGET_ABORT 0x1000 /* Master ack of " */ |
#define PCI_STATUS_REC_MASTER_ABORT 0x2000 /* Set on master abort */ |
#define PCI_STATUS_SIG_SYSTEM_ERROR 0x4000 /* Set when we drive SERR */ |
#define PCI_STATUS_DETECTED_PARITY 0x8000 /* Set on parity error */ |
#define PCI_CLASS_REVISION 0x08 /* High 24 bits are class, low 8 revision */ |
#define PCI_REVISION_ID 0x08 /* Revision ID */ |
#define PCI_CLASS_PROG 0x09 /* Reg. Level Programming Interface */ |
#define PCI_CLASS_DEVICE 0x0a /* Device class */ |
#define PCI_CACHE_LINE_SIZE 0x0c /* 8 bits */ |
#define PCI_LATENCY_TIMER 0x0d /* 8 bits */ |
#define PCI_HEADER_TYPE 0x0e /* 8 bits */ |
#define PCI_HEADER_TYPE_NORMAL 0 |
#define PCI_HEADER_TYPE_BRIDGE 1 |
#define PCI_HEADER_TYPE_CARDBUS 2 |
#define PCI_BIST 0x0f /* 8 bits */ |
#define PCI_BIST_CODE_MASK 0x0f /* Return result */ |
#define PCI_BIST_START 0x40 /* 1 to start BIST, 2 secs or less */ |
#define PCI_BIST_CAPABLE 0x80 /* 1 if BIST capable */ |
/* |
* Base addresses specify locations in memory or I/O space. |
* Decoded size can be determined by writing a value of |
* 0xffffffff to the register, and reading it back. Only |
* 1 bits are decoded. |
*/ |
#define PCI_BASE_ADDRESS_0 0x10 /* 32 bits */ |
#define PCI_BASE_ADDRESS_1 0x14 /* 32 bits [htype 0,1 only] */ |
#define PCI_BASE_ADDRESS_2 0x18 /* 32 bits [htype 0 only] */ |
#define PCI_BASE_ADDRESS_3 0x1c /* 32 bits */ |
#define PCI_BASE_ADDRESS_4 0x20 /* 32 bits */ |
#define PCI_BASE_ADDRESS_5 0x24 /* 32 bits */ |
#define PCI_BASE_ADDRESS_SPACE 0x01 /* 0 = memory, 1 = I/O */ |
#define PCI_BASE_ADDRESS_SPACE_IO 0x01 |
#define PCI_BASE_ADDRESS_SPACE_MEMORY 0x00 |
#define PCI_BASE_ADDRESS_MEM_TYPE_MASK 0x06 |
#define PCI_BASE_ADDRESS_MEM_TYPE_32 0x00 /* 32 bit address */ |
#define PCI_BASE_ADDRESS_MEM_TYPE_1M 0x02 /* Below 1M [obsolete] */ |
#define PCI_BASE_ADDRESS_MEM_TYPE_64 0x04 /* 64 bit address */ |
#define PCI_BASE_ADDRESS_MEM_PREFETCH 0x08 /* prefetchable? */ |
#define PCI_BASE_ADDRESS_MEM_MASK (~0x0fUL) |
#define PCI_BASE_ADDRESS_IO_MASK (~0x03UL) |
/* bit 1 is reserved if address_space = 1 */ |
#define PCI_ROM_ADDRESS1 0x38 /* Same as PCI_ROM_ADDRESS, but for htype 1 */ |
/* Header type 0 (normal devices) */ |
#define PCI_CARDBUS_CIS 0x28 |
#define PCI_SUBSYSTEM_VENDOR_ID 0x2c |
#define PCI_SUBSYSTEM_ID 0x2e |
#define PCI_ROM_ADDRESS 0x30 /* Bits 31..11 are address, 10..1 reserved */ |
#define PCI_ROM_ADDRESS_ENABLE 0x01 |
#define PCI_ROM_ADDRESS_MASK (~0x7ffUL) |
#define PCI_INTERRUPT_LINE 0x3c /* 8 bits */ |
#define PCI_INTERRUPT_PIN 0x3d /* 8 bits */ |
#define PCI_CB_SUBSYSTEM_VENDOR_ID 0x40 |
#define PCI_CB_SUBSYSTEM_ID 0x42 |
#define PCI_CAPABILITY_LIST 0x34 /* Offset of first capability list entry */ |
#define PCI_CB_CAPABILITY_LIST 0x14 |
/* Capability lists */ |
#define PCI_CAP_LIST_ID 0 /* Capability ID */ |
#define PCI_CAP_ID_PM 0x01 /* Power Management */ |
#define PCI_CAP_ID_AGP 0x02 /* Accelerated Graphics Port */ |
#define PCI_CAP_ID_VPD 0x03 /* Vital Product Data */ |
#define PCI_CAP_ID_SLOTID 0x04 /* Slot Identification */ |
#define PCI_CAP_ID_MSI 0x05 /* Message Signalled Interrupts */ |
#define PCI_CAP_ID_CHSWP 0x06 /* CompactPCI HotSwap */ |
#define PCI_CAP_ID_PCIX 0x07 /* PCI-X */ |
#define PCI_CAP_ID_HT 0x08 /* HyperTransport */ |
#define PCI_CAP_ID_VNDR 0x09 /* Vendor specific capability */ |
#define PCI_CAP_ID_SHPC 0x0C /* PCI Standard Hot-Plug Controller */ |
#define PCI_CAP_ID_EXP 0x10 /* PCI Express */ |
#define PCI_CAP_ID_MSIX 0x11 /* MSI-X */ |
#define PCI_CAP_LIST_NEXT 1 /* Next capability in the list */ |
#define PCI_CAP_FLAGS 2 /* Capability defined flags (16 bits) */ |
#define PCI_CAP_SIZEOF 4 |
/* AGP registers */ |
#define PCI_AGP_VERSION 2 /* BCD version number */ |
#define PCI_AGP_RFU 3 /* Rest of capability flags */ |
#define PCI_AGP_STATUS 4 /* Status register */ |
#define PCI_AGP_STATUS_RQ_MASK 0xff000000 /* Maximum number of requests - 1 */ |
#define PCI_AGP_STATUS_SBA 0x0200 /* Sideband addressing supported */ |
#define PCI_AGP_STATUS_64BIT 0x0020 /* 64-bit addressing supported */ |
#define PCI_AGP_STATUS_FW 0x0010 /* FW transfers supported */ |
#define PCI_AGP_STATUS_RATE4 0x0004 /* 4x transfer rate supported */ |
#define PCI_AGP_STATUS_RATE2 0x0002 /* 2x transfer rate supported */ |
#define PCI_AGP_STATUS_RATE1 0x0001 /* 1x transfer rate supported */ |
#define PCI_AGP_COMMAND 8 /* Control register */ |
#define PCI_AGP_COMMAND_RQ_MASK 0xff000000 /* Master: Maximum number of requests */ |
#define PCI_AGP_COMMAND_SBA 0x0200 /* Sideband addressing enabled */ |
#define PCI_AGP_COMMAND_AGP 0x0100 /* Allow processing of AGP transactions */ |
#define PCI_AGP_COMMAND_64BIT 0x0020 /* Allow processing of 64-bit addresses */ |
#define PCI_AGP_COMMAND_FW 0x0010 /* Force FW transfers */ |
#define PCI_AGP_COMMAND_RATE4 0x0004 /* Use 4x rate */ |
#define PCI_AGP_COMMAND_RATE2 0x0002 /* Use 2x rate */ |
#define PCI_AGP_COMMAND_RATE1 0x0001 /* Use 1x rate */ |
#define PCI_AGP_SIZEOF 12 |
#define PCI_MAP_REG_START 0x10 |
#define PCI_MAP_REG_END 0x28 |
#define PCI_MAP_ROM_REG 0x30 |
#define PCI_MAP_MEMORY 0x00000000 |
#define PCI_MAP_IO 0x00000001 |
#define PCI_MAP_MEMORY_TYPE 0x00000007 |
#define PCI_MAP_IO_TYPE 0x00000003 |
#define PCI_MAP_MEMORY_TYPE_32BIT 0x00000000 |
#define PCI_MAP_MEMORY_TYPE_32BIT_1M 0x00000002 |
#define PCI_MAP_MEMORY_TYPE_64BIT 0x00000004 |
#define PCI_MAP_MEMORY_TYPE_MASK 0x00000006 |
#define PCI_MAP_MEMORY_CACHABLE 0x00000008 |
#define PCI_MAP_MEMORY_ATTR_MASK 0x0000000e |
#define PCI_MAP_MEMORY_ADDRESS_MASK 0xfffffff0 |
#define PCI_MAP_IO_ATTR_MASK 0x00000003 |
#define PCI_MAP_IS_IO(b) ((b) & PCI_MAP_IO) |
#define PCI_MAP_IS_MEM(b) (!PCI_MAP_IS_IO(b)) |
#define PCI_MAP_IS64BITMEM(b) \ |
(((b) & PCI_MAP_MEMORY_TYPE_MASK) == PCI_MAP_MEMORY_TYPE_64BIT) |
#define PCIGETMEMORY(b) ((b) & PCI_MAP_MEMORY_ADDRESS_MASK) |
#define PCIGETMEMORY64HIGH(b) (*((CARD32*)&b + 1)) |
#define PCIGETMEMORY64(b) \ |
(PCIGETMEMORY(b) | ((CARD64)PCIGETMEMORY64HIGH(b) << 32)) |
#define PCI_MAP_IO_ADDRESS_MASK 0xfffffffc |
#define PCIGETIO(b) ((b) & PCI_MAP_IO_ADDRESS_MASK) |
#define PCI_MAP_ROM_DECODE_ENABLE 0x00000001 |
#define PCI_MAP_ROM_ADDRESS_MASK 0xfffff800 |
#define PCIGETROM(b) ((b) & PCI_MAP_ROM_ADDRESS_MASK) |
#ifndef PCI_DOM_MASK |
# define PCI_DOM_MASK 0x0ffu |
#endif |
#define PCI_DOMBUS_MASK (((PCI_DOM_MASK) << 8) | 0x0ffu) |
#define PCI_MAKE_TAG(b,d,f) ((((b) & (PCI_DOMBUS_MASK)) << 16) | \ |
(((d) & 0x00001fu) << 11) | \ |
(((f) & 0x000007u) << 8)) |
#define PCI_BUS_FROM_TAG(tag) (((tag) >> 16) & (PCI_DOMBUS_MASK)) |
#define PCI_DEV_FROM_TAG(tag) (((tag) & 0x0000f800u) >> 11) |
#define PCI_FUNC_FROM_TAG(tag) (((tag) & 0x00000700u) >> 8) |
#define PCI_DFN_FROM_TAG(tag) (((tag) & 0x0000ff00u) >> 8) |
#define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07)) |
#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f) |
#define PCI_FUNC(devfn) ((devfn) & 0x07) |
typedef unsigned int PCITAG; |
extern inline PCITAG |
pciTag(int busnum, int devnum, int funcnum) |
{ |
return(PCI_MAKE_TAG(busnum,devnum,funcnum)); |
} |
/*
 * Trimmed-down version of the Linux struct resource: an inclusive
 * [start, end] address range plus IORESOURCE_* flags describing its
 * type.  Unused upstream fields are kept, commented out, for reference.
 */
struct resource
{
    resource_size_t start;   /* first address of the range (inclusive) */
    resource_size_t end;     /* last address of the range (inclusive) */
//    const char *name;
    unsigned long flags;     /* IORESOURCE_* type and attribute bits */
//    struct resource *parent, *sibling, *child;
};
/* |
* IO resources have these defined flags. |
*/ |
#define IORESOURCE_BITS 0x000000ff /* Bus-specific bits */ |
#define IORESOURCE_IO 0x00000100 /* Resource type */ |
#define IORESOURCE_MEM 0x00000200 |
#define IORESOURCE_IRQ 0x00000400 |
#define IORESOURCE_DMA 0x00000800 |
#define IORESOURCE_PREFETCH 0x00001000 /* No side effects */ |
#define IORESOURCE_READONLY 0x00002000 |
#define IORESOURCE_CACHEABLE 0x00004000 |
#define IORESOURCE_RANGELENGTH 0x00008000 |
#define IORESOURCE_SHADOWABLE 0x00010000 |
#define IORESOURCE_BUS_HAS_VGA 0x00080000 |
#define IORESOURCE_DISABLED 0x10000000 |
#define IORESOURCE_UNSET 0x20000000 |
#define IORESOURCE_AUTO 0x40000000 |
#define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */ |
/* ISA PnP IRQ specific bits (IORESOURCE_BITS) */ |
#define IORESOURCE_IRQ_HIGHEDGE (1<<0) |
#define IORESOURCE_IRQ_LOWEDGE (1<<1) |
#define IORESOURCE_IRQ_HIGHLEVEL (1<<2) |
#define IORESOURCE_IRQ_LOWLEVEL (1<<3) |
#define IORESOURCE_IRQ_SHAREABLE (1<<4) |
/* ISA PnP DMA specific bits (IORESOURCE_BITS) */ |
#define IORESOURCE_DMA_TYPE_MASK (3<<0) |
#define IORESOURCE_DMA_8BIT (0<<0) |
#define IORESOURCE_DMA_8AND16BIT (1<<0) |
#define IORESOURCE_DMA_16BIT (2<<0) |
#define IORESOURCE_DMA_MASTER (1<<2) |
#define IORESOURCE_DMA_BYTE (1<<3) |
#define IORESOURCE_DMA_WORD (1<<4) |
#define IORESOURCE_DMA_SPEED_MASK (3<<6) |
#define IORESOURCE_DMA_COMPATIBLE (0<<6) |
#define IORESOURCE_DMA_TYPEA (1<<6) |
#define IORESOURCE_DMA_TYPEB (2<<6) |
#define IORESOURCE_DMA_TYPEF (3<<6) |
/* ISA PnP memory I/O specific bits (IORESOURCE_BITS) */ |
#define IORESOURCE_MEM_WRITEABLE (1<<0) /* dup: IORESOURCE_READONLY */ |
#define IORESOURCE_MEM_CACHEABLE (1<<1) /* dup: IORESOURCE_CACHEABLE */ |
#define IORESOURCE_MEM_RANGELENGTH (1<<2) /* dup: IORESOURCE_RANGELENGTH */ |
#define IORESOURCE_MEM_TYPE_MASK (3<<3) |
#define IORESOURCE_MEM_8BIT (0<<3) |
#define IORESOURCE_MEM_16BIT (1<<3) |
#define IORESOURCE_MEM_8AND16BIT (2<<3) |
#define IORESOURCE_MEM_32BIT (3<<3) |
#define IORESOURCE_MEM_SHADOWABLE (1<<5) /* dup: IORESOURCE_SHADOWABLE */ |
#define IORESOURCE_MEM_EXPANSIONROM (1<<6) |
/* PCI ROM control bits (IORESOURCE_BITS) */ |
#define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */ |
#define IORESOURCE_ROM_SHADOW (1<<1) /* ROM is copy at C000:0 */ |
#define IORESOURCE_ROM_COPY (1<<2) /* ROM is alloc'd copy, resource field overlaid */ |
#define IORESOURCE_ROM_BIOS_COPY (1<<3) /* ROM is BIOS copy, resource field overlaid */ |
/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */ |
#define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */ |
/* |
* For PCI devices, the region numbers are assigned this way: |
* |
* 0-5 standard PCI regions |
* 6 expansion ROM |
* 7-10 bridges: address space assigned to buses behind the bridge |
*/ |
#define PCI_ROM_RESOURCE 6 |
#define PCI_BRIDGE_RESOURCES 7 |
#define PCI_NUM_RESOURCES 11 |
#ifndef PCI_BUS_NUM_RESOURCES |
#define PCI_BUS_NUM_RESOURCES 8 |
#endif |
#define DEVICE_COUNT_RESOURCE 12 |
/* |
* The pci_dev structure is used to describe PCI devices. |
*/ |
/*
 * Trimmed-down pci_dev for this port: identification registers, power
 * management capability bits, decoded BAR/ROM resources and device
 * state flags.  Fields from the full Linux structure that this port
 * does not use are kept, commented out, to mirror the upstream layout.
 */
struct pci_dev {
//    struct list_head bus_list; /* node in per-bus list */
//    struct pci_bus *bus; /* bus this device is on */
//    struct pci_bus *subordinate; /* bus this device bridges to */
//    void *sysdata; /* hook for sys-specific extension */
//    struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */
//    struct pci_slot *slot; /* Physical slot this device is in */
    u32_t bus;    /* bus number (replaces the pci_bus pointer in this port) */
    u32_t devfn; /* encoded device & function index */
    u16_t vendor;            /* PCI_VENDOR_ID config register */
    u16_t device;            /* PCI_DEVICE_ID config register */
    u16_t subsystem_vendor;  /* PCI_SUBSYSTEM_VENDOR_ID */
    u16_t subsystem_device;  /* PCI_SUBSYSTEM_ID */
    u32_t class; /* 3 bytes: (base,sub,prog-if) */
    uint8_t revision; /* PCI revision, low byte of class word */
    uint8_t hdr_type; /* PCI header type (`multi' flag masked out) */
    uint8_t pcie_type; /* PCI-E device/port type */
    uint8_t rom_base_reg; /* which config register controls the ROM */
    uint8_t pin; /* which interrupt pin this device uses */
//    struct pci_driver *driver; /* which driver has allocated this device */
    uint64_t dma_mask; /* Mask of the bits of bus address this
                          device implements. Normally this is
                          0xffffffff. You only need to change
                          this if your device has broken DMA
                          or supports 64-bit transfers. */
//    struct device_dma_parameters dma_parms;
//    pci_power_t current_state; /* Current operating state. In ACPI-speak,
//                                  this is D0-D3, D0 being fully functional,
//                                  and D3 being off. */
//    int pm_cap; /* PM capability offset in the
//                   configuration space */
    unsigned int pme_support:5; /* Bitmask of states from which PME#
                                   can be generated */
    unsigned int d1_support:1; /* Low power state D1 is supported */
    unsigned int d2_support:1; /* Low power state D2 is supported */
    unsigned int no_d1d2:1; /* Only allow D0 and D3 */
//    pci_channel_state_t error_state; /* current connectivity state */
//    struct device dev; /* Generic device interface */
//    int cfg_size; /* Size of configuration space */
    /*
     * Instead of touching interrupt line and base address registers
     * directly, use the values stored here. They might be different!
     */
    unsigned int irq;
    struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
    /* These fields are used by common fixups */
    unsigned int transparent:1; /* Transparent PCI bridge */
    unsigned int multifunction:1;/* Part of multi-function device */
    /* keep track of device state */
    unsigned int is_added:1;
    unsigned int is_busmaster:1; /* device is busmaster */
    unsigned int no_msi:1; /* device may not use msi */
    unsigned int block_ucfg_access:1; /* userspace config space access is blocked */
    unsigned int broken_parity_status:1; /* Device generates false positive parity */
    unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */
    unsigned int msi_enabled:1;
    unsigned int msix_enabled:1;
    unsigned int ari_enabled:1; /* ARI forwarding */
    unsigned int is_managed:1;
    unsigned int is_pcie:1;
    unsigned int state_saved:1;
    unsigned int is_physfn:1;
    unsigned int is_virtfn:1;
//    pci_dev_flags_t dev_flags;
//    atomic_t enable_cnt; /* pci_enable_device has been called */
//    u32 saved_config_space[16]; /* config space saved at suspend time */
//    struct hlist_head saved_cap_space;
//    struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */
//    int rom_attr_enabled; /* has display of the rom attribute been enabled? */
//    struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
//    struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
};
#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start) |
#define pci_resource_end(dev, bar) ((dev)->resource[(bar)].end) |
#define pci_resource_flags(dev, bar) ((dev)->resource[(bar)].flags) |
#define pci_resource_len(dev,bar) \ |
((pci_resource_start((dev), (bar)) == 0 && \ |
pci_resource_end((dev), (bar)) == \ |
pci_resource_start((dev), (bar))) ? 0 : \ |
\ |
(pci_resource_end((dev), (bar)) - \ |
pci_resource_start((dev), (bar)) + 1)) |
/*
 * One match entry of a driver's device ID table; PCI_ANY_ID in a field
 * acts as a wildcard.  class is compared under class_mask.
 */
struct pci_device_id
{
    u16_t vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
    u16_t subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */
    u32_t class, class_mask; /* (class,subclass,prog-if) triplet */
    u32_t driver_data; /* Data private to the driver */
};
/*
 * pci_dev plus its list linkage — presumably the node type of the
 * device list populated by enum_pci_devices(); confirm against caller.
 */
typedef struct
{
    struct list_head link;   /* node in the enumerated-device list */
    struct pci_dev pci_dev;  /* embedded device record */
}pci_dev_t;
int enum_pci_devices(void); |
struct pci_device_id* |
find_pci_device(pci_dev_t* pdev, struct pci_device_id *idlist); |
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) |
int pci_set_dma_mask(struct pci_dev *dev, u64 mask); |
#define pci_name(x) "radeon" |
#endif //__PCI__H__ |
/drivers/include/linux/posix_types.h |
---|
0,0 → 1,49 |
#ifndef _LINUX_POSIX_TYPES_H |
#define _LINUX_POSIX_TYPES_H |
#include <linux/stddef.h> |
/* |
* This allows for 1024 file descriptors: if NR_OPEN is ever grown |
* beyond that you'll have to change this too. But 1024 fd's seem to be |
* enough even for such "real" unices like OSF/1, so hopefully this is |
* one limit that doesn't have to be changed [again]. |
* |
* Note that POSIX wants the FD_CLEAR(fd,fdsetp) defines to be in |
* <sys/time.h> (and thus <linux/time.h>) - but this is a more logical |
* place for them. Solved by having dummy defines in <sys/time.h>. |
*/ |
/* |
* Those macros may have been defined in <gnu/types.h>. But we always |
* use the ones here. |
*/ |
#undef __NFDBITS |
#define __NFDBITS (8 * sizeof(unsigned long)) |
#undef __FD_SETSIZE |
#define __FD_SETSIZE 1024 |
#undef __FDSET_LONGS |
#define __FDSET_LONGS (__FD_SETSIZE/__NFDBITS) |
#undef __FDELT |
#define __FDELT(d) ((d) / __NFDBITS) |
#undef __FDMASK |
#define __FDMASK(d) (1UL << ((d) % __NFDBITS)) |
/* Kernel-side fd_set: fixed bitmap covering __FD_SETSIZE (1024) descriptors. */
typedef struct {
	unsigned long fds_bits [__FDSET_LONGS];
} __kernel_fd_set;
/* Type of a signal handler. */ |
typedef void (*__kernel_sighandler_t)(int); |
/* Type of a SYSV IPC key. */ |
typedef int __kernel_key_t; |
typedef int __kernel_mqd_t; |
#include <asm/posix_types.h> |
#endif /* _LINUX_POSIX_TYPES_H */ |
/drivers/include/linux/sched.h |
---|
0,0 → 1,29 |
/* stub */ |
/*
 * Busy-delay for roughly @time milliseconds by calling the system's
 * exported Delay routine via the import table (__imp__Delay) —
 * presumably it takes the wait count in 10 ms ticks in EBX; confirm
 * against the kernel export.
 *
 * NOTE(review): the call clobbers EBX implicitly through the "b" input;
 * the empty second asm statement declares that clobber so GCC does not
 * assume EBX survived.  Kept as-is.
 *
 * Fix: removed the stray `;` after the function body — an extra
 * file-scope semicolon is not valid ISO C before C23.
 */
static inline void mdelay(unsigned long time)
{
        time /= 10;             /* convert ms to 10 ms ticks */
        if(!time) time = 1;     /* always wait at least one tick */

        __asm__ __volatile__ (
        "call *__imp__Delay"
        ::"b" (time));
        __asm__ __volatile__ (
        "":::"ebx");
}
/*
 * Busy-wait for roughly @delay microseconds by executing CPUID
 * (leaf 0) in a loop, clobbering EAX-EDX each iteration.
 *
 * NOTE(review): the 500 iterations-per-microsecond factor is a
 * hard-coded calibration guess, not tied to the actual CPU clock —
 * confirm against target hardware.
 */
static inline void udelay(unsigned long delay)
{
    if(!delay) delay++;   /* never spin zero iterations */
    delay*= 500;          /* iterations per microsecond (uncalibrated) */
    while(delay--)
    {
        __asm__ __volatile__(
        "xorl %%eax, %%eax \n\t"
        "cpuid"
        :::"eax","ebx","ecx","edx" );
    }
}
/drivers/include/linux/seq_file.h |
---|
0,0 → 1,3 |
/* stub */ |
#include <errno.h> |
/drivers/include/linux/spinlock.h |
---|
0,0 → 1,347 |
#ifndef __LINUX_SPINLOCK_H |
#define __LINUX_SPINLOCK_H |
/* |
* include/linux/spinlock.h - generic spinlock/rwlock declarations |
* |
* here's the role of the various spinlock/rwlock related include files: |
* |
* on SMP builds: |
* |
* asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the |
* initializers |
* |
* linux/spinlock_types.h: |
* defines the generic type and initializers |
* |
* asm/spinlock.h: contains the __raw_spin_*()/etc. lowlevel |
* implementations, mostly inline assembly code |
* |
* (also included on UP-debug builds:) |
* |
* linux/spinlock_api_smp.h: |
* contains the prototypes for the _spin_*() APIs. |
* |
* linux/spinlock.h: builds the final spin_*() APIs. |
* |
* on UP builds: |
* |
* linux/spinlock_type_up.h: |
* contains the generic, simplified UP spinlock type. |
* (which is an empty structure on non-debug builds) |
* |
* linux/spinlock_types.h: |
* defines the generic type and initializers |
* |
* linux/spinlock_up.h: |
* contains the __raw_spin_*()/etc. version of UP |
* builds. (which are NOPs on non-debug, non-preempt |
* builds) |
* |
* (included on UP-non-debug builds:) |
* |
* linux/spinlock_api_up.h: |
* builds the _spin_*() APIs. |
* |
* linux/spinlock.h: builds the final spin_*() APIs. |
*/ |
#include <linux/typecheck.h> |
//#include <linux/preempt.h> |
//#include <linux/linkage.h> |
#include <linux/compiler.h> |
//#include <linux/thread_info.h> |
#include <linux/kernel.h> |
#include <linux/stringify.h> |
//#include <linux/bottom_half.h> |
//#include <asm/system.h> |
/* |
* Must define these before including other files, inline functions need them |
*/ |
#define LOCK_SECTION_NAME ".text.lock."KBUILD_BASENAME |
#define LOCK_SECTION_START(extra) \ |
".subsection 1\n\t" \ |
extra \ |
".ifndef " LOCK_SECTION_NAME "\n\t" \ |
LOCK_SECTION_NAME ":\n\t" \ |
".endif\n" |
#define LOCK_SECTION_END \ |
".previous\n\t" |
#define __lockfunc __attribute__((section(".spinlock.text"))) |
/* |
* Pull the raw_spinlock_t and raw_rwlock_t definitions: |
*/ |
#include <linux/spinlock_types.h> |
/* |
 * Pull the __raw*() functions/declarations (UP-nondebug doesn't need them):
*/ |
#ifdef CONFIG_SMP |
# include <asm/spinlock.h> |
#else |
# include <linux/spinlock_up.h> |
#endif |
#ifdef CONFIG_DEBUG_SPINLOCK |
extern void __spin_lock_init(spinlock_t *lock, const char *name, |
struct lock_class_key *key); |
# define spin_lock_init(lock) \ |
do { \ |
static struct lock_class_key __key; \ |
\ |
__spin_lock_init((lock), #lock, &__key); \ |
} while (0) |
#else |
# define spin_lock_init(lock) \ |
do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0) |
#endif |
#ifdef CONFIG_DEBUG_SPINLOCK |
extern void __rwlock_init(rwlock_t *lock, const char *name, |
struct lock_class_key *key); |
# define rwlock_init(lock) \ |
do { \ |
static struct lock_class_key __key; \ |
\ |
__rwlock_init((lock), #lock, &__key); \ |
} while (0) |
#else |
# define rwlock_init(lock) \ |
do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0) |
#endif |
#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) |
#ifdef CONFIG_GENERIC_LOCKBREAK |
#define spin_is_contended(lock) ((lock)->break_lock) |
#else |
#ifdef __raw_spin_is_contended |
#define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock) |
#else |
#define spin_is_contended(lock) (((void)(lock), 0)) |
#endif /*__raw_spin_is_contended*/ |
#endif |
/* The lock does not imply full memory barrier. */ |
#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK |
static inline void smp_mb__after_lock(void) { smp_mb(); } |
#endif |
/** |
* spin_unlock_wait - wait until the spinlock gets unlocked |
* @lock: the spinlock in question. |
*/ |
#define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock) |
#ifdef CONFIG_DEBUG_SPINLOCK |
extern void _raw_spin_lock(spinlock_t *lock); |
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) |
extern int _raw_spin_trylock(spinlock_t *lock); |
extern void _raw_spin_unlock(spinlock_t *lock); |
extern void _raw_read_lock(rwlock_t *lock); |
#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock) |
extern int _raw_read_trylock(rwlock_t *lock); |
extern void _raw_read_unlock(rwlock_t *lock); |
extern void _raw_write_lock(rwlock_t *lock); |
#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock) |
extern int _raw_write_trylock(rwlock_t *lock); |
extern void _raw_write_unlock(rwlock_t *lock); |
#else |
# define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock) |
# define _raw_spin_lock_flags(lock, flags) \ |
__raw_spin_lock_flags(&(lock)->raw_lock, *(flags)) |
# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock) |
# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) |
# define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock) |
# define _raw_read_lock_flags(lock, flags) \ |
__raw_read_lock_flags(&(lock)->raw_lock, *(flags)) |
# define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock) |
# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock) |
# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock) |
# define _raw_write_lock_flags(lock, flags) \ |
__raw_write_lock_flags(&(lock)->raw_lock, *(flags)) |
# define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock) |
# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock) |
#endif |
#define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock) |
#define write_can_lock(rwlock) __raw_write_can_lock(&(rwlock)->raw_lock) |
/* |
* Define the various spin_lock and rw_lock methods. Note we define these |
* regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various |
* methods are defined as nops in the case they are not required. |
*/ |
#define spin_trylock(lock) __cond_lock(lock, _spin_trylock(lock)) |
#define read_trylock(lock) __cond_lock(lock, _read_trylock(lock)) |
#define write_trylock(lock) __cond_lock(lock, _write_trylock(lock)) |
#define spin_lock(lock) _spin_lock(lock) |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass) |
# define spin_lock_nest_lock(lock, nest_lock) \ |
do { \ |
typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\ |
_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ |
} while (0) |
#else |
# define spin_lock_nested(lock, subclass) _spin_lock(lock) |
# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock) |
#endif |
#define write_lock(lock) _write_lock(lock) |
#define read_lock(lock) _read_lock(lock) |
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) |
#define spin_lock_irqsave(lock, flags) \ |
do { \ |
typecheck(unsigned long, flags); \ |
flags = _spin_lock_irqsave(lock); \ |
} while (0) |
#define read_lock_irqsave(lock, flags) \ |
do { \ |
typecheck(unsigned long, flags); \ |
flags = _read_lock_irqsave(lock); \ |
} while (0) |
#define write_lock_irqsave(lock, flags) \ |
do { \ |
typecheck(unsigned long, flags); \ |
flags = _write_lock_irqsave(lock); \ |
} while (0) |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
#define spin_lock_irqsave_nested(lock, flags, subclass) \ |
do { \ |
typecheck(unsigned long, flags); \ |
flags = _spin_lock_irqsave_nested(lock, subclass); \ |
} while (0) |
#else |
#define spin_lock_irqsave_nested(lock, flags, subclass) \ |
do { \ |
typecheck(unsigned long, flags); \ |
flags = _spin_lock_irqsave(lock); \ |
} while (0) |
#endif |
#else |
#define spin_lock_irqsave(lock, flags) \ |
do { \ |
typecheck(unsigned long, flags); \ |
_spin_lock_irqsave(lock, flags); \ |
} while (0) |
#define read_lock_irqsave(lock, flags) \ |
do { \ |
typecheck(unsigned long, flags); \ |
_read_lock_irqsave(lock, flags); \ |
} while (0) |
#define write_lock_irqsave(lock, flags) \ |
do { \ |
typecheck(unsigned long, flags); \ |
_write_lock_irqsave(lock, flags); \ |
} while (0) |
#define spin_lock_irqsave_nested(lock, flags, subclass) \ |
spin_lock_irqsave(lock, flags) |
#endif |
#define spin_lock_irq(lock) _spin_lock_irq(lock) |
#define spin_lock_bh(lock) _spin_lock_bh(lock) |
#define read_lock_irq(lock) _read_lock_irq(lock) |
#define read_lock_bh(lock) _read_lock_bh(lock) |
#define write_lock_irq(lock) _write_lock_irq(lock) |
#define write_lock_bh(lock) _write_lock_bh(lock) |
#define spin_unlock(lock) _spin_unlock(lock) |
#define read_unlock(lock) _read_unlock(lock) |
#define write_unlock(lock) _write_unlock(lock) |
#define spin_unlock_irq(lock) _spin_unlock_irq(lock) |
#define read_unlock_irq(lock) _read_unlock_irq(lock) |
#define write_unlock_irq(lock) _write_unlock_irq(lock) |
#define spin_unlock_irqrestore(lock, flags) \ |
do { \ |
typecheck(unsigned long, flags); \ |
_spin_unlock_irqrestore(lock, flags); \ |
} while (0) |
#define spin_unlock_bh(lock) _spin_unlock_bh(lock) |
#define read_unlock_irqrestore(lock, flags) \ |
do { \ |
typecheck(unsigned long, flags); \ |
_read_unlock_irqrestore(lock, flags); \ |
} while (0) |
#define read_unlock_bh(lock) _read_unlock_bh(lock) |
#define write_unlock_irqrestore(lock, flags) \ |
do { \ |
typecheck(unsigned long, flags); \ |
_write_unlock_irqrestore(lock, flags); \ |
} while (0) |
#define write_unlock_bh(lock) _write_unlock_bh(lock) |
#define spin_trylock_bh(lock) __cond_lock(lock, _spin_trylock_bh(lock)) |
#define spin_trylock_irq(lock) \ |
({ \ |
local_irq_disable(); \ |
spin_trylock(lock) ? \ |
1 : ({ local_irq_enable(); 0; }); \ |
}) |
#define spin_trylock_irqsave(lock, flags) \ |
({ \ |
local_irq_save(flags); \ |
spin_trylock(lock) ? \ |
1 : ({ local_irq_restore(flags); 0; }); \ |
}) |
#define write_trylock_irqsave(lock, flags) \ |
({ \ |
local_irq_save(flags); \ |
write_trylock(lock) ? \ |
1 : ({ local_irq_restore(flags); 0; }); \ |
}) |
/* |
* Pull the atomic_t declaration: |
* (asm-mips/atomic.h needs above definitions) |
*/ |
#include <asm/atomic.h> |
/** |
* atomic_dec_and_lock - lock on reaching reference count zero |
* @atomic: the atomic counter |
* @lock: the spinlock in question |
* |
* Decrements @atomic by 1. If the result is 0, returns true and locks |
* @lock. Returns false for all other cases. |
*/ |
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); |
#define atomic_dec_and_lock(atomic, lock) \ |
__cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) |
/** |
* spin_can_lock - would spin_trylock() succeed? |
* @lock: the spinlock in question. |
*/ |
#define spin_can_lock(lock) (!spin_is_locked(lock)) |
/* |
* Pull the _spin_*()/_read_*()/_write_*() functions/declarations: |
*/ |
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) |
# include <linux/spinlock_api_smp.h> |
#else |
# include <linux/spinlock_api_up.h> |
#endif |
#endif /* __LINUX_SPINLOCK_H */ |
/drivers/include/linux/spinlock_api_up.h |
---|
0,0 → 1,81 |
#ifndef __LINUX_SPINLOCK_API_UP_H |
#define __LINUX_SPINLOCK_API_UP_H |
#ifndef __LINUX_SPINLOCK_H |
# error "please don't include this file directly" |
#endif |
/* |
* include/linux/spinlock_api_up.h |
* |
* spinlock API implementation on UP-nondebug (inlined implementation) |
* |
* portions Copyright 2005, Red Hat, Inc., Ingo Molnar |
* Released under the General Public License (GPL). |
*/ |
#define in_lock_functions(ADDR) 0 |
#define assert_spin_locked(lock) do { (void)(lock); } while (0) |
/* |
* In the UP-nondebug case there's no real locking going on, so the |
* only thing we have to do is to keep the preempt counts and irq |
* flags straight, to suppress compiler warnings of unused lock |
* variables, and to add the proper checker annotations: |
*/ |
#define __LOCK(lock) \ |
do { preempt_disable(); __acquire(lock); (void)(lock); } while (0) |
#define __LOCK_BH(lock) \ |
do { local_bh_disable(); __LOCK(lock); } while (0) |
#define __LOCK_IRQ(lock) \ |
do { local_irq_disable(); __LOCK(lock); } while (0) |
#define __LOCK_IRQSAVE(lock, flags) \ |
do { local_irq_save(flags); __LOCK(lock); } while (0) |
#define __UNLOCK(lock) \ |
do { preempt_enable(); __release(lock); (void)(lock); } while (0) |
#define __UNLOCK_BH(lock) \ |
do { preempt_enable_no_resched(); local_bh_enable(); __release(lock); (void)(lock); } while (0) |
#define __UNLOCK_IRQ(lock) \ |
do { local_irq_enable(); __UNLOCK(lock); } while (0) |
#define __UNLOCK_IRQRESTORE(lock, flags) \ |
do { local_irq_restore(flags); __UNLOCK(lock); } while (0) |
#define _spin_lock(lock) __LOCK(lock) |
#define _spin_lock_nested(lock, subclass) __LOCK(lock) |
#define _read_lock(lock) __LOCK(lock) |
#define _write_lock(lock) __LOCK(lock) |
#define _spin_lock_bh(lock) __LOCK_BH(lock) |
#define _read_lock_bh(lock) __LOCK_BH(lock) |
#define _write_lock_bh(lock) __LOCK_BH(lock) |
#define _spin_lock_irq(lock) __LOCK_IRQ(lock) |
#define _read_lock_irq(lock) __LOCK_IRQ(lock) |
#define _write_lock_irq(lock) __LOCK_IRQ(lock) |
#define _spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) |
#define _read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) |
#define _write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) |
#define _spin_trylock(lock) ({ __LOCK(lock); 1; }) |
#define _read_trylock(lock) ({ __LOCK(lock); 1; }) |
#define _write_trylock(lock) ({ __LOCK(lock); 1; }) |
#define _spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; }) |
#define _spin_unlock(lock) __UNLOCK(lock) |
#define _read_unlock(lock) __UNLOCK(lock) |
#define _write_unlock(lock) __UNLOCK(lock) |
#define _spin_unlock_bh(lock) __UNLOCK_BH(lock) |
#define _write_unlock_bh(lock) __UNLOCK_BH(lock) |
#define _read_unlock_bh(lock) __UNLOCK_BH(lock) |
#define _spin_unlock_irq(lock) __UNLOCK_IRQ(lock) |
#define _read_unlock_irq(lock) __UNLOCK_IRQ(lock) |
#define _write_unlock_irq(lock) __UNLOCK_IRQ(lock) |
#define _spin_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) |
#define _read_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) |
#define _write_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) |
#endif /* __LINUX_SPINLOCK_API_UP_H */ |
/drivers/include/linux/spinlock_types.h |
---|
0,0 → 1,100 |
#ifndef __LINUX_SPINLOCK_TYPES_H |
#define __LINUX_SPINLOCK_TYPES_H |
/* |
* include/linux/spinlock_types.h - generic spinlock type definitions |
* and initializers |
* |
* portions Copyright 2005, Red Hat, Inc., Ingo Molnar |
* Released under the General Public License (GPL). |
*/ |
#if defined(CONFIG_SMP) |
# include <asm/spinlock_types.h> |
#else |
# include <linux/spinlock_types_up.h> |
#endif |
#include <linux/lockdep.h> |
/*
 * Architecture-independent spinlock: wraps the arch (SMP) or stub (UP)
 * raw lock, plus optional debugging and lockdep state.
 */
typedef struct {
	raw_spinlock_t raw_lock;	/* the actual lock word */
#ifdef CONFIG_GENERIC_LOCKBREAK
	unsigned int break_lock;	/* read by spin_is_contended() */
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	unsigned int magic, owner_cpu;	/* SPINLOCK_MAGIC + last owner CPU */
	void *owner;			/* presumably the holding task — confirm */
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;	/* lockdep tracking state */
#endif
} spinlock_t;
#define SPINLOCK_MAGIC 0xdead4ead |
/*
 * Architecture-independent reader/writer lock, mirroring spinlock_t:
 * the raw arch/UP lock plus optional debug and lockdep state.
 */
typedef struct {
	raw_rwlock_t raw_lock;		/* the actual rwlock word */
#ifdef CONFIG_GENERIC_LOCKBREAK
	unsigned int break_lock;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	unsigned int magic, owner_cpu;	/* RWLOCK_MAGIC + last owner CPU */
	void *owner;			/* presumably the write-lock holder — confirm */
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;	/* lockdep tracking state */
#endif
} rwlock_t;
#define RWLOCK_MAGIC 0xdeaf1eed |
#define SPINLOCK_OWNER_INIT ((void *)-1L) |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } |
#else |
# define SPIN_DEP_MAP_INIT(lockname) |
#endif |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } |
#else |
# define RW_DEP_MAP_INIT(lockname) |
#endif |
#ifdef CONFIG_DEBUG_SPINLOCK |
# define __SPIN_LOCK_UNLOCKED(lockname) \ |
(spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ |
.magic = SPINLOCK_MAGIC, \ |
.owner = SPINLOCK_OWNER_INIT, \ |
.owner_cpu = -1, \ |
SPIN_DEP_MAP_INIT(lockname) } |
#define __RW_LOCK_UNLOCKED(lockname) \ |
(rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ |
.magic = RWLOCK_MAGIC, \ |
.owner = SPINLOCK_OWNER_INIT, \ |
.owner_cpu = -1, \ |
RW_DEP_MAP_INIT(lockname) } |
#else |
# define __SPIN_LOCK_UNLOCKED(lockname) \ |
(spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ |
SPIN_DEP_MAP_INIT(lockname) } |
#define __RW_LOCK_UNLOCKED(lockname) \ |
(rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ |
RW_DEP_MAP_INIT(lockname) } |
#endif |
/* |
* SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED defeat lockdep state tracking and |
* are hence deprecated. |
* Please use DEFINE_SPINLOCK()/DEFINE_RWLOCK() or |
* __SPIN_LOCK_UNLOCKED()/__RW_LOCK_UNLOCKED() as appropriate. |
*/ |
#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init) |
#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init) |
#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) |
#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) |
#endif /* __LINUX_SPINLOCK_TYPES_H */ |
/drivers/include/linux/spinlock_types_up.h |
---|
0,0 → 1,37 |
#ifndef __LINUX_SPINLOCK_TYPES_UP_H |
#define __LINUX_SPINLOCK_TYPES_UP_H |
#ifndef __LINUX_SPINLOCK_TYPES_H |
# error "please don't include this file directly" |
#endif |
/* |
* include/linux/spinlock_types_up.h - spinlock type definitions for UP |
* |
* portions Copyright 2005, Red Hat, Inc., Ingo Molnar |
* Released under the General Public License (GPL). |
*/ |
#ifdef CONFIG_DEBUG_SPINLOCK |
typedef struct {
	/* Inverted scheme (see header comment): 1 == unlocked, 0 == locked. */
	volatile unsigned int slock;
} raw_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 1 } |
#else |
typedef struct { } raw_spinlock_t; |
#define __RAW_SPIN_LOCK_UNLOCKED { } |
#endif |
typedef struct { |
/* no debug version on UP */ |
} raw_rwlock_t; |
#define __RAW_RW_LOCK_UNLOCKED { } |
#endif /* __LINUX_SPINLOCK_TYPES_UP_H */ |
/drivers/include/linux/spinlock_up.h |
---|
0,0 → 1,76 |
#ifndef __LINUX_SPINLOCK_UP_H |
#define __LINUX_SPINLOCK_UP_H |
#ifndef __LINUX_SPINLOCK_H |
# error "please don't include this file directly" |
#endif |
/* |
* include/linux/spinlock_up.h - UP-debug version of spinlocks. |
* |
* portions Copyright 2005, Red Hat, Inc., Ingo Molnar |
* Released under the General Public License (GPL). |
* |
* In the debug case, 1 means unlocked, 0 means locked. (the values |
* are inverted, to catch initialization bugs) |
* |
* No atomicity anywhere, we are on UP. |
*/ |
#ifdef CONFIG_DEBUG_SPINLOCK |
#define __raw_spin_is_locked(x) ((x)->slock == 0) |
/* Acquire (UP-debug): mark the lock taken; 0 == locked in the inverted scheme. */
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	lock->slock = 0;
}
/*
 * Acquire with IRQs disabled (UP-debug).
 *
 * NOTE(review): local_irq_save() stores into the by-value 'flags'
 * parameter, so the saved flags never reach the caller — this matches
 * the historical upstream code, but confirm no caller expects 'flags'
 * to be updated through this path.
 */
static inline void
__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
	local_irq_save(flags);
	lock->slock = 0;
}
/*
 * Try-acquire (UP-debug): always takes the lock, but returns the
 * previous state so a double-lock shows up as a failed trylock.
 */
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	char oldval = lock->slock;	/* narrowing read; slock holds 0 or 1 here */
	lock->slock = 0;
	return oldval > 0;		/* nonzero (was unlocked) => success */
}
/* Release (UP-debug): 1 == unlocked in the inverted scheme. */
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	lock->slock = 1;
}
/* |
* Read-write spinlocks. No debug version. |
*/ |
#define __raw_read_lock(lock) do { (void)(lock); } while (0) |
#define __raw_write_lock(lock) do { (void)(lock); } while (0) |
#define __raw_read_trylock(lock) ({ (void)(lock); 1; }) |
#define __raw_write_trylock(lock) ({ (void)(lock); 1; }) |
#define __raw_read_unlock(lock) do { (void)(lock); } while (0) |
#define __raw_write_unlock(lock) do { (void)(lock); } while (0) |
#else /* DEBUG_SPINLOCK */ |
#define __raw_spin_is_locked(lock) ((void)(lock), 0) |
/* for sched.c and kernel_lock.c: */ |
# define __raw_spin_lock(lock) do { (void)(lock); } while (0) |
# define __raw_spin_lock_flags(lock, flags) do { (void)(lock); } while (0) |
# define __raw_spin_unlock(lock) do { (void)(lock); } while (0) |
# define __raw_spin_trylock(lock) ({ (void)(lock); 1; }) |
#endif /* DEBUG_SPINLOCK */ |
#define __raw_spin_is_contended(lock) (((void)(lock), 0)) |
#define __raw_read_can_lock(lock) (((void)(lock), 1)) |
#define __raw_write_can_lock(lock) (((void)(lock), 1)) |
#define __raw_spin_unlock_wait(lock) \ |
do { cpu_relax(); } while (__raw_spin_is_locked(lock)) |
#endif /* __LINUX_SPINLOCK_UP_H */ |
/drivers/include/linux/stddef.h |
---|
0,0 → 1,28 |
#ifndef _LINUX_STDDEF_H |
#define _LINUX_STDDEF_H |
#include <linux/compiler.h> |
#undef NULL |
#if defined(__cplusplus) |
#define NULL 0 |
#else |
#define NULL ((void *)0) |
#endif |
#ifdef __KERNEL__ |
enum { |
false = 0, |
true = 1 |
}; |
#undef offsetof |
#ifdef __compiler_offsetof |
#define offsetof(TYPE,MEMBER) __compiler_offsetof(TYPE,MEMBER) |
#else |
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) |
#endif |
#endif /* __KERNEL__ */ |
#endif |
/drivers/include/linux/string.h |
---|
0,0 → 1,135 |
#ifndef _LINUX_STRING_H_ |
#define _LINUX_STRING_H_ |
/* We don't want strings.h stuff being used by user stuff by accident */ |
#ifndef __KERNEL__ |
#include <string.h> |
#else |
#include <linux/compiler.h> /* for inline */ |
#include <linux/types.h> /* for size_t */ |
#include <linux/stddef.h> /* for NULL */ |
#include <stdarg.h> |
extern char *strndup_user(const char __user *, long); |
extern void *memdup_user(const void __user *, size_t); |
/* |
* Include machine specific inline routines |
*/ |
#include <asm/string.h> |
#ifndef __HAVE_ARCH_STRCPY |
extern char * strcpy(char *,const char *); |
#endif |
#ifndef __HAVE_ARCH_STRNCPY |
extern char * strncpy(char *,const char *, __kernel_size_t); |
#endif |
#ifndef __HAVE_ARCH_STRLCPY |
size_t strlcpy(char *, const char *, size_t); |
#endif |
#ifndef __HAVE_ARCH_STRCAT |
extern char * strcat(char *, const char *); |
#endif |
#ifndef __HAVE_ARCH_STRNCAT |
extern char * strncat(char *, const char *, __kernel_size_t); |
#endif |
#ifndef __HAVE_ARCH_STRLCAT |
extern size_t strlcat(char *, const char *, __kernel_size_t); |
#endif |
#ifndef __HAVE_ARCH_STRCMP |
extern int strcmp(const char *,const char *); |
#endif |
#ifndef __HAVE_ARCH_STRNCMP |
extern int strncmp(const char *,const char *,__kernel_size_t); |
#endif |
#ifndef __HAVE_ARCH_STRNICMP |
extern int strnicmp(const char *, const char *, __kernel_size_t); |
#endif |
#ifndef __HAVE_ARCH_STRCASECMP |
extern int strcasecmp(const char *s1, const char *s2); |
#endif |
#ifndef __HAVE_ARCH_STRNCASECMP |
extern int strncasecmp(const char *s1, const char *s2, size_t n); |
#endif |
#ifndef __HAVE_ARCH_STRCHR |
extern char * strchr(const char *,int); |
#endif |
#ifndef __HAVE_ARCH_STRNCHR |
extern char * strnchr(const char *, size_t, int); |
#endif |
#ifndef __HAVE_ARCH_STRRCHR |
extern char * strrchr(const char *,int); |
#endif |
extern char * __must_check strstrip(char *); |
#ifndef __HAVE_ARCH_STRSTR |
extern char * strstr(const char *,const char *); |
#endif |
#ifndef __HAVE_ARCH_STRLEN |
extern __kernel_size_t strlen(const char *); |
#endif |
#ifndef __HAVE_ARCH_STRNLEN |
extern __kernel_size_t strnlen(const char *,__kernel_size_t); |
#endif |
#ifndef __HAVE_ARCH_STRPBRK |
extern char * strpbrk(const char *,const char *); |
#endif |
#ifndef __HAVE_ARCH_STRSEP |
extern char * strsep(char **,const char *); |
#endif |
#ifndef __HAVE_ARCH_STRSPN |
extern __kernel_size_t strspn(const char *,const char *); |
#endif |
#ifndef __HAVE_ARCH_STRCSPN |
extern __kernel_size_t strcspn(const char *,const char *); |
#endif |
#ifndef __HAVE_ARCH_MEMSET |
extern void * memset(void *,int,__kernel_size_t); |
#endif |
#ifndef __HAVE_ARCH_MEMCPY |
extern void * memcpy(void *,const void *,__kernel_size_t); |
#endif |
#ifndef __HAVE_ARCH_MEMMOVE |
extern void * memmove(void *,const void *,__kernel_size_t); |
#endif |
#ifndef __HAVE_ARCH_MEMSCAN |
extern void * memscan(void *,int,__kernel_size_t); |
#endif |
#ifndef __HAVE_ARCH_MEMCMP |
extern int memcmp(const void *,const void *,__kernel_size_t); |
#endif |
#ifndef __HAVE_ARCH_MEMCHR |
extern void * memchr(const void *,int,__kernel_size_t); |
#endif |
extern char *kstrdup(const char *s, gfp_t gfp); |
extern char *kstrndup(const char *s, size_t len, gfp_t gfp); |
extern void *kmemdup(const void *src, size_t len, gfp_t gfp); |
extern char **argv_split(gfp_t gfp, const char *str, int *argcp); |
extern void argv_free(char **argv); |
extern bool sysfs_streq(const char *s1, const char *s2); |
#ifdef CONFIG_BINARY_PRINTF |
int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args); |
int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf); |
int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4); |
#endif |
extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos, |
const void *from, size_t available); |
/** |
* strstarts - does @str start with @prefix? |
* @str: string to examine |
* @prefix: prefix to look for. |
*/ |
/* True when @str begins with the (possibly empty) string @prefix. */
static inline bool strstarts(const char *str, const char *prefix)
{
	size_t plen = strlen(prefix);

	return strncmp(str, prefix, plen) == 0;
}
#endif |
#endif /* _LINUX_STRING_H_ */ |
/drivers/include/linux/stringify.h |
---|
0,0 → 1,12 |
#ifndef __LINUX_STRINGIFY_H |
#define __LINUX_STRINGIFY_H |
/* Indirect stringification. Doing two levels allows the parameter to be a |
* macro itself. For example, compile with -DFOO=bar, __stringify(FOO) |
* converts to "bar". |
*/ |
#define __stringify_1(x...) #x |
#define __stringify(x...) __stringify_1(x) |
#endif /* !__LINUX_STRINGIFY_H */ |
/drivers/include/linux/swab.h |
---|
0,0 → 1,299 |
#ifndef _LINUX_SWAB_H |
#define _LINUX_SWAB_H |
#include <linux/types.h> |
#include <linux/compiler.h> |
#include <asm/swab.h> |
/* |
 * casts are necessary for constants, because we never know for sure
 * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
*/ |
#define ___constant_swab16(x) ((__u16)( \ |
(((__u16)(x) & (__u16)0x00ffU) << 8) | \ |
(((__u16)(x) & (__u16)0xff00U) >> 8))) |
#define ___constant_swab32(x) ((__u32)( \ |
(((__u32)(x) & (__u32)0x000000ffUL) << 24) | \ |
(((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \ |
(((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \ |
(((__u32)(x) & (__u32)0xff000000UL) >> 24))) |
#define ___constant_swab64(x) ((__u64)( \ |
(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \ |
(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \ |
(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \ |
(((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) | \ |
(((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) | \ |
(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \ |
(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \ |
(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56))) |
#define ___constant_swahw32(x) ((__u32)( \ |
(((__u32)(x) & (__u32)0x0000ffffUL) << 16) | \ |
(((__u32)(x) & (__u32)0xffff0000UL) >> 16))) |
#define ___constant_swahb32(x) ((__u32)( \ |
(((__u32)(x) & (__u32)0x00ff00ffUL) << 8) | \ |
(((__u32)(x) & (__u32)0xff00ff00UL) >> 8))) |
/* |
* Implement the following as inlines, but define the interface using |
* macros to allow constant folding when possible: |
* ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32 |
*/ |
/* Runtime 16-bit byteswap: arch instruction when available, else generic. */
static inline __attribute_const__ __u16 __fswab16(__u16 val)
{
#ifdef __arch_swab16
	return __arch_swab16(val);
#else
	return ___constant_swab16(val);
#endif
}
/* Runtime 32-bit byteswap: arch instruction when available, else generic. */
static inline __attribute_const__ __u32 __fswab32(__u32 val)
{
#ifdef __arch_swab32
	return __arch_swab32(val);
#else
	return ___constant_swab32(val);
#endif
}
/* Runtime 64-bit byteswap: arch instruction when available; otherwise
 * fall back to swapping the two 32-bit halves, or the generic form. */
static inline __attribute_const__ __u64 __fswab64(__u64 val)
{
#ifdef __arch_swab64
	return __arch_swab64(val);
#elif defined(__SWAB_64_THRU_32__)
	__u32 hi = val >> 32;
	__u32 lo = (__u32)val;

	return ((__u64)__fswab32(lo) << 32) | (__u64)__fswab32(hi);
#else
	return ___constant_swab64(val);
#endif
}
/* Runtime 32-bit wordswap (exchange the 16-bit halves); arch override first. */
static inline __attribute_const__ __u32 __fswahw32(__u32 val)
{
#ifdef __arch_swahw32
	return __arch_swahw32(val);
#else
	return ___constant_swahw32(val);
#endif
}
/* Runtime high/low byteswap (swap bytes within each 16-bit half). */
static inline __attribute_const__ __u32 __fswahb32(__u32 val)
{
#ifdef __arch_swahb32
	return __arch_swahb32(val);
#else
	return ___constant_swahb32(val);
#endif
}
/** |
* __swab16 - return a byteswapped 16-bit value |
* @x: value to byteswap |
*/ |
#define __swab16(x) \ |
(__builtin_constant_p((__u16)(x)) ? \ |
___constant_swab16(x) : \ |
__fswab16(x)) |
/** |
* __swab32 - return a byteswapped 32-bit value |
* @x: value to byteswap |
*/ |
#define __swab32(x) \ |
(__builtin_constant_p((__u32)(x)) ? \ |
___constant_swab32(x) : \ |
__fswab32(x)) |
/** |
* __swab64 - return a byteswapped 64-bit value |
* @x: value to byteswap |
*/ |
#define __swab64(x) \ |
(__builtin_constant_p((__u64)(x)) ? \ |
___constant_swab64(x) : \ |
__fswab64(x)) |
/** |
* __swahw32 - return a word-swapped 32-bit value |
* @x: value to wordswap |
* |
* __swahw32(0x12340000) is 0x00001234 |
*/ |
#define __swahw32(x) \ |
(__builtin_constant_p((__u32)(x)) ? \ |
___constant_swahw32(x) : \ |
__fswahw32(x)) |
/** |
* __swahb32 - return a high and low byte-swapped 32-bit value |
* @x: value to byteswap |
* |
* __swahb32(0x12345678) is 0x34127856 |
*/ |
#define __swahb32(x) \ |
(__builtin_constant_p((__u32)(x)) ? \ |
___constant_swahb32(x) : \ |
__fswahb32(x)) |
/**
 * __swab16p - return a byteswapped 16-bit value from a pointer
 * @p: pointer to a naturally-aligned 16-bit value
 *
 * *@p itself is not modified.
 */
static inline __u16 __swab16p(const __u16 *p)
{
#ifdef __arch_swab16p
	return __arch_swab16p(p);	/* arch may fuse the load and swap */
#else
	return __swab16(*p);
#endif
}
/**
 * __swab32p - return a byteswapped 32-bit value from a pointer
 * @p: pointer to a naturally-aligned 32-bit value
 *
 * *@p itself is not modified.
 */
static inline __u32 __swab32p(const __u32 *p)
{
#ifdef __arch_swab32p
	return __arch_swab32p(p);	/* arch may fuse the load and swap */
#else
	return __swab32(*p);
#endif
}
/**
 * __swab64p - return a byteswapped 64-bit value from a pointer
 * @p: pointer to a naturally-aligned 64-bit value
 *
 * *@p itself is not modified.
 */
static inline __u64 __swab64p(const __u64 *p)
{
#ifdef __arch_swab64p
	return __arch_swab64p(p);	/* arch may fuse the load and swap */
#else
	return __swab64(*p);
#endif
}
/**
 * __swahw32p - return a wordswapped 32-bit value from a pointer
 * @p: pointer to a naturally-aligned 32-bit value
 *
 * See __swahw32() for details of wordswapping. *@p is not modified.
 */
static inline __u32 __swahw32p(const __u32 *p)
{
#ifdef __arch_swahw32p
	return __arch_swahw32p(p);
#else
	return __swahw32(*p);
#endif
}
/**
 * __swahb32p - return a high and low byteswapped 32-bit value from a pointer
 * @p: pointer to a naturally-aligned 32-bit value
 *
 * See __swahb32() for details of high/low byteswapping. *@p is not modified.
 */
static inline __u32 __swahb32p(const __u32 *p)
{
#ifdef __arch_swahb32p
	return __arch_swahb32p(p);
#else
	return __swahb32(*p);
#endif
}
/**
 * __swab16s - byteswap a 16-bit value in-place
 * @p: pointer to a naturally-aligned 16-bit value
 *
 * Uses the arch in-place primitive when present, otherwise swaps via
 * __swab16p() and stores the result back.
 */
static inline void __swab16s(__u16 *p)
{
#ifdef __arch_swab16s
	__arch_swab16s(p);
#else
	*p = __swab16p(p);
#endif
}
/**
 * __swab32s - byteswap a 32-bit value in-place
 * @p: pointer to a naturally-aligned 32-bit value
 *
 * Uses the arch in-place primitive when present, otherwise swaps via
 * __swab32p() and stores the result back.
 */
static inline void __swab32s(__u32 *p)
{
#ifdef __arch_swab32s
	__arch_swab32s(p);
#else
	*p = __swab32p(p);
#endif
}
/**
 * __swab64s - byteswap a 64-bit value in-place
 * @p: pointer to a naturally-aligned 64-bit value
 *
 * Uses the arch in-place primitive when present, otherwise swaps via
 * __swab64p() and stores the result back.
 */
static inline void __swab64s(__u64 *p)
{
#ifdef __arch_swab64s
	__arch_swab64s(p);
#else
	*p = __swab64p(p);
#endif
}
/**
 * __swahw32s - wordswap a 32-bit value in-place
 * @p: pointer to a naturally-aligned 32-bit value
 *
 * See __swahw32() for details of wordswapping. Uses the arch in-place
 * primitive when present, otherwise swaps via __swahw32p().
 */
static inline void __swahw32s(__u32 *p)
{
#ifdef __arch_swahw32s
	__arch_swahw32s(p);
#else
	*p = __swahw32p(p);
#endif
}
/**
 * __swahb32s - high and low byteswap a 32-bit value in-place
 * @p: pointer to a naturally-aligned 32-bit value
 *
 * See __swahb32() for details of high and low byte swapping. Uses the
 * arch in-place primitive when present, otherwise swaps via __swahb32p().
 */
static inline void __swahb32s(__u32 *p)
{
#ifdef __arch_swahb32s
	__arch_swahb32s(p);
#else
	*p = __swahb32p(p);
#endif
}
#ifdef __KERNEL__ |
# define swab16 __swab16 |
# define swab32 __swab32 |
# define swab64 __swab64 |
# define swahw32 __swahw32 |
# define swahb32 __swahb32 |
# define swab16p __swab16p |
# define swab32p __swab32p |
# define swab64p __swab64p |
# define swahw32p __swahw32p |
# define swahb32p __swahb32p |
# define swab16s __swab16s |
# define swab32s __swab32s |
# define swab64s __swab64s |
# define swahw32s __swahw32s |
# define swahb32s __swahb32s |
#endif /* __KERNEL__ */ |
#endif /* _LINUX_SWAB_H */ |
/drivers/include/linux/typecheck.h |
---|
0,0 → 1,24 |
#ifndef TYPECHECK_H_INCLUDED |
#define TYPECHECK_H_INCLUDED |
/* |
* Check at compile time that something is of a particular type. |
* Always evaluates to 1 so you may use it easily in comparisons. |
*/ |
#define typecheck(type,x) \ |
({ type __dummy; \ |
typeof(x) __dummy2; \ |
(void)(&__dummy == &__dummy2); \ |
1; \ |
}) |
/* |
* Check at compile time that 'function' is a certain type, or is a pointer |
* to that type (needs to use typedef for the function type.) |
*/ |
#define typecheck_fn(type,function) \ |
({ typeof(type) __tmp = function; \ |
(void)__tmp; \ |
}) |
#endif /* TYPECHECK_H_INCLUDED */ |
/drivers/include/linux/types.h |
---|
0,0 → 1,345 |
#ifndef _LINUX_TYPES_H |
#define _LINUX_TYPES_H |
#include <asm/types.h> |
#ifndef __ASSEMBLY__ |
#ifdef __KERNEL__ |
#define DECLARE_BITMAP(name,bits) \ |
unsigned long name[BITS_TO_LONGS(bits)] |
#endif |
#include <linux/posix_types.h> |
#ifdef __KERNEL__ |
typedef __u32 __kernel_dev_t; |
typedef __kernel_fd_set fd_set; |
typedef __kernel_dev_t dev_t; |
typedef __kernel_ino_t ino_t; |
typedef __kernel_mode_t mode_t; |
typedef __kernel_nlink_t nlink_t; |
typedef __kernel_off_t off_t; |
typedef __kernel_pid_t pid_t; |
typedef __kernel_daddr_t daddr_t; |
typedef __kernel_key_t key_t; |
typedef __kernel_suseconds_t suseconds_t; |
typedef __kernel_timer_t timer_t; |
typedef __kernel_clockid_t clockid_t; |
typedef __kernel_mqd_t mqd_t; |
typedef _Bool bool; |
typedef __kernel_uid32_t uid_t; |
typedef __kernel_gid32_t gid_t; |
typedef __kernel_uid16_t uid16_t; |
typedef __kernel_gid16_t gid16_t; |
typedef unsigned long uintptr_t; |
#ifdef CONFIG_UID16 |
/* This is defined by include/asm-{arch}/posix_types.h */ |
typedef __kernel_old_uid_t old_uid_t; |
typedef __kernel_old_gid_t old_gid_t; |
#endif /* CONFIG_UID16 */ |
#if defined(__GNUC__) |
typedef __kernel_loff_t loff_t; |
#endif |
/* |
* The following typedefs are also protected by individual ifdefs for |
* historical reasons: |
*/ |
#ifndef _SIZE_T |
#define _SIZE_T |
typedef __kernel_size_t size_t; |
#endif |
#ifndef _SSIZE_T |
#define _SSIZE_T |
typedef __kernel_ssize_t ssize_t; |
#endif |
#ifndef _PTRDIFF_T |
#define _PTRDIFF_T |
typedef __kernel_ptrdiff_t ptrdiff_t; |
#endif |
#ifndef _TIME_T |
#define _TIME_T |
typedef __kernel_time_t time_t; |
#endif |
#ifndef _CLOCK_T |
#define _CLOCK_T |
typedef __kernel_clock_t clock_t; |
#endif |
#ifndef _CADDR_T |
#define _CADDR_T |
typedef __kernel_caddr_t caddr_t; |
#endif |
/* bsd-style unsigned aliases */
typedef unsigned char u_char;
typedef unsigned short u_short;
typedef unsigned int u_int;
typedef unsigned long u_long;
/* sysv-style unsigned aliases */
typedef unsigned char unchar;
typedef unsigned short ushort;
typedef unsigned int uint;
typedef unsigned long ulong;
/* Fixed-width BSD-ish names, guarded so <sys/types.h>-style headers
 * that define the same set can opt out. */
#ifndef __BIT_TYPES_DEFINED__
#define __BIT_TYPES_DEFINED__
typedef __u8 u_int8_t;
typedef __s8 int8_t;
typedef __u16 u_int16_t;
typedef __s16 int16_t;
typedef __u32 u_int32_t;
typedef __s32 int32_t;
#endif /* !(__BIT_TYPES_DEFINED__) */
/* C99-style fixed-width names */
typedef __u8 uint8_t;
typedef __u16 uint16_t;
typedef __u32 uint32_t;
#if defined(__GNUC__)
typedef __u64 uint64_t;
typedef __u64 u_int64_t;
typedef __s64 int64_t;
#endif
/* this is a special 64bit data type that is 8-byte aligned */
#define aligned_u64 __u64 __attribute__((aligned(8)))
#define aligned_be64 __be64 __attribute__((aligned(8)))
#define aligned_le64 __le64 __attribute__((aligned(8)))
/*
 * The type used for indexing onto a disc or disc partition.
 *
 * Linux always considers sectors to be 512 bytes long independently
 * of the devices real block size.
 *
 * blkcnt_t is the type of the inode's block count.
 */
#ifdef CONFIG_LBDAF
/* Large Block Device support: 64-bit sector and block counts */
typedef u64 sector_t;
typedef u64 blkcnt_t;
#else
typedef unsigned long sector_t;
typedef unsigned long blkcnt_t;
#endif
/*
 * The type of an index into the pagecache. Use a #define so asm/types.h
 * can override it.
 */
#ifndef pgoff_t
#define pgoff_t unsigned long
#endif
#endif /* __KERNEL__ */
/*
 * Below are truly Linux-specific types that should never collide with
 * any application/library that wants linux/types.h.
 */
#ifdef __CHECKER__
/* Under the sparse checker, __bitwise__ makes each typedef a distinct,
 * non-mixable type; for a normal compile it expands to nothing. */
#define __bitwise__ __attribute__((bitwise))
#else
#define __bitwise__
#endif
#ifdef __CHECK_ENDIAN__
#define __bitwise __bitwise__
#else
#define __bitwise
#endif
/* Explicit-endianness integer types (enforced only under sparse) */
typedef __u16 __bitwise __le16;
typedef __u16 __bitwise __be16;
typedef __u32 __bitwise __le32;
typedef __u32 __bitwise __be32;
typedef __u64 __bitwise __le64;
typedef __u64 __bitwise __be64;
/* Checksum carrier types */
typedef __u16 __bitwise __sum16;
typedef __u32 __bitwise __wsum;
#ifdef __KERNEL__
typedef unsigned __bitwise__ gfp_t;   /* allocation-flag bits */
typedef unsigned __bitwise__ fmode_t; /* file-mode flag bits */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
typedef u64 phys_addr_t;
#else
typedef u32 phys_addr_t;
#endif
typedef phys_addr_t resource_size_t;
/* Atomic counter; volatile forces the compiler to re-read on every access */
typedef struct {
	volatile int counter;
} atomic_t;
#ifdef CONFIG_64BIT
typedef struct {
	volatile long counter;
} atomic64_t;
#endif
/* SysV ustat(2) filesystem-statistics structure */
struct ustat {
	__kernel_daddr_t	f_tfree;
	__kernel_ino_t		f_tinode;
	char			f_fname[6];
	char			f_fpack[6];
};
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
/* Short fixed-width aliases used by this driver port */
typedef unsigned char u8_t;
typedef unsigned short u16_t;
typedef unsigned int u32_t;
typedef unsigned long long u64_t;
typedef unsigned int addr_t;  /* presumably a 32-bit address value — TODO confirm */
typedef unsigned int count_t;
/* WARN is compiled out completely: both condition and format args are discarded */
# define WARN(condition, format...)
/* NOTE(review): plain macros rather than <stdbool.h>; pairs with the
 * _Bool typedef earlier in this header */
#define false 0
#define true 1
/* Branch-prediction hints */
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#define BITS_PER_LONG 32 /* hard-coded for a 32-bit target */
/* Integer ceiling division */
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
/* DRM logging: INFO goes to dbgprintf, ERROR to printk */
#define DRM_INFO(fmt, arg...) dbgprintf("DRM: "fmt , ##arg)
#define DRM_ERROR(fmt, arg...) \
printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __func__ , ##arg)
/* Evaluates to 0, but breaks the build (negative array size) when e is non-zero */
#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1)
/* Compile-time check that a is a true array, not a decayed pointer */
#define __must_be_array(a) \
BUILD_BUG_ON_ZERO(__builtin_types_compatible_p(typeof(a), typeof(&a[0])))
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
#ifndef HAVE_ARCH_BUG
/* BUG only logs in this port; the panic() call is intentionally disabled */
#define BUG() do { \
printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __FUNCTION__); \
/* panic("BUG!"); */ \
} while (0)
#endif
#ifndef HAVE_ARCH_BUG_ON
#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0)
#endif
/* x86 MTRR memory-type encodings */
#define MTRR_TYPE_UNCACHABLE 0
#define MTRR_TYPE_WRCOMB 1
#define MTRR_TYPE_WRTHROUGH 4
#define MTRR_TYPE_WRPROT 5
#define MTRR_TYPE_WRBACK 6
#define MTRR_NUM_TYPES 7
/* Debug printf supplied by the driver runtime */
int dbgprintf(const char* format, ...);
#define GFP_KERNEL 0 /* allocation flags carry no information in this port */
/* Freestanding prototypes in place of the commented-out libc headers */
//#include <stdio.h>
int snprintf(char *str, size_t size, const char *format, ...);
//#include <string.h>
void* memcpy(void *s1, const void *s2, size_t n);
void* memset(void *s, int c, size_t n);
size_t strlen(const char *s);
char *strcpy(char *s1, const char *s2);
char *strncpy (char *dst, const char *src, size_t len);
void *malloc(size_t size);
/* kfree maps straight onto the C library free() */
#define kfree free
/*
 * kzalloc - allocate zero-filled memory.
 * @size:  number of bytes to allocate
 * @flags: allocation flags (ignored; kept for Linux API compatibility)
 *
 * Returns a pointer to zeroed memory, or NULL if malloc() fails.
 * The previous version passed a NULL malloc() result straight to
 * memset(), which is undefined behaviour on allocation failure.
 */
static inline void *kzalloc(size_t size, uint32_t flags)
{
    void *ret = malloc(size);

    (void)flags; /* this shim always uses the plain system allocator */

    if (ret != NULL)
        memset(ret, 0, size);

    return ret;
}
/* kmalloc is routed through kzalloc, so it also zero-fills — stronger
 * than Linux's kmalloc, but harmless for callers. */
#define kmalloc(s,f) kzalloc((s), (f))
struct drm_file;
/* Full memory barrier via a locked read-modify-write of the stack top */
#define DRM_MEMORYBARRIER() __asm__ __volatile__("lock; addl $0,0(%esp)")
#define mb() __asm__ __volatile__("lock; addl $0,0(%esp)")
/* 4 KiB pages */
#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
/*
 * 64-by-32 division for 32-bit x86: divides n (64-bit, updated in place)
 * by base and expands to the 32-bit remainder.  The empty asm()s move n
 * between the edx:eax register pair (the "A" constraint) and plain C
 * variables; the high word is reduced first so divl cannot overflow.
 */
#define do_div(n, base) \
({ \
unsigned long __upper, __low, __high, __mod, __base; \
__base = (base); \
asm("":"=a" (__low), "=d" (__high) : "A" (n)); \
__upper = __high; \
if (__high) { \
__upper = __high % (__base); \
__high = __high / (__base); \
} \
asm("divl %2":"=a" (__low), "=d" (__mod) \
: "rm" (__base), "0" (__low), "1" (__upper)); \
asm("":"=A" (n) : "a" (__low), "d" (__high)); \
__mod; \
})
/* Function entry/exit tracing helpers */
#define ENTER() dbgprintf("enter %s\n",__FUNCTION__)
#define LEAVE() dbgprintf("leave %s\n",__FUNCTION__)
#define PCI_DEVICE_ID_ATI_RADEON_QY 0x5159
#endif /* _LINUX_TYPES_H */