/drivers/include/linux/asm/alternative.h
#ifndef _ASM_X86_ALTERNATIVE_H |
#define _ASM_X86_ALTERNATIVE_H |
#include <linux/types.h> |
#include <linux/stddef.h> |
#include <linux/stringify.h> |
#include <asm/asm.h> |
/* |
* Alternative inline assembly for SMP. |
* |
* The LOCK_PREFIX macro defined here replaces the LOCK and |
* LOCK_PREFIX macros used everywhere in the source tree. |
* |
* SMP alternatives use the same data structures as the other
* alternatives and the X86_FEATURE_UP flag to indicate the case of a
* UP system running an SMP kernel. The existing apply_alternatives()
* works fine for patching an SMP kernel for UP.
* |
* The SMP alternative tables can be kept after boot and contain both |
* UP and SMP versions of the instructions to allow switching back to |
* SMP at runtime, when hotplugging in a new CPU, which is especially |
* useful in virtualized environments. |
* |
* The very common lock prefix is handled as a special case in a
* separate table which is a pure address list without replacement ptr
* and size information. That keeps the table sizes small.
*/ |
#ifdef CONFIG_SMP |
#define LOCK_PREFIX \ |
".section .smp_locks,\"a\"\n" \ |
_ASM_ALIGN "\n" \ |
_ASM_PTR "661f\n" /* address */ \ |
".previous\n" \ |
"661:\n\tlock; " |
#else /* ! CONFIG_SMP */ |
#define LOCK_PREFIX "" |
#endif |
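/*
 * Illustration (not part of the original header): with CONFIG_SMP,
 *
 *	asm volatile(LOCK_PREFIX "incl %0" : "+m" (v->counter));
 *
 * emits "lock; incl" at local label 661 and records the address of
 * that lock prefix in the .smp_locks section, so the SMP-alternatives
 * code can later rewrite the lock byte in place when switching
 * between UP and SMP operation.
 */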
/* This must be included *after* the definition of LOCK_PREFIX */ |
#include <asm/cpufeature.h> |
struct alt_instr { |
u8 *instr; /* original instruction */ |
u8 *replacement; |
u8 cpuid; /* cpuid bit set for replacement */ |
u8 instrlen; /* length of original instruction */ |
u8 replacementlen; /* length of new instruction, <= instrlen */ |
u8 pad1; |
#ifdef CONFIG_X86_64 |
u32 pad2; |
#endif |
}; |
extern void alternative_instructions(void); |
extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end); |
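#if 0
/*
 * Minimal sketch (not the kernel's actual implementation) of what
 * apply_alternatives() does with the table above: for each entry whose
 * CPU feature is present, the replacement is copied over the original
 * instruction and the slack is NOP-padded.
 */
static void example_apply(struct alt_instr *start, struct alt_instr *end)
{
	struct alt_instr *a;

	for (a = start; a < end; a++) {
		if (!boot_cpu_has(a->cpuid))
			continue;
		memcpy(a->instr, a->replacement, a->replacementlen);
		/* pad instrlen - replacementlen bytes with NOPs here */
	}
}
#endif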
struct module; |
#ifdef CONFIG_SMP |
extern void alternatives_smp_module_add(struct module *mod, char *name, |
void *locks, void *locks_end, |
void *text, void *text_end); |
extern void alternatives_smp_module_del(struct module *mod); |
extern void alternatives_smp_switch(int smp); |
#else |
static inline void alternatives_smp_module_add(struct module *mod, char *name, |
void *locks, void *locks_end, |
void *text, void *text_end) {} |
static inline void alternatives_smp_module_del(struct module *mod) {} |
static inline void alternatives_smp_switch(int smp) {} |
#endif /* CONFIG_SMP */ |
/* alternative assembly primitive: */ |
#define ALTERNATIVE(oldinstr, newinstr, feature) \ |
\ |
"661:\n\t" oldinstr "\n662:\n" \ |
".section .altinstructions,\"a\"\n" \ |
_ASM_ALIGN "\n" \ |
_ASM_PTR "661b\n" /* label */ \ |
_ASM_PTR "663f\n" /* new instruction */ \ |
" .byte " __stringify(feature) "\n" /* feature bit */ \ |
" .byte 662b-661b\n" /* sourcelen */ \ |
" .byte 664f-663f\n" /* replacementlen */ \ |
" .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \ |
".previous\n" \ |
".section .altinstr_replacement, \"ax\"\n" \ |
"663:\n\t" newinstr "\n664:\n" /* replacement */ \ |
".previous" |
/* |
* Alternative instructions for different CPU types or capabilities. |
* |
* This allows optimized instructions to be used even in generic
* kernel binaries.
*
* The length of oldinstr must be greater than or equal to the length
* of newinstr; the leftover bytes are padded with NOPs when the
* replacement is patched in.
*
* For non-barrier-like inlines, please define new variants
* without volatile and the memory clobber.
*/ |
#define alternative(oldinstr, newinstr, feature) \ |
asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) : : : "memory") |
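/*
 * For illustration: the 32-bit kernel builds its memory barrier on
 * alternative(). CPUs with SSE2 get a real MFENCE patched in, older
 * ones keep the (longer) locked no-op, satisfying rlen <= slen:
 *
 *	#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", \
 *			         X86_FEATURE_XMM2)
 */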
/* |
* Alternative inline assembly with input. |
* |
* Peculiarities:
* No memory clobber here.
* Argument numbers start with 1.
* It is best to use fixed-size constraints (e.g. a register operand
* "r" referenced as (%1)). If you use variable-sized constraints
* like "m" or "g" in the replacement, make sure to pad to the
* worst-case length.
* An unused argument 0 is kept to preserve API compatibility.
*/ |
#define alternative_input(oldinstr, newinstr, feature, input...) \ |
asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) \ |
: : "i" (0), ## input) |
/* Like alternative_input, but with a single output argument */ |
#define alternative_io(oldinstr, newinstr, feature, output, input...) \ |
asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) \ |
: output : "i" (0), ## input) |
/* |
* Use this macro if you need more than one output parameter
* in alternative_io().
*/ |
#define ASM_OUTPUT2(a, b) a, b |
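#if 0
/*
 * Illustrative sketch of ASM_OUTPUT2 in use: alternative_io() takes a
 * single "output" macro argument, so two outputs must be wrapped to
 * survive the preprocessor's comma splitting. example_read_counter()
 * is a hypothetical helper, not kernel API.
 */
static inline u64 example_read_counter(void)
{
	u32 lo, hi;

	/* no-TSC fallback returns 0; RDTSC is patched in when available */
	alternative_io("movl $0, %%eax\n\tmovl $0, %%edx",
		       "rdtsc", X86_FEATURE_TSC,
		       ASM_OUTPUT2("=a" (lo), "=d" (hi)));
	return ((u64)hi << 32) | lo;
}
#endif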
struct paravirt_patch_site; |
#ifdef CONFIG_PARAVIRT |
void apply_paravirt(struct paravirt_patch_site *start, |
struct paravirt_patch_site *end); |
#else |
static inline void apply_paravirt(struct paravirt_patch_site *start, |
struct paravirt_patch_site *end) |
{} |
#define __parainstructions NULL |
#define __parainstructions_end NULL |
#endif |
/* |
* Clear and restore the kernel write-protection flag on the local CPU. |
* Allows the kernel to edit read-only pages. |
* Side-effect: any interrupt handler running between save and restore will have |
* the ability to write to read-only pages. |
* |
* Warning: |
* Code patching in the UP case is safe if NMIs and MCE handlers are stopped and |
* no thread can be preempted in the instructions being modified (no iret to an |
* invalid instruction possible) or if the instructions are changed from a |
* consistent state to another consistent state atomically. |
* More care must be taken when modifying code in the SMP case because of |
* Intel's errata. |
* On the local CPU you need to be protected against NMI or MCE handlers seeing an
* inconsistent instruction while you patch. |
*/ |
extern void *text_poke(void *addr, const void *opcode, size_t len); |
#endif /* _ASM_X86_ALTERNATIVE_H */ |
/drivers/include/linux/asm/asm.h
#ifndef _ASM_X86_ASM_H |
#define _ASM_X86_ASM_H |
#ifdef __ASSEMBLY__ |
# define __ASM_FORM(x) x |
# define __ASM_EX_SEC .section __ex_table, "a" |
#else |
# define __ASM_FORM(x) " " #x " " |
# define __ASM_EX_SEC " .section __ex_table,\"a\"\n" |
#endif |
#ifdef CONFIG_X86_32 |
# define __ASM_SEL(a,b) __ASM_FORM(a) |
#else |
# define __ASM_SEL(a,b) __ASM_FORM(b) |
#endif |
#define __ASM_SIZE(inst) __ASM_SEL(inst##l, inst##q) |
#define __ASM_REG(reg) __ASM_SEL(e##reg, r##reg) |
#define _ASM_PTR __ASM_SEL(.long, .quad) |
#define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8) |
#define _ASM_MOV __ASM_SIZE(mov) |
#define _ASM_INC __ASM_SIZE(inc) |
#define _ASM_DEC __ASM_SIZE(dec) |
#define _ASM_ADD __ASM_SIZE(add) |
#define _ASM_SUB __ASM_SIZE(sub) |
#define _ASM_XADD __ASM_SIZE(xadd) |
#define _ASM_AX __ASM_REG(ax) |
#define _ASM_BX __ASM_REG(bx) |
#define _ASM_CX __ASM_REG(cx) |
#define _ASM_DX __ASM_REG(dx) |
#define _ASM_SP __ASM_REG(sp) |
#define _ASM_BP __ASM_REG(bp) |
#define _ASM_SI __ASM_REG(si) |
#define _ASM_DI __ASM_REG(di) |
/* Exception table entry */ |
#ifdef __ASSEMBLY__ |
# define _ASM_EXTABLE(from,to) \ |
__ASM_EX_SEC ; \ |
_ASM_ALIGN ; \ |
_ASM_PTR from , to ; \ |
.previous |
#else |
# define _ASM_EXTABLE(from,to) \ |
__ASM_EX_SEC \ |
_ASM_ALIGN "\n" \ |
_ASM_PTR #from "," #to "\n" \ |
" .previous\n" |
#endif |
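#if 0
/*
 * Illustrative sketch (modelled on the kernel's __get_user pattern,
 * types simplified): if the load at label 1 faults, the exception
 * table entry redirects execution to label 3, which sets the error
 * code and resumes at label 2.
 */
static inline int example_get_u32(unsigned int *dst, const unsigned int *src)
{
	int err = 0;
	unsigned int val = 0;

	asm volatile("1:	movl %2,%1\n"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:	movl %3,%0\n"
		     "	jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : "+r" (err), "=r" (val)
		     : "m" (*src), "i" (-14 /* -EFAULT */));
	*dst = val;
	return err;
}
#endif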
#endif /* _ASM_X86_ASM_H */ |
/drivers/include/linux/asm/atomic.h
#ifdef CONFIG_X86_32 |
# include "atomic_32.h" |
#else |
# include "atomic_64.h" |
#endif |
/drivers/include/linux/asm/atomic_32.h
#ifndef _ASM_X86_ATOMIC_32_H |
#define _ASM_X86_ATOMIC_32_H |
#include <linux/compiler.h> |
#include <linux/types.h> |
//#include <asm/processor.h> |
#include <asm/cmpxchg.h> |
/* |
* Atomic operations that C can't guarantee us. Useful for |
* resource counting etc.. |
*/ |
#define ATOMIC_INIT(i) { (i) } |
/** |
* atomic_read - read atomic variable |
* @v: pointer of type atomic_t |
* |
* Atomically reads the value of @v. |
*/ |
static inline int atomic_read(const atomic_t *v) |
{ |
return v->counter; |
} |
/** |
* atomic_set - set atomic variable |
* @v: pointer of type atomic_t |
* @i: required value |
* |
* Atomically sets the value of @v to @i. |
*/ |
static inline void atomic_set(atomic_t *v, int i) |
{ |
v->counter = i; |
} |
/** |
* atomic_add - add integer to atomic variable |
* @i: integer value to add |
* @v: pointer of type atomic_t |
* |
* Atomically adds @i to @v. |
*/ |
static inline void atomic_add(int i, atomic_t *v) |
{ |
asm volatile(LOCK_PREFIX "addl %1,%0" |
: "+m" (v->counter) |
: "ir" (i)); |
} |
/** |
* atomic_sub - subtract integer from atomic variable |
* @i: integer value to subtract |
* @v: pointer of type atomic_t |
* |
* Atomically subtracts @i from @v. |
*/ |
static inline void atomic_sub(int i, atomic_t *v) |
{ |
asm volatile(LOCK_PREFIX "subl %1,%0" |
: "+m" (v->counter) |
: "ir" (i)); |
} |
/** |
* atomic_sub_and_test - subtract value from variable and test result |
* @i: integer value to subtract |
* @v: pointer of type atomic_t |
* |
* Atomically subtracts @i from @v and returns |
* true if the result is zero, or false for all |
* other cases. |
*/ |
static inline int atomic_sub_and_test(int i, atomic_t *v) |
{ |
unsigned char c; |
asm volatile(LOCK_PREFIX "subl %2,%0; sete %1" |
: "+m" (v->counter), "=qm" (c) |
: "ir" (i) : "memory"); |
return c; |
} |
/** |
* atomic_inc - increment atomic variable |
* @v: pointer of type atomic_t |
* |
* Atomically increments @v by 1. |
*/ |
static inline void atomic_inc(atomic_t *v) |
{ |
asm volatile(LOCK_PREFIX "incl %0" |
: "+m" (v->counter)); |
} |
/** |
* atomic_dec - decrement atomic variable |
* @v: pointer of type atomic_t |
* |
* Atomically decrements @v by 1. |
*/ |
static inline void atomic_dec(atomic_t *v) |
{ |
asm volatile(LOCK_PREFIX "decl %0" |
: "+m" (v->counter)); |
} |
/** |
* atomic_dec_and_test - decrement and test |
* @v: pointer of type atomic_t |
* |
* Atomically decrements @v by 1 and |
* returns true if the result is 0, or false for all other |
* cases. |
*/ |
static inline int atomic_dec_and_test(atomic_t *v) |
{ |
unsigned char c; |
asm volatile(LOCK_PREFIX "decl %0; sete %1" |
: "+m" (v->counter), "=qm" (c) |
: : "memory"); |
return c != 0; |
} |
/** |
* atomic_inc_and_test - increment and test |
* @v: pointer of type atomic_t |
* |
* Atomically increments @v by 1 |
* and returns true if the result is zero, or false for all |
* other cases. |
*/ |
static inline int atomic_inc_and_test(atomic_t *v) |
{ |
unsigned char c; |
asm volatile(LOCK_PREFIX "incl %0; sete %1" |
: "+m" (v->counter), "=qm" (c) |
: : "memory"); |
return c != 0; |
} |
/** |
* atomic_add_negative - add and test if negative |
* @v: pointer of type atomic_t |
* @i: integer value to add |
* |
* Atomically adds @i to @v and returns true |
* if the result is negative, or false when |
* result is greater than or equal to zero. |
*/ |
static inline int atomic_add_negative(int i, atomic_t *v) |
{ |
unsigned char c; |
asm volatile(LOCK_PREFIX "addl %2,%0; sets %1" |
: "+m" (v->counter), "=qm" (c) |
: "ir" (i) : "memory"); |
return c; |
} |
/** |
* atomic_add_return - add integer and return |
* @v: pointer of type atomic_t |
* @i: integer value to add |
* |
* Atomically adds @i to @v and returns @i + @v |
*/ |
static inline int atomic_add_return(int i, atomic_t *v) |
{ |
int __i; |
#ifdef CONFIG_M386 |
unsigned long flags; |
if (unlikely(boot_cpu_data.x86 <= 3)) |
goto no_xadd; |
#endif |
/* Modern 486+ processor */ |
__i = i; |
asm volatile(LOCK_PREFIX "xaddl %0, %1" |
: "+r" (i), "+m" (v->counter) |
: : "memory"); |
return i + __i; |
#ifdef CONFIG_M386 |
no_xadd: /* Legacy 386 processor */ |
local_irq_save(flags); |
__i = atomic_read(v); |
atomic_set(v, i + __i); |
local_irq_restore(flags); |
return i + __i; |
#endif |
} |
/** |
* atomic_sub_return - subtract integer and return |
* @v: pointer of type atomic_t |
* @i: integer value to subtract |
* |
* Atomically subtracts @i from @v and returns @v - @i |
*/ |
static inline int atomic_sub_return(int i, atomic_t *v) |
{ |
return atomic_add_return(-i, v); |
} |
static inline int atomic_cmpxchg(atomic_t *v, int old, int new) |
{ |
return cmpxchg(&v->counter, old, new); |
} |
static inline int atomic_xchg(atomic_t *v, int new) |
{ |
return xchg(&v->counter, new); |
} |
/** |
* atomic_add_unless - add unless the number is already a given value |
* @v: pointer of type atomic_t |
* @a: the amount to add to v... |
* @u: ...unless v is equal to u. |
* |
* Atomically adds @a to @v, so long as @v was not already @u. |
* Returns non-zero if @v was not @u, and zero otherwise. |
*/ |
static inline int atomic_add_unless(atomic_t *v, int a, int u) |
{ |
int c, old; |
c = atomic_read(v); |
for (;;) { |
if (unlikely(c == (u))) |
break; |
old = atomic_cmpxchg((v), c, c + (a)); |
if (likely(old == c)) |
break; |
c = old; |
} |
return c != (u); |
} |
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) |
#define atomic_inc_return(v) (atomic_add_return(1, v)) |
#define atomic_dec_return(v) (atomic_sub_return(1, v)) |
/* These are x86-specific, used by some header files */ |
#define atomic_clear_mask(mask, addr) \ |
asm volatile(LOCK_PREFIX "andl %0,%1" \ |
: : "r" (~(mask)), "m" (*(addr)) : "memory") |
#define atomic_set_mask(mask, addr) \ |
asm volatile(LOCK_PREFIX "orl %0,%1" \ |
: : "r" (mask), "m" (*(addr)) : "memory") |
/* Atomic operations are already serializing on x86 */ |
#define smp_mb__before_atomic_dec() barrier() |
#define smp_mb__after_atomic_dec() barrier() |
#define smp_mb__before_atomic_inc() barrier() |
#define smp_mb__after_atomic_inc() barrier() |
/* A 64-bit atomic type */
typedef struct { |
u64 __aligned(8) counter; |
} atomic64_t; |
#define ATOMIC64_INIT(val) { (val) } |
extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val); |
/** |
* atomic64_xchg - xchg atomic64 variable |
* @ptr: pointer to type atomic64_t |
* @new_val: value to assign |
* |
* Atomically xchgs the value of @ptr to @new_val and returns |
* the old value. |
*/ |
extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val); |
/** |
* atomic64_set - set atomic64 variable |
* @ptr: pointer to type atomic64_t |
* @new_val: value to assign |
* |
* Atomically sets the value of @ptr to @new_val. |
*/ |
extern void atomic64_set(atomic64_t *ptr, u64 new_val); |
/** |
* atomic64_read - read atomic64 variable |
* @ptr: pointer to type atomic64_t |
* |
* Atomically reads the value of @ptr and returns it. |
*/ |
static inline u64 atomic64_read(atomic64_t *ptr) |
{ |
u64 res; |
/* |
* Note, we inline this atomic64_t primitive because |
* it only clobbers EAX/EDX and leaves the others |
* untouched. We also (somewhat subtly) rely on the |
* fact that cmpxchg8b returns the current 64-bit value |
* of the memory location we are touching: |
*/ |
asm volatile( |
"mov %%ebx, %%eax\n\t" |
"mov %%ecx, %%edx\n\t" |
LOCK_PREFIX "cmpxchg8b %1\n" |
: "=&A" (res) |
: "m" (*ptr) |
); |
return res; |
} |
/** |
* atomic64_add_return - add and return |
* @delta: integer value to add |
* @ptr: pointer to type atomic64_t |
* |
* Atomically adds @delta to @ptr and returns @delta + *@ptr |
*/ |
extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr); |
/* |
* Other variants with different arithmetic operators: |
*/ |
extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr); |
extern u64 atomic64_inc_return(atomic64_t *ptr); |
extern u64 atomic64_dec_return(atomic64_t *ptr); |
/** |
* atomic64_add - add integer to atomic64 variable |
* @delta: integer value to add |
* @ptr: pointer to type atomic64_t |
* |
* Atomically adds @delta to @ptr. |
*/ |
extern void atomic64_add(u64 delta, atomic64_t *ptr); |
/** |
* atomic64_sub - subtract the atomic64 variable |
* @delta: integer value to subtract |
* @ptr: pointer to type atomic64_t |
* |
* Atomically subtracts @delta from @ptr. |
*/ |
extern void atomic64_sub(u64 delta, atomic64_t *ptr); |
/** |
* atomic64_sub_and_test - subtract value from variable and test result |
* @delta: integer value to subtract |
* @ptr: pointer to type atomic64_t |
* |
* Atomically subtracts @delta from @ptr and returns |
* true if the result is zero, or false for all |
* other cases. |
*/ |
extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr); |
/** |
* atomic64_inc - increment atomic64 variable |
* @ptr: pointer to type atomic64_t |
* |
* Atomically increments @ptr by 1. |
*/ |
extern void atomic64_inc(atomic64_t *ptr); |
/** |
* atomic64_dec - decrement atomic64 variable |
* @ptr: pointer to type atomic64_t |
* |
* Atomically decrements @ptr by 1. |
*/ |
extern void atomic64_dec(atomic64_t *ptr); |
/** |
* atomic64_dec_and_test - decrement and test |
* @ptr: pointer to type atomic64_t |
* |
* Atomically decrements @ptr by 1 and |
* returns true if the result is 0, or false for all other |
* cases. |
*/ |
extern int atomic64_dec_and_test(atomic64_t *ptr); |
/** |
* atomic64_inc_and_test - increment and test |
* @ptr: pointer to type atomic64_t |
* |
* Atomically increments @ptr by 1 |
* and returns true if the result is zero, or false for all |
* other cases. |
*/ |
extern int atomic64_inc_and_test(atomic64_t *ptr); |
/** |
* atomic64_add_negative - add and test if negative |
* @delta: integer value to add |
* @ptr: pointer to type atomic64_t |
* |
* Atomically adds @delta to @ptr and returns true |
* if the result is negative, or false when |
* result is greater than or equal to zero. |
*/ |
extern int atomic64_add_negative(u64 delta, atomic64_t *ptr); |
#include <asm-generic/atomic-long.h> |
#endif /* _ASM_X86_ATOMIC_32_H */ |
/drivers/include/linux/asm/bitops.h
#ifndef _ASM_X86_BITOPS_H |
#define _ASM_X86_BITOPS_H |
/* |
* Copyright 1992, Linus Torvalds. |
* |
* Note: inlines with more than a single statement should be marked |
* __always_inline to avoid problems with older gcc's inlining heuristics. |
*/ |
#ifndef _LINUX_BITOPS_H |
#error only <linux/bitops.h> can be included directly |
#endif |
#include <linux/compiler.h> |
#include <asm/alternative.h> |
/* |
* These have to be done with inline assembly: that way the bit-setting |
* is guaranteed to be atomic. All bit operations return 0 if the bit |
* was cleared before the operation and != 0 if it was not. |
* |
* bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). |
*/ |
#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1) |
/* Technically wrong, but this avoids compilation errors on some gcc |
versions. */ |
#define BITOP_ADDR(x) "=m" (*(volatile long *) (x)) |
#else |
#define BITOP_ADDR(x) "+m" (*(volatile long *) (x)) |
#endif |
#define ADDR BITOP_ADDR(addr) |
/* |
* We do the locked ops that don't return the old value as |
* a mask operation on a byte. |
*/ |
#define IS_IMMEDIATE(nr) (__builtin_constant_p(nr)) |
#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3)) |
#define CONST_MASK(nr) (1 << ((nr) & 7)) |
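/*
 * Example (illustrative): with a constant nr, set_bit(12, addr) below
 * becomes a byte-wide "lock orb $0x10,1(addr)", since 12>>3 selects
 * byte 1 and CONST_MASK(12) == 1 << (12 & 7) == 0x10.
 */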
/** |
* set_bit - Atomically set a bit in memory |
* @nr: the bit to set |
* @addr: the address to start counting from |
* |
* This function is atomic and may not be reordered. See __set_bit() |
* if you do not require the atomic guarantees. |
* |
* Note: there are no guarantees that this function will not be reordered |
* on non-x86 architectures, so if you are writing portable code,
* make sure not to rely on its reordering guarantees. |
* |
* Note that @nr may be almost arbitrarily large; this function is not |
* restricted to acting on a single-word quantity. |
*/ |
static __always_inline void |
set_bit(unsigned int nr, volatile unsigned long *addr) |
{ |
if (IS_IMMEDIATE(nr)) { |
asm volatile(LOCK_PREFIX "orb %1,%0" |
: CONST_MASK_ADDR(nr, addr) |
: "iq" ((u8)CONST_MASK(nr)) |
: "memory"); |
} else { |
asm volatile(LOCK_PREFIX "bts %1,%0" |
: BITOP_ADDR(addr) : "Ir" (nr) : "memory"); |
} |
} |
/** |
* __set_bit - Set a bit in memory |
* @nr: the bit to set |
* @addr: the address to start counting from |
* |
* Unlike set_bit(), this function is non-atomic and may be reordered. |
* If it's called on the same region of memory simultaneously, the effect |
* may be that only one operation succeeds. |
*/ |
static inline void __set_bit(int nr, volatile unsigned long *addr) |
{ |
asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory"); |
} |
/** |
* clear_bit - Clears a bit in memory |
* @nr: Bit to clear |
* @addr: Address to start counting from |
* |
* clear_bit() is atomic and may not be reordered. However, it does |
* not contain a memory barrier, so if it is used for locking purposes, |
* you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() |
* in order to ensure changes are visible on other processors. |
*/ |
static __always_inline void |
clear_bit(int nr, volatile unsigned long *addr) |
{ |
if (IS_IMMEDIATE(nr)) { |
asm volatile(LOCK_PREFIX "andb %1,%0" |
: CONST_MASK_ADDR(nr, addr) |
: "iq" ((u8)~CONST_MASK(nr))); |
} else { |
asm volatile(LOCK_PREFIX "btr %1,%0" |
: BITOP_ADDR(addr) |
: "Ir" (nr)); |
} |
} |
/* |
* clear_bit_unlock - Clears a bit in memory |
* @nr: Bit to clear |
* @addr: Address to start counting from |
* |
* clear_bit() is atomic and implies release semantics before the memory |
* operation. It can be used for an unlock. |
*/ |
static inline void clear_bit_unlock(unsigned nr, volatile unsigned long *addr) |
{ |
barrier(); |
clear_bit(nr, addr); |
} |
static inline void __clear_bit(int nr, volatile unsigned long *addr) |
{ |
asm volatile("btr %1,%0" : ADDR : "Ir" (nr)); |
} |
/* |
* __clear_bit_unlock - Clears a bit in memory |
* @nr: Bit to clear |
* @addr: Address to start counting from |
* |
* __clear_bit() is non-atomic and implies release semantics before the memory |
* operation. It can be used for an unlock if no other CPUs can concurrently |
* modify other bits in the word. |
* |
* No memory barrier is required here, because x86 cannot reorder stores past |
* older loads. Same principle as spin_unlock. |
*/ |
static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr) |
{ |
barrier(); |
__clear_bit(nr, addr); |
} |
#define smp_mb__before_clear_bit() barrier() |
#define smp_mb__after_clear_bit() barrier() |
/** |
* __change_bit - Toggle a bit in memory |
* @nr: the bit to change |
* @addr: the address to start counting from |
* |
* Unlike change_bit(), this function is non-atomic and may be reordered. |
* If it's called on the same region of memory simultaneously, the effect |
* may be that only one operation succeeds. |
*/ |
static inline void __change_bit(int nr, volatile unsigned long *addr) |
{ |
asm volatile("btc %1,%0" : ADDR : "Ir" (nr)); |
} |
/** |
* change_bit - Toggle a bit in memory |
* @nr: Bit to change |
* @addr: Address to start counting from |
* |
* change_bit() is atomic and may not be reordered. |
* Note that @nr may be almost arbitrarily large; this function is not |
* restricted to acting on a single-word quantity. |
*/ |
static inline void change_bit(int nr, volatile unsigned long *addr) |
{ |
if (IS_IMMEDIATE(nr)) { |
asm volatile(LOCK_PREFIX "xorb %1,%0" |
: CONST_MASK_ADDR(nr, addr) |
: "iq" ((u8)CONST_MASK(nr))); |
} else { |
asm volatile(LOCK_PREFIX "btc %1,%0" |
: BITOP_ADDR(addr) |
: "Ir" (nr)); |
} |
} |
/** |
* test_and_set_bit - Set a bit and return its old value |
* @nr: Bit to set |
* @addr: Address to count from |
* |
* This operation is atomic and cannot be reordered. |
* It also implies a memory barrier. |
*/ |
static inline int test_and_set_bit(int nr, volatile unsigned long *addr) |
{ |
int oldbit; |
asm volatile(LOCK_PREFIX "bts %2,%1\n\t" |
"sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory"); |
return oldbit; |
} |
/** |
* test_and_set_bit_lock - Set a bit and return its old value for lock |
* @nr: Bit to set |
* @addr: Address to count from |
* |
* This is the same as test_and_set_bit on x86. |
*/ |
static __always_inline int |
test_and_set_bit_lock(int nr, volatile unsigned long *addr) |
{ |
return test_and_set_bit(nr, addr); |
} |
/** |
* __test_and_set_bit - Set a bit and return its old value |
* @nr: Bit to set |
* @addr: Address to count from |
* |
* This operation is non-atomic and can be reordered. |
* If two examples of this operation race, one can appear to succeed |
* but actually fail. You must protect multiple accesses with a lock. |
*/ |
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) |
{ |
int oldbit; |
asm("bts %2,%1\n\t" |
"sbb %0,%0" |
: "=r" (oldbit), ADDR |
: "Ir" (nr)); |
return oldbit; |
} |
/** |
* test_and_clear_bit - Clear a bit and return its old value |
* @nr: Bit to clear |
* @addr: Address to count from |
* |
* This operation is atomic and cannot be reordered. |
* It also implies a memory barrier. |
*/ |
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr) |
{ |
int oldbit; |
asm volatile(LOCK_PREFIX "btr %2,%1\n\t" |
"sbb %0,%0" |
: "=r" (oldbit), ADDR : "Ir" (nr) : "memory"); |
return oldbit; |
} |
/** |
* __test_and_clear_bit - Clear a bit and return its old value |
* @nr: Bit to clear |
* @addr: Address to count from |
* |
* This operation is non-atomic and can be reordered. |
* If two examples of this operation race, one can appear to succeed |
* but actually fail. You must protect multiple accesses with a lock. |
*/ |
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) |
{ |
int oldbit; |
asm volatile("btr %2,%1\n\t" |
"sbb %0,%0" |
: "=r" (oldbit), ADDR |
: "Ir" (nr)); |
return oldbit; |
} |
/* WARNING: non-atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) |
{ |
int oldbit; |
asm volatile("btc %2,%1\n\t" |
"sbb %0,%0" |
: "=r" (oldbit), ADDR |
: "Ir" (nr) : "memory"); |
return oldbit; |
} |
/** |
* test_and_change_bit - Change a bit and return its old value |
* @nr: Bit to change |
* @addr: Address to count from |
* |
* This operation is atomic and cannot be reordered. |
* It also implies a memory barrier. |
*/ |
static inline int test_and_change_bit(int nr, volatile unsigned long *addr) |
{ |
int oldbit; |
asm volatile(LOCK_PREFIX "btc %2,%1\n\t" |
"sbb %0,%0" |
: "=r" (oldbit), ADDR : "Ir" (nr) : "memory"); |
return oldbit; |
} |
static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr) |
{ |
return ((1UL << (nr % BITS_PER_LONG)) & |
(((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0; |
} |
static inline int variable_test_bit(int nr, volatile const unsigned long *addr) |
{ |
int oldbit; |
asm volatile("bt %2,%1\n\t" |
"sbb %0,%0" |
: "=r" (oldbit) |
: "m" (*(unsigned long *)addr), "Ir" (nr)); |
return oldbit; |
} |
#if 0 /* Fool kernel-doc since it doesn't do macros yet */ |
/** |
* test_bit - Determine whether a bit is set |
* @nr: bit number to test |
* @addr: Address to start counting from |
*/ |
static int test_bit(int nr, const volatile unsigned long *addr); |
#endif |
#define test_bit(nr, addr) \ |
(__builtin_constant_p((nr)) \ |
? constant_test_bit((nr), (addr)) \ |
: variable_test_bit((nr), (addr))) |
/** |
* __ffs - find first set bit in word |
* @word: The word to search |
* |
* Undefined if no bit exists, so code should check against 0 first. |
*/ |
static inline unsigned long __ffs(unsigned long word) |
{ |
asm("bsf %1,%0" |
: "=r" (word) |
: "rm" (word)); |
return word; |
} |
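#if 0
/* Illustrative caller: guard the undefined zero case before __ffs(). */
static inline unsigned long example_first_set(unsigned long word)
{
	return word ? __ffs(word) : BITS_PER_LONG;	/* BITS_PER_LONG means "none" */
}
#endif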
/** |
* ffz - find first zero bit in word |
* @word: The word to search |
* |
* Undefined if no zero exists, so code should check against ~0UL first. |
*/ |
static inline unsigned long ffz(unsigned long word) |
{ |
asm("bsf %1,%0" |
: "=r" (word) |
: "r" (~word)); |
return word; |
} |
/**
* __fls - find last set bit in word
* @word: The word to search |
* |
* Undefined if no set bit exists, so code should check against 0 first. |
*/ |
static inline unsigned long __fls(unsigned long word) |
{ |
asm("bsr %1,%0" |
: "=r" (word) |
: "rm" (word)); |
return word; |
} |
#ifdef __KERNEL__ |
/** |
* ffs - find first set bit in word |
* @x: the word to search |
* |
* This is defined the same way as the libc and compiler builtin ffs |
* routines, therefore differs in spirit from the other bitops. |
* |
* ffs(value) returns 0 if value is 0 or the position of the first |
* set bit if value is nonzero. The first (least significant) bit |
* is at position 1. |
*/ |
static inline int ffs(int x) |
{ |
int r; |
#ifdef CONFIG_X86_CMOV |
asm("bsfl %1,%0\n\t" |
"cmovzl %2,%0" |
: "=r" (r) : "rm" (x), "r" (-1)); |
#else |
asm("bsfl %1,%0\n\t" |
"jnz 1f\n\t" |
"movl $-1,%0\n" |
"1:" : "=r" (r) : "rm" (x)); |
#endif |
return r + 1; |
} |
/** |
* fls - find last set bit in word |
* @x: the word to search |
* |
* This is defined in a similar way as the libc and compiler builtin |
* ffs, but returns the position of the most significant set bit. |
* |
* fls(value) returns 0 if value is 0 or the position of the last |
* set bit if value is nonzero. The last (most significant) bit is |
* at position 32. |
*/ |
static inline int fls(int x) |
{ |
int r; |
#ifdef CONFIG_X86_CMOV |
asm("bsrl %1,%0\n\t" |
"cmovzl %2,%0" |
: "=&r" (r) : "rm" (x), "rm" (-1)); |
#else |
asm("bsrl %1,%0\n\t" |
"jnz 1f\n\t" |
"movl $-1,%0\n" |
"1:" : "=r" (r) : "rm" (x)); |
#endif |
return r + 1; |
} |
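/*
 * Worked examples: ffs(0) == 0 and ffs(0x8) == 4; fls(0) == 0,
 * fls(0x8) == 4 and fls(0x80000000) == 32. For nonzero x, fls(x)
 * is therefore the number of bits needed to represent x.
 */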
#endif /* __KERNEL__ */ |
#undef ADDR |
#ifdef __KERNEL__ |
#include <asm-generic/bitops/sched.h> |
#define ARCH_HAS_FAST_MULTIPLIER 1 |
#include <asm-generic/bitops/hweight.h> |
#endif /* __KERNEL__ */ |
#include <asm-generic/bitops/fls64.h> |
#ifdef __KERNEL__ |
#include <asm-generic/bitops/ext2-non-atomic.h> |
#define ext2_set_bit_atomic(lock, nr, addr) \ |
test_and_set_bit((nr), (unsigned long *)(addr)) |
#define ext2_clear_bit_atomic(lock, nr, addr) \ |
test_and_clear_bit((nr), (unsigned long *)(addr)) |
#include <asm-generic/bitops/minix.h> |
#endif /* __KERNEL__ */ |
#endif /* _ASM_X86_BITOPS_H */ |
/drivers/include/linux/asm/bitsperlong.h
#ifndef __ASM_X86_BITSPERLONG_H |
#define __ASM_X86_BITSPERLONG_H |
#ifdef __x86_64__ |
# define __BITS_PER_LONG 64 |
#else |
# define __BITS_PER_LONG 32 |
#endif |
#include <asm-generic/bitsperlong.h> |
#endif /* __ASM_X86_BITSPERLONG_H */ |
/drivers/include/linux/asm/byteorder.h
#ifndef _ASM_X86_BYTEORDER_H |
#define _ASM_X86_BYTEORDER_H |
#include <linux/byteorder/little_endian.h> |
#endif /* _ASM_X86_BYTEORDER_H */ |
/drivers/include/linux/asm/cmpxchg.h
#ifdef CONFIG_X86_32 |
# include "cmpxchg_32.h" |
#else |
# include "cmpxchg_64.h" |
#endif |
/drivers/include/linux/asm/cmpxchg_32.h
#ifndef _ASM_X86_CMPXCHG_32_H |
#define _ASM_X86_CMPXCHG_32_H |
#include <linux/bitops.h> /* for LOCK_PREFIX */ |
/* |
* Note: if you use set64_bit(), __cmpxchg64(), or their variants,
* you need to test for the feature in boot_cpu_data.
*/ |
extern void __xchg_wrong_size(void); |
/* |
* Note: no "lock" prefix even on SMP: xchg always implies lock anyway |
* Note 2: xchg has side effect, so that attribute volatile is necessary, |
* but generally the primitive is invalid, *ptr is output argument. --ANK |
*/ |
struct __xchg_dummy { |
unsigned long a[100]; |
}; |
#define __xg(x) ((struct __xchg_dummy *)(x)) |
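/*
 * Casting through the oversized dummy struct makes the "m" operand
 * cover a wide range at ptr, so gcc assumes the whole object may be
 * accessed and will not cache *ptr in a register across the exchange.
 */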
#define __xchg(x, ptr, size) \ |
({ \ |
__typeof(*(ptr)) __x = (x); \ |
switch (size) { \ |
case 1: \ |
asm volatile("xchgb %b0,%1" \ |
: "=q" (__x) \ |
: "m" (*__xg(ptr)), "0" (__x) \ |
: "memory"); \ |
break; \ |
case 2: \ |
asm volatile("xchgw %w0,%1" \ |
: "=r" (__x) \ |
: "m" (*__xg(ptr)), "0" (__x) \ |
: "memory"); \ |
break; \ |
case 4: \ |
asm volatile("xchgl %0,%1" \ |
: "=r" (__x) \ |
: "m" (*__xg(ptr)), "0" (__x) \ |
: "memory"); \ |
break; \ |
default: \ |
__xchg_wrong_size(); \ |
} \ |
__x; \ |
}) |
#define xchg(ptr, v) \
__xchg((v), (ptr), sizeof(*(ptr)))
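#if 0
/* Illustrative test-and-set lock acquire built on xchg(). */
static inline void example_lock(volatile unsigned int *lock)
{
	while (xchg(lock, 1) != 0)
		/* spin until the previous holder stores 0 */ ;
}
#endif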
/* |
* The semantics of CMPXCHG8B are a bit strange; this is why
* there is a loop and the loading of %%eax and %%edx has to
* be inside. This inlines well in most cases, the cached
* cost is around ~38 cycles. (in the future we might want
* to do a SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
* might have an implicit FPU-save as a cost, so it's not
* clear which path to go.)
* |
* cmpxchg8b must be used with the lock prefix here to allow |
* the instruction to be executed atomically, see page 3-102 |
* of the instruction set reference 24319102.pdf. We need |
* the reader side to see the coherent 64bit value. |
*/ |
static inline void __set_64bit(unsigned long long *ptr, |
unsigned int low, unsigned int high) |
{ |
asm volatile("\n1:\t" |
"movl (%0), %%eax\n\t" |
"movl 4(%0), %%edx\n\t" |
LOCK_PREFIX "cmpxchg8b (%0)\n\t" |
"jnz 1b" |
: /* no outputs */ |
: "D"(ptr), |
"b"(low), |
"c"(high) |
: "ax", "dx", "memory"); |
} |
static inline void __set_64bit_constant(unsigned long long *ptr, |
unsigned long long value) |
{ |
__set_64bit(ptr, (unsigned int)value, (unsigned int)(value >> 32)); |
} |
#define ll_low(x) *(((unsigned int *)&(x)) + 0) |
#define ll_high(x) *(((unsigned int *)&(x)) + 1) |
static inline void __set_64bit_var(unsigned long long *ptr, |
unsigned long long value) |
{ |
__set_64bit(ptr, ll_low(value), ll_high(value)); |
} |
#define set_64bit(ptr, value) \ |
(__builtin_constant_p((value)) \ |
? __set_64bit_constant((ptr), (value)) \ |
: __set_64bit_var((ptr), (value))) |
#define _set_64bit(ptr, value) \ |
(__builtin_constant_p(value) \ |
? __set_64bit(ptr, (unsigned int)(value), \ |
(unsigned int)((value) >> 32)) \ |
: __set_64bit(ptr, ll_low((value)), ll_high((value)))) |
extern void __cmpxchg_wrong_size(void); |
/* |
* Atomic compare and exchange. Compare OLD with MEM, if identical, |
* store NEW in MEM. Return the initial value in MEM. Success is |
* indicated by comparing RETURN with OLD. |
*/ |
#define __raw_cmpxchg(ptr, old, new, size, lock) \ |
({ \ |
__typeof__(*(ptr)) __ret; \ |
__typeof__(*(ptr)) __old = (old); \ |
__typeof__(*(ptr)) __new = (new); \ |
switch (size) { \ |
case 1: \ |
asm volatile(lock "cmpxchgb %b1,%2" \ |
: "=a"(__ret) \ |
: "q"(__new), "m"(*__xg(ptr)), "0"(__old) \ |
: "memory"); \ |
break; \ |
case 2: \ |
asm volatile(lock "cmpxchgw %w1,%2" \ |
: "=a"(__ret) \ |
: "r"(__new), "m"(*__xg(ptr)), "0"(__old) \ |
: "memory"); \ |
break; \ |
case 4: \ |
asm volatile(lock "cmpxchgl %1,%2" \ |
: "=a"(__ret) \ |
: "r"(__new), "m"(*__xg(ptr)), "0"(__old) \ |
: "memory"); \ |
break; \ |
default: \ |
__cmpxchg_wrong_size(); \ |
} \ |
__ret; \ |
}) |
#define __cmpxchg(ptr, old, new, size) \ |
__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX) |
#define __sync_cmpxchg(ptr, old, new, size) \ |
__raw_cmpxchg((ptr), (old), (new), (size), "lock; ") |
#define __cmpxchg_local(ptr, old, new, size) \ |
__raw_cmpxchg((ptr), (old), (new), (size), "") |
#ifdef CONFIG_X86_CMPXCHG |
#define __HAVE_ARCH_CMPXCHG 1 |
#define cmpxchg(ptr, old, new) \
__cmpxchg((ptr), (old), (new), sizeof(*(ptr)))
#define sync_cmpxchg(ptr, old, new) \
__sync_cmpxchg((ptr), (old), (new), sizeof(*(ptr)))
#define cmpxchg_local(ptr, old, new) \
__cmpxchg_local((ptr), (old), (new), sizeof(*(ptr)))
#endif |
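#if 0
/* The canonical lock-free update loop built on cmpxchg(). */
static inline void example_set_flag(unsigned long *p, unsigned long flag)
{
	unsigned long old, new;

	do {
		old = *p;
		new = old | flag;
	} while (cmpxchg(p, old, new) != old);
}
#endif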
#ifdef CONFIG_X86_CMPXCHG64 |
#define cmpxchg64(ptr, o, n) \ |
((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \ |
(unsigned long long)(n))) |
#define cmpxchg64_local(ptr, o, n) \ |
((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \ |
(unsigned long long)(n))) |
#endif |
static inline unsigned long long __cmpxchg64(volatile void *ptr, |
unsigned long long old, |
unsigned long long new) |
{ |
unsigned long long prev; |
asm volatile(LOCK_PREFIX "cmpxchg8b %3" |
: "=A"(prev) |
: "b"((unsigned long)new), |
"c"((unsigned long)(new >> 32)), |
"m"(*__xg(ptr)), |
"0"(old) |
: "memory"); |
return prev; |
} |
static inline unsigned long long __cmpxchg64_local(volatile void *ptr, |
unsigned long long old, |
unsigned long long new) |
{ |
unsigned long long prev; |
asm volatile("cmpxchg8b %3" |
: "=A"(prev) |
: "b"((unsigned long)new), |
"c"((unsigned long)(new >> 32)), |
"m"(*__xg(ptr)), |
"0"(old) |
: "memory"); |
return prev; |
} |
#ifndef CONFIG_X86_CMPXCHG |
/* |
* Building a kernel capable of running on an 80386. It may be necessary to
* simulate the cmpxchg on the 80386 CPU. For that purpose we define |
* a function for each of the sizes we support. |
*/ |
#define cmpxchg(ptr, o, n) \ |
({ \ |
__typeof__(*(ptr)) __ret; \ |
__ret = (__typeof__(*(ptr)))__cmpxchg((ptr), \ |
(unsigned long)(o), (unsigned long)(n), \ |
sizeof(*(ptr))); \ |
__ret; \ |
}) |
#define cmpxchg_local(ptr, o, n) \ |
({ \ |
__typeof__(*(ptr)) __ret; \ |
__ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr), \ |
(unsigned long)(o), (unsigned long)(n), \ |
sizeof(*(ptr))); \ |
__ret; \ |
}) |
#endif |
#ifndef CONFIG_X86_CMPXCHG64 |
/* |
* Building a kernel capable of running on an 80386 or 80486. It may be necessary
* to simulate the cmpxchg8b on the 80386 and 80486 CPU. |
*/ |
extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64); |
#define cmpxchg64(ptr, o, n) \ |
({ \ |
__typeof__(*(ptr)) __ret; \ |
__typeof__(*(ptr)) __old = (o); \ |
__typeof__(*(ptr)) __new = (n); \ |
alternative_io("call cmpxchg8b_emu", \ |
"lock; cmpxchg8b (%%esi)" , \ |
X86_FEATURE_CX8, \ |
"=A" (__ret), \ |
"S" ((ptr)), "0" (__old), \ |
"b" ((unsigned int)__new), \ |
"c" ((unsigned int)(__new>>32)) \ |
: "memory"); \ |
__ret; }) |
#define cmpxchg64_local(ptr, o, n) \ |
({ \ |
__typeof__(*(ptr)) __ret; \ |
if (likely(boot_cpu_data.x86 > 4)) \ |
__ret = (__typeof__(*(ptr)))__cmpxchg64_local((ptr), \ |
(unsigned long long)(o), \ |
(unsigned long long)(n)); \ |
else \ |
__ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr), \ |
(unsigned long long)(o), \ |
(unsigned long long)(n)); \ |
__ret; \ |
}) |
#endif |
#endif /* _ASM_X86_CMPXCHG_32_H */ |
/drivers/include/linux/asm/cpufeature.h
/* |
* Defines x86 CPU feature bits |
*/ |
#ifndef _ASM_X86_CPUFEATURE_H |
#define _ASM_X86_CPUFEATURE_H |
#include <asm/required-features.h> |
#define NCAPINTS 9 /* N 32-bit words worth of info */ |
/* |
* Note: If the comment begins with a quoted string, that string is used |
* in /proc/cpuinfo instead of the macro name. If the string is "", |
* this feature bit is not displayed in /proc/cpuinfo at all. |
*/ |
/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ |
#define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */ |
#define X86_FEATURE_VME (0*32+ 1) /* Virtual Mode Extensions */ |
#define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */ |
#define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */ |
#define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */ |
#define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers */ |
#define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */ |
#define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Exception */ |
#define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */ |
#define X86_FEATURE_APIC (0*32+ 9) /* Onboard APIC */ |
#define X86_FEATURE_SEP (0*32+11) /* SYSENTER/SYSEXIT */ |
#define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */ |
#define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */ |
#define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */ |
#define X86_FEATURE_CMOV (0*32+15) /* CMOV instructions */ |
/* (plus FCMOVcc, FCOMI with FPU) */ |
#define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */ |
#define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */ |
#define X86_FEATURE_PN (0*32+18) /* Processor serial number */ |
#define X86_FEATURE_CLFLSH (0*32+19) /* "clflush" CLFLUSH instruction */ |
#define X86_FEATURE_DS (0*32+21) /* "dts" Debug Store */ |
#define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */ |
#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ |
#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */ |
#define X86_FEATURE_XMM (0*32+25) /* "sse" */ |
#define X86_FEATURE_XMM2 (0*32+26) /* "sse2" */ |
#define X86_FEATURE_SELFSNOOP (0*32+27) /* "ss" CPU self snoop */ |
#define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */ |
#define X86_FEATURE_ACC (0*32+29) /* "tm" Automatic clock control */ |
#define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */ |
#define X86_FEATURE_PBE (0*32+31) /* Pending Break Enable */ |
/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ |
/* Don't duplicate feature flags which are redundant with Intel! */ |
#define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */ |
#define X86_FEATURE_MP (1*32+19) /* MP Capable. */ |
#define X86_FEATURE_NX (1*32+20) /* Execute Disable */ |
#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */ |
#define X86_FEATURE_FXSR_OPT (1*32+25) /* FXSAVE/FXRSTOR optimizations */ |
#define X86_FEATURE_GBPAGES (1*32+26) /* "pdpe1gb" GB pages */ |
#define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */ |
#define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */ |
#define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */ |
#define X86_FEATURE_3DNOW (1*32+31) /* 3DNow! */ |
/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */ |
#define X86_FEATURE_RECOVERY (2*32+ 0) /* CPU in recovery mode */ |
#define X86_FEATURE_LONGRUN (2*32+ 1) /* Longrun power control */ |
#define X86_FEATURE_LRTI (2*32+ 3) /* LongRun table interface */ |
/* Other features, Linux-defined mapping, word 3 */ |
/* This range is used for feature bits which conflict or are synthesized */ |
#define X86_FEATURE_CXMMX (3*32+ 0) /* Cyrix MMX extensions */ |
#define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */ |
#define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */ |
#define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */ |
/* cpu types for specific tunings: */ |
#define X86_FEATURE_K8 (3*32+ 4) /* "" Opteron, Athlon64 */ |
#define X86_FEATURE_K7 (3*32+ 5) /* "" Athlon */ |
#define X86_FEATURE_P3 (3*32+ 6) /* "" P3 */ |
#define X86_FEATURE_P4 (3*32+ 7) /* "" P4 */ |
#define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */ |
#define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ |
#define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* "" FXSAVE leaks FOP/FIP/FOP */ |
#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */ |
#define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */ |
#define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */ |
#define X86_FEATURE_SYSCALL32 (3*32+14) /* "" syscall in ia32 userspace */ |
#define X86_FEATURE_SYSENTER32 (3*32+15) /* "" sysenter in ia32 userspace */ |
#define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well */ |
#define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* "" Mfence synchronizes RDTSC */ |
#define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* "" Lfence synchronizes RDTSC */ |
#define X86_FEATURE_11AP (3*32+19) /* "" Bad local APIC aka 11AP */ |
#define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */ |
#define X86_FEATURE_AMDC1E (3*32+21) /* AMD C1E detected */ |
#define X86_FEATURE_XTOPOLOGY (3*32+22) /* cpu topology enum extensions */ |
#define X86_FEATURE_TSC_RELIABLE (3*32+23) /* TSC is known to be reliable */ |
#define X86_FEATURE_NONSTOP_TSC (3*32+24) /* TSC does not stop in C states */ |
#define X86_FEATURE_CLFLUSH_MONITOR (3*32+25) /* "" clflush reqd with monitor */ |
#define X86_FEATURE_EXTD_APICID (3*32+26) /* has extended APICID (8 bits) */ |
#define X86_FEATURE_AMD_DCM (3*32+27) /* multi-node processor */ |
#define X86_FEATURE_APERFMPERF (3*32+28) /* APERFMPERF */ |
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ |
#define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */ |
#define X86_FEATURE_PCLMULQDQ (4*32+ 1) /* PCLMULQDQ instruction */ |
#define X86_FEATURE_DTES64 (4*32+ 2) /* 64-bit Debug Store */ |
#define X86_FEATURE_MWAIT (4*32+ 3) /* "monitor" Monitor/Mwait support */ |
#define X86_FEATURE_DSCPL (4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */ |
#define X86_FEATURE_VMX (4*32+ 5) /* Hardware virtualization */ |
#define X86_FEATURE_SMX (4*32+ 6) /* Safer mode */ |
#define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */ |
#define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */ |
#define X86_FEATURE_SSSE3 (4*32+ 9) /* Supplemental SSE-3 */ |
#define X86_FEATURE_CID (4*32+10) /* Context ID */ |
#define X86_FEATURE_FMA (4*32+12) /* Fused multiply-add */ |
#define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */ |
#define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */ |
#define X86_FEATURE_PDCM (4*32+15) /* Performance Capabilities */ |
#define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */ |
#define X86_FEATURE_XMM4_1 (4*32+19) /* "sse4_1" SSE-4.1 */ |
#define X86_FEATURE_XMM4_2 (4*32+20) /* "sse4_2" SSE-4.2 */ |
#define X86_FEATURE_X2APIC (4*32+21) /* x2APIC */ |
#define X86_FEATURE_MOVBE (4*32+22) /* MOVBE instruction */ |
#define X86_FEATURE_POPCNT (4*32+23) /* POPCNT instruction */ |
#define X86_FEATURE_AES (4*32+25) /* AES instructions */ |
#define X86_FEATURE_XSAVE (4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */ |
#define X86_FEATURE_OSXSAVE (4*32+27) /* "" XSAVE enabled in the OS */ |
#define X86_FEATURE_AVX (4*32+28) /* Advanced Vector Extensions */ |
#define X86_FEATURE_HYPERVISOR (4*32+31) /* Running on a hypervisor */ |
/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ |
#define X86_FEATURE_XSTORE (5*32+ 2) /* "rng" RNG present (xstore) */ |
#define X86_FEATURE_XSTORE_EN (5*32+ 3) /* "rng_en" RNG enabled */ |
#define X86_FEATURE_XCRYPT (5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ |
#define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* "ace_en" on-CPU crypto enabled */ |
#define X86_FEATURE_ACE2 (5*32+ 8) /* Advanced Cryptography Engine v2 */ |
#define X86_FEATURE_ACE2_EN (5*32+ 9) /* ACE v2 enabled */ |
#define X86_FEATURE_PHE (5*32+10) /* PadLock Hash Engine */ |
#define X86_FEATURE_PHE_EN (5*32+11) /* PHE enabled */ |
#define X86_FEATURE_PMM (5*32+12) /* PadLock Montgomery Multiplier */ |
#define X86_FEATURE_PMM_EN (5*32+13) /* PMM enabled */ |
/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ |
#define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */ |
#define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */ |
#define X86_FEATURE_SVM (6*32+ 2) /* Secure virtual machine */ |
#define X86_FEATURE_EXTAPIC (6*32+ 3) /* Extended APIC space */ |
#define X86_FEATURE_CR8_LEGACY (6*32+ 4) /* CR8 in 32-bit mode */ |
#define X86_FEATURE_ABM (6*32+ 5) /* Advanced bit manipulation */ |
#define X86_FEATURE_SSE4A (6*32+ 6) /* SSE-4A */ |
#define X86_FEATURE_MISALIGNSSE (6*32+ 7) /* Misaligned SSE mode */ |
#define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */ |
#define X86_FEATURE_OSVW (6*32+ 9) /* OS Visible Workaround */ |
#define X86_FEATURE_IBS (6*32+10) /* Instruction Based Sampling */ |
#define X86_FEATURE_SSE5 (6*32+11) /* SSE-5 */ |
#define X86_FEATURE_SKINIT (6*32+12) /* SKINIT/STGI instructions */ |
#define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */ |
/* |
* Auxiliary flags: Linux defined - For features scattered in various |
* CPUID levels like 0x6, 0xA etc |
*/ |
#define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */ |
#define X86_FEATURE_ARAT (7*32+ 1) /* Always Running APIC Timer */ |
/* Virtualization flags: Linux defined */ |
#define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */ |
#define X86_FEATURE_VNMI (8*32+ 1) /* Intel Virtual NMI */ |
#define X86_FEATURE_FLEXPRIORITY (8*32+ 2) /* Intel FlexPriority */ |
#define X86_FEATURE_EPT (8*32+ 3) /* Intel Extended Page Table */ |
#define X86_FEATURE_VPID (8*32+ 4) /* Intel Virtual Processor ID */ |
#if defined(__KERNEL__) && !defined(__ASSEMBLY__) |
#include <linux/bitops.h> |
extern const char * const x86_cap_flags[NCAPINTS*32]; |
extern const char * const x86_power_flags[32]; |
#define test_cpu_cap(c, bit) \ |
test_bit(bit, (unsigned long *)((c)->x86_capability)) |
#define cpu_has(c, bit) \ |
(__builtin_constant_p(bit) && \ |
( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \ |
(((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) || \ |
(((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) || \ |
(((bit)>>5)==3 && (1UL<<((bit)&31) & REQUIRED_MASK3)) || \ |
(((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4)) || \ |
(((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5)) || \ |
(((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \ |
(((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) ) \ |
? 1 : \ |
test_cpu_cap(c, bit)) |
#define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit) |
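/*
 * Since REQUIRED_MASKn mirrors the features the kernel refuses to boot
 * without, cpu_has() constant-folds to 1 for them: e.g. when NEED_FPU
 * is in REQUIRED_MASK0, boot_cpu_has(X86_FEATURE_FPU) costs nothing at
 * runtime; non-required bits fall through to test_cpu_cap().
 */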
#define set_cpu_cap(c, bit) set_bit(bit, (unsigned long *)((c)->x86_capability)) |
#define clear_cpu_cap(c, bit) clear_bit(bit, (unsigned long *)((c)->x86_capability)) |
#define setup_clear_cpu_cap(bit) do { \ |
clear_cpu_cap(&boot_cpu_data, bit); \ |
set_bit(bit, (unsigned long *)cpu_caps_cleared); \ |
} while (0) |
#define setup_force_cpu_cap(bit) do { \ |
set_cpu_cap(&boot_cpu_data, bit); \ |
set_bit(bit, (unsigned long *)cpu_caps_set); \ |
} while (0) |
#define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU) |
#define cpu_has_vme boot_cpu_has(X86_FEATURE_VME) |
#define cpu_has_de boot_cpu_has(X86_FEATURE_DE) |
#define cpu_has_pse boot_cpu_has(X86_FEATURE_PSE) |
#define cpu_has_tsc boot_cpu_has(X86_FEATURE_TSC) |
#define cpu_has_pae boot_cpu_has(X86_FEATURE_PAE) |
#define cpu_has_pge boot_cpu_has(X86_FEATURE_PGE) |
#define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC) |
#define cpu_has_sep boot_cpu_has(X86_FEATURE_SEP) |
#define cpu_has_mtrr boot_cpu_has(X86_FEATURE_MTRR) |
#define cpu_has_mmx boot_cpu_has(X86_FEATURE_MMX) |
#define cpu_has_fxsr boot_cpu_has(X86_FEATURE_FXSR) |
#define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM) |
#define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2) |
#define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3) |
#define cpu_has_aes boot_cpu_has(X86_FEATURE_AES) |
#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT) |
#define cpu_has_mp boot_cpu_has(X86_FEATURE_MP) |
#define cpu_has_nx boot_cpu_has(X86_FEATURE_NX) |
#define cpu_has_k6_mtrr boot_cpu_has(X86_FEATURE_K6_MTRR) |
#define cpu_has_cyrix_arr boot_cpu_has(X86_FEATURE_CYRIX_ARR) |
#define cpu_has_centaur_mcr boot_cpu_has(X86_FEATURE_CENTAUR_MCR) |
#define cpu_has_xstore boot_cpu_has(X86_FEATURE_XSTORE) |
#define cpu_has_xstore_enabled boot_cpu_has(X86_FEATURE_XSTORE_EN) |
#define cpu_has_xcrypt boot_cpu_has(X86_FEATURE_XCRYPT) |
#define cpu_has_xcrypt_enabled boot_cpu_has(X86_FEATURE_XCRYPT_EN) |
#define cpu_has_ace2 boot_cpu_has(X86_FEATURE_ACE2) |
#define cpu_has_ace2_enabled boot_cpu_has(X86_FEATURE_ACE2_EN) |
#define cpu_has_phe boot_cpu_has(X86_FEATURE_PHE) |
#define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN) |
#define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM) |
#define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN) |
#define cpu_has_ds boot_cpu_has(X86_FEATURE_DS) |
#define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS) |
#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH) |
#define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS) |
#define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES) |
#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON) |
#define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT) |
#define cpu_has_xmm4_1 boot_cpu_has(X86_FEATURE_XMM4_1) |
#define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2) |
#define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC) |
#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE) |
#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR) |
#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ) |
#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64) |
# define cpu_has_invlpg 1 |
#else |
# define cpu_has_invlpg (boot_cpu_data.x86 > 3) |
#endif |
#ifdef CONFIG_X86_64 |
#undef cpu_has_vme |
#define cpu_has_vme 0 |
#undef cpu_has_pae |
#define cpu_has_pae ___BUG___ |
#undef cpu_has_mp |
#define cpu_has_mp 1 |
#undef cpu_has_k6_mtrr |
#define cpu_has_k6_mtrr 0 |
#undef cpu_has_cyrix_arr |
#define cpu_has_cyrix_arr 0 |
#undef cpu_has_centaur_mcr |
#define cpu_has_centaur_mcr 0 |
#endif /* CONFIG_X86_64 */ |
#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */ |
#endif /* _ASM_X86_CPUFEATURE_H */ |
/drivers/include/linux/asm/posix_types.h
#ifdef __KERNEL__ |
# ifdef CONFIG_X86_32 |
# include "posix_types_32.h" |
# else |
# include "posix_types_64.h" |
# endif |
#else |
# ifdef __i386__ |
# include "posix_types_32.h" |
# else |
# include "posix_types_64.h" |
# endif |
#endif |
/drivers/include/linux/asm/posix_types_32.h
#ifndef _ASM_X86_POSIX_TYPES_32_H |
#define _ASM_X86_POSIX_TYPES_32_H |
/* |
* This file is generally used by user-level software, so you need to |
* be a little careful about namespace pollution etc. Also, we cannot |
* assume GCC is being used. |
*/ |
typedef unsigned long __kernel_ino_t; |
typedef unsigned short __kernel_mode_t; |
typedef unsigned short __kernel_nlink_t; |
typedef long __kernel_off_t; |
typedef int __kernel_pid_t; |
typedef unsigned short __kernel_ipc_pid_t; |
typedef unsigned short __kernel_uid_t; |
typedef unsigned short __kernel_gid_t; |
typedef unsigned int __kernel_size_t; |
typedef int __kernel_ssize_t; |
typedef int __kernel_ptrdiff_t; |
typedef long __kernel_time_t; |
typedef long __kernel_suseconds_t; |
typedef long __kernel_clock_t; |
typedef int __kernel_timer_t; |
typedef int __kernel_clockid_t; |
typedef int __kernel_daddr_t; |
typedef char * __kernel_caddr_t; |
typedef unsigned short __kernel_uid16_t; |
typedef unsigned short __kernel_gid16_t; |
typedef unsigned int __kernel_uid32_t; |
typedef unsigned int __kernel_gid32_t; |
typedef unsigned short __kernel_old_uid_t; |
typedef unsigned short __kernel_old_gid_t; |
typedef unsigned short __kernel_old_dev_t; |
#ifdef __GNUC__ |
typedef long long __kernel_loff_t; |
#endif |
typedef struct { |
int val[2]; |
} __kernel_fsid_t; |
#if defined(__KERNEL__) |
#undef __FD_SET |
#define __FD_SET(fd,fdsetp) \ |
asm volatile("btsl %1,%0": \ |
"+m" (*(__kernel_fd_set *)(fdsetp)) \ |
: "r" ((int)(fd))) |
#undef __FD_CLR |
#define __FD_CLR(fd,fdsetp) \ |
asm volatile("btrl %1,%0": \ |
"+m" (*(__kernel_fd_set *)(fdsetp)) \ |
: "r" ((int) (fd))) |
#undef __FD_ISSET |
#define __FD_ISSET(fd,fdsetp) \ |
(__extension__ \ |
({ \ |
unsigned char __result; \ |
asm volatile("btl %1,%2 ; setb %0" \ |
: "=q" (__result) \ |
: "r" ((int)(fd)), \ |
"m" (*(__kernel_fd_set *)(fdsetp))); \ |
__result; \ |
})) |
#undef __FD_ZERO |
#define __FD_ZERO(fdsetp) \ |
do { \ |
int __d0, __d1; \ |
asm volatile("cld ; rep ; stosl" \ |
: "=m" (*(__kernel_fd_set *)(fdsetp)), \ |
"=&c" (__d0), "=&D" (__d1) \ |
: "a" (0), "1" (__FDSET_LONGS), \ |
"2" ((__kernel_fd_set *)(fdsetp)) \ |
: "memory"); \ |
} while (0) |
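#if 0
/* Illustrative use of the asm fd_set helpers above. */
static inline int example_watch(__kernel_fd_set *set, int fd)
{
	__FD_ZERO(set);
	__FD_SET(fd, set);
	return __FD_ISSET(fd, set);	/* nonzero: fd is in the set */
}
#endif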
#endif /* defined(__KERNEL__) */ |
#endif /* _ASM_X86_POSIX_TYPES_32_H */ |
/drivers/include/linux/asm/required-features.h
#ifndef _ASM_X86_REQUIRED_FEATURES_H |
#define _ASM_X86_REQUIRED_FEATURES_H |
/* Define the minimum CPUID feature set for the kernel. These bits are
checked really early to actually display a visible error message before
the kernel dies. Make sure to assign features to the proper mask!
Some requirements that are not in CPUID yet are also in
CONFIG_X86_MINIMUM_CPU_FAMILY, which is checked too.
The real information is in arch/x86/Kconfig.cpu; this just converts
the CONFIGs into a bitmask */
#ifndef CONFIG_MATH_EMULATION |
# define NEED_FPU (1<<(X86_FEATURE_FPU & 31)) |
#else |
# define NEED_FPU 0 |
#endif |
#if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64) |
# define NEED_PAE (1<<(X86_FEATURE_PAE & 31)) |
#else |
# define NEED_PAE 0 |
#endif |
#ifdef CONFIG_X86_CMPXCHG64 |
# define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31)) |
#else |
# define NEED_CX8 0 |
#endif |
#if defined(CONFIG_X86_CMOV) || defined(CONFIG_X86_64) |
# define NEED_CMOV (1<<(X86_FEATURE_CMOV & 31)) |
#else |
# define NEED_CMOV 0 |
#endif |
#ifdef CONFIG_X86_USE_3DNOW |
# define NEED_3DNOW (1<<(X86_FEATURE_3DNOW & 31)) |
#else |
# define NEED_3DNOW 0 |
#endif |
#if defined(CONFIG_X86_P6_NOP) || defined(CONFIG_X86_64) |
# define NEED_NOPL (1<<(X86_FEATURE_NOPL & 31)) |
#else |
# define NEED_NOPL 0 |
#endif |
#ifdef CONFIG_X86_64 |
#ifdef CONFIG_PARAVIRT |
/* Paravirtualized systems may not have PSE or PGE available */ |
#define NEED_PSE 0 |
#define NEED_PGE 0 |
#else |
#define NEED_PSE (1<<(X86_FEATURE_PSE & 31))
#define NEED_PGE (1<<(X86_FEATURE_PGE & 31))
#endif |
#define NEED_MSR (1<<(X86_FEATURE_MSR & 31)) |
#define NEED_FXSR (1<<(X86_FEATURE_FXSR & 31)) |
#define NEED_XMM (1<<(X86_FEATURE_XMM & 31)) |
#define NEED_XMM2 (1<<(X86_FEATURE_XMM2 & 31)) |
#define NEED_LM (1<<(X86_FEATURE_LM & 31)) |
#else |
#define NEED_PSE 0 |
#define NEED_MSR 0 |
#define NEED_PGE 0 |
#define NEED_FXSR 0 |
#define NEED_XMM 0 |
#define NEED_XMM2 0 |
#define NEED_LM 0 |
#endif |
#define REQUIRED_MASK0 (NEED_FPU|NEED_PSE|NEED_MSR|NEED_PAE|\ |
NEED_CX8|NEED_PGE|NEED_FXSR|NEED_CMOV|\ |
NEED_XMM|NEED_XMM2) |
#define SSE_MASK (NEED_XMM|NEED_XMM2) |
#define REQUIRED_MASK1 (NEED_LM|NEED_3DNOW) |
#define REQUIRED_MASK2 0 |
#define REQUIRED_MASK3 (NEED_NOPL) |
#define REQUIRED_MASK4 0 |
#define REQUIRED_MASK5 0 |
#define REQUIRED_MASK6 0 |
#define REQUIRED_MASK7 0 |
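/* |
 * A minimal sketch of how these masks are meant to be consumed, assuming |
 * the capability words are the kernel's per-leaf 32-bit CPUID feature |
 * words; the helper name is hypothetical: |
 * |
 *   static int has_required_features(const u32 cap[8]) |
 *   { |
 *           static const u32 req[8] = { |
 *                   REQUIRED_MASK0, REQUIRED_MASK1, REQUIRED_MASK2, |
 *                   REQUIRED_MASK3, REQUIRED_MASK4, REQUIRED_MASK5, |
 *                   REQUIRED_MASK6, REQUIRED_MASK7, |
 *           }; |
 *           int i; |
 * |
 *           for (i = 0; i < 8; i++) |
 *                   if ((cap[i] & req[i]) != req[i]) |
 *                           return 0;       -- required feature missing |
 *           return 1; |
 *   } |
 */ |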
#endif /* _ASM_X86_REQUIRED_FEATURES_H */ |
/drivers/include/linux/asm/spinlock_types.h |
---|
0,0 → 1,20 |
#ifndef _ASM_X86_SPINLOCK_TYPES_H |
#define _ASM_X86_SPINLOCK_TYPES_H |
#ifndef __LINUX_SPINLOCK_TYPES_H |
# error "please don't include this file directly" |
#endif |
typedef struct raw_spinlock { |
unsigned int slock; |
} raw_spinlock_t; |
#define __RAW_SPIN_LOCK_UNLOCKED { 0 } |
typedef struct { |
unsigned int lock; |
} raw_rwlock_t; |
#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } |
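/* |
 * Typical static initialization using the macros above (variable names |
 * are illustrative): |
 * |
 *   raw_spinlock_t my_lock   = __RAW_SPIN_LOCK_UNLOCKED; |
 *   raw_rwlock_t   my_rwlock = __RAW_RW_LOCK_UNLOCKED; |
 */ |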
#endif /* _ASM_X86_SPINLOCK_TYPES_H */ |
/drivers/include/linux/asm/string.h |
---|
0,0 → 1,5 |
#ifdef CONFIG_X86_32 |
# include "string_32.h" |
#else |
# include "string_64.h" |
#endif |
/drivers/include/linux/asm/string_32.h |
---|
0,0 → 1,342 |
#ifndef _ASM_X86_STRING_32_H |
#define _ASM_X86_STRING_32_H |
#ifdef __KERNEL__ |
/* Let gcc decide whether to inline or to use the out-of-line functions */ |
#define __HAVE_ARCH_STRCPY |
extern char *strcpy(char *dest, const char *src); |
#define __HAVE_ARCH_STRNCPY |
extern char *strncpy(char *dest, const char *src, size_t count); |
#define __HAVE_ARCH_STRCAT |
extern char *strcat(char *dest, const char *src); |
#define __HAVE_ARCH_STRNCAT |
extern char *strncat(char *dest, const char *src, size_t count); |
#define __HAVE_ARCH_STRCMP |
extern int strcmp(const char *cs, const char *ct); |
#define __HAVE_ARCH_STRNCMP |
extern int strncmp(const char *cs, const char *ct, size_t count); |
#define __HAVE_ARCH_STRCHR |
extern char *strchr(const char *s, int c); |
#define __HAVE_ARCH_STRLEN |
extern size_t strlen(const char *s); |
static __always_inline void *__memcpy(void *to, const void *from, size_t n) |
{ |
int d0, d1, d2; |
asm volatile("rep ; movsl\n\t" |
"movl %4,%%ecx\n\t" |
"andl $3,%%ecx\n\t" |
"jz 1f\n\t" |
"rep ; movsb\n\t" |
"1:" |
: "=&c" (d0), "=&D" (d1), "=&S" (d2) |
: "0" (n / 4), "g" (n), "1" ((long)to), "2" ((long)from) |
: "memory"); |
return to; |
} |
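/* |
 * A hypothetical plain-C rendering of the asm above -- copy n/4 32-bit |
 * words (rep movsl), then the remaining 0-3 bytes (rep movsb): |
 * |
 *   size_t words = n / 4, tail = n & 3; |
 *   int *d = to; |
 *   const int *s = from; |
 *   while (words--) |
 *           *d++ = *s++; |
 *   char *dc = (char *)d; |
 *   const char *sc = (const char *)s; |
 *   while (tail--) |
 *           *dc++ = *sc++; |
 *   return to; |
 */ |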
/* |
* This looks ugly, but the compiler can optimize it totally, |
* as the count is constant. |
*/ |
static __always_inline void *__constant_memcpy(void *to, const void *from, |
size_t n) |
{ |
long esi, edi; |
if (!n) |
return to; |
switch (n) { |
case 1: |
*(char *)to = *(char *)from; |
return to; |
case 2: |
*(short *)to = *(short *)from; |
return to; |
case 4: |
*(int *)to = *(int *)from; |
return to; |
case 3: |
*(short *)to = *(short *)from; |
*((char *)to + 2) = *((char *)from + 2); |
return to; |
case 5: |
*(int *)to = *(int *)from; |
*((char *)to + 4) = *((char *)from + 4); |
return to; |
case 6: |
*(int *)to = *(int *)from; |
*((short *)to + 2) = *((short *)from + 2); |
return to; |
case 8: |
*(int *)to = *(int *)from; |
*((int *)to + 1) = *((int *)from + 1); |
return to; |
} |
esi = (long)from; |
edi = (long)to; |
if (n >= 5 * 4) { |
/* large block: use rep prefix */ |
int ecx; |
asm volatile("rep ; movsl" |
: "=&c" (ecx), "=&D" (edi), "=&S" (esi) |
: "0" (n / 4), "1" (edi), "2" (esi) |
: "memory" |
); |
} else { |
/* small block: don't clobber ecx + smaller code */ |
if (n >= 4 * 4) |
asm volatile("movsl" |
: "=&D"(edi), "=&S"(esi) |
: "0"(edi), "1"(esi) |
: "memory"); |
if (n >= 3 * 4) |
asm volatile("movsl" |
: "=&D"(edi), "=&S"(esi) |
: "0"(edi), "1"(esi) |
: "memory"); |
if (n >= 2 * 4) |
asm volatile("movsl" |
: "=&D"(edi), "=&S"(esi) |
: "0"(edi), "1"(esi) |
: "memory"); |
if (n >= 1 * 4) |
asm volatile("movsl" |
: "=&D"(edi), "=&S"(esi) |
: "0"(edi), "1"(esi) |
: "memory"); |
} |
switch (n % 4) { |
/* tail */ |
case 0: |
return to; |
case 1: |
asm volatile("movsb" |
: "=&D"(edi), "=&S"(esi) |
: "0"(edi), "1"(esi) |
: "memory"); |
return to; |
case 2: |
asm volatile("movsw" |
: "=&D"(edi), "=&S"(esi) |
: "0"(edi), "1"(esi) |
: "memory"); |
return to; |
default: |
asm volatile("movsw\n\tmovsb" |
: "=&D"(edi), "=&S"(esi) |
: "0"(edi), "1"(esi) |
: "memory"); |
return to; |
} |
} |
#define __HAVE_ARCH_MEMCPY |
#ifdef CONFIG_X86_USE_3DNOW |
#include <asm/mmx.h> |
/* |
 * This CPU favours 3DNow! strongly (e.g. the AMD Athlon) |
 */ |
static inline void *__constant_memcpy3d(void *to, const void *from, size_t len) |
{ |
if (len < 512) |
return __constant_memcpy(to, from, len); |
return _mmx_memcpy(to, from, len); |
} |
static inline void *__memcpy3d(void *to, const void *from, size_t len) |
{ |
if (len < 512) |
return __memcpy(to, from, len); |
return _mmx_memcpy(to, from, len); |
} |
#define memcpy(t, f, n) \ |
(__builtin_constant_p((n)) \ |
? __constant_memcpy3d((t), (f), (n)) \ |
: __memcpy3d((t), (f), (n))) |
#else |
/* |
* No 3D Now! |
*/ |
#ifndef CONFIG_KMEMCHECK |
#if (__GNUC__ >= 4) |
#define memcpy(t, f, n) __builtin_memcpy(t, f, n) |
#else |
#define memcpy(t, f, n) \ |
(__builtin_constant_p((n)) \ |
? __constant_memcpy((t), (f), (n)) \ |
: __memcpy((t), (f), (n))) |
#endif |
#else |
/* |
* kmemcheck becomes very happy if we use the REP instructions unconditionally, |
* because it means that we know both memory operands in advance. |
*/ |
#define memcpy(t, f, n) __memcpy((t), (f), (n)) |
#endif |
#endif |
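/* |
 * With the __builtin_constant_p() dispatch above, a compile-time-constant |
 * length, e.g. (illustrative): |
 * |
 *   memcpy(dst, src, 8);    -- resolves to __constant_memcpy(): two 32-bit stores |
 * |
 * is expanded at compile time, while a runtime length falls back to the |
 * rep-prefixed __memcpy() (or __builtin_memcpy() on gcc >= 4). |
 */ |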
#define __HAVE_ARCH_MEMMOVE |
void *memmove(void *dest, const void *src, size_t n); |
#define memcmp __builtin_memcmp |
#define __HAVE_ARCH_MEMCHR |
extern void *memchr(const void *cs, int c, size_t count); |
static inline void *__memset_generic(void *s, char c, size_t count) |
{ |
int d0, d1; |
asm volatile("rep\n\t" |
"stosb" |
: "=&c" (d0), "=&D" (d1) |
: "a" (c), "1" (s), "0" (count) |
: "memory"); |
return s; |
} |
/* we might want to write optimized versions of these later */ |
#define __constant_count_memset(s, c, count) __memset_generic((s), (c), (count)) |
/* |
 * memset(x, 0, y) is a reasonably common thing to do, so we want to fill |
 * things 32 bits at a time even when we don't know the size of the |
 * area at compile time. |
 */ |
static __always_inline |
void *__constant_c_memset(void *s, unsigned long c, size_t count) |
{ |
int d0, d1; |
asm volatile("rep ; stosl\n\t" |
"testb $2,%b3\n\t" |
"je 1f\n\t" |
"stosw\n" |
"1:\ttestb $1,%b3\n\t" |
"je 2f\n\t" |
"stosb\n" |
"2:" |
: "=&c" (d0), "=&D" (d1) |
: "a" (c), "q" (count), "0" (count/4), "1" ((long)s) |
: "memory"); |
return s; |
} |
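/* |
 * The tail handling above, as hypothetical plain C (p stands for the |
 * write cursor left after the count/4 stosl stores): bit 1 of count |
 * selects one 16-bit store, bit 0 one final byte store: |
 * |
 *   if (count & 2) { |
 *           *(u16 *)p = c; |
 *           p += 2; |
 *   } |
 *   if (count & 1) |
 *           *(u8 *)p = c; |
 */ |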
/* Added by Gertjan van Wingerde to make the minix and sysv modules work */ |
#define __HAVE_ARCH_STRNLEN |
extern size_t strnlen(const char *s, size_t count); |
/* end of additional stuff */ |
#define __HAVE_ARCH_STRSTR |
extern char *strstr(const char *cs, const char *ct); |
/* |
 * This looks horribly ugly, but the compiler can optimize it totally, |
 * as by now we know that both pattern and count are constant. |
 */ |
static __always_inline |
void *__constant_c_and_count_memset(void *s, unsigned long pattern, |
size_t count) |
{ |
switch (count) { |
case 0: |
return s; |
case 1: |
*(unsigned char *)s = pattern & 0xff; |
return s; |
case 2: |
*(unsigned short *)s = pattern & 0xffff; |
return s; |
case 3: |
*(unsigned short *)s = pattern & 0xffff; |
*((unsigned char *)s + 2) = pattern & 0xff; |
return s; |
case 4: |
*(unsigned long *)s = pattern; |
return s; |
} |
#define COMMON(x) \ |
asm volatile("rep ; stosl" \ |
x \ |
: "=&c" (d0), "=&D" (d1) \ |
: "a" (eax), "0" (count/4), "1" ((long)s) \ |
: "memory") |
{ |
int d0, d1; |
#if __GNUC__ == 4 && __GNUC_MINOR__ == 0 |
/* Workaround for broken gcc 4.0 */ |
register unsigned long eax asm("%eax") = pattern; |
#else |
unsigned long eax = pattern; |
#endif |
switch (count % 4) { |
case 0: |
COMMON(""); |
return s; |
case 1: |
COMMON("\n\tstosb"); |
return s; |
case 2: |
COMMON("\n\tstosw"); |
return s; |
default: |
COMMON("\n\tstosw\n\tstosb"); |
return s; |
} |
} |
#undef COMMON |
} |
#define __constant_c_x_memset(s, c, count) \ |
(__builtin_constant_p(count) \ |
? __constant_c_and_count_memset((s), (c), (count)) \ |
: __constant_c_memset((s), (c), (count))) |
#define __memset(s, c, count) \ |
(__builtin_constant_p(count) \ |
? __constant_count_memset((s), (c), (count)) \ |
: __memset_generic((s), (c), (count))) |
#define __HAVE_ARCH_MEMSET |
#if (__GNUC__ >= 4) |
#define memset(s, c, count) __builtin_memset(s, c, count) |
#else |
#define memset(s, c, count) \ |
(__builtin_constant_p(c) \ |
? __constant_c_x_memset((s), (0x01010101UL * (unsigned char)(c)), \ |
(count)) \ |
: __memset((s), (c), (count))) |
#endif |
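/* |
 * The 0x01010101UL multiply above replicates the fill byte into all four |
 * byte lanes of a 32-bit word, so the stosl-based helpers can store four |
 * bytes per iteration, e.g. for c == 0xAB: |
 * |
 *   0x01010101UL * 0xAB == 0xABABABABUL |
 */ |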
/* |
 * Find the first occurrence of byte 'c', or a pointer one past the area |
 * if none is found. |
 */ |
#define __HAVE_ARCH_MEMSCAN |
extern void *memscan(void *addr, int c, size_t size); |
#endif /* __KERNEL__ */ |
#endif /* _ASM_X86_STRING_32_H */ |
/drivers/include/linux/asm/swab.h |
---|
0,0 → 1,61 |
#ifndef _ASM_X86_SWAB_H |
#define _ASM_X86_SWAB_H |
#include <linux/types.h> |
#include <linux/compiler.h> |
static inline __attribute_const__ __u32 __arch_swab32(__u32 val) |
{ |
#ifdef __i386__ |
# ifdef CONFIG_X86_BSWAP |
asm("bswap %0" : "=r" (val) : "0" (val)); |
# else |
asm("xchgb %b0,%h0\n\t" /* swap lower bytes */ |
"rorl $16,%0\n\t" /* swap words */ |
"xchgb %b0,%h0" /* swap higher bytes */ |
: "=q" (val) |
: "0" (val)); |
# endif |
#else /* __i386__ */ |
asm("bswapl %0" |
: "=r" (val) |
: "0" (val)); |
#endif |
return val; |
} |
#define __arch_swab32 __arch_swab32 |
static inline __attribute_const__ __u64 __arch_swab64(__u64 val) |
{ |
#ifdef __i386__ |
union { |
struct { |
__u32 a; |
__u32 b; |
} s; |
__u64 u; |
} v; |
v.u = val; |
# ifdef CONFIG_X86_BSWAP |
asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1" |
: "=r" (v.s.a), "=r" (v.s.b) |
: "0" (v.s.a), "1" (v.s.b)); |
# else |
v.s.a = __arch_swab32(v.s.a); |
v.s.b = __arch_swab32(v.s.b); |
asm("xchgl %0,%1" |
: "=r" (v.s.a), "=r" (v.s.b) |
: "0" (v.s.a), "1" (v.s.b)); |
# endif |
return v.u; |
#else /* __i386__ */ |
asm("bswapq %0" |
: "=r" (val) |
: "0" (val)); |
return val; |
#endif |
} |
#define __arch_swab64 __arch_swab64 |
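/* |
 * Both helpers reverse byte order end-to-end, e.g.: |
 * |
 *   __arch_swab32(0x12345678)            == 0x78563412 |
 *   __arch_swab64(0x0102030405060708ULL) == 0x0807060504030201ULL |
 */ |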
#endif /* _ASM_X86_SWAB_H */ |
/drivers/include/linux/asm/types.h |
---|
0,0 → 1,22 |
#ifndef _ASM_X86_TYPES_H |
#define _ASM_X86_TYPES_H |
#define dma_addr_t dma_addr_t |
#include <asm-generic/types.h> |
#ifdef __KERNEL__ |
#ifndef __ASSEMBLY__ |
typedef u64 dma64_addr_t; |
#if defined(CONFIG_X86_64) || defined(CONFIG_HIGHMEM64G) |
/* DMA addresses come in 32-bit and 64-bit flavours. */ |
typedef u64 dma_addr_t; |
#else |
typedef u32 dma_addr_t; |
#endif |
#endif /* __ASSEMBLY__ */ |
#endif /* __KERNEL__ */ |
#endif /* _ASM_X86_TYPES_H */ |