/drivers/include/linux/asm/atomic_32.h |
---|
File deleted |
/drivers/include/linux/asm/atomic.h |
---|
1,5 → 1,318 |
#ifdef CONFIG_X86_32 |
# include "atomic_32.h" |
#else |
# include "atomic_64.h" |
#endif |
#ifndef _ASM_X86_ATOMIC_H |
#define _ASM_X86_ATOMIC_H |
#include <linux/compiler.h> |
#include <linux/types.h> |
//#include <asm/processor.h> |
//#include <asm/alternative.h> |
#include <asm/cmpxchg.h> |
/* |
* Atomic operations that C can't guarantee us. Useful for |
* resource counting etc.. |
*/ |
#define ATOMIC_INIT(i) { (i) } |
/** |
* atomic_read - read atomic variable |
* @v: pointer of type atomic_t |
* |
* Atomically reads the value of @v. |
*/ |
static inline int atomic_read(const atomic_t *v) |
{ |
return (*(volatile int *)&(v)->counter); |
} |
/** |
* atomic_set - set atomic variable |
* @v: pointer of type atomic_t |
* @i: required value |
* |
* Atomically sets the value of @v to @i. |
*/ |
static inline void atomic_set(atomic_t *v, int i) |
{ |
v->counter = i; |
} |
/** |
* atomic_add - add integer to atomic variable |
* @i: integer value to add |
* @v: pointer of type atomic_t |
* |
* Atomically adds @i to @v. |
*/ |
static inline void atomic_add(int i, atomic_t *v) |
{ |
asm volatile(LOCK_PREFIX "addl %1,%0" |
: "+m" (v->counter) |
: "ir" (i)); |
} |
/** |
* atomic_sub - subtract integer from atomic variable |
* @i: integer value to subtract |
* @v: pointer of type atomic_t |
* |
* Atomically subtracts @i from @v. |
*/ |
static inline void atomic_sub(int i, atomic_t *v) |
{ |
asm volatile(LOCK_PREFIX "subl %1,%0" |
: "+m" (v->counter) |
: "ir" (i)); |
} |
/** |
* atomic_sub_and_test - subtract value from variable and test result |
* @i: integer value to subtract |
* @v: pointer of type atomic_t |
* |
* Atomically subtracts @i from @v and returns |
* true if the result is zero, or false for all |
* other cases. |
*/ |
static inline int atomic_sub_and_test(int i, atomic_t *v) |
{ |
unsigned char c; |
asm volatile(LOCK_PREFIX "subl %2,%0; sete %1" |
: "+m" (v->counter), "=qm" (c) |
: "ir" (i) : "memory"); |
return c; |
} |
/** |
* atomic_inc - increment atomic variable |
* @v: pointer of type atomic_t |
* |
* Atomically increments @v by 1. |
*/ |
static inline void atomic_inc(atomic_t *v) |
{ |
asm volatile(LOCK_PREFIX "incl %0" |
: "+m" (v->counter)); |
} |
/** |
* atomic_dec - decrement atomic variable |
* @v: pointer of type atomic_t |
* |
* Atomically decrements @v by 1. |
*/ |
static inline void atomic_dec(atomic_t *v) |
{ |
asm volatile(LOCK_PREFIX "decl %0" |
: "+m" (v->counter)); |
} |
/** |
* atomic_dec_and_test - decrement and test |
* @v: pointer of type atomic_t |
* |
* Atomically decrements @v by 1 and |
* returns true if the result is 0, or false for all other |
* cases. |
*/ |
static inline int atomic_dec_and_test(atomic_t *v) |
{ |
unsigned char c; |
asm volatile(LOCK_PREFIX "decl %0; sete %1" |
: "+m" (v->counter), "=qm" (c) |
: : "memory"); |
return c != 0; |
} |
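A typical use of atomic_dec_and_test() is reference counting, where only the caller that drops the final reference tears the object down. A minimal sketch, assuming a hypothetical struct and a kfree()-style free path (neither is part of this header): |
struct my_obj { |
        atomic_t refs;          /* reference count, starts at 1 */ |
        /* ... payload ... */ |
}; |
static void my_obj_put(struct my_obj *obj) |
{ |
        /* Only the caller that drops the last reference sees zero and frees. */ |
        if (atomic_dec_and_test(&obj->refs)) |
                kfree(obj); |
} |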
/** |
* atomic_inc_and_test - increment and test |
* @v: pointer of type atomic_t |
* |
* Atomically increments @v by 1 |
* and returns true if the result is zero, or false for all |
* other cases. |
*/ |
static inline int atomic_inc_and_test(atomic_t *v) |
{ |
unsigned char c; |
asm volatile(LOCK_PREFIX "incl %0; sete %1" |
: "+m" (v->counter), "=qm" (c) |
: : "memory"); |
return c != 0; |
} |
/** |
* atomic_add_negative - add and test if negative |
* @i: integer value to add |
* @v: pointer of type atomic_t |
* |
* Atomically adds @i to @v and returns true |
* if the result is negative, or false when |
* result is greater than or equal to zero. |
*/ |
static inline int atomic_add_negative(int i, atomic_t *v) |
{ |
unsigned char c; |
asm volatile(LOCK_PREFIX "addl %2,%0; sets %1" |
: "+m" (v->counter), "=qm" (c) |
: "ir" (i) : "memory"); |
return c; |
} |
/** |
* atomic_add_return - add integer and return |
* @i: integer value to add |
* @v: pointer of type atomic_t |
* |
* Atomically adds @i to @v and returns @i + @v |
*/ |
static inline int atomic_add_return(int i, atomic_t *v) |
{ |
int __i; |
#ifdef CONFIG_M386 |
unsigned long flags; |
if (unlikely(boot_cpu_data.x86 <= 3)) |
goto no_xadd; |
#endif |
/* Modern 486+ processor */ |
__i = i; |
asm volatile(LOCK_PREFIX "xaddl %0, %1" |
: "+r" (i), "+m" (v->counter) |
: : "memory"); |
return i + __i; |
#ifdef CONFIG_M386 |
no_xadd: /* Legacy 386 processor */ |
raw_local_irq_save(flags); |
__i = atomic_read(v); |
atomic_set(v, i + __i); |
raw_local_irq_restore(flags); |
return i + __i; |
#endif |
} |
/** |
* atomic_sub_return - subtract integer and return |
* @v: pointer of type atomic_t |
* @i: integer value to subtract |
* |
* Atomically subtracts @i from @v and returns @v - @i |
*/ |
static inline int atomic_sub_return(int i, atomic_t *v) |
{ |
return atomic_add_return(-i, v); |
} |
#define atomic_inc_return(v) (atomic_add_return(1, v)) |
#define atomic_dec_return(v) (atomic_sub_return(1, v)) |
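The *_return variants are the ones to use when the updated value itself matters. A minimal sketch, with a hypothetical id counter, of handing out unique ids via atomic_inc_return(): |
static atomic_t next_id = ATOMIC_INIT(0); |
static int alloc_id(void) |
{ |
        /* The increment and the read of the new value happen as one |
         * atomic operation, so concurrent callers never share an id. */ |
        return atomic_inc_return(&next_id); |
} |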
static inline int atomic_cmpxchg(atomic_t *v, int old, int new) |
{ |
return cmpxchg(&v->counter, old, new); |
} |
static inline int atomic_xchg(atomic_t *v, int new) |
{ |
return xchg(&v->counter, new); |
} |
/** |
* atomic_add_unless - add unless the number is already a given value |
* @v: pointer of type atomic_t |
* @a: the amount to add to v... |
* @u: ...unless v is equal to u. |
* |
* Atomically adds @a to @v, so long as @v was not already @u. |
* Returns non-zero if @v was not @u, and zero otherwise. |
*/ |
static inline int atomic_add_unless(atomic_t *v, int a, int u) |
{ |
int c, old; |
c = atomic_read(v); |
for (;;) { |
if (unlikely(c == (u))) |
break; |
old = atomic_cmpxchg((v), c, c + (a)); |
if (likely(old == c)) |
break; |
c = old; |
} |
return c != (u); |
} |
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) |
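atomic_inc_not_zero() is the usual building block for "take a reference only if the object is still live" lookups. A hedged sketch (the helper name is made up): |
static int try_get_ref(atomic_t *refs) |
{ |
        /* Succeeds only while at least one reference is still held; |
         * once the count has reached zero the object is going away. */ |
        return atomic_inc_not_zero(refs); |
} |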
/* |
* atomic_dec_if_positive - decrement by 1 if old value positive |
* @v: pointer of type atomic_t |
* |
* The function returns the old value of *v minus 1, even if |
* the atomic variable, v, was not decremented. |
*/ |
static inline int atomic_dec_if_positive(atomic_t *v) |
{ |
int c, old, dec; |
c = atomic_read(v); |
for (;;) { |
dec = c - 1; |
if (unlikely(dec < 0)) |
break; |
old = atomic_cmpxchg((v), c, dec); |
if (likely(old == c)) |
break; |
c = old; |
} |
return dec; |
} |
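atomic_dec_if_positive() suits counted resources that must never go negative, for example a token pool. A minimal sketch under that assumption: |
static int try_take_token(atomic_t *tokens) |
{ |
        /* Returns 1 if a token was consumed, 0 if the pool was empty; |
         * the counter is never driven below zero. */ |
        return atomic_dec_if_positive(tokens) >= 0; |
} |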
/** |
* atomic_inc_short - increment of a short integer |
* @v: pointer of type short int |
* |
* Atomically adds 1 to @v |
* Returns the new value of @v |
*/ |
static inline short int atomic_inc_short(short int *v) |
{ |
asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v)); |
return *v; |
} |
#ifdef CONFIG_X86_64 |
/** |
* atomic_or_long - OR of two long integers |
* @v1: pointer to type unsigned long |
* @v2: value to OR into @v1 |
* |
* Atomically ORs @v2 into @v1 |
*/ |
static inline void atomic_or_long(unsigned long *v1, unsigned long v2) |
{ |
asm(LOCK_PREFIX "orq %1, %0" : "+m" (*v1) : "r" (v2)); |
} |
#endif |
/* These are x86-specific, used by some header files */ |
#define atomic_clear_mask(mask, addr) \ |
asm volatile(LOCK_PREFIX "andl %0,%1" \ |
: : "r" (~(mask)), "m" (*(addr)) : "memory") |
#define atomic_set_mask(mask, addr) \ |
asm volatile(LOCK_PREFIX "orl %0,%1" \ |
: : "r" ((unsigned)(mask)), "m" (*(addr)) \ |
: "memory") |
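These mask helpers operate on a plain 32-bit word, not an atomic_t. A hedged usage sketch (the flag bit and variable are hypothetical): |
#define MY_IRQ_PENDING  0x01 |
static unsigned int irq_status; |
static void mark_pending(void) |
{ |
        atomic_set_mask(MY_IRQ_PENDING, &irq_status);   /* irq_status |= bit, atomically */ |
} |
static void clear_pending(void) |
{ |
        atomic_clear_mask(MY_IRQ_PENDING, &irq_status); /* irq_status &= ~bit, atomically */ |
} |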
/* Atomic operations are already serializing on x86 */ |
#define smp_mb__before_atomic_dec() barrier() |
#define smp_mb__after_atomic_dec() barrier() |
#define smp_mb__before_atomic_inc() barrier() |
#define smp_mb__after_atomic_inc() barrier() |
//#include <asm-generic/atomic-long.h> |
#endif /* _ASM_X86_ATOMIC_H */ |
/drivers/include/linux/asm/cmpxchg_32.h |
---|
26,23 → 26,32 |
__typeof(*(ptr)) __x = (x); \ |
switch (size) { \ |
case 1: \ |
asm volatile("xchgb %b0,%1" \ |
: "=q" (__x) \ |
: "m" (*__xg(ptr)), "0" (__x) \ |
{ \ |
volatile u8 *__ptr = (volatile u8 *)(ptr); \ |
asm volatile("xchgb %0,%1" \ |
: "=q" (__x), "+m" (*__ptr) \ |
: "0" (__x) \ |
: "memory"); \ |
break; \ |
} \ |
case 2: \ |
asm volatile("xchgw %w0,%1" \ |
: "=r" (__x) \ |
: "m" (*__xg(ptr)), "0" (__x) \ |
{ \ |
volatile u16 *__ptr = (volatile u16 *)(ptr); \ |
asm volatile("xchgw %0,%1" \ |
: "=r" (__x), "+m" (*__ptr) \ |
: "0" (__x) \ |
: "memory"); \ |
break; \ |
} \ |
case 4: \ |
{ \ |
volatile u32 *__ptr = (volatile u32 *)(ptr); \ |
asm volatile("xchgl %0,%1" \ |
: "=r" (__x) \ |
: "m" (*__xg(ptr)), "0" (__x) \ |
: "=r" (__x), "+m" (*__ptr) \ |
: "0" (__x) \ |
: "memory"); \ |
break; \ |
} \ |
default: \ |
__xchg_wrong_size(); \ |
} \ |
53,60 → 62,33 |
__xchg((v), (ptr), sizeof(*ptr)) |
/* |
* The semantics of XCHGCMP8B are a bit strange, this is why |
* there is a loop and the loading of %%eax and %%edx has to |
* be inside. This inlines well in most cases, the cached |
* cost is around ~38 cycles. (in the future we might want |
* to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that |
* might have an implicit FPU-save as a cost, so it's not |
* clear which path to go.) |
* CMPXCHG8B only writes to the target if we had the previous |
* value in registers, otherwise it acts as a read and gives us the |
* "new previous" value. That is why there is a loop. Preloading |
* EDX:EAX is a performance optimization: in the common case it means |
* we need only one locked operation. |
* |
* cmpxchg8b must be used with the lock prefix here to allow |
* the instruction to be executed atomically, see page 3-102 |
* of the instruction set reference 24319102.pdf. We need |
* the reader side to see the coherent 64bit value. |
* A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very |
* least an FPU save and/or %cr0.ts manipulation. |
* |
* cmpxchg8b must be used with the lock prefix here to allow the |
* instruction to be executed atomically. We need to have the reader |
* side to see the coherent 64bit value. |
*/ |
static inline void __set_64bit(unsigned long long *ptr, |
unsigned int low, unsigned int high) |
static inline void set_64bit(volatile u64 *ptr, u64 value) |
{ |
u32 low = value; |
u32 high = value >> 32; |
u64 prev = *ptr; |
asm volatile("\n1:\t" |
"movl (%0), %%eax\n\t" |
"movl 4(%0), %%edx\n\t" |
LOCK_PREFIX "cmpxchg8b (%0)\n\t" |
LOCK_PREFIX "cmpxchg8b %0\n\t" |
"jnz 1b" |
: /* no outputs */ |
: "D"(ptr), |
"b"(low), |
"c"(high) |
: "ax", "dx", "memory"); |
: "=m" (*ptr), "+A" (prev) |
: "b" (low), "c" (high) |
: "memory"); |
} |
static inline void __set_64bit_constant(unsigned long long *ptr, |
unsigned long long value) |
{ |
__set_64bit(ptr, (unsigned int)value, (unsigned int)(value >> 32)); |
} |
#define ll_low(x) *(((unsigned int *)&(x)) + 0) |
#define ll_high(x) *(((unsigned int *)&(x)) + 1) |
static inline void __set_64bit_var(unsigned long long *ptr, |
unsigned long long value) |
{ |
__set_64bit(ptr, ll_low(value), ll_high(value)); |
} |
#define set_64bit(ptr, value) \ |
(__builtin_constant_p((value)) \ |
? __set_64bit_constant((ptr), (value)) \ |
: __set_64bit_var((ptr), (value))) |
#define _set_64bit(ptr, value) \ |
(__builtin_constant_p(value) \ |
? __set_64bit(ptr, (unsigned int)(value), \ |
(unsigned int)((value) >> 32)) \ |
: __set_64bit(ptr, ll_low((value)), ll_high((value)))) |
extern void __cmpxchg_wrong_size(void); |
/* |
121,23 → 103,32 |
__typeof__(*(ptr)) __new = (new); \ |
switch (size) { \ |
case 1: \ |
asm volatile(lock "cmpxchgb %b1,%2" \ |
: "=a"(__ret) \ |
: "q"(__new), "m"(*__xg(ptr)), "0"(__old) \ |
{ \ |
volatile u8 *__ptr = (volatile u8 *)(ptr); \ |
asm volatile(lock "cmpxchgb %2,%1" \ |
: "=a" (__ret), "+m" (*__ptr) \ |
: "q" (__new), "0" (__old) \ |
: "memory"); \ |
break; \ |
} \ |
case 2: \ |
asm volatile(lock "cmpxchgw %w1,%2" \ |
: "=a"(__ret) \ |
: "r"(__new), "m"(*__xg(ptr)), "0"(__old) \ |
{ \ |
volatile u16 *__ptr = (volatile u16 *)(ptr); \ |
asm volatile(lock "cmpxchgw %2,%1" \ |
: "=a" (__ret), "+m" (*__ptr) \ |
: "r" (__new), "0" (__old) \ |
: "memory"); \ |
break; \ |
} \ |
case 4: \ |
asm volatile(lock "cmpxchgl %1,%2" \ |
: "=a"(__ret) \ |
: "r"(__new), "m"(*__xg(ptr)), "0"(__old) \ |
{ \ |
volatile u32 *__ptr = (volatile u32 *)(ptr); \ |
asm volatile(lock "cmpxchgl %2,%1" \ |
: "=a" (__ret), "+m" (*__ptr) \ |
: "r" (__new), "0" (__old) \ |
: "memory"); \ |
break; \ |
} \ |
default: \ |
__cmpxchg_wrong_size(); \ |
} \ |
175,31 → 166,27 |
(unsigned long long)(n))) |
#endif |
static inline unsigned long long __cmpxchg64(volatile void *ptr, |
unsigned long long old, |
unsigned long long new) |
static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new) |
{ |
unsigned long long prev; |
asm volatile(LOCK_PREFIX "cmpxchg8b %3" |
: "=A"(prev) |
: "b"((unsigned long)new), |
"c"((unsigned long)(new >> 32)), |
"m"(*__xg(ptr)), |
u64 prev; |
asm volatile(LOCK_PREFIX "cmpxchg8b %1" |
: "=A" (prev), |
"+m" (*ptr) |
: "b" ((u32)new), |
"c" ((u32)(new >> 32)), |
"0"(old) |
: "memory"); |
return prev; |
} |
static inline unsigned long long __cmpxchg64_local(volatile void *ptr, |
unsigned long long old, |
unsigned long long new) |
static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new) |
{ |
unsigned long long prev; |
asm volatile("cmpxchg8b %3" |
: "=A"(prev) |
: "b"((unsigned long)new), |
"c"((unsigned long)(new >> 32)), |
"m"(*__xg(ptr)), |
u64 prev; |
asm volatile("cmpxchg8b %1" |
: "=A" (prev), |
"+m" (*ptr) |
: "b" ((u32)new), |
"c" ((u32)(new >> 32)), |
"0"(old) |
: "memory"); |
return prev; |
212,6 → 199,24 |
* a function for each of the sizes we support. |
*/ |
extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8); |
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16); |
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32); |
static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old, |
unsigned long new, int size) |
{ |
switch (size) { |
case 1: |
return cmpxchg_386_u8(ptr, old, new); |
case 2: |
return cmpxchg_386_u16(ptr, old, new); |
case 4: |
return cmpxchg_386_u32(ptr, old, new); |
} |
return old; |
} |
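On top of cmpxchg() one can build arbitrary lock-free read-modify-write updates; the 386 fallback above keeps the same semantics via cmpxchg_386_u*(). An illustrative sketch (the function and its purpose are made up): |
static inline void set_low_byte(unsigned int *flags, unsigned char val) |
{ |
        unsigned int old, new; |
        /* Retry if another CPU changed the word between the read and |
         * the cmpxchg; only the low byte is replaced. */ |
        do { |
                old = *flags; |
                new = (old & ~0xffU) | val; |
        } while (cmpxchg(flags, old, new) != old); |
} |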
#define cmpxchg(ptr, o, n) \ |
({ \ |
__typeof__(*(ptr)) __ret; \ |
236,14 → 241,13 |
* to simulate the cmpxchg8b on the 80386 and 80486 CPU. |
*/ |
extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64); |
#define cmpxchg64(ptr, o, n) \ |
({ \ |
__typeof__(*(ptr)) __ret; \ |
__typeof__(*(ptr)) __old = (o); \ |
__typeof__(*(ptr)) __new = (n); \ |
alternative_io("call cmpxchg8b_emu", \ |
alternative_io(LOCK_PREFIX_HERE \ |
"call cmpxchg8b_emu", \ |
"lock; cmpxchg8b (%%esi)" , \ |
X86_FEATURE_CX8, \ |
"=A" (__ret), \ |
254,20 → 258,20 |
__ret; }) |
#define cmpxchg64_local(ptr, o, n) \ |
({ \ |
__typeof__(*(ptr)) __ret; \ |
if (likely(boot_cpu_data.x86 > 4)) \ |
__ret = (__typeof__(*(ptr)))__cmpxchg64_local((ptr), \ |
(unsigned long long)(o), \ |
(unsigned long long)(n)); \ |
else \ |
__ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr), \ |
(unsigned long long)(o), \ |
(unsigned long long)(n)); \ |
__ret; \ |
}) |
__typeof__(*(ptr)) __old = (o); \ |
__typeof__(*(ptr)) __new = (n); \ |
alternative_io("call cmpxchg8b_emu", \ |
"cmpxchg8b (%%esi)" , \ |
X86_FEATURE_CX8, \ |
"=A" (__ret), \ |
"S" ((ptr)), "0" (__old), \ |
"b" ((unsigned int)__new), \ |
"c" ((unsigned int)(__new>>32)) \ |
: "memory"); \ |
__ret; }) |
#endif |
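With cmpxchg64()/cmpxchg64_local() a 32-bit kernel can still update 64-bit values atomically, since plain 64-bit loads and stores are not atomic on i386. A hedged sketch of a 64-bit counter built on cmpxchg64() (the helper is illustrative, not part of this header): |
static inline u64 counter_add(u64 *ctr, u64 delta) |
{ |
        u64 old = *ctr; /* may be stale or torn; corrected by the loop */ |
        u64 prev; |
        for (;;) { |
                prev = cmpxchg64(ctr, old, old + delta); |
                if (prev == old) |
                        break; |
                old = prev;     /* cmpxchg64() returned the real current value */ |
        } |
        return old + delta; |
} |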