#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H

/*
 * Note: if you use set_64bit(), __cmpxchg64(), or their variants,
 *       you need to test for the CX8 (cmpxchg8b) feature in boot_cpu_data.
 */
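/*
 * A minimal sketch of such a test (illustrative only; cpu_has_cx8 is
 * the cmpxchg8b feature flag this header itself uses further down):
 *
 *	if (cpu_has_cx8)
 *		old = __cmpxchg64(ptr, old, new);
 *	else
 *		... fall back to a lock-protected read-modify-write ...
 */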

/*
 * CMPXCHG8B only writes to the target if we had the previous
 * value in registers, otherwise it acts as a read and gives us the
 * "new previous" value.  That is why there is a loop.  Preloading
 * EDX:EAX is a performance optimization: in the common case it means
 * we need only one locked operation.
 *
 * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
 * least an FPU save and/or %cr0.ts manipulation.
 *
 * cmpxchg8b must be used with the lock prefix here to allow the
 * instruction to be executed atomically.  We need the reader side to
 * see a coherent 64-bit value.
 */
static inline void set_64bit(volatile u64 *ptr, u64 value)
{
	u32 low  = value;
	u32 high = value >> 32;
	u64 prev = *ptr;

	asm volatile("\n1:\t"
		     LOCK_PREFIX "cmpxchg8b %0\n\t"
		     "jnz 1b"
		     : "=m" (*ptr), "+A" (prev)
		     : "b" (low), "c" (high)
		     : "memory");
}
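/*
 * Usage sketch (illustrative; shared_ns and now_ns are hypothetical
 * names, not part of this header): publish a 64-bit value so that a
 * concurrent reader using cmpxchg8b, or cmpxchg64() below, never
 * observes a torn, half-written value:
 *
 *	set_64bit(&shared_ns, now_ns);
 *
 * A plain 64-bit store would be split into two 32-bit stores on x86-32.
 */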

#ifdef CONFIG_X86_CMPXCHG64
#define cmpxchg64(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
					 (unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
					       (unsigned long long)(n)))
#endif

/* Atomic 64-bit compare-and-exchange; returns the value previously in *ptr. */
static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	asm volatile(LOCK_PREFIX "cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}

/*
 * Same as __cmpxchg64(), but without the lock prefix: atomic only with
 * respect to the local CPU (e.g. for per-CPU data), not against other
 * processors.
 */
static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	asm volatile("cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}
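/*
 * A typical retry loop built on cmpxchg64() (an illustrative sketch;
 * counter_add64 is a hypothetical helper, not part of this header).
 * cmpxchg64() returns the value found in memory, so the loop succeeds
 * exactly when that value matches our last snapshot.  The initial
 * read of *counter may be torn, but that only costs one extra retry:
 *
 *	static void counter_add64(volatile u64 *counter, u64 delta)
 *	{
 *		u64 cur = *counter, old;
 *
 *		while ((old = cmpxchg64(counter, cur, cur + delta)) != cur)
 *			cur = old;	// lost a race: retry with fresh value
 *	}
 */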

#ifndef CONFIG_X86_CMPXCHG64
/*
 * When building a kernel capable of running on the 80386 and 80486,
 * cmpxchg8b may have to be emulated in software, since those CPUs
 * lack the instruction.  alternative_io() patches the call to the
 * out-of-line cmpxchg8b_emu helper into a native cmpxchg8b at boot
 * on CPUs that have X86_FEATURE_CX8; cmpxchg8b_emu expects the
 * pointer in %esi, hence the "S" constraint below.
 */

#define cmpxchg64(ptr, o, n)					\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io(LOCK_PREFIX_HERE				\
			"call cmpxchg8b_emu",			\
			"lock; cmpxchg8b (%%esi)",		\
		       X86_FEATURE_CX8,				\
		       "=A" (__ret),				\
		       "S" ((ptr)), "0" (__old),		\
		       "b" ((unsigned int)__new),		\
		       "c" ((unsigned int)(__new>>32))		\
		       : "memory");				\
	__ret; })


#define cmpxchg64_local(ptr, o, n)				\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io("call cmpxchg8b_emu",			\
		       "cmpxchg8b (%%esi)",			\
		       X86_FEATURE_CX8,				\
		       "=A" (__ret),				\
		       "S" ((ptr)), "0" (__old),		\
		       "b" ((unsigned int)__new),		\
		       "c" ((unsigned int)(__new>>32))		\
		       : "memory");				\
	__ret; })

#endif

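/*
 * cmpxchg_double() on 32-bit x86 operates on two adjacent 32-bit words,
 * which is exactly one cmpxchg8b, so the facility is available precisely
 * on CPUs that have CX8.
 */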
#define system_has_cmpxchg_double() cpu_has_cx8

#endif /* _ASM_X86_CMPXCHG_32_H */