#ifndef ASM_X86_CMPXCHG_H
#define ASM_X86_CMPXCHG_H
 
#include <linux/compiler.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h> /* Provides LOCK_PREFIX */
 
#define __HAVE_ARCH_CMPXCHG 1
 
/*
 * Non-existent functions to indicate usage errors at link time
 * (or compile-time if the compiler implements __compiletime_error()).
 */
extern void __xchg_wrong_size(void)
	__compiletime_error("Bad argument size for xchg");
extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");
extern void __xadd_wrong_size(void)
	__compiletime_error("Bad argument size for xadd");
extern void __add_wrong_size(void)
	__compiletime_error("Bad argument size for add");
 
/*
 * Constants for operation sizes. On 32-bit, the 64-bit size is set to
 * -1 because sizeof will never return -1, thereby making those switch
 * case statements guaranteed dead code which the compiler will
 * eliminate, and allowing the "missing symbol in the default case" to
 * indicate a usage error.
 */
#define __X86_CASE_B	1
#define __X86_CASE_W	2
#define __X86_CASE_L	4
#ifdef CONFIG_64BIT
#define __X86_CASE_Q	8
#else
#define	__X86_CASE_Q	-1		/* sizeof will never return -1 */
#endif
 
/*
 * An exchange-type operation, which takes a value and a pointer, and
 * returns the old value.
 */
#define __xchg_op(ptr, arg, op, lock)					\
	({								\
	        __typeof__ (*(ptr)) __ret = (arg);			\
		switch (sizeof(*(ptr))) {				\
		case __X86_CASE_B:					\
			asm volatile (lock #op "b %b0, %1\n"		\
				      : "+q" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_W:					\
			asm volatile (lock #op "w %w0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_L:					\
			asm volatile (lock #op "l %0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_Q:					\
			asm volatile (lock #op "q %q0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		default:						\
			__ ## op ## _wrong_size();			\
		}							\
		__ret;							\
	})
 
/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Since this is generally used to protect other memory information, we
 * use "asm volatile" and "memory" clobbers to prevent gcc from moving
 * information around.
 */
#define xchg(ptr, v)	__xchg_op((ptr), (v), xchg, "")
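
/*
 * Illustrative sketch (not part of the original header): a common use of
 * xchg() is to hand off a value atomically and act on whatever was there
 * before. The names pending_work and take_pending are hypothetical and
 * exist only for this example.
 *
 *	static void *pending_work;
 *
 *	static void *take_pending(void)
 *	{
 *		return xchg(&pending_work, NULL);
 *	}
 *
 * take_pending() returns the previously queued pointer and leaves NULL
 * behind, with no window in which two callers can observe the same value.
 */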
 
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock)			\
({									\
	__typeof__(*(ptr)) __ret;					\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	switch (size) {							\
	case __X86_CASE_B:						\
	{								\
		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
		asm volatile(lock "cmpxchgb %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "q" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_W:						\
	{								\
		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
		asm volatile(lock "cmpxchgw %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_L:						\
	{								\
		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
		asm volatile(lock "cmpxchgl %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_Q:						\
	{								\
		volatile u64 *__ptr = (volatile u64 *)(ptr);		\
		asm volatile(lock "cmpxchgq %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__ret;								\
})
 
#define __cmpxchg(ptr, old, new, size)					\
	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

#define __sync_cmpxchg(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

#define __cmpxchg_local(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "")

#ifdef CONFIG_X86_32
# include <asm/cmpxchg_32.h>
#else
# include <asm/cmpxchg_64.h>
#endif
 
#define cmpxchg(ptr, old, new)						\
	__cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define sync_cmpxchg(ptr, old, new)					\
	__sync_cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define cmpxchg_local(ptr, old, new)					\
	__cmpxchg_local(ptr, old, new, sizeof(*(ptr)))
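
/*
 * Illustrative sketch (not part of the original header): cmpxchg() used
 * as a try-acquire. The names owner and try_claim are hypothetical.
 *
 *	static unsigned long owner;
 *
 *	static bool try_claim(unsigned long id)
 *	{
 *		return cmpxchg(&owner, 0UL, id) == 0UL;
 *	}
 *
 * The store of id happens only if owner still held 0; success is detected
 * by comparing the returned (previous) value with the expected old value,
 * exactly as the comment above __raw_cmpxchg() describes.
 */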
 
/*
 * xadd() adds "inc" to "*ptr" and atomically returns the previous
 * value of "*ptr".
 *
 * xadd() is locked when multiple CPUs are online
 * xadd_sync() is always locked
 * xadd_local() is never locked
 */
#define __xadd(ptr, inc, lock)	__xchg_op((ptr), (inc), xadd, lock)
#define xadd(ptr, inc)		__xadd((ptr), (inc), LOCK_PREFIX)
#define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
#define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")
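
/*
 * Illustrative sketch (not part of the original header): xadd() is a
 * fetch-and-add, e.g. for handing out unique, ordered ticket numbers.
 * The names next_ticket and take_ticket are hypothetical.
 *
 *	static unsigned int next_ticket;
 *
 *	static unsigned int take_ticket(void)
 *	{
 *		return xadd(&next_ticket, 1);
 *	}
 *
 * Each caller gets the value as it was before its own increment, so
 * concurrent callers receive distinct, monotonically increasing tickets.
 */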
 
#define __add(ptr, inc, lock)						\
	({								\
	        __typeof__ (*(ptr)) __ret = (inc);			\
		switch (sizeof(*(ptr))) {				\
		case __X86_CASE_B:					\
			asm volatile (lock "addb %b1, %0\n"		\
				      : "+m" (*(ptr)) : "qi" (inc)	\
				      : "memory", "cc");		\
			break;						\
		case __X86_CASE_W:					\
			asm volatile (lock "addw %w1, %0\n"		\
				      : "+m" (*(ptr)) : "ri" (inc)	\
				      : "memory", "cc");		\
			break;						\
		case __X86_CASE_L:					\
			asm volatile (lock "addl %1, %0\n"		\
				      : "+m" (*(ptr)) : "ri" (inc)	\
				      : "memory", "cc");		\
			break;						\
		case __X86_CASE_Q:					\
			asm volatile (lock "addq %1, %0\n"		\
				      : "+m" (*(ptr)) : "ri" (inc)	\
				      : "memory", "cc");		\
			break;						\
		default:						\
			__add_wrong_size();				\
		}							\
		__ret;							\
	})
 
/*
 * add_*() adds "inc" to "*ptr"
 *
 * __add() takes a lock prefix
 * add_smp() is locked when multiple CPUs are online
 * add_sync() is always locked
 */
#define add_smp(ptr, inc)	__add((ptr), (inc), LOCK_PREFIX)
#define add_sync(ptr, inc)	__add((ptr), (inc), "lock; ")
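
/*
 * Illustrative sketch (not part of the original header): add_smp() fits
 * cases where the previous value is not needed, avoiding the register
 * round-trip that xadd() implies. The name stat_bytes is hypothetical.
 *
 *	static unsigned long stat_bytes;
 *
 *	static void account_bytes(unsigned long n)
 *	{
 *		add_smp(&stat_bytes, n);
 *	}
 *
 * The addition carries the LOCK prefix only when multiple CPUs are
 * online, matching the add_smp() description above.
 */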
 
#define __cmpxchg_double(pfx, p1, p2, o1, o2, n1, n2)			\
({									\
	bool __ret;							\
	__typeof__(*(p1)) __old1 = (o1), __new1 = (n1);			\
	__typeof__(*(p2)) __old2 = (o2), __new2 = (n2);			\
	BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long));			\
	BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long));			\
	VM_BUG_ON((unsigned long)(p1) % (2 * sizeof(long)));		\
	VM_BUG_ON((unsigned long)((p1) + 1) != (unsigned long)(p2));	\
	asm volatile(pfx "cmpxchg%c4b %2; sete %0"			\
		     : "=a" (__ret), "+d" (__old2),			\
		       "+m" (*(p1)), "+m" (*(p2))			\
		     : "i" (2 * sizeof(long)), "a" (__old1),		\
		       "b" (__new1), "c" (__new2));			\
	__ret;								\
})
 
#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
	__cmpxchg_double(LOCK_PREFIX, p1, p2, o1, o2, n1, n2)

#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
	__cmpxchg_double(, p1, p2, o1, o2, n1, n2)
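
/*
 * Illustrative sketch (not part of the original header): cmpxchg_double()
 * replaces two adjacent, suitably aligned long-sized words in one shot,
 * e.g. a pointer plus a generation tag to sidestep ABA. The struct and
 * names below are hypothetical.
 *
 *	struct tagged_ptr {
 *		void		*ptr;
 *		unsigned long	tag;
 *	} __attribute__((aligned(2 * sizeof(long))));
 *
 *	static bool replace_tagged(struct tagged_ptr *t, void *old_p,
 *				   unsigned long old_tag, void *new_p)
 *	{
 *		return cmpxchg_double(&t->ptr, &t->tag,
 *				      old_p, old_tag,
 *				      new_p, old_tag + 1);
 *	}
 *
 * The exchange succeeds (returns true) only if both words still held the
 * expected old values; the size and alignment requirements are the ones
 * enforced by the BUILD_BUG_ON/VM_BUG_ON checks in __cmpxchg_double().
 */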
 
#endif	/* ASM_X86_CMPXCHG_H */