#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H

#include <linux/bitops.h> /* for LOCK_PREFIX */

/*
 * Note: if you use set_64bit(), __cmpxchg64(), or their variants,
 *       you need to test for the feature in boot_cpu_data.
 */

extern void __xchg_wrong_size(void);

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has a side effect, so the volatile attribute is necessary;
 *	   without it the primitive would in general be invalid, since *ptr
 *	   is an output argument. --ANK
 */

struct __xchg_dummy {
	unsigned long a[100];
};
#define __xg(x) ((struct __xchg_dummy *)(x))

#define __xchg(x, ptr, size)						\
({									\
	__typeof(*(ptr)) __x = (x);					\
	switch (size) {							\
	case 1:								\
		asm volatile("xchgb %b0,%1"				\
			     : "=q" (__x)				\
			     : "m" (*__xg(ptr)), "0" (__x)		\
			     : "memory");				\
		break;							\
	case 2:								\
		asm volatile("xchgw %w0,%1"				\
			     : "=r" (__x)				\
			     : "m" (*__xg(ptr)), "0" (__x)		\
			     : "memory");				\
		break;							\
	case 4:								\
		asm volatile("xchgl %0,%1"				\
			     : "=r" (__x)				\
			     : "m" (*__xg(ptr)), "0" (__x)		\
			     : "memory");				\
		break;							\
	default:							\
		__xchg_wrong_size();					\
	}								\
	__x;								\
})

#define xchg(ptr, v)							\
	__xchg((v), (ptr), sizeof(*ptr))
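
/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * helper showing how xchg() might be used to atomically publish a new
 * value into a shared slot and retrieve the value it replaced.
 */
static inline unsigned long example_swap_slot(unsigned long *slot,
					      unsigned long newval)
{
	/* xchg is implicitly locked on x86, so no LOCK_PREFIX is needed. */
	return xchg(slot, newval);
}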

/*
 * The semantics of CMPXCHG8B are a bit strange, which is why
 * there is a loop and the loading of %%eax and %%edx has to
 * be inside it. This inlines well in most cases; the cached
 * cost is around ~38 cycles. (In the future we might want
 * to do a SIMD/3DNow!/MMX/FPU 64-bit store here, but that
 * might have an implicit FPU save as a cost, so it's not
 * clear which path to take.)
 *
 * cmpxchg8b must be used with the lock prefix here to allow
 * the instruction to be executed atomically, see page 3-102
 * of the instruction set reference 24319102.pdf. We need
 * the reader side to see a coherent 64-bit value.
 */
static inline void __set_64bit(unsigned long long *ptr,
			       unsigned int low, unsigned int high)
{
	asm volatile("\n1:\t"
		     "movl (%0), %%eax\n\t"
		     "movl 4(%0), %%edx\n\t"
		     LOCK_PREFIX "cmpxchg8b (%0)\n\t"
		     "jnz 1b"
		     : /* no outputs */
		     : "D"(ptr),
		       "b"(low),
		       "c"(high)
		     : "ax", "dx", "memory");
}

static inline void __set_64bit_constant(unsigned long long *ptr,
					unsigned long long value)
{
	__set_64bit(ptr, (unsigned int)value, (unsigned int)(value >> 32));
}

#define ll_low(x)	*(((unsigned int *)&(x)) + 0)
#define ll_high(x)	*(((unsigned int *)&(x)) + 1)

static inline void __set_64bit_var(unsigned long long *ptr,
				   unsigned long long value)
{
	__set_64bit(ptr, ll_low(value), ll_high(value));
}

#define set_64bit(ptr, value)			\
	(__builtin_constant_p((value))		\
	 ? __set_64bit_constant((ptr), (value))	\
	 : __set_64bit_var((ptr), (value)))

#define _set_64bit(ptr, value)						\
	(__builtin_constant_p(value)					\
	 ? __set_64bit(ptr, (unsigned int)(value),			\
		       (unsigned int)((value) >> 32))			\
	 : __set_64bit(ptr, ll_low((value)), ll_high((value))))
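
/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * helper using set_64bit() so that concurrent readers of a 64-bit field
 * never observe a torn, half-written value.  Remember the note above:
 * the CX8 feature must be present in boot_cpu_data.
 */
static inline void example_publish_u64(unsigned long long *field,
				       unsigned long long value)
{
	set_64bit(field, value);
}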

extern void __cmpxchg_wrong_size(void);

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock)			\
({									\
	__typeof__(*(ptr)) __ret;					\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	switch (size) {							\
	case 1:								\
		asm volatile(lock "cmpxchgb %b1,%2"			\
			     : "=a"(__ret)				\
			     : "q"(__new), "m"(*__xg(ptr)), "0"(__old)	\
			     : "memory");				\
		break;							\
	case 2:								\
		asm volatile(lock "cmpxchgw %w1,%2"			\
			     : "=a"(__ret)				\
			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old)	\
			     : "memory");				\
		break;							\
	case 4:								\
		asm volatile(lock "cmpxchgl %1,%2"			\
			     : "=a"(__ret)				\
			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old)	\
			     : "memory");				\
		break;							\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__ret;								\
})

#define __cmpxchg(ptr, old, new, size)					\
	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

#define __sync_cmpxchg(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

#define __cmpxchg_local(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "")

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1

#define cmpxchg(ptr, old, new)						\
	__cmpxchg((ptr), (old), (new), sizeof(*ptr))

#define sync_cmpxchg(ptr, old, new)					\
	__sync_cmpxchg((ptr), (old), (new), sizeof(*ptr))

#define cmpxchg_local(ptr, old, new)					\
	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
#endif

#ifdef CONFIG_X86_CMPXCHG64
#define cmpxchg64(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
					 (unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
					       (unsigned long long)(n)))
#endif

static inline unsigned long long __cmpxchg64(volatile void *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	unsigned long long prev;
	asm volatile(LOCK_PREFIX "cmpxchg8b %3"
		     : "=A"(prev)
		     : "b"((unsigned long)new),
		       "c"((unsigned long)(new >> 32)),
		       "m"(*__xg(ptr)),
		       "0"(old)
		     : "memory");
	return prev;
}

static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
						   unsigned long long old,
						   unsigned long long new)
{
	unsigned long long prev;
	asm volatile("cmpxchg8b %3"
		     : "=A"(prev)
		     : "b"((unsigned long)new),
		       "c"((unsigned long)(new >> 32)),
		       "m"(*__xg(ptr)),
		       "0"(old)
		     : "memory");
	return prev;
}
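
/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * helper built on __cmpxchg64(), showing a 64-bit compare-and-swap on a
 * 32-bit kernel.  Returns nonzero when the swap succeeded.
 */
static inline int example_try_update_u64(unsigned long long *val,
					 unsigned long long expected,
					 unsigned long long desired)
{
	/* __cmpxchg64() returns the value *val held before the attempt. */
	return __cmpxchg64(val, expected, desired) == expected;
}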

#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable of running on an 80386: it may be necessary
 * to simulate the cmpxchg on the 80386 CPU. For that purpose we define
 * a function for each of the sizes we support.
 */

#define cmpxchg(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))__cmpxchg((ptr),			\
			(unsigned long)(o), (unsigned long)(n),		\
			sizeof(*(ptr)));				\
	__ret;								\
})
#define cmpxchg_local(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr),		\
			(unsigned long)(o), (unsigned long)(n),		\
			sizeof(*(ptr)));				\
	__ret;								\
})
#endif

#ifndef CONFIG_X86_CMPXCHG64
/*
 * Building a kernel capable of running on the 80386 and 80486: it may be
 * necessary to simulate the cmpxchg8b on the 80386 and 80486 CPU.
 */

extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);

#define cmpxchg64(ptr, o, n)					\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io("call cmpxchg8b_emu",			\
			"lock; cmpxchg8b (%%esi)",		\
		       X86_FEATURE_CX8,				\
		       "=A" (__ret),				\
		       "S" ((ptr)), "0" (__old),		\
		       "b" ((unsigned int)__new),		\
		       "c" ((unsigned int)(__new>>32))		\
		       : "memory");				\
	__ret; })

#define cmpxchg64_local(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 4))				\
		__ret = (__typeof__(*(ptr)))__cmpxchg64_local((ptr),	\
				(unsigned long long)(o),		\
				(unsigned long long)(n));		\
	else								\
		__ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr),	\
				(unsigned long long)(o),		\
				(unsigned long long)(n));		\
	__ret;								\
})

#endif
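
/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * compare-and-swap retry loop built on cmpxchg(), the usual pattern for
 * a lock-free read-modify-write update.
 */
static inline unsigned long example_atomic_add(unsigned long *counter,
					       unsigned long inc)
{
	unsigned long old, prev;

	do {
		old = *counter;		/* snapshot the current value */
		prev = cmpxchg(counter, old, old + inc);
	} while (prev != old);		/* raced with another writer: retry */

	return old + inc;
}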
#endif /* _ASM_X86_CMPXCHG_32_H */