#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H

#include <asm/alternative.h> /* for LOCK_PREFIX */

/*
 * Note: if you use set_64bit(), __cmpxchg64(), or their variants, you
 *       need to test for the feature in boot_cpu_data.
 */

extern void __xchg_wrong_size(void);

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has a side effect, so the volatile attribute is necessary;
 *	   strictly speaking the declaration is not quite right, since
 *	   *ptr is also an output argument. --ANK
 */

struct __xchg_dummy {
	unsigned long a[100];
};
#define __xg(x) ((struct __xchg_dummy *)(x))

#define __xchg(x, ptr, size)						\
({									\
	__typeof(*(ptr)) __x = (x);					\
	switch (size) {							\
	case 1:								\
	{								\
		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
		asm volatile("xchgb %0,%1"				\
			     : "=q" (__x), "+m" (*__ptr)		\
			     : "0" (__x)				\
			     : "memory");				\
		break;							\
	}								\
	case 2:								\
	{								\
		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
		asm volatile("xchgw %0,%1"				\
			     : "=r" (__x), "+m" (*__ptr)		\
			     : "0" (__x)				\
			     : "memory");				\
		break;							\
	}								\
	case 4:								\
	{								\
		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
		asm volatile("xchgl %0,%1"				\
			     : "=r" (__x), "+m" (*__ptr)		\
			     : "0" (__x)				\
			     : "memory");				\
		break;							\
	}								\
	default:							\
		__xchg_wrong_size();					\
	}								\
	__x;								\
})

#define xchg(ptr, v)							\
	__xchg((v), (ptr), sizeof(*ptr))
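
/*
 * Illustrative sketch (not part of the upstream header; the variable
 * below is hypothetical): xchg() atomically stores a new value and
 * returns the previous contents of *ptr, with the implicit lock of the
 * xchg instruction providing the ordering.
 *
 *	static u32 owner;
 *
 *	u32 prev = xchg(&owner, 1);	// publish 1, fetch old value
 *	if (prev == 0)
 *		;			// we were the first to claim it
 */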

/*
 * CMPXCHG8B only writes to the target if we had the previous
 * value in registers, otherwise it acts as a read and gives us the
 * "new previous" value.  That is why there is a loop.  Preloading
 * EDX:EAX is a performance optimization: in the common case it means
 * we need only one locked operation.
 *
 * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
 * least an FPU save and/or %cr0.ts manipulation.
 *
 * cmpxchg8b must be used with the lock prefix here so that the
 * instruction executes atomically; readers then always see a coherent
 * 64-bit value.
 */
static inline void set_64bit(volatile u64 *ptr, u64 value)
{
	u32 low  = value;
	u32 high = value >> 32;
	u64 prev = *ptr;

	asm volatile("\n1:\t"
		     LOCK_PREFIX "cmpxchg8b %0\n\t"
		     "jnz 1b"
		     : "=m" (*ptr), "+A" (prev)
		     : "b" (low), "c" (high)
		     : "memory");
}
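
/*
 * Illustrative sketch (not part of the upstream header; the variable is
 * hypothetical): a plain 64-bit store on 32-bit x86 may be split into
 * two 32-bit writes, so a concurrent reader could observe a torn value.
 * set_64bit() publishes the whole quadword in one atomic step:
 *
 *	static volatile u64 timestamp;
 *
 *	set_64bit(&timestamp, ((u64)seconds << 32) | ticks);
 */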

extern void __cmpxchg_wrong_size(void);

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock)			\
({									\
	__typeof__(*(ptr)) __ret;					\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	switch (size) {							\
	case 1:								\
	{								\
		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
		asm volatile(lock "cmpxchgb %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "q" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case 2:								\
	{								\
		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
		asm volatile(lock "cmpxchgw %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case 4:								\
	{								\
		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
		asm volatile(lock "cmpxchgl %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__ret;								\
})

#define __cmpxchg(ptr, old, new, size)					\
	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

#define __sync_cmpxchg(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

#define __cmpxchg_local(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "")

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1

#define cmpxchg(ptr, old, new)						\
	__cmpxchg((ptr), (old), (new), sizeof(*ptr))

#define sync_cmpxchg(ptr, old, new)					\
	__sync_cmpxchg((ptr), (old), (new), sizeof(*ptr))

#define cmpxchg_local(ptr, old, new)					\
	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
#endif
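
/*
 * Illustrative sketch (not part of the upstream header; the counter is
 * hypothetical): the classic compare-and-swap retry loop.  cmpxchg()
 * returns the value actually found in memory, so success is detected by
 * comparing the return value with the expected old value.
 *
 *	static u32 counter;
 *
 *	u32 old, new;
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 */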

#ifdef CONFIG_X86_CMPXCHG64
#define cmpxchg64(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
					 (unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
					       (unsigned long long)(n)))
#endif

static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	asm volatile(LOCK_PREFIX "cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}

static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	asm volatile("cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}
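
/*
 * Illustrative sketch (not part of the upstream header; the sequence
 * word is hypothetical): cmpxchg64() lets 32-bit code update a 64-bit
 * quantity atomically via cmpxchg8b, using the same retry pattern as
 * cmpxchg() above.
 *
 *	static u64 seq;
 *
 *	u64 old, new;
 *	do {
 *		old = seq;
 *		new = old + 1;
 *	} while (cmpxchg64(&seq, old, new) != old);
 */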

#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable of running on an 80386.  It may be necessary
 * to simulate the cmpxchg on the 80386 CPU.  For that purpose we define
 * a function for each of the sizes we support.
 */

extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);

static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
					unsigned long new, int size)
{
	switch (size) {
	case 1:
		return cmpxchg_386_u8(ptr, old, new);
	case 2:
		return cmpxchg_386_u16(ptr, old, new);
	case 4:
		return cmpxchg_386_u32(ptr, old, new);
	}
	return old;
}
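
/*
 * Note (not in the upstream header): the cmpxchg_386_u*() helpers are
 * expected to be provided out of line.  In upstream Linux they emulate
 * the compare-and-exchange with interrupts disabled, which suffices
 * because the 80386 lacks the cmpxchg instruction and pre-486 systems
 * are uniprocessor-only.  Whether this tree ships the same helpers is
 * not shown here.
 */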

#define cmpxchg(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))__cmpxchg((ptr),			\
			(unsigned long)(o), (unsigned long)(n),		\
			sizeof(*(ptr)));				\
	__ret;								\
})
#define cmpxchg_local(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr),		\
			(unsigned long)(o), (unsigned long)(n),		\
			sizeof(*(ptr)));				\
	__ret;								\
})
#endif

#ifndef CONFIG_X86_CMPXCHG64
/*
 * Building a kernel capable of running on an 80386 or 80486.  It may be
 * necessary to simulate the cmpxchg8b on the 80386 and 80486 CPU.
 */

#define cmpxchg64(ptr, o, n)					\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io(LOCK_PREFIX_HERE				\
			"call cmpxchg8b_emu",			\
			"lock; cmpxchg8b (%%esi)" ,		\
		       X86_FEATURE_CX8,				\
		       "=A" (__ret),				\
		       "S" ((ptr)), "0" (__old),		\
		       "b" ((unsigned int)__new),		\
		       "c" ((unsigned int)(__new>>32))		\
		       : "memory");				\
	__ret; })


#define cmpxchg64_local(ptr, o, n)				\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io("call cmpxchg8b_emu",			\
		       "cmpxchg8b (%%esi)" ,			\
		       X86_FEATURE_CX8,				\
		       "=A" (__ret),				\
		       "S" ((ptr)), "0" (__old),		\
		       "b" ((unsigned int)__new),		\
		       "c" ((unsigned int)(__new>>32))		\
		       : "memory");				\
	__ret; })
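
/*
 * How the two macros above work: alternative_io() patches the code at
 * boot.  When the CPU advertises X86_FEATURE_CX8, the "call cmpxchg8b_emu"
 * is replaced in place by the inline cmpxchg8b instruction; on CX8-less
 * CPUs the call remains and cmpxchg8b_emu carries out the 64-bit
 * compare-and-exchange in software.
 */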

#endif

#endif /* _ASM_X86_CMPXCHG_32_H */