#ifndef _ASM_X86_ATOMIC_H
#define _ASM_X86_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
//#include <asm/processor.h>
//#include <asm/alternative.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

#define ATOMIC_INIT(i)	{ (i) }

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
static inline int atomic_read(const atomic_t *v)
{
	return (*(volatile int *)&(v)->counter);
}

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
static inline void atomic_set(atomic_t *v, int i)
{
	v->counter = i;
}
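
/*
 * Usage sketch (illustrative, not part of the original header): a
 * hypothetical shared counter declared with ATOMIC_INIT() and accessed
 * through atomic_read()/atomic_set().
 *
 *	static atomic_t sample_count = ATOMIC_INIT(0);
 *
 *	void sample_reset(void)
 *	{
 *		atomic_set(&sample_count, 0);		// atomic store
 *	}
 *
 *	int sample_value(void)
 *	{
 *		return atomic_read(&sample_count);	// atomic load
 *	}
 */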

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "addl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i));
}

/**
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static inline void atomic_sub(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "subl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i));
}
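
/*
 * Usage sketch (illustrative): adjusting a hypothetical statistics
 * counter by a variable amount with atomic_add()/atomic_sub(); the
 * counter and len are made up for the example.
 *
 *	static atomic_t bytes_in_flight = ATOMIC_INIT(0);
 *
 *	atomic_add(len, &bytes_in_flight);	// on submit
 *	atomic_sub(len, &bytes_in_flight);	// on completion
 */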

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static inline int atomic_sub_and_test(int i, atomic_t *v)
{
	unsigned char c;

	asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
		     : "+m" (v->counter), "=qm" (c)
		     : "ir" (i) : "memory");
	return c;
}
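
/*
 * Usage sketch (illustrative): dropping a batch of hypothetical
 * references at once and tearing down only when the count reaches zero.
 *
 *	if (atomic_sub_and_test(nr_refs, &obj_refcount))
 *		destroy_object();		// hypothetical cleanup
 */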

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static inline void atomic_inc(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "incl %0"
		     : "+m" (v->counter));
}

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static inline void atomic_dec(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "decl %0"
		     : "+m" (v->counter));
}
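
/*
 * Usage sketch (illustrative): tracking how many hypothetical workers
 * are currently active; active_workers is made up for the example.
 *
 *	atomic_inc(&active_workers);	// worker starts
 *	do_work();
 *	atomic_dec(&active_workers);	// worker finishes
 */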

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static inline int atomic_dec_and_test(atomic_t *v)
{
	unsigned char c;

	asm volatile(LOCK_PREFIX "decl %0; sete %1"
		     : "+m" (v->counter), "=qm" (c)
		     : : "memory");
	return c != 0;
}
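
/*
 * Usage sketch (illustrative): the classic reference-count drop, where
 * the last holder releases the object; free_object() stands in for
 * whatever release routine the caller actually uses.
 *
 *	if (atomic_dec_and_test(&obj->refcount))
 *		free_object(obj);
 */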

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static inline int atomic_inc_and_test(atomic_t *v)
{
	unsigned char c;

	asm volatile(LOCK_PREFIX "incl %0; sete %1"
		     : "+m" (v->counter), "=qm" (c)
		     : : "memory");
	return c != 0;
}

/**
 * atomic_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static inline int atomic_add_negative(int i, atomic_t *v)
{
	unsigned char c;

	asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
		     : "+m" (v->counter), "=qm" (c)
		     : "ir" (i) : "memory");
	return c;
}
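
/*
 * Usage sketch (illustrative): applying a signed delta to a hypothetical
 * budget counter and reacting when it drops below zero; budget and
 * throttle() are made up for the example.
 *
 *	if (atomic_add_negative(delta, &budget))
 *		throttle();			// hypothetical slow path
 */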

/**
 * atomic_add_return - add integer and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	int __i;
#ifdef CONFIG_M386
	unsigned long flags;
	if (unlikely(boot_cpu_data.x86 <= 3))
		goto no_xadd;
#endif
	/* Modern 486+ processor */
	__i = i;
	asm volatile(LOCK_PREFIX "xaddl %0, %1"
		     : "+r" (i), "+m" (v->counter)
		     : : "memory");
	return i + __i;

#ifdef CONFIG_M386
no_xadd: /* Legacy 386 processor */
	raw_local_irq_save(flags);
	__i = atomic_read(v);
	atomic_set(v, i + __i);
	raw_local_irq_restore(flags);
	return i + __i;
#endif
}
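
/*
 * Usage sketch (illustrative): atomic_add_return() yields the value
 * after the addition, so a hypothetical sequence-number allocator can
 * bump and read the counter in one step; next_seq is made up here.
 *
 *	int seq = atomic_add_return(1, &next_seq);	// == new value
 */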

/**
 * atomic_sub_return - subtract integer and return
 * @v: pointer of type atomic_t
 * @i: integer value to subtract
 *
 * Atomically subtracts @i from @v and returns @v - @i
 */
static inline int atomic_sub_return(int i, atomic_t *v)
{
	return atomic_add_return(-i, v);
}

#define atomic_inc_return(v)  (atomic_add_return(1, v))
#define atomic_dec_return(v)  (atomic_sub_return(1, v))

/* Atomic compare-and-exchange: returns the value @v held before the operation. */
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return cmpxchg(&v->counter, old, new);
}

/* Atomic exchange: stores @new in @v and returns the previous value. */
static inline int atomic_xchg(atomic_t *v, int new)
{
	return xchg(&v->counter, new);
}
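
/*
 * Usage sketch (illustrative): the usual compare-and-swap retry loop
 * built on atomic_cmpxchg(); clamp_add() is a hypothetical helper that
 * adds to a counter but never lets it exceed a limit.
 *
 *	static int clamp_add(atomic_t *v, int a, int limit)
 *	{
 *		int old, new, cur = atomic_read(v);
 *
 *		for (;;) {
 *			new = cur + a;
 *			if (new > limit)
 *				new = limit;
 *			old = atomic_cmpxchg(v, cur, new);
 *			if (old == cur)		// nobody raced with us
 *				return new;
 *			cur = old;		// retry with the fresh value
 *		}
 *	}
 */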

/**
 * atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}
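
/*
 * Usage sketch (illustrative): taking a reference on a hypothetical
 * object only if it is not already being torn down (refcount at zero);
 * this is what the atomic_inc_not_zero() wrapper below expresses.
 *
 *	if (!atomic_add_unless(&obj->refcount, 1, 0))
 *		return NULL;	// object already dying, do not touch it
 */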

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

/*
 * atomic_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline int atomic_dec_if_positive(atomic_t *v)
{
	int c, old, dec;
	c = atomic_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}
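
/*
 * Usage sketch (illustrative): a hypothetical counting-semaphore style
 * "try down" that only succeeds while the count is still positive;
 * available_slots is made up for the example.
 *
 *	if (atomic_dec_if_positive(&available_slots) < 0)
 *		return -EBUSY;	// no slot taken, counter left untouched
 */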

/**
 * atomic_inc_short - increment of a short integer
 * @v: pointer to type short int
 *
 * Atomically adds 1 to @v.
 * Returns the new value of @v.
 */
static inline short int atomic_inc_short(short int *v)
{
	asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
	return *v;
}

#ifdef CONFIG_X86_64
/**
 * atomic_or_long - OR a value into a long integer
 * @v1: pointer to type unsigned long
 * @v2: unsigned long value to OR into *@v1
 *
 * Atomically ORs @v2 into *@v1.  There is no return value.
 */
static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
{
	asm(LOCK_PREFIX "orq %1, %0" : "+m" (*v1) : "r" (v2));
}
#endif

/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr)				\
	asm volatile(LOCK_PREFIX "andl %0,%1"			\
		     : : "r" (~(mask)), "m" (*(addr)) : "memory")

#define atomic_set_mask(mask, addr)				\
	asm volatile(LOCK_PREFIX "orl %0,%1"			\
		     : : "r" ((unsigned)(mask)), "m" (*(addr))	\
		     : "memory")

/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()
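
/*
 * Usage sketch (illustrative): on x86 the locked read-modify-write
 * instructions are already fully ordered, so these macros reduce to a
 * compiler barrier; portable callers still pair them with the atomic op,
 * e.g. (pending is a hypothetical counter):
 *
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&pending);
 */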


//#include <asm-generic/atomic-long.h>
#endif /* _ASM_X86_ATOMIC_H */