/* Atomic operations usable in machine independent code */
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
#include <asm/atomic.h>
#include <asm/barrier.h>

/*
 * Relaxed variants of xchg, cmpxchg and some atomic operations.
 *
 * We support four variants:
 *
 * - Fully ordered: The default implementation, no suffix required.
 * - Acquire: Provides ACQUIRE semantics, _acquire suffix.
 * - Release: Provides RELEASE semantics, _release suffix.
 * - Relaxed: No ordering guarantees, _relaxed suffix.
 *
 * For compound atomics performing both a load and a store, ACQUIRE
 * semantics apply only to the load and RELEASE semantics only to the
 * store portion of the operation. Note that a failed cmpxchg_acquire
 * does -not- imply any memory ordering constraints.
 *
 * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
 */

#ifndef atomic_read_acquire
#define  atomic_read_acquire(v)		smp_load_acquire(&(v)->counter)
#endif

#ifndef atomic_set_release
#define  atomic_set_release(v, i)	smp_store_release(&(v)->counter, (i))
#endif
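
/*
 * Usage sketch (illustrative only; 'payload', 'ready' and the helpers
 * are hypothetical): a writer publishes data with a RELEASE store and
 * a reader observes it with an ACQUIRE load, so neither the payload
 * store nor the payload load can be reordered across the respective
 * flag operation.
 *
 *	writer:	payload = compute();
 *		atomic_set_release(&ready, 1);
 *
 *	reader:	if (atomic_read_acquire(&ready))
 *			consume(payload);
 */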

/*
 * The idea here is to build acquire/release variants by adding explicit
 * barriers on top of the relaxed variant. In the case where the relaxed
 * variant is already fully ordered, no additional barriers are needed.
 */
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret  = op##_relaxed(args);		\
	smp_mb__after_atomic();						\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	smp_mb__before_atomic();					\
	op##_relaxed(args);						\
})

#define __atomic_op_fence(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret;				\
	smp_mb__before_atomic();					\
	__ret = op##_relaxed(args);					\
	smp_mb__after_atomic();						\
	__ret;								\
})
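
/*
 * For example, on an architecture that provides only
 * atomic_add_return_relaxed(), the generic definitions below make
 * atomic_add_return_acquire(i, v) expand (roughly) to
 *
 *	__ret = atomic_add_return_relaxed(i, v);
 *	smp_mb__after_atomic();
 *
 * while the fully ordered atomic_add_return(i, v) brackets the relaxed
 * operation with smp_mb__before_atomic()/smp_mb__after_atomic().
 */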

/* atomic_add_return_relaxed */
#ifndef atomic_add_return_relaxed
#define  atomic_add_return_relaxed	atomic_add_return
#define  atomic_add_return_acquire	atomic_add_return
#define  atomic_add_return_release	atomic_add_return

#else /* atomic_add_return_relaxed */

#ifndef atomic_add_return_acquire
#define  atomic_add_return_acquire(...)					\
	__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return_release
#define  atomic_add_return_release(...)					\
	__atomic_op_release(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return
#define  atomic_add_return(...)						\
	__atomic_op_fence(atomic_add_return, __VA_ARGS__)
#endif
#endif /* atomic_add_return_relaxed */

/* atomic_inc_return_relaxed */
#ifndef atomic_inc_return_relaxed
#define  atomic_inc_return_relaxed	atomic_inc_return
#define  atomic_inc_return_acquire	atomic_inc_return
#define  atomic_inc_return_release	atomic_inc_return

#else /* atomic_inc_return_relaxed */

#ifndef atomic_inc_return_acquire
#define  atomic_inc_return_acquire(...)					\
	__atomic_op_acquire(atomic_inc_return, __VA_ARGS__)
#endif

#ifndef atomic_inc_return_release
#define  atomic_inc_return_release(...)					\
	__atomic_op_release(atomic_inc_return, __VA_ARGS__)
#endif

#ifndef atomic_inc_return
#define  atomic_inc_return(...)						\
	__atomic_op_fence(atomic_inc_return, __VA_ARGS__)
#endif
#endif /* atomic_inc_return_relaxed */

/* atomic_sub_return_relaxed */
#ifndef atomic_sub_return_relaxed
#define  atomic_sub_return_relaxed	atomic_sub_return
#define  atomic_sub_return_acquire	atomic_sub_return
#define  atomic_sub_return_release	atomic_sub_return

#else /* atomic_sub_return_relaxed */

#ifndef atomic_sub_return_acquire
#define  atomic_sub_return_acquire(...)					\
	__atomic_op_acquire(atomic_sub_return, __VA_ARGS__)
#endif

#ifndef atomic_sub_return_release
#define  atomic_sub_return_release(...)					\
	__atomic_op_release(atomic_sub_return, __VA_ARGS__)
#endif

#ifndef atomic_sub_return
#define  atomic_sub_return(...)						\
	__atomic_op_fence(atomic_sub_return, __VA_ARGS__)
#endif
#endif /* atomic_sub_return_relaxed */

/* atomic_dec_return_relaxed */
#ifndef atomic_dec_return_relaxed
#define  atomic_dec_return_relaxed	atomic_dec_return
#define  atomic_dec_return_acquire	atomic_dec_return
#define  atomic_dec_return_release	atomic_dec_return

#else /* atomic_dec_return_relaxed */

#ifndef atomic_dec_return_acquire
#define  atomic_dec_return_acquire(...)					\
	__atomic_op_acquire(atomic_dec_return, __VA_ARGS__)
#endif

#ifndef atomic_dec_return_release
#define  atomic_dec_return_release(...)					\
	__atomic_op_release(atomic_dec_return, __VA_ARGS__)
#endif

#ifndef atomic_dec_return
#define  atomic_dec_return(...)						\
	__atomic_op_fence(atomic_dec_return, __VA_ARGS__)
#endif
#endif /* atomic_dec_return_relaxed */

/* atomic_xchg_relaxed */
#ifndef atomic_xchg_relaxed
#define  atomic_xchg_relaxed		atomic_xchg
#define  atomic_xchg_acquire		atomic_xchg
#define  atomic_xchg_release		atomic_xchg

#else /* atomic_xchg_relaxed */

#ifndef atomic_xchg_acquire
#define  atomic_xchg_acquire(...)					\
	__atomic_op_acquire(atomic_xchg, __VA_ARGS__)
#endif

#ifndef atomic_xchg_release
#define  atomic_xchg_release(...)					\
	__atomic_op_release(atomic_xchg, __VA_ARGS__)
#endif

#ifndef atomic_xchg
#define  atomic_xchg(...)						\
	__atomic_op_fence(atomic_xchg, __VA_ARGS__)
#endif
#endif /* atomic_xchg_relaxed */

/* atomic_cmpxchg_relaxed */
#ifndef atomic_cmpxchg_relaxed
#define  atomic_cmpxchg_relaxed		atomic_cmpxchg
#define  atomic_cmpxchg_acquire		atomic_cmpxchg
#define  atomic_cmpxchg_release		atomic_cmpxchg

#else /* atomic_cmpxchg_relaxed */

#ifndef atomic_cmpxchg_acquire
#define  atomic_cmpxchg_acquire(...)					\
	__atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic_cmpxchg_release
#define  atomic_cmpxchg_release(...)					\
	__atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic_cmpxchg
#define  atomic_cmpxchg(...)						\
	__atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic_cmpxchg_relaxed */
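
/*
 * Illustrative sketch (the 'owner' flag and do_critical_work() are
 * hypothetical): claiming exclusive ownership wants ACQUIRE ordering
 * on success and dropping it wants RELEASE ordering; as noted above,
 * a *failed* atomic_cmpxchg_acquire() provides no ordering, so the
 * failure path must not depend on it.
 *
 *	if (atomic_cmpxchg_acquire(&owner, 0, 1) == 0) {
 *		do_critical_work();
 *		atomic_set_release(&owner, 0);
 *	}
 */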

#ifndef atomic64_read_acquire
#define  atomic64_read_acquire(v)	smp_load_acquire(&(v)->counter)
#endif

#ifndef atomic64_set_release
#define  atomic64_set_release(v, i)	smp_store_release(&(v)->counter, (i))
#endif

/* atomic64_add_return_relaxed */
#ifndef atomic64_add_return_relaxed
#define  atomic64_add_return_relaxed	atomic64_add_return
#define  atomic64_add_return_acquire	atomic64_add_return
#define  atomic64_add_return_release	atomic64_add_return

#else /* atomic64_add_return_relaxed */

#ifndef atomic64_add_return_acquire
#define  atomic64_add_return_acquire(...)				\
	__atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
#endif

#ifndef atomic64_add_return_release
#define  atomic64_add_return_release(...)				\
	__atomic_op_release(atomic64_add_return, __VA_ARGS__)
#endif

#ifndef atomic64_add_return
#define  atomic64_add_return(...)					\
	__atomic_op_fence(atomic64_add_return, __VA_ARGS__)
#endif
#endif /* atomic64_add_return_relaxed */

/* atomic64_inc_return_relaxed */
#ifndef atomic64_inc_return_relaxed
#define  atomic64_inc_return_relaxed	atomic64_inc_return
#define  atomic64_inc_return_acquire	atomic64_inc_return
#define  atomic64_inc_return_release	atomic64_inc_return

#else /* atomic64_inc_return_relaxed */

#ifndef atomic64_inc_return_acquire
#define  atomic64_inc_return_acquire(...)				\
	__atomic_op_acquire(atomic64_inc_return, __VA_ARGS__)
#endif

#ifndef atomic64_inc_return_release
#define  atomic64_inc_return_release(...)				\
	__atomic_op_release(atomic64_inc_return, __VA_ARGS__)
#endif

#ifndef atomic64_inc_return
#define  atomic64_inc_return(...)					\
	__atomic_op_fence(atomic64_inc_return, __VA_ARGS__)
#endif
#endif /* atomic64_inc_return_relaxed */

/* atomic64_sub_return_relaxed */
#ifndef atomic64_sub_return_relaxed
#define  atomic64_sub_return_relaxed	atomic64_sub_return
#define  atomic64_sub_return_acquire	atomic64_sub_return
#define  atomic64_sub_return_release	atomic64_sub_return

#else /* atomic64_sub_return_relaxed */

#ifndef atomic64_sub_return_acquire
#define  atomic64_sub_return_acquire(...)				\
	__atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
#endif

#ifndef atomic64_sub_return_release
#define  atomic64_sub_return_release(...)				\
	__atomic_op_release(atomic64_sub_return, __VA_ARGS__)
#endif

#ifndef atomic64_sub_return
#define  atomic64_sub_return(...)					\
	__atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
#endif
#endif /* atomic64_sub_return_relaxed */

/* atomic64_dec_return_relaxed */
#ifndef atomic64_dec_return_relaxed
#define  atomic64_dec_return_relaxed	atomic64_dec_return
#define  atomic64_dec_return_acquire	atomic64_dec_return
#define  atomic64_dec_return_release	atomic64_dec_return

#else /* atomic64_dec_return_relaxed */

#ifndef atomic64_dec_return_acquire
#define  atomic64_dec_return_acquire(...)				\
	__atomic_op_acquire(atomic64_dec_return, __VA_ARGS__)
#endif

#ifndef atomic64_dec_return_release
#define  atomic64_dec_return_release(...)				\
	__atomic_op_release(atomic64_dec_return, __VA_ARGS__)
#endif

#ifndef atomic64_dec_return
#define  atomic64_dec_return(...)					\
	__atomic_op_fence(atomic64_dec_return, __VA_ARGS__)
#endif
#endif /* atomic64_dec_return_relaxed */

/* atomic64_xchg_relaxed */
#ifndef atomic64_xchg_relaxed
#define  atomic64_xchg_relaxed		atomic64_xchg
#define  atomic64_xchg_acquire		atomic64_xchg
#define  atomic64_xchg_release		atomic64_xchg

#else /* atomic64_xchg_relaxed */

#ifndef atomic64_xchg_acquire
#define  atomic64_xchg_acquire(...)					\
	__atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
#endif

#ifndef atomic64_xchg_release
#define  atomic64_xchg_release(...)					\
	__atomic_op_release(atomic64_xchg, __VA_ARGS__)
#endif

#ifndef atomic64_xchg
#define  atomic64_xchg(...)						\
	__atomic_op_fence(atomic64_xchg, __VA_ARGS__)
#endif
#endif /* atomic64_xchg_relaxed */

/* atomic64_cmpxchg_relaxed */
#ifndef atomic64_cmpxchg_relaxed
#define  atomic64_cmpxchg_relaxed	atomic64_cmpxchg
#define  atomic64_cmpxchg_acquire	atomic64_cmpxchg
#define  atomic64_cmpxchg_release	atomic64_cmpxchg

#else /* atomic64_cmpxchg_relaxed */

#ifndef atomic64_cmpxchg_acquire
#define  atomic64_cmpxchg_acquire(...)					\
	__atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic64_cmpxchg_release
#define  atomic64_cmpxchg_release(...)					\
	__atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic64_cmpxchg
#define  atomic64_cmpxchg(...)						\
	__atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic64_cmpxchg_relaxed */

/* cmpxchg_relaxed */
#ifndef cmpxchg_relaxed
#define  cmpxchg_relaxed		cmpxchg
#define  cmpxchg_acquire		cmpxchg
#define  cmpxchg_release		cmpxchg

#else /* cmpxchg_relaxed */

#ifndef cmpxchg_acquire
#define  cmpxchg_acquire(...)						\
	__atomic_op_acquire(cmpxchg, __VA_ARGS__)
#endif

#ifndef cmpxchg_release
#define  cmpxchg_release(...)						\
	__atomic_op_release(cmpxchg, __VA_ARGS__)
#endif

#ifndef cmpxchg
#define  cmpxchg(...)							\
	__atomic_op_fence(cmpxchg, __VA_ARGS__)
#endif
#endif /* cmpxchg_relaxed */

/* cmpxchg64_relaxed */
#ifndef cmpxchg64_relaxed
#define  cmpxchg64_relaxed		cmpxchg64
#define  cmpxchg64_acquire		cmpxchg64
#define  cmpxchg64_release		cmpxchg64

#else /* cmpxchg64_relaxed */

#ifndef cmpxchg64_acquire
#define  cmpxchg64_acquire(...)						\
	__atomic_op_acquire(cmpxchg64, __VA_ARGS__)
#endif

#ifndef cmpxchg64_release
#define  cmpxchg64_release(...)						\
	__atomic_op_release(cmpxchg64, __VA_ARGS__)
#endif

#ifndef cmpxchg64
#define  cmpxchg64(...)							\
	__atomic_op_fence(cmpxchg64, __VA_ARGS__)
#endif
#endif /* cmpxchg64_relaxed */

/* xchg_relaxed */
#ifndef xchg_relaxed
#define  xchg_relaxed			xchg
#define  xchg_acquire			xchg
#define  xchg_release			xchg

#else /* xchg_relaxed */

#ifndef xchg_acquire
#define  xchg_acquire(...)		__atomic_op_acquire(xchg, __VA_ARGS__)
#endif

#ifndef xchg_release
#define  xchg_release(...)		__atomic_op_release(xchg, __VA_ARGS__)
#endif

#ifndef xchg
#define  xchg(...)			__atomic_op_fence(xchg, __VA_ARGS__)
#endif
#endif /* xchg_relaxed */

/**
 * atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	return __atomic_add_unless(v, a, u) != u;
}
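
/*
 * Example (hypothetical reference count 'refs' and release_slowpath()
 * helper): drop a reference only when it is not the last one; when
 * atomic_add_unless() returns zero, 'refs' was already 1 and nothing
 * was subtracted, so the caller falls back to its teardown path.
 *
 *	if (!atomic_add_unless(&refs, -1, 1))
 *		release_slowpath();
 */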

/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
#ifndef atomic_inc_not_zero
#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
#endif
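
/*
 * Typical use (hypothetical object with a 'refcount' field): a lookup
 * may only take a new reference while the object is still live, i.e.
 * while the count has not already dropped to zero.
 *
 *	if (!atomic_inc_not_zero(&obj->refcount))
 *		return NULL;
 */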

#ifndef atomic_andnot
static inline void atomic_andnot(int i, atomic_t *v)
{
	atomic_and(~i, v);
}
#endif

static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	atomic_andnot(mask, v);
}

static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	atomic_or(mask, v);
}

/**
 * atomic_inc_not_zero_hint - increment if not null
 * @v: pointer of type atomic_t
 * @hint: probable value of the atomic before the increment
 *
 * This version of atomic_inc_not_zero() gives a hint of the probable
 * value of the atomic. This helps the processor avoid reading the
 * memory before doing the atomic read/modify/write cycle, lowering
 * the number of bus transactions on some arches.
 *
 * Returns: 0 if increment was not done, 1 otherwise.
 */
#ifndef atomic_inc_not_zero_hint
static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
{
	int val, c = hint;

	/* sanity test, should be removed by compiler if hint is a constant */
	if (!hint)
		return atomic_inc_not_zero(v);

	do {
		val = atomic_cmpxchg(v, c, c + 1);
		if (val == c)
			return 1;
		c = val;
	} while (c);

	return 0;
}
#endif
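
/*
 * The hint is simply the expected current value. For a hypothetical
 * refcount that is usually uncontended and sits at 1, e.g.
 *
 *	atomic_inc_not_zero_hint(&obj->refcount, 1);
 *
 * the first cmpxchg attempt can proceed without first reading the
 * counter.
 */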

#ifndef atomic_inc_unless_negative
static inline int atomic_inc_unless_negative(atomic_t *p)
{
	int v, v1;
	for (v = 0; v >= 0; v = v1) {
		v1 = atomic_cmpxchg(p, v, v + 1);
		if (likely(v1 == v))
			return 1;
	}
	return 0;
}
#endif

#ifndef atomic_dec_unless_positive
static inline int atomic_dec_unless_positive(atomic_t *p)
{
	int v, v1;
	for (v = 0; v <= 0; v = v1) {
		v1 = atomic_cmpxchg(p, v, v - 1);
		if (likely(v1 == v))
			return 1;
	}
	return 0;
}
#endif

/*
 * atomic_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
#ifndef atomic_dec_if_positive
static inline int atomic_dec_if_positive(atomic_t *v)
{
	int c, old, dec;
	c = atomic_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}
#endif
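
/*
 * Worked example of the return convention: with @v at 3 the call
 * decrements it to 2 and returns 2; with @v already at 0 nothing is
 * written and -1 is returned, so callers treat a negative result as
 * "not decremented".
 */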

#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif

#ifndef atomic64_andnot
static inline void atomic64_andnot(long long i, atomic64_t *v)
{
	atomic64_and(~i, v);
}
#endif

#include <asm-generic/atomic-long.h>

#endif /* _LINUX_ATOMIC_H */