Subversion Repositories Kolibri OS

Rev 6082 → Rev 7143
Line 32... Line 32...
 
 /*
  * The idea here is to build acquire/release variants by adding explicit
  * barriers on top of the relaxed variant. In the case where the relaxed
  * variant is already fully ordered, no additional barriers are needed.
+ *
+ * Besides, if an arch has a special barrier for acquire/release, it could
+ * implement its own __atomic_op_* and use the same framework for building
+ * variants
  */
+#ifndef __atomic_op_acquire
 #define __atomic_op_acquire(op, args...)				\
 ({									\
 	typeof(op##_relaxed(args)) __ret  = op##_relaxed(args);		\
 	smp_mb__after_atomic();						\
 	__ret;								\
 })
+#endif
 
+#ifndef __atomic_op_release
 #define __atomic_op_release(op, args...)				\
 ({									\
 	smp_mb__before_atomic();					\
 	op##_relaxed(args);						\
 })
+#endif
 
+#ifndef __atomic_op_fence
 #define __atomic_op_fence(op, args...)					\
 ({									\
 	typeof(op##_relaxed(args)) __ret;				\
 	smp_mb__before_atomic();					\
 	__ret = op##_relaxed(args);					\
 	smp_mb__after_atomic();						\
 	__ret;								\
 })
+#endif
 
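To make the comment at the top of this hunk concrete, here is a minimal userspace sketch, not the kernel's actual definitions: atomic_t, the smp_mb__*_atomic() barriers and atomic_add_return_relaxed() are stand-ins built on the GCC/Clang __atomic builtins, the two wrapper macros are copied from the hunk above, and the final #defines illustrate the pattern by which an acquire or fully ordered variant can be spelled in terms of the relaxed one (the release variant follows the same shape).

/* sketch.c -- hypothetical stand-ins, for illustration only */
#include <stdio.h>

typedef struct { int counter; } atomic_t;

/* stand-in barriers; the kernel maps these to arch-specific instructions */
#define smp_mb__before_atomic()	__atomic_thread_fence(__ATOMIC_SEQ_CST)
#define smp_mb__after_atomic()	__atomic_thread_fence(__ATOMIC_SEQ_CST)

/* a relaxed primitive, as an arch might provide it */
static inline int atomic_add_return_relaxed(int i, atomic_t *v)
{
	return __atomic_add_fetch(&v->counter, i, __ATOMIC_RELAXED);
}

/* wrappers as in the hunk above */
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret  = op##_relaxed(args);		\
	smp_mb__after_atomic();						\
	__ret;								\
})

#define __atomic_op_fence(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret;				\
	smp_mb__before_atomic();					\
	__ret = op##_relaxed(args);					\
	smp_mb__after_atomic();						\
	__ret;								\
})

/* ordered variants built from the relaxed one */
#define atomic_add_return_acquire(...)	__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
#define atomic_add_return(...)		__atomic_op_fence(atomic_add_return, __VA_ARGS__)

int main(void)
{
	atomic_t v = { 0 };

	printf("%d\n", atomic_add_return_acquire(1, &v));	/* prints 1 */
	printf("%d\n", atomic_add_return(2, &v));		/* prints 3 */
	return 0;
}

An arch that has a cheaper acquire/release barrier would instead supply its own __atomic_op_acquire/__atomic_op_release, which is exactly what the new #ifndef guards in this revision allow.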
Line 546... Line 556...
 	}
 	return dec;
 }
 #endif
+
+/**
+ * atomic_fetch_or - perform *p |= mask and return old value of *p
+ * @p: pointer to atomic_t
+ * @mask: mask to OR on the atomic_t
+ */
+#ifndef atomic_fetch_or
+static inline int atomic_fetch_or(atomic_t *p, int mask)
+{
+	int old, val = atomic_read(p);
+
+	for (;;) {
+		old = atomic_cmpxchg(p, val, val | mask);
+		if (old == val)
+			break;
+		val = old;
+	}
+
+	return old;
+}
+#endif
 
 #ifdef CONFIG_GENERIC_ATOMIC64
 #include <asm-generic/atomic64.h>
 #endif
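As a usage note for atomic_fetch_or(): because it returns the old value, a caller can set a flag bit and simultaneously learn whether it was the one that set it. Below is a hedged userspace sketch, not the kernel implementation: atomic_read() and atomic_cmpxchg() are stand-ins on the __atomic builtins, the cmpxchg loop is the generic fallback from the hunk above, and PENDING_BIT is a hypothetical flag.

/* fetch_or_sketch.c -- hypothetical stand-ins, for illustration only */
#include <stdio.h>

typedef struct { int counter; } atomic_t;

static inline int atomic_read(atomic_t *v)
{
	return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}

/* on failure, __atomic_compare_exchange_n() writes the observed value
 * into 'old', so returning it mimics cmpxchg's "return what was in *v" */
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	__atomic_compare_exchange_n(&v->counter, &old, new, 0,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return old;
}

/* same cmpxchg loop as the generic fallback added in this revision */
static inline int atomic_fetch_or(atomic_t *p, int mask)
{
	int old, val = atomic_read(p);

	for (;;) {
		old = atomic_cmpxchg(p, val, val | mask);
		if (old == val)
			break;
		val = old;
	}

	return old;
}

#define PENDING_BIT	0x1	/* hypothetical flag bit */

int main(void)
{
	atomic_t flags = { 0 };

	/* first caller sets the bit and sees that it was previously clear */
	if (!(atomic_fetch_or(&flags, PENDING_BIT) & PENDING_BIT))
		printf("first caller: bit was clear, we set it\n");

	/* second caller finds the bit already set */
	if (atomic_fetch_or(&flags, PENDING_BIT) & PENDING_BIT)
		printf("second caller: bit already set\n");

	return 0;
}

Whenever another CPU changes *p between the read and the cmpxchg, the loop simply retries with the value the failed cmpxchg observed, until the OR lands atomically.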