Subversion Repositories: KolibriOS

Rev 5270 → Rev 6082 ('-' lines exist only in Rev 5270, '+' lines only in Rev 6082; all other lines are common to both revisions)
#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
#define mb() asm volatile ("lock; addl $0,0(%esp)")/*, "mfence", X86_FEATURE_XMM2) */
#define rmb() asm volatile("lock; addl $0,0(%esp)")/*, "lfence", X86_FEATURE_XMM2) */
#define wmb() asm volatile("lock; addl $0,0(%esp)")/*, "sfence", X86_FEATURE_XMM)  */
#else
#define mb() 	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif
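To make the comment about talking to devices concrete, here is a minimal hypothetical driver-style sketch (struct my_desc, the doorbell register, and the Linux-style u64/u32 and writel() helpers are assumptions, not part of this header): the stores that fill a descriptor must become visible before the MMIO write that tells the device to fetch it, which is exactly the ordering wmb() provides.

/* Illustrative sketch only; the descriptor layout and doorbell are invented. */
struct my_desc { u64 addr; u32 len; };

static void kick_device_example(struct my_desc *desc, void __iomem *doorbell,
				u64 dma_addr, u32 len)
{
	desc->addr = dma_addr;		/* fill in the work description...       */
	desc->len  = len;
	wmb();				/* ...and make those stores visible      */
					/* before the doorbell write below       */
	writel(1, doorbell);		/* device may now read the descriptor    */
}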

#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb()	rmb()
#else
#define dma_rmb()	barrier()
#endif
#define dma_wmb()	barrier()
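As a hedged sketch of the pattern dma_rmb() targets (the rx_desc structure and DESC_DONE flag below are invented for illustration): when a device writes a descriptor into coherent memory and then sets a "done" flag, the CPU must observe the flag before it reads the payload fields.

/* Illustrative sketch only; a NIC-style receive descriptor is assumed. */
#define DESC_DONE 0x1
struct rx_desc { u32 status; u32 len; u64 addr; };

static int poll_rx_example(struct rx_desc *d)
{
	if (!(d->status & DESC_DONE))
		return 0;		/* device has not finished this entry     */
	dma_rmb();			/* order the status read before len/addr  */
	return d->len;			/* now safe to consume the payload fields */
}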

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	dma_rmb()
#define smp_wmb()	barrier()
-#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else /* !SMP */
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif /* SMP */

#define read_barrier_depends()		do { } while (0)
#define smp_read_barrier_depends()	do { } while (0)
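The rename from set_mb() to smp_store_mb() keeps the same shape: store a value and then order it against what follows (a full barrier on SMP via xchg), so a subsequent load cannot be reordered before the store. A minimal hypothetical sketch (the two flags are invented example variables):

static int want_sleep;			/* invented example variables            */
static int wake_pending;

static int prepare_to_sleep_example(void)
{
	smp_store_mb(want_sleep, 1);	/* store followed by a full barrier      */
	if (READ_ONCE(wake_pending))	/* cannot be hoisted above the store     */
		return 0;		/* a wakeup already raced with us        */
	return 1;			/* caller may really go to sleep         */
}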

#if defined(CONFIG_X86_PPRO_FENCE)

/*
 * For this option x86 doesn't have a strong TSO memory
 * model and we should fall back to full barriers.
 */

#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	smp_mb();							\
-	ACCESS_ONCE(*p) = (v);						\
+	WRITE_ONCE(*p, v);						\
} while (0)

#define smp_load_acquire(p)						\
({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	smp_mb();							\
	___p1;								\
})

#else /* regular x86 TSO memory ordering */

#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
-	ACCESS_ONCE(*p) = (v);						\
+	WRITE_ONCE(*p, v);						\
} while (0)

#define smp_load_acquire(p)						\
({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

#endif
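A common way to read these two macros is as a message-passing pair: the release store publishes a flag only after the data it guards, and an acquire load that observes the flag is guaranteed to also observe that data. A minimal hypothetical sketch (payload and ready are invented):

static int payload;			/* data guarded by the ready flag        */
static int ready;

static void producer_example(int value)
{
	payload = value;		/* write the data first...               */
	smp_store_release(&ready, 1);	/* ...then publish the flag              */
}

static int consumer_example(void)
{
	if (smp_load_acquire(&ready))	/* if the flag is observed...            */
		return payload;		/* ...the payload is observed too        */
	return -1;			/* nothing published yet                 */
}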

/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic()	barrier()
#define smp_mb__after_atomic()	barrier()
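Because x86 atomic read-modify-write operations already act as full barriers, these two macros only need to stop the compiler from reordering. A hypothetical sketch of where they sit (the refcounted object is invented, and atomic_t/atomic_dec are assumed to be available as in the Linux sources this header derives from):

struct obj_example { int dead; atomic_t ref; };

static void mark_dead_example(struct obj_example *obj)
{
	obj->dead = 1;			/* must be visible before the decrement  */
	smp_mb__before_atomic();	/* barrier() on x86; a real memory       */
					/* barrier on weaker architectures       */
	atomic_dec(&obj->ref);		/* locked RMW, already serializing here  */
}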

-/*
- * Stop RDTSC speculation. This is needed when you need to use RDTSC
- * (or get_cycles or vread that possibly accesses the TSC) in a defined
- * code region.
- *
- * (Could use an alternative three way for this if there was one.)
- */
-static __always_inline void rdtsc_barrier(void)
-{
-	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
-	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
-}
-
#endif /* _ASM_X86_BARRIER_H */
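For the rdtsc_barrier() helper that Rev 5270 still carried (removed in Rev 6082), the intent is to fence the TSC read so it cannot be speculated outside the region being timed. A hypothetical sketch of such a caller, not part of either revision, assuming Linux-style u32/u64 types:

static inline u64 read_tsc_example(void)
{
	u32 lo, hi;

	rdtsc_barrier();		/* keep RDTSC from executing early        */
	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
	rdtsc_barrier();		/* keep later code from running before    */
					/* the TSC value has been obtained        */
	return ((u64)hi << 32) | lo;
}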