#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
#define mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory") /* alternative: "mfence", X86_FEATURE_XMM2 */
#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory") /* alternative: "lfence", X86_FEATURE_XMM2 */
#define wmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory") /* alternative: "sfence", X86_FEATURE_XMM */
#else
#define mb() asm volatile("mfence" ::: "memory")
#define rmb() asm volatile("lfence" ::: "memory")
#define wmb() asm volatile("sfence" ::: "memory")
#endif
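/*
 * Usage sketch (illustrative; dev, DOORBELL and KICK are hypothetical
 * names): the mandatory barriers order plain stores against MMIO, e.g.
 * making buffer contents visible before ringing a device doorbell.
 *
 *	buf->len = len;				// fill memory the device will read
 *	wmb();					// stores visible before the doorbell
 *	writel(KICK, dev->mmio + DOORBELL);
 */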

#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb() rmb()
#else
#define dma_rmb() barrier()
#endif
#define dma_wmb() barrier()
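/*
 * Usage sketch (illustrative; the descriptor layout and flag names are
 * hypothetical): for descriptors in coherent DMA memory the lighter dma_*
 * barriers are enough to order the CPU's view of the ring.
 *
 *	if (desc->status & DESC_DONE) {		// device marked the buffer done
 *		dma_rmb();			// read status before payload fields
 *		len = desc->len;
 *	}
 *
 *	desc->addr = dma_addr;			// publish a new buffer
 *	dma_wmb();				// fields visible before the ownership flag
 *	desc->status = DESC_HW_OWNED;
 */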

#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() dma_rmb()
#define smp_wmb() barrier()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else /* !SMP */
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif /* SMP */
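/*
 * Usage sketch (illustrative; task_state and SLEEPING are hypothetical):
 * set_mb() assigns a value and then acts as a full barrier, so a later
 * load cannot be reordered before the store, which is the classic
 * sleep/wakeup pattern.
 *
 *	set_mb(task_state, SLEEPING);
 *	if (!condition)				// cannot be hoisted above the store
 *		schedule();
 */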

#define read_barrier_depends() do { } while (0)
#define smp_read_barrier_depends() do { } while (0)
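/*
 * x86 preserves data-dependency ordering in hardware, so the dependency
 * barriers above expand to nothing; only architectures such as Alpha need
 * a real instruction here.
 */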

#if defined(CONFIG_X86_PPRO_FENCE)

/*
 * For this option x86 doesn't have a strong TSO memory
 * model and we should fall back to full barriers.
 */

#define smp_store_release(p, v) \
do { \
	compiletime_assert_atomic_type(*p); \
	smp_mb(); \
	ACCESS_ONCE(*p) = (v); \
} while (0)

#define smp_load_acquire(p) \
({ \
	typeof(*p) ___p1 = ACCESS_ONCE(*p); \
	compiletime_assert_atomic_type(*p); \
	smp_mb(); \
	___p1; \
})

#else /* regular x86 TSO memory ordering */

#define smp_store_release(p, v) \
do { \
	compiletime_assert_atomic_type(*p); \
	barrier(); \
	ACCESS_ONCE(*p) = (v); \
} while (0)

#define smp_load_acquire(p) \
({ \
	typeof(*p) ___p1 = ACCESS_ONCE(*p); \
	compiletime_assert_atomic_type(*p); \
	barrier(); \
	___p1; \
})

#endif
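/*
 * Usage sketch (illustrative; data and ready are hypothetical variables):
 * on x86's TSO model these helpers need only a compiler barrier, yet they
 * express the usual publish/consume pattern.
 *
 *	// producer
 *	data = compute();
 *	smp_store_release(&ready, 1);		// data visible before ready
 *
 *	// consumer
 *	if (smp_load_acquire(&ready))		// later reads see producer's data
 *		use(data);
 */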

/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic() barrier()
#define smp_mb__after_atomic() barrier()

/*
 * Stop RDTSC speculation. This is needed when you need to use RDTSC
 * (or get_cycles or vread that possibly accesses the TSC) in a defined
 * code region.
 *
 * (Could use a three-way alternative() for this if there was one.)
 */
static __always_inline void rdtsc_barrier(void)
{
	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
}
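/*
 * Usage sketch (illustrative; rdtsc() stands in for whatever TSC read is
 * used): fencing both ends of a timed region keeps the TSC reads from
 * being speculated into or out of the measured code.
 *
 *	rdtsc_barrier();
 *	start = rdtsc();
 *	do_work();
 *	rdtsc_barrier();
 *	end = rdtsc();
 */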

#endif /* _ASM_X86_BARRIER_H */