include/linux/compiler.h @ Rev 7143 (diff base: Rev 6936)
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#ifndef __ASSEMBLY__

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __kernel	__attribute__((address_space(0)))
# define __safe		__attribute__((safe))
# define __force	__attribute__((force))
# define __nocast	__attribute__((nocast))
# define __iomem	__attribute__((noderef, address_space(2)))
# define __must_hold(x)	__attribute__((context(x,1,1)))
# define __acquires(x)	__attribute__((context(x,0,1)))
# define __releases(x)	__attribute__((context(x,1,0)))
# define __acquire(x)	__context__(x,1)
# define __release(x)	__context__(x,-1)
# define __cond_lock(x,c)	((c) ? ({ __acquire(x); 1; }) : 0)
# define __percpu	__attribute__((noderef, address_space(3)))
# define __pmem		__attribute__((noderef, address_space(5)))
#ifdef CONFIG_SPARSE_RCU_POINTER
# define __rcu		__attribute__((noderef, address_space(4)))
#else /* CONFIG_SPARSE_RCU_POINTER */
# define __rcu
#endif /* CONFIG_SPARSE_RCU_POINTER */
# define __private	__attribute__((noderef))
extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *);
# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
#else /* __CHECKER__ */
# define __user
# define __kernel
# define __safe
# define __force
# define __nocast
# define __iomem
# define __chk_user_ptr(x) (void)0
# define __chk_io_ptr(x) (void)0
# define __builtin_warning(x, y...) (1)
# define __must_hold(x)
# define __acquires(x)
# define __releases(x)
# define __acquire(x) (void)0
# define __release(x) (void)0
# define __cond_lock(x,c) (c)
# define __percpu
# define __rcu
# define __pmem
# define __private
# define ACCESS_PRIVATE(p, member) ((p)->member)
#endif /* __CHECKER__ */
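/*
 * Illustrative sketch (not from the original file): how the sparse
 * annotations above are typically used. The function and variable names
 * here are hypothetical; only the annotations come from this header.
 *
 *	int __user *uptr;
 *	x = *uptr;			// sparse: dereference of noderef
 *					// expression -- use an accessor
 *					// such as copy_from_user() instead
 *
 *	void my_lock(void) __acquires(mylock);	 // sparse checks that
 *	void my_unlock(void) __releases(mylock); // acquire/release pair up
 *
 *	val = ACCESS_PRIVATE(obj, cnt);	// the only sparse-clean way to
 *					// touch a field marked __private
 */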
|
/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
#define ___PASTE(a,b) a##b
#define __PASTE(a,b) ___PASTE(a,b)

#ifdef __KERNEL__

#ifdef __GNUC__
#include <linux/compiler-gcc.h>
#endif

#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
#define notrace __attribute__((hotpatch(0,0)))
#else
#define notrace __attribute__((no_instrument_function))
#endif

/* Intel compiler defines __GNUC__. So we will overwrite implementations
 * coming from above header files here
 */
#ifdef __INTEL_COMPILER
# include <linux/compiler-intel.h>
#endif

/* Clang compiler defines __GNUC__. So we will overwrite implementations
 * coming from above header files here
 */
#ifdef __clang__
#include <linux/compiler-clang.h>
#endif

/*
 * Generic compiler-dependent macros required for kernel
 * build go below this comment. Actual compiler/compiler version
 * specific implementations come from the above header files
 */

struct ftrace_branch_data {
	const char *func;
	const char *file;
	unsigned line;
	union {
		struct {
			unsigned long correct;
			unsigned long incorrect;
		};
		struct {
			unsigned long miss;
			unsigned long hit;
		};
		unsigned long miss_hit[2];
	};
};

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
 * to disable branch tracing on a per file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect) ({					\
			int ______r;					\
			static struct ftrace_branch_data		\
				__attribute__((__aligned__(4)))		\
				__attribute__((section("_ftrace_annotated_branch"))) \
				______f = {				\
				.func = __func__,			\
				.file = __FILE__,			\
				.line = __LINE__,			\
			};						\
			______r = likely_notrace(x);			\
			ftrace_likely_update(&______f, ______r, expect); \
			______r;					\
		})

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same. This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
# endif
# ifndef unlikely
#  define unlikely(x)	(__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
	if (__builtin_constant_p(!!(cond)) ? !!(cond) :			\
	({								\
		int ______r;						\
		static struct ftrace_branch_data			\
			__attribute__((__aligned__(4)))			\
			__attribute__((section("_ftrace_branch")))	\
			______f = {					\
			.func = __func__,				\
			.file = __FILE__,				\
			.line = __LINE__,				\
		};							\
		______r = !!(cond);					\
		______f.miss_hit[______r]++;				\
		______r;						\
	}))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif
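/*
 * Usage sketch (added for illustration; the conditions are made up):
 *
 *	if (unlikely(!buf))		// error path, predicted cold
 *		return -ENOMEM;
 *	if (likely(len > 0))		// common case, predicted taken
 *		consume(buf, len);
 *
 * Under CONFIG_TRACE_BRANCH_PROFILING, the __branch_check__() wrapper
 * above additionally records, per call site, how often the prediction
 * was right, in the _ftrace_annotated_branch section.
 */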

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif

/* Unreachable code */
#ifndef unreachable
# define unreachable() do { } while (1)
#endif

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif

#ifndef OPTIMIZER_HIDE_VAR
#define OPTIMIZER_HIDE_VAR(var) barrier()
#endif

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
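/*
 * Expansion sketch (illustrative): the double __PASTE indirection lets
 * __LINE__ expand before token pasting, so on line 42
 *
 *	__UNIQUE_ID(foo)   =>   __UNIQUE_ID_foo42
 *
 * which is why the ID is only "not-quite-unique": two expansions with the
 * same prefix on the same line still collide.
 */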

#include <uapi/linux/types.h>

#define __READ_ONCE_SIZE						\
({									\
	switch (size) {							\
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
	default:							\
		barrier();						\
		__builtin_memcpy((void *)res, (const void *)p, size);	\
		barrier();						\
	}								\
})

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

#ifdef CONFIG_KASAN
/*
 * This function is not 'inline' because __no_sanitize_address conflicts
 * with inlining. Attempting to inline it may cause a build failure.
 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
static __no_sanitize_address __maybe_unused
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}
#else
static __always_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}
#endif

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
 * compiler is aware of some particular ordering. One way to make the
 * compiler aware of ordering is to put the two invocations of READ_ONCE,
 * WRITE_ONCE or ACCESS_ONCE() in different C statements.
 *
 * In contrast to ACCESS_ONCE these two macros will also work on aggregate
 * data types like structs or unions. If the size of the accessed data
 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There are at
 * least two memcpy()s then: one inside __builtin_memcpy(), and one in the
 * macro itself copying through the '__u' union allocated on the stack.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */

#define __READ_ONCE(x, check)						\
({									\
	union { typeof(x) __val; char __c[1]; } __u;			\
	if (check)							\
		__read_once_size(&(x), __u.__c, sizeof(x));		\
	else								\
		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
	__u.__val;							\
})
#define READ_ONCE(x) __READ_ONCE(x, 1)

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide memory access from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)

#define WRITE_ONCE(x, val) \
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__val = (__force typeof(x)) (val) }; \
	__write_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})
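/*
 * Usage sketch (illustrative; 'shared_flag' is a made-up variable):
 *
 *	// writer:
 *	WRITE_ONCE(shared_flag, 1);
 *	// reader:
 *	while (!READ_ONCE(shared_flag))
 *		cpu_relax();
 *
 * For the word-sized cases each access compiles to exactly one volatile
 * load or store, so the compiler can neither tear, fuse, nor hoist it
 * out of the loop.
 */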

/**
 * smp_cond_acquire() - Spin wait for cond with ACQUIRE ordering
 * @cond: boolean expression to wait for
 *
 * Equivalent to using smp_load_acquire() on the condition variable but employs
 * the control dependency of the wait to reduce the barrier on many platforms.
 *
 * The control dependency provides a LOAD->STORE order, the additional RMB
 * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order,
 * aka. ACQUIRE.
 */
#define smp_cond_acquire(cond)	do {		\
	while (!(cond))				\
		cpu_relax();			\
	smp_rmb(); /* ctrl + rmb := acquire */	\
} while (0)
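/*
 * Usage sketch (illustrative; 'node->locked' is a made-up field):
 *
 *	smp_cond_acquire(READ_ONCE(node->locked));
 *	// code after this point is ordered after the load that observed
 *	// the condition, as if by smp_load_acquire()
 *
 * The condition is re-evaluated on every spin, so it should contain a
 * READ_ONCE()/atomic_read() style access.
 */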

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#ifdef __KERNEL__
/*
 * Allow us to mark functions as 'deprecated' and have gcc emit a nice
 * warning for each use, in hopes of speeding the functions' removal.
 * Usage is:
 *		int __deprecated foo(void)
 */
#ifndef __deprecated
# define __deprecated		/* unimplemented */
#endif

#ifdef MODULE
#define __deprecated_for_modules __deprecated
#else
#define __deprecated_for_modules
#endif

#ifndef __must_check
#define __must_check
#endif

#ifndef CONFIG_ENABLE_MUST_CHECK
#undef __must_check
#define __must_check
#endif
#ifndef CONFIG_ENABLE_WARN_DEPRECATED
#undef __deprecated
#undef __deprecated_for_modules
#define __deprecated
#define __deprecated_for_modules
#endif

/*
 * Allow us to avoid 'defined but not used' warnings on functions and data,
 * as well as force them to be emitted to the assembly file.
 *
 * As of gcc 3.4, static functions that are not marked with attribute((used))
 * may be elided from the assembly file. As of gcc 3.4, static data not so
 * marked will not be elided, but this may change in a future gcc version.
 *
 * NOTE: Because distributions shipped with a backported unit-at-a-time
 * compiler in gcc 3.3, we must define __used to be __attribute__((used))
 * for gcc >=3.3 instead of 3.4.
 *
 * In prior versions of gcc, such functions and data would be emitted, but
 * would be warned about except with attribute((unused)).
 *
 * Mark functions that are referenced only in inline assembly as __used so
 * the code is emitted even though it appears to be unreferenced.
 */
#ifndef __used
# define __used			/* unimplemented */
#endif

#ifndef __maybe_unused
# define __maybe_unused		/* unimplemented */
#endif

#ifndef __always_unused
# define __always_unused	/* unimplemented */
#endif

#ifndef noinline
#define noinline
#endif

/*
 * Rather than using noinline to prevent stack consumption, use
 * noinline_for_stack instead. For documentation reasons.
 */
#define noinline_for_stack noinline

#ifndef __always_inline
#define __always_inline inline
#endif

#endif /* __KERNEL__ */

/*
 * From the GCC manual:
 *
 * Many functions do not examine any values except their arguments,
 * and have no effects except the return value.  Basically this is
 * just slightly more strict class than the `pure' attribute above,
 * since function is not allowed to read global memory.
 *
 * Note that a function that has pointer arguments and examines the
 * data pointed to must _not_ be declared `const'.  Likewise, a
 * function that calls a non-`const' function usually must not be
 * `const'.  It does not make sense for a `const' function to return
 * `void'.
 */
#ifndef __attribute_const__
# define __attribute_const__	/* unimplemented */
#endif

/*
 * Tell gcc if a function is cold. The compiler will assume any path
 * directly leading to the call is unlikely.
 */

#ifndef __cold
#define __cold
#endif

/* Simple shorthand for a section definition */
#ifndef __section
# define __section(S) __attribute__ ((__section__(#S)))
#endif

#ifndef __visible
#define __visible
#endif

/*
 * Assume alignment of return value.
 */
#ifndef __assume_aligned
#define __assume_aligned(a, ...)
#endif


/* Are two types/vars the same type (ignoring qualifiers)? */
#ifndef __same_type
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
#endif

/* Is this type a native word size -- useful for atomic operations */
#ifndef __native_word
# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
#endif

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
/*
 * Sparse complains of variable sized arrays due to the temporary variable in
 * __compiletime_assert. Unfortunately we can't just expand it out to make
 * sparse see a constant array size without breaking compiletime_assert on old
 * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
 */
# ifndef __CHECKER__
#  define __compiletime_error_fallback(condition) \
	do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
# endif
#endif
#ifndef __compiletime_error_fallback
# define __compiletime_error_fallback(condition) do { } while (0)
#endif

#define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		bool __cond = !(condition);				\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (__cond)						\
			prefix ## suffix();				\
		__compiletime_error_fallback(__cond);			\
	} while (0)

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)

#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")

/*
 * Prevent the compiler from merging or refetching accesses.  The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering.  One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
 * on a union member will work as long as the size of the member matches the
 * size of the union and the size is smaller than word size.
 *
 * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
 * between process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 *
 * If possible use READ_ONCE()/WRITE_ONCE() instead.
 */
#define __ACCESS_ONCE(x) ({ \
	 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
	(volatile typeof(x) *)&(x); })
#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
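/*
 * Usage sketch (illustrative; prefer READ_ONCE()/WRITE_ONCE() as noted
 * above; 'shared_counter' is a made-up variable):
 *
 *	tmp = ACCESS_ONCE(shared_counter);	// forced single read
 *	ACCESS_ONCE(shared_counter) = tmp + 1;	// forced single write
 *
 * The dummy '__var' in __ACCESS_ONCE() exists so that non-scalar types
 * draw a diagnostic: (__force typeof(x)) 0 does not compile for struct
 * or union types.
 */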

/**
 * lockless_dereference() - safely load a pointer for later dereference
 * @p: The pointer to load
 *
 * Similar to rcu_dereference(), but for situations where the pointed-to
 * object's lifetime is managed by something other than RCU.  That
 * "something other" might be reference counting or simple immortality.
 */
#define lockless_dereference(p) \
({ \
	typeof(p) _________p1 = READ_ONCE(p); \
	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
	(_________p1); \
})
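/*
 * Usage sketch (illustrative; 'global_cfg' is a made-up pointer that is
 * published with smp_store_release() and never freed):
 *
 *	struct cfg *c = lockless_dereference(global_cfg);
 *	if (c)
 *		use(c->field);	// dependency-ordered after the load of c
 */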

/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
#ifdef CONFIG_KPROBES
# define __kprobes	__attribute__((__section__(".kprobes.text")))
# define nokprobe_inline	__always_inline
#else
# define __kprobes
# define nokprobe_inline	inline
#endif
#endif /* __LINUX_COMPILER_H */