#ifndef __LINUX_COMPILER_H
#error "Please don't include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead."
#endif

/*
 * Common definitions for all gcc versions go here.
 */
#define GCC_VERSION (__GNUC__ * 10000		\
		     + __GNUC_MINOR__ * 100	\
		     + __GNUC_PATCHLEVEL__)
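/*
 * Illustrative sketch (not part of the original header): the encoding packs
 * major/minor/patchlevel into one integer, e.g. gcc 4.9.2 becomes 40902, so
 * the version checks further down can use plain integer comparisons.
 */
#if 0	/* example only, never compiled */
#if GCC_VERSION >= 40902 && GCC_VERSION < 50000
/* a gcc 4.9.x-only workaround would go here */
#endif
#endif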

/* Optimization barrier */

/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")
/*
 * This version is here, e.g., to prevent dead store elimination on @ptr,
 * where gcc and llvm may behave differently when otherwise using
 * normal barrier(): while gcc behavior gets along with a normal
 * barrier(), llvm needs an explicit input variable to be assumed
 * clobbered. The issue is as follows: while the inline asm might
 * access any memory it wants, the compiler could have fit all of
 * @ptr into registers instead, and since @ptr never escaped
 * from that, it proved that the inline asm wasn't touching any of
 * it. This version works well with both compilers, i.e. we're telling
 * the compiler that the inline asm absolutely may see the contents
 * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
 */
#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
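/*
 * Illustrative sketch (not part of the original header): barrier_data() keeps
 * a scrubbing store from being treated as dead. The helper name is
 * hypothetical and memset() is assumed to be available in this context.
 */
#if 0	/* example only, never compiled */
static inline void example_scrub(void *buf, unsigned long len)
{
	memset(buf, 0, len);	/* looks dead: @buf is never read afterwards */
	barrier_data(buf);	/* asm may read @buf, so the store must stay */
}
#endif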

/*
 * This macro obfuscates arithmetic on a variable address so that gcc
 * shouldn't recognize the original var, and make assumptions about it.
 *
 * This is needed because the C standard makes it undefined to do
 * pointer arithmetic on "objects" outside their boundaries and the
 * gcc optimizers assume this is the case. In particular they
 * assume such arithmetic does not wrap.
 *
 * A miscompilation has been observed because of this on PPC.
 * To work around it we hide the relationship of the pointer and the object
 * using this macro.
 *
 * Versions of the ppc64 compiler before 4.1 had a bug where use of
 * RELOC_HIDE could trash r30. The bug can be worked around by changing
 * the inline assembly constraint from =g to =r, in this particular
 * case either is valid.
 */
#define RELOC_HIDE(ptr, off)						\
({									\
	unsigned long __ptr;						\
	__asm__ ("" : "=r"(__ptr) : "0"(ptr));				\
	(typeof(ptr)) (__ptr + (off));					\
})
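/*
 * Illustrative sketch (not part of the original header): computing an address
 * at a fixed offset from @base without letting gcc reason about the resulting
 * pointer. The helper name is hypothetical.
 */
#if 0	/* example only, never compiled */
static inline void *example_shifted_ptr(void *base, unsigned long off)
{
	return RELOC_HIDE(base, off);	/* gcc no longer ties this to @base */
}
#endif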

/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var)						\
	__asm__ ("" : "=r" (var) : "0" (var))
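/*
 * Illustrative sketch (not part of the original header): hiding a value's
 * provenance so gcc cannot constant-fold it or merge it with an identical
 * variable. Names are hypothetical.
 */
#if 0	/* example only, never compiled */
static inline int example_force_runtime_compare(int a, int b)
{
	OPTIMIZER_HIDE_VAR(a);		/* gcc must now treat @a as unknown */
	return a == b;
}
#endif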

#ifdef __CHECKER__
#define __must_be_array(a)	0
#else
/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
#endif
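/*
 * Illustrative sketch (not part of the original header): __must_be_array()
 * is what lets an ARRAY_SIZE()-style macro reject plain pointers at build
 * time. The macro below mirrors the kernel's ARRAY_SIZE() shape.
 */
#if 0	/* example only, never compiled */
#define EXAMPLE_ARRAY_SIZE(arr) \
	(sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
#endif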

/*
 * Force always-inline if the user requests it so via the .config,
 * or if gcc is too old:
 */
#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) ||		\
    !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
#define inline		inline		__attribute__((always_inline)) notrace
#define __inline__	__inline__	__attribute__((always_inline)) notrace
#define __inline	__inline	__attribute__((always_inline)) notrace
#else
/* A lot of inline functions can cause havoc with function tracing */
#define inline		inline		notrace
#define __inline__	__inline__	notrace
#define __inline	__inline	notrace
#endif

#define __always_inline	inline __attribute__((always_inline))
#define  noinline	__attribute__((noinline))

#define __deprecated	__attribute__((deprecated))
#define __packed	__attribute__((packed))
#define __weak		__attribute__((weak))
#define __alias(symbol)	__attribute__((alias(#symbol)))

/*
 * it doesn't make sense on ARM (currently the only user of __naked)
 * to trace naked functions because then mcount is called without
 * stack and frame pointer being set up and there is no chance to
 * restore the lr register to the value before mcount was called.
 *
 * The asm() bodies of naked functions often depend on standard calling
 * conventions, therefore they must be noinline and noclone.
 *
 * GCC 4.[56] currently fail to enforce this, so we must do so ourselves.
 * See GCC PR44290.
 */
#define __naked		__attribute__((naked)) noinline __noclone notrace

#define __noreturn	__attribute__((noreturn))

/*
 * From the GCC manual:
 *
 * Many functions have no effects except the return value and their
 * return value depends only on the parameters and/or global
 * variables.  Such a function can be subject to common subexpression
 * elimination and loop optimization just as an arithmetic operator
 * would be.
 * [...]
 */
#define __pure			__attribute__((pure))
#define __aligned(x)		__attribute__((aligned(x)))
#define __printf(a, b)		__attribute__((format(printf, a, b)))
#define __scanf(a, b)		__attribute__((format(scanf, a, b)))
#define __attribute_const__	__attribute__((__const__))
#define __maybe_unused		__attribute__((unused))
#define __always_unused		__attribute__((unused))
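/*
 * Illustrative sketch (not part of the original header): __printf() lets gcc
 * type-check format strings against their arguments. The function below is
 * hypothetical.
 */
#if 0	/* example only, never compiled */
__printf(2, 3)
int example_log(int level, const char *fmt, ...);
/* example_log(1, "%s", 42) would now trigger a -Wformat warning */
#endif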

/* gcc version specific checks */

#if GCC_VERSION < 30200
# error Sorry, your compiler is too old - please upgrade it.
#endif

#if GCC_VERSION < 30300
# define __used			__attribute__((__unused__))
#else
# define __used			__attribute__((__used__))
#endif

#ifdef CONFIG_GCOV_KERNEL
# if GCC_VERSION < 30400
#   error "GCOV profiling support for gcc versions below 3.4 not included"
# endif /* __GNUC_MINOR__ */
#endif /* CONFIG_GCOV_KERNEL */

#if GCC_VERSION >= 30400
#define __must_check		__attribute__((warn_unused_result))
#endif

#if GCC_VERSION >= 40000

/* GCC 4.1.[01] miscompiles __weak */
#ifdef __KERNEL__
# if GCC_VERSION >= 40100 && GCC_VERSION <= 40101
#  error Your version of gcc miscompiles the __weak directive
# endif
#endif

#define __used			__attribute__((__used__))
#define __compiler_offsetof(a, b)					\
	__builtin_offsetof(a, b)

#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
#endif

#if GCC_VERSION >= 40300
/* Mark functions as cold. gcc will assume any path leading to a call
 * to them will be unlikely.  This means a lot of manual unlikely()s
 * are unnecessary now for any paths leading to the usual suspects
 * like BUG(), printk(), panic() etc. [but let's keep them for now for
 * older compilers]
 *
 * Early snapshots of gcc 4.3 don't support this and we can't detect this
 * in the preprocessor, but we can live with this because they're unreleased.
 * Maketime probing would be overkill here.
 *
 * gcc also has a __attribute__((__hot__)) to move hot functions into
 * a special section, but I don't see any sense in this right now in
 * the kernel context
 */
#define __cold			__attribute__((__cold__))

#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)

#ifndef __CHECKER__
# define __compiletime_warning(message) __attribute__((warning(message)))
# define __compiletime_error(message) __attribute__((error(message)))
#endif /* __CHECKER__ */
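/*
 * Illustrative sketch (not part of the original header): __compiletime_error()
 * attaches a diagnostic to a function that must never survive optimization,
 * the pattern behind BUILD_BUG_ON()-style checks. Names are hypothetical.
 */
#if 0	/* example only, never compiled */
extern void __example_bad_size(void) __compiletime_error("bad size");

#define EXAMPLE_CHECK_SIZE(x)				\
	do {						\
		if (sizeof(x) > 8)			\
			__example_bad_size();		\
	} while (0)
#endif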
#endif /* GCC_VERSION >= 40300 */

#if GCC_VERSION >= 40500
/*
 * Mark a position in code as unreachable.  This can be used to
 * suppress control flow warnings after asm blocks that transfer
 * control elsewhere.
 *
 * Early snapshots of gcc 4.5 don't support this and we can't detect
 * this in the preprocessor, but we can live with this because they're
 * unreleased.  Really, we need to have autoconf for the kernel.
 */
#define unreachable() __builtin_unreachable()
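/*
 * Illustrative sketch (not part of the original header): unreachable() after
 * a switch that covers every legal value silences "control reaches end of
 * non-void function" without emitting any fall-through code.
 */
#if 0	/* example only, never compiled */
static int example_decode(int op)
{
	switch (op) {
	case 0: return 10;
	case 1: return 20;
	}
	unreachable();	/* callers guarantee op is 0 or 1 */
}
#endif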

/* Mark a function definition as prohibited from being cloned. */
#define __noclone	__attribute__((__noclone__, __optimize__("no-tracer")))

#endif /* GCC_VERSION >= 40500 */

#if GCC_VERSION >= 40600
/*
 * When used with Link Time Optimization, gcc can optimize away C functions or
 * variables which are referenced only from assembly code.  __visible tells the
 * optimizer that something else uses this function or variable, thus preventing
 * this.
 */
#define __visible	__attribute__((externally_visible))
#endif


#if GCC_VERSION >= 40900 && !defined(__CHECKER__)
/*
 * __assume_aligned(n, k): Tell the optimizer that the returned
 * pointer can be assumed to be k modulo n. The second argument is
 * optional (default 0), so we use a variadic macro to make the
 * shorthand.
 *
 * Beware: Do not apply this to functions which may return
 * ERR_PTRs. Also, it is probably unwise to apply it to functions
 * returning extra information in the low bits (but in that case the
 * compiler should see some alignment anyway, when the return value is
 * massaged by 'flags = ptr & 3; ptr &= ~3;').
 */
#define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
#endif
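/*
 * Illustrative sketch (not part of the original header): promising the
 * optimizer that a hypothetical allocator always returns 64-byte aligned
 * memory, so callers need no runtime alignment checks.
 */
#if 0	/* example only, never compiled */
__assume_aligned(64)
void *example_alloc_aligned64(unsigned long size);
#endif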

/*
 * GCC 'asm goto' miscompiles certain code sequences:
 *
 *   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
 *
 * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
 *
 * (asm goto is automatically volatile - the naming reflects this.)
 */
#define asm_volatile_goto(x...)	do { asm goto(x); asm (""); } while (0)
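/*
 * Illustrative sketch (not part of the original header): asm_volatile_goto()
 * is used exactly like asm goto, here jumping to a C label. The "jmp" mnemonic
 * assumes x86; the function is hypothetical.
 */
#if 0	/* example only, never compiled */
static int example_branch(void)
{
	asm_volatile_goto("jmp %l[t_yes]" : : : : t_yes);
	return 0;
t_yes:
	return 1;
}
#endif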

#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
#if GCC_VERSION >= 40400
#define __HAVE_BUILTIN_BSWAP32__
#define __HAVE_BUILTIN_BSWAP64__
#endif
#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
#define __HAVE_BUILTIN_BSWAP16__
#endif
#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */

#if GCC_VERSION >= 70000
#define KASAN_ABI_VERSION 5
#elif GCC_VERSION >= 50000
#define KASAN_ABI_VERSION 4
#elif GCC_VERSION >= 40902
#define KASAN_ABI_VERSION 3
#endif

#if GCC_VERSION >= 40902
/*
 * Tell the compiler that address safety instrumentation (KASAN)
 * should not be applied to that function.
 * Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 */
#define __no_sanitize_address __attribute__((no_sanitize_address))
#endif

#endif	/* gcc version >= 40000 specific checks */

#if !defined(__noclone)
#define __noclone	/* not needed */
#endif

#if !defined(__no_sanitize_address)
#define __no_sanitize_address
#endif

/*
 * A trick to suppress uninitialized variable warning without generating any
 * code
 */
#define uninitialized_var(x) x = x
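/*
 * Illustrative sketch (not part of the original header): silencing a false
 * "may be used uninitialized" warning when every path that reads the value
 * also assigned it first. Names are hypothetical.
 */
#if 0	/* example only, never compiled */
static int example_pick(int use_default)
{
	int uninitialized_var(val);	/* expands to: int val = val; */

	if (!use_default)
		val = 42;
	return use_default ? 0 : val;	/* val is set on every path that reads it */
}
#endif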