Subversion Repositories Kolibri OS

#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#ifndef __ASSEMBLY__

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __kernel	__attribute__((address_space(0)))
# define __safe		__attribute__((safe))
# define __force	__attribute__((force))
# define __nocast	__attribute__((nocast))
# define __iomem	__attribute__((noderef, address_space(2)))
# define __must_hold(x)	__attribute__((context(x,1,1)))
# define __acquires(x)	__attribute__((context(x,0,1)))
# define __releases(x)	__attribute__((context(x,1,0)))
# define __acquire(x)	__context__(x,1)
# define __release(x)	__context__(x,-1)
# define __cond_lock(x,c)	((c) ? ({ __acquire(x); 1; }) : 0)
# define __percpu	__attribute__((noderef, address_space(3)))
#ifdef CONFIG_SPARSE_RCU_POINTER
# define __rcu		__attribute__((noderef, address_space(4)))
#else
# define __rcu
#endif
extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *);
#else
# define __user
# define __kernel
# define __safe
# define __force
# define __nocast
# define __iomem
# define __chk_user_ptr(x) (void)0
# define __chk_io_ptr(x) (void)0
# define __builtin_warning(x, y...) (1)
# define __must_hold(x)
# define __acquires(x)
# define __releases(x)
# define __acquire(x) (void)0
# define __release(x) (void)0
# define __cond_lock(x,c) (c)
# define __percpu
# define __rcu
#endif
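
/*
 * A minimal usage sketch for the sparse annotations above; the names
 * my_lock(), my_unlock(), _my_trylock() and my_copy_in() are hypothetical.
 *
 *	void my_lock(spinlock_t *l) __acquires(l);
 *	void my_unlock(spinlock_t *l) __releases(l);
 *	int _my_trylock(spinlock_t *l);
 *	#define my_trylock(l) __cond_lock(l, _my_trylock(l))
 *	long my_copy_in(void *dst, const void __user *src, long n);
 *
 * Under sparse (__CHECKER__) these catch dereferences of __user pointers and
 * unbalanced lock contexts; with a normal compiler they expand to nothing.
 */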

/* Indirect macros required for expanded argument pasting, e.g. __LINE__. */
#define ___PASTE(a,b) a##b
#define __PASTE(a,b) ___PASTE(a,b)

#ifdef __KERNEL__

#ifdef __GNUC__
#include <linux/compiler-gcc.h>
#endif

#define notrace __attribute__((no_instrument_function))

/* Intel compiler defines __GNUC__. So we will overwrite implementations
 * coming from above header files here
 */
#ifdef __INTEL_COMPILER
# include <linux/compiler-intel.h>
#endif

/* Clang compiler defines __GNUC__. So we will overwrite implementations
 * coming from above header files here
 */
#ifdef __clang__
#include <linux/compiler-clang.h>
#endif

/*
 * Generic compiler-dependent macros required for kernel
 * build go below this comment. Actual compiler/compiler version
 * specific implementations come from the above header files
 */
struct ftrace_branch_data {
	const char *func;
	const char *file;
	unsigned line;
	union {
		struct {
			unsigned long correct;
			unsigned long incorrect;
		};
		struct {
			unsigned long miss;
			unsigned long hit;
		};
		unsigned long miss_hit[2];
	};
};

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
 * to disable branch tracing on a per file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect) ({					\
			int ______r;					\
			static struct ftrace_branch_data		\
				__attribute__((__aligned__(4)))		\
				__attribute__((section("_ftrace_annotated_branch"))) \
				______f = {				\
				.func = __func__,			\
				.file = __FILE__,			\
				.line = __LINE__,			\
			};						\
			______r = likely_notrace(x);			\
			ftrace_likely_update(&______f, ______r, expect); \
			______r;					\
		})

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
# endif
# ifndef unlikely
#  define unlikely(x)	(__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
	if (__builtin_constant_p((cond)) ? !!(cond) :			\
	({								\
		int ______r;						\
		static struct ftrace_branch_data			\
			__attribute__((__aligned__(4)))			\
			__attribute__((section("_ftrace_branch")))	\
			______f = {					\
				.func = __func__,			\
				.file = __FILE__,			\
				.line = __LINE__,			\
			};						\
		______r = !!(cond);					\
		______f.miss_hit[______r]++;				\
		______r;						\
	}))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif
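
/*
 * A minimal usage sketch: likely()/unlikely() only pass a prediction hint to
 * the compiler, the value of the condition is unchanged.  my_parse() is a
 * hypothetical example.
 *
 *	int my_parse(const char *s)
 *	{
 *		if (unlikely(s == NULL))
 *			return -EINVAL;		rare error path
 *		return s[0];
 *	}
 */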

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

/* Unreachable code */
#ifndef unreachable
# define unreachable() do { } while (1)
#endif

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif

#ifndef OPTIMIZER_HIDE_VAR
#define OPTIMIZER_HIDE_VAR(var) barrier()
#endif
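
/*
 * A minimal usage sketch: barrier() constrains only the compiler, not the
 * CPU.  In the hypothetical wait loop below it forces 'flag' to be re-read
 * on every pass instead of being cached in a register; OPTIMIZER_HIDE_VAR()
 * (a plain barrier() in this generic fallback) is used the same way to keep
 * a variable from being optimized away.
 *
 *	extern int flag;		set from an interrupt handler
 *	while (!flag)
 *		barrier();
 */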

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
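
/*
 * Expansion note: __UNIQUE_ID(prefix) pastes the prefix with __LINE__, so a
 * hypothetical "static int __UNIQUE_ID(counter);" on line 120 declares
 * __UNIQUE_ID_counter120.  Two uses on the same line still collide, hence
 * "not-quite-unique".
 */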

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#ifdef __KERNEL__
/*
 * Allow us to mark functions as 'deprecated' and have gcc emit a nice
 * warning for each use, in hopes of speeding the function's removal.
 * Usage is:
 * 		int __deprecated foo(void)
 */
#ifndef __deprecated
# define __deprecated		/* unimplemented */
#endif

#ifdef MODULE
#define __deprecated_for_modules __deprecated
#else
#define __deprecated_for_modules
#endif

#ifndef __must_check
#define __must_check
#endif

#ifndef CONFIG_ENABLE_MUST_CHECK
#undef __must_check
#define __must_check
#endif
#ifndef CONFIG_ENABLE_WARN_DEPRECATED
#undef __deprecated
#undef __deprecated_for_modules
#define __deprecated
#define __deprecated_for_modules
#endif
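
/*
 * A minimal usage sketch, with hypothetical functions: where the attributes
 * are implemented, ignoring the return value of a __must_check function and
 * any call to a __deprecated function both produce compiler warnings.
 *
 *	int __must_check my_reserve(unsigned long size);
 *	int __deprecated my_old_reserve(unsigned long size);
 */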

/*
 * Allow us to avoid 'defined but not used' warnings on functions and data,
 * as well as force them to be emitted to the assembly file.
 *
 * As of gcc 3.4, static functions that are not marked with attribute((used))
 * may be elided from the assembly file.  As of gcc 3.4, static data not so
 * marked will not be elided, but this may change in a future gcc version.
 *
 * NOTE: Because distributions shipped with a backported unit-at-a-time
 * compiler in gcc 3.3, we must define __used to be __attribute__((used))
 * for gcc >=3.3 instead of 3.4.
 *
 * In prior versions of gcc, such functions and data would be emitted, but
 * would be warned about except with attribute((unused)).
 *
 * Mark functions that are referenced only in inline assembly as __used so
 * the code is emitted even though it appears to be unreferenced.
 */
#ifndef __used
# define __used			/* unimplemented */
#endif
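
/*
 * A minimal usage sketch: a hypothetical helper that is called only from
 * inline assembly.  Without __used the compiler sees no C callers and may
 * drop it from the object file; with it, the symbol is always emitted.
 *
 *	static void __used my_asm_helper(void)
 *	{
 *		...
 *	}
 */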

#ifndef __maybe_unused
# define __maybe_unused		/* unimplemented */
#endif

#ifndef __always_unused
# define __always_unused	/* unimplemented */
#endif

#ifndef noinline
#define noinline
#endif

/*
 * Rather than using noinline to prevent stack consumption, use
 * noinline_for_stack instead.  For documentation reasons.
 */
#define noinline_for_stack noinline

#ifndef __always_inline
#define __always_inline inline
#endif

#endif /* __KERNEL__ */

/*
 * From the GCC manual:
 *
 * Many functions do not examine any values except their arguments,
 * and have no effects except the return value.  Basically this is
 * just slightly more strict class than the `pure' attribute above,
 * since function is not allowed to read global memory.
 *
 * Note that a function that has pointer arguments and examines the
 * data pointed to must _not_ be declared `const'.  Likewise, a
 * function that calls a non-`const' function usually must not be
 * `const'.  It does not make sense for a `const' function to return
 * `void'.
 */
#ifndef __attribute_const__
# define __attribute_const__	/* unimplemented */
#endif
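
/*
 * A minimal usage sketch: a hypothetical arithmetic helper that reads nothing
 * but its argument can be declared const, allowing the compiler to merge
 * repeated calls with the same argument.
 *
 *	static inline unsigned int __attribute_const__ my_hash32(unsigned int x)
 *	{
 *		return (x * 2654435761u) >> 16;
 *	}
 */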

/*
 * Tell gcc if a function is cold. The compiler will assume any path
 * directly leading to the call is unlikely.
 */

#ifndef __cold
#define __cold
#endif

/* Simple shorthand for a section definition */
#ifndef __section
# define __section(S) __attribute__ ((__section__(#S)))
#endif

#ifndef __visible
#define __visible
#endif

/* Are two types/vars the same type (ignoring qualifiers)? */
#ifndef __same_type
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
#endif

/* Is this type a native word size -- useful for atomic operations */
#ifndef __native_word
# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
#endif
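
/*
 * Both helpers evaluate to compile-time constants, for example:
 *
 *	__same_type(int, const int)	-> 1	(qualifiers are ignored)
 *	__same_type(int *, int[])	-> 0
 *	__native_word(long long)	-> 0 on 32-bit, 1 on 64-bit builds
 */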

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
/*
 * Sparse complains of variable sized arrays due to the temporary variable in
 * __compiletime_assert. Unfortunately we can't just expand it out to make
 * sparse see a constant array size without breaking compiletime_assert on old
 * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
 */
# ifndef __CHECKER__
# define __compiletime_error_fallback(condition) \
	do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
# endif
#endif
#ifndef __compiletime_error_fallback
# define __compiletime_error_fallback(condition) do { } while (0)
#endif

#define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		bool __cond = !(condition);				\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (__cond)						\
			prefix ## suffix();				\
		__compiletime_error_fallback(__cond);			\
	} while (0)

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)

#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")

/*
 * Prevent the compiler from merging or refetching accesses.  The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering.  One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * This macro does absolutely -nothing- to prevent the CPU from reordering,
 * merging, or refetching absolutely anything at any time.  Its main intended
 * use is to mediate communication between process-level code and irq/NMI
 * handlers, all running on the same CPU.
 */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
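
/*
 * A minimal usage sketch: force exactly one load per iteration of a flag
 * that an interrupt handler may change behind our back (my_shared_flag is
 * hypothetical).  Note again that this orders nothing at the CPU level.
 *
 *	while (!ACCESS_ONCE(my_shared_flag))
 *		cpu_relax();
 */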

/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
#ifdef CONFIG_KPROBES
# define __kprobes	__attribute__((__section__(".kprobes.text")))
# define nokprobe_inline	__always_inline
#else
# define __kprobes
# define nokprobe_inline	inline
#endif
#endif /* __LINUX_COMPILER_H */