#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#ifndef __ASSEMBLY__

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __kernel	__attribute__((address_space(0)))
# define __safe		__attribute__((safe))
# define __force	__attribute__((force))
# define __nocast	__attribute__((nocast))
# define __iomem	__attribute__((noderef, address_space(2)))
# define __must_hold(x)	__attribute__((context(x,1,1)))
# define __acquires(x)	__attribute__((context(x,0,1)))
# define __releases(x)	__attribute__((context(x,1,0)))
# define __acquire(x)	__context__(x,1)
# define __release(x)	__context__(x,-1)
# define __cond_lock(x,c)	((c) ? ({ __acquire(x); 1; }) : 0)
# define __percpu	__attribute__((noderef, address_space(3)))
#ifdef CONFIG_SPARSE_RCU_POINTER
# define __rcu		__attribute__((noderef, address_space(4)))
#else
# define __rcu
#endif
extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *);
#else
# define __user
# define __kernel
# define __safe
# define __force
# define __nocast
# define __iomem
# define __chk_user_ptr(x) (void)0
# define __chk_io_ptr(x) (void)0
# define __builtin_warning(x, y...) (1)
# define __must_hold(x)
# define __acquires(x)
# define __releases(x)
# define __acquire(x) (void)0
# define __release(x) (void)0
# define __cond_lock(x,c) (c)
# define __percpu
# define __rcu
#endif
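
/*
 * Usage sketch (illustrative only): how the sparse annotations above are
 * typically attached to declarations.  The names my_copy_flags(),
 * my_lock_helper(), my_unlock_helper() and my_lock are hypothetical.
 */
#if 0
/* The pointer lives in user address space; sparse rejects plain dereferences. */
int my_copy_flags(unsigned long __user *uptr);

/* Enters with my_lock released and returns with it held. */
void my_lock_helper(void) __acquires(my_lock);

/* Enters with my_lock held and returns with it released. */
void my_unlock_helper(void) __releases(my_lock);
#endif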

/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
#define ___PASTE(a,b) a##b
#define __PASTE(a,b) ___PASTE(a,b)

#ifdef __KERNEL__

#ifdef __GNUC__
#include <linux/compiler-gcc.h>
#endif

#define notrace __attribute__((no_instrument_function))

/* Intel compiler defines __GNUC__. So we will overwrite implementations
 * coming from above header files here
 */
#ifdef __INTEL_COMPILER
# include <linux/compiler-intel.h>
#endif

/* Clang compiler defines __GNUC__. So we will overwrite implementations
 * coming from above header files here
 */
#ifdef __clang__
#include <linux/compiler-clang.h>
#endif

/*
 * Generic compiler-dependent macros required for kernel
 * build go below this comment. Actual compiler/compiler version
 * specific implementations come from the above header files
 */

struct ftrace_branch_data {
	const char *func;
	const char *file;
	unsigned line;
	union {
		struct {
			unsigned long correct;
			unsigned long incorrect;
		};
		struct {
			unsigned long miss;
			unsigned long hit;
		};
		unsigned long miss_hit[2];
	};
};

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
 * to disable branch tracing on a per file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect) ({					\
			int ______r;					\
			static struct ftrace_branch_data		\
				__attribute__((__aligned__(4)))		\
				__attribute__((section("_ftrace_annotated_branch"))) \
				______f = {				\
				.func = __func__,			\
				.file = __FILE__,			\
				.line = __LINE__,			\
			};						\
			______r = likely_notrace(x);			\
			ftrace_likely_update(&______f, ______r, expect); \
			______r;					\
		})

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
# endif
# ifndef unlikely
#  define unlikely(x)	(__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
	if (__builtin_constant_p((cond)) ? !!(cond) :			\
	({								\
		int ______r;						\
		static struct ftrace_branch_data			\
			__attribute__((__aligned__(4)))			\
			__attribute__((section("_ftrace_branch")))	\
			______f = {					\
				.func = __func__,			\
				.file = __FILE__,			\
				.line = __LINE__,			\
			};						\
		______r = !!(cond);					\
		______f.miss_hit[______r]++;				\
		______r;						\
	}))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif
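
/*
 * Usage sketch (illustrative only): likely()/unlikely() wrap the condition
 * of a branch that is almost always taken one way, so the compiler can lay
 * out the fast path first.  struct my_ctx, my_fast_op(), my_handle_error()
 * and my_slow_setup() are hypothetical.
 */
#if 0
static inline int my_do_work(struct my_ctx *ctx)
{
	int err = my_fast_op(ctx);

	if (unlikely(err))		/* error path is expected to be rare */
		return my_handle_error(ctx, err);

	if (likely(ctx->ready))		/* common case: context already set up */
		return 0;

	return my_slow_setup(ctx);
}
#endif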

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

/* Unreachable code */
#ifndef unreachable
# define unreachable() do { } while (1)
#endif

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif

#ifndef OPTIMIZER_HIDE_VAR
#define OPTIMIZER_HIDE_VAR(var) barrier()
#endif
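
/*
 * Usage sketch (illustrative only): RELOC_HIDE() adds an offset to a pointer
 * through an unsigned long, so the optimizer cannot assume the result still
 * points into the original object (the classic use is per-CPU data).  The
 * names struct my_stats and my_cpu_stats() are hypothetical.
 */
#if 0
static inline struct my_stats *my_cpu_stats(struct my_stats *base,
					    unsigned long cpu_offset)
{
	/* base + cpu_offset, but opaque to alias and overflow analysis */
	return RELOC_HIDE(base, cpu_offset);
}
#endif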

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif

#include <uapi/linux/types.h>	/* __u8 ... __u64 for the *_ONCE helpers below */

static __always_inline void data_access_exceeds_word_size(void)
#ifdef __compiletime_warning
__compiletime_warning("data access exceeds word size and won't be atomic")
#endif
;

static __always_inline void data_access_exceeds_word_size(void)
{
}

static __always_inline void __read_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
#ifdef CONFIG_64BIT
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
#endif
	default:
		barrier();
		__builtin_memcpy((void *)res, (const void *)p, size);
		data_access_exceeds_word_size();
		barrier();
	}
}

static __always_inline void __assign_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
#ifdef CONFIG_64BIT
	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
#endif
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		data_access_exceeds_word_size();
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE, ASSIGN_ONCE and ACCESS_ONCE (see below), but only when the
 * compiler is aware of some particular ordering.  One way to make the
 * compiler aware of ordering is to put the two invocations of READ_ONCE,
 * ASSIGN_ONCE or ACCESS_ONCE() in different C statements.
 *
 * In contrast to ACCESS_ONCE these two macros will also work on aggregate
 * data types like structs or unions. If the size of the accessed data
 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
 * READ_ONCE() and ASSIGN_ONCE() will fall back to memcpy and print a
 * compile-time warning.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */

#define READ_ONCE(x) \
	({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; })

#define ASSIGN_ONCE(val, x) \
	({ typeof(x) __val; __val = val; __assign_once_size(&x, &__val, sizeof(__val)); __val; })
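
/*
 * Usage sketch (illustrative only): a flag shared between process-level code
 * and an interrupt handler on the same CPU, read and written exactly once per
 * statement.  struct my_dev and its my_irq_pending field are hypothetical.
 */
#if 0
static int my_wait_for_irq(struct my_dev *dev)
{
	/* publish the request; the store may not be torn or elided */
	ASSIGN_ONCE(1, dev->my_irq_pending);

	/* each iteration performs a fresh load, never a cached one */
	while (READ_ONCE(dev->my_irq_pending))
		cpu_relax();

	return 0;
}
#endif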

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#ifdef __KERNEL__
/*
 * Allow us to mark functions as 'deprecated' and have gcc emit a nice
 * warning for each use, in hopes of speeding the function's removal.
 * Usage is:
 * 		int __deprecated foo(void)
 */
#ifndef __deprecated
# define __deprecated		/* unimplemented */
#endif

#ifdef MODULE
#define __deprecated_for_modules __deprecated
#else
#define __deprecated_for_modules
#endif

#ifndef __must_check
#define __must_check
#endif

#ifndef CONFIG_ENABLE_MUST_CHECK
#undef __must_check
#define __must_check
#endif
#ifndef CONFIG_ENABLE_WARN_DEPRECATED
#undef __deprecated
#undef __deprecated_for_modules
#define __deprecated
#define __deprecated_for_modules
#endif

/*
 * Allow us to avoid 'defined but not used' warnings on functions and data,
 * as well as force them to be emitted to the assembly file.
 *
 * As of gcc 3.4, static functions that are not marked with attribute((used))
 * may be elided from the assembly file.  As of gcc 3.4, static data not so
 * marked will not be elided, but this may change in a future gcc version.
 *
 * NOTE: Because distributions shipped with a backported unit-at-a-time
 * compiler in gcc 3.3, we must define __used to be __attribute__((used))
 * for gcc >=3.3 instead of 3.4.
 *
 * In prior versions of gcc, such functions and data would be emitted, but
 * would be warned about except with attribute((unused)).
 *
 * Mark functions that are referenced only in inline assembly as __used so
 * the code is emitted even though it appears to be unreferenced.
 */
#ifndef __used
# define __used			/* unimplemented */
#endif

#ifndef __maybe_unused
# define __maybe_unused		/* unimplemented */
#endif

#ifndef __always_unused
# define __always_unused	/* unimplemented */
#endif

#ifndef noinline
#define noinline
#endif

/*
 * Rather than using noinline to prevent stack consumption, use
 * noinline_for_stack instead.  For documentation reasons.
 */
#define noinline_for_stack noinline

#ifndef __always_inline
#define __always_inline inline
#endif

#endif /* __KERNEL__ */

/*
 * From the GCC manual:
 *
 * Many functions do not examine any values except their arguments,
 * and have no effects except the return value.  Basically this is
 * just slightly more strict class than the `pure' attribute above,
 * since function is not allowed to read global memory.
 *
 * Note that a function that has pointer arguments and examines the
 * data pointed to must _not_ be declared `const'.  Likewise, a
 * function that calls a non-`const' function usually must not be
 * `const'.  It does not make sense for a `const' function to return
 * `void'.
 */
#ifndef __attribute_const__
# define __attribute_const__	/* unimplemented */
#endif

/*
 * Tell gcc if a function is cold. The compiler will assume any path
 * directly leading to the call is unlikely.
 */

#ifndef __cold
#define __cold
#endif

/* Simple shorthand for a section definition */
#ifndef __section
# define __section(S) __attribute__ ((__section__(#S)))
#endif

#ifndef __visible
#define __visible
#endif

/* Are two types/vars the same type (ignoring qualifiers)? */
#ifndef __same_type
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
#endif

/* Is this type a native word size -- useful for atomic operations */
#ifndef __native_word
# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
#endif
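
/*
 * Usage sketch (illustrative only): __same_type() is a compile-time check,
 * typically used to reject a mismatched pointer passed to a macro.  The
 * my_fill_buffer() macro is hypothetical; it relies on compiletime_assert()
 * defined further below.
 */
#if 0
#define my_fill_buffer(buf)						\
	do {								\
		compiletime_assert(__same_type(buf, char *),		\
				   "my_fill_buffer() expects a char *");\
		__builtin_memset(buf, 0, 16);				\
	} while (0)
#endif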

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
/*
 * Sparse complains of variable sized arrays due to the temporary variable in
 * __compiletime_assert. Unfortunately we can't just expand it out to make
 * sparse see a constant array size without breaking compiletime_assert on old
 * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
 */
# ifndef __CHECKER__
# define __compiletime_error_fallback(condition) \
	do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
# endif
#endif
#ifndef __compiletime_error_fallback
# define __compiletime_error_fallback(condition) do { } while (0)
#endif

#define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		bool __cond = !(condition);				\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (__cond)						\
			prefix ## suffix();				\
		__compiletime_error_fallback(__cond);			\
	} while (0)

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)

#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")

/*
 * Prevent the compiler from merging or refetching accesses.  The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering.  One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * This macro does absolutely -nothing- to prevent the CPU from reordering,
 * merging, or refetching absolutely anything at any time.  Its main intended
 * use is to mediate communication between process-level code and irq/NMI
 * handlers, all running on the same CPU.
 */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
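
/*
 * Usage sketch (illustrative only): ACCESS_ONCE() forces a real load or
 * store of a scalar through a volatile cast, e.g. when polling a flag that
 * an interrupt handler updates.  my_poll_loop() and my_stop_requested are
 * hypothetical.
 */
#if 0
static int my_poll_loop(int *my_stop_requested)
{
	int spins = 0;

	/* without ACCESS_ONCE the compiler could hoist this load out of the loop */
	while (!ACCESS_ONCE(*my_stop_requested))
		spins++;

	return spins;
}
#endif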

/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
#ifdef CONFIG_KPROBES
# define __kprobes	__attribute__((__section__(".kprobes.text")))
# define nokprobe_inline	__always_inline
#else
# define __kprobes
# define nokprobe_inline	inline
#endif
#endif /* __LINUX_COMPILER_H */