Subversion Repositories Kolibri OS


Rev 5270 vs Rev 6082 (unified diff: '-' lines exist only in Rev 5270, '+' lines only in Rev 6082)
Line 15... Line 15...
 # define __releases(x)	__attribute__((context(x,1,0)))
 # define __acquire(x)	__context__(x,1)
 # define __release(x)	__context__(x,-1)
 # define __cond_lock(x,c)	((c) ? ({ __acquire(x); 1; }) : 0)
 # define __percpu	__attribute__((noderef, address_space(3)))
+# define __pmem		__attribute__((noderef, address_space(5)))
 #ifdef CONFIG_SPARSE_RCU_POINTER
 # define __rcu		__attribute__((noderef, address_space(4)))
 #else
 # define __rcu
 #endif
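
Note: these annotations are only meaningful under sparse (__CHECKER__); for a regular compiler they compile away. As a minimal sketch of how __cond_lock() is typically used (the wrapper names below are hypothetical, though the pattern matches the kernel's trylock wrappers):

/* Hypothetical trylock wrapper: __cond_lock() tells sparse the lock
 * is acquired only on the path where _my_trylock() returned nonzero,
 * so conditional-locking code checks cleanly. */
extern int _my_trylock(spinlock_t *lock);
#define my_trylock(lock) __cond_lock(lock, _my_trylock(lock))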
Line 40... Line 41...
 # define __acquire(x) (void)0
 # define __release(x) (void)0
 # define __cond_lock(x,c) (c)
 # define __percpu
 # define __rcu
+# define __pmem
 #endif
Line 46... Line 48...
 
 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
 #define ___PASTE(a,b) a##b
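
Note: the indirection matters because ## suppresses macro expansion of its operands; a second layer forces arguments like __LINE__ to expand first. A minimal illustration:

#define ___PASTE(a,b) a##b
#define __PASTE(a,b) ___PASTE(a,b)

/* On line 42 of a file:
 *   ___PASTE(id_, __LINE__) -> id___LINE__   (wrong: __LINE__ never expands)
 *   __PASTE(id_, __LINE__)  -> id_42         (argument expands, then pastes) */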
Line 52... Line 54...
 
 #ifdef __GNUC__
 #include <linux/compiler-gcc.h>
 #endif
 
+#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
+#define notrace __attribute__((hotpatch(0,0)))
+#else
 #define notrace __attribute__((no_instrument_function))
+#endif
 
 /* Intel compiler defines __GNUC__. So we will overwrite implementations
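
Note: notrace keeps function-entry instrumentation (mcount/fentry, or the hotpatch prologue when CC_USING_HOTPATCH is set) out of a function. A hedged sketch of the usual reason (function name hypothetical):

/* Code reachable from the tracer itself must not be traced, or each
 * traced call would re-enter the tracer recursively. */
static notrace unsigned long tracer_clock_sample(void)
{
	return 0;	/* placeholder; a real tracer reads a clock here */
}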
Line 163... Line 169...
 /* Optimization barrier */
 #ifndef barrier
 # define barrier() __memory_barrier()
 #endif
 
+#ifndef barrier_data
+# define barrier_data(ptr) barrier()
+#endif
 
 /* Unreachable code */
 #ifndef unreachable
 # define unreachable() do { } while (1)
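
Note: barrier_data(ptr) exists because a plain barrier() does not name the pointer as an input, so the compiler can still prove a dying buffer dead and delete stores to it. A sketch modeled on the kernel's memzero_explicit():

/* Clearing a secret buffer that is about to go out of scope: the
 * barrier takes the pointer as an input operand (plus a memory
 * clobber), so the preceding stores count as used and survive
 * dead-store elimination. */
static void wipe_secret(char *buf, size_t len)
{
	memset(buf, 0, len);
	barrier_data(buf);
}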
Line 186... Line 196...
 # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
 #endif
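
Note: __UNIQUE_ID() mints an identifier unique to its expansion site; this fallback pastes __LINE__ using the indirect pasting macros above. For instance (the declaration below is a hypothetical use):

/* If expanded on line 57, __UNIQUE_ID(foo) becomes __UNIQUE_ID_foo57.
 * (The GCC variant in compiler-gcc.h pastes __COUNTER__ instead, so
 * several uses on the same line still get distinct names.) */
static int __UNIQUE_ID(dummy);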
Line 188... Line 198...
 
 #include <uapi/linux/types.h>
 
-static __always_inline void data_access_exceeds_word_size(void)
-#ifdef __compiletime_warning
-__compiletime_warning("data access exceeds word size and won't be atomic")
-#endif
-;
-
-static __always_inline void data_access_exceeds_word_size(void)
-{
-}
-
-static __always_inline void __read_once_size(volatile void *p, void *res, int size)
-{
-	switch (size) {
-	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
-	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
-	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
-#ifdef CONFIG_64BIT
-	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
-#endif
-	default:
-		barrier();
-		__builtin_memcpy((void *)res, (const void *)p, size);
-		data_access_exceeds_word_size();
-		barrier();
-	}
-}
-
-static __always_inline void __assign_once_size(volatile void *p, void *res, int size)
-{
-	switch (size) {
-	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
-	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
-	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
-#ifdef CONFIG_64BIT
-	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
-#endif
-	default:
-		barrier();
-		__builtin_memcpy((void *)p, (const void *)res, size);
-		data_access_exceeds_word_size();
-		barrier();
-	}
-}
+#define __READ_ONCE_SIZE						\
+({									\
+	switch (size) {							\
+	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
+	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
+	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
+	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
+	default:							\
+		barrier();						\
+		__builtin_memcpy((void *)res, (const void *)p, size);	\
+		barrier();						\
+	}								\
+})
+
+static __always_inline
+void __read_once_size(const volatile void *p, void *res, int size)
+{
+	__READ_ONCE_SIZE;
+}
+
+#ifdef CONFIG_KASAN
+/*
+ * This function is not 'inline' because __no_sanitize_address conflicts
+ * with inlining. Attempt to inline it may cause a build failure.
+ * 	https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
+ * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
+ */
+static __no_sanitize_address __maybe_unused
+void __read_once_size_nocheck(const volatile void *p, void *res, int size)
+{
+	__READ_ONCE_SIZE;
+}
+#else
+static __always_inline
+void __read_once_size_nocheck(const volatile void *p, void *res, int size)
+{
+	__READ_ONCE_SIZE;
+}
+#endif
+
+static __always_inline void __write_once_size(volatile void *p, void *res, int size)
+{
+	switch (size) {
+	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
+	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
+	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
+	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
+	default:
+		barrier();
+		__builtin_memcpy((void *)p, (const void *)res, size);
+		barrier();
+	}
+}
 
 /*
  * Prevent the compiler from merging or refetching reads or writes. The
  * compiler is also forbidden from reordering successive instances of
- * READ_ONCE, ASSIGN_ONCE and ACCESS_ONCE (see below), but only when the
+ * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
  * compiler is aware of some particular ordering.  One way to make the
  * compiler aware of ordering is to put the two invocations of READ_ONCE,
- * ASSIGN_ONCE or ACCESS_ONCE() in different C statements.
+ * WRITE_ONCE or ACCESS_ONCE() in different C statements.
  *
  * In contrast to ACCESS_ONCE these two macros will also work on aggregate
  * data types like structs or unions. If the size of the accessed data
  * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
- * READ_ONCE() and ASSIGN_ONCE()  will fall back to memcpy and print a
+ * READ_ONCE() and WRITE_ONCE()  will fall back to memcpy and print a
  * compile-time warning.
  *
  * Their two major use cases are: (1) Mediating communication between
  * process-level code and irq/NMI handlers, all running on the same CPU,
  * and (2) Ensuring that the compiler does not  fold, spindle, or otherwise
  * mutilate accesses that either do not require ordering or that interact
  * with an explicit memory barrier or atomic instruction that provides the
  * required ordering.
  */
 
+#define __READ_ONCE(x, check)						\
+({									\
+	union { typeof(x) __val; char __c[1]; } __u;			\
+	if (check)							\
+		__read_once_size(&(x), __u.__c, sizeof(x));		\
+	else								\
+		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
+	__u.__val;							\
+})
+#define READ_ONCE(x) __READ_ONCE(x, 1)
+
+/*
+ * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
+ * to hide memory access from KASAN.
+ */
+#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
+
+#define WRITE_ONCE(x, val) \
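
Note: a minimal usage sketch of the pair (variable and function names hypothetical), matching the comment's first use case, process-level code communicating with an interrupt handler on the same CPU:

static int flag;

void irq_handler(void)
{
	WRITE_ONCE(flag, 1);		/* store cannot be torn or elided */
}

void wait_for_irq(void)
{
	while (!READ_ONCE(flag))	/* forces a fresh load each iteration */
		;
}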
Line 376... Line 415...
 
 #ifndef __visible
 #define __visible
 #endif
+
+/*
+ * Assume alignment of return value.
+ */
+#ifndef __assume_aligned
+#define __assume_aligned(a, ...)
+#endif
 
 /* Are two types/vars the same type (ignoring qualifiers)? */
 #ifndef __same_type
 # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
 #endif
 
 /* Is this type a native word size -- useful for atomic operations */
 #ifndef __native_word
-# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
+# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
 #endif
445
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
492
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
446
 * but only when the compiler is aware of some particular ordering.  One way
493
 * but only when the compiler is aware of some particular ordering.  One way
447
 * to make the compiler aware of ordering is to put the two invocations of
494
 * to make the compiler aware of ordering is to put the two invocations of
448
 * ACCESS_ONCE() in different C statements.
495
 * ACCESS_ONCE() in different C statements.
449
 *
496
 *
-
 
497
 * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
450
 * This macro does absolutely -nothing- to prevent the CPU from reordering,
498
 * on a union member will work as long as the size of the member matches the
451
 * merging, or refetching absolutely anything at any time.  Its main intended
499
 * size of the union and the size is smaller than word size.
-
 
500
 *
452
 * use is to mediate communication between process-level code and irq/NMI
501
 * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
453
 * handlers, all running on the same CPU.
502
 * between process-level code and irq/NMI handlers, all running on the same CPU,
-
 
503
 * and (2) Ensuring that the compiler does not  fold, spindle, or otherwise
-
 
504
 * mutilate accesses that either do not require ordering or that interact
-
 
505
 * with an explicit memory barrier or atomic instruction that provides the
-
 
506
 * required ordering.
-
 
507
 *
-
 
508
 * If possible use READ_ONCE()/WRITE_ONCE() instead.
-
 
509
 */
-
 
510
#define __ACCESS_ONCE(x) ({ \
-
 
511
	 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
-
 
512
	(volatile typeof(x) *)&(x); })
-
 
513
#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
-
 
514
 
-
 
515
/**
-
 
516
 * lockless_dereference() - safely load a pointer for later dereference
-
 
517
 * @p: The pointer to load
-
 
518
 *
-
 
519
 * Similar to rcu_dereference(), but for situations where the pointed-to
-
 
520
 * object's lifetime is managed by something other than RCU.  That
-
 
521
 * "something other" might be reference counting or simple immortality.
454
 */
522
 */
455
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
523
#define lockless_dereference(p) \
-
 
524
({ \
-
 
525
	typeof(p) _________p1 = READ_ONCE(p); \
-
 
526
	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
-
 
527
	(_________p1); \
-
 
528
})
Line 456... Line 529...
456
 
529
 
457
/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
530
/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
458
#ifdef CONFIG_KPROBES
531
#ifdef CONFIG_KPROBES
459
# define __kprobes	__attribute__((__section__(".kprobes.text")))
532
# define __kprobes	__attribute__((__section__(".kprobes.text")))
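
Note: a sketch of the intended pairing for the new lockless_dereference() (type and variable names hypothetical): the writer publishes a fully initialized object with smp_store_release(), and readers pick the pointer up with lockless_dereference() so reads through it are dependency-ordered after the load:

struct foo { int data; };
static struct foo *gp;	/* shared pointer, lifetime managed by refcounting, not RCU */

/* Writer: initialization is ordered before publication. */
void publish(struct foo *p)
{
	p->data = 42;
	smp_store_release(&gp, p);
}

/* Reader: the dependency barrier orders p->data after the load of gp. */
int consume(void)
{
	struct foo *p = lockless_dereference(gp);

	return p ? p->data : 0;
}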