/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2001
 *
 * Author: Dipankar Sarma
 *
 * Based on the original work by Paul McKenney
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		http://lse.sourceforge.net/locking/rcupdate.html
 *
 */

#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H
/* Header names reconstructed from the upstream linux/rcupdate.h of this era. */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
//#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/lockdep.h>
#include <linux/completion.h>
//#include <linux/debugobjects.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/ktime.h>

#include <asm/barrier.h>

extern int rcu_expedited; /* for sysctl */

#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_expedited(void)  /* Internal RCU use. */
{
	return false;
}

static inline void rcu_expedite_gp(void)
{
}

static inline void rcu_unexpedite_gp(void)
{
}
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
#endif /* #else #ifdef CONFIG_TINY_RCU */

enum rcutorture_type {
	RCU_FLAVOR,
	RCU_BH_FLAVOR,
	RCU_SCHED_FLAVOR,
	RCU_TASKS_FLAVOR,
	SRCU_FLAVOR,
	INVALID_RCU_FLAVOR
};

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gpnum, unsigned long *completed);
void rcutorture_record_test_transition(void);
void rcutorture_record_progress(unsigned long vernum);
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
					  int *flags,
					  unsigned long *gpnum,
					  unsigned long *completed)
{
	*flags = 0;
	*gpnum = 0;
	*completed = 0;
}
static inline void rcutorture_record_test_transition(void)
{
}
static inline void rcutorture_record_progress(unsigned long vernum)
{
}
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif
#endif

#define UINT_CMP_GE(a, b)	(UINT_MAX / 2 >= (a) - (b))
#define UINT_CMP_LT(a, b)	(UINT_MAX / 2 < (a) - (b))
#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))
#define ulong2long(a)		(*(long *)(&(a)))
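
/*
 * Usage sketch (illustrative, not part of this header's API): the _CMP_
 * macros order counters modulo wraparound, treating two values as ordered
 * whenever they are less than half the type's range apart, so comparisons
 * stay correct after the counter overflows.  Assuming a hypothetical
 * free-running counter "gp_seq":
 *
 *	unsigned long snap = READ_ONCE(gp_seq);
 *	do_something();
 *	if (ULONG_CMP_LT(snap, READ_ONCE(gp_seq)))
 *		pr_info("counter advanced past our snapshot\n");
 */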

/* Exported common interfaces */

#ifdef CONFIG_PREEMPT_RCU

/**
 * call_rcu() - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all pre-existing RCU read-side
 * critical sections have completed.  However, the callback function
 * might well execute concurrently with RCU read-side critical sections
 * that started after call_rcu() was invoked.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing RCU read-side critical sections.  On systems with more
 * than one CPU, this means that when "func()" is invoked, each CPU is
 * guaranteed to have executed a full memory barrier since the end of its
 * last RCU read-side critical section whose beginning preceded the call
 * to call_rcu().  It also means that each CPU executing an RCU read-side
 * critical section that continues beyond the start of "func()" must have
 * executed a memory barrier after the call_rcu() but before the beginning
 * of that RCU read-side critical section.  Note that these guarantees
 * include CPUs that are offline, idle, or executing in user mode, as
 * well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
 * resulting RCU callback function "func()", then both CPU A and CPU B are
 * guaranteed to execute a full memory barrier during the time interval
 * between the call to call_rcu() and the invocation of "func()" -- even
 * if CPU A and CPU B are the same CPU (but again only if the system has
 * more than one CPU).
 */
void call_rcu(struct rcu_head *head,
	      rcu_callback_t func);

#else /* #ifdef CONFIG_PREEMPT_RCU */

/* In classic RCU, call_rcu() is just call_rcu_sched(). */
#define	call_rcu	call_rcu_sched

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
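
/*
 * Usage sketch (illustrative; "struct foo" and foo_reclaim() are
 * hypothetical names, following the pattern in the kernel's RCU
 * documentation).  The callback receives the embedded rcu_head, and
 * container_of() recovers the enclosing structure:
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		struct foo *fp = container_of(rhp, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 *	...
 *	call_rcu(&fp->rcu, foo_reclaim);
 */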

/**
 * call_rcu_bh() - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler. This means that read-side critical sections in process
 * context must not be interrupted by softirqs. This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by:
 *  - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
 *  OR
 *  - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
 *  These may be nested.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_bh(struct rcu_head *head,
		 rcu_callback_t func);

/**
 * call_rcu_sched() - Queue an RCU callback for invocation after a sched grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_sched() assumes
 * that the read-side critical sections end on enabling of preemption
 * or on voluntary preemption.
 * RCU read-side critical sections are delimited by:
 *  - rcu_read_lock_sched() and rcu_read_unlock_sched(),
 *  OR
 *  anything that disables preemption.
 *  These may be nested.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_sched(struct rcu_head *head,
		    rcu_callback_t func);

void synchronize_sched(void);

/**
 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_tasks() assumes
 * that the read-side critical sections end at a voluntary context
 * switch (not a preemption!), entry into idle, or transition to usermode
 * execution.  As such, there are no read-side primitives analogous to
 * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
 * to determine that all tasks have passed through a safe state, not so
 * much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks(void);
void rcu_barrier_tasks(void);

#ifdef CONFIG_PREEMPT_RCU

void __rcu_read_lock(void);
void __rcu_read_unlock(void);
void rcu_read_unlock_special(struct task_struct *t);
void synchronize_rcu(void);

/*
 * Defined as a macro as it is a very low level header included from
 * areas that don't even know about current.  This gives the rcu_read_lock()
 * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
 * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
 */
#define rcu_preempt_depth() (current->rcu_read_lock_nesting)

#else /* #ifdef CONFIG_PREEMPT_RCU */

static inline void __rcu_read_lock(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
		preempt_disable();
}

static inline void __rcu_read_unlock(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
		preempt_enable();
}

static inline void synchronize_rcu(void)
{
	synchronize_sched();
}

static inline int rcu_preempt_depth(void)
{
	return 0;
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/* Internal to kernel */
void rcu_init(void);
void rcu_end_inkernel_boot(void);
void rcu_sched_qs(void);
void rcu_bh_qs(void);
void rcu_check_callbacks(int user);
struct notifier_block;
int rcu_cpu_notify(struct notifier_block *self,
		   unsigned long action, void *hcpu);

#ifdef CONFIG_RCU_STALL_COMMON
void rcu_sysrq_start(void);
void rcu_sysrq_end(void);
#else /* #ifdef CONFIG_RCU_STALL_COMMON */
static inline void rcu_sysrq_start(void)
{
}
static inline void rcu_sysrq_end(void)
{
}
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */

#ifdef CONFIG_NO_HZ_FULL
void rcu_user_enter(void);
void rcu_user_exit(void);
#else
static inline void rcu_user_enter(void) { }
static inline void rcu_user_exit(void) { }
static inline void rcu_user_hooks_switch(struct task_struct *prev,
					 struct task_struct *next) { }
#endif /* CONFIG_NO_HZ_FULL */

#ifdef CONFIG_RCU_NOCB_CPU
void rcu_init_nohz(void);
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
static inline void rcu_init_nohz(void)
{
}
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */

/**
 * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
 * @a: Code that RCU needs to pay attention to.
 *
 * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden
 * in the inner idle loop, that is, between the rcu_idle_enter() and
 * the rcu_idle_exit() -- RCU will happily ignore any such read-side
 * critical sections.  However, things like powertop need tracepoints
 * in the inner idle loop.
 *
 * This macro provides the way out:  RCU_NONIDLE(do_something_with_RCU())
 * will tell RCU that it needs to pay attention, invoke its argument
 * (in this example, a call to the do_something_with_RCU() function),
 * and then tell RCU to go back to ignoring this CPU.  It is permissible
 * to nest RCU_NONIDLE() wrappers, but the nesting level is currently
 * quite limited.  If deeper nesting is required, it will be necessary
 * to adjust DYNTICK_TASK_NESTING_VALUE accordingly.
 */
#define RCU_NONIDLE(a) \
	do { \
		rcu_irq_enter(); \
		do { a; } while (0); \
		rcu_irq_exit(); \
	} while (0)
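
/*
 * Usage sketch (illustrative): from the inner idle loop, a tracepoint that
 * requires RCU protection could be wrapped as
 *
 *	RCU_NONIDLE(trace_my_idle_event(state));
 *
 * where trace_my_idle_event() is a hypothetical tracepoint; RCU watches
 * this CPU only for the duration of the wrapped statement.
 */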

/*
 * Note a voluntary context switch for RCU-tasks benefit.  This is a
 * macro rather than an inline function to avoid #include hell.
 */
#ifdef CONFIG_TASKS_RCU
#define TASKS_RCU(x) x
extern struct srcu_struct tasks_rcu_exit_srcu;
#define rcu_note_voluntary_context_switch(t) \
	do { \
		rcu_all_qs(); \
		if (READ_ONCE((t)->rcu_tasks_holdout)) \
			WRITE_ONCE((t)->rcu_tasks_holdout, false); \
	} while (0)
#else /* #ifdef CONFIG_TASKS_RCU */
#define TASKS_RCU(x) do { } while (0)
#define rcu_note_voluntary_context_switch(t)	rcu_all_qs()
#endif /* #else #ifdef CONFIG_TASKS_RCU */

/**
 * cond_resched_rcu_qs - Report potential quiescent states to RCU
 *
 * This macro resembles cond_resched(), except that it is defined to
 * report potential quiescent states to RCU-tasks even if the cond_resched()
 * machinery were to be shut off, as some advocate for PREEMPT kernels.
 */
#define cond_resched_rcu_qs() \
do { \
	if (!cond_resched()) \
		rcu_note_voluntary_context_switch(current); \
} while (0)
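
/*
 * Usage sketch (illustrative; process_item() and nr_items are
 * hypothetical): a long-running kernel loop that might otherwise stall
 * RCU-tasks grace periods can report quiescent states once per iteration:
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);
 *		cond_resched_rcu_qs();
 *	}
 */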

#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
bool __rcu_is_watching(void);
#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */

/*
 * Infrastructure to implement the synchronize_() primitives in
 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
 */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU)
#include <linux/rcutiny.h>
#else
#error "Unknown RCU implementation specified to kernel configuration"
#endif

/*
 * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
 * initialization and destruction of rcu_head on the stack. rcu_head structures
 * allocated dynamically in the heap or defined statically don't need any
 * initialization.
 */
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head);
void destroy_rcu_head(struct rcu_head *head);
void init_rcu_head_on_stack(struct rcu_head *head);
void destroy_rcu_head_on_stack(struct rcu_head *head);
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline void init_rcu_head(struct rcu_head *head)
{
}

static inline void destroy_rcu_head(struct rcu_head *head)
{
}

static inline void init_rcu_head_on_stack(struct rcu_head *head)
{
}

static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
{
}
#endif	/* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
bool rcu_lockdep_current_cpu_online(void);
#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
static inline bool rcu_lockdep_current_cpu_online(void)
{
	return true;
}
#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */

#ifdef CONFIG_DEBUG_LOCK_ALLOC

static inline void rcu_lock_acquire(struct lockdep_map *map)
{
	lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
}

static inline void rcu_lock_release(struct lockdep_map *map)
{
	lock_release(map, 1, _THIS_IP_);
}

extern struct lockdep_map rcu_lock_map;
extern struct lockdep_map rcu_bh_lock_map;
extern struct lockdep_map rcu_sched_lock_map;
extern struct lockdep_map rcu_callback_map;
int debug_lockdep_rcu_enabled(void);

int rcu_read_lock_held(void);
int rcu_read_lock_bh_held(void);

/**
 * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
 * RCU-sched read-side critical section.  In absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.
 */
#ifdef CONFIG_PREEMPT_COUNT
int rcu_read_lock_sched_held(void);
#else /* #ifdef CONFIG_PREEMPT_COUNT */
static inline int rcu_read_lock_sched_held(void)
{
	return 1;
}
#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

# define rcu_lock_acquire(a)		do { } while (0)
# define rcu_lock_release(a)		do { } while (0)

static inline int rcu_read_lock_held(void)
{
	return 1;
}

static inline int rcu_read_lock_bh_held(void)
{
	return 1;
}

#ifdef CONFIG_PREEMPT_COUNT
static inline int rcu_read_lock_sched_held(void)
{
	return preempt_count() != 0 || irqs_disabled();
}
#else /* #ifdef CONFIG_PREEMPT_COUNT */
static inline int rcu_read_lock_sched_held(void)
{
	return 1;
}
#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_PROVE_RCU

/**
 * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met
 * @c: condition to check
 * @s: informative message
 */
#define RCU_LOCKDEP_WARN(c, s)						\
	do {								\
		static bool __section(.data.unlikely) __warned;		\
		if (debug_lockdep_rcu_enabled() && !__warned && (c)) {	\
			__warned = true;				\
			lockdep_rcu_suspicious(__FILE__, __LINE__, s);	\
		}							\
	} while (0)

#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
static inline void rcu_preempt_sleep_check(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
			 "Illegal context switch in RCU read-side critical section");
}
#else /* #ifdef CONFIG_PROVE_RCU */
static inline void rcu_preempt_sleep_check(void)
{
}
#endif /* #else #ifdef CONFIG_PROVE_RCU */

#define rcu_sleep_check()						\
	do {								\
		rcu_preempt_sleep_check();				\
		RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map),	\
				 "Illegal context switch in RCU-bh read-side critical section"); \
		RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map),	\
				 "Illegal context switch in RCU-sched read-side critical section"); \
	} while (0)

#else /* #ifdef CONFIG_PROVE_RCU */

#define RCU_LOCKDEP_WARN(c, s) do { } while (0)
#define rcu_sleep_check() do { } while (0)

#endif /* #else #ifdef CONFIG_PROVE_RCU */

/*
 * Helper functions for rcu_dereference_check(), rcu_dereference_protected()
 * and rcu_assign_pointer().  Some of these could be folded into their
 * callers, but they are left separate in order to ease introduction of
 * multiple flavors of pointers to match the multiple flavors of RCU
 * (e.g., __rcu_bh, __rcu_sched, and __srcu), should this make sense in
 * the future.
 */

#ifdef __CHECKER__
#define rcu_dereference_sparse(p, space) \
	((void)(((typeof(*p) space *)p) == p))
#else /* #ifdef __CHECKER__ */
#define rcu_dereference_sparse(p, space)
#endif /* #else #ifdef __CHECKER__ */

#define __rcu_access_pointer(p, space) \
({ \
	typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
	rcu_dereference_sparse(p, space); \
	((typeof(*p) __force __kernel *)(_________p1)); \
})
#define __rcu_dereference_check(p, c, space) \
({ \
	/* Dependency order vs. p above. */ \
	typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
	RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
	rcu_dereference_sparse(p, space); \
	((typeof(*p) __force __kernel *)(________p1)); \
})
#define __rcu_dereference_protected(p, c, space) \
({ \
	RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \
	rcu_dereference_sparse(p, space); \
	((typeof(*p) __force __kernel *)(p)); \
})

/**
 * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
 * @v: The value to statically initialize with.
 */
#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)

/**
 * rcu_assign_pointer() - assign to RCU-protected pointer
 * @p: pointer to assign to
 * @v: value to assign (publish)
 *
 * Assigns the specified value to the specified RCU-protected
 * pointer, ensuring that any concurrent RCU readers will see
 * any prior initialization.
 *
 * Inserts memory barriers on architectures that require them
 * (which is most of them), and also prevents the compiler from
 * reordering the code that initializes the structure after the pointer
 * assignment.  More importantly, this call documents which pointers
 * will be dereferenced by RCU read-side code.
 *
 * In some special cases, you may use RCU_INIT_POINTER() instead
 * of rcu_assign_pointer().  RCU_INIT_POINTER() is a bit faster due
 * to the fact that it does not constrain either the CPU or the compiler.
 * That said, using RCU_INIT_POINTER() when you should have used
 * rcu_assign_pointer() is a very bad thing that results in
 * impossible-to-diagnose memory corruption.  So please be careful.
 * See the RCU_INIT_POINTER() comment header for details.
 *
 * Note that rcu_assign_pointer() evaluates each of its arguments only
 * once, appearances notwithstanding.  One of the "extra" evaluations
 * is in typeof() and the other visible only to sparse (__CHECKER__),
 * neither of which actually execute the argument.  As with most cpp
 * macros, this execute-arguments-only-once property is important, so
 * please be careful when making changes to rcu_assign_pointer() and the
 * other macros that it invokes.
 */
#define rcu_assign_pointer(p, v) smp_store_release(&p, RCU_INITIALIZER(v))
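
/*
 * Publication sketch (illustrative; gp, "struct foo", and foo_lock are
 * hypothetical).  The updater initializes the structure completely and
 * only then publishes it, so concurrent readers never see a partially
 * initialized object:
 *
 *	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);
 *
 *	spin_lock(&foo_lock);
 *	p->a = 1;
 *	p->b = 2;
 *	rcu_assign_pointer(gp, p);
 *	spin_unlock(&foo_lock);
 */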

/**
 * rcu_access_pointer() - fetch RCU pointer with no dereferencing
 * @p: The pointer to read
 *
 * Return the value of the specified RCU-protected pointer, but omit the
 * smp_read_barrier_depends() and keep the READ_ONCE().  This is useful
 * when the value of this pointer is accessed, but the pointer is not
 * dereferenced, for example, when testing an RCU-protected pointer against
 * NULL.  Although rcu_access_pointer() may also be used in cases where
 * update-side locks prevent the value of the pointer from changing, you
 * should instead use rcu_dereference_protected() for this use case.
 *
 * It is also permissible to use rcu_access_pointer() when read-side
 * access to the pointer was removed at least one grace period ago, as
 * is the case in the context of the RCU callback that is freeing up
 * the data, or after a synchronize_rcu() returns.  This can be useful
 * when tearing down multi-linked structures after a grace period
 * has elapsed.
 */
#define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
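
/*
 * Usage sketch (illustrative; gp and foo_wq are hypothetical): testing an
 * RCU-protected pointer against NULL without entering a read-side
 * critical section:
 *
 *	if (rcu_access_pointer(gp))
 *		wake_up(&foo_wq);
 *
 * The pointer's value is fetched but never dereferenced, so no RCU
 * read-side protection is required.
 */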

/**
 * rcu_dereference_check() - rcu_dereference with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Do an rcu_dereference(), but check that the conditions under which the
 * dereference will take place are correct.  Typically the conditions
 * indicate the various locking conditions that should be held at that
 * point.  The check should return true if the conditions are satisfied.
 * An implicit check for being in an RCU read-side critical section
 * (rcu_read_lock()) is included.
 *
 * For example:
 *
 *	bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock));
 *
 * could be used to indicate to lockdep that foo->bar may only be dereferenced
 * if either rcu_read_lock() is held, or that the lock required to replace
 * the bar struct at foo->bar is held.
 *
 * Note that the list of conditions may also include indications of when a lock
 * need not be held, for example during initialisation or destruction of the
 * target struct:
 *
 *	bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) ||
 *					      atomic_read(&foo->usage) == 0);
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), prevents the compiler from refetching
 * (and from merging fetches), and, more importantly, documents exactly
 * which pointers are protected by RCU and checks that the pointer is
 * annotated as __rcu.
 */
#define rcu_dereference_check(p, c) \
	__rcu_dereference_check((p), (c) || rcu_read_lock_held(), __rcu)

/**
 * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * This is the RCU-bh counterpart to rcu_dereference_check().
 */
#define rcu_dereference_bh_check(p, c) \
	__rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu)

/**
 * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * This is the RCU-sched counterpart to rcu_dereference_check().
 */
#define rcu_dereference_sched_check(p, c) \
	__rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \
				__rcu)

#define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/

/*
 * The tracing infrastructure traces RCU (we want that), but unfortunately
 * some of the RCU checks cause tracing to lock up the system.
 *
 * The tracing version of rcu_dereference_raw() must not call
 * rcu_read_lock_held().
 */
#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)

/**
 * rcu_dereference_protected() - fetch RCU pointer when updates prevented
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Return the value of the specified RCU-protected pointer, but omit
 * both the smp_read_barrier_depends() and the READ_ONCE().  This
 * is useful in cases where update-side locks prevent the value of the
 * pointer from changing.  Please note that this primitive does -not-
 * prevent the compiler from repeating this reference or combining it
 * with other references, so it should not be used without protection
 * of appropriate locks.
 *
 * This function is only for update-side use.  Using this function
 * when protected only by rcu_read_lock() will result in infrequent
 * but very ugly failures.
 */
#define rcu_dereference_protected(p, c) \
	__rcu_dereference_protected((p), (c), __rcu)
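
/*
 * Update-side sketch (illustrative; gp and foo_lock are hypothetical).
 * With the update-side lock held, the pointer cannot change, so the
 * cheaper rcu_dereference_protected() is appropriate:
 *
 *	spin_lock(&foo_lock);
 *	p = rcu_dereference_protected(gp, lockdep_is_held(&foo_lock));
 *	p->a++;
 *	spin_unlock(&foo_lock);
 */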


/**
 * rcu_dereference() - fetch RCU-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * This is a simple wrapper around rcu_dereference_check().
 */
#define rcu_dereference(p) rcu_dereference_check(p, 0)

/**
 * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0)

/**
 * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)

/**
 * rcu_read_lock() - mark the beginning of an RCU read-side critical section
 *
 * When synchronize_rcu() is invoked on one CPU while other CPUs
 * are within RCU read-side critical sections, then the
 * synchronize_rcu() is guaranteed to block until after all the other
 * CPUs exit their critical sections.  Similarly, if call_rcu() is invoked
 * on one CPU while other CPUs are within RCU read-side critical
 * sections, invocation of the corresponding RCU callback is deferred
 * until after all the other CPUs exit their critical sections.
 *
 * Note, however, that RCU callbacks are permitted to run concurrently
 * with new RCU read-side critical sections.  One way that this can happen
 * is via the following sequence of events: (1) CPU 0 enters an RCU
 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
 * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
 * callback is invoked.  This is legal, because the RCU read-side critical
 * section that was running concurrently with the call_rcu() (and which
 * therefore might be referencing something that the corresponding RCU
 * callback would free up) has completed before the corresponding
 * RCU callback is invoked.
 *
 * RCU read-side critical sections may be nested.  Any deferred actions
 * will be deferred until the outermost RCU read-side critical section
 * completes.
 *
 * You can avoid reading and understanding the next paragraph by
 * following this rule: don't put anything in an rcu_read_lock() RCU
 * read-side critical section that would block in a !PREEMPT kernel.
 * But if you want the full story, read on!
 *
 * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU),
 * it is illegal to block while in an RCU read-side critical section.
 * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPT
 * kernel builds, RCU read-side critical sections may be preempted,
 * but explicit blocking is illegal.  Finally, in preemptible RCU
 * implementations in real-time (with -rt patchset) kernel builds, RCU
 * read-side critical sections may be preempted and they may also block, but
 * only when acquiring spinlocks that are subject to priority inheritance.
 */
static inline void rcu_read_lock(void)
{
	__rcu_read_lock();
	__acquire(RCU);
	rcu_lock_acquire(&rcu_lock_map);
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_lock() used illegally while idle");
}

/*
 * So where is rcu_write_lock()?  It does not exist, as there is no
 * way for writers to lock out RCU readers.  This is a feature, not
 * a bug -- this property is what provides RCU's performance benefits.
 * Of course, writers must coordinate with each other.  The normal
 * spinlock primitives work well for this, but any other technique may be
 * used as well.  RCU does not care how the writers keep out of each
 * others' way, as long as they do so.
 */

/**
 * rcu_read_unlock() - marks the end of an RCU read-side critical section.
 *
 * In most situations, rcu_read_unlock() is immune from deadlock.
 * However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock()
 * is responsible for deboosting, which it does via rt_mutex_unlock().
 * Unfortunately, this function acquires the scheduler's runqueue and
 * priority-inheritance spinlocks.  This means that deadlock could result
 * if the caller of rcu_read_unlock() already holds one of these locks or
 * any lock that is ever acquired while holding them; or any lock which
 * can be taken from interrupt context because rcu_boost()->rt_mutex_lock()
 * does not disable irqs while taking ->wait_lock.
 *
 * That said, RCU readers are never priority boosted unless they were
 * preempted.  Therefore, one way to avoid deadlock is to make sure
 * that preemption never happens within any RCU read-side critical
 * section whose outermost rcu_read_unlock() is called with one of
 * rt_mutex_unlock()'s locks held.  Such preemption can be avoided in
 * a number of ways, for example, by invoking preempt_disable() before
 * the critical section's outermost rcu_read_lock().
 *
 * Given that the set of locks acquired by rt_mutex_unlock() might change
 * at any time, a somewhat more future-proofed approach is to make sure
 * that preemption never happens within any RCU read-side critical
 * section whose outermost rcu_read_unlock() is called with irqs disabled.
 * This approach relies on the fact that rt_mutex_unlock() currently only
 * acquires irq-disabled locks.
 *
 * The second of these two approaches is best in most situations,
 * however, the first approach can also be useful, at least to those
 * developers willing to keep abreast of the set of locks acquired by
 * rt_mutex_unlock().
 *
 * See rcu_read_lock() for more information.
 */
static inline void rcu_read_unlock(void)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_unlock() used illegally while idle");
	__release(RCU);
	__rcu_read_unlock();
	rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
}
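
/*
 * Reader-side usage sketch (illustrative; gp and "struct foo" are
 * hypothetical).  All accesses to the RCU-protected structure sit
 * between rcu_read_lock() and rcu_read_unlock(), and the pointer
 * fetched by rcu_dereference() must not be used after the critical
 * section ends:
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something_with(p->a);
 *	rcu_read_unlock();
 */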

/**
 * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
 *
 * This is the equivalent of rcu_read_lock(), but to be used when updates
 * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since
 * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a
 * softirq handler to be a quiescent state, a process in an RCU read-side
 * critical section must be protected by disabling softirqs. Read-side
 * critical sections in interrupt context can use just rcu_read_lock(),
 * though this should at least be commented to avoid confusing people
 * reading the code.
 *
 * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
 * must occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh()
 * was invoked from some other task.
 */
static inline void rcu_read_lock_bh(void)
{
	local_bh_disable();
	__acquire(RCU_BH);
	rcu_lock_acquire(&rcu_bh_lock_map);
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_lock_bh() used illegally while idle");
}

/*
 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
static inline void rcu_read_unlock_bh(void)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_unlock_bh() used illegally while idle");
	rcu_lock_release(&rcu_bh_lock_map);
	__release(RCU_BH);
	local_bh_enable();
}

/**
 * rcu_read_lock_sched() - mark the beginning of an RCU-sched critical section
 *
 * This is the equivalent of rcu_read_lock(), but to be used when updates
 * are being done using call_rcu_sched() or synchronize_rcu_sched().
 * Read-side critical sections can also be introduced by anything that
 * disables preemption, including local_irq_disable() and friends.
 *
 * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
 * must occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock_sched() from process context if the matching
 * rcu_read_lock_sched() was invoked from an NMI handler.
 */
static inline void rcu_read_lock_sched(void)
{
	preempt_disable();
	__acquire(RCU_SCHED);
	rcu_lock_acquire(&rcu_sched_lock_map);
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_lock_sched() used illegally while idle");
}

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_lock_sched_notrace(void)
{
	preempt_disable_notrace();
	__acquire(RCU_SCHED);
}

/*
 * rcu_read_unlock_sched - marks the end of an RCU-classic critical section
 *
 * See rcu_read_lock_sched() for more information.
 */
static inline void rcu_read_unlock_sched(void)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_unlock_sched() used illegally while idle");
	rcu_lock_release(&rcu_sched_lock_map);
	__release(RCU_SCHED);
	preempt_enable();
}

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_unlock_sched_notrace(void)
{
	__release(RCU_SCHED);
	preempt_enable_notrace();
}

/**
 * RCU_INIT_POINTER() - initialize an RCU protected pointer
 *
 * Initialize an RCU-protected pointer in special cases where readers
 * do not need ordering constraints on the CPU or the compiler.  These
 * special cases are:
 *
 * 1.	This use of RCU_INIT_POINTER() is NULLing out the pointer -or-
 * 2.	The caller has taken whatever steps are required to prevent
 *	RCU readers from concurrently accessing this pointer -or-
 * 3.	The referenced data structure has already been exposed to
 *	readers either at compile time or via rcu_assign_pointer() -and-
 *	a.	You have not made -any- reader-visible changes to
 *		this structure since then -or-
 *	b.	It is OK for readers accessing this structure from its
 *		new location to see the old state of the structure.  (For
 *		example, the changes were to statistical counters or to
 *		other state where exact synchronization is not required.)
 *
 * Failure to follow these rules governing use of RCU_INIT_POINTER() will
 * result in impossible-to-diagnose memory corruption.  That is, the
 * structures will look OK in crash dumps, but any concurrent RCU readers
 * might see pre-initialized values of the referenced data structure.  So
 * please be very careful how you use RCU_INIT_POINTER()!!!
 *
 * If you are creating an RCU-protected linked structure that is accessed
 * by a single external-to-structure RCU-protected pointer, then you may
 * use RCU_INIT_POINTER() to initialize the internal RCU-protected
 * pointers, but you must use rcu_assign_pointer() to initialize the
 * external-to-structure pointer -after- you have completely initialized
 * the reader-accessible portions of the linked structure.
 *
 * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no
 * ordering guarantees for either the CPU or the compiler.
 */
#define RCU_INIT_POINTER(p, v) \
	do { \
		rcu_dereference_sparse(p, __rcu); \
		p = RCU_INITIALIZER(v); \
	} while (0)
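
/*
 * Usage sketch (illustrative; gp and foo_lock are hypothetical): NULLing
 * out an RCU-protected pointer needs no ordering, so RCU_INIT_POINTER()
 * suffices where rcu_assign_pointer() would be overkill:
 *
 *	spin_lock(&foo_lock);
 *	p = rcu_dereference_protected(gp, lockdep_is_held(&foo_lock));
 *	RCU_INIT_POINTER(gp, NULL);
 *	spin_unlock(&foo_lock);
 *	synchronize_rcu();
 *	kfree(p);
 */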

/**
 * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
 *
 * GCC-style initialization for an RCU-protected pointer in a structure field.
 */
#define RCU_POINTER_INITIALIZER(p, v) \
		.p = RCU_INITIALIZER(v)
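
/*
 * Usage sketch (illustrative; both structures and fields are hypothetical):
 *
 *	static struct foo default_foo;
 *
 *	static struct bar bar_instance = {
 *		.x = 42,
 *		RCU_POINTER_INITIALIZER(foo_ptr, &default_foo),
 *	};
 */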

/*
 * Does the specified offset indicate that the corresponding rcu_head
 * structure can be handled by kfree_rcu()?
 */
#define __is_kfree_rcu_offset(offset) ((offset) < 4096)

/*
 * Helper macro for kfree_rcu() to prevent argument-expansion eyestrain.
 */
#define __kfree_rcu(head, offset) \
	do { \
		BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \
		kfree_call_rcu(head, (void (*)(struct rcu_head *))(unsigned long)(offset)); \
	} while (0)

/**
 * kfree_rcu() - kfree an object after a grace period.
 * @ptr:	pointer to kfree
 * @rcu_head:	the name of the struct rcu_head within the type of @ptr.
 *
 * Many rcu callback functions just call kfree() on the base structure.
 * These functions are trivial, but their size adds up, and furthermore
 * when they are used in a kernel module, that module must invoke the
 * high-latency rcu_barrier() function at module-unload time.
 *
 * The kfree_rcu() function handles this issue.  Rather than encoding a
 * function address in the embedded rcu_head structure, kfree_rcu() instead
 * encodes the offset of the rcu_head structure within the base structure.
 * Because the functions are not allowed in the low-order 4096 bytes of
 * kernel virtual memory, offsets up to 4095 bytes can be accommodated.
 * If the offset is larger than 4095 bytes, a compile-time error will
 * be generated in __kfree_rcu().  If this error is triggered, you can
 * either fall back to use of call_rcu() or rearrange the structure to
 * position the rcu_head structure into the first 4096 bytes.
 *
 * Note that the allowable offset might decrease in the future, for example,
 * to allow something like kmem_cache_free_rcu().
 *
 * The BUILD_BUG_ON check must not involve any function calls, hence the
 * checks are done in macros here.
 */
#define kfree_rcu(ptr, rcu_head)					\
	__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
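
/*
 * Usage sketch (illustrative; "struct foo" and fp are hypothetical).
 * Instead of an open-coded callback that just calls kfree(), the offset
 * of the rcu_head field is encoded directly:
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *
 *	kfree_rcu(fp, rcu);
 *
 * This queues the same deferred kfree() that an open-coded call_rcu()
 * callback would, without the module-unload rcu_barrier() obligation
 * that such a callback would impose.
 */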

#ifdef CONFIG_TINY_RCU
static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
	*nextevt = KTIME_MAX;
	return 0;
}
#endif /* #ifdef CONFIG_TINY_RCU */

#if defined(CONFIG_RCU_NOCB_CPU_ALL)
static inline bool rcu_is_nocb_cpu(int cpu) { return true; }
#elif defined(CONFIG_RCU_NOCB_CPU)
bool rcu_is_nocb_cpu(int cpu);
#else
static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
#endif


/* Only for use by adaptive-ticks code. */
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
bool rcu_sys_is_idle(void);
void rcu_sysidle_force_exit(void);
#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */

static inline bool rcu_sys_is_idle(void)
{
	return false;
}

static inline void rcu_sysidle_force_exit(void)
{
}

#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */


#endif /* __LINUX_RCUPDATE_H */