Subversion Repositories Kolibri OS

Rev

Rev 4103 | Rev 5270 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
4065 Serge 1
#ifndef _LINUX_RCULIST_H
2
#define _LINUX_RCULIST_H
3
 
4
#ifdef __KERNEL__
5
 
6
/*
7
 * RCU-protected list version
8
 */
9
#include <linux/list.h>
//#include <linux/rcupdate.h>
11
 
12
/*
13
 * Why is there no list_empty_rcu()?  Because list_empty() serves this
14
 * purpose.  The list_empty() function fetches the RCU-protected pointer
15
 * and compares it to the address of the list head, but neither dereferences
16
 * this pointer itself nor provides this pointer to the caller.  Therefore,
17
 * it is not necessary to use rcu_dereference(), so that list_empty() can
18
 * be used anywhere you would want to use a list_empty_rcu().
19
 */
20
 
21
/*
 * return the ->next pointer of a list_head in an rcu safe
 * way, we must not access it directly
 *
 * Yields an __rcu-qualified lvalue suitable for use with
 * rcu_assign_pointer() / rcu_dereference().
 */
#define list_next_rcu(list)	(*((struct list_head __rcu **)(&(list)->next)))
26
 
27
/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
#ifndef CONFIG_DEBUG_LIST
static inline void __list_add_rcu(struct list_head *new,
		struct list_head *prev, struct list_head *next)
{
	/*
	 * Fully initialize @new BEFORE publishing it: concurrent RCU
	 * readers may follow prev->next the instant the assignment below
	 * becomes visible.
	 */
	new->next = next;
	new->prev = prev;
	/* Publication point: orders the stores above before the pointer update. */
	rcu_assign_pointer(list_next_rcu(prev), new);
	/* ->prev is not traversed by RCU readers, so a plain store is fine. */
	next->prev = new;
}
#else
/* Debug build: out-of-line version with list-corruption checks. */
void __list_add_rcu(struct list_head *new,
		struct list_head *prev, struct list_head *next);
#endif
46
 
47
/**
 * list_add_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
	__list_add_rcu(new, head, head->next);
}
67
 
68
/**
 * list_add_tail_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_tail_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_tail_rcu(struct list_head *new,
					struct list_head *head)
{
	__list_add_rcu(new, head->prev, head);
}
89
 
90
/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry.  Instead, either synchronize_rcu()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
	__list_del_entry(entry);
	/* Only ->prev is poisoned: readers may still be following ->next. */
	entry->prev = LIST_POISON2;
}
119
 
120
/**
 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: list_unhashed() on the node return true after this. It is
 * useful for RCU based read lockfree traversal if the writer side
 * must know if the list entry is still hashed or already unhashed.
 *
 * In particular, it means that we can not poison the forward pointers
 * that may still be used for walking the hash list and we can only
 * zero the pprev pointer so list_unhashed() will return true after
 * this.
 *
 * The caller must take whatever precautions are necessary (such as
 * holding appropriate locks) to avoid racing with another
 * list-mutation primitive, such as hlist_add_head_rcu() or
 * hlist_del_rcu(), running on this same list.  However, it is
 * perfectly legal to run concurrently with the _rcu list-traversal
 * primitives, such as hlist_for_each_entry_rcu().
 */
static inline void hlist_del_init_rcu(struct hlist_node *n)
{
	/* Already-unhashed nodes are left untouched, making this idempotent. */
	if (!hlist_unhashed(n)) {
		__hlist_del(n);
		n->pprev = NULL;
	}
}
147
 
148
/**
 * list_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 * Note: @old should not be empty.
 */
static inline void list_replace_rcu(struct list_head *old,
				struct list_head *new)
{
	/* Initialize @new completely before publishing it to readers. */
	new->next = old->next;
	new->prev = old->prev;
	/* Publication point: swings the predecessor's ->next onto @new. */
	rcu_assign_pointer(list_next_rcu(new->prev), new);
	new->next->prev = new;
	/* @old's ->next is left intact so in-flight readers can keep walking. */
	old->prev = LIST_POISON2;
}
165
 
166
/**
 * list_splice_init_rcu - splice an RCU-protected list into an existing list.
 * @list:	the RCU-protected list to splice
 * @head:	the place in the list to splice the first list into
 * @sync:	function to sync: synchronize_rcu(), synchronize_sched(), ...
 *
 * @head can be RCU-read traversed concurrently with this function.
 *
 * Note that this function blocks.
 *
 * Important note: the caller must take whatever action is necessary to
 *	prevent any other updates to @head.  In principle, it is possible
 *	to modify the list as soon as sync() begins execution.
 *	If this sort of thing becomes necessary, an alternative version
 *	based on call_rcu() could be created.  But only if -really-
 *	needed -- there is no shortage of RCU API members.
 */
static inline void list_splice_init_rcu(struct list_head *list,
					struct list_head *head,
					void (*sync)(void))
{
	struct list_head *first = list->next;
	struct list_head *last = list->prev;
	struct list_head *at = head->next;

	if (list_empty(list))
		return;

	/*
	 * "first" and "last" tracking list, so initialize it.  RCU readers
	 * have access to this list, so we must use INIT_LIST_HEAD_RCU()
	 * instead of INIT_LIST_HEAD().
	 *
	 * NOTE(review): despite the comment above, the code below still
	 * calls INIT_LIST_HEAD().  Upstream switched this call to
	 * INIT_LIST_HEAD_RCU() when the comment was added -- confirm
	 * whether INIT_LIST_HEAD_RCU() exists in this tree and update
	 * the call if so.
	 */

	INIT_LIST_HEAD(list);

	/*
	 * At this point, the list body still points to the source list.
	 * Wait for any readers to finish using the list before splicing
	 * the list body into the new list.  Any new readers will see
	 * an empty list.
	 */

	sync();

	/*
	 * Readers are finished with the source list, so perform splice.
	 * The order is important if the new list is global and accessible
	 * to concurrent RCU readers.  Note that RCU readers are not
	 * permitted to traverse the prev pointers without excluding
	 * this function.
	 */

	last->next = at;
	/* Publication point: makes the spliced segment visible atomically. */
	rcu_assign_pointer(list_next_rcu(head), first);
	first->prev = head;
	at->prev = last;
}
224
 
225
/**
 * list_entry_rcu - get the struct for this entry
 * @ptr:        the &struct list_head pointer.
 * @type:       the type of the struct this is embedded in.
 * @member:     the name of the list_struct within the struct.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 *
 * The __rcu/__force dance below keeps sparse happy while letting
 * rcu_dereference_raw() provide the required dependency ordering.
 */
#define list_entry_rcu(ptr, type, member) \
({ \
	typeof(*ptr) __rcu *__ptr = (typeof(*ptr) __rcu __force *)ptr; \
	 container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \
})
4065 Serge 239
 
240
/**
241
 * Where are list_empty_rcu() and list_first_entry_rcu()?
242
 *
243
 * Implementing those functions following their counterparts list_empty() and
244
 * list_first_entry() is not advisable because they lead to subtle race
245
 * conditions as the following snippet shows:
246
 *
247
 * if (!list_empty_rcu(mylist)) {
248
 *	struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member);
249
 *	do_something(bar);
250
 * }
251
 *
252
 * The list may not be empty when list_empty_rcu checks it, but it may be when
253
 * list_first_entry_rcu rereads the ->next pointer.
254
 *
255
 * Rereading the ->next pointer is not a problem for list_empty() and
256
 * list_first_entry() because they would be protected by a lock that blocks
257
 * writers.
258
 *
259
 * See list_first_or_null_rcu for an alternative.
260
 */
261
 
262
/**
 * list_first_or_null_rcu - get the first element from a list
 * @ptr:        the list head to take the element from.
 * @type:       the type of the struct this is embedded in.
 * @member:     the name of the list_struct within the struct.
 *
 * Note that if the list is empty, it returns NULL.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 *
 * ACCESS_ONCE() ensures ->next is read exactly once, so the emptiness
 * test and the returned entry are based on the same snapshot (avoiding
 * the list_empty_rcu()/list_first_entry_rcu() race described above).
 */
#define list_first_or_null_rcu(ptr, type, member) \
({ \
	struct list_head *__ptr = (ptr); \
	  struct list_head *__next = ACCESS_ONCE(__ptr->next); \
	likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
})
4065 Serge 279
 
280
/**
 * list_for_each_entry_rcu	-	iterate over rcu list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member) \
	for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \
		&pos->member != (head); \
		pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
294
 
295
/**
 * list_for_each_entry_continue_rcu - continue iteration over list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Continue to iterate over list of given type, continuing after
 * the current position.  @pos must already designate a valid entry
 * (or the head) when the loop starts.
 */
#define list_for_each_entry_continue_rcu(pos, head, member) 		\
	for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
	     &pos->member != (head);	\
	     pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
308
 
309
/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: list_unhashed() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
	__hlist_del(n);
	/* ->pprev is never followed by RCU readers, so it may be poisoned. */
	n->pprev = LIST_POISON2;
}
333
 
334
/**
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 */
static inline void hlist_replace_rcu(struct hlist_node *old,
					struct hlist_node *new)
{
	struct hlist_node *next = old->next;

	/* Initialize @new completely before publishing it. */
	new->next = next;
	new->pprev = old->pprev;
	/* Publication point: the predecessor's forward pointer now hits @new. */
	rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new);
	if (next)
		new->next->pprev = &new->next;
	/* @old's ->next stays valid so in-flight readers can keep walking. */
	old->pprev = LIST_POISON2;
}
353
 
354
/*
 * return the first or the next element in an RCU protected hlist
 *
 * Each macro yields an __rcu-qualified lvalue suitable for
 * rcu_assign_pointer() / rcu_dereference().
 */
#define hlist_first_rcu(head)	(*((struct hlist_node __rcu **)(&(head)->first)))
#define hlist_next_rcu(node)	(*((struct hlist_node __rcu **)(&(node)->next)))
#define hlist_pprev_rcu(node)	(*((struct hlist_node __rcu **)((node)->pprev)))
360
 
361
/**
 * hlist_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.  Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
					struct hlist_head *h)
{
	struct hlist_node *first = h->first;

	/* Fully initialize @n before publishing it via the head pointer. */
	n->next = first;
	n->pprev = &h->first;
	/* Publication point. */
	rcu_assign_pointer(hlist_first_rcu(h), n);
	if (first)
		first->pprev = &n->next;
}
391
 
392
/**
 * hlist_add_before_rcu
 * @n: the new element to add to the hash list.
 * @next: the existing element to add the new element before.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * before the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_before_rcu(struct hlist_node *n,
					struct hlist_node *next)
{
	/* Fully initialize @n before publishing it. */
	n->pprev = next->pprev;
	n->next = next;
	/* Publication point: *n->pprev (the predecessor's forward link) -> @n. */
	rcu_assign_pointer(hlist_pprev_rcu(n), n);
	next->pprev = &n->next;
}
418
 
419
/**
 * hlist_add_behind_rcu
 * @n: the new element to add to the hash list.
 * @prev: the existing element to add the new element after.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * after the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_behind_rcu(struct hlist_node *n,
					struct hlist_node *prev)
{
	/* Fully initialize @n before publishing it. */
	n->next = prev->next;
	n->pprev = &prev->next;
	/* Publication point. */
	rcu_assign_pointer(hlist_next_rcu(prev), n);
	if (n->next)
		n->next->pprev = &n->next;
}
446
 
447
/* Bare-node RCU hlist walk: yields struct hlist_node *, not entries. */
#define __hlist_for_each_rcu(pos, head)				\
	for (pos = rcu_dereference(hlist_first_rcu(head));	\
	     pos;						\
	     pos = rcu_dereference(hlist_next_rcu(pos)))
451
 
452
/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu(pos, head, member)			\
	for (pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\
			typeof(*(pos)), member);			\
		pos;							\
		pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))
468
 
469
/**
 * hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing)
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 *
 * This is the same as hlist_for_each_entry_rcu() except that it does
 * not do any RCU debugging or tracing.
 */
#define hlist_for_each_entry_rcu_notrace(pos, head, member)			\
	for (pos = hlist_entry_safe (rcu_dereference_raw_notrace(hlist_first_rcu(head)),\
			typeof(*(pos)), member);			\
		pos;							\
		pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))
488
 
489
/**
 * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 *
 * _bh variant: uses rcu_dereference_bh(), for read sides protected by
 * rcu_read_lock_bh().
 */
#define hlist_for_each_entry_rcu_bh(pos, head, member)			\
	for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\
			typeof(*(pos)), member);			\
		pos;							\
		pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))
505
 
506
/**
 * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
 * @pos:	the type * to use as a loop cursor.
 * @member:	the name of the hlist_node within the struct.
 *
 * @pos must already designate a valid entry when the loop starts.
 */
#define hlist_for_each_entry_continue_rcu(pos, member)			\
	for (pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
			typeof(*(pos)), member);			\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
			typeof(*(pos)), member))
517
 
518
/**
 * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
 * @pos:	the type * to use as a loop cursor.
 * @member:	the name of the hlist_node within the struct.
 *
 * _bh variant of hlist_for_each_entry_continue_rcu(); @pos must already
 * designate a valid entry when the loop starts.
 */
#define hlist_for_each_entry_continue_rcu_bh(pos, member)		\
	for (pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
			typeof(*(pos)), member);			\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
			typeof(*(pos)), member))
529
 
530
 
531
#endif	/* __KERNEL__ */
532
#endif