40,7 → 40,7 |
next->prev = new; |
} |
#else |
/*
 * Insert a new entry between two known consecutive entries (RCU variant);
 * implemented out of line — see the non-debug inline version in the #if
 * branch above.  NOTE(review): the fused old/new diff lines duplicated this
 * prototype (with and without the redundant `extern`); resolved to the
 * post-patch form, which drops `extern` per kernel style.
 */
void __list_add_rcu(struct list_head *new,
		    struct list_head *prev, struct list_head *next);
#endif |
|
191,7 → 191,11 |
if (list_empty(list)) |
return; |
|
/*
 * "first" and "last" tracking list, so initialize it.  RCU readers
 * have access to this list, so we must use INIT_LIST_HEAD_RCU()
 * instead of INIT_LIST_HEAD().
 */
|
INIT_LIST_HEAD(list); |
|
228,7 → 232,8 |
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). |
*/ |
/*
 * Resolve the fused pre/post-patch macro lines (the diff left two `({`
 * openers and both spellings of the typeof line) into the post-patch form.
 * The lvalue cast through an __rcu pointer lets sparse check that the
 * caller's pointer really is RCU-protected before rcu_dereference_raw()
 * strips the annotation for container_of().
 */
#define list_entry_rcu(ptr, type, member) \
({ \
	typeof(*ptr) __rcu *__ptr = (typeof(*ptr) __rcu __force *)ptr; \
	container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \
})
|
266,10 → 271,10 |
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). |
*/ |
/*
 * Resolve the fused pre/post-patch macro lines (duplicate `({` openers and
 * both the split and single-line forms of the ternary) into the post-patch
 * form.  ACCESS_ONCE() pins a single load of ->next so the emptiness test
 * and the returned entry are derived from the same snapshot under RCU.
 */
#define list_first_or_null_rcu(ptr, type, member) \
({ \
	struct list_head *__ptr = (ptr); \
	struct list_head *__next = ACCESS_ONCE(__ptr->next); \
	likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
})
|
/** |
412,9 → 417,9 |
} |
|
/** |
 * hlist_add_behind_rcu
 * @n: the new element to add to the hash list.
 * @prev: the existing element to add the new element after.
* |
* Description: |
* Adds the specified element to the specified hlist |
429,8 → 434,8 |
* hlist_for_each_entry_rcu(), used to prevent memory-consistency |
* problems on Alpha CPUs. |
*/ |
static inline void hlist_add_after_rcu(struct hlist_node *prev, |
struct hlist_node *n) |
static inline void hlist_add_behind_rcu(struct hlist_node *n, |
struct hlist_node *prev) |
{ |
n->next = prev->next; |
n->pprev = &prev->next; |