#ifndef _LINUX_RCULIST_H
#define _LINUX_RCULIST_H

#ifdef __KERNEL__

/*
 * RCU-protected list version
 */
#include <linux/list.h>
#include <linux/rcupdate.h>

/*
 * Why is there no list_empty_rcu()?  Because list_empty() serves this
 * purpose.  The list_empty() function fetches the RCU-protected pointer
 * and compares it to the address of the list head, but neither dereferences
 * this pointer itself nor provides this pointer to the caller.  Therefore,
 * it is not necessary to use rcu_dereference(), so that list_empty() can
 * be used anywhere you would want to use a list_empty_rcu().
 */

/*
 * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers
 * @list: list to be initialized
 *
 * You should instead use INIT_LIST_HEAD() for normal initialization and
 * cleanup tasks, when readers have no access to the list being initialized.
 * However, if the list being initialized is visible to readers, you
 * need to keep the compiler from being too mischievous.
 */
static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
{
        WRITE_ONCE(list->next, list);
        WRITE_ONCE(list->prev, list);
}

/*
 * Return the ->next pointer of a list_head in an RCU-safe
 * way; we must not access it directly.
 */
#define list_next_rcu(list)     (*((struct list_head __rcu **)(&(list)->next)))

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
#ifndef CONFIG_DEBUG_LIST
static inline void __list_add_rcu(struct list_head *new,
                struct list_head *prev, struct list_head *next)
{
        new->next = next;
        new->prev = prev;
        rcu_assign_pointer(list_next_rcu(prev), new);
        next->prev = new;
}
#else
void __list_add_rcu(struct list_head *new,
                    struct list_head *prev, struct list_head *next);
#endif

/**
 * list_add_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
        __list_add_rcu(new, head, head->next);
}
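
/*
 * A minimal usage sketch (not part of this header): a writer adds an
 * element under a lock while readers may traverse the list concurrently.
 * The names "struct foo", "foo_list", "foo_lock", and foo_push() are
 * hypothetical, invented for illustration only.
 *
 *      struct foo {
 *              int data;
 *              struct list_head list;
 *      };
 *      static LIST_HEAD(foo_list);
 *      static DEFINE_SPINLOCK(foo_lock);
 *
 *      static void foo_push(struct foo *p)
 *      {
 *              spin_lock(&foo_lock);
 *              list_add_rcu(&p->list, &foo_list);
 *              spin_unlock(&foo_lock);
 *      }
 */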

/**
 * list_add_tail_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_tail_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_tail_rcu(struct list_head *new,
                                        struct list_head *head)
{
        __list_add_rcu(new, head->prev, head);
}

/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty() on entry does not return true after this;
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry.  Instead, either synchronize_rcu()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
        __list_del_entry(entry);
        entry->prev = LIST_POISON2;
}
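
/*
 * A minimal usage sketch, reusing the hypothetical "struct foo",
 * "foo_list", and "foo_lock" from the list_add_rcu() sketch above:
 * delete the element under the lock, then wait for a grace period
 * before freeing it.  Where blocking is not acceptable, call_rcu()
 * with an rcu_head embedded in the structure serves the same purpose.
 *
 *      static void foo_del(struct foo *p)
 *      {
 *              spin_lock(&foo_lock);
 *              list_del_rcu(&p->list);
 *              spin_unlock(&foo_lock);
 *              synchronize_rcu();
 *              kfree(p);
 *      }
 */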

/**
 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_unhashed() on the node returns true after this. It is
 * useful for RCU based read lockfree traversal if the writer side
 * must know if the list entry is still hashed or already unhashed.
 *
 * In particular, it means that we can not poison the forward pointers
 * that may still be used for walking the hash list and we can only
 * zero the pprev pointer so hlist_unhashed() will return true after
 * this.
 *
 * The caller must take whatever precautions are necessary (such as
 * holding appropriate locks) to avoid racing with another
 * list-mutation primitive, such as hlist_add_head_rcu() or
 * hlist_del_rcu(), running on this same list.  However, it is
 * perfectly legal to run concurrently with the _rcu list-traversal
 * primitives, such as hlist_for_each_entry_rcu().
 */
static inline void hlist_del_init_rcu(struct hlist_node *n)
{
        if (!hlist_unhashed(n)) {
                __hlist_del(n);
                n->pprev = NULL;
        }
}

/**
 * list_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 * Note: @old should not be empty.
 */
static inline void list_replace_rcu(struct list_head *old,
                                struct list_head *new)
{
        new->next = old->next;
        new->prev = old->prev;
        rcu_assign_pointer(list_next_rcu(new->prev), new);
        new->next->prev = new;
        old->prev = LIST_POISON2;
}
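
/*
 * A minimal sketch of the classic read-copy-update sequence, again
 * using the hypothetical "struct foo" and "foo_lock" from above
 * ("old_p", "new_p", and "new_value" are likewise invented; error
 * handling is omitted): copy the element, modify the copy, substitute
 * it atomically, then free the old element after a grace period.
 *
 *      new_p = kmemdup(old_p, sizeof(*old_p), GFP_KERNEL);
 *      new_p->data = new_value;
 *      spin_lock(&foo_lock);
 *      list_replace_rcu(&old_p->list, &new_p->list);
 *      spin_unlock(&foo_lock);
 *      synchronize_rcu();
 *      kfree(old_p);
 */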

/**
 * __list_splice_init_rcu - join an RCU-protected list into an existing list.
 * @list:       the RCU-protected list to splice
 * @prev:       points to the last element of the existing list
 * @next:       points to the first element of the existing list
 * @sync:       function to sync: synchronize_rcu(), synchronize_sched(), ...
 *
 * The list pointed to by @prev and @next can be RCU-read traversed
 * concurrently with this function.
 *
 * Note that this function blocks.
 *
 * Important note: the caller must take whatever action is necessary to prevent
 * any other updates to the existing list.  In principle, it is possible to
 * modify the list as soon as sync() begins execution. If this sort of thing
 * becomes necessary, an alternative version based on call_rcu() could be
 * created.  But only if -really- needed -- there is no shortage of RCU API
 * members.
 */
static inline void __list_splice_init_rcu(struct list_head *list,
                                          struct list_head *prev,
                                          struct list_head *next,
                                          void (*sync)(void))
{
        struct list_head *first = list->next;
        struct list_head *last = list->prev;

        /*
         * "first" and "last" track the list, so initialize it.  RCU readers
         * have access to this list, so we must use INIT_LIST_HEAD_RCU()
         * instead of INIT_LIST_HEAD().
         */

        INIT_LIST_HEAD_RCU(list);

        /*
         * At this point, the list body still points to the source list.
         * Wait for any readers to finish using the list before splicing
         * the list body into the new list.  Any new readers will see
         * an empty list.
         */

        sync();

        /*
         * Readers are finished with the source list, so perform splice.
         * The order is important if the new list is global and accessible
         * to concurrent RCU readers.  Note that RCU readers are not
         * permitted to traverse the prev pointers without excluding
         * this function.
         */

        last->next = next;
        rcu_assign_pointer(list_next_rcu(prev), first);
        first->prev = prev;
        next->prev = last;
}

/**
 * list_splice_init_rcu - splice an RCU-protected list into an existing list,
 *                        designed for stacks.
 * @list:       the RCU-protected list to splice
 * @head:       the place in the existing list to splice the first list into
 * @sync:       function to sync: synchronize_rcu(), synchronize_sched(), ...
 */
static inline void list_splice_init_rcu(struct list_head *list,
                                        struct list_head *head,
                                        void (*sync)(void))
{
        if (!list_empty(list))
                __list_splice_init_rcu(list, head, head->next, sync);
}
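
/*
 * A minimal usage sketch: drain a producer list into a consumer list,
 * passing synchronize_rcu as the @sync function.  "producer_list",
 * "consumer_list", and "foo_mutex" are hypothetical names.  A mutex is
 * used rather than a spinlock because @sync blocks.
 *
 *      mutex_lock(&foo_mutex);
 *      list_splice_init_rcu(&producer_list, &consumer_list, synchronize_rcu);
 *      mutex_unlock(&foo_mutex);
 */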

/**
 * list_splice_tail_init_rcu - splice an RCU-protected list into an existing
 *                             list, designed for queues.
 * @list:       the RCU-protected list to splice
 * @head:       the place in the existing list to splice the first list into
 * @sync:       function to sync: synchronize_rcu(), synchronize_sched(), ...
 */
static inline void list_splice_tail_init_rcu(struct list_head *list,
                                             struct list_head *head,
                                             void (*sync)(void))
{
        if (!list_empty(list))
                __list_splice_init_rcu(list, head->prev, head, sync);
}

/**
 * list_entry_rcu - get the struct for this entry
 * @ptr:        the &struct list_head pointer.
 * @type:       the type of the struct this is embedded in.
 * @member:     the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_entry_rcu(ptr, type, member) \
        container_of(lockless_dereference(ptr), type, member)

/**
 * Where are list_empty_rcu() and list_first_entry_rcu()?
 *
 * Implementing those functions following their counterparts list_empty() and
 * list_first_entry() is not advisable because they lead to subtle race
 * conditions as the following snippet shows:
 *
 * if (!list_empty_rcu(mylist)) {
 *      struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member);
 *      do_something(bar);
 * }
 *
 * The list may not be empty when list_empty_rcu() checks it, but it may be
 * when list_first_entry_rcu() rereads the ->next pointer.
 *
 * Rereading the ->next pointer is not a problem for list_empty() and
 * list_first_entry() because they would be protected by a lock that blocks
 * writers.
 *
 * See list_first_or_null_rcu() for an alternative.
 */

/**
 * list_first_or_null_rcu - get the first element from a list
 * @ptr:        the list head to take the element from.
 * @type:       the type of the struct this is embedded in.
 * @member:     the name of the list_head within the struct.
 *
 * Note that if the list is empty, it returns NULL.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_first_or_null_rcu(ptr, type, member) \
({ \
        struct list_head *__ptr = (ptr); \
        struct list_head *__next = READ_ONCE(__ptr->next); \
        likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
})
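
/*
 * A minimal usage sketch, with the hypothetical "struct foo"/"foo_list"
 * from above and an invented consumer use_foo(): peek at the head of
 * the list from within an RCU read-side critical section.
 *
 *      rcu_read_lock();
 *      p = list_first_or_null_rcu(&foo_list, struct foo, list);
 *      if (p)
 *              use_foo(p);
 *      rcu_read_unlock();
 */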

/**
 * list_next_or_null_rcu - get the next element from a list
 * @head:       the head for the list.
 * @ptr:        the list head to take the next element from.
 * @type:       the type of the struct this is embedded in.
 * @member:     the name of the list_head within the struct.
 *
 * Note that if the ptr is at the end of the list, NULL is returned.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_next_or_null_rcu(head, ptr, type, member) \
({ \
        struct list_head *__head = (head); \
        struct list_head *__ptr = (ptr); \
        struct list_head *__next = READ_ONCE(__ptr->next); \
        likely(__next != __head) ? list_entry_rcu(__next, type, \
                                                  member) : NULL; \
})

/**
 * list_for_each_entry_rcu - iterate over rcu list of given type
 * @pos:        the type * to use as a loop cursor.
 * @head:       the head for your list.
 * @member:     the name of the list_head within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member) \
        for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \
                &pos->member != (head); \
                pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
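
/*
 * A minimal usage sketch, using the hypothetical "struct foo"/"foo_list"
 * from above: a lookup that runs entirely within an RCU read-side
 * critical section.  The element must not be used after rcu_read_unlock()
 * unless a reference has been taken.
 *
 *      rcu_read_lock();
 *      list_for_each_entry_rcu(p, &foo_list, list) {
 *              if (p->data == key) {
 *                      use_foo(p);
 *                      break;
 *              }
 *      }
 *      rcu_read_unlock();
 */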

/**
 * list_entry_lockless - get the struct for this entry
 * @ptr:        the &struct list_head pointer.
 * @type:       the type of the struct this is embedded in.
 * @member:     the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu(), but requires some implicit RCU
 * read-side guarding.  One example is running within a special
 * exception-time environment where preemption is disabled and where
 * lockdep cannot be invoked (in which case updaters must use RCU-sched,
 * as in synchronize_sched(), call_rcu_sched(), and friends).  Another
 * example is when items are added to the list, but never deleted.
 */
#define list_entry_lockless(ptr, type, member) \
        container_of((typeof(ptr))lockless_dereference(ptr), type, member)

/**
 * list_for_each_entry_lockless - iterate over rcu list of given type
 * @pos:        the type * to use as a loop cursor.
 * @head:       the head for your list.
 * @member:     the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu(), but requires some implicit RCU
 * read-side guarding.  One example is running within a special
 * exception-time environment where preemption is disabled and where
 * lockdep cannot be invoked (in which case updaters must use RCU-sched,
 * as in synchronize_sched(), call_rcu_sched(), and friends).  Another
 * example is when items are added to the list, but never deleted.
 */
#define list_for_each_entry_lockless(pos, head, member) \
        for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \
             &pos->member != (head); \
             pos = list_entry_lockless(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_continue_rcu - continue iteration over list of given type
 * @pos:        the type * to use as a loop cursor.
 * @head:       the head for your list.
 * @member:     the name of the list_head within the struct.
 *
 * Continue to iterate over list of given type, continuing after
 * the current position.
 */
#define list_for_each_entry_continue_rcu(pos, head, member)             \
        for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
             &pos->member != (head);    \
             pos = list_entry_rcu(pos->member.next, typeof(*pos), member))

/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_unhashed() on entry does not return true after this;
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
        __hlist_del(n);
        n->pprev = LIST_POISON2;
}

/**
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 */
static inline void hlist_replace_rcu(struct hlist_node *old,
                                        struct hlist_node *new)
{
        struct hlist_node *next = old->next;

        new->next = next;
        new->pprev = old->pprev;
        rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new);
        if (next)
                new->next->pprev = &new->next;
        old->pprev = LIST_POISON2;
}

/*
 * return the first or the next element in an RCU protected hlist
 */
#define hlist_first_rcu(head)   (*((struct hlist_node __rcu **)(&(head)->first)))
#define hlist_next_rcu(node)    (*((struct hlist_node __rcu **)(&(node)->next)))
#define hlist_pprev_rcu(node)   (*((struct hlist_node __rcu **)((node)->pprev)))

/**
 * hlist_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.  Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
                                        struct hlist_head *h)
{
        struct hlist_node *first = h->first;

        n->next = first;
        n->pprev = &h->first;
        rcu_assign_pointer(hlist_first_rcu(h), n);
        if (first)
                first->pprev = &n->next;
}
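
/*
 * A minimal usage sketch of one bucket of a simple RCU-protected hash
 * table.  "struct bar", "bar_hash", "bar_lock", and bar_insert() are
 * hypothetical names invented for illustration.
 *
 *      struct bar {
 *              int key;
 *              struct hlist_node node;
 *      };
 *      static struct hlist_head bar_hash[64];
 *      static DEFINE_SPINLOCK(bar_lock);
 *
 *      static void bar_insert(struct bar *b)
 *      {
 *              spin_lock(&bar_lock);
 *              hlist_add_head_rcu(&b->node, &bar_hash[b->key & 63]);
 *              spin_unlock(&bar_lock);
 *      }
 */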

/**
 * hlist_add_before_rcu
 * @n: the new element to add to the hash list.
 * @next: the existing element to add the new element before.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * before the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_before_rcu(struct hlist_node *n,
                                        struct hlist_node *next)
{
        n->pprev = next->pprev;
        n->next = next;
        rcu_assign_pointer(hlist_pprev_rcu(n), n);
        next->pprev = &n->next;
}

/**
 * hlist_add_behind_rcu
 * @n: the new element to add to the hash list.
 * @prev: the existing element to add the new element after.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * after the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_behind_rcu(struct hlist_node *n,
                                        struct hlist_node *prev)
{
        n->next = prev->next;
        n->pprev = &prev->next;
        rcu_assign_pointer(hlist_next_rcu(prev), n);
        if (n->next)
                n->next->pprev = &n->next;
}

#define __hlist_for_each_rcu(pos, head)                         \
        for (pos = rcu_dereference(hlist_first_rcu(head));      \
             pos;                                               \
             pos = rcu_dereference(hlist_next_rcu(pos)))

/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @pos:        the type * to use as a loop cursor.
 * @head:       the head for your list.
 * @member:     the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu(pos, head, member)                     \
        for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
                        typeof(*(pos)), member);                        \
                pos;                                                    \
                pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
                        &(pos)->member)), typeof(*(pos)), member))
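
/*
 * A minimal usage sketch of a lookup in the hypothetical "bar_hash"
 * table from the hlist_add_head_rcu() sketch above; use_bar() is an
 * invented consumer.  The result is only valid inside the read-side
 * critical section.
 *
 *      struct bar *b;
 *
 *      rcu_read_lock();
 *      hlist_for_each_entry_rcu(b, &bar_hash[key & 63], node) {
 *              if (b->key == key) {
 *                      use_bar(b);
 *                      break;
 *              }
 *      }
 *      rcu_read_unlock();
 */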

/**
 * hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing)
 * @pos:        the type * to use as a loop cursor.
 * @head:       the head for your list.
 * @member:     the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 *
 * This is the same as hlist_for_each_entry_rcu() except that it does
 * not do any RCU debugging or tracing.
 */
#define hlist_for_each_entry_rcu_notrace(pos, head, member)                     \
        for (pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_first_rcu(head)),\
                        typeof(*(pos)), member);                        \
                pos;                                                    \
                pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_next_rcu(\
                        &(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
 * @pos:        the type * to use as a loop cursor.
 * @head:       the head for your list.
 * @member:     the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu_bh(pos, head, member)                  \
        for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\
                        typeof(*(pos)), member);                        \
                pos;                                                    \
                pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(\
                        &(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
 * @pos:        the type * to use as a loop cursor.
 * @member:     the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue_rcu(pos, member)                  \
        for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
                        &(pos)->member)), typeof(*(pos)), member);      \
             pos;                                                       \
             pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
                        &(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
 * @pos:        the type * to use as a loop cursor.
 * @member:     the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue_rcu_bh(pos, member)               \
        for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(  \
                        &(pos)->member)), typeof(*(pos)), member);      \
             pos;                                                       \
             pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(  \
                        &(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point
 * @pos:        the type * to use as a loop cursor.
 * @member:     the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from_rcu(pos, member)                      \
        for (; pos;                                                     \
             pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
                        &(pos)->member)), typeof(*(pos)), member))

#endif  /* __KERNEL__ */
#endif