/* include/linux/rculist.h — RCU-protected variants of the list primitives */
  1. #ifndef _LINUX_RCULIST_H
  2. #define _LINUX_RCULIST_H
  3. #ifdef __KERNEL__
  4. /*
  5. * RCU-protected list version
  6. */
  7. #include <linux/list.h>
  8. #include <linux/rcupdate.h>
  9. /*
  10. * Why is there no list_empty_rcu()? Because list_empty() serves this
  11. * purpose. The list_empty() function fetches the RCU-protected pointer
  12. * and compares it to the address of the list head, but neither dereferences
  13. * this pointer itself nor provides this pointer to the caller. Therefore,
  14. * it is not necessary to use rcu_dereference(), so that list_empty() can
  15. * be used anywhere you would want to use a list_empty_rcu().
  16. */
/*
 * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers
 * @list: list to be initialized
 *
 * You should instead use INIT_LIST_HEAD() for normal initialization and
 * cleanup tasks, when readers have no access to the list being initialized.
 * However, if the list being initialized is visible to readers, you
 * need to keep the compiler from being too mischievous.
 */
static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
{
	/*
	 * WRITE_ONCE() forbids the compiler from tearing or fusing these
	 * stores, so concurrent lockless readers never observe a
	 * half-initialized head.
	 */
	WRITE_ONCE(list->next, list);
	WRITE_ONCE(list->prev, list);
}
/*
 * return the ->next pointer of a list_head in an rcu safe
 * way, we must not access it directly
 *
 * The cast adds the __rcu address-space annotation so that sparse can
 * verify that all accesses go through rcu_dereference()/rcu_assign_pointer().
 */
#define list_next_rcu(list)	(*((struct list_head __rcu **)(&(list)->next)))
/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add_rcu(struct list_head *new,
		struct list_head *prev, struct list_head *next)
{
	if (!__list_add_valid(new, prev, next))
		return;

	/* Fully initialize @new before it becomes reachable... */
	new->next = next;
	new->prev = prev;
	/*
	 * ...then publish it: rcu_assign_pointer() orders the
	 * initialization above before the store that makes @new visible
	 * to concurrent RCU readers traversing ->next.
	 */
	rcu_assign_pointer(list_next_rcu(prev), new);
	/* ->prev is not reader-traversed under RCU, so a plain store is OK. */
	next->prev = new;
}
/**
 * list_add_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
	__list_add_rcu(new, head, head->next);
}
/**
 * list_add_tail_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_tail_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_tail_rcu(struct list_head *new,
					struct list_head *head)
{
	__list_add_rcu(new, head->prev, head);
}
/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry. Instead, either synchronize_rcu()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
	__list_del_entry(entry);
	/* Only ->prev is poisoned: readers may still walk via ->next. */
	entry->prev = LIST_POISON2;
}
/**
 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: list_unhashed() on the node return true after this. It is
 * useful for RCU based read lockfree traversal if the writer side
 * must know if the list entry is still hashed or already unhashed.
 *
 * In particular, it means that we can not poison the forward pointers
 * that may still be used for walking the hash list and we can only
 * zero the pprev pointer so list_unhashed() will return true after
 * this.
 *
 * The caller must take whatever precautions are necessary (such as
 * holding appropriate locks) to avoid racing with another
 * list-mutation primitive, such as hlist_add_head_rcu() or
 * hlist_del_rcu(), running on this same list. However, it is
 * perfectly legal to run concurrently with the _rcu list-traversal
 * primitives, such as hlist_for_each_entry_rcu().
 */
static inline void hlist_del_init_rcu(struct hlist_node *n)
{
	if (!hlist_unhashed(n)) {
		__hlist_del(n);
		/* NULL (not poison) so hlist_unhashed(n) reports true. */
		n->pprev = NULL;
	}
}
/**
 * list_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 * Note: @old should not be empty.
 */
static inline void list_replace_rcu(struct list_head *old,
				struct list_head *new)
{
	/* Initialize @new before publishing it in place of @old. */
	new->next = old->next;
	new->prev = old->prev;
	/* Publication point: readers now reach @new instead of @old. */
	rcu_assign_pointer(list_next_rcu(new->prev), new);
	new->next->prev = new;
	/* @old's ->next is left intact for readers still standing on it. */
	old->prev = LIST_POISON2;
}
/**
 * __list_splice_init_rcu - join an RCU-protected list into an existing list.
 * @list: the RCU-protected list to splice
 * @prev: points to the last element of the existing list
 * @next: points to the first element of the existing list
 * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
 *
 * The list pointed to by @prev and @next can be RCU-read traversed
 * concurrently with this function.
 *
 * Note that this function blocks.
 *
 * Important note: the caller must take whatever action is necessary to prevent
 * any other updates to the existing list. In principle, it is possible to
 * modify the list as soon as sync() begins execution. If this sort of thing
 * becomes necessary, an alternative version based on call_rcu() could be
 * created. But only if -really- needed -- there is no shortage of RCU API
 * members.
 */
static inline void __list_splice_init_rcu(struct list_head *list,
					struct list_head *prev,
					struct list_head *next,
					void (*sync)(void))
{
	struct list_head *first = list->next;
	struct list_head *last = list->prev;

	/*
	 * "first" and "last" tracking list, so initialize it. RCU readers
	 * have access to this list, so we must use INIT_LIST_HEAD_RCU()
	 * instead of INIT_LIST_HEAD().
	 */
	INIT_LIST_HEAD_RCU(list);

	/*
	 * At this point, the list body still points to the source list.
	 * Wait for any readers to finish using the list before splicing
	 * the list body into the new list. Any new readers will see
	 * an empty list.
	 */
	sync();

	/*
	 * Readers are finished with the source list, so perform splice.
	 * The order is important if the new list is global and accessible
	 * to concurrent RCU readers. Note that RCU readers are not
	 * permitted to traverse the prev pointers without excluding
	 * this function.
	 */
	last->next = next;
	/* Publication point: the spliced body becomes reachable here. */
	rcu_assign_pointer(list_next_rcu(prev), first);
	first->prev = prev;
	next->prev = last;
}
/**
 * list_splice_init_rcu - splice an RCU-protected list into an existing list,
 * designed for stacks.
 * @list: the RCU-protected list to splice
 * @head: the place in the existing list to splice the first list into
 * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
 *
 * Splices at the head of @head; blocks in @sync. See __list_splice_init_rcu().
 */
static inline void list_splice_init_rcu(struct list_head *list,
					struct list_head *head,
					void (*sync)(void))
{
	if (!list_empty(list))
		__list_splice_init_rcu(list, head, head->next, sync);
}
/**
 * list_splice_tail_init_rcu - splice an RCU-protected list into an existing
 * list, designed for queues.
 * @list: the RCU-protected list to splice
 * @head: the place in the existing list to splice the first list into
 * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
 *
 * Splices at the tail of @head; blocks in @sync. See __list_splice_init_rcu().
 */
static inline void list_splice_tail_init_rcu(struct list_head *list,
					struct list_head *head,
					void (*sync)(void))
{
	if (!list_empty(list))
		__list_splice_init_rcu(list, head->prev, head, sync);
}
/**
 * list_entry_rcu - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 *
 * lockless_dereference() provides the dependency-ordered load of @ptr.
 */
#define list_entry_rcu(ptr, type, member) \
	container_of(lockless_dereference(ptr), type, member)
/**
 * Where are list_empty_rcu() and list_first_entry_rcu()?
 *
 * Implementing those functions following their counterparts list_empty() and
 * list_first_entry() is not advisable because they lead to subtle race
 * conditions as the following snippet shows:
 *
 * if (!list_empty_rcu(mylist)) {
 *	struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member);
 *	do_something(bar);
 * }
 *
 * The list may not be empty when list_empty_rcu() checks it, but it may be
 * when list_first_entry_rcu() rereads the ->next pointer.
 *
 * Rereading the ->next pointer is not a problem for list_empty() and
 * list_first_entry() because they would be protected by a lock that blocks
 * writers.
 *
 * See list_first_or_null_rcu() for an alternative.
 */
/**
 * list_first_or_null_rcu - get the first element from a list
 * @ptr: the list head to take the element from.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_head within the struct.
 *
 * Note that if the list is empty, it returns NULL.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 *
 * ->next is read exactly once (READ_ONCE()) so the emptiness check and the
 * returned entry are based on the same snapshot, avoiding the race described
 * in the comment above.
 */
#define list_first_or_null_rcu(ptr, type, member) \
({ \
	struct list_head *__ptr = (ptr); \
	struct list_head *__next = READ_ONCE(__ptr->next); \
	likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
})
/**
 * list_next_or_null_rcu - get the next element from a list
 * @head: the head for the list.
 * @ptr: the list head to take the next element from.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_head within the struct.
 *
 * Note that if the ptr is at the end of the list, NULL is returned.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 *
 * As with list_first_or_null_rcu(), ->next is sampled exactly once.
 */
#define list_next_or_null_rcu(head, ptr, type, member) \
({ \
	struct list_head *__head = (head); \
	struct list_head *__ptr = (ptr); \
	struct list_head *__next = READ_ONCE(__ptr->next); \
	likely(__next != __head) ? list_entry_rcu(__next, type, \
						  member) : NULL; \
})
/**
 * list_for_each_entry_rcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member) \
	for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \
		&pos->member != (head); \
		pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
  328. /**
  329. * list_entry_lockless - get the struct for this entry
  330. * @ptr: the &struct list_head pointer.
  331. * @type: the type of the struct this is embedded in.
  332. * @member: the name of the list_head within the struct.
  333. *
  334. * This primitive may safely run concurrently with the _rcu list-mutation
  335. * primitives such as list_add_rcu(), but requires some implicit RCU
  336. * read-side guarding. One example is running within a special
  337. * exception-time environment where preemption is disabled and where
  338. * lockdep cannot be invoked (in which case updaters must use RCU-sched,
  339. * as in synchronize_sched(), call_rcu_sched(), and friends). Another
  340. * example is when items are added to the list, but never deleted.
  341. */
  342. #define list_entry_lockless(ptr, type, member) \
  343. container_of((typeof(ptr))lockless_dereference(ptr), type, member)
/**
 * list_for_each_entry_lockless - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu(), but requires some implicit RCU
 * read-side guarding. One example is running within a special
 * exception-time environment where preemption is disabled and where
 * lockdep cannot be invoked (in which case updaters must use RCU-sched,
 * as in synchronize_sched(), call_rcu_sched(), and friends). Another
 * example is when items are added to the list, but never deleted.
 */
#define list_for_each_entry_lockless(pos, head, member) \
	for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \
		&pos->member != (head); \
		pos = list_entry_lockless(pos->member.next, typeof(*pos), member))
/**
 * list_for_each_entry_continue_rcu - continue iteration over list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 *
 * Continue to iterate over list of given type, continuing after
 * the current position.
 */
#define list_for_each_entry_continue_rcu(pos, head, member) \
	for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
		&pos->member != (head); \
		pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: list_unhashed() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
	__hlist_del(n);
	/* ->pprev is never reader-traversed, so it may be poisoned. */
	n->pprev = LIST_POISON2;
}
/**
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 */
static inline void hlist_replace_rcu(struct hlist_node *old,
					struct hlist_node *new)
{
	struct hlist_node *next = old->next;

	/* Initialize @new before publishing it in place of @old. */
	new->next = next;
	new->pprev = old->pprev;
	/* Publication point: the predecessor's forward link now reaches @new. */
	rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new);
	if (next)
		new->next->pprev = &new->next;
	/* @old's ->next is kept intact for readers still standing on it. */
	old->pprev = LIST_POISON2;
}
/*
 * return the first or the next element in an RCU protected hlist
 *
 * The casts add the __rcu address-space annotation so sparse can check
 * that these pointers are only accessed via the RCU accessors.
 */
#define hlist_first_rcu(head)	(*((struct hlist_node __rcu **)(&(head)->first)))
#define hlist_next_rcu(node)	(*((struct hlist_node __rcu **)(&(node)->next)))
#define hlist_pprev_rcu(node)	(*((struct hlist_node __rcu **)((node)->pprev)))
/**
 * hlist_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs. Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
					struct hlist_head *h)
{
	struct hlist_node *first = h->first;

	/* Fully initialize @n before publishing it... */
	n->next = first;
	n->pprev = &h->first;
	/* ...then make it reachable with proper ordering. */
	rcu_assign_pointer(hlist_first_rcu(h), n);
	if (first)
		first->pprev = &n->next;
}
  452. /**
  453. * hlist_add_tail_rcu
  454. * @n: the element to add to the hash list.
  455. * @h: the list to add to.
  456. *
  457. * Description:
  458. * Adds the specified element to the specified hlist,
  459. * while permitting racing traversals.
  460. *
  461. * The caller must take whatever precautions are necessary
  462. * (such as holding appropriate locks) to avoid racing
  463. * with another list-mutation primitive, such as hlist_add_head_rcu()
  464. * or hlist_del_rcu(), running on this same list.
  465. * However, it is perfectly legal to run concurrently with
  466. * the _rcu list-traversal primitives, such as
  467. * hlist_for_each_entry_rcu(), used to prevent memory-consistency
  468. * problems on Alpha CPUs. Regardless of the type of CPU, the
  469. * list-traversal primitive must be guarded by rcu_read_lock().
  470. */
  471. static inline void hlist_add_tail_rcu(struct hlist_node *n,
  472. struct hlist_head *h)
  473. {
  474. struct hlist_node *i, *last = NULL;
  475. for (i = hlist_first_rcu(h); i; i = hlist_next_rcu(i))
  476. last = i;
  477. if (last) {
  478. n->next = last->next;
  479. n->pprev = &last->next;
  480. rcu_assign_pointer(hlist_next_rcu(last), n);
  481. } else {
  482. hlist_add_head_rcu(n, h);
  483. }
  484. }
/**
 * hlist_add_before_rcu
 * @n: the new element to add to the hash list.
 * @next: the existing element to add the new element before.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * before the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_before_rcu(struct hlist_node *n,
					struct hlist_node *next)
{
	/* Fully initialize @n before publishing it... */
	n->pprev = next->pprev;
	n->next = next;
	/* ...then splice it in via the predecessor's forward link. */
	rcu_assign_pointer(hlist_pprev_rcu(n), n);
	next->pprev = &n->next;
}
/**
 * hlist_add_behind_rcu
 * @n: the new element to add to the hash list.
 * @prev: the existing element to add the new element after.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * after the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_behind_rcu(struct hlist_node *n,
					struct hlist_node *prev)
{
	/* Fully initialize @n before publishing it after @prev. */
	n->next = prev->next;
	n->pprev = &prev->next;
	rcu_assign_pointer(hlist_next_rcu(prev), n);
	if (n->next)
		n->next->pprev = &n->next;
}
/*
 * __hlist_for_each_rcu - iterate over the raw hlist_nodes of an RCU hlist.
 * @pos: the &struct hlist_node to use as a loop cursor.
 * @head: the head for your list.
 */
#define __hlist_for_each_rcu(pos, head) \
	for (pos = rcu_dereference(hlist_first_rcu(head)); \
		pos; \
		pos = rcu_dereference(hlist_next_rcu(pos)))
/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu(pos, head, member) \
	for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
			typeof(*(pos)), member); \
		pos; \
		pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))
/**
 * hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing)
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 *
 * This is the same as hlist_for_each_entry_rcu() except that it does
 * not do any RCU debugging or tracing.
 */
#define hlist_for_each_entry_rcu_notrace(pos, head, member) \
	for (pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_first_rcu(head)),\
			typeof(*(pos)), member); \
		pos; \
		pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))
/**
 * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 *
 * This variant uses rcu_dereference_bh(), for readers protected by
 * rcu_read_lock_bh() rather than rcu_read_lock().
 */
#define hlist_for_each_entry_rcu_bh(pos, head, member) \
	for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\
			typeof(*(pos)), member); \
		pos; \
		pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))
/**
 * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
 * @pos: the type * to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue_rcu(pos, member) \
	for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
			&(pos)->member)), typeof(*(pos)), member); \
		pos; \
		pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
			&(pos)->member)), typeof(*(pos)), member))
/**
 * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
 * @pos: the type * to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 *
 * This variant uses rcu_dereference_bh(), for readers protected by
 * rcu_read_lock_bh() rather than rcu_read_lock().
 */
#define hlist_for_each_entry_continue_rcu_bh(pos, member) \
	for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \
			&(pos)->member)), typeof(*(pos)), member); \
		pos; \
		pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \
			&(pos)->member)), typeof(*(pos)), member))
/**
 * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point
 * @pos: the type * to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 *
 * Unlike the _continue_ variant, iteration includes the current @pos.
 */
#define hlist_for_each_entry_from_rcu(pos, member) \
	for (; pos; \
		pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
			&(pos)->member)), typeof(*(pos)), member))
  624. #endif /* __KERNEL__ */
  625. #endif