/*
 * linux_list.h - doubly linked list and hash-list (hlist) primitives
 * adapted from the Linux kernel for use in userspace.
 */
  1. #ifndef _LINUX_LIST_H
  2. #define _LINUX_LIST_H
  3. #undef offsetof
  4. #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
  5. /**
  6. * container_of - cast a member of a structure out to the containing structure
  7. *
  8. * @ptr: the pointer to the member.
  9. * @type: the type of the container struct this is embedded in.
  10. * @member: the name of the member within the struct.
  11. *
  12. */
  13. #define container_of(ptr, type, member) ({ \
  14. const typeof( ((type *)0)->member ) *__mptr = (ptr); \
  15. (type *)( (char *)__mptr - offsetof(type,member) );})
  16. /*
  17. * Check at compile time that something is of a particular type.
  18. * Always evaluates to 1 so you may use it easily in comparisons.
  19. */
  20. #define typecheck(type,x) \
  21. ({ type __dummy; \
  22. typeof(x) __dummy2; \
  23. (void)(&__dummy == &__dummy2); \
  24. 1; \
  25. })
  26. #define prefetch(x) 1
  27. /* empty define to make this work in userspace -HW */
  28. #define smp_wmb()
  29. /*
  30. * These are non-NULL pointers that will result in page faults
  31. * under normal circumstances, used to verify that nobody uses
  32. * non-initialized list entries.
  33. */
  34. #define LIST_POISON1 ((void *) 0x00100100)
  35. #define LIST_POISON2 ((void *) 0x00200200)
  36. /*
  37. * Simple doubly linked list implementation.
  38. *
  39. * Some of the internal functions ("__xxx") are useful when
  40. * manipulating whole lists rather than single entries, as
  41. * sometimes we already know the next/prev entries and we can
  42. * generate better code by using them directly rather than
  43. * using the generic single-entry routines.
  44. */
  45. struct list_head {
  46. struct list_head *next, *prev;
  47. };
  48. #define LIST_HEAD_INIT(name) { &(name), &(name) }
  49. #define LIST_HEAD(name) \
  50. struct list_head name = LIST_HEAD_INIT(name)
  51. #define INIT_LIST_HEAD(ptr) do { \
  52. (ptr)->next = (ptr); (ptr)->prev = (ptr); \
  53. } while (0)
  54. /*
  55. * Insert a new entry between two known consecutive entries.
  56. *
  57. * This is only for internal list manipulation where we know
  58. * the prev/next entries already!
  59. */
  60. static inline void __list_add(struct list_head *new,
  61. struct list_head *prev,
  62. struct list_head *next)
  63. {
  64. next->prev = new;
  65. new->next = next;
  66. new->prev = prev;
  67. prev->next = new;
  68. }
  69. /**
  70. * list_add - add a new entry
  71. * @new: new entry to be added
  72. * @head: list head to add it after
  73. *
  74. * Insert a new entry after the specified head.
  75. * This is good for implementing stacks.
  76. */
  77. static inline void list_add(struct list_head *new, struct list_head *head)
  78. {
  79. __list_add(new, head, head->next);
  80. }
  81. /**
  82. * list_add_tail - add a new entry
  83. * @new: new entry to be added
  84. * @head: list head to add it before
  85. *
  86. * Insert a new entry before the specified head.
  87. * This is useful for implementing queues.
  88. */
  89. static inline void list_add_tail(struct list_head *new, struct list_head *head)
  90. {
  91. __list_add(new, head->prev, head);
  92. }
  93. /*
  94. * Insert a new entry between two known consecutive entries.
  95. *
  96. * This is only for internal list manipulation where we know
  97. * the prev/next entries already!
  98. */
  99. static inline void __list_add_rcu(struct list_head * new,
  100. struct list_head * prev, struct list_head * next)
  101. {
  102. new->next = next;
  103. new->prev = prev;
  104. smp_wmb();
  105. next->prev = new;
  106. prev->next = new;
  107. }
  108. /**
  109. * list_add_rcu - add a new entry to rcu-protected list
  110. * @new: new entry to be added
  111. * @head: list head to add it after
  112. *
  113. * Insert a new entry after the specified head.
  114. * This is good for implementing stacks.
  115. *
  116. * The caller must take whatever precautions are necessary
  117. * (such as holding appropriate locks) to avoid racing
  118. * with another list-mutation primitive, such as list_add_rcu()
  119. * or list_del_rcu(), running on this same list.
  120. * However, it is perfectly legal to run concurrently with
  121. * the _rcu list-traversal primitives, such as
  122. * list_for_each_entry_rcu().
  123. */
  124. static inline void list_add_rcu(struct list_head *new, struct list_head *head)
  125. {
  126. __list_add_rcu(new, head, head->next);
  127. }
  128. /**
  129. * list_add_tail_rcu - add a new entry to rcu-protected list
  130. * @new: new entry to be added
  131. * @head: list head to add it before
  132. *
  133. * Insert a new entry before the specified head.
  134. * This is useful for implementing queues.
  135. *
  136. * The caller must take whatever precautions are necessary
  137. * (such as holding appropriate locks) to avoid racing
  138. * with another list-mutation primitive, such as list_add_tail_rcu()
  139. * or list_del_rcu(), running on this same list.
  140. * However, it is perfectly legal to run concurrently with
  141. * the _rcu list-traversal primitives, such as
  142. * list_for_each_entry_rcu().
  143. */
  144. static inline void list_add_tail_rcu(struct list_head *new,
  145. struct list_head *head)
  146. {
  147. __list_add_rcu(new, head->prev, head);
  148. }
  149. /*
  150. * Delete a list entry by making the prev/next entries
  151. * point to each other.
  152. *
  153. * This is only for internal list manipulation where we know
  154. * the prev/next entries already!
  155. */
  156. static inline void __list_del(struct list_head * prev, struct list_head * next)
  157. {
  158. next->prev = prev;
  159. prev->next = next;
  160. }
  161. /**
  162. * list_del - deletes entry from list.
  163. * @entry: the element to delete from the list.
  164. * Note: list_empty on entry does not return true after this, the entry is
  165. * in an undefined state.
  166. */
  167. static inline void list_del(struct list_head *entry)
  168. {
  169. __list_del(entry->prev, entry->next);
  170. entry->next = LIST_POISON1;
  171. entry->prev = LIST_POISON2;
  172. }
  173. /**
  174. * list_del_rcu - deletes entry from list without re-initialization
  175. * @entry: the element to delete from the list.
  176. *
  177. * Note: list_empty on entry does not return true after this,
  178. * the entry is in an undefined state. It is useful for RCU based
  179. * lockfree traversal.
  180. *
  181. * In particular, it means that we can not poison the forward
  182. * pointers that may still be used for walking the list.
  183. *
  184. * The caller must take whatever precautions are necessary
  185. * (such as holding appropriate locks) to avoid racing
  186. * with another list-mutation primitive, such as list_del_rcu()
  187. * or list_add_rcu(), running on this same list.
  188. * However, it is perfectly legal to run concurrently with
  189. * the _rcu list-traversal primitives, such as
  190. * list_for_each_entry_rcu().
  191. *
  192. * Note that the caller is not permitted to immediately free
  193. * the newly deleted entry. Instead, either synchronize_kernel()
  194. * or call_rcu() must be used to defer freeing until an RCU
  195. * grace period has elapsed.
  196. */
  197. static inline void list_del_rcu(struct list_head *entry)
  198. {
  199. __list_del(entry->prev, entry->next);
  200. entry->prev = LIST_POISON2;
  201. }
  202. /**
  203. * list_del_init - deletes entry from list and reinitialize it.
  204. * @entry: the element to delete from the list.
  205. */
  206. static inline void list_del_init(struct list_head *entry)
  207. {
  208. __list_del(entry->prev, entry->next);
  209. INIT_LIST_HEAD(entry);
  210. }
  211. /**
  212. * list_move - delete from one list and add as another's head
  213. * @list: the entry to move
  214. * @head: the head that will precede our entry
  215. */
  216. static inline void list_move(struct list_head *list, struct list_head *head)
  217. {
  218. __list_del(list->prev, list->next);
  219. list_add(list, head);
  220. }
  221. /**
  222. * list_move_tail - delete from one list and add as another's tail
  223. * @list: the entry to move
  224. * @head: the head that will follow our entry
  225. */
  226. static inline void list_move_tail(struct list_head *list,
  227. struct list_head *head)
  228. {
  229. __list_del(list->prev, list->next);
  230. list_add_tail(list, head);
  231. }
  232. /**
  233. * list_empty - tests whether a list is empty
  234. * @head: the list to test.
  235. */
  236. static inline int list_empty(const struct list_head *head)
  237. {
  238. return head->next == head;
  239. }
  240. /**
  241. * list_empty_careful - tests whether a list is
  242. * empty _and_ checks that no other CPU might be
  243. * in the process of still modifying either member
  244. *
  245. * NOTE: using list_empty_careful() without synchronization
  246. * can only be safe if the only activity that can happen
  247. * to the list entry is list_del_init(). Eg. it cannot be used
  248. * if another CPU could re-list_add() it.
  249. *
  250. * @head: the list to test.
  251. */
  252. static inline int list_empty_careful(const struct list_head *head)
  253. {
  254. struct list_head *next = head->next;
  255. return (next == head) && (next == head->prev);
  256. }
  257. static inline void __list_splice(struct list_head *list,
  258. struct list_head *head)
  259. {
  260. struct list_head *first = list->next;
  261. struct list_head *last = list->prev;
  262. struct list_head *at = head->next;
  263. first->prev = head;
  264. head->next = first;
  265. last->next = at;
  266. at->prev = last;
  267. }
  268. /**
  269. * list_splice - join two lists
  270. * @list: the new list to add.
  271. * @head: the place to add it in the first list.
  272. */
  273. static inline void list_splice(struct list_head *list, struct list_head *head)
  274. {
  275. if (!list_empty(list))
  276. __list_splice(list, head);
  277. }
  278. /**
  279. * list_splice_init - join two lists and reinitialise the emptied list.
  280. * @list: the new list to add.
  281. * @head: the place to add it in the first list.
  282. *
  283. * The list at @list is reinitialised
  284. */
  285. static inline void list_splice_init(struct list_head *list,
  286. struct list_head *head)
  287. {
  288. if (!list_empty(list)) {
  289. __list_splice(list, head);
  290. INIT_LIST_HEAD(list);
  291. }
  292. }
  293. /**
  294. * list_entry - get the struct for this entry
  295. * @ptr: the &struct list_head pointer.
  296. * @type: the type of the struct this is embedded in.
  297. * @member: the name of the list_struct within the struct.
  298. */
  299. #define list_entry(ptr, type, member) \
  300. container_of(ptr, type, member)
  301. /**
  302. * list_for_each - iterate over a list
  303. * @pos: the &struct list_head to use as a loop counter.
  304. * @head: the head for your list.
  305. */
  306. #define list_for_each(pos, head) \
  307. for (pos = (head)->next, prefetch(pos->next); pos != (head); \
  308. pos = pos->next, prefetch(pos->next))
  309. /**
  310. * __list_for_each - iterate over a list
  311. * @pos: the &struct list_head to use as a loop counter.
  312. * @head: the head for your list.
  313. *
  314. * This variant differs from list_for_each() in that it's the
  315. * simplest possible list iteration code, no prefetching is done.
  316. * Use this for code that knows the list to be very short (empty
  317. * or 1 entry) most of the time.
  318. */
  319. #define __list_for_each(pos, head) \
  320. for (pos = (head)->next; pos != (head); pos = pos->next)
  321. /**
  322. * list_for_each_prev - iterate over a list backwards
  323. * @pos: the &struct list_head to use as a loop counter.
  324. * @head: the head for your list.
  325. */
  326. #define list_for_each_prev(pos, head) \
  327. for (pos = (head)->prev, prefetch(pos->prev); pos != (head); \
  328. pos = pos->prev, prefetch(pos->prev))
  329. /**
  330. * list_for_each_safe - iterate over a list safe against removal of list entry
  331. * @pos: the &struct list_head to use as a loop counter.
  332. * @n: another &struct list_head to use as temporary storage
  333. * @head: the head for your list.
  334. */
  335. #define list_for_each_safe(pos, n, head) \
  336. for (pos = (head)->next, n = pos->next; pos != (head); \
  337. pos = n, n = pos->next)
  338. /**
  339. * list_for_each_entry - iterate over list of given type
  340. * @pos: the type * to use as a loop counter.
  341. * @head: the head for your list.
  342. * @member: the name of the list_struct within the struct.
  343. */
  344. #define list_for_each_entry(pos, head, member) \
  345. for (pos = list_entry((head)->next, typeof(*pos), member), \
  346. prefetch(pos->member.next); \
  347. &pos->member != (head); \
  348. pos = list_entry(pos->member.next, typeof(*pos), member), \
  349. prefetch(pos->member.next))
  350. /**
  351. * list_for_each_entry_reverse - iterate backwards over list of given type.
  352. * @pos: the type * to use as a loop counter.
  353. * @head: the head for your list.
  354. * @member: the name of the list_struct within the struct.
  355. */
  356. #define list_for_each_entry_reverse(pos, head, member) \
  357. for (pos = list_entry((head)->prev, typeof(*pos), member), \
  358. prefetch(pos->member.prev); \
  359. &pos->member != (head); \
  360. pos = list_entry(pos->member.prev, typeof(*pos), member), \
  361. prefetch(pos->member.prev))
  362. /**
  363. * list_prepare_entry - prepare a pos entry for use as a start point in
  364. * list_for_each_entry_continue
  365. * @pos: the type * to use as a start point
  366. * @head: the head of the list
  367. * @member: the name of the list_struct within the struct.
  368. */
  369. #define list_prepare_entry(pos, head, member) \
  370. ((pos) ? : list_entry(head, typeof(*pos), member))
  371. /**
  372. * list_for_each_entry_continue - iterate over list of given type
  373. * continuing after existing point
  374. * @pos: the type * to use as a loop counter.
  375. * @head: the head for your list.
  376. * @member: the name of the list_struct within the struct.
  377. */
  378. #define list_for_each_entry_continue(pos, head, member) \
  379. for (pos = list_entry(pos->member.next, typeof(*pos), member), \
  380. prefetch(pos->member.next); \
  381. &pos->member != (head); \
  382. pos = list_entry(pos->member.next, typeof(*pos), member), \
  383. prefetch(pos->member.next))
  384. /**
  385. * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
  386. * @pos: the type * to use as a loop counter.
  387. * @n: another type * to use as temporary storage
  388. * @head: the head for your list.
  389. * @member: the name of the list_struct within the struct.
  390. */
  391. #define list_for_each_entry_safe(pos, n, head, member) \
  392. for (pos = list_entry((head)->next, typeof(*pos), member), \
  393. n = list_entry(pos->member.next, typeof(*pos), member); \
  394. &pos->member != (head); \
  395. pos = n, n = list_entry(n->member.next, typeof(*n), member))
  396. /**
  397. * list_for_each_rcu - iterate over an rcu-protected list
  398. * @pos: the &struct list_head to use as a loop counter.
  399. * @head: the head for your list.
  400. *
  401. * This list-traversal primitive may safely run concurrently with
  402. * the _rcu list-mutation primitives such as list_add_rcu()
  403. * as long as the traversal is guarded by rcu_read_lock().
  404. */
  405. #define list_for_each_rcu(pos, head) \
  406. for (pos = (head)->next, prefetch(pos->next); pos != (head); \
  407. pos = pos->next, ({ smp_read_barrier_depends(); 0;}), prefetch(pos->next))
  408. #define __list_for_each_rcu(pos, head) \
  409. for (pos = (head)->next; pos != (head); \
  410. pos = pos->next, ({ smp_read_barrier_depends(); 0;}))
  411. /**
  412. * list_for_each_safe_rcu - iterate over an rcu-protected list safe
  413. * against removal of list entry
  414. * @pos: the &struct list_head to use as a loop counter.
  415. * @n: another &struct list_head to use as temporary storage
  416. * @head: the head for your list.
  417. *
  418. * This list-traversal primitive may safely run concurrently with
  419. * the _rcu list-mutation primitives such as list_add_rcu()
  420. * as long as the traversal is guarded by rcu_read_lock().
  421. */
  422. #define list_for_each_safe_rcu(pos, n, head) \
  423. for (pos = (head)->next, n = pos->next; pos != (head); \
  424. pos = n, ({ smp_read_barrier_depends(); 0;}), n = pos->next)
  425. /**
  426. * list_for_each_entry_rcu - iterate over rcu list of given type
  427. * @pos: the type * to use as a loop counter.
  428. * @head: the head for your list.
  429. * @member: the name of the list_struct within the struct.
  430. *
  431. * This list-traversal primitive may safely run concurrently with
  432. * the _rcu list-mutation primitives such as list_add_rcu()
  433. * as long as the traversal is guarded by rcu_read_lock().
  434. */
  435. #define list_for_each_entry_rcu(pos, head, member) \
  436. for (pos = list_entry((head)->next, typeof(*pos), member), \
  437. prefetch(pos->member.next); \
  438. &pos->member != (head); \
  439. pos = list_entry(pos->member.next, typeof(*pos), member), \
  440. ({ smp_read_barrier_depends(); 0;}), \
  441. prefetch(pos->member.next))
  442. /**
  443. * list_for_each_continue_rcu - iterate over an rcu-protected list
  444. * continuing after existing point.
  445. * @pos: the &struct list_head to use as a loop counter.
  446. * @head: the head for your list.
  447. *
  448. * This list-traversal primitive may safely run concurrently with
  449. * the _rcu list-mutation primitives such as list_add_rcu()
  450. * as long as the traversal is guarded by rcu_read_lock().
  451. */
  452. #define list_for_each_continue_rcu(pos, head) \
  453. for ((pos) = (pos)->next, prefetch((pos)->next); (pos) != (head); \
  454. (pos) = (pos)->next, ({ smp_read_barrier_depends(); 0;}), prefetch((pos)->next))
  455. /*
  456. * Double linked lists with a single pointer list head.
  457. * Mostly useful for hash tables where the two pointer list head is
  458. * too wasteful.
  459. * You lose the ability to access the tail in O(1).
  460. */
  461. struct hlist_head {
  462. struct hlist_node *first;
  463. };
  464. struct hlist_node {
  465. struct hlist_node *next, **pprev;
  466. };
  467. #define HLIST_HEAD_INIT { .first = NULL }
  468. #define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
  469. #define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
  470. #define INIT_HLIST_NODE(ptr) ((ptr)->next = NULL, (ptr)->pprev = NULL)
  471. static inline int hlist_unhashed(const struct hlist_node *h)
  472. {
  473. return !h->pprev;
  474. }
  475. static inline int hlist_empty(const struct hlist_head *h)
  476. {
  477. return !h->first;
  478. }
  479. static inline void __hlist_del(struct hlist_node *n)
  480. {
  481. struct hlist_node *next = n->next;
  482. struct hlist_node **pprev = n->pprev;
  483. *pprev = next;
  484. if (next)
  485. next->pprev = pprev;
  486. }
  487. static inline void hlist_del(struct hlist_node *n)
  488. {
  489. __hlist_del(n);
  490. n->next = LIST_POISON1;
  491. n->pprev = LIST_POISON2;
  492. }
  493. /**
  494. * hlist_del_rcu - deletes entry from hash list without re-initialization
  495. * @n: the element to delete from the hash list.
  496. *
  497. * Note: list_unhashed() on entry does not return true after this,
  498. * the entry is in an undefined state. It is useful for RCU based
  499. * lockfree traversal.
  500. *
  501. * In particular, it means that we can not poison the forward
  502. * pointers that may still be used for walking the hash list.
  503. *
  504. * The caller must take whatever precautions are necessary
  505. * (such as holding appropriate locks) to avoid racing
  506. * with another list-mutation primitive, such as hlist_add_head_rcu()
  507. * or hlist_del_rcu(), running on this same list.
  508. * However, it is perfectly legal to run concurrently with
  509. * the _rcu list-traversal primitives, such as
  510. * hlist_for_each_entry().
  511. */
  512. static inline void hlist_del_rcu(struct hlist_node *n)
  513. {
  514. __hlist_del(n);
  515. n->pprev = LIST_POISON2;
  516. }
  517. static inline void hlist_del_init(struct hlist_node *n)
  518. {
  519. if (n->pprev) {
  520. __hlist_del(n);
  521. INIT_HLIST_NODE(n);
  522. }
  523. }
  524. #define hlist_del_rcu_init hlist_del_init
  525. static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
  526. {
  527. struct hlist_node *first = h->first;
  528. n->next = first;
  529. if (first)
  530. first->pprev = &n->next;
  531. h->first = n;
  532. n->pprev = &h->first;
  533. }
  534. /**
  535. * hlist_add_head_rcu - adds the specified element to the specified hlist,
  536. * while permitting racing traversals.
  537. * @n: the element to add to the hash list.
  538. * @h: the list to add to.
  539. *
  540. * The caller must take whatever precautions are necessary
  541. * (such as holding appropriate locks) to avoid racing
  542. * with another list-mutation primitive, such as hlist_add_head_rcu()
  543. * or hlist_del_rcu(), running on this same list.
  544. * However, it is perfectly legal to run concurrently with
  545. * the _rcu list-traversal primitives, such as
  546. * hlist_for_each_entry(), but only if smp_read_barrier_depends()
  547. * is used to prevent memory-consistency problems on Alpha CPUs.
  548. * Regardless of the type of CPU, the list-traversal primitive
  549. * must be guarded by rcu_read_lock().
  550. *
  551. * OK, so why don't we have an hlist_for_each_entry_rcu()???
  552. */
  553. static inline void hlist_add_head_rcu(struct hlist_node *n,
  554. struct hlist_head *h)
  555. {
  556. struct hlist_node *first = h->first;
  557. n->next = first;
  558. n->pprev = &h->first;
  559. smp_wmb();
  560. if (first)
  561. first->pprev = &n->next;
  562. h->first = n;
  563. }
  564. /* next must be != NULL */
  565. static inline void hlist_add_before(struct hlist_node *n,
  566. struct hlist_node *next)
  567. {
  568. n->pprev = next->pprev;
  569. n->next = next;
  570. next->pprev = &n->next;
  571. *(n->pprev) = n;
  572. }
  573. static inline void hlist_add_after(struct hlist_node *n,
  574. struct hlist_node *next)
  575. {
  576. next->next = n->next;
  577. n->next = next;
  578. next->pprev = &n->next;
  579. if(next->next)
  580. next->next->pprev = &next->next;
  581. }
  582. #define hlist_entry(ptr, type, member) container_of(ptr,type,member)
  583. #define hlist_for_each(pos, head) \
  584. for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
  585. pos = pos->next)
  586. #define hlist_for_each_safe(pos, n, head) \
  587. for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
  588. pos = n)
  589. /**
  590. * hlist_for_each_entry - iterate over list of given type
  591. * @tpos: the type * to use as a loop counter.
  592. * @pos: the &struct hlist_node to use as a loop counter.
  593. * @head: the head for your list.
  594. * @member: the name of the hlist_node within the struct.
  595. */
  596. #define hlist_for_each_entry(tpos, pos, head, member) \
  597. for (pos = (head)->first; \
  598. pos && ({ prefetch(pos->next); 1;}) && \
  599. ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
  600. pos = pos->next)
  601. /**
  602. * hlist_for_each_entry_continue - iterate over a hlist continuing after existing point
  603. * @tpos: the type * to use as a loop counter.
  604. * @pos: the &struct hlist_node to use as a loop counter.
  605. * @member: the name of the hlist_node within the struct.
  606. */
  607. #define hlist_for_each_entry_continue(tpos, pos, member) \
  608. for (pos = (pos)->next; \
  609. pos && ({ prefetch(pos->next); 1;}) && \
  610. ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
  611. pos = pos->next)
  612. /**
  613. * hlist_for_each_entry_from - iterate over a hlist continuing from existing point
  614. * @tpos: the type * to use as a loop counter.
  615. * @pos: the &struct hlist_node to use as a loop counter.
  616. * @member: the name of the hlist_node within the struct.
  617. */
  618. #define hlist_for_each_entry_from(tpos, pos, member) \
  619. for (; pos && ({ prefetch(pos->next); 1;}) && \
  620. ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
  621. pos = pos->next)
  622. /**
  623. * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
  624. * @tpos: the type * to use as a loop counter.
  625. * @pos: the &struct hlist_node to use as a loop counter.
  626. * @n: another &struct hlist_node to use as temporary storage
  627. * @head: the head for your list.
  628. * @member: the name of the hlist_node within the struct.
  629. */
  630. #define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
  631. for (pos = (head)->first; \
  632. pos && ({ n = pos->next; 1; }) && \
  633. ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
  634. pos = n)
  635. /**
  636. * hlist_for_each_entry_rcu - iterate over rcu list of given type
  637. * @pos: the type * to use as a loop counter.
  638. * @pos: the &struct hlist_node to use as a loop counter.
  639. * @head: the head for your list.
  640. * @member: the name of the hlist_node within the struct.
  641. *
  642. * This list-traversal primitive may safely run concurrently with
  643. * the _rcu list-mutation primitives such as hlist_add_rcu()
  644. * as long as the traversal is guarded by rcu_read_lock().
  645. */
  646. #define hlist_for_each_entry_rcu(tpos, pos, head, member) \
  647. for (pos = (head)->first; \
  648. pos && ({ prefetch(pos->next); 1;}) && \
  649. ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
  650. pos = pos->next, ({ smp_read_barrier_depends(); 0; }) )
  651. #endif