rcu_segcblist.h

/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU segmented callback lists
 *
 * This seemingly RCU-private file must be available to SRCU users
 * because the size of the TREE SRCU srcu_struct structure depends
 * on these definitions.
 *
 * Copyright IBM Corporation, 2017
 *
 * Authors: Paul E. McKenney <[email protected]>
 */

#ifndef __INCLUDE_LINUX_RCU_SEGCBLIST_H
#define __INCLUDE_LINUX_RCU_SEGCBLIST_H

#include <linux/types.h>
#include <linux/atomic.h>

/* Simple unsegmented callback lists. */
struct rcu_cblist {
        struct rcu_head *head;
        struct rcu_head **tail;
        long len;
};

#define RCU_CBLIST_INITIALIZER(n) { .head = NULL, .tail = &n.head }
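
/*
 * Illustrative sketch, not from the kernel sources: ->tail points at the
 * ->next pointer of the last callback (or at ->head when the list is
 * empty), which is what makes enqueueing O(1) without a back-pointer.
 * The function name below is hypothetical; the in-tree list helpers live
 * in kernel/rcu/rcu_segcblist.c.
 */
static inline void example_cblist_enqueue(struct rcu_cblist *rclp,
                                          struct rcu_head *rhp)
{
        rhp->next = NULL;               /* Keep the list NULL-terminated. */
        *rclp->tail = rhp;              /* Link after the current last callback. */
        rclp->tail = &rhp->next;        /* The new callback is now the last one. */
        rclp->len++;                    /* Track the count, as ->len does above. */
}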

/* Complicated segmented callback lists.  ;-) */

/*
 * Index values for segments in rcu_segcblist structure.
 *
 * The segments are as follows:
 *
 * [head, *tails[RCU_DONE_TAIL]):
 *      Callbacks whose grace period has elapsed, and thus can be invoked.
 * [*tails[RCU_DONE_TAIL], *tails[RCU_WAIT_TAIL]):
 *      Callbacks waiting for the current GP from the current CPU's viewpoint.
 * [*tails[RCU_WAIT_TAIL], *tails[RCU_NEXT_READY_TAIL]):
 *      Callbacks that arrived before the next GP started, again from
 *      the current CPU's viewpoint.  These can be handled by the next GP.
 * [*tails[RCU_NEXT_READY_TAIL], *tails[RCU_NEXT_TAIL]):
 *      Callbacks that might have arrived after the next GP started.
 *      There is some uncertainty as to when a given GP starts and
 *      ends, but a CPU knows the exact times if it is the one starting
 *      or ending the GP.  Other CPUs know that the previous GP ends
 *      before the next one starts.
 *
 * Note that RCU_WAIT_TAIL cannot be empty unless RCU_NEXT_READY_TAIL is also
 * empty.
 *
 * The ->gp_seq[] array contains the grace-period number at which the
 * corresponding segment of callbacks will be ready to invoke.  A given
 * element of this array is meaningful only when the corresponding segment
 * is non-empty, and it is never valid for RCU_DONE_TAIL (whose callbacks
 * are already ready to invoke) or for RCU_NEXT_TAIL (whose callbacks have
 * not yet been assigned a grace-period number).
 */
#define RCU_DONE_TAIL           0       /* Also RCU_WAIT head. */
#define RCU_WAIT_TAIL           1       /* Also RCU_NEXT_READY head. */
#define RCU_NEXT_READY_TAIL     2       /* Also RCU_NEXT head. */
#define RCU_NEXT_TAIL           3
#define RCU_CBLIST_NSEGS        4

/*
 *                      ==NOCB Offloading state machine==
 *
 * (1) SEGCBLIST_RCU_CORE
 *
 *     Callbacks processed by rcu_core() from softirqs or local
 *     rcuc kthread, without holding nocb_lock.
 *
 * (2) SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED
 *
 *     Callbacks processed by rcu_core() from softirqs or local
 *     rcuc kthread, while holding nocb_lock.  Waking up CB and GP kthreads,
 *     allowing nocb_timer to be armed.
 *
 * (3) The CB and GP kthreads each acknowledge SEGCBLIST_OFFLOADED, in
 *     either order:
 *
 *     SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED |
 *     SEGCBLIST_KTHREAD_CB
 *
 *         CB kthread woke up and acknowledged SEGCBLIST_OFFLOADED.
 *         Processes callbacks concurrently with rcu_core(), holding
 *         nocb_lock.
 *
 *     SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED |
 *     SEGCBLIST_KTHREAD_GP
 *
 *         GP kthread woke up and acknowledged SEGCBLIST_OFFLOADED.
 *
 * (4) SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_GP |
 *     SEGCBLIST_KTHREAD_CB
 *
 *     Kthreads handle callbacks holding nocb_lock, local rcu_core() stops
 *     handling callbacks.  Enable bypass queueing.
 */

/*
 *                      ==NOCB De-Offloading state machine==
 *
 * (1) SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_CB |
 *     SEGCBLIST_KTHREAD_GP
 *
 *     CB/GP kthreads handle callbacks holding nocb_lock, local rcu_core()
 *     ignores callbacks.  Bypass enqueue is enabled.
 *
 * (2) SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED |
 *     SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP
 *
 *     CB/GP kthreads handle callbacks holding nocb_lock, local rcu_core()
 *     handles callbacks concurrently.  Bypass enqueue is enabled.
 *     Invoke RCU core so we make sure not to preempt it in the middle,
 *     leaving some urgent work unattended within a jiffy.
 *
 * (3) SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING | SEGCBLIST_KTHREAD_CB |
 *     SEGCBLIST_KTHREAD_GP
 *
 *     CB/GP kthreads and local rcu_core() handle callbacks concurrently
 *     holding nocb_lock.  Wake up CB and GP kthreads if necessary.  Disable
 *     bypass enqueue.
 *
 * (4) The CB and GP kthreads each acknowledge that SEGCBLIST_OFFLOADED got
 *     cleared, in either order:
 *
 *     SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING | SEGCBLIST_KTHREAD_CB
 *
 *         GP kthread woke up and acknowledged the fact that
 *         SEGCBLIST_OFFLOADED got cleared.  The callbacks from the target
 *         CPU will be ignored from the GP kthread loop.
 *
 *     SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING | SEGCBLIST_KTHREAD_GP
 *
 *         CB kthread woke up and acknowledged the fact that
 *         SEGCBLIST_OFFLOADED got cleared.  The CB kthread goes to sleep
 *         until it ever gets re-offloaded.
 *
 * (5) SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING
 *
 *     Callbacks processed by rcu_core() from softirqs or local
 *     rcuc kthread, while holding nocb_lock.  Forbid nocb_timer to be
 *     armed.  Flush pending nocb_timer.  Flush nocb bypass callbacks.
 *
 * (6) SEGCBLIST_RCU_CORE
 *
 *     Callbacks processed by rcu_core() from softirqs or local
 *     rcuc kthread, without holding nocb_lock.
 */
#define SEGCBLIST_ENABLED       BIT(0)
#define SEGCBLIST_RCU_CORE      BIT(1)
#define SEGCBLIST_LOCKING       BIT(2)
#define SEGCBLIST_KTHREAD_CB    BIT(3)
#define SEGCBLIST_KTHREAD_GP    BIT(4)
#define SEGCBLIST_OFFLOADED     BIT(5)
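
/*
 * Illustrative sketch, not from the kernel sources: the state machines
 * above are encoded as combinations of the flag bits defined here.  The
 * helper below shows one way such a combination might be tested; the
 * function name is hypothetical (the in-tree flag accessors live under
 * kernel/rcu/).
 */
static inline bool example_segcblist_fully_offloaded(u8 flags)
{
        /* Final state of the offloading machine: both kthreads acknowledged. */
        u8 offloaded = SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED |
                       SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP;

        return (flags & offloaded) == offloaded;
}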

struct rcu_segcblist {
        struct rcu_head *head;
        struct rcu_head **tails[RCU_CBLIST_NSEGS];
        unsigned long gp_seq[RCU_CBLIST_NSEGS];
#ifdef CONFIG_RCU_NOCB_CPU
        atomic_long_t len;
#else
        long len;
#endif
        long seglen[RCU_CBLIST_NSEGS];
        u8 flags;
};
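
/*
 * Illustrative sketch, not from the kernel sources: how the
 * CONFIG_RCU_NOCB_CPU difference in the ->len field above might be hidden
 * behind a single accessor.  With offloading, ->len is updated locklessly
 * by the nocb kthreads, hence the atomic type; otherwise only the owning
 * CPU updates it.  The function name is hypothetical; the in-tree
 * accessors live under kernel/rcu/.
 */
static inline long example_segcblist_n_cbs(struct rcu_segcblist *rsclp)
{
#ifdef CONFIG_RCU_NOCB_CPU
        return atomic_long_read(&rsclp->len);  /* Lockless updates by nocb kthreads. */
#else
        return rsclp->len;      /* Only the owning CPU updates ->len. */
#endif
}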

#define RCU_SEGCBLIST_INITIALIZER(n) \
{ \
        .head = NULL, \
        .tails[RCU_DONE_TAIL] = &n.head, \
        .tails[RCU_WAIT_TAIL] = &n.head, \
        .tails[RCU_NEXT_READY_TAIL] = &n.head, \
        .tails[RCU_NEXT_TAIL] = &n.head, \
}
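
/*
 * Illustrative sketch, not from the kernel sources: with the initializer
 * above (used as, e.g., struct rcu_segcblist my_cbs =
 * RCU_SEGCBLIST_INITIALIZER(my_cbs);), all four ->tails[] entries point at
 * ->head, so every segment described at the top of this file starts out
 * empty.  A segment is empty exactly when its two bounding tail pointers
 * are equal, as the hypothetical helper below shows.
 */
static inline bool example_segcblist_segempty(struct rcu_segcblist *rsclp,
                                              int seg)
{
        if (seg == RCU_DONE_TAIL)       /* First segment starts at ->head itself. */
                return &rsclp->head == rsclp->tails[RCU_DONE_TAIL];
        return rsclp->tails[seg - 1] == rsclp->tails[seg];
}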

#endif /* __INCLUDE_LINUX_RCU_SEGCBLIST_H */