/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update definitions shared among RCU implementations.
 *
 * Copyright IBM Corporation, 2011
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#ifndef __LINUX_RCU_H
#define __LINUX_RCU_H

#include <trace/events/rcu.h>

/*
 * Grace-period counter management.
 */

#define RCU_SEQ_CTR_SHIFT 2
#define RCU_SEQ_STATE_MASK ((1 << RCU_SEQ_CTR_SHIFT) - 1)

/* Low-order bit definition for polled grace-period APIs. */
#define RCU_GET_STATE_COMPLETED 0x1

extern int sysctl_sched_rt_runtime;

/*
 * Return the counter portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline unsigned long rcu_seq_ctr(unsigned long s)
{
        return s >> RCU_SEQ_CTR_SHIFT;
}

/*
 * Return the state portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline int rcu_seq_state(unsigned long s)
{
        return s & RCU_SEQ_STATE_MASK;
}
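/*
 * For example, with RCU_SEQ_CTR_SHIFT == 2 the low two bits of a sequence
 * number hold the state and the remaining bits hold the grace-period
 * counter, so rcu_seq_ctr(0x15) == 0x5 and rcu_seq_state(0x15) == 0x1.
 */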
/*
 * Set the state portion of the pointed-to sequence number.
 * The caller is responsible for preventing conflicting updates.
 */
static inline void rcu_seq_set_state(unsigned long *sp, int newstate)
{
        WARN_ON_ONCE(newstate & ~RCU_SEQ_STATE_MASK);
        WRITE_ONCE(*sp, (*sp & ~RCU_SEQ_STATE_MASK) + newstate);
}

/* Adjust sequence number for start of update-side operation. */
static inline void rcu_seq_start(unsigned long *sp)
{
        WRITE_ONCE(*sp, *sp + 1);
        smp_mb(); /* Ensure update-side operation after counter increment. */
        WARN_ON_ONCE(rcu_seq_state(*sp) != 1);
}

/* Compute the end-of-grace-period value for the specified sequence number. */
static inline unsigned long rcu_seq_endval(unsigned long *sp)
{
        return (*sp | RCU_SEQ_STATE_MASK) + 1;
}

/* Adjust sequence number for end of update-side operation. */
static inline void rcu_seq_end(unsigned long *sp)
{
        smp_mb(); /* Ensure update-side operation before counter increment. */
        WARN_ON_ONCE(!rcu_seq_state(*sp));
        WRITE_ONCE(*sp, rcu_seq_endval(sp));
}
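/*
 * For example, starting from a sequence number of 0x8 (counter 0x2, state 0),
 * rcu_seq_start() advances it to 0x9 (state 1, update-side operation in
 * flight), and rcu_seq_end() then advances it to rcu_seq_endval(), which is
 * (0x9 | 0x3) + 1 == 0xc (counter 0x3, state 0).
 */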
/*
 * rcu_seq_snap - Take a snapshot of the update side's sequence number.
 *
 * This function returns the earliest value of the grace-period sequence number
 * that will indicate that a full grace period has elapsed since the current
 * time.  Once the grace-period sequence number has reached this value, it will
 * be safe to invoke all callbacks that have been registered prior to the
 * current time.  This value is the current grace-period number plus two to the
 * power of the number of low-order bits reserved for state, then rounded up to
 * the next value in which the state bits are all zero.
 */
static inline unsigned long rcu_seq_snap(unsigned long *sp)
{
        unsigned long s;

        s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
        smp_mb(); /* Above access must not bleed into critical section. */
        return s;
}
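/*
 * For example, with RCU_SEQ_STATE_MASK == 0x3: if *sp is 0x8 (counter 0x2,
 * idle), rcu_seq_snap() returns (0x8 + 0x7) & ~0x3 == 0xc, the value *sp
 * will hold once one more full grace period has ended.  If *sp is 0x9
 * (counter 0x2, grace period in progress), the result is
 * (0x9 + 0x7) & ~0x3 == 0x10: the in-progress grace period must end and a
 * further full grace period must elapse.
 */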
/* Return the current value of the update side's sequence number, no ordering. */
static inline unsigned long rcu_seq_current(unsigned long *sp)
{
        return READ_ONCE(*sp);
}
/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not the
 * corresponding update-side operation has started.
 */
static inline bool rcu_seq_started(unsigned long *sp, unsigned long s)
{
        return ULONG_CMP_LT((s - 1) & ~RCU_SEQ_STATE_MASK, READ_ONCE(*sp));
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred.
 */
static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
{
        return ULONG_CMP_GE(READ_ONCE(*sp), s);
}
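/*
 * Typical polling pattern built on rcu_seq_snap()/rcu_seq_done() (sketch;
 * "gp_seq" stands for whichever grace-period sequence counter is being
 * polled):
 *
 *	s = rcu_seq_snap(&gp_seq);	// earliest "done" value
 *	...
 *	if (rcu_seq_done(&gp_seq, s))
 *		...			// a full grace period has elapsed
 */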
/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred, but do not allow the
 * (ULONG_MAX / 2) safety-factor/guard-band.
 */
static inline bool rcu_seq_done_exact(unsigned long *sp, unsigned long s)
{
        unsigned long cur_s = READ_ONCE(*sp);

        return ULONG_CMP_GE(cur_s, s) || ULONG_CMP_LT(cur_s, s - (2 * RCU_SEQ_STATE_MASK + 1));
}

/*
 * Has a grace period completed since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_completed_gp(unsigned long old, unsigned long new)
{
        return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);
}

/*
 * Has a grace period started since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_new_gp(unsigned long old, unsigned long new)
{
        return ULONG_CMP_LT((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK,
                            new);
}

/*
 * Roughly how many full grace periods have elapsed between the collection
 * of the two specified grace periods?
 */
static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old)
{
        unsigned long rnd_diff;

        if (old == new)
                return 0;
        /*
         * Compute the number of grace periods (still shifted up), plus
         * one if either of new and old is not an exact grace period.
         */
        rnd_diff = (new & ~RCU_SEQ_STATE_MASK) -
                   ((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK) +
                   ((new & RCU_SEQ_STATE_MASK) || (old & RCU_SEQ_STATE_MASK));
        if (ULONG_CMP_GE(RCU_SEQ_STATE_MASK, rnd_diff))
                return 1; /* Definitely no grace period has elapsed. */
        return ((rnd_diff - RCU_SEQ_STATE_MASK - 1) >> RCU_SEQ_CTR_SHIFT) + 2;
}
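/*
 * For example, rcu_seq_diff(0x10, 0x8) computes rnd_diff == 0x10 - 0x8 == 8,
 * which exceeds RCU_SEQ_STATE_MASK, so it returns ((8 - 3 - 1) >> 2) + 2 == 3,
 * while rcu_seq_diff(0xa, 0x8) computes rnd_diff == 1 and returns 1.
 */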
/*
 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
 * by call_rcu() and rcu callback execution, and are therefore not part
 * of the RCU API. These are in rcupdate.h because they are used by all
 * RCU implementations.
 */
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
# define STATE_RCU_HEAD_READY 0
# define STATE_RCU_HEAD_QUEUED 1

extern const struct debug_obj_descr rcuhead_debug_descr;

static inline int debug_rcu_head_queue(struct rcu_head *head)
{
        int r1;

        r1 = debug_object_activate(head, &rcuhead_debug_descr);
        debug_object_active_state(head, &rcuhead_debug_descr,
                                  STATE_RCU_HEAD_READY,
                                  STATE_RCU_HEAD_QUEUED);
        return r1;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
        debug_object_active_state(head, &rcuhead_debug_descr,
                                  STATE_RCU_HEAD_QUEUED,
                                  STATE_RCU_HEAD_READY);
        debug_object_deactivate(head, &rcuhead_debug_descr);
}
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline int debug_rcu_head_queue(struct rcu_head *head)
{
        return 0;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
}
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

extern int rcu_cpu_stall_suppress_at_boot;

static inline bool rcu_stall_is_suppressed_at_boot(void)
{
        return rcu_cpu_stall_suppress_at_boot && !rcu_inkernel_boot_has_ended();
}

#ifdef CONFIG_RCU_STALL_COMMON

extern int rcu_cpu_stall_ftrace_dump;
extern int rcu_cpu_stall_suppress;
extern int rcu_cpu_stall_timeout;
extern int rcu_exp_cpu_stall_timeout;
int rcu_jiffies_till_stall_check(void);
int rcu_exp_jiffies_till_stall_check(void);

static inline bool rcu_stall_is_suppressed(void)
{
        return rcu_stall_is_suppressed_at_boot() || rcu_cpu_stall_suppress;
}

#define rcu_ftrace_dump_stall_suppress() \
do { \
        if (!rcu_cpu_stall_suppress) \
                rcu_cpu_stall_suppress = 3; \
} while (0)

#define rcu_ftrace_dump_stall_unsuppress() \
do { \
        if (rcu_cpu_stall_suppress == 3) \
                rcu_cpu_stall_suppress = 0; \
} while (0)
#else /* #ifdef CONFIG_RCU_STALL_COMMON */
static inline bool rcu_stall_is_suppressed(void)
{
        return rcu_stall_is_suppressed_at_boot();
}
#define rcu_ftrace_dump_stall_suppress()
#define rcu_ftrace_dump_stall_unsuppress()
#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

/*
 * Strings used in tracepoints need to be exported via the
 * tracing system such that tools like perf and trace-cmd can
 * translate the string address pointers to actual text.
 */
#define TPS(x) tracepoint_string(x)

/*
 * Dump the ftrace buffer, but only one time per callsite per boot.
 */
#define rcu_ftrace_dump(oops_dump_mode) \
do { \
        static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
        \
        if (!atomic_read(&___rfd_beenhere) && \
            !atomic_xchg(&___rfd_beenhere, 1)) { \
                tracing_off(); \
                rcu_ftrace_dump_stall_suppress(); \
                ftrace_dump(oops_dump_mode); \
                rcu_ftrace_dump_stall_unsuppress(); \
        } \
} while (0)

void rcu_early_boot_tests(void);
void rcu_test_sync_prims(void);

/*
 * This function really isn't for public consumption, but RCU is special in
 * that context switches can allow the state machine to make progress.
 */
extern void resched_cpu(int cpu);

#if defined(CONFIG_SRCU) || !defined(CONFIG_TINY_RCU)

#include <linux/rcu_node_tree.h>

extern int rcu_num_lvls;
extern int num_rcu_lvl[];
extern int rcu_num_nodes;
static bool rcu_fanout_exact;
static int rcu_fanout_leaf;

/*
 * Compute the per-level fanout, either using the exact fanout specified
 * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
 */
static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
{
        int i;

        for (i = 0; i < RCU_NUM_LVLS; i++)
                levelspread[i] = INT_MIN;
        if (rcu_fanout_exact) {
                levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
                for (i = rcu_num_lvls - 2; i >= 0; i--)
                        levelspread[i] = RCU_FANOUT;
        } else {
                int ccur;
                int cprv;

                cprv = nr_cpu_ids;
                for (i = rcu_num_lvls - 1; i >= 0; i--) {
                        ccur = levelcnt[i];
                        levelspread[i] = (cprv + ccur - 1) / ccur;
                        cprv = ccur;
                }
        }
}
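/*
 * Worked example, assuming rcu_fanout_exact is clear, nr_cpu_ids == 96, and
 * a two-level tree described by levelcnt[] == { 1, 6 } (one root node, six
 * leaf nodes): the loop above yields levelspread[1] == (96 + 6 - 1) / 6 == 16
 * CPUs per leaf and levelspread[0] == (6 + 1 - 1) / 1 == 6 leaves under the
 * root.
 */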
extern void rcu_init_geometry(void);

/* Returns a pointer to the first leaf rcu_node structure. */
#define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1])

/* Is this rcu_node a leaf? */
#define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)

/* Is this rcu_node the last leaf? */
#define rcu_is_last_leaf_node(rnp) ((rnp) == &rcu_state.node[rcu_num_nodes - 1])

/*
 * Do a full breadth-first scan of the {s,}rcu_node structures for the
 * specified state structure (for SRCU) or the only rcu_state structure
 * (for RCU).
 */
#define srcu_for_each_node_breadth_first(sp, rnp) \
        for ((rnp) = &(sp)->node[0]; \
             (rnp) < &(sp)->node[rcu_num_nodes]; (rnp)++)
#define rcu_for_each_node_breadth_first(rnp) \
        srcu_for_each_node_breadth_first(&rcu_state, rnp)

/*
 * Scan the leaves of the rcu_node hierarchy for the rcu_state structure.
 * Note that if there is a singleton rcu_node tree with but one rcu_node
 * structure, this loop -will- visit the rcu_node structure.  It is still
 * a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rnp) \
        for ((rnp) = rcu_first_leaf_node(); \
             (rnp) < &rcu_state.node[rcu_num_nodes]; (rnp)++)

/*
 * Iterate over all possible CPUs in a leaf RCU node.
 */
#define for_each_leaf_node_possible_cpu(rnp, cpu) \
        for (WARN_ON_ONCE(!rcu_is_leaf_node(rnp)), \
             (cpu) = cpumask_next((rnp)->grplo - 1, cpu_possible_mask); \
             (cpu) <= rnp->grphi; \
             (cpu) = cpumask_next((cpu), cpu_possible_mask))

/*
 * Iterate over all CPUs in a leaf RCU node's specified mask.
 */
#define rcu_find_next_bit(rnp, cpu, mask) \
        ((rnp)->grplo + find_next_bit(&(mask), BITS_PER_LONG, (cpu)))
#define for_each_leaf_node_cpu_mask(rnp, cpu, mask) \
        for (WARN_ON_ONCE(!rcu_is_leaf_node(rnp)), \
             (cpu) = rcu_find_next_bit((rnp), 0, (mask)); \
             (cpu) <= rnp->grphi; \
             (cpu) = rcu_find_next_bit((rnp), (cpu) + 1 - (rnp->grplo), (mask)))
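/*
 * Usage sketch for the iterator above (do_something_with() is a placeholder;
 * note that the bits of "mask" are interpreted relative to (rnp)->grplo, as
 * rcu_find_next_bit() shows):
 *
 *	for_each_leaf_node_cpu_mask(rnp, cpu, mask)
 *		do_something_with(cpu);
 */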
/*
 * Wrappers for the rcu_node::lock acquire and release.
 *
 * Because the rcu_nodes form a tree, the tree traversal locking will observe
 * different lock values; this in turn means that an UNLOCK of one level
 * followed by a LOCK of another level does not imply a full memory barrier,
 * and most importantly transitivity is lost.
 *
 * In order to restore full ordering between tree levels, augment the regular
 * lock acquire functions with smp_mb__after_unlock_lock().
 *
 * Because ->lock of struct rcu_node is a __private field, one should use
 * these wrappers rather than directly calling raw_spin_{lock,unlock}* on ->lock.
 */
#define raw_spin_lock_rcu_node(p) \
do { \
        raw_spin_lock(&ACCESS_PRIVATE(p, lock)); \
        smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_rcu_node(p) \
do { \
        lockdep_assert_irqs_disabled(); \
        raw_spin_unlock(&ACCESS_PRIVATE(p, lock)); \
} while (0)

#define raw_spin_lock_irq_rcu_node(p) \
do { \
        raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock)); \
        smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_irq_rcu_node(p) \
do { \
        lockdep_assert_irqs_disabled(); \
        raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock)); \
} while (0)

#define raw_spin_lock_irqsave_rcu_node(p, flags) \
do { \
        raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
        smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(p, flags) \
do { \
        lockdep_assert_irqs_disabled(); \
        raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags); \
} while (0)

#define raw_spin_trylock_rcu_node(p) \
({ \
        bool ___locked = raw_spin_trylock(&ACCESS_PRIVATE(p, lock)); \
        \
        if (___locked) \
                smp_mb__after_unlock_lock(); \
        ___locked; \
})

#define raw_lockdep_assert_held_rcu_node(p) \
        lockdep_assert_held(&ACCESS_PRIVATE(p, lock))
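/*
 * Typical use of the wrappers above (sketch): acquire through a _rcu_node
 * variant so that smp_mb__after_unlock_lock() restores full ordering across
 * tree levels, and release through the matching unlock wrapper:
 *
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 *	...				// update rnp fields under the lock
 *	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 */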
#endif /* #if defined(CONFIG_SRCU) || !defined(CONFIG_TINY_RCU) */

#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_normal(void) { return true; }
static inline bool rcu_gp_is_expedited(void) { return false; }
static inline bool rcu_async_should_hurry(void) { return false; }
static inline void rcu_expedite_gp(void) { }
static inline void rcu_unexpedite_gp(void) { }
static inline void rcu_async_hurry(void) { }
static inline void rcu_async_relax(void) { }
static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_normal(void); /* Internal RCU use. */
bool rcu_gp_is_expedited(void); /* Internal RCU use. */
bool rcu_async_should_hurry(void); /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
void rcu_async_hurry(void);
void rcu_async_relax(void);
void rcupdate_announce_bootup_oddness(void);
#ifdef CONFIG_TASKS_RCU_GENERIC
void show_rcu_tasks_gp_kthreads(void);
#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
static inline void show_rcu_tasks_gp_kthreads(void) {}
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
void rcu_request_urgent_qs_task(struct task_struct *t);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#define RCU_SCHEDULER_INACTIVE 0
#define RCU_SCHEDULER_INIT 1
#define RCU_SCHEDULER_RUNNING 2

enum rcutorture_type {
        RCU_FLAVOR,
        RCU_TASKS_FLAVOR,
        RCU_TASKS_RUDE_FLAVOR,
        RCU_TASKS_TRACING_FLAVOR,
        RCU_TRIVIAL_FLAVOR,
        SRCU_FLAVOR,
        INVALID_RCU_FLAVOR
};

#if defined(CONFIG_RCU_LAZY)
unsigned long rcu_lazy_get_jiffies_till_flush(void);
void rcu_lazy_set_jiffies_till_flush(unsigned long j);
#else
static inline unsigned long rcu_lazy_get_jiffies_till_flush(void) { return 0; }
static inline void rcu_lazy_set_jiffies_till_flush(unsigned long j) { }
#endif

#if defined(CONFIG_TREE_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
                            unsigned long *gp_seq);
void do_trace_rcu_torture_read(const char *rcutorturename,
                               struct rcu_head *rhp,
                               unsigned long secs,
                               unsigned long c_old,
                               unsigned long c);
void rcu_gp_set_torture_wait(int duration);
#else
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
                                          int *flags, unsigned long *gp_seq)
{
        *flags = 0;
        *gp_seq = 0;
}
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
                               struct rcu_head *rhp,
                               unsigned long secs,
                               unsigned long c_old,
                               unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
        do { } while (0)
#endif
static inline void rcu_gp_set_torture_wait(int duration) { }
#endif

#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST)
long rcutorture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask);
#endif

#ifdef CONFIG_TINY_SRCU
static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
                                           struct srcu_struct *sp, int *flags,
                                           unsigned long *gp_seq)
{
        if (test_type != SRCU_FLAVOR)
                return;
        *flags = 0;
        *gp_seq = sp->srcu_idx;
}
#elif defined(CONFIG_TREE_SRCU)
void srcutorture_get_gp_data(enum rcutorture_type test_type,
                             struct srcu_struct *sp, int *flags,
                             unsigned long *gp_seq);
#endif

#ifdef CONFIG_TINY_RCU
static inline bool rcu_dynticks_zero_in_eqs(int cpu, int *vp) { return false; }
static inline unsigned long rcu_get_gp_seq(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
static inline unsigned long
srcu_batches_completed(struct srcu_struct *sp) { return 0; }
static inline void rcu_force_quiescent_state(void) { }
static inline bool rcu_check_boost_fail(unsigned long gp_state, int *cpup) { return true; }
static inline void show_rcu_gp_kthreads(void) { }
static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
static inline void rcu_fwd_progress_check(unsigned long j) { }
static inline void rcu_gp_slow_register(atomic_t *rgssp) { }
static inline void rcu_gp_slow_unregister(atomic_t *rgssp) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_dynticks_zero_in_eqs(int cpu, int *vp);
unsigned long rcu_get_gp_seq(void);
unsigned long rcu_exp_batches_completed(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
bool rcu_check_boost_fail(unsigned long gp_state, int *cpup);
void show_rcu_gp_kthreads(void);
int rcu_get_gp_kthreads_prio(void);
void rcu_fwd_progress_check(unsigned long j);
void rcu_force_quiescent_state(void);
extern struct workqueue_struct *rcu_gp_wq;
#ifdef CONFIG_RCU_EXP_KTHREAD
extern struct kthread_worker *rcu_exp_gp_kworker;
extern struct kthread_worker *rcu_exp_par_gp_kworker;
#else /* !CONFIG_RCU_EXP_KTHREAD */
extern struct workqueue_struct *rcu_par_gp_wq;
#endif /* CONFIG_RCU_EXP_KTHREAD */
void rcu_gp_slow_register(atomic_t *rgssp);
void rcu_gp_slow_unregister(atomic_t *rgssp);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#ifdef CONFIG_RCU_NOCB_CPU
void rcu_bind_current_to_nocb(void);
#else
static inline void rcu_bind_current_to_nocb(void) { }
#endif

#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RCU)
void show_rcu_tasks_classic_gp_kthread(void);
#else
static inline void show_rcu_tasks_classic_gp_kthread(void) {}
#endif
#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RUDE_RCU)
void show_rcu_tasks_rude_gp_kthread(void);
#else
static inline void show_rcu_tasks_rude_gp_kthread(void) {}
#endif
#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_TRACE_RCU)
void show_rcu_tasks_trace_gp_kthread(void);
#else
static inline void show_rcu_tasks_trace_gp_kthread(void) {}
#endif

#ifdef CONFIG_TINY_RCU
static inline bool rcu_cpu_beenfullyonline(int cpu) { return true; }
#else
bool rcu_cpu_beenfullyonline(int cpu);
#endif

#endif /* __LINUX_RCU_H */