// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <[email protected]>
 *          Manfred Spraul <[email protected]>
 *
 * Based on the original work by Paul McKenney <[email protected]>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/tick.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched/isolation.h>
#include <linux/kprobes.h>
#include <linux/slab.h>
#include <linux/irq_work.h>
#include <linux/rcupdate_trace.h>
#include <linux/jiffies.h>

#define CREATE_TRACE_POINTS

#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcupdate."

#ifndef CONFIG_TINY_RCU
module_param(rcu_expedited, int, 0444);
module_param(rcu_normal, int, 0444);
static int rcu_normal_after_boot = IS_ENABLED(CONFIG_PREEMPT_RT);
#if !defined(CONFIG_PREEMPT_RT) || defined(CONFIG_NO_HZ_FULL)
module_param(rcu_normal_after_boot, int, 0444);
#endif
#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rcu_read_lock_held_common() - might we be in RCU-sched read-side critical section?
 * @ret: Best guess answer if lockdep cannot be relied on
 *
 * Returns true if lockdep must be ignored, in which case ``*ret`` contains
 * the best guess described below.  Otherwise returns false, in which
 * case ``*ret`` tells the caller nothing and the caller should instead
 * consult lockdep.
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, set ``*ret`` to nonzero iff in an
 * RCU-sched read-side critical section.  In absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.  Note that disabling
 * of preemption (including disabling irqs) counts as an RCU-sched
 * read-side critical section.  This is useful for debug checks in functions
 * that require that they be called within an RCU-sched read-side
 * critical section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that if the CPU is in the idle loop from an RCU point of view (ie:
 * that we are in the section between ct_idle_enter() and ct_idle_exit())
 * then rcu_read_lock_held() sets ``*ret`` to false even if the CPU did an
 * rcu_read_lock().  The reason for this is that RCU ignores CPUs that are
 * in such a section, considering these as in extended quiescent state,
 * so such a CPU is effectively never in an RCU read-side critical section
 * regardless of what RCU primitives it invokes.  This state of affairs is
 * required --- we need to keep an RCU-free window in idle where the CPU may
 * possibly enter into low power mode.  This way we can notice an extended
 * quiescent state to other CPUs that started a grace period.  Otherwise
 * we would delay any grace period as long as we run in the idle task.
 *
 * Similarly, we avoid claiming an RCU read lock held if the current
 * CPU is offline.
 */
static bool rcu_read_lock_held_common(bool *ret)
{
        if (!debug_lockdep_rcu_enabled()) {
                *ret = true;
                return true;
        }
        if (!rcu_is_watching()) {
                *ret = false;
                return true;
        }
        if (!rcu_lockdep_current_cpu_online()) {
                *ret = false;
                return true;
        }
        return false;
}

int rcu_read_lock_sched_held(void)
{
        bool ret;

        if (rcu_read_lock_held_common(&ret))
                return ret;
        return lock_is_held(&rcu_sched_lock_map) || !preemptible();
}
EXPORT_SYMBOL(rcu_read_lock_sched_held);
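
/*
 * Illustrative sketch (not part of this file): a typical lockdep-based
 * debug check built on rcu_read_lock_sched_held().  The function name
 * my_sched_protected_lookup() and the pointer gp are hypothetical.
 *
 *	struct foo *my_sched_protected_lookup(void)
 *	{
 *		RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
 *				 "need rcu_read_lock_sched() or preemption disabled");
 *		return rcu_dereference_sched(gp);
 *	}
 */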
#endif

#ifndef CONFIG_TINY_RCU

/*
 * Should expedited grace-period primitives always fall back to their
 * non-expedited counterparts?  Intended for use within RCU.  Note
 * that if the user specifies both rcu_expedited and rcu_normal, then
 * rcu_normal wins.  (Except during the time period during boot from
 * when the first task is spawned until the rcu_set_runtime_mode()
 * core_initcall() is invoked, at which point everything is expedited.)
 */
bool rcu_gp_is_normal(void)
{
        return READ_ONCE(rcu_normal) &&
               rcu_scheduler_active != RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_normal);

static atomic_t rcu_async_hurry_nesting = ATOMIC_INIT(1);
/*
 * Should call_rcu() callbacks be processed with urgency or are
 * they OK being executed with arbitrary delays?
 */
bool rcu_async_should_hurry(void)
{
        return !IS_ENABLED(CONFIG_RCU_LAZY) ||
               atomic_read(&rcu_async_hurry_nesting);
}
EXPORT_SYMBOL_GPL(rcu_async_should_hurry);

/**
 * rcu_async_hurry - Make future async RCU callbacks not lazy.
 *
 * After a call to this function, future calls to call_rcu()
 * will be processed in a timely fashion.
 */
void rcu_async_hurry(void)
{
        if (IS_ENABLED(CONFIG_RCU_LAZY))
                atomic_inc(&rcu_async_hurry_nesting);
}
EXPORT_SYMBOL_GPL(rcu_async_hurry);

/**
 * rcu_async_relax - Make future async RCU callbacks lazy.
 *
 * After a call to this function, future calls to call_rcu()
 * will be processed in a lazy fashion.
 */
void rcu_async_relax(void)
{
        if (IS_ENABLED(CONFIG_RCU_LAZY))
                atomic_dec(&rcu_async_hurry_nesting);
}
EXPORT_SYMBOL_GPL(rcu_async_relax);
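
/*
 * Illustrative sketch (not part of this file): rcu_async_hurry() and
 * rcu_async_relax() are intended to be used in nested pairs around code
 * that must not have its call_rcu() callbacks deferred lazily.  The
 * helper name below is hypothetical.
 *
 *	rcu_async_hurry();
 *	do_time_critical_teardown();
 *	rcu_async_relax();
 */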
static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);

/*
 * Should normal grace-period primitives be expedited?  Intended for
 * use within RCU.  Note that this function takes the rcu_expedited
 * sysfs/boot variable and rcu_scheduler_active into account as well
 * as the rcu_expedite_gp() nesting.  So looping on rcu_unexpedite_gp()
 * until rcu_gp_is_expedited() returns false is a -really- bad idea.
 */
bool rcu_gp_is_expedited(void)
{
        return rcu_expedited || atomic_read(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);

/**
 * rcu_expedite_gp - Expedite future RCU grace periods
 *
 * After a call to this function, future calls to synchronize_rcu() and
 * friends act as if the corresponding synchronize_rcu_expedited() function
 * had instead been called.
 */
void rcu_expedite_gp(void)
{
        atomic_inc(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_expedite_gp);

/**
 * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
 *
 * Undo a prior call to rcu_expedite_gp().  If all prior calls to
 * rcu_expedite_gp() are undone by a subsequent call to rcu_unexpedite_gp(),
 * and if the rcu_expedited sysfs/boot parameter is not set, then all
 * subsequent calls to synchronize_rcu() and friends will return to
 * their normal non-expedited behavior.
 */
void rcu_unexpedite_gp(void)
{
        atomic_dec(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
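
/*
 * Illustrative sketch (not part of this file): rcu_expedite_gp() and
 * rcu_unexpedite_gp() nest, so a latency-sensitive region can be
 * bracketed as shown below; any synchronize_rcu() issued in between then
 * behaves like synchronize_rcu_expedited().  The helper name is
 * hypothetical.
 *
 *	rcu_expedite_gp();
 *	do_latency_sensitive_reconfiguration();
 *	rcu_unexpedite_gp();
 */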

/*
 * Minimum time in milliseconds from the start of boot until RCU can consider
 * in-kernel boot as completed.  This can also be tuned at runtime to end the
 * boot earlier, by userspace init code writing the time in milliseconds (even
 * 0) to: /sys/module/rcupdate/parameters/rcu_boot_end_delay.  The sysfs node
 * can also be used to extend the delay to be larger than the default, assuming
 * the marking of boot complete has not yet occurred.
 */
static int rcu_boot_end_delay = CONFIG_RCU_BOOT_END_DELAY;
static bool rcu_boot_ended __read_mostly;
static bool rcu_boot_end_called __read_mostly;
static DEFINE_MUTEX(rcu_boot_end_lock);

/*
 * Inform RCU of the end of the in-kernel boot sequence.  The boot sequence
 * will not be marked ended until at least rcu_boot_end_delay milliseconds
 * have passed.
 */
void rcu_end_inkernel_boot(void);
static void rcu_boot_end_work_fn(struct work_struct *work)
{
        rcu_end_inkernel_boot();
}
static DECLARE_DELAYED_WORK(rcu_boot_end_work, rcu_boot_end_work_fn);

/* Must be called with rcu_boot_end_lock held. */
static void rcu_end_inkernel_boot_locked(void)
{
        rcu_boot_end_called = true;

        if (rcu_boot_ended)
                return;

        if (rcu_boot_end_delay) {
                u64 boot_ms = div_u64(ktime_get_boot_fast_ns(), 1000000UL);

                if (boot_ms < rcu_boot_end_delay) {
                        schedule_delayed_work(&rcu_boot_end_work,
                                              msecs_to_jiffies(rcu_boot_end_delay - boot_ms));
                        return;
                }
        }

        cancel_delayed_work(&rcu_boot_end_work);
        rcu_unexpedite_gp();
        rcu_async_relax();
        if (rcu_normal_after_boot)
                WRITE_ONCE(rcu_normal, 1);
        rcu_boot_ended = true;
}

void rcu_end_inkernel_boot(void)
{
        mutex_lock(&rcu_boot_end_lock);
        rcu_end_inkernel_boot_locked();
        mutex_unlock(&rcu_boot_end_lock);
}

static int param_set_rcu_boot_end(const char *val, const struct kernel_param *kp)
{
        uint end_ms;
        int ret = kstrtouint(val, 0, &end_ms);

        if (ret)
                return ret;
        /*
         * rcu_end_inkernel_boot() should be called at least once during init
         * before we can allow param changes to end the boot.
         */
        mutex_lock(&rcu_boot_end_lock);
        rcu_boot_end_delay = end_ms;
        if (!rcu_boot_ended && rcu_boot_end_called)
                rcu_end_inkernel_boot_locked();
        mutex_unlock(&rcu_boot_end_lock);

        return ret;
}

static const struct kernel_param_ops rcu_boot_end_ops = {
        .set = param_set_rcu_boot_end,
        .get = param_get_uint,
};
module_param_cb(rcu_boot_end_delay, &rcu_boot_end_ops, &rcu_boot_end_delay, 0644);

/*
 * Let rcutorture know when it is OK to turn it up to eleven.
 */
bool rcu_inkernel_boot_has_ended(void)
{
        return rcu_boot_ended;
}
EXPORT_SYMBOL_GPL(rcu_inkernel_boot_has_ended);

#endif /* #ifndef CONFIG_TINY_RCU */

/*
 * Test each non-SRCU synchronous grace-period wait API.  This is
 * useful just after a change in mode for these primitives, and
 * during early boot.
 */
void rcu_test_sync_prims(void)
{
        if (!IS_ENABLED(CONFIG_PROVE_RCU))
                return;

        synchronize_rcu();
        synchronize_rcu_expedited();
}

#if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU)

/*
 * Switch to run-time mode once RCU has fully initialized.
 */
static int __init rcu_set_runtime_mode(void)
{
        rcu_test_sync_prims();
        rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
        kfree_rcu_scheduler_running();
        rcu_test_sync_prims();
        return 0;
}
core_initcall(rcu_set_runtime_mode);

#endif /* #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU) */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map = {
        .name = "rcu_read_lock",
        .key = &rcu_lock_key,
        .wait_type_outer = LD_WAIT_FREE,
        .wait_type_inner = LD_WAIT_CONFIG, /* PREEMPT_RT implies PREEMPT_RCU */
};
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map = {
        .name = "rcu_read_lock_bh",
        .key = &rcu_bh_lock_key,
        .wait_type_outer = LD_WAIT_FREE,
        .wait_type_inner = LD_WAIT_CONFIG, /* PREEMPT_RT makes BH preemptible. */
};
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map = {
        .name = "rcu_read_lock_sched",
        .key = &rcu_sched_lock_key,
        .wait_type_outer = LD_WAIT_FREE,
        .wait_type_inner = LD_WAIT_SPIN,
};
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);

// Tell lockdep when RCU callbacks are being invoked.
static struct lock_class_key rcu_callback_key;
struct lockdep_map rcu_callback_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
EXPORT_SYMBOL_GPL(rcu_callback_map);

noinstr int notrace debug_lockdep_rcu_enabled(void)
{
        return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && READ_ONCE(debug_locks) &&
               current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);

/**
 * rcu_read_lock_held() - might we be in RCU read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
 * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.  This is useful for debug checks in functions that
 * require that they be called within an RCU read-side critical section.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
 * occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock() in process context if the matching rcu_read_lock()
 * was invoked from within an irq handler.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_held(void)
{
        bool ret;

        if (rcu_read_lock_held_common(&ret))
                return ret;
        return lock_is_held(&rcu_lock_map);
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);
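
/*
 * Illustrative sketch (not part of this file): rcu_read_lock_held() is
 * typically used in the lockdep condition of rcu_dereference_check(), so
 * that a pointer may legitimately be accessed either under rcu_read_lock()
 * or while holding the update-side lock.  The names gp and my_update_lock
 * are hypothetical.
 *
 *	p = rcu_dereference_check(gp,
 *				  rcu_read_lock_held() ||
 *				  lockdep_is_held(&my_update_lock));
 */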

/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and not cases.  Note that if someone uses
 * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation.  This is useful for debug checks in functions
 * that require that they be called within an RCU read-side critical
 * section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock_bh() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
        bool ret;

        if (rcu_read_lock_held_common(&ret))
                return ret;
        return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
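
/*
 * Illustrative sketch (not part of this file): rcu_read_lock_bh_held() is
 * already folded into the condition checked by rcu_dereference_bh_check(),
 * so an accessor for a pointer published under rcu_read_lock_bh() and
 * updated under a hypothetical lock my_bh_lock might read:
 *
 *	p = rcu_dereference_bh_check(gp,
 *				     lockdep_is_held(&my_bh_lock));
 */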

int rcu_read_lock_any_held(void)
{
        bool ret;

        if (rcu_read_lock_held_common(&ret))
                return ret;
        if (lock_is_held(&rcu_lock_map) ||
            lock_is_held(&rcu_bh_lock_map) ||
            lock_is_held(&rcu_sched_lock_map))
                return 1;
        return !preemptible();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_any_held);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * wakeme_after_rcu() - Callback function to awaken a task after grace period
 * @head: Pointer to rcu_head member within rcu_synchronize structure
 *
 * Awaken the corresponding task now that a grace period has elapsed.
 */
void wakeme_after_rcu(struct rcu_head *head)
{
        struct rcu_synchronize *rcu;

        rcu = container_of(head, struct rcu_synchronize, head);
        complete(&rcu->completion);
}
EXPORT_SYMBOL_GPL(wakeme_after_rcu);
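
/*
 * Illustrative sketch (not part of this file): wakeme_after_rcu() turns a
 * call_rcu()-style callback into a synchronous wait, which is what
 * __wait_rcu_gp() below does for each distinct grace-period flavor.  A
 * minimal open-coded equivalent for a single grace period would be:
 *
 *	struct rcu_synchronize rcu;
 *
 *	init_rcu_head_on_stack(&rcu.head);
 *	init_completion(&rcu.completion);
 *	call_rcu(&rcu.head, wakeme_after_rcu);
 *	wait_for_completion(&rcu.completion);
 *	destroy_rcu_head_on_stack(&rcu.head);
 */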

void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
                   struct rcu_synchronize *rs_array)
{
        int i;
        int j;

        /* Initialize and register callbacks for each crcu_array element. */
        for (i = 0; i < n; i++) {
                if (checktiny &&
                    (crcu_array[i] == call_rcu)) {
                        might_sleep();
                        continue;
                }
                for (j = 0; j < i; j++)
                        if (crcu_array[j] == crcu_array[i])
                                break;
                if (j == i) {
                        init_rcu_head_on_stack(&rs_array[i].head);
                        init_completion(&rs_array[i].completion);
                        (crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
                }
        }

        /* Wait for all callbacks to be invoked. */
        for (i = 0; i < n; i++) {
                if (checktiny &&
                    (crcu_array[i] == call_rcu))
                        continue;
                for (j = 0; j < i; j++)
                        if (crcu_array[j] == crcu_array[i])
                                break;
                if (j == i) {
                        wait_for_completion(&rs_array[i].completion);
                        destroy_rcu_head_on_stack(&rs_array[i].head);
                }
        }
}
EXPORT_SYMBOL_GPL(__wait_rcu_gp);

void finish_rcuwait(struct rcuwait *w)
{
        rcu_assign_pointer(w->task, NULL);
        __set_current_state(TASK_RUNNING);
}
EXPORT_SYMBOL_GPL(finish_rcuwait);

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head)
{
        debug_object_init(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head);

void destroy_rcu_head(struct rcu_head *head)
{
        debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head);

static bool rcuhead_is_static_object(void *addr)
{
        return true;
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
        debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
        debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);

const struct debug_obj_descr rcuhead_debug_descr = {
        .name = "rcu_head",
        .is_static_object = rcuhead_is_static_object,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
                               unsigned long secs,
                               unsigned long c_old, unsigned long c)
{
        trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
        do { } while (0)
#endif

#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST) \
        || IS_ENABLED(CONFIG_GKI_HIDDEN_RCUTORTURE)
/* Get rcutorture access to sched_setaffinity(). */
long rcutorture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
        int ret;

        ret = sched_setaffinity(pid, in_mask);
        WARN_ONCE(ret, "%s: sched_setaffinity() returned %d\n", __func__, ret);
        return ret;
}
EXPORT_SYMBOL_GPL(rcutorture_sched_setaffinity);
#endif

#ifdef CONFIG_RCU_STALL_COMMON
int rcu_cpu_stall_ftrace_dump __read_mostly;
module_param(rcu_cpu_stall_ftrace_dump, int, 0644);
int rcu_cpu_stall_suppress __read_mostly; // !0 = suppress stall warnings.
EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);
module_param(rcu_cpu_stall_suppress, int, 0644);
int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
module_param(rcu_cpu_stall_timeout, int, 0644);
int rcu_exp_cpu_stall_timeout __read_mostly = CONFIG_RCU_EXP_CPU_STALL_TIMEOUT;
module_param(rcu_exp_cpu_stall_timeout, int, 0644);
#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

// Suppress boot-time RCU CPU stall warnings and rcutorture writer stall
// warnings.  Also used by rcutorture even if stall warnings are excluded.
int rcu_cpu_stall_suppress_at_boot __read_mostly; // !0 = suppress boot stalls.
EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress_at_boot);
module_param(rcu_cpu_stall_suppress_at_boot, int, 0444);

/**
 * get_completed_synchronize_rcu - Return a pre-completed polled state cookie
 *
 * Returns a value that will always be treated by functions like
 * poll_state_synchronize_rcu() as a cookie whose grace period has already
 * completed.
 */
unsigned long get_completed_synchronize_rcu(void)
{
        return RCU_GET_STATE_COMPLETED;
}
EXPORT_SYMBOL_GPL(get_completed_synchronize_rcu);
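
/*
 * Illustrative sketch (not part of this file): a pre-completed cookie lets
 * a structure be initialized so that the very first poll already reports
 * the grace period as over, avoiding a special case.  The struct field and
 * helper names are hypothetical.
 *
 *	obj->gp_cookie = get_completed_synchronize_rcu();
 *	...
 *	if (poll_state_synchronize_rcu(obj->gp_cookie))
 *		finish_deferred_free(obj);
 */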

#ifdef CONFIG_PROVE_RCU

/*
 * Early boot self test parameters.
 */
static bool rcu_self_test;
module_param(rcu_self_test, bool, 0444);

static int rcu_self_test_counter;

static void test_callback(struct rcu_head *r)
{
        rcu_self_test_counter++;
        pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
}

DEFINE_STATIC_SRCU(early_srcu);
static unsigned long early_srcu_cookie;

struct early_boot_kfree_rcu {
        struct rcu_head rh;
};

static void early_boot_test_call_rcu(void)
{
        static struct rcu_head head;
        static struct rcu_head shead;
        struct early_boot_kfree_rcu *rhp;

        call_rcu(&head, test_callback);
        if (IS_ENABLED(CONFIG_SRCU)) {
                early_srcu_cookie = start_poll_synchronize_srcu(&early_srcu);
                call_srcu(&early_srcu, &shead, test_callback);
        }
        rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
        if (!WARN_ON_ONCE(!rhp))
                kfree_rcu(rhp, rh);
}

void rcu_early_boot_tests(void)
{
        pr_info("Running RCU self tests\n");

        if (rcu_self_test)
                early_boot_test_call_rcu();
        rcu_test_sync_prims();
}

static int rcu_verify_early_boot_tests(void)
{
        int ret = 0;
        int early_boot_test_counter = 0;

        if (rcu_self_test) {
                early_boot_test_counter++;
                rcu_barrier();
                if (IS_ENABLED(CONFIG_SRCU)) {
                        early_boot_test_counter++;
                        srcu_barrier(&early_srcu);
                        WARN_ON_ONCE(!poll_state_synchronize_srcu(&early_srcu, early_srcu_cookie));
                }
        }
        if (rcu_self_test_counter != early_boot_test_counter) {
                WARN_ON(1);
                ret = -1;
        }

        return ret;
}
late_initcall(rcu_verify_early_boot_tests);
#else
void rcu_early_boot_tests(void) {}
#endif /* CONFIG_PROVE_RCU */

#include "tasks.h"

#ifndef CONFIG_TINY_RCU

/*
 * Print any significant non-default boot-time settings.
 */
void __init rcupdate_announce_bootup_oddness(void)
{
        if (rcu_normal)
                pr_info("\tNo expedited grace period (rcu_normal).\n");
        else if (rcu_normal_after_boot)
                pr_info("\tNo expedited grace period (rcu_normal_after_boot).\n");
        else if (rcu_expedited)
                pr_info("\tAll grace periods are expedited (rcu_expedited).\n");
        if (rcu_cpu_stall_suppress)
                pr_info("\tRCU CPU stall warnings suppressed (rcu_cpu_stall_suppress).\n");
        if (rcu_cpu_stall_timeout != CONFIG_RCU_CPU_STALL_TIMEOUT)
                pr_info("\tRCU CPU stall warnings timeout set to %d (rcu_cpu_stall_timeout).\n", rcu_cpu_stall_timeout);
        rcu_tasks_bootup_oddness();
}

#endif /* #ifndef CONFIG_TINY_RCU */