
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Task-based RCU implementations.
 *
 * Copyright (C) 2020 Paul E. McKenney
 */

#ifdef CONFIG_TASKS_RCU_GENERIC
#include "rcu_segcblist.h"

////////////////////////////////////////////////////////////////////////
//
// Generic data structures.

struct rcu_tasks;
typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
typedef void (*pregp_func_t)(struct list_head *hop);
typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
typedef void (*postscan_func_t)(struct list_head *hop);
typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
typedef void (*postgp_func_t)(struct rcu_tasks *rtp);

/**
 * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism.
 * @cblist: Callback list.
 * @lock: Lock protecting per-CPU callback list.
 * @rtp_jiffies: Jiffies counter value for statistics.
 * @rtp_n_lock_retries: Rough lock-contention statistic.
 * @rtp_work: Work queue for invoking callbacks.
 * @rtp_irq_work: IRQ work queue for deferred wakeups.
 * @barrier_q_head: RCU callback for barrier operation.
 * @rtp_blkd_tasks: List of tasks blocked as readers.
 * @cpu: CPU number corresponding to this entry.
 * @rtpp: Pointer to the rcu_tasks structure.
 */
struct rcu_tasks_percpu {
        struct rcu_segcblist cblist;
        raw_spinlock_t __private lock;
        unsigned long rtp_jiffies;
        unsigned long rtp_n_lock_retries;
        struct work_struct rtp_work;
        struct irq_work rtp_irq_work;
        struct rcu_head barrier_q_head;
        struct list_head rtp_blkd_tasks;
        int cpu;
        struct rcu_tasks *rtpp;
};

/**
 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
 * @cbs_wait: RCU wait allowing a new callback to get kthread's attention.
 * @cbs_gbl_lock: Lock protecting callback list.
 * @tasks_gp_mutex: Mutex protecting grace period, needed during mid-boot dead zone.
 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
 * @gp_func: This flavor's grace-period-wait function.
 * @gp_state: Grace period's most recent state transition (debugging).
 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
 * @init_fract: Initial backoff sleep interval.
 * @gp_jiffies: Time of last @gp_state transition.
 * @gp_start: Most recent grace-period start in jiffies.
 * @tasks_gp_seq: Number of grace periods completed since boot.
 * @n_ipis: Number of IPIs sent to encourage grace periods to end.
 * @n_ipis_fails: Number of IPI-send failures.
 * @pregp_func: This flavor's pre-grace-period function (optional).
 * @pertask_func: This flavor's per-task scan function (optional).
 * @postscan_func: This flavor's post-task scan function (optional).
 * @holdouts_func: This flavor's holdout-list scan function (optional).
 * @postgp_func: This flavor's post-grace-period function (optional).
 * @call_func: This flavor's call_rcu()-equivalent function.
 * @rtpcpu: This flavor's rcu_tasks_percpu structure.
 * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks.
 * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing.
 * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing.
 * @percpu_dequeue_gpseq: RCU grace-period number to propagate enqueue limit to dequeuers.
 * @barrier_q_mutex: Serialize barrier operations.
 * @barrier_q_count: Number of queues being waited on.
 * @barrier_q_completion: Barrier wait/wakeup mechanism.
 * @barrier_q_seq: Sequence number for barrier operations.
 * @name: This flavor's textual name.
 * @kname: This flavor's kthread name.
 */
struct rcu_tasks {
        struct rcuwait cbs_wait;
        raw_spinlock_t cbs_gbl_lock;
        struct mutex tasks_gp_mutex;
        int gp_state;
        int gp_sleep;
        int init_fract;
        unsigned long gp_jiffies;
        unsigned long gp_start;
        unsigned long tasks_gp_seq;
        unsigned long n_ipis;
        unsigned long n_ipis_fails;
        struct task_struct *kthread_ptr;
        rcu_tasks_gp_func_t gp_func;
        pregp_func_t pregp_func;
        pertask_func_t pertask_func;
        postscan_func_t postscan_func;
        holdouts_func_t holdouts_func;
        postgp_func_t postgp_func;
        call_rcu_func_t call_func;
        struct rcu_tasks_percpu __percpu *rtpcpu;
        int percpu_enqueue_shift;
        int percpu_enqueue_lim;
        int percpu_dequeue_lim;
        unsigned long percpu_dequeue_gpseq;
        struct mutex barrier_q_mutex;
        atomic_t barrier_q_count;
        struct completion barrier_q_completion;
        unsigned long barrier_q_seq;
        char *name;
        char *kname;
};

static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp);

#define DEFINE_RCU_TASKS(rt_name, gp, call, n)                                          \
static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = {                \
        .lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock),           \
        .rtp_irq_work = IRQ_WORK_INIT_HARD(call_rcu_tasks_iw_wakeup),                  \
};                                                                                      \
static struct rcu_tasks rt_name =                                                      \
{                                                                                       \
        .cbs_wait = __RCUWAIT_INITIALIZER(rt_name.wait),                                \
        .cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock),                 \
        .tasks_gp_mutex = __MUTEX_INITIALIZER(rt_name.tasks_gp_mutex),                  \
        .gp_func = gp,                                                                  \
        .call_func = call,                                                              \
        .rtpcpu = &rt_name ## __percpu,                                                 \
        .name = n,                                                                      \
        .percpu_enqueue_shift = order_base_2(CONFIG_NR_CPUS),                           \
        .percpu_enqueue_lim = 1,                                                        \
        .percpu_dequeue_lim = 1,                                                        \
        .barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex),                \
        .barrier_q_seq = (0UL - 50UL) << RCU_SEQ_CTR_SHIFT,                             \
        .kname = #rt_name,                                                              \
}
/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);

/* Avoid IPIing CPUs early in the grace period. */
#define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
module_param(rcu_task_ipi_delay, int, 0644);

/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
#define RCU_TASK_BOOT_STALL_TIMEOUT (HZ * 30)
#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
module_param(rcu_task_stall_timeout, int, 0644);
#define RCU_TASK_STALL_INFO (HZ * 10)
static int rcu_task_stall_info __read_mostly = RCU_TASK_STALL_INFO;
module_param(rcu_task_stall_info, int, 0644);
static int rcu_task_stall_info_mult __read_mostly = 3;
module_param(rcu_task_stall_info_mult, int, 0444);

static int rcu_task_enqueue_lim __read_mostly = -1;
module_param(rcu_task_enqueue_lim, int, 0444);
static bool rcu_task_cb_adjust;
static int rcu_task_contend_lim __read_mostly = 100;
module_param(rcu_task_contend_lim, int, 0444);
static int rcu_task_collapse_lim __read_mostly = 10;
module_param(rcu_task_collapse_lim, int, 0444);

/* RCU tasks grace-period state for debugging. */
#define RTGS_INIT                0
#define RTGS_WAIT_WAIT_CBS       1
#define RTGS_WAIT_GP             2
#define RTGS_PRE_WAIT_GP         3
#define RTGS_SCAN_TASKLIST       4
#define RTGS_POST_SCAN_TASKLIST  5
#define RTGS_WAIT_SCAN_HOLDOUTS  6
#define RTGS_SCAN_HOLDOUTS       7
#define RTGS_POST_GP             8
#define RTGS_WAIT_READERS        9
#define RTGS_INVOKE_CBS         10
#define RTGS_WAIT_CBS           11

#ifndef CONFIG_TINY_RCU
static const char * const rcu_tasks_gp_state_names[] = {
        "RTGS_INIT",
        "RTGS_WAIT_WAIT_CBS",
        "RTGS_WAIT_GP",
        "RTGS_PRE_WAIT_GP",
        "RTGS_SCAN_TASKLIST",
        "RTGS_POST_SCAN_TASKLIST",
        "RTGS_WAIT_SCAN_HOLDOUTS",
        "RTGS_SCAN_HOLDOUTS",
        "RTGS_POST_GP",
        "RTGS_WAIT_READERS",
        "RTGS_INVOKE_CBS",
        "RTGS_WAIT_CBS",
};
#endif /* #ifndef CONFIG_TINY_RCU */
////////////////////////////////////////////////////////////////////////
//
// Generic code.

static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp);

/* Record grace-period phase and time. */
static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
{
        rtp->gp_state = newstate;
        rtp->gp_jiffies = jiffies;
}

#ifndef CONFIG_TINY_RCU
/* Return state name. */
static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
{
        int i = data_race(rtp->gp_state); // Let KCSAN detect update races
        int j = READ_ONCE(i); // Prevent the compiler from reading twice

        if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
                return "???";
        return rcu_tasks_gp_state_names[j];
}
#endif /* #ifndef CONFIG_TINY_RCU */

// Initialize per-CPU callback lists for the specified flavor of
// Tasks RCU.
static void cblist_init_generic(struct rcu_tasks *rtp)
{
        int cpu;
        unsigned long flags;
        int lim;
        int shift;

        raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
        if (rcu_task_enqueue_lim < 0) {
                rcu_task_enqueue_lim = 1;
                rcu_task_cb_adjust = true;
        } else if (rcu_task_enqueue_lim == 0) {
                rcu_task_enqueue_lim = 1;
        }
        lim = rcu_task_enqueue_lim;

        if (lim > nr_cpu_ids)
                lim = nr_cpu_ids;
        shift = ilog2(nr_cpu_ids / lim);
        if (((nr_cpu_ids - 1) >> shift) >= lim)
                shift++;
        WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
        WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
        smp_store_release(&rtp->percpu_enqueue_lim, lim);
        for_each_possible_cpu(cpu) {
                struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

                WARN_ON_ONCE(!rtpcp);
                if (cpu)
                        raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock));
                raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
                if (rcu_segcblist_empty(&rtpcp->cblist))
                        rcu_segcblist_init(&rtpcp->cblist);
                INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);
                rtpcp->cpu = cpu;
                rtpcp->rtpp = rtp;
                if (!rtpcp->rtp_blkd_tasks.next)
                        INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
                raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled.
        }
        raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);

        if (rcu_task_cb_adjust)
                pr_info("%s: Setting adjustable number of callback queues.\n", __func__);

        pr_info("%s: Setting shift to %d and lim to %d.\n", __func__, data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim));
}
// IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic().
static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp)
{
        struct rcu_tasks *rtp;
        struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work);

        rtp = rtpcp->rtpp;
        rcuwait_wake_up(&rtp->cbs_wait);
}

// Enqueue a callback for the specified flavor of Tasks RCU.
static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
                                   struct rcu_tasks *rtp)
{
        int chosen_cpu;
        unsigned long flags;
        int ideal_cpu;
        unsigned long j;
        bool needadjust = false;
        bool needwake;
        struct rcu_tasks_percpu *rtpcp;

        rhp->next = NULL;
        rhp->func = func;
        local_irq_save(flags);
        rcu_read_lock();
        ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift);
        chosen_cpu = cpumask_next(ideal_cpu - 1, cpu_possible_mask);
        rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu);
        if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled.
                raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
                j = jiffies;
                if (rtpcp->rtp_jiffies != j) {
                        rtpcp->rtp_jiffies = j;
                        rtpcp->rtp_n_lock_retries = 0;
                }
                if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim &&
                    READ_ONCE(rtp->percpu_enqueue_lim) != nr_cpu_ids)
                        needadjust = true;  // Defer adjustment to avoid deadlock.
        }
        if (!rcu_segcblist_is_enabled(&rtpcp->cblist)) {
                raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled.
                cblist_init_generic(rtp);
                raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
        }
        needwake = rcu_segcblist_empty(&rtpcp->cblist);
        rcu_segcblist_enqueue(&rtpcp->cblist, rhp);
        raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
        if (unlikely(needadjust)) {
                raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
                if (rtp->percpu_enqueue_lim != nr_cpu_ids) {
                        WRITE_ONCE(rtp->percpu_enqueue_shift, 0);
                        WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids);
                        smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids);
                        pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
                }
                raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
        }
        rcu_read_unlock();
        /* We can't create the thread unless interrupts are enabled. */
        if (needwake && READ_ONCE(rtp->kthread_ptr))
                irq_work_queue(&rtpcp->rtp_irq_work);
}

// RCU callback function for rcu_barrier_tasks_generic().
static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp)
{
        struct rcu_tasks *rtp;
        struct rcu_tasks_percpu *rtpcp;

        rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head);
        rtp = rtpcp->rtpp;
        if (atomic_dec_and_test(&rtp->barrier_q_count))
                complete(&rtp->barrier_q_completion);
}

// Wait for all in-flight callbacks for the specified RCU Tasks flavor.
// Operates in a manner similar to rcu_barrier().
static void rcu_barrier_tasks_generic(struct rcu_tasks *rtp)
{
        int cpu;
        unsigned long flags;
        struct rcu_tasks_percpu *rtpcp;
        unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq);

        mutex_lock(&rtp->barrier_q_mutex);
        if (rcu_seq_done(&rtp->barrier_q_seq, s)) {
                smp_mb();
                mutex_unlock(&rtp->barrier_q_mutex);
                return;
        }
        rcu_seq_start(&rtp->barrier_q_seq);
        init_completion(&rtp->barrier_q_completion);
        atomic_set(&rtp->barrier_q_count, 2);
        for_each_possible_cpu(cpu) {
                if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim))
                        break;
                rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
                rtpcp->barrier_q_head.func = rcu_barrier_tasks_generic_cb;
                raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
                if (rcu_segcblist_entrain(&rtpcp->cblist, &rtpcp->barrier_q_head))
                        atomic_inc(&rtp->barrier_q_count);
                raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
        }
        if (atomic_sub_and_test(2, &rtp->barrier_q_count))
                complete(&rtp->barrier_q_completion);
        wait_for_completion(&rtp->barrier_q_completion);
        rcu_seq_end(&rtp->barrier_q_seq);
        mutex_unlock(&rtp->barrier_q_mutex);
}
// Advance callbacks and indicate whether either a grace period or
// callback invocation is needed.
static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
{
        int cpu;
        unsigned long flags;
        bool gpdone = poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq);
        long n;
        long ncbs = 0;
        long ncbsnz = 0;
        int needgpcb = 0;

        for (cpu = 0; cpu < smp_load_acquire(&rtp->percpu_dequeue_lim); cpu++) {
                struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

                /* Advance and accelerate any new callbacks. */
                if (!rcu_segcblist_n_cbs(&rtpcp->cblist))
                        continue;
                raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
                // Should we shrink down to a single callback queue?
                n = rcu_segcblist_n_cbs(&rtpcp->cblist);
                if (n) {
                        ncbs += n;
                        if (cpu > 0)
                                ncbsnz += n;
                }
                rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
                (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
                if (rcu_segcblist_pend_cbs(&rtpcp->cblist))
                        needgpcb |= 0x3;
                if (!rcu_segcblist_empty(&rtpcp->cblist))
                        needgpcb |= 0x1;
                raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
        }

        // Shrink down to a single callback queue if appropriate.
        // This is done in two stages: (1) If there are no more than
        // rcu_task_collapse_lim callbacks on CPU 0 and none on any other
        // CPU, limit enqueueing to CPU 0.  (2) After an RCU grace period,
        // if there has not been an increase in callbacks, limit dequeuing
        // to CPU 0.  Note the matching RCU read-side critical section in
        // call_rcu_tasks_generic().
        if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
                raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
                if (rtp->percpu_enqueue_lim > 1) {
                        WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(nr_cpu_ids));
                        smp_store_release(&rtp->percpu_enqueue_lim, 1);
                        rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
                        gpdone = false;
                        pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
                }
                raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
        }
        if (rcu_task_cb_adjust && !ncbsnz && gpdone) {
                raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
                if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) {
                        WRITE_ONCE(rtp->percpu_dequeue_lim, 1);
                        pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name);
                }
                if (rtp->percpu_dequeue_lim == 1) {
                        for (cpu = rtp->percpu_dequeue_lim; cpu < nr_cpu_ids; cpu++) {
                                struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

                                WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist));
                        }
                }
                raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
        }

        return needgpcb;
}
// Advance callbacks and invoke any that are ready.
static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp)
{
        int cpu;
        int cpunext;
        int cpuwq;
        unsigned long flags;
        int len;
        struct rcu_head *rhp;
        struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
        struct rcu_tasks_percpu *rtpcp_next;

        cpu = rtpcp->cpu;
        cpunext = cpu * 2 + 1;
        if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
                rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
                cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND;
                queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
                cpunext++;
                if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
                        rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
                        cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND;
                        queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
                }
        }

        if (rcu_segcblist_empty(&rtpcp->cblist) || !cpu_possible(cpu))
                return;
        raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
        rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
        rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl);
        raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
        len = rcl.len;
        for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) {
                local_bh_disable();
                rhp->func(rhp);
                local_bh_enable();
                cond_resched();
        }
        raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
        rcu_segcblist_add_len(&rtpcp->cblist, -len);
        (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
        raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
}

// Workqueue flood to advance callbacks and invoke any that are ready.
static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp)
{
        struct rcu_tasks *rtp;
        struct rcu_tasks_percpu *rtpcp = container_of(wp, struct rcu_tasks_percpu, rtp_work);

        rtp = rtpcp->rtpp;
        rcu_tasks_invoke_cbs(rtp, rtpcp);
}

// Wait for one grace period.
static void rcu_tasks_one_gp(struct rcu_tasks *rtp, bool midboot)
{
        int needgpcb;

        mutex_lock(&rtp->tasks_gp_mutex);

        // If there were none, wait a bit and start over.
        if (unlikely(midboot)) {
                needgpcb = 0x2;
        } else {
                set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
                rcuwait_wait_event(&rtp->cbs_wait,
                                   (needgpcb = rcu_tasks_need_gpcb(rtp)),
                                   TASK_IDLE);
        }

        if (needgpcb & 0x2) {
                // Wait for one grace period.
                set_tasks_gp_state(rtp, RTGS_WAIT_GP);
                rtp->gp_start = jiffies;
                rcu_seq_start(&rtp->tasks_gp_seq);
                rtp->gp_func(rtp);
                rcu_seq_end(&rtp->tasks_gp_seq);
        }

        // Invoke callbacks.
        set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
        rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0));
        mutex_unlock(&rtp->tasks_gp_mutex);
}
// RCU-tasks kthread that detects grace periods and invokes callbacks.
static int __noreturn rcu_tasks_kthread(void *arg)
{
        struct rcu_tasks *rtp = arg;

        /* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
        housekeeping_affine(current, HK_TYPE_RCU);
        WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start!

        /*
         * Each pass through the following loop makes one check for
         * newly arrived callbacks, and, if there are some, waits for
         * one RCU-tasks grace period and then invokes the callbacks.
         * This loop is terminated by the system going down.  ;-)
         */
        for (;;) {
                // Wait for one grace period and invoke any callbacks
                // that are ready.
                rcu_tasks_one_gp(rtp, false);

                // Paranoid sleep to keep this from entering a tight loop.
                schedule_timeout_idle(rtp->gp_sleep);
        }
}

// Wait for a grace period for the specified flavor of Tasks RCU.
static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
{
        /* Complain if the scheduler has not started. */
        if (WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
                      "synchronize_%s() called too soon", rtp->name))
                return;

        // If the grace-period kthread is running, use it.
        if (READ_ONCE(rtp->kthread_ptr)) {
                wait_rcu_gp(rtp->call_func);
                return;
        }
        rcu_tasks_one_gp(rtp, true);
}

/* Spawn RCU-tasks grace-period kthread. */
static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
{
        struct task_struct *t;

        t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
        if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
                return;
        smp_mb(); /* Ensure others see full kthread. */
}

#ifndef CONFIG_TINY_RCU

/*
 * Print any non-default Tasks RCU settings.
 */
static void __init rcu_tasks_bootup_oddness(void)
{
#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
        int rtsimc;

        if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
                pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
        rtsimc = clamp(rcu_task_stall_info_mult, 1, 10);
        if (rtsimc != rcu_task_stall_info_mult) {
                pr_info("\tTasks-RCU CPU stall info multiplier clamped to %d (rcu_task_stall_info_mult).\n", rtsimc);
                rcu_task_stall_info_mult = rtsimc;
        }
#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
#ifdef CONFIG_TASKS_RCU
        pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RCU */
#ifdef CONFIG_TASKS_RUDE_RCU
        pr_info("\tRude variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
#ifdef CONFIG_TASKS_TRACE_RCU
        pr_info("\tTracing variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
}
#endif /* #ifndef CONFIG_TINY_RCU */
#ifndef CONFIG_TINY_RCU
/* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
{
        int cpu;
        bool havecbs = false;

        for_each_possible_cpu(cpu) {
                struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

                if (!data_race(rcu_segcblist_empty(&rtpcp->cblist))) {
                        havecbs = true;
                        break;
                }
        }
        pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c %s\n",
                rtp->kname,
                tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
                jiffies - data_race(rtp->gp_jiffies),
                data_race(rcu_seq_current(&rtp->tasks_gp_seq)),
                data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
                ".k"[!!data_race(rtp->kthread_ptr)],
                ".C"[havecbs],
                s);
}
#endif // #ifndef CONFIG_TINY_RCU

static void exit_tasks_rcu_finish_trace(struct task_struct *t);

#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)

////////////////////////////////////////////////////////////////////////
//
// Shared code between task-list-scanning variants of Tasks RCU.

/* Wait for one RCU-tasks grace period. */
static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
{
        struct task_struct *g;
        int fract;
        LIST_HEAD(holdouts);
        unsigned long j;
        unsigned long lastinfo;
        unsigned long lastreport;
        bool reported = false;
        int rtsi;
        struct task_struct *t;

        set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
        rtp->pregp_func(&holdouts);

        /*
         * There were callbacks, so we need to wait for an RCU-tasks
         * grace period.  Start off by scanning the task list for tasks
         * that are not already voluntarily blocked.  Mark these tasks
         * and make a list of them in holdouts.
         */
        set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
        if (rtp->pertask_func) {
                rcu_read_lock();
                for_each_process_thread(g, t)
                        rtp->pertask_func(t, &holdouts);
                rcu_read_unlock();
        }

        set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
        rtp->postscan_func(&holdouts);

        /*
         * Each pass through the following loop scans the list of holdout
         * tasks, removing any that are no longer holdouts.  When the list
         * is empty, we are done.
         */
        lastreport = jiffies;
        lastinfo = lastreport;
        rtsi = READ_ONCE(rcu_task_stall_info);

        // Start off with initial wait and slowly back off to 1 HZ wait.
        fract = rtp->init_fract;

        while (!list_empty(&holdouts)) {
                ktime_t exp;
                bool firstreport;
                bool needreport;
                int rtst;

                // Slowly back off waiting for holdouts
                set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
                if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
                        schedule_timeout_idle(fract);
                } else {
                        exp = jiffies_to_nsecs(fract);
                        __set_current_state(TASK_IDLE);
                        schedule_hrtimeout_range(&exp, jiffies_to_nsecs(HZ / 2), HRTIMER_MODE_REL_HARD);
                }

                if (fract < HZ)
                        fract++;

                rtst = READ_ONCE(rcu_task_stall_timeout);
                needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
                if (needreport) {
                        lastreport = jiffies;
                        reported = true;
                }
                firstreport = true;
                WARN_ON(signal_pending(current));
                set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
                rtp->holdouts_func(&holdouts, needreport, &firstreport);

                // Print pre-stall informational messages if needed.
                j = jiffies;
                if (rtsi > 0 && !reported && time_after(j, lastinfo + rtsi)) {
                        lastinfo = j;
                        rtsi = rtsi * rcu_task_stall_info_mult;
                        pr_info("%s: %s grace period %lu is %lu jiffies old.\n",
                                __func__, rtp->kname, rtp->tasks_gp_seq, j - rtp->gp_start);
                }
        }

        set_tasks_gp_state(rtp, RTGS_POST_GP);
        rtp->postgp_func(rtp);
}

#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
#ifdef CONFIG_TASKS_RCU

////////////////////////////////////////////////////////////////////////
//
// Simple variant of RCU whose quiescent states are voluntary context
// switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle.
// As such, grace periods can take one good long time.  There are no
// read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
// because this implementation is intended to get the system into a safe
// state for some of the manipulations involved in tracing and the like.
// Finally, this implementation does not support high call_rcu_tasks()
// rates from multiple CPUs.  If this is required, per-CPU callback lists
// will be needed.
//
// The implementation uses rcu_tasks_wait_gp(), which relies on function
// pointers in the rcu_tasks structure.  The rcu_spawn_tasks_kthread()
// function sets these function pointers up so that rcu_tasks_wait_gp()
// invokes these functions in this order:
//
// rcu_tasks_pregp_step():
//	Invokes synchronize_rcu() in order to wait for all in-flight
//	t->on_rq and t->nvcsw transitions to complete.  This works because
//	all such transitions are carried out with interrupts disabled.
// rcu_tasks_pertask(), invoked on every non-idle task:
//	For every runnable non-idle task other than the current one, use
//	get_task_struct() to pin down that task, snapshot that task's
//	number of voluntary context switches, and add that task to the
//	holdout list.
// rcu_tasks_postscan():
//	Invoke synchronize_srcu() to ensure that all tasks that were
//	in the process of exiting (and which thus might not know to
//	synchronize with this RCU Tasks grace period) have completed
//	exiting.
// check_all_holdout_tasks(), repeatedly until holdout list is empty:
//	Scans the holdout list, attempting to identify a quiescent state
//	for each task on the list.  If there is a quiescent state, the
//	corresponding task is removed from the holdout list.
// rcu_tasks_postgp():
//	Invokes synchronize_rcu() in order to ensure that all prior
//	t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
//	to have happened before the end of this RCU Tasks grace period.
//	Again, this works because all such transitions are carried out
//	with interrupts disabled.
//
// For each exiting task, the exit_tasks_rcu_start() and
// exit_tasks_rcu_finish() functions begin and end, respectively, the SRCU
// read-side critical sections waited for by rcu_tasks_postscan().
//
// Pre-grace-period update-side code is ordered before the grace
// period via the raw_spin_lock.*rcu_node().  Pre-grace-period read-side code
// is ordered before the grace period via synchronize_rcu() call in
// rcu_tasks_pregp_step() and by the scheduler's locks and interrupt
// disabling.

/* Pre-grace-period preparation. */
static void rcu_tasks_pregp_step(struct list_head *hop)
{
        /*
         * Wait for all pre-existing t->on_rq and t->nvcsw transitions
         * to complete.  Invoking synchronize_rcu() suffices because all
         * these transitions occur with interrupts disabled.  Without this
         * synchronize_rcu(), a read-side critical section that started
         * before the grace period might be incorrectly seen as having
         * started after the grace period.
         *
         * This synchronize_rcu() also dispenses with the need for a
         * memory barrier on the first store to t->rcu_tasks_holdout,
         * as it forces the store to happen after the beginning of the
         * grace period.
         */
        synchronize_rcu();
}
/* Per-task initial processing. */
static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
{
        if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
                get_task_struct(t);
                t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
                WRITE_ONCE(t->rcu_tasks_holdout, true);
                list_add(&t->rcu_tasks_holdout_list, hop);
        }
}

/* Processing between scanning taskslist and draining the holdout list. */
static void rcu_tasks_postscan(struct list_head *hop)
{
        /*
         * Exiting tasks may escape the tasklist scan.  Those are vulnerable
         * until their final schedule() with TASK_DEAD state.  To cope with
         * this, divide the fragile exit path part in two intersecting
         * read side critical sections:
         *
         * 1) An _SRCU_ read side starting before calling exit_notify(),
         *    which may remove the task from the tasklist, and ending after
         *    the final preempt_disable() call in do_exit().
         *
         * 2) An _RCU_ read side starting with the final preempt_disable()
         *    call in do_exit() and ending with the final call to schedule()
         *    with TASK_DEAD state.
         *
         * This handles the part 1).  And postgp will handle part 2) with a
         * call to synchronize_rcu().
         */
        synchronize_srcu(&tasks_rcu_exit_srcu);
}

/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
                               bool needreport, bool *firstreport)
{
        int cpu;

        if (!READ_ONCE(t->rcu_tasks_holdout) ||
            t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
            !READ_ONCE(t->on_rq) ||
            (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
             !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
                WRITE_ONCE(t->rcu_tasks_holdout, false);
                list_del_init(&t->rcu_tasks_holdout_list);
                put_task_struct(t);
                return;
        }
        rcu_request_urgent_qs_task(t);
        if (!needreport)
                return;
        if (*firstreport) {
                pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
                *firstreport = false;
        }
        cpu = task_cpu(t);
        pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
                 t, ".I"[is_idle_task(t)],
                 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
                 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
                 t->rcu_tasks_idle_cpu, cpu);
        sched_show_task(t);
}

/* Scan the holdout lists for tasks no longer holding out. */
static void check_all_holdout_tasks(struct list_head *hop,
                                    bool needreport, bool *firstreport)
{
        struct task_struct *t, *t1;

        list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
                check_holdout_task(t, needreport, firstreport);
                cond_resched();
        }
}

/* Finish off the Tasks-RCU grace period. */
static void rcu_tasks_postgp(struct rcu_tasks *rtp)
{
        /*
         * Because ->on_rq and ->nvcsw are not guaranteed to have a full
         * memory barriers prior to them in the schedule() path, memory
         * reordering on other CPUs could cause their RCU-tasks read-side
         * critical sections to extend past the end of the grace period.
         * However, because these ->nvcsw updates are carried out with
         * interrupts disabled, we can use synchronize_rcu() to force the
         * needed ordering on all such CPUs.
         *
         * This synchronize_rcu() also confines all ->rcu_tasks_holdout
         * accesses to be within the grace period, avoiding the need for
         * memory barriers for ->rcu_tasks_holdout accesses.
         *
         * In addition, this synchronize_rcu() waits for exiting tasks
         * to complete their final preempt_disable() region of execution,
         * cleaning up after synchronize_srcu(&tasks_rcu_exit_srcu),
         * enforcing the whole region before tasklist removal until
         * the final schedule() with TASK_DEAD state to be an RCU TASKS
         * read side critical section.
         */
        synchronize_rcu();
}
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");

/**
 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_tasks() assumes
 * that the read-side critical sections end at a voluntary context
 * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle,
 * or transition to usermode execution.  As such, there are no read-side
 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 * this primitive is intended to determine that all tasks have passed
 * through a safe state, not so much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
        call_rcu_tasks_generic(rhp, func, &rcu_tasks);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);
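
/*
 * A minimal usage sketch of call_rcu_tasks(): freeing a dynamically
 * allocated trampoline only after no task can still be executing within
 * it.  The my_tramp structure, my_tramp_free_cb(), and my_tramp_release()
 * names are hypothetical, and <linux/slab.h> is assumed to be available
 * for kfree().  Kept under #if 0 because it is illustrative only.
 */
#if 0	/* usage sketch only */
struct my_tramp {
        struct rcu_head rh;
        /* ... generated trampoline instructions ... */
};

static void my_tramp_free_cb(struct rcu_head *rhp)
{
        /* Runs only after a full Tasks-RCU grace period has elapsed. */
        kfree(container_of(rhp, struct my_tramp, rh));
}

static void my_tramp_release(struct my_tramp *tp)
{
        /* Asynchronously defer the free until all tasks are safe. */
        call_rcu_tasks(&tp->rh, my_tramp_free_cb);
}
#endif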

/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks(void)
{
        synchronize_rcu_tasks_generic(&rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
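
/*
 * A minimal synchronous-usage sketch: unpublish a hypothetical hook
 * pointer, wait for a Tasks-RCU grace period, and only then free or
 * rewrite the code the hook pointed to.  The my_hook and
 * my_hook_unregister() names are made up for this example.
 */
#if 0	/* usage sketch only */
static void (*my_hook)(void);

static void my_hook_unregister(void)
{
        WRITE_ONCE(my_hook, NULL);
        /* Wait until no task can still be executing in the old hook. */
        synchronize_rcu_tasks();
        /* It is now safe to free or rewrite the old hook's code. */
}
#endif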

/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
        rcu_barrier_tasks_generic(&rcu_tasks);
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);

static int __init rcu_spawn_tasks_kthread(void)
{
        cblist_init_generic(&rcu_tasks);
        rcu_tasks.gp_sleep = HZ / 10;
        rcu_tasks.init_fract = HZ / 10;
        rcu_tasks.pregp_func = rcu_tasks_pregp_step;
        rcu_tasks.pertask_func = rcu_tasks_pertask;
        rcu_tasks.postscan_func = rcu_tasks_postscan;
        rcu_tasks.holdouts_func = check_all_holdout_tasks;
        rcu_tasks.postgp_func = rcu_tasks_postgp;
        rcu_spawn_tasks_kthread_generic(&rcu_tasks);
        return 0;
}

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_classic_gp_kthread(void)
{
        show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
#endif // !defined(CONFIG_TINY_RCU)

/*
 * Contribute to protect against tasklist scan blind spot while the
 * task is exiting and may be removed from the tasklist.  See
 * corresponding synchronize_srcu() for further details.
 */
void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
{
        current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
}

/*
 * Contribute to protect against tasklist scan blind spot while the
 * task is exiting and may be removed from the tasklist.  See
 * corresponding synchronize_srcu() for further details.
 */
void exit_tasks_rcu_stop(void) __releases(&tasks_rcu_exit_srcu)
{
        struct task_struct *t = current;

        __srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
}

/*
 * Contribute to protect against tasklist scan blind spot while the
 * task is exiting and may be removed from the tasklist.  See
 * corresponding synchronize_srcu() for further details.
 */
void exit_tasks_rcu_finish(void)
{
        exit_tasks_rcu_stop();
        exit_tasks_rcu_finish_trace(current);
}

#else /* #ifdef CONFIG_TASKS_RCU */
void exit_tasks_rcu_start(void) { }
void exit_tasks_rcu_stop(void) { }
void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
#endif /* #else #ifdef CONFIG_TASKS_RCU */
#ifdef CONFIG_TASKS_RUDE_RCU

////////////////////////////////////////////////////////////////////////
//
// "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
// passing an empty function to schedule_on_each_cpu().  This approach
// provides an asynchronous call_rcu_tasks_rude() API and batching of
// concurrent calls to the synchronous synchronize_rcu_tasks_rude() API.
// This invokes schedule_on_each_cpu() in order to send IPIs far and wide
// and induces otherwise unnecessary context switches on all online CPUs,
// whether idle or not.
//
// Callback handling is provided by the rcu_tasks_kthread() function.
//
// Ordering is provided by the scheduler's context-switch code.

// Empty function to allow workqueues to force a context switch.
static void rcu_tasks_be_rude(struct work_struct *work)
{
}

// Wait for one rude RCU-tasks grace period.
static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
{
        rtp->n_ipis += cpumask_weight(cpu_online_mask);
        schedule_on_each_cpu(rcu_tasks_be_rude);
}

void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
                 "RCU Tasks Rude");

/**
 * call_rcu_tasks_rude() - Queue a callback for invocation after a rude task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_tasks_rude()
 * assumes that the read-side critical sections end at context switch,
 * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as
 * usermode execution is schedulable).  As such, there are no read-side
 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 * this primitive is intended to determine that all tasks have passed
 * through a safe state, not so much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
{
        call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);

/**
 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
 *
 * Control will return to the caller some time after a rude rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), userspace execution (which is a schedulable
 * context), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks.  The synchronize_rcu_tasks_rude() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks_rude(void)
{
        synchronize_rcu_tasks_generic(&rcu_tasks_rude);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);

/**
 * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks_rude(void)
{
        rcu_barrier_tasks_generic(&rcu_tasks_rude);
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);

static int __init rcu_spawn_tasks_rude_kthread(void)
{
        cblist_init_generic(&rcu_tasks_rude);
        rcu_tasks_rude.gp_sleep = HZ / 10;
        rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
        return 0;
}

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_rude_gp_kthread(void)
{
        show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
#endif // !defined(CONFIG_TINY_RCU)
#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
////////////////////////////////////////////////////////////////////////
//
// Tracing variant of Tasks RCU.  This variant is designed to be used
// to protect tracing hooks, including those of BPF.  This variant
// therefore:
//
// 1.	Has explicit read-side markers to allow finite grace periods
//	in the face of in-kernel loops for PREEMPT=n builds.
//
// 2.	Protects code in the idle loop, exception entry/exit, and
//	CPU-hotplug code paths, similar to the capabilities of SRCU.
//
// 3.	Avoids expensive read-side instructions, having overhead similar
//	to that of Preemptible RCU.
//
// There are of course downsides.  For example, the grace-period code
// can send IPIs to CPUs, even when those CPUs are in the idle loop or
// in nohz_full userspace.  If needed, these downsides can be at least
// partially remedied.
//
// Perhaps most important, this variant of RCU does not affect the vanilla
// flavors, rcu_preempt and rcu_sched.  The fact that RCU Tasks Trace
// readers can operate from idle, offline, and exception entry/exit in no
// way allows rcu_preempt and rcu_sched readers to also do so.
//
// The implementation uses rcu_tasks_wait_gp(), which relies on function
// pointers in the rcu_tasks structure.  The rcu_spawn_tasks_trace_kthread()
// function sets these function pointers up so that rcu_tasks_wait_gp()
// invokes these functions in this order:
//
// rcu_tasks_trace_pregp_step():
//	Disables CPU hotplug, adds all currently executing tasks to the
//	holdout list, then checks the state of all tasks that blocked
//	or were preempted within their current RCU Tasks Trace read-side
//	critical section, adding them to the holdout list if appropriate.
//	Finally, this function re-enables CPU hotplug.
// The ->pertask_func() pointer is NULL, so there is no per-task processing.
// rcu_tasks_trace_postscan():
//	Invokes synchronize_rcu() to wait for late-stage exiting tasks
//	to finish exiting.
// check_all_holdout_tasks_trace(), repeatedly until holdout list is empty:
//	Scans the holdout list, attempting to identify a quiescent state
//	for each task on the list.  If there is a quiescent state, the
//	corresponding task is removed from the holdout list.  Once this
//	list is empty, the grace period has completed.
// rcu_tasks_trace_postgp():
//	Provides the needed full memory barrier and does debug checks.
//
// The exit_tasks_rcu_finish_trace() synchronizes with exiting tasks.
//
// Pre-grace-period update-side code is ordered before the grace period
// via the ->cbs_lock and barriers in rcu_tasks_kthread().  Pre-grace-period
// read-side code is ordered before the grace period by atomic operations
// on .b.need_qs flag of each task involved in this process, or by scheduler
// context-switch ordering (for locked-down non-running readers).
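
/*
 * A minimal read-side/update-side sketch, assuming a hypothetical 'my_ops'
 * pointer protected by RCU Tasks Trace (much as BPF protects its programs).
 * The my_ops names are illustrative; the APIs used are rcu_read_lock_trace(),
 * rcu_read_unlock_trace(), rcu_read_lock_trace_held(), and
 * synchronize_rcu_tasks_trace() from <linux/rcupdate_trace.h>, plus kfree()
 * from <linux/slab.h>.  Kept under #if 0 because it is illustrative only.
 */
#if 0	/* usage sketch only */
struct my_ops {
        void (*func)(void);
};

static struct my_ops __rcu *my_ops_ptr;

static void my_call_ops(void)
{
        struct my_ops *ops;

        rcu_read_lock_trace();
        ops = rcu_dereference_check(my_ops_ptr, rcu_read_lock_trace_held());
        if (ops)
                ops->func();
        rcu_read_unlock_trace();
}

static void my_swap_ops(struct my_ops *new_ops)
{
        struct my_ops *old = rcu_replace_pointer(my_ops_ptr, new_ops, true);

        /* Wait for all pre-existing trace readers before freeing. */
        synchronize_rcu_tasks_trace();
        kfree(old);
}
#endif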
// The lockdep state must be outside of #ifdef to be useful.
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_trace_key;
struct lockdep_map rcu_trace_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_TASKS_TRACE_RCU

// Record outstanding IPIs to each CPU. No point in sending two...
static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);

// The number of detections of task quiescent state relying on
// heavyweight readers executing explicit memory barriers.
static unsigned long n_heavy_reader_attempts;
static unsigned long n_heavy_reader_updates;
static unsigned long n_heavy_reader_ofl_updates;
static unsigned long n_trc_holdouts;

void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
		 "RCU Tasks Trace");

/* Load from ->trc_reader_special.b.need_qs with proper ordering. */
static u8 rcu_ld_need_qs(struct task_struct *t)
{
	smp_mb(); // Enforce full grace-period ordering.
	return smp_load_acquire(&t->trc_reader_special.b.need_qs);
}

/* Store to ->trc_reader_special.b.need_qs with proper ordering. */
static void rcu_st_need_qs(struct task_struct *t, u8 v)
{
	smp_store_release(&t->trc_reader_special.b.need_qs, v);
	smp_mb(); // Enforce full grace-period ordering.
}

/*
 * Do a cmpxchg() on ->trc_reader_special.b.need_qs, allowing for
 * the four-byte operand-size restriction of some platforms.
 * Returns the old value, which is often ignored.
 */
u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new)
{
	union rcu_special ret;
	union rcu_special trs_old = READ_ONCE(t->trc_reader_special);
	union rcu_special trs_new = trs_old;

	if (trs_old.b.need_qs != old)
		return trs_old.b.need_qs;
	trs_new.b.need_qs = new;
	ret.s = cmpxchg(&t->trc_reader_special.s, trs_old.s, trs_new.s);
	return ret.b.need_qs;
}
EXPORT_SYMBOL_GPL(rcu_trc_cmpxchg_need_qs);

/*
 * If we are the last reader, signal the grace-period kthread.
 * Also remove from the per-CPU list of blocked tasks.
 */
void rcu_read_unlock_trace_special(struct task_struct *t)
{
	unsigned long flags;
	struct rcu_tasks_percpu *rtpcp;
	union rcu_special trs;

	// Open-coded full-word version of rcu_ld_need_qs().
	smp_mb(); // Enforce full grace-period ordering.
	trs = smp_load_acquire(&t->trc_reader_special);

	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && t->trc_reader_special.b.need_mb)
		smp_mb(); // Pairs with update-side barriers.
	// Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
	if (trs.b.need_qs == (TRC_NEED_QS_CHECKED | TRC_NEED_QS)) {
		u8 result = rcu_trc_cmpxchg_need_qs(t, TRC_NEED_QS_CHECKED | TRC_NEED_QS,
						    TRC_NEED_QS_CHECKED);

		WARN_ONCE(result != trs.b.need_qs, "%s: result = %d", __func__, result);
	}
	if (trs.b.blocked) {
		rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, t->trc_blkd_cpu);
		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
		list_del_init(&t->trc_blkd_node);
		WRITE_ONCE(t->trc_reader_special.b.blocked, false);
		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	}
	WRITE_ONCE(t->trc_reader_nesting, 0);
}
EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);

/* Add a newly blocked reader task to its CPU's list. */
void rcu_tasks_trace_qs_blkd(struct task_struct *t)
{
	unsigned long flags;
	struct rcu_tasks_percpu *rtpcp;

	local_irq_save(flags);
	rtpcp = this_cpu_ptr(rcu_tasks_trace.rtpcpu);
	raw_spin_lock_rcu_node(rtpcp); // irqs already disabled
	t->trc_blkd_cpu = smp_processor_id();
	if (!rtpcp->rtp_blkd_tasks.next)
		INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
	list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
	WRITE_ONCE(t->trc_reader_special.b.blocked, true);
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
}
EXPORT_SYMBOL_GPL(rcu_tasks_trace_qs_blkd);

/* Add a task to the holdout list, if it is not already on the list. */
static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
{
	if (list_empty(&t->trc_holdout_list)) {
		get_task_struct(t);
		list_add(&t->trc_holdout_list, bhp);
		n_trc_holdouts++;
	}
}

/* Remove a task from the holdout list, if it is in fact present. */
static void trc_del_holdout(struct task_struct *t)
{
	if (!list_empty(&t->trc_holdout_list)) {
		list_del_init(&t->trc_holdout_list);
		put_task_struct(t);
		n_trc_holdouts--;
	}
}

/* IPI handler to check task state. */
static void trc_read_check_handler(void *t_in)
{
	int nesting;
	struct task_struct *t = current;
	struct task_struct *texp = t_in;

	// If the task is no longer running on this CPU, leave.
	if (unlikely(texp != t))
		goto reset_ipi; // Already on holdout list, so will check later.

	// If the task is not in a read-side critical section, and
	// if this is the last reader, awaken the grace-period kthread.
	nesting = READ_ONCE(t->trc_reader_nesting);
	if (likely(!nesting)) {
		rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
		goto reset_ipi;
	}
	// If we are racing with an rcu_read_unlock_trace(), try again later.
	if (unlikely(nesting < 0))
		goto reset_ipi;

	// Get here if the task is in a read-side critical section.
	// Set its state so that it will update state for the grace-period
	// kthread upon exit from that critical section.
	rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED);

reset_ipi:
	// Allow future IPIs to be sent on CPU and for task.
	// Also order this IPI handler against any later manipulations of
	// the intended task.
	smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
	smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
}

/* Callback function for scheduler to check locked-down task. */
static int trc_inspect_reader(struct task_struct *t, void *bhp_in)
{
	struct list_head *bhp = bhp_in;
	int cpu = task_cpu(t);
	int nesting;
	bool ofl = cpu_is_offline(cpu);

	if (task_curr(t) && !ofl) {
		// If no chance of heavyweight readers, do it the hard way.
		if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
			return -EINVAL;

		// If heavyweight readers are enabled on the remote task,
		// we can inspect its state even though it is currently
		// running. However, we cannot safely change its state.
		n_heavy_reader_attempts++;
		// Check for "running" idle tasks on offline CPUs.
		if (!rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
			return -EINVAL; // No quiescent state, do it the hard way.
		n_heavy_reader_updates++;
		nesting = 0;
	} else {
		// The task is not running, so C-language access is safe.
		nesting = t->trc_reader_nesting;
		WARN_ON_ONCE(ofl && task_curr(t) && !is_idle_task(t));
		if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && ofl)
			n_heavy_reader_ofl_updates++;
	}

	// If not exiting a read-side critical section, mark as checked
	// so that the grace-period kthread will remove it from the
	// holdout list.
	if (!nesting) {
		rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
		return 0;  // In QS, so done.
	}
	if (nesting < 0)
		return -EINVAL; // Reader transitioning, try again later.

	// The task is in a read-side critical section, so set up its
	// state so that it will update state upon exit from that critical
	// section.
	if (!rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED))
		trc_add_holdout(t, bhp);
	return 0;
}

/* Attempt to extract the state for the specified task. */
static void trc_wait_for_one_reader(struct task_struct *t,
				    struct list_head *bhp)
{
	int cpu;

	// If a previous IPI is still in flight, let it complete.
	if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
		return;

	// The current task had better be in a quiescent state.
	if (t == current) {
		rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
		WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
		return;
	}

	// Attempt to nail down the task for inspection.
	get_task_struct(t);
	if (!task_call_func(t, trc_inspect_reader, bhp)) {
		put_task_struct(t);
		return;
	}
	put_task_struct(t);

	// If this task is not yet on the holdout list, then we are in
	// an RCU read-side critical section. Otherwise, the invocation of
	// trc_add_holdout() that added it to the list did the necessary
	// get_task_struct(). Either way, the task cannot be freed out
	// from under this code.
	// If currently running, send an IPI; either way, add it to the list.
	trc_add_holdout(t, bhp);

	if (task_curr(t) &&
	    time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
		// The task is currently running, so try IPIing it.
		cpu = task_cpu(t);

		// If there is already an IPI outstanding, let it happen.
		if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
			return;

		per_cpu(trc_ipi_to_cpu, cpu) = true;
		t->trc_ipi_to_cpu = cpu;
		rcu_tasks_trace.n_ipis++;
		if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
			// Just in case there is some other reason for
			// failure than the target CPU being offline.
			WARN_ONCE(1, "%s(): smp_call_function_single() failed for CPU: %d\n",
				  __func__, cpu);
			rcu_tasks_trace.n_ipis_fails++;
			per_cpu(trc_ipi_to_cpu, cpu) = false;
			t->trc_ipi_to_cpu = -1;
		}
	}
}

/*
 * Initialize for first-round processing for the specified task.
 * Return false if task is NULL or already taken care of, true otherwise.
 */
static bool rcu_tasks_trace_pertask_prep(struct task_struct *t, bool notself)
{
	// During early boot when there is only the one boot CPU, there
	// is no idle task for the other CPUs. Also, the grace-period
	// kthread is always in a quiescent state. In addition, just return
	// if this task is already on the list.
	if (unlikely(t == NULL) || (t == current && notself) || !list_empty(&t->trc_holdout_list))
		return false;

	rcu_st_need_qs(t, 0);
	t->trc_ipi_to_cpu = -1;
	return true;
}

/* Do first-round processing for the specified task. */
static void rcu_tasks_trace_pertask(struct task_struct *t, struct list_head *hop)
{
	if (rcu_tasks_trace_pertask_prep(t, true))
		trc_wait_for_one_reader(t, hop);
}

/* Initialize for a new RCU-tasks-trace grace period. */
static void rcu_tasks_trace_pregp_step(struct list_head *hop)
{
	LIST_HEAD(blkd_tasks);
	int cpu;
	unsigned long flags;
	struct rcu_tasks_percpu *rtpcp;
	struct task_struct *t;

	// There shouldn't be any old IPIs, but...
	for_each_possible_cpu(cpu)
		WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));

	// Disable CPU hotplug across the CPU scan for the benefit of
	// any IPIs that might be needed. This also waits for all readers
	// in CPU-hotplug code paths.
	cpus_read_lock();

	// These rcu_tasks_trace_pertask_prep() calls are serialized to
	// allow safe access to the hop list.
	for_each_online_cpu(cpu) {
		rcu_read_lock();
		t = cpu_curr_snapshot(cpu);
		if (rcu_tasks_trace_pertask_prep(t, true))
			trc_add_holdout(t, hop);
		rcu_read_unlock();
		cond_resched_tasks_rcu_qs();
	}

	// Only after all running tasks have been accounted for is it
	// safe to take care of the tasks that have blocked within their
	// current RCU tasks trace read-side critical section.
	for_each_possible_cpu(cpu) {
		rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, cpu);
		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
		list_splice_init(&rtpcp->rtp_blkd_tasks, &blkd_tasks);
		while (!list_empty(&blkd_tasks)) {
			rcu_read_lock();
			t = list_first_entry(&blkd_tasks, struct task_struct, trc_blkd_node);
			list_del_init(&t->trc_blkd_node);
			list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
			raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
			rcu_tasks_trace_pertask(t, hop);
			rcu_read_unlock();
			raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
		}
		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
		cond_resched_tasks_rcu_qs();
	}

	// Re-enable CPU hotplug now that the holdout list is populated.
	cpus_read_unlock();
}

/*
 * Do intermediate processing between task and holdout scans.
 */
static void rcu_tasks_trace_postscan(struct list_head *hop)
{
	// Wait for late-stage exiting tasks to finish exiting.
	// These might have passed the call to exit_tasks_rcu_finish().
	synchronize_rcu();
	// Any tasks that exit after this point will set
	// TRC_NEED_QS_CHECKED in ->trc_reader_special.b.need_qs.
}

/* Communicate task state back to the RCU tasks trace stall warning request. */
struct trc_stall_chk_rdr {
	int nesting;
	int ipi_to_cpu;
	u8 needqs;
};

static int trc_check_slow_task(struct task_struct *t, void *arg)
{
	struct trc_stall_chk_rdr *trc_rdrp = arg;

	if (task_curr(t) && cpu_online(task_cpu(t)))
		return false; // It is running, so decline to inspect it.
	trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting);
	trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu);
	trc_rdrp->needqs = rcu_ld_need_qs(t);
	return true;
}

/* Show the state of a task stalling the current RCU tasks trace GP. */
static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
{
	int cpu;
	struct trc_stall_chk_rdr trc_rdr;
	bool is_idle_tsk = is_idle_task(t);

	if (*firstreport) {
		pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	if (!task_call_func(t, trc_check_slow_task, &trc_rdr))
		pr_alert("P%d: %c%c\n",
			 t->pid,
			 ".I"[t->trc_ipi_to_cpu >= 0],
			 ".i"[is_idle_tsk]);
	else
		pr_alert("P%d: %c%c%c%c nesting: %d%c%c cpu: %d%s\n",
			 t->pid,
			 ".I"[trc_rdr.ipi_to_cpu >= 0],
			 ".i"[is_idle_tsk],
			 ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)],
			 ".B"[!!data_race(t->trc_reader_special.b.blocked)],
			 trc_rdr.nesting,
			 " !CN"[trc_rdr.needqs & 0x3],
			 " ?"[trc_rdr.needqs > 0x3],
			 cpu, cpu_online(cpu) ? "" : "(offline)");
	sched_show_task(t);
}

/* List stalled IPIs for RCU tasks trace. */
static void show_stalled_ipi_trace(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		if (per_cpu(trc_ipi_to_cpu, cpu))
			pr_alert("\tIPI outstanding to CPU %d\n", cpu);
}

/* Do one scan of the holdout list. */
static void check_all_holdout_tasks_trace(struct list_head *hop,
					  bool needreport, bool *firstreport)
{
	struct task_struct *g, *t;

	// Disable CPU hotplug across the holdout list scan for IPIs.
	cpus_read_lock();

	list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
		// If safe and needed, try to check the current task.
		if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
		    !(rcu_ld_need_qs(t) & TRC_NEED_QS_CHECKED))
			trc_wait_for_one_reader(t, hop);

		// If check succeeded, remove this task from the list.
		if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 &&
		    rcu_ld_need_qs(t) == TRC_NEED_QS_CHECKED)
			trc_del_holdout(t);
		else if (needreport)
			show_stalled_task_trace(t, firstreport);
		cond_resched_tasks_rcu_qs();
	}

	// Re-enable CPU hotplug now that the holdout list scan has completed.
	cpus_read_unlock();

	if (needreport) {
		if (*firstreport)
			pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
		show_stalled_ipi_trace();
	}
}

static void rcu_tasks_trace_empty_fn(void *unused)
{
}

/* Wait for grace period to complete and provide ordering. */
static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
{
	int cpu;

	// Wait for any lingering IPI handlers to complete. Note that
	// if a CPU has gone offline or transitioned to userspace in the
	// meantime, all IPI handlers should have been drained beforehand.
	// Yes, this assumes that CPUs process IPIs in order. If that ever
	// changes, there will need to be a recheck and/or timed wait.
	for_each_online_cpu(cpu)
		if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu))))
			smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1);

	smp_mb(); // Caller's code must be ordered after wakeup.
		  // Pairs with pretty much every ordering primitive.
}

/* Report any needed quiescent state for this exiting task. */
static void exit_tasks_rcu_finish_trace(struct task_struct *t)
{
	union rcu_special trs = READ_ONCE(t->trc_reader_special);

	rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
	WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
	if (WARN_ON_ONCE(rcu_ld_need_qs(t) & TRC_NEED_QS || trs.b.blocked))
		rcu_read_unlock_trace_special(t);
	else
		WRITE_ONCE(t->trc_reader_nesting, 0);
}

/**
 * call_rcu_tasks_trace() - Queue a callback for a trace rcu-tasks grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a trace rcu-tasks
 * grace period elapses, in other words after all currently executing
 * trace rcu-tasks read-side critical sections have completed. These
 * read-side critical sections are delimited by calls to rcu_read_lock_trace()
 * and rcu_read_unlock_trace().
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);
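
/*
 * Editorial illustration, not part of the original file: a minimal sketch
 * of the asynchronous update-side pattern documented above. The structure,
 * callback, and helper names are invented for illustration, and kfree()
 * is assumed to be available (<linux/slab.h>) in this translation unit.
 */
struct trace_hook_sketch {
	struct rcu_head rh;
	int payload;
};

static void trace_hook_sketch_free_cb(struct rcu_head *rhp)
{
	/* Runs only after all pre-existing Tasks Trace readers finish. */
	kfree(container_of(rhp, struct trace_hook_sketch, rh));
}

static void __maybe_unused trace_hook_sketch_retire(struct trace_hook_sketch *old)
{
	/* After unpublishing "old" (e.g., via rcu_assign_pointer() elsewhere): */
	call_rcu_tasks_trace(&old->rh, trace_hook_sketch_free_cb);
}
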
/**
 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
 *
 * Control will return to the caller some time after a trace rcu-tasks
 * grace period has elapsed, in other words after all currently executing
 * trace rcu-tasks read-side critical sections have completed. These read-side
 * critical sections are delimited by calls to rcu_read_lock_trace()
 * and rcu_read_unlock_trace().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks. The synchronize_rcu_tasks_trace() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks_trace(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
	synchronize_rcu_tasks_generic(&rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);
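
/*
 * Editorial illustration, not part of the original file: the synchronous
 * counterpart of the pattern sketched above. The global pointer and the
 * helper are hypothetical; in real code the "1" passed to
 * rcu_dereference_protected() would be a proper lockdep expression.
 */
static int __rcu *trace_hook_sketch_val;

static void __maybe_unused trace_hook_sketch_retire_sync(void)
{
	int *old;

	old = rcu_dereference_protected(trace_hook_sketch_val, 1);
	rcu_assign_pointer(trace_hook_sketch_val, NULL);	// Unpublish.
	synchronize_rcu_tasks_trace();	// Wait for all pre-existing readers.
	kfree(old);			// No reader can still hold a reference.
}
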
/**
 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks_trace(void)
{
	rcu_barrier_tasks_generic(&rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);
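
/*
 * Editorial illustration, not part of the original file: the usual
 * teardown ordering for the callback-based sketch shown after
 * call_rcu_tasks_trace() above. The surrounding "stop queueing" and
 * "free module data" steps are hypothetical; only the placement of the
 * barrier is the point.
 */
static void __maybe_unused trace_hook_sketch_teardown(void)
{
	/* 1. Ensure no further call_rcu_tasks_trace() invocations can occur. */
	/* 2. Wait for all already-queued callbacks to be invoked. */
	rcu_barrier_tasks_trace();
	/* 3. Only now is it safe to free the memory the callbacks touch. */
}
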
static int __init rcu_spawn_tasks_trace_kthread(void)
{
	cblist_init_generic(&rcu_tasks_trace);
	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
		rcu_tasks_trace.gp_sleep = HZ / 10;
		rcu_tasks_trace.init_fract = HZ / 10;
	} else {
		rcu_tasks_trace.gp_sleep = HZ / 200;
		if (rcu_tasks_trace.gp_sleep <= 0)
			rcu_tasks_trace.gp_sleep = 1;
		rcu_tasks_trace.init_fract = HZ / 200;
		if (rcu_tasks_trace.init_fract <= 0)
			rcu_tasks_trace.init_fract = 1;
	}
	rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
	rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
	rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
	rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
	return 0;
}

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_trace_gp_kthread(void)
{
	char buf[64];

	sprintf(buf, "N%lu h:%lu/%lu/%lu",
		data_race(n_trc_holdouts),
		data_race(n_heavy_reader_ofl_updates),
		data_race(n_heavy_reader_updates),
		data_race(n_heavy_reader_attempts));
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
#endif // !defined(CONFIG_TINY_RCU)

#else /* #ifdef CONFIG_TASKS_TRACE_RCU */
static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
#endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */

#ifndef CONFIG_TINY_RCU
void show_rcu_tasks_gp_kthreads(void)
{
	show_rcu_tasks_classic_gp_kthread();
	show_rcu_tasks_rude_gp_kthread();
	show_rcu_tasks_trace_gp_kthread();
}
#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_PROVE_RCU
struct rcu_tasks_test_desc {
	struct rcu_head rh;
	const char *name;
	bool notrun;
	unsigned long runstart;
};

static struct rcu_tasks_test_desc tests[] = {
	{
		.name = "call_rcu_tasks()",
		/* If not defined, the test is skipped. */
		.notrun = IS_ENABLED(CONFIG_TASKS_RCU),
	},
	{
		.name = "call_rcu_tasks_rude()",
		/* If not defined, the test is skipped. */
		.notrun = IS_ENABLED(CONFIG_TASKS_RUDE_RCU),
	},
	{
		.name = "call_rcu_tasks_trace()",
		/* If not defined, the test is skipped. */
		.notrun = IS_ENABLED(CONFIG_TASKS_TRACE_RCU)
	}
};

static void test_rcu_tasks_callback(struct rcu_head *rhp)
{
	struct rcu_tasks_test_desc *rttd =
		container_of(rhp, struct rcu_tasks_test_desc, rh);

	pr_info("Callback from %s invoked.\n", rttd->name);

	rttd->notrun = false;
}

static void rcu_tasks_initiate_self_tests(void)
{
	unsigned long j = jiffies;

	pr_info("Running RCU-tasks wait API self tests\n");
#ifdef CONFIG_TASKS_RCU
	tests[0].runstart = j;
	synchronize_rcu_tasks();
	call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback);
#endif

#ifdef CONFIG_TASKS_RUDE_RCU
	tests[1].runstart = j;
	synchronize_rcu_tasks_rude();
	call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback);
#endif

#ifdef CONFIG_TASKS_TRACE_RCU
	tests[2].runstart = j;
	synchronize_rcu_tasks_trace();
	call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback);
#endif
}

/*
 * Return:  0 - test passed
 *          1 - test failed, but have not timed out yet
 *         -1 - test failed and timed out
 */
static int rcu_tasks_verify_self_tests(void)
{
	int ret = 0;
	int i;
	unsigned long bst = rcu_task_stall_timeout;

	if (bst <= 0 || bst > RCU_TASK_BOOT_STALL_TIMEOUT)
		bst = RCU_TASK_BOOT_STALL_TIMEOUT;
	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		while (tests[i].notrun) {		// still hanging.
			if (time_after(jiffies, tests[i].runstart + bst)) {
				pr_err("%s has failed boot-time tests.\n", tests[i].name);
				ret = -1;
				break;
			}
			ret = 1;
			break;
		}
	}
	WARN_ON(ret < 0);

	return ret;
}

/*
 * Repeat the rcu_tasks_verify_self_tests() call once every second until the
 * test passes or has timed out.
 */
static struct delayed_work rcu_tasks_verify_work;
static void rcu_tasks_verify_work_fn(struct work_struct *work __maybe_unused)
{
	int ret = rcu_tasks_verify_self_tests();

	if (ret <= 0)
		return;

	/* Test fails but not timed out yet, reschedule another check */
	schedule_delayed_work(&rcu_tasks_verify_work, HZ);
}

static int rcu_tasks_verify_schedule_work(void)
{
	INIT_DELAYED_WORK(&rcu_tasks_verify_work, rcu_tasks_verify_work_fn);
	rcu_tasks_verify_work_fn(NULL);
	return 0;
}
late_initcall(rcu_tasks_verify_schedule_work);

#else /* #ifdef CONFIG_PROVE_RCU */
static void rcu_tasks_initiate_self_tests(void) { }
#endif /* #else #ifdef CONFIG_PROVE_RCU */

void __init rcu_init_tasks_generic(void)
{
#ifdef CONFIG_TASKS_RCU
	rcu_spawn_tasks_kthread();
#endif

#ifdef CONFIG_TASKS_RUDE_RCU
	rcu_spawn_tasks_rude_kthread();
#endif

#ifdef CONFIG_TASKS_TRACE_RCU
	rcu_spawn_tasks_trace_kthread();
#endif

	// Run the self-tests.
	rcu_tasks_initiate_self_tests();
}

#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
static inline void rcu_tasks_bootup_oddness(void) {}
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */