rcutorture.c

  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Read-Copy Update module-based torture test facility
  4. *
  5. * Copyright (C) IBM Corporation, 2005, 2006
  6. *
  7. * Authors: Paul E. McKenney <[email protected]>
  8. * Josh Triplett <[email protected]>
  9. *
  10. * See also: Documentation/RCU/torture.rst
  11. */
  12. #define pr_fmt(fmt) fmt
  13. #include <linux/types.h>
  14. #include <linux/kernel.h>
  15. #include <linux/init.h>
  16. #include <linux/module.h>
  17. #include <linux/kthread.h>
  18. #include <linux/err.h>
  19. #include <linux/spinlock.h>
  20. #include <linux/smp.h>
  21. #include <linux/rcupdate_wait.h>
  22. #include <linux/interrupt.h>
  23. #include <linux/sched/signal.h>
  24. #include <uapi/linux/sched/types.h>
  25. #include <linux/atomic.h>
  26. #include <linux/bitops.h>
  27. #include <linux/completion.h>
  28. #include <linux/moduleparam.h>
  29. #include <linux/percpu.h>
  30. #include <linux/notifier.h>
  31. #include <linux/reboot.h>
  32. #include <linux/freezer.h>
  33. #include <linux/cpu.h>
  34. #include <linux/delay.h>
  35. #include <linux/stat.h>
  36. #include <linux/srcu.h>
  37. #include <linux/slab.h>
  38. #include <linux/trace_clock.h>
  39. #include <asm/byteorder.h>
  40. #include <linux/torture.h>
  41. #include <linux/vmalloc.h>
  42. #include <linux/sched/debug.h>
  43. #include <linux/sched/sysctl.h>
  44. #include <linux/oom.h>
  45. #include <linux/tick.h>
  46. #include <linux/rcupdate_trace.h>
  47. #include <linux/nmi.h>
  48. #include "rcu.h"
  49. MODULE_LICENSE("GPL");
  50. MODULE_AUTHOR("Paul E. McKenney <[email protected]> and Josh Triplett <[email protected]>");
  51. /* Bits for ->extendables field, extendables param, and related definitions. */
  52. #define RCUTORTURE_RDR_SHIFT_1 8 /* Put SRCU index in upper bits. */
  53. #define RCUTORTURE_RDR_MASK_1 (1 << RCUTORTURE_RDR_SHIFT_1)
  54. #define RCUTORTURE_RDR_SHIFT_2 9 /* Put SRCU index in upper bits. */
  55. #define RCUTORTURE_RDR_MASK_2 (1 << RCUTORTURE_RDR_SHIFT_2)
  56. #define RCUTORTURE_RDR_BH 0x01 /* Extend readers by disabling bh. */
  57. #define RCUTORTURE_RDR_IRQ 0x02 /* ... disabling interrupts. */
  58. #define RCUTORTURE_RDR_PREEMPT 0x04 /* ... disabling preemption. */
  59. #define RCUTORTURE_RDR_RBH 0x08 /* ... rcu_read_lock_bh(). */
  60. #define RCUTORTURE_RDR_SCHED 0x10 /* ... rcu_read_lock_sched(). */
  61. #define RCUTORTURE_RDR_RCU_1 0x20 /* ... entering another RCU reader. */
  62. #define RCUTORTURE_RDR_RCU_2 0x40 /* ... entering another RCU reader. */
  63. #define RCUTORTURE_RDR_NBITS 7 /* Number of bits defined above. */
  64. #define RCUTORTURE_MAX_EXTEND \
  65. (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
  66. RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
  67. #define RCUTORTURE_RDR_MAX_LOOPS 0x7 /* Maximum reader extensions. */
  68. /* Must be power of two minus one. */
  69. #define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
  70. torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
  71. "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
  72. torture_param(int, fqs_duration, 0, "Duration of fqs bursts (us), 0 to disable");
  73. torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
  74. torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
  75. torture_param(int, fwd_progress, 1, "Number of grace-period forward progress tasks (0 to disable)");
  76. torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
  77. torture_param(int, fwd_progress_holdoff, 60, "Time between forward-progress tests (s)");
  78. torture_param(bool, fwd_progress_need_resched, 1, "Hide cond_resched() behind need_resched()");
  79. torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
  80. torture_param(bool, gp_cond_exp, false, "Use conditional/async expedited GP wait primitives");
  81. torture_param(bool, gp_cond_full, false, "Use conditional/async full-state GP wait primitives");
  82. torture_param(bool, gp_cond_exp_full, false,
  83. "Use conditional/async full-stateexpedited GP wait primitives");
  84. torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
  85. torture_param(bool, gp_normal, false, "Use normal (non-expedited) GP wait primitives");
  86. torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
  87. torture_param(bool, gp_poll_exp, false, "Use polling expedited GP wait primitives");
  88. torture_param(bool, gp_poll_full, false, "Use polling full-state GP wait primitives");
  89. torture_param(bool, gp_poll_exp_full, false, "Use polling full-state expedited GP wait primitives");
  90. torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
  91. torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
  92. torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
  93. torture_param(int, n_barrier_cbs, 0, "# of callbacks/kthreads for barrier testing");
  94. torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
  95. torture_param(int, nreaders, -1, "Number of RCU reader threads");
  96. torture_param(int, object_debug, 0, "Enable debug-object double call_rcu() testing");
  97. torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
  98. torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (jiffies), 0=disable");
  99. torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
  100. torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
  101. torture_param(int, read_exit_delay, 13, "Delay between read-then-exit episodes (s)");
  102. torture_param(int, read_exit_burst, 16, "# of read-then-exit bursts per episode, zero to disable");
  103. torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
  104. torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
  105. torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
  106. torture_param(int, stall_cpu_holdoff, 10, "Time to wait before starting stall (s).");
  107. torture_param(bool, stall_no_softlockup, false, "Avoid softlockup warning during cpu stall.");
  108. torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
  109. torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
  110. torture_param(int, stall_gp_kthread, 0, "Grace-period kthread stall duration (s).");
  111. torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
  112. torture_param(int, stutter, 5, "Number of seconds to run/halt test");
  113. torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
  114. torture_param(int, test_boost_duration, 4, "Duration of each boost test, seconds.");
  115. torture_param(int, test_boost_interval, 7, "Interval between boost tests, seconds.");
  116. torture_param(bool, test_no_idle_hz, true, "Test support for tickless idle CPUs");
  117. torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
  118. static char *torture_type = "rcu";
  119. module_param(torture_type, charp, 0444);
  120. MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
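/*
 * Illustrative usage of the parameters above (not part of the original
 * source): when built as a module, they can be supplied at load time, e.g.
 *
 *   modprobe rcutorture torture_type=srcu nreaders=8 stat_interval=30
 *
 * and when built in, on the kernel command line with the module-name
 * prefix, e.g. rcutorture.torture_type=srcu.
 */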
  121. static int nrealnocbers;
  122. static int nrealreaders;
  123. static struct task_struct *writer_task;
  124. static struct task_struct **fakewriter_tasks;
  125. static struct task_struct **reader_tasks;
  126. static struct task_struct **nocb_tasks;
  127. static struct task_struct *stats_task;
  128. static struct task_struct *fqs_task;
  129. static struct task_struct *boost_tasks[NR_CPUS];
  130. static struct task_struct *stall_task;
  131. static struct task_struct **fwd_prog_tasks;
  132. static struct task_struct **barrier_cbs_tasks;
  133. static struct task_struct *barrier_task;
  134. static struct task_struct *read_exit_task;
  135. #define RCU_TORTURE_PIPE_LEN 10
  136. // Mailbox-like structure to check RCU global memory ordering.
  137. struct rcu_torture_reader_check {
  138. unsigned long rtc_myloops;
  139. int rtc_chkrdr;
  140. unsigned long rtc_chkloops;
  141. int rtc_ready;
  142. struct rcu_torture_reader_check *rtc_assigner;
  143. } ____cacheline_internodealigned_in_smp;
  144. // Update-side data structure used to check RCU readers.
  145. struct rcu_torture {
  146. struct rcu_head rtort_rcu;
  147. int rtort_pipe_count;
  148. struct list_head rtort_free;
  149. int rtort_mbtest;
  150. struct rcu_torture_reader_check *rtort_chkp;
  151. };
  152. static LIST_HEAD(rcu_torture_freelist);
  153. static struct rcu_torture __rcu *rcu_torture_current;
  154. static unsigned long rcu_torture_current_version;
  155. static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
  156. static DEFINE_SPINLOCK(rcu_torture_lock);
  157. static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
  158. static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
  159. static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
  160. static struct rcu_torture_reader_check *rcu_torture_reader_mbchk;
  161. static atomic_t n_rcu_torture_alloc;
  162. static atomic_t n_rcu_torture_alloc_fail;
  163. static atomic_t n_rcu_torture_free;
  164. static atomic_t n_rcu_torture_mberror;
  165. static atomic_t n_rcu_torture_mbchk_fail;
  166. static atomic_t n_rcu_torture_mbchk_tries;
  167. static atomic_t n_rcu_torture_error;
  168. static long n_rcu_torture_barrier_error;
  169. static long n_rcu_torture_boost_ktrerror;
  170. static long n_rcu_torture_boost_rterror;
  171. static long n_rcu_torture_boost_failure;
  172. static long n_rcu_torture_boosts;
  173. static atomic_long_t n_rcu_torture_timers;
  174. static long n_barrier_attempts;
  175. static long n_barrier_successes; /* did rcu_barrier test succeed? */
  176. static unsigned long n_read_exits;
  177. static struct list_head rcu_torture_removed;
  178. static unsigned long shutdown_jiffies;
  179. static unsigned long start_gp_seq;
  180. static atomic_long_t n_nocb_offload;
  181. static atomic_long_t n_nocb_deoffload;
  182. static int rcu_torture_writer_state;
  183. #define RTWS_FIXED_DELAY 0
  184. #define RTWS_DELAY 1
  185. #define RTWS_REPLACE 2
  186. #define RTWS_DEF_FREE 3
  187. #define RTWS_EXP_SYNC 4
  188. #define RTWS_COND_GET 5
  189. #define RTWS_COND_GET_FULL 6
  190. #define RTWS_COND_GET_EXP 7
  191. #define RTWS_COND_GET_EXP_FULL 8
  192. #define RTWS_COND_SYNC 9
  193. #define RTWS_COND_SYNC_FULL 10
  194. #define RTWS_COND_SYNC_EXP 11
  195. #define RTWS_COND_SYNC_EXP_FULL 12
  196. #define RTWS_POLL_GET 13
  197. #define RTWS_POLL_GET_FULL 14
  198. #define RTWS_POLL_GET_EXP 15
  199. #define RTWS_POLL_GET_EXP_FULL 16
  200. #define RTWS_POLL_WAIT 17
  201. #define RTWS_POLL_WAIT_FULL 18
  202. #define RTWS_POLL_WAIT_EXP 19
  203. #define RTWS_POLL_WAIT_EXP_FULL 20
  204. #define RTWS_SYNC 21
  205. #define RTWS_STUTTER 22
  206. #define RTWS_STOPPING 23
  207. static const char * const rcu_torture_writer_state_names[] = {
  208. "RTWS_FIXED_DELAY",
  209. "RTWS_DELAY",
  210. "RTWS_REPLACE",
  211. "RTWS_DEF_FREE",
  212. "RTWS_EXP_SYNC",
  213. "RTWS_COND_GET",
  214. "RTWS_COND_GET_FULL",
  215. "RTWS_COND_GET_EXP",
  216. "RTWS_COND_GET_EXP_FULL",
  217. "RTWS_COND_SYNC",
  218. "RTWS_COND_SYNC_FULL",
  219. "RTWS_COND_SYNC_EXP",
  220. "RTWS_COND_SYNC_EXP_FULL",
  221. "RTWS_POLL_GET",
  222. "RTWS_POLL_GET_FULL",
  223. "RTWS_POLL_GET_EXP",
  224. "RTWS_POLL_GET_EXP_FULL",
  225. "RTWS_POLL_WAIT",
  226. "RTWS_POLL_WAIT_FULL",
  227. "RTWS_POLL_WAIT_EXP",
  228. "RTWS_POLL_WAIT_EXP_FULL",
  229. "RTWS_SYNC",
  230. "RTWS_STUTTER",
  231. "RTWS_STOPPING",
  232. };
  233. /* Record reader segment types and duration for first failing read. */
  234. struct rt_read_seg {
  235. int rt_readstate;
  236. unsigned long rt_delay_jiffies;
  237. unsigned long rt_delay_ms;
  238. unsigned long rt_delay_us;
  239. bool rt_preempted;
  240. };
  241. static int err_segs_recorded;
  242. static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
  243. static int rt_read_nsegs;
  244. static const char *rcu_torture_writer_state_getname(void)
  245. {
  246. unsigned int i = READ_ONCE(rcu_torture_writer_state);
  247. if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
  248. return "???";
  249. return rcu_torture_writer_state_names[i];
  250. }
  251. #ifdef CONFIG_RCU_TRACE
  252. static u64 notrace rcu_trace_clock_local(void)
  253. {
  254. u64 ts = trace_clock_local();
  255. (void)do_div(ts, NSEC_PER_USEC);
  256. return ts;
  257. }
  258. #else /* #ifdef CONFIG_RCU_TRACE */
  259. static u64 notrace rcu_trace_clock_local(void)
  260. {
  261. return 0ULL;
  262. }
  263. #endif /* #else #ifdef CONFIG_RCU_TRACE */
  264. /*
  265. * Stop aggressive CPU-hog tests a bit before the end of the test in order
  266. * to avoid interfering with test shutdown.
  267. */
  268. static bool shutdown_time_arrived(void)
  269. {
  270. return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
  271. }
  272. static unsigned long boost_starttime; /* jiffies of next boost test start. */
  273. static DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */
  274. /* and boost task create/destroy. */
  275. static atomic_t barrier_cbs_count; /* Barrier callbacks registered. */
  276. static bool barrier_phase; /* Test phase. */
  277. static atomic_t barrier_cbs_invoked; /* Barrier callbacks invoked. */
  278. static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
  279. static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);
  280. static atomic_t rcu_fwd_cb_nodelay; /* Short rcu_torture_delay() delays. */
  281. /*
  282. * Allocate an element from the rcu_tortures pool.
  283. */
  284. static struct rcu_torture *
  285. rcu_torture_alloc(void)
  286. {
  287. struct list_head *p;
  288. spin_lock_bh(&rcu_torture_lock);
  289. if (list_empty(&rcu_torture_freelist)) {
  290. atomic_inc(&n_rcu_torture_alloc_fail);
  291. spin_unlock_bh(&rcu_torture_lock);
  292. return NULL;
  293. }
  294. atomic_inc(&n_rcu_torture_alloc);
  295. p = rcu_torture_freelist.next;
  296. list_del_init(p);
  297. spin_unlock_bh(&rcu_torture_lock);
  298. return container_of(p, struct rcu_torture, rtort_free);
  299. }
  300. /*
  301. * Free an element to the rcu_tortures pool.
  302. */
  303. static void
  304. rcu_torture_free(struct rcu_torture *p)
  305. {
  306. atomic_inc(&n_rcu_torture_free);
  307. spin_lock_bh(&rcu_torture_lock);
  308. list_add_tail(&p->rtort_free, &rcu_torture_freelist);
  309. spin_unlock_bh(&rcu_torture_lock);
  310. }
  311. /*
  312. * Operations vector for selecting different types of tests.
  313. */
  314. struct rcu_torture_ops {
  315. int ttype;
  316. void (*init)(void);
  317. void (*cleanup)(void);
  318. int (*readlock)(void);
  319. void (*read_delay)(struct torture_random_state *rrsp,
  320. struct rt_read_seg *rtrsp);
  321. void (*readunlock)(int idx);
  322. int (*readlock_held)(void);
  323. unsigned long (*get_gp_seq)(void);
  324. unsigned long (*gp_diff)(unsigned long new, unsigned long old);
  325. void (*deferred_free)(struct rcu_torture *p);
  326. void (*sync)(void);
  327. void (*exp_sync)(void);
  328. unsigned long (*get_gp_state_exp)(void);
  329. unsigned long (*start_gp_poll_exp)(void);
  330. void (*start_gp_poll_exp_full)(struct rcu_gp_oldstate *rgosp);
  331. bool (*poll_gp_state_exp)(unsigned long oldstate);
  332. void (*cond_sync_exp)(unsigned long oldstate);
  333. void (*cond_sync_exp_full)(struct rcu_gp_oldstate *rgosp);
  334. unsigned long (*get_gp_state)(void);
  335. void (*get_gp_state_full)(struct rcu_gp_oldstate *rgosp);
  336. unsigned long (*get_gp_completed)(void);
  337. void (*get_gp_completed_full)(struct rcu_gp_oldstate *rgosp);
  338. unsigned long (*start_gp_poll)(void);
  339. void (*start_gp_poll_full)(struct rcu_gp_oldstate *rgosp);
  340. bool (*poll_gp_state)(unsigned long oldstate);
  341. bool (*poll_gp_state_full)(struct rcu_gp_oldstate *rgosp);
  342. bool (*poll_need_2gp)(bool poll, bool poll_full);
  343. void (*cond_sync)(unsigned long oldstate);
  344. void (*cond_sync_full)(struct rcu_gp_oldstate *rgosp);
  345. call_rcu_func_t call;
  346. void (*cb_barrier)(void);
  347. void (*fqs)(void);
  348. void (*stats)(void);
  349. void (*gp_kthread_dbg)(void);
  350. bool (*check_boost_failed)(unsigned long gp_state, int *cpup);
  351. int (*stall_dur)(void);
  352. long cbflood_max;
  353. int irq_capable;
  354. int can_boost;
  355. int extendables;
  356. int slow_gps;
  357. int no_pi_lock;
  358. const char *name;
  359. };
  360. static struct rcu_torture_ops *cur_ops;
  361. /*
  362. * Definitions for rcu torture testing.
  363. */
  364. static int torture_readlock_not_held(void)
  365. {
  366. return rcu_read_lock_bh_held() || rcu_read_lock_sched_held();
  367. }
  368. static int rcu_torture_read_lock(void) __acquires(RCU)
  369. {
  370. rcu_read_lock();
  371. return 0;
  372. }
  373. static void
  374. rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
  375. {
  376. unsigned long started;
  377. unsigned long completed;
  378. const unsigned long shortdelay_us = 200;
  379. unsigned long longdelay_ms = 300;
  380. unsigned long long ts;
  381. /* We want a short delay sometimes to make a reader delay the grace
  382. * period, and we want a long delay occasionally to trigger
  383. * force_quiescent_state. */
  384. if (!atomic_read(&rcu_fwd_cb_nodelay) &&
  385. !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
  386. started = cur_ops->get_gp_seq();
  387. ts = rcu_trace_clock_local();
  388. if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
  389. longdelay_ms = 5; /* Avoid triggering BH limits. */
  390. mdelay(longdelay_ms);
  391. rtrsp->rt_delay_ms = longdelay_ms;
  392. completed = cur_ops->get_gp_seq();
  393. do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
  394. started, completed);
  395. }
  396. if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
  397. udelay(shortdelay_us);
  398. rtrsp->rt_delay_us = shortdelay_us;
  399. }
  400. if (!preempt_count() &&
  401. !(torture_random(rrsp) % (nrealreaders * 500))) {
  402. torture_preempt_schedule(); /* QS only if preemptible. */
  403. rtrsp->rt_preempted = true;
  404. }
  405. }
  406. static void rcu_torture_read_unlock(int idx) __releases(RCU)
  407. {
  408. rcu_read_unlock();
  409. }
  410. /*
  411. * Update callback in the pipe. This should be invoked after a grace period.
  412. */
  413. static bool
  414. rcu_torture_pipe_update_one(struct rcu_torture *rp)
  415. {
  416. int i;
  417. struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp);
  418. if (rtrcp) {
  419. WRITE_ONCE(rp->rtort_chkp, NULL);
  420. smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire().
  421. }
  422. i = READ_ONCE(rp->rtort_pipe_count);
  423. if (i > RCU_TORTURE_PIPE_LEN)
  424. i = RCU_TORTURE_PIPE_LEN;
  425. atomic_inc(&rcu_torture_wcount[i]);
  426. WRITE_ONCE(rp->rtort_pipe_count, i + 1);
  427. if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
  428. rp->rtort_mbtest = 0;
  429. return true;
  430. }
  431. return false;
  432. }
  433. /*
  434. * Update all callbacks in the pipe. Suitable for synchronous grace-period
  435. * primitives.
  436. */
  437. static void
  438. rcu_torture_pipe_update(struct rcu_torture *old_rp)
  439. {
  440. struct rcu_torture *rp;
  441. struct rcu_torture *rp1;
  442. if (old_rp)
  443. list_add(&old_rp->rtort_free, &rcu_torture_removed);
  444. list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
  445. if (rcu_torture_pipe_update_one(rp)) {
  446. list_del(&rp->rtort_free);
  447. rcu_torture_free(rp);
  448. }
  449. }
  450. }
  451. static void
  452. rcu_torture_cb(struct rcu_head *p)
  453. {
  454. struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
  455. if (torture_must_stop_irq()) {
  456. /* Test is ending, just drop callbacks on the floor. */
  457. /* The next initialization will pick up the pieces. */
  458. return;
  459. }
  460. if (rcu_torture_pipe_update_one(rp))
  461. rcu_torture_free(rp);
  462. else
  463. cur_ops->deferred_free(rp);
  464. }
  465. static unsigned long rcu_no_completed(void)
  466. {
  467. return 0;
  468. }
  469. static void rcu_torture_deferred_free(struct rcu_torture *p)
  470. {
  471. call_rcu_hurry(&p->rtort_rcu, rcu_torture_cb);
  472. }
  473. static void rcu_sync_torture_init(void)
  474. {
  475. INIT_LIST_HEAD(&rcu_torture_removed);
  476. }
  477. static bool rcu_poll_need_2gp(bool poll, bool poll_full)
  478. {
  479. return poll;
  480. }
  481. static struct rcu_torture_ops rcu_ops = {
  482. .ttype = RCU_FLAVOR,
  483. .init = rcu_sync_torture_init,
  484. .readlock = rcu_torture_read_lock,
  485. .read_delay = rcu_read_delay,
  486. .readunlock = rcu_torture_read_unlock,
  487. .readlock_held = torture_readlock_not_held,
  488. .get_gp_seq = rcu_get_gp_seq,
  489. .gp_diff = rcu_seq_diff,
  490. .deferred_free = rcu_torture_deferred_free,
  491. .sync = synchronize_rcu,
  492. .exp_sync = synchronize_rcu_expedited,
  493. .get_gp_state = get_state_synchronize_rcu,
  494. .get_gp_state_full = get_state_synchronize_rcu_full,
  495. .get_gp_completed = get_completed_synchronize_rcu,
  496. .get_gp_completed_full = get_completed_synchronize_rcu_full,
  497. .start_gp_poll = start_poll_synchronize_rcu,
  498. .start_gp_poll_full = start_poll_synchronize_rcu_full,
  499. .poll_gp_state = poll_state_synchronize_rcu,
  500. .poll_gp_state_full = poll_state_synchronize_rcu_full,
  501. .poll_need_2gp = rcu_poll_need_2gp,
  502. .cond_sync = cond_synchronize_rcu,
  503. .cond_sync_full = cond_synchronize_rcu_full,
  504. .get_gp_state_exp = get_state_synchronize_rcu,
  505. .start_gp_poll_exp = start_poll_synchronize_rcu_expedited,
  506. .start_gp_poll_exp_full = start_poll_synchronize_rcu_expedited_full,
  507. .poll_gp_state_exp = poll_state_synchronize_rcu,
  508. .cond_sync_exp = cond_synchronize_rcu_expedited,
  509. .call = call_rcu_hurry,
  510. .cb_barrier = rcu_barrier,
  511. .fqs = rcu_force_quiescent_state,
  512. .stats = NULL,
  513. .gp_kthread_dbg = show_rcu_gp_kthreads,
  514. .check_boost_failed = rcu_check_boost_fail,
  515. .stall_dur = rcu_jiffies_till_stall_check,
  516. .irq_capable = 1,
  517. .can_boost = IS_ENABLED(CONFIG_RCU_BOOST),
  518. .extendables = RCUTORTURE_MAX_EXTEND,
  519. .name = "rcu"
  520. };
  521. /*
  522. * Don't even think about trying any of these in real life!!!
  523. * The names include "busted", and they really mean it!
  524. * The only purpose of these functions is to provide a buggy RCU
  525. * implementation to make sure that rcutorture correctly emits
  526. * buggy-RCU error messages.
  527. */
  528. static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
  529. {
  530. /* This is a deliberate bug for testing purposes only! */
  531. rcu_torture_cb(&p->rtort_rcu);
  532. }
  533. static void synchronize_rcu_busted(void)
  534. {
  535. /* This is a deliberate bug for testing purposes only! */
  536. }
  537. static void
  538. call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
  539. {
  540. /* This is a deliberate bug for testing purposes only! */
  541. func(head);
  542. }
  543. static struct rcu_torture_ops rcu_busted_ops = {
  544. .ttype = INVALID_RCU_FLAVOR,
  545. .init = rcu_sync_torture_init,
  546. .readlock = rcu_torture_read_lock,
  547. .read_delay = rcu_read_delay, /* just reuse rcu's version. */
  548. .readunlock = rcu_torture_read_unlock,
  549. .readlock_held = torture_readlock_not_held,
  550. .get_gp_seq = rcu_no_completed,
  551. .deferred_free = rcu_busted_torture_deferred_free,
  552. .sync = synchronize_rcu_busted,
  553. .exp_sync = synchronize_rcu_busted,
  554. .call = call_rcu_busted,
  555. .cb_barrier = NULL,
  556. .fqs = NULL,
  557. .stats = NULL,
  558. .irq_capable = 1,
  559. .name = "busted"
  560. };
  561. /*
  562. * Definitions for srcu torture testing.
  563. */
  564. DEFINE_STATIC_SRCU(srcu_ctl);
  565. static struct srcu_struct srcu_ctld;
  566. static struct srcu_struct *srcu_ctlp = &srcu_ctl;
  567. static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
  568. {
  569. return srcu_read_lock(srcu_ctlp);
  570. }
  571. static void
  572. srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
  573. {
  574. long delay;
  575. const long uspertick = 1000000 / HZ;
  576. const long longdelay = 10;
  577. /* We want there to be long-running readers, but not all the time. */
  578. delay = torture_random(rrsp) %
  579. (nrealreaders * 2 * longdelay * uspertick);
  580. if (!delay && in_task()) {
  581. schedule_timeout_interruptible(longdelay);
  582. rtrsp->rt_delay_jiffies = longdelay;
  583. } else {
  584. rcu_read_delay(rrsp, rtrsp);
  585. }
  586. }
  587. static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
  588. {
  589. srcu_read_unlock(srcu_ctlp, idx);
  590. }
  591. static int torture_srcu_read_lock_held(void)
  592. {
  593. return srcu_read_lock_held(srcu_ctlp);
  594. }
  595. static unsigned long srcu_torture_completed(void)
  596. {
  597. return srcu_batches_completed(srcu_ctlp);
  598. }
  599. static void srcu_torture_deferred_free(struct rcu_torture *rp)
  600. {
  601. call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
  602. }
  603. static void srcu_torture_synchronize(void)
  604. {
  605. synchronize_srcu(srcu_ctlp);
  606. }
  607. static unsigned long srcu_torture_get_gp_state(void)
  608. {
  609. return get_state_synchronize_srcu(srcu_ctlp);
  610. }
  611. static unsigned long srcu_torture_start_gp_poll(void)
  612. {
  613. return start_poll_synchronize_srcu(srcu_ctlp);
  614. }
  615. static bool srcu_torture_poll_gp_state(unsigned long oldstate)
  616. {
  617. return poll_state_synchronize_srcu(srcu_ctlp, oldstate);
  618. }
  619. static void srcu_torture_call(struct rcu_head *head,
  620. rcu_callback_t func)
  621. {
  622. call_srcu(srcu_ctlp, head, func);
  623. }
  624. static void srcu_torture_barrier(void)
  625. {
  626. srcu_barrier(srcu_ctlp);
  627. }
  628. static void srcu_torture_stats(void)
  629. {
  630. srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
  631. }
  632. static void srcu_torture_synchronize_expedited(void)
  633. {
  634. synchronize_srcu_expedited(srcu_ctlp);
  635. }
  636. static struct rcu_torture_ops srcu_ops = {
  637. .ttype = SRCU_FLAVOR,
  638. .init = rcu_sync_torture_init,
  639. .readlock = srcu_torture_read_lock,
  640. .read_delay = srcu_read_delay,
  641. .readunlock = srcu_torture_read_unlock,
  642. .readlock_held = torture_srcu_read_lock_held,
  643. .get_gp_seq = srcu_torture_completed,
  644. .deferred_free = srcu_torture_deferred_free,
  645. .sync = srcu_torture_synchronize,
  646. .exp_sync = srcu_torture_synchronize_expedited,
  647. .get_gp_state = srcu_torture_get_gp_state,
  648. .start_gp_poll = srcu_torture_start_gp_poll,
  649. .poll_gp_state = srcu_torture_poll_gp_state,
  650. .call = srcu_torture_call,
  651. .cb_barrier = srcu_torture_barrier,
  652. .stats = srcu_torture_stats,
  653. .cbflood_max = 50000,
  654. .irq_capable = 1,
  655. .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
  656. .name = "srcu"
  657. };
  658. static void srcu_torture_init(void)
  659. {
  660. rcu_sync_torture_init();
  661. WARN_ON(init_srcu_struct(&srcu_ctld));
  662. srcu_ctlp = &srcu_ctld;
  663. }
  664. static void srcu_torture_cleanup(void)
  665. {
  666. cleanup_srcu_struct(&srcu_ctld);
  667. srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
  668. }
  669. /* As above, but dynamically allocated. */
  670. static struct rcu_torture_ops srcud_ops = {
  671. .ttype = SRCU_FLAVOR,
  672. .init = srcu_torture_init,
  673. .cleanup = srcu_torture_cleanup,
  674. .readlock = srcu_torture_read_lock,
  675. .read_delay = srcu_read_delay,
  676. .readunlock = srcu_torture_read_unlock,
  677. .readlock_held = torture_srcu_read_lock_held,
  678. .get_gp_seq = srcu_torture_completed,
  679. .deferred_free = srcu_torture_deferred_free,
  680. .sync = srcu_torture_synchronize,
  681. .exp_sync = srcu_torture_synchronize_expedited,
  682. .get_gp_state = srcu_torture_get_gp_state,
  683. .start_gp_poll = srcu_torture_start_gp_poll,
  684. .poll_gp_state = srcu_torture_poll_gp_state,
  685. .call = srcu_torture_call,
  686. .cb_barrier = srcu_torture_barrier,
  687. .stats = srcu_torture_stats,
  688. .cbflood_max = 50000,
  689. .irq_capable = 1,
  690. .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
  691. .name = "srcud"
  692. };
  693. /* As above, but broken due to inappropriate reader extension. */
  694. static struct rcu_torture_ops busted_srcud_ops = {
  695. .ttype = SRCU_FLAVOR,
  696. .init = srcu_torture_init,
  697. .cleanup = srcu_torture_cleanup,
  698. .readlock = srcu_torture_read_lock,
  699. .read_delay = rcu_read_delay,
  700. .readunlock = srcu_torture_read_unlock,
  701. .readlock_held = torture_srcu_read_lock_held,
  702. .get_gp_seq = srcu_torture_completed,
  703. .deferred_free = srcu_torture_deferred_free,
  704. .sync = srcu_torture_synchronize,
  705. .exp_sync = srcu_torture_synchronize_expedited,
  706. .call = srcu_torture_call,
  707. .cb_barrier = srcu_torture_barrier,
  708. .stats = srcu_torture_stats,
  709. .irq_capable = 1,
  710. .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
  711. .extendables = RCUTORTURE_MAX_EXTEND,
  712. .name = "busted_srcud"
  713. };
  714. /*
  715. * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
  716. * This implementation does not necessarily work well with CPU hotplug.
  717. */
  718. static void synchronize_rcu_trivial(void)
  719. {
  720. int cpu;
  721. for_each_online_cpu(cpu) {
  722. rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
  723. WARN_ON_ONCE(raw_smp_processor_id() != cpu);
  724. }
  725. }
  726. static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
  727. {
  728. preempt_disable();
  729. return 0;
  730. }
  731. static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
  732. {
  733. preempt_enable();
  734. }
  735. static struct rcu_torture_ops trivial_ops = {
  736. .ttype = RCU_TRIVIAL_FLAVOR,
  737. .init = rcu_sync_torture_init,
  738. .readlock = rcu_torture_read_lock_trivial,
  739. .read_delay = rcu_read_delay, /* just reuse rcu's version. */
  740. .readunlock = rcu_torture_read_unlock_trivial,
  741. .readlock_held = torture_readlock_not_held,
  742. .get_gp_seq = rcu_no_completed,
  743. .sync = synchronize_rcu_trivial,
  744. .exp_sync = synchronize_rcu_trivial,
  745. .fqs = NULL,
  746. .stats = NULL,
  747. .irq_capable = 1,
  748. .name = "trivial"
  749. };
  750. #ifdef CONFIG_TASKS_RCU
  751. /*
  752. * Definitions for RCU-tasks torture testing.
  753. */
  754. static int tasks_torture_read_lock(void)
  755. {
  756. return 0;
  757. }
  758. static void tasks_torture_read_unlock(int idx)
  759. {
  760. }
  761. static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
  762. {
  763. call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
  764. }
  765. static void synchronize_rcu_mult_test(void)
  766. {
  767. synchronize_rcu_mult(call_rcu_tasks, call_rcu_hurry);
  768. }
  769. static struct rcu_torture_ops tasks_ops = {
  770. .ttype = RCU_TASKS_FLAVOR,
  771. .init = rcu_sync_torture_init,
  772. .readlock = tasks_torture_read_lock,
  773. .read_delay = rcu_read_delay, /* just reuse rcu's version. */
  774. .readunlock = tasks_torture_read_unlock,
  775. .get_gp_seq = rcu_no_completed,
  776. .deferred_free = rcu_tasks_torture_deferred_free,
  777. .sync = synchronize_rcu_tasks,
  778. .exp_sync = synchronize_rcu_mult_test,
  779. .call = call_rcu_tasks,
  780. .cb_barrier = rcu_barrier_tasks,
  781. .gp_kthread_dbg = show_rcu_tasks_classic_gp_kthread,
  782. .fqs = NULL,
  783. .stats = NULL,
  784. .irq_capable = 1,
  785. .slow_gps = 1,
  786. .name = "tasks"
  787. };
  788. #define TASKS_OPS &tasks_ops,
  789. #else // #ifdef CONFIG_TASKS_RCU
  790. #define TASKS_OPS
  791. #endif // #else #ifdef CONFIG_TASKS_RCU
  792. #ifdef CONFIG_TASKS_RUDE_RCU
  793. /*
  794. * Definitions for rude RCU-tasks torture testing.
  795. */
  796. static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p)
  797. {
  798. call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb);
  799. }
  800. static struct rcu_torture_ops tasks_rude_ops = {
  801. .ttype = RCU_TASKS_RUDE_FLAVOR,
  802. .init = rcu_sync_torture_init,
  803. .readlock = rcu_torture_read_lock_trivial,
  804. .read_delay = rcu_read_delay, /* just reuse rcu's version. */
  805. .readunlock = rcu_torture_read_unlock_trivial,
  806. .get_gp_seq = rcu_no_completed,
  807. .deferred_free = rcu_tasks_rude_torture_deferred_free,
  808. .sync = synchronize_rcu_tasks_rude,
  809. .exp_sync = synchronize_rcu_tasks_rude,
  810. .call = call_rcu_tasks_rude,
  811. .cb_barrier = rcu_barrier_tasks_rude,
  812. .gp_kthread_dbg = show_rcu_tasks_rude_gp_kthread,
  813. .cbflood_max = 50000,
  814. .fqs = NULL,
  815. .stats = NULL,
  816. .irq_capable = 1,
  817. .name = "tasks-rude"
  818. };
  819. #define TASKS_RUDE_OPS &tasks_rude_ops,
  820. #else // #ifdef CONFIG_TASKS_RUDE_RCU
  821. #define TASKS_RUDE_OPS
  822. #endif // #else #ifdef CONFIG_TASKS_RUDE_RCU
  823. #ifdef CONFIG_TASKS_TRACE_RCU
  824. /*
  825. * Definitions for tracing RCU-tasks torture testing.
  826. */
  827. static int tasks_tracing_torture_read_lock(void)
  828. {
  829. rcu_read_lock_trace();
  830. return 0;
  831. }
  832. static void tasks_tracing_torture_read_unlock(int idx)
  833. {
  834. rcu_read_unlock_trace();
  835. }
  836. static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
  837. {
  838. call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
  839. }
  840. static struct rcu_torture_ops tasks_tracing_ops = {
  841. .ttype = RCU_TASKS_TRACING_FLAVOR,
  842. .init = rcu_sync_torture_init,
  843. .readlock = tasks_tracing_torture_read_lock,
  844. .read_delay = srcu_read_delay, /* just reuse srcu's version. */
  845. .readunlock = tasks_tracing_torture_read_unlock,
  846. .readlock_held = rcu_read_lock_trace_held,
  847. .get_gp_seq = rcu_no_completed,
  848. .deferred_free = rcu_tasks_tracing_torture_deferred_free,
  849. .sync = synchronize_rcu_tasks_trace,
  850. .exp_sync = synchronize_rcu_tasks_trace,
  851. .call = call_rcu_tasks_trace,
  852. .cb_barrier = rcu_barrier_tasks_trace,
  853. .gp_kthread_dbg = show_rcu_tasks_trace_gp_kthread,
  854. .cbflood_max = 50000,
  855. .fqs = NULL,
  856. .stats = NULL,
  857. .irq_capable = 1,
  858. .slow_gps = 1,
  859. .name = "tasks-tracing"
  860. };
  861. #define TASKS_TRACING_OPS &tasks_tracing_ops,
  862. #else // #ifdef CONFIG_TASKS_TRACE_RCU
  863. #define TASKS_TRACING_OPS
  864. #endif // #else #ifdef CONFIG_TASKS_TRACE_RCU
  865. static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
  866. {
  867. if (!cur_ops->gp_diff)
  868. return new - old;
  869. return cur_ops->gp_diff(new, old);
  870. }
  871. /*
  872. * RCU torture priority-boost testing. Runs one real-time thread per
  873. * CPU for moderate bursts, repeatedly starting grace periods and waiting
  874. * for them to complete. If a given grace period takes too long, we assume
  875. * that priority inversion has occurred.
  876. */
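/*
 * Illustrative example (assumed typical usage, not from the original
 * source): booting with rcutorture.test_boost=2
 * rcutorture.test_boost_duration=4 rcutorture.test_boost_interval=7
 * requests unconditional boost testing, with four-second boost intervals
 * spaced seven seconds apart, per the test_boost* parameters defined above.
 */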
  877. static int old_rt_runtime = -1;
  878. static void rcu_torture_disable_rt_throttle(void)
  879. {
  880. /*
  881. * Disable RT throttling so that rcutorture's boost threads don't get
  882. * throttled. Only possible if rcutorture is built-in, otherwise the
  883. * user should manually do this by setting the sched_rt_period_us and
  884. * sched_rt_runtime sysctls.
  885. */
  886. if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
  887. return;
  888. old_rt_runtime = sysctl_sched_rt_runtime;
  889. sysctl_sched_rt_runtime = -1;
  890. }
  891. static void rcu_torture_enable_rt_throttle(void)
  892. {
  893. if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
  894. return;
  895. sysctl_sched_rt_runtime = old_rt_runtime;
  896. old_rt_runtime = -1;
  897. }
  898. static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start)
  899. {
  900. int cpu;
  901. static int dbg_done;
  902. unsigned long end = jiffies;
  903. bool gp_done;
  904. unsigned long j;
  905. static unsigned long last_persist;
  906. unsigned long lp;
  907. unsigned long mininterval = test_boost_duration * HZ - HZ / 2;
  908. if (end - *start > mininterval) {
  909. // Recheck after checking time to avoid false positives.
  910. smp_mb(); // Time check before grace-period check.
  911. if (cur_ops->poll_gp_state(gp_state))
  912. return false; // passed, though perhaps just barely
  913. if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) {
  914. // At most one persisted message per boost test.
  915. j = jiffies;
  916. lp = READ_ONCE(last_persist);
  917. if (time_after(j, lp + mininterval) && cmpxchg(&last_persist, lp, j) == lp)
  918. pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu);
  919. return false; // passed on a technicality
  920. }
  921. VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
  922. n_rcu_torture_boost_failure++;
  923. if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) {
  924. pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n",
  925. current->rt_priority, gp_state, end - *start);
  926. cur_ops->gp_kthread_dbg();
  927. // Recheck after print to flag grace period ending during splat.
  928. gp_done = cur_ops->poll_gp_state(gp_state);
  929. pr_info("Boost inversion: GP %lu %s.\n", gp_state,
  930. gp_done ? "ended already" : "still pending");
  931. }
  932. return true; // failed
  933. } else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) {
  934. *start = jiffies;
  935. }
  936. return false; // passed
  937. }
  938. static int rcu_torture_boost(void *arg)
  939. {
  940. unsigned long endtime;
  941. unsigned long gp_state;
  942. unsigned long gp_state_time;
  943. unsigned long oldstarttime;
  944. VERBOSE_TOROUT_STRING("rcu_torture_boost started");
  945. /* Set real-time priority. */
  946. sched_set_fifo_low(current);
  947. /* Each pass through the following loop does one boost-test cycle. */
  948. do {
  949. bool failed = false; // Test failed already in this test interval
  950. bool gp_initiated = false;
  951. if (kthread_should_stop())
  952. goto checkwait;
  953. /* Wait for the next test interval. */
  954. oldstarttime = READ_ONCE(boost_starttime);
  955. while (time_before(jiffies, oldstarttime)) {
  956. schedule_timeout_interruptible(oldstarttime - jiffies);
  957. if (stutter_wait("rcu_torture_boost"))
  958. sched_set_fifo_low(current);
  959. if (torture_must_stop())
  960. goto checkwait;
  961. }
  962. // Do one boost-test interval.
  963. endtime = oldstarttime + test_boost_duration * HZ;
  964. while (time_before(jiffies, endtime)) {
  965. // Has current GP gone too long?
  966. if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
  967. failed = rcu_torture_boost_failed(gp_state, &gp_state_time);
  968. // If we don't have a grace period in flight, start one.
  969. if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) {
  970. gp_state = cur_ops->start_gp_poll();
  971. gp_initiated = true;
  972. gp_state_time = jiffies;
  973. }
  974. if (stutter_wait("rcu_torture_boost")) {
  975. sched_set_fifo_low(current);
  976. // If the grace period already ended,
  977. // we don't know when that happened, so
  978. // start over.
  979. if (cur_ops->poll_gp_state(gp_state))
  980. gp_initiated = false;
  981. }
  982. if (torture_must_stop())
  983. goto checkwait;
  984. }
  985. // In case the grace period extended beyond the end of the loop.
  986. if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
  987. rcu_torture_boost_failed(gp_state, &gp_state_time);
  988. /*
  989. * Set the start time of the next test interval.
  990. * Yes, this is vulnerable to long delays, but such
  991. * delays simply cause a false negative for the next
  992. * interval. Besides, we are running at RT priority,
  993. * so delays should be relatively rare.
  994. */
  995. while (oldstarttime == READ_ONCE(boost_starttime) && !kthread_should_stop()) {
  996. if (mutex_trylock(&boost_mutex)) {
  997. if (oldstarttime == boost_starttime) {
  998. WRITE_ONCE(boost_starttime,
  999. jiffies + test_boost_interval * HZ);
  1000. n_rcu_torture_boosts++;
  1001. }
  1002. mutex_unlock(&boost_mutex);
  1003. break;
  1004. }
  1005. schedule_timeout_uninterruptible(1);
  1006. }
  1007. /* Go do the stutter. */
  1008. checkwait: if (stutter_wait("rcu_torture_boost"))
  1009. sched_set_fifo_low(current);
  1010. } while (!torture_must_stop());
  1011. /* Clean up and exit. */
  1012. while (!kthread_should_stop()) {
  1013. torture_shutdown_absorb("rcu_torture_boost");
  1014. schedule_timeout_uninterruptible(1);
  1015. }
  1016. torture_kthread_stopping("rcu_torture_boost");
  1017. return 0;
  1018. }
  1019. /*
  1020. * RCU torture force-quiescent-state kthread. Repeatedly induces
  1021. * bursts of calls to force_quiescent_state(), increasing the probability
  1022. * of occurrence of some important types of race conditions.
  1023. */
  1024. static int
  1025. rcu_torture_fqs(void *arg)
  1026. {
  1027. unsigned long fqs_resume_time;
  1028. int fqs_burst_remaining;
  1029. int oldnice = task_nice(current);
  1030. VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
  1031. do {
  1032. fqs_resume_time = jiffies + fqs_stutter * HZ;
  1033. while (time_before(jiffies, fqs_resume_time) &&
  1034. !kthread_should_stop()) {
  1035. schedule_timeout_interruptible(1);
  1036. }
  1037. fqs_burst_remaining = fqs_duration;
  1038. while (fqs_burst_remaining > 0 &&
  1039. !kthread_should_stop()) {
  1040. cur_ops->fqs();
  1041. udelay(fqs_holdoff);
  1042. fqs_burst_remaining -= fqs_holdoff;
  1043. }
  1044. if (stutter_wait("rcu_torture_fqs"))
  1045. sched_set_normal(current, oldnice);
  1046. } while (!torture_must_stop());
  1047. torture_kthread_stopping("rcu_torture_fqs");
  1048. return 0;
  1049. }
  1050. // Used by writers to randomly choose from the available grace-period primitives.
  1051. static int synctype[ARRAY_SIZE(rcu_torture_writer_state_names)] = { };
  1052. static int nsynctypes;
  1053. /*
  1054. * Determine which grace-period primitives are available.
  1055. */
  1056. static void rcu_torture_write_types(void)
  1057. {
  1058. bool gp_cond1 = gp_cond, gp_cond_exp1 = gp_cond_exp, gp_cond_full1 = gp_cond_full;
  1059. bool gp_cond_exp_full1 = gp_cond_exp_full, gp_exp1 = gp_exp, gp_poll_exp1 = gp_poll_exp;
  1060. bool gp_poll_exp_full1 = gp_poll_exp_full, gp_normal1 = gp_normal, gp_poll1 = gp_poll;
  1061. bool gp_poll_full1 = gp_poll_full, gp_sync1 = gp_sync;
  1062. /* Initialize synctype[] array. If none set, take default. */
  1063. if (!gp_cond1 &&
  1064. !gp_cond_exp1 &&
  1065. !gp_cond_full1 &&
  1066. !gp_cond_exp_full1 &&
  1067. !gp_exp1 &&
  1068. !gp_poll_exp1 &&
  1069. !gp_poll_exp_full1 &&
  1070. !gp_normal1 &&
  1071. !gp_poll1 &&
  1072. !gp_poll_full1 &&
  1073. !gp_sync1) {
  1074. gp_cond1 = true;
  1075. gp_cond_exp1 = true;
  1076. gp_cond_full1 = true;
  1077. gp_cond_exp_full1 = true;
  1078. gp_exp1 = true;
  1079. gp_poll_exp1 = true;
  1080. gp_poll_exp_full1 = true;
  1081. gp_normal1 = true;
  1082. gp_poll1 = true;
  1083. gp_poll_full1 = true;
  1084. gp_sync1 = true;
  1085. }
  1086. if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) {
  1087. synctype[nsynctypes++] = RTWS_COND_GET;
  1088. pr_info("%s: Testing conditional GPs.\n", __func__);
  1089. } else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) {
  1090. pr_alert("%s: gp_cond without primitives.\n", __func__);
  1091. }
  1092. if (gp_cond_exp1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp) {
  1093. synctype[nsynctypes++] = RTWS_COND_GET_EXP;
  1094. pr_info("%s: Testing conditional expedited GPs.\n", __func__);
  1095. } else if (gp_cond_exp && (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp)) {
  1096. pr_alert("%s: gp_cond_exp without primitives.\n", __func__);
  1097. }
  1098. if (gp_cond_full1 && cur_ops->get_gp_state && cur_ops->cond_sync_full) {
  1099. synctype[nsynctypes++] = RTWS_COND_GET_FULL;
  1100. pr_info("%s: Testing conditional full-state GPs.\n", __func__);
  1101. } else if (gp_cond_full && (!cur_ops->get_gp_state || !cur_ops->cond_sync_full)) {
  1102. pr_alert("%s: gp_cond_full without primitives.\n", __func__);
  1103. }
  1104. if (gp_cond_exp_full1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp_full) {
  1105. synctype[nsynctypes++] = RTWS_COND_GET_EXP_FULL;
  1106. pr_info("%s: Testing conditional full-state expedited GPs.\n", __func__);
  1107. } else if (gp_cond_exp_full &&
  1108. (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp_full)) {
  1109. pr_alert("%s: gp_cond_exp_full without primitives.\n", __func__);
  1110. }
  1111. if (gp_exp1 && cur_ops->exp_sync) {
  1112. synctype[nsynctypes++] = RTWS_EXP_SYNC;
  1113. pr_info("%s: Testing expedited GPs.\n", __func__);
  1114. } else if (gp_exp && !cur_ops->exp_sync) {
  1115. pr_alert("%s: gp_exp without primitives.\n", __func__);
  1116. }
  1117. if (gp_normal1 && cur_ops->deferred_free) {
  1118. synctype[nsynctypes++] = RTWS_DEF_FREE;
  1119. pr_info("%s: Testing asynchronous GPs.\n", __func__);
  1120. } else if (gp_normal && !cur_ops->deferred_free) {
  1121. pr_alert("%s: gp_normal without primitives.\n", __func__);
  1122. }
  1123. if (gp_poll1 && cur_ops->start_gp_poll && cur_ops->poll_gp_state) {
  1124. synctype[nsynctypes++] = RTWS_POLL_GET;
  1125. pr_info("%s: Testing polling GPs.\n", __func__);
  1126. } else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) {
  1127. pr_alert("%s: gp_poll without primitives.\n", __func__);
  1128. }
  1129. if (gp_poll_full1 && cur_ops->start_gp_poll_full && cur_ops->poll_gp_state_full) {
  1130. synctype[nsynctypes++] = RTWS_POLL_GET_FULL;
  1131. pr_info("%s: Testing polling full-state GPs.\n", __func__);
  1132. } else if (gp_poll_full && (!cur_ops->start_gp_poll_full || !cur_ops->poll_gp_state_full)) {
  1133. pr_alert("%s: gp_poll_full without primitives.\n", __func__);
  1134. }
  1135. if (gp_poll_exp1 && cur_ops->start_gp_poll_exp && cur_ops->poll_gp_state_exp) {
  1136. synctype[nsynctypes++] = RTWS_POLL_GET_EXP;
  1137. pr_info("%s: Testing polling expedited GPs.\n", __func__);
  1138. } else if (gp_poll_exp && (!cur_ops->start_gp_poll_exp || !cur_ops->poll_gp_state_exp)) {
  1139. pr_alert("%s: gp_poll_exp without primitives.\n", __func__);
  1140. }
  1141. if (gp_poll_exp_full1 && cur_ops->start_gp_poll_exp_full && cur_ops->poll_gp_state_full) {
  1142. synctype[nsynctypes++] = RTWS_POLL_GET_EXP_FULL;
  1143. pr_info("%s: Testing polling full-state expedited GPs.\n", __func__);
  1144. } else if (gp_poll_exp_full &&
  1145. (!cur_ops->start_gp_poll_exp_full || !cur_ops->poll_gp_state_full)) {
  1146. pr_alert("%s: gp_poll_exp_full without primitives.\n", __func__);
  1147. }
  1148. if (gp_sync1 && cur_ops->sync) {
  1149. synctype[nsynctypes++] = RTWS_SYNC;
  1150. pr_info("%s: Testing normal GPs.\n", __func__);
  1151. } else if (gp_sync && !cur_ops->sync) {
  1152. pr_alert("%s: gp_sync without primitives.\n", __func__);
  1153. }
  1154. }
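/*
 * Illustrative usage (hypothetical values, not a recommendation): booting
 * a built-in rcutorture with
 *
 *	rcutorture.gp_cond=1 rcutorture.gp_poll=1
 *
 * restricts the writers to RTWS_COND_GET and RTWS_POLL_GET, provided the
 * selected cur_ops supplies ->get_gp_state/->cond_sync and
 * ->start_gp_poll/->poll_gp_state; a flavor lacking those primitives
 * instead triggers the pr_alert() complaints above.  Leaving every gp_*
 * parameter at its default enables all primitives the flavor supports.
 */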
  1155. /*
  1156. * Do the specified rcu_torture_writer() synchronous grace period,
  1157. * while also testing out the polled APIs. Note well that the single-CPU
  1158. * grace-period optimizations must be accounted for.
  1159. */
  1160. static void do_rtws_sync(struct torture_random_state *trsp, void (*sync)(void))
  1161. {
  1162. unsigned long cookie;
  1163. struct rcu_gp_oldstate cookie_full;
  1164. bool dopoll;
  1165. bool dopoll_full;
  1166. unsigned long r = torture_random(trsp);
  1167. dopoll = cur_ops->get_gp_state && cur_ops->poll_gp_state && !(r & 0x300);
  1168. dopoll_full = cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full && !(r & 0xc00);
  1169. if (dopoll || dopoll_full)
  1170. cpus_read_lock();
  1171. if (dopoll)
  1172. cookie = cur_ops->get_gp_state();
  1173. if (dopoll_full)
  1174. cur_ops->get_gp_state_full(&cookie_full);
  1175. if (cur_ops->poll_need_2gp && cur_ops->poll_need_2gp(dopoll, dopoll_full))
  1176. sync();
  1177. sync();
  1178. WARN_ONCE(dopoll && !cur_ops->poll_gp_state(cookie),
  1179. "%s: Cookie check 3 failed %pS() online %*pbl.",
  1180. __func__, sync, cpumask_pr_args(cpu_online_mask));
  1181. WARN_ONCE(dopoll_full && !cur_ops->poll_gp_state_full(&cookie_full),
  1182. "%s: Cookie check 4 failed %pS() online %*pbl",
  1183. __func__, sync, cpumask_pr_args(cpu_online_mask));
  1184. if (dopoll || dopoll_full)
  1185. cpus_read_unlock();
  1186. }
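/*
 * Minimal sketch of the pattern exercised above, expressed with the
 * vanilla RCU API rather than cur_ops (illustrative only):
 *
 *	unsigned long cookie;
 *
 *	cookie = get_state_synchronize_rcu();	// Snapshot grace-period state.
 *	synchronize_rcu();			// At least one full GP elapses.
 *	WARN_ON_ONCE(!poll_state_synchronize_rcu(cookie));
 *
 * The cpus_read_lock() above keeps the set of online CPUs stable so that
 * single-CPU grace-period optimizations cannot change underfoot between
 * the cookie snapshot and the final check.
 */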
  1187. /*
  1188. * RCU torture writer kthread. Repeatedly substitutes a new structure
  1189. * for that pointed to by rcu_torture_current, freeing the old structure
  1190. * after a series of grace periods (the "pipeline").
  1191. */
  1192. static int
  1193. rcu_torture_writer(void *arg)
  1194. {
  1195. bool boot_ended;
  1196. bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
  1197. unsigned long cookie;
  1198. struct rcu_gp_oldstate cookie_full;
  1199. int expediting = 0;
  1200. unsigned long gp_snap;
  1201. struct rcu_gp_oldstate gp_snap_full;
  1202. int i;
  1203. int idx;
  1204. int oldnice = task_nice(current);
  1205. struct rcu_torture *rp;
  1206. struct rcu_torture *old_rp;
  1207. static DEFINE_TORTURE_RANDOM(rand);
  1208. bool stutter_waited;
  1209. VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
  1210. if (!can_expedite)
  1211. pr_alert("%s" TORTURE_FLAG
  1212. " GP expediting controlled from boot/sysfs for %s.\n",
  1213. torture_type, cur_ops->name);
  1214. if (WARN_ONCE(nsynctypes == 0,
  1215. "%s: No update-side primitives.\n", __func__)) {
  1216. /*
1217. * No update-side primitives, so don't try updating.
  1218. * The resulting test won't be testing much, hence the
  1219. * above WARN_ONCE().
  1220. */
  1221. rcu_torture_writer_state = RTWS_STOPPING;
  1222. torture_kthread_stopping("rcu_torture_writer");
  1223. return 0;
  1224. }
  1225. do {
  1226. rcu_torture_writer_state = RTWS_FIXED_DELAY;
  1227. torture_hrtimeout_us(500, 1000, &rand);
  1228. rp = rcu_torture_alloc();
  1229. if (rp == NULL)
  1230. continue;
  1231. rp->rtort_pipe_count = 0;
  1232. rcu_torture_writer_state = RTWS_DELAY;
  1233. udelay(torture_random(&rand) & 0x3ff);
  1234. rcu_torture_writer_state = RTWS_REPLACE;
  1235. old_rp = rcu_dereference_check(rcu_torture_current,
  1236. current == writer_task);
  1237. rp->rtort_mbtest = 1;
  1238. rcu_assign_pointer(rcu_torture_current, rp);
  1239. smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
  1240. if (old_rp) {
  1241. i = old_rp->rtort_pipe_count;
  1242. if (i > RCU_TORTURE_PIPE_LEN)
  1243. i = RCU_TORTURE_PIPE_LEN;
  1244. atomic_inc(&rcu_torture_wcount[i]);
  1245. WRITE_ONCE(old_rp->rtort_pipe_count,
  1246. old_rp->rtort_pipe_count + 1);
  1247. // Make sure readers block polled grace periods.
  1248. if (cur_ops->get_gp_state && cur_ops->poll_gp_state) {
  1249. idx = cur_ops->readlock();
  1250. cookie = cur_ops->get_gp_state();
  1251. WARN_ONCE(cur_ops->poll_gp_state(cookie),
  1252. "%s: Cookie check 1 failed %s(%d) %lu->%lu\n",
  1253. __func__,
  1254. rcu_torture_writer_state_getname(),
  1255. rcu_torture_writer_state,
  1256. cookie, cur_ops->get_gp_state());
  1257. if (cur_ops->get_gp_completed) {
  1258. cookie = cur_ops->get_gp_completed();
  1259. WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie));
  1260. }
  1261. cur_ops->readunlock(idx);
  1262. }
  1263. if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) {
  1264. idx = cur_ops->readlock();
  1265. cur_ops->get_gp_state_full(&cookie_full);
  1266. WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full),
  1267. "%s: Cookie check 5 failed %s(%d) online %*pbl\n",
  1268. __func__,
  1269. rcu_torture_writer_state_getname(),
  1270. rcu_torture_writer_state,
  1271. cpumask_pr_args(cpu_online_mask));
  1272. if (cur_ops->get_gp_completed_full) {
  1273. cur_ops->get_gp_completed_full(&cookie_full);
  1274. WARN_ON_ONCE(!cur_ops->poll_gp_state_full(&cookie_full));
  1275. }
  1276. cur_ops->readunlock(idx);
  1277. }
  1278. switch (synctype[torture_random(&rand) % nsynctypes]) {
  1279. case RTWS_DEF_FREE:
  1280. rcu_torture_writer_state = RTWS_DEF_FREE;
  1281. cur_ops->deferred_free(old_rp);
  1282. break;
  1283. case RTWS_EXP_SYNC:
  1284. rcu_torture_writer_state = RTWS_EXP_SYNC;
  1285. do_rtws_sync(&rand, cur_ops->exp_sync);
  1286. rcu_torture_pipe_update(old_rp);
  1287. break;
  1288. case RTWS_COND_GET:
  1289. rcu_torture_writer_state = RTWS_COND_GET;
  1290. gp_snap = cur_ops->get_gp_state();
  1291. torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
  1292. rcu_torture_writer_state = RTWS_COND_SYNC;
  1293. cur_ops->cond_sync(gp_snap);
  1294. rcu_torture_pipe_update(old_rp);
  1295. break;
  1296. case RTWS_COND_GET_EXP:
  1297. rcu_torture_writer_state = RTWS_COND_GET_EXP;
  1298. gp_snap = cur_ops->get_gp_state_exp();
  1299. torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
  1300. rcu_torture_writer_state = RTWS_COND_SYNC_EXP;
  1301. cur_ops->cond_sync_exp(gp_snap);
  1302. rcu_torture_pipe_update(old_rp);
  1303. break;
  1304. case RTWS_COND_GET_FULL:
  1305. rcu_torture_writer_state = RTWS_COND_GET_FULL;
  1306. cur_ops->get_gp_state_full(&gp_snap_full);
  1307. torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
  1308. rcu_torture_writer_state = RTWS_COND_SYNC_FULL;
  1309. cur_ops->cond_sync_full(&gp_snap_full);
  1310. rcu_torture_pipe_update(old_rp);
  1311. break;
  1312. case RTWS_COND_GET_EXP_FULL:
  1313. rcu_torture_writer_state = RTWS_COND_GET_EXP_FULL;
  1314. cur_ops->get_gp_state_full(&gp_snap_full);
  1315. torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
  1316. rcu_torture_writer_state = RTWS_COND_SYNC_EXP_FULL;
  1317. cur_ops->cond_sync_exp_full(&gp_snap_full);
  1318. rcu_torture_pipe_update(old_rp);
  1319. break;
  1320. case RTWS_POLL_GET:
  1321. rcu_torture_writer_state = RTWS_POLL_GET;
  1322. gp_snap = cur_ops->start_gp_poll();
  1323. rcu_torture_writer_state = RTWS_POLL_WAIT;
  1324. while (!cur_ops->poll_gp_state(gp_snap))
  1325. torture_hrtimeout_jiffies(torture_random(&rand) % 16,
  1326. &rand);
  1327. rcu_torture_pipe_update(old_rp);
  1328. break;
  1329. case RTWS_POLL_GET_FULL:
  1330. rcu_torture_writer_state = RTWS_POLL_GET_FULL;
  1331. cur_ops->start_gp_poll_full(&gp_snap_full);
  1332. rcu_torture_writer_state = RTWS_POLL_WAIT_FULL;
  1333. while (!cur_ops->poll_gp_state_full(&gp_snap_full))
  1334. torture_hrtimeout_jiffies(torture_random(&rand) % 16,
  1335. &rand);
  1336. rcu_torture_pipe_update(old_rp);
  1337. break;
  1338. case RTWS_POLL_GET_EXP:
  1339. rcu_torture_writer_state = RTWS_POLL_GET_EXP;
  1340. gp_snap = cur_ops->start_gp_poll_exp();
  1341. rcu_torture_writer_state = RTWS_POLL_WAIT_EXP;
  1342. while (!cur_ops->poll_gp_state_exp(gp_snap))
  1343. torture_hrtimeout_jiffies(torture_random(&rand) % 16,
  1344. &rand);
  1345. rcu_torture_pipe_update(old_rp);
  1346. break;
  1347. case RTWS_POLL_GET_EXP_FULL:
  1348. rcu_torture_writer_state = RTWS_POLL_GET_EXP_FULL;
  1349. cur_ops->start_gp_poll_exp_full(&gp_snap_full);
  1350. rcu_torture_writer_state = RTWS_POLL_WAIT_EXP_FULL;
  1351. while (!cur_ops->poll_gp_state_full(&gp_snap_full))
  1352. torture_hrtimeout_jiffies(torture_random(&rand) % 16,
  1353. &rand);
  1354. rcu_torture_pipe_update(old_rp);
  1355. break;
  1356. case RTWS_SYNC:
  1357. rcu_torture_writer_state = RTWS_SYNC;
  1358. do_rtws_sync(&rand, cur_ops->sync);
  1359. rcu_torture_pipe_update(old_rp);
  1360. break;
  1361. default:
  1362. WARN_ON_ONCE(1);
  1363. break;
  1364. }
  1365. }
  1366. WRITE_ONCE(rcu_torture_current_version,
  1367. rcu_torture_current_version + 1);
  1368. /* Cycle through nesting levels of rcu_expedite_gp() calls. */
  1369. if (can_expedite &&
  1370. !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
  1371. WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
  1372. if (expediting >= 0)
  1373. rcu_expedite_gp();
  1374. else
  1375. rcu_unexpedite_gp();
  1376. if (++expediting > 3)
  1377. expediting = -expediting;
  1378. } else if (!can_expedite) { /* Disabled during boot, recheck. */
  1379. can_expedite = !rcu_gp_is_expedited() &&
  1380. !rcu_gp_is_normal();
  1381. }
  1382. rcu_torture_writer_state = RTWS_STUTTER;
  1383. boot_ended = rcu_inkernel_boot_has_ended();
  1384. stutter_waited = stutter_wait("rcu_torture_writer");
  1385. if (stutter_waited &&
  1386. !atomic_read(&rcu_fwd_cb_nodelay) &&
  1387. !cur_ops->slow_gps &&
  1388. !torture_must_stop() &&
  1389. boot_ended)
  1390. for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
  1391. if (list_empty(&rcu_tortures[i].rtort_free) &&
  1392. rcu_access_pointer(rcu_torture_current) !=
  1393. &rcu_tortures[i]) {
  1394. tracing_off();
  1395. WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
  1396. rcu_ftrace_dump(DUMP_ALL);
  1397. }
  1398. if (stutter_waited)
  1399. sched_set_normal(current, oldnice);
  1400. } while (!torture_must_stop());
  1401. rcu_torture_current = NULL; // Let stats task know that we are done.
  1402. /* Reset expediting back to unexpedited. */
  1403. if (expediting > 0)
  1404. expediting = -expediting;
  1405. while (can_expedite && expediting++ < 0)
  1406. rcu_unexpedite_gp();
  1407. WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
  1408. if (!can_expedite)
  1409. pr_alert("%s" TORTURE_FLAG
  1410. " Dynamic grace-period expediting was disabled.\n",
  1411. torture_type);
  1412. rcu_torture_writer_state = RTWS_STOPPING;
  1413. torture_kthread_stopping("rcu_torture_writer");
  1414. return 0;
  1415. }
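/*
 * For reference, a minimal sketch (hypothetical types and lock, not part
 * of the test) of the classic updater pattern that rcu_torture_writer()
 * stress-tests in many randomized variations:
 *
 *	struct blob { struct rcu_head rh; int data; };
 *	struct blob __rcu *gp;
 *
 *	void update(struct blob *newp)
 *	{
 *		struct blob *oldp;
 *
 *		oldp = rcu_dereference_protected(gp, lockdep_is_held(&mylock));
 *		rcu_assign_pointer(gp, newp);	// Publish the new version.
 *		if (oldp)
 *			kfree_rcu(oldp, rh);	// Free only after a grace period.
 *	}
 *
 * The torture writer replaces kfree_rcu() with a randomly chosen
 * grace-period primitive from synctype[] and, rather than freeing the old
 * structure outright, advances its pipeline counters so that readers can
 * detect a too-short grace period.
 */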
  1416. /*
  1417. * RCU torture fake writer kthread. Repeatedly calls sync, with a random
  1418. * delay between calls.
  1419. */
  1420. static int
  1421. rcu_torture_fakewriter(void *arg)
  1422. {
  1423. unsigned long gp_snap;
  1424. struct rcu_gp_oldstate gp_snap_full;
  1425. DEFINE_TORTURE_RANDOM(rand);
  1426. VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
  1427. set_user_nice(current, MAX_NICE);
  1428. if (WARN_ONCE(nsynctypes == 0,
  1429. "%s: No update-side primitives.\n", __func__)) {
  1430. /*
1431. * No update-side primitives, so don't try updating.
  1432. * The resulting test won't be testing much, hence the
  1433. * above WARN_ONCE().
  1434. */
  1435. torture_kthread_stopping("rcu_torture_fakewriter");
  1436. return 0;
  1437. }
  1438. do {
  1439. torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand);
  1440. if (cur_ops->cb_barrier != NULL &&
  1441. torture_random(&rand) % (nfakewriters * 8) == 0) {
  1442. cur_ops->cb_barrier();
  1443. } else {
  1444. switch (synctype[torture_random(&rand) % nsynctypes]) {
  1445. case RTWS_DEF_FREE:
  1446. break;
  1447. case RTWS_EXP_SYNC:
  1448. cur_ops->exp_sync();
  1449. break;
  1450. case RTWS_COND_GET:
  1451. gp_snap = cur_ops->get_gp_state();
  1452. torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
  1453. cur_ops->cond_sync(gp_snap);
  1454. break;
  1455. case RTWS_COND_GET_EXP:
  1456. gp_snap = cur_ops->get_gp_state_exp();
  1457. torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
  1458. cur_ops->cond_sync_exp(gp_snap);
  1459. break;
  1460. case RTWS_COND_GET_FULL:
  1461. cur_ops->get_gp_state_full(&gp_snap_full);
  1462. torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
  1463. cur_ops->cond_sync_full(&gp_snap_full);
  1464. break;
  1465. case RTWS_COND_GET_EXP_FULL:
  1466. cur_ops->get_gp_state_full(&gp_snap_full);
  1467. torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
  1468. cur_ops->cond_sync_exp_full(&gp_snap_full);
  1469. break;
  1470. case RTWS_POLL_GET:
  1471. gp_snap = cur_ops->start_gp_poll();
  1472. while (!cur_ops->poll_gp_state(gp_snap)) {
  1473. torture_hrtimeout_jiffies(torture_random(&rand) % 16,
  1474. &rand);
  1475. }
  1476. break;
  1477. case RTWS_POLL_GET_FULL:
  1478. cur_ops->start_gp_poll_full(&gp_snap_full);
  1479. while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
  1480. torture_hrtimeout_jiffies(torture_random(&rand) % 16,
  1481. &rand);
  1482. }
  1483. break;
  1484. case RTWS_POLL_GET_EXP:
  1485. gp_snap = cur_ops->start_gp_poll_exp();
  1486. while (!cur_ops->poll_gp_state_exp(gp_snap)) {
  1487. torture_hrtimeout_jiffies(torture_random(&rand) % 16,
  1488. &rand);
  1489. }
  1490. break;
  1491. case RTWS_POLL_GET_EXP_FULL:
  1492. cur_ops->start_gp_poll_exp_full(&gp_snap_full);
  1493. while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
  1494. torture_hrtimeout_jiffies(torture_random(&rand) % 16,
  1495. &rand);
  1496. }
  1497. break;
  1498. case RTWS_SYNC:
  1499. cur_ops->sync();
  1500. break;
  1501. default:
  1502. WARN_ON_ONCE(1);
  1503. break;
  1504. }
  1505. }
  1506. stutter_wait("rcu_torture_fakewriter");
  1507. } while (!torture_must_stop());
  1508. torture_kthread_stopping("rcu_torture_fakewriter");
  1509. return 0;
  1510. }
  1511. static void rcu_torture_timer_cb(struct rcu_head *rhp)
  1512. {
  1513. kfree(rhp);
  1514. }
  1515. // Set up and carry out testing of RCU's global memory ordering
  1516. static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp,
  1517. struct torture_random_state *trsp)
  1518. {
  1519. unsigned long loops;
  1520. int noc = torture_num_online_cpus();
  1521. int rdrchked;
  1522. int rdrchker;
  1523. struct rcu_torture_reader_check *rtrcp; // Me.
  1524. struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking.
  1525. struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked.
  1526. struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me.
  1527. if (myid < 0)
  1528. return; // Don't try this from timer handlers.
  1529. // Increment my counter.
  1530. rtrcp = &rcu_torture_reader_mbchk[myid];
  1531. WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1);
  1532. // Attempt to assign someone else some checking work.
  1533. rdrchked = torture_random(trsp) % nrealreaders;
  1534. rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
  1535. rdrchker = torture_random(trsp) % nrealreaders;
  1536. rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker];
  1537. if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker &&
  1538. smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below.
  1539. !READ_ONCE(rtp->rtort_chkp) &&
  1540. !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below.
  1541. rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops);
  1542. WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0);
  1543. rtrcp->rtc_chkrdr = rdrchked;
  1544. WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends.
  1545. if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) ||
  1546. cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp))
  1547. (void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out.
  1548. }
  1549. // If assigned some completed work, do it!
  1550. rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner);
  1551. if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready))
  1552. return; // No work or work not yet ready.
  1553. rdrchked = rtrcp_assigner->rtc_chkrdr;
  1554. if (WARN_ON_ONCE(rdrchked < 0))
  1555. return;
  1556. rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
  1557. loops = READ_ONCE(rtrcp_chked->rtc_myloops);
  1558. atomic_inc(&n_rcu_torture_mbchk_tries);
  1559. if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops))
  1560. atomic_inc(&n_rcu_torture_mbchk_fail);
  1561. rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2;
  1562. rtrcp_assigner->rtc_ready = 0;
  1563. smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work.
  1564. smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign.
  1565. }
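/*
 * Informal summary of the protocol above: each reader keeps a private
 * loop counter, occasionally assigns another reader to be checked once
 * the current grace period has ended, and, when handed a completed
 * assignment, verifies that the checked reader's counter has advanced at
 * least as far as the value snapshotted when the assignment was made.  A
 * counter that appears to have gone backward indicates a memory-ordering
 * failure and increments n_rcu_torture_mbchk_fail.
 */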
  1566. /*
  1567. * Do one extension of an RCU read-side critical section using the
  1568. * current reader state in readstate (set to zero for initial entry
  1569. * to extended critical section), set the new state as specified by
  1570. * newstate (set to zero for final exit from extended critical section),
  1571. * and random-number-generator state in trsp. If this is neither the
1572. * beginning nor the end of the critical section and if there was actually a
  1573. * change, do a ->read_delay().
  1574. */
  1575. static void rcutorture_one_extend(int *readstate, int newstate,
  1576. struct torture_random_state *trsp,
  1577. struct rt_read_seg *rtrsp)
  1578. {
  1579. unsigned long flags;
  1580. int idxnew1 = -1;
  1581. int idxnew2 = -1;
  1582. int idxold1 = *readstate;
  1583. int idxold2 = idxold1;
  1584. int statesnew = ~*readstate & newstate;
  1585. int statesold = *readstate & ~newstate;
  1586. WARN_ON_ONCE(idxold2 < 0);
  1587. WARN_ON_ONCE((idxold2 >> RCUTORTURE_RDR_SHIFT_2) > 1);
  1588. rtrsp->rt_readstate = newstate;
  1589. /* First, put new protection in place to avoid critical-section gap. */
  1590. if (statesnew & RCUTORTURE_RDR_BH)
  1591. local_bh_disable();
  1592. if (statesnew & RCUTORTURE_RDR_RBH)
  1593. rcu_read_lock_bh();
  1594. if (statesnew & RCUTORTURE_RDR_IRQ)
  1595. local_irq_disable();
  1596. if (statesnew & RCUTORTURE_RDR_PREEMPT)
  1597. preempt_disable();
  1598. if (statesnew & RCUTORTURE_RDR_SCHED)
  1599. rcu_read_lock_sched();
  1600. if (statesnew & RCUTORTURE_RDR_RCU_1)
  1601. idxnew1 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_1;
  1602. if (statesnew & RCUTORTURE_RDR_RCU_2)
  1603. idxnew2 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_2;
  1604. /*
  1605. * Next, remove old protection, in decreasing order of strength
  1606. * to avoid unlock paths that aren't safe in the stronger
1607. * context. Namely, BH cannot be enabled with interrupts disabled.
1608. * Additionally, PREEMPT_RT requires that BH be enabled in preemptible
1609. * context.
  1610. */
  1611. if (statesold & RCUTORTURE_RDR_IRQ)
  1612. local_irq_enable();
  1613. if (statesold & RCUTORTURE_RDR_PREEMPT)
  1614. preempt_enable();
  1615. if (statesold & RCUTORTURE_RDR_SCHED)
  1616. rcu_read_unlock_sched();
  1617. if (statesold & RCUTORTURE_RDR_BH)
  1618. local_bh_enable();
  1619. if (statesold & RCUTORTURE_RDR_RBH)
  1620. rcu_read_unlock_bh();
  1621. if (statesold & RCUTORTURE_RDR_RCU_2) {
  1622. cur_ops->readunlock((idxold2 >> RCUTORTURE_RDR_SHIFT_2) & 0x1);
  1623. WARN_ON_ONCE(idxnew2 != -1);
  1624. idxold2 = 0;
  1625. }
  1626. if (statesold & RCUTORTURE_RDR_RCU_1) {
  1627. bool lockit;
  1628. lockit = !cur_ops->no_pi_lock && !statesnew && !(torture_random(trsp) & 0xffff);
  1629. if (lockit)
  1630. raw_spin_lock_irqsave(&current->pi_lock, flags);
  1631. cur_ops->readunlock((idxold1 >> RCUTORTURE_RDR_SHIFT_1) & 0x1);
  1632. WARN_ON_ONCE(idxnew1 != -1);
  1633. idxold1 = 0;
  1634. if (lockit)
  1635. raw_spin_unlock_irqrestore(&current->pi_lock, flags);
  1636. }
  1637. /* Delay if neither beginning nor end and there was a change. */
  1638. if ((statesnew || statesold) && *readstate && newstate)
  1639. cur_ops->read_delay(trsp, rtrsp);
  1640. /* Update the reader state. */
  1641. if (idxnew1 == -1)
  1642. idxnew1 = idxold1 & RCUTORTURE_RDR_MASK_1;
  1643. WARN_ON_ONCE(idxnew1 < 0);
  1644. if (WARN_ON_ONCE((idxnew1 >> RCUTORTURE_RDR_SHIFT_1) > 1))
  1645. pr_info("Unexpected idxnew1 value of %#x\n", idxnew1);
  1646. if (idxnew2 == -1)
  1647. idxnew2 = idxold2 & RCUTORTURE_RDR_MASK_2;
  1648. WARN_ON_ONCE(idxnew2 < 0);
  1649. WARN_ON_ONCE((idxnew2 >> RCUTORTURE_RDR_SHIFT_2) > 1);
  1650. *readstate = idxnew1 | idxnew2 | newstate;
  1651. WARN_ON_ONCE(*readstate < 0);
  1652. if (WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT_2) > 1))
  1653. pr_info("Unexpected idxnew2 value of %#x\n", idxnew2);
  1654. }
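/*
 * Illustrative example (not actual test code): a transition from
 * readstate == RCUTORTURE_RDR_RCU_1 to
 * newstate == (RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_BH) leaves the outer
 * rcu_read_lock() in place and simply wraps the next segment:
 *
 *	rcu_read_lock();	// Still held from the previous segment.
 *	local_bh_disable();	// statesnew & RCUTORTURE_RDR_BH
 *	...read-side accesses...
 *	// A later transition clearing RCUTORTURE_RDR_BH does local_bh_enable().
 *
 * New protection is always acquired before old protection is dropped, so
 * the overall critical section never has a gap.
 */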
  1655. /* Return the biggest extendables mask given current RCU and boot parameters. */
  1656. static int rcutorture_extend_mask_max(void)
  1657. {
  1658. int mask;
  1659. WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
  1660. mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
  1661. mask = mask | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2;
  1662. return mask;
  1663. }
  1664. /* Return a random protection state mask, but with at least one bit set. */
  1665. static int
  1666. rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
  1667. {
  1668. int mask = rcutorture_extend_mask_max();
  1669. unsigned long randmask1 = torture_random(trsp) >> 8;
  1670. unsigned long randmask2 = randmask1 >> 3;
  1671. unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED;
  1672. unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ;
  1673. unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
  1674. WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT_1);
  1675. /* Mostly only one bit (need preemption!), sometimes lots of bits. */
  1676. if (!(randmask1 & 0x7))
  1677. mask = mask & randmask2;
  1678. else
  1679. mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
  1680. // Can't have nested RCU reader without outer RCU reader.
  1681. if (!(mask & RCUTORTURE_RDR_RCU_1) && (mask & RCUTORTURE_RDR_RCU_2)) {
  1682. if (oldmask & RCUTORTURE_RDR_RCU_1)
  1683. mask &= ~RCUTORTURE_RDR_RCU_2;
  1684. else
  1685. mask |= RCUTORTURE_RDR_RCU_1;
  1686. }
  1687. /*
  1688. * Can't enable bh w/irq disabled.
  1689. */
  1690. if (mask & RCUTORTURE_RDR_IRQ)
  1691. mask |= oldmask & bhs;
  1692. /*
  1693. * Ideally these sequences would be detected in debug builds
  1694. * (regardless of RT), but until then don't stop testing
  1695. * them on non-RT.
  1696. */
  1697. if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
  1698. /* Can't modify BH in atomic context */
  1699. if (oldmask & preempts_irq)
  1700. mask &= ~bhs;
  1701. if ((oldmask | mask) & preempts_irq)
  1702. mask |= oldmask & bhs;
  1703. }
  1704. return mask ?: RCUTORTURE_RDR_RCU_1;
  1705. }
  1706. /*
  1707. * Do a randomly selected number of extensions of an existing RCU read-side
  1708. * critical section.
  1709. */
  1710. static struct rt_read_seg *
  1711. rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
  1712. struct rt_read_seg *rtrsp)
  1713. {
  1714. int i;
  1715. int j;
  1716. int mask = rcutorture_extend_mask_max();
  1717. WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
  1718. if (!((mask - 1) & mask))
  1719. return rtrsp; /* Current RCU reader not extendable. */
  1720. /* Bias towards larger numbers of loops. */
  1721. i = (torture_random(trsp) >> 3);
  1722. i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
  1723. for (j = 0; j < i; j++) {
  1724. mask = rcutorture_extend_mask(*readstate, trsp);
  1725. rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
  1726. }
  1727. return &rtrsp[j];
  1728. }
  1729. /*
  1730. * Do one read-side critical section, returning false if there was
  1731. * no data to read. Can be invoked both from process context and
  1732. * from a timer handler.
  1733. */
  1734. static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
  1735. {
  1736. bool checkpolling = !(torture_random(trsp) & 0xfff);
  1737. unsigned long cookie;
  1738. struct rcu_gp_oldstate cookie_full;
  1739. int i;
  1740. unsigned long started;
  1741. unsigned long completed;
  1742. int newstate;
  1743. struct rcu_torture *p;
  1744. int pipe_count;
  1745. int readstate = 0;
  1746. struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
  1747. struct rt_read_seg *rtrsp = &rtseg[0];
  1748. struct rt_read_seg *rtrsp1;
  1749. unsigned long long ts;
  1750. WARN_ON_ONCE(!rcu_is_watching());
  1751. newstate = rcutorture_extend_mask(readstate, trsp);
  1752. rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
  1753. if (checkpolling) {
  1754. if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
  1755. cookie = cur_ops->get_gp_state();
  1756. if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full)
  1757. cur_ops->get_gp_state_full(&cookie_full);
  1758. }
  1759. started = cur_ops->get_gp_seq();
  1760. ts = rcu_trace_clock_local();
  1761. p = rcu_dereference_check(rcu_torture_current,
  1762. !cur_ops->readlock_held || cur_ops->readlock_held());
  1763. if (p == NULL) {
  1764. /* Wait for rcu_torture_writer to get underway */
  1765. rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
  1766. return false;
  1767. }
  1768. if (p->rtort_mbtest == 0)
  1769. atomic_inc(&n_rcu_torture_mberror);
  1770. rcu_torture_reader_do_mbchk(myid, p, trsp);
  1771. rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
  1772. preempt_disable();
  1773. pipe_count = READ_ONCE(p->rtort_pipe_count);
  1774. if (pipe_count > RCU_TORTURE_PIPE_LEN) {
  1775. /* Should not happen, but... */
  1776. pipe_count = RCU_TORTURE_PIPE_LEN;
  1777. }
  1778. completed = cur_ops->get_gp_seq();
  1779. if (pipe_count > 1) {
  1780. do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
  1781. ts, started, completed);
  1782. rcu_ftrace_dump(DUMP_ALL);
  1783. }
  1784. __this_cpu_inc(rcu_torture_count[pipe_count]);
  1785. completed = rcutorture_seq_diff(completed, started);
  1786. if (completed > RCU_TORTURE_PIPE_LEN) {
  1787. /* Should not happen, but... */
  1788. completed = RCU_TORTURE_PIPE_LEN;
  1789. }
  1790. __this_cpu_inc(rcu_torture_batch[completed]);
  1791. preempt_enable();
  1792. if (checkpolling) {
  1793. if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
  1794. WARN_ONCE(cur_ops->poll_gp_state(cookie),
  1795. "%s: Cookie check 2 failed %s(%d) %lu->%lu\n",
  1796. __func__,
  1797. rcu_torture_writer_state_getname(),
  1798. rcu_torture_writer_state,
  1799. cookie, cur_ops->get_gp_state());
  1800. if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full)
  1801. WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full),
  1802. "%s: Cookie check 6 failed %s(%d) online %*pbl\n",
  1803. __func__,
  1804. rcu_torture_writer_state_getname(),
  1805. rcu_torture_writer_state,
  1806. cpumask_pr_args(cpu_online_mask));
  1807. }
  1808. rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
  1809. WARN_ON_ONCE(readstate);
  1810. // This next splat is expected behavior if leakpointer, especially
  1811. // for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels.
  1812. WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1);
  1813. /* If error or close call, record the sequence of reader protections. */
  1814. if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
  1815. i = 0;
  1816. for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
  1817. err_segs[i++] = *rtrsp1;
  1818. rt_read_nsegs = i;
  1819. }
  1820. return true;
  1821. }
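/*
 * The reader above is an elaborate, randomly extended version of the
 * minimal RCU reader pattern, sketched here with the vanilla API
 * (illustrative only, do_something_with() is hypothetical):
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(rcu_torture_current);
 *	if (p)
 *		do_something_with(p);	// Read-side access only.
 *	rcu_read_unlock();
 *
 * rcu_torture_one_read() additionally records how far the structure has
 * advanced through the pipeline, which is how too-short grace periods
 * are detected.
 */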
  1822. static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);
  1823. /*
  1824. * RCU torture reader from timer handler. Dereferences rcu_torture_current,
  1825. * incrementing the corresponding element of the pipeline array. The
1826. * counter in the element should never be greater than 1; otherwise, the
  1827. * RCU implementation is broken.
  1828. */
  1829. static void rcu_torture_timer(struct timer_list *unused)
  1830. {
  1831. atomic_long_inc(&n_rcu_torture_timers);
  1832. (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1);
  1833. /* Test call_rcu() invocation from interrupt handler. */
  1834. if (cur_ops->call) {
  1835. struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);
  1836. if (rhp)
  1837. cur_ops->call(rhp, rcu_torture_timer_cb);
  1838. }
  1839. }
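/*
 * In vanilla-RCU terms, the interrupt-context test above amounts to
 * (illustrative only):
 *
 *	struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);
 *
 *	if (rhp)
 *		call_rcu(rhp, rcu_torture_timer_cb);	// Callback just kfree()s it.
 *
 * GFP_NOWAIT is used because this runs from a timer handler, where
 * sleeping allocations are not permitted.
 */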
  1840. /*
  1841. * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current,
  1842. * incrementing the corresponding element of the pipeline array. The
1843. * counter in the element should never be greater than 1; otherwise, the
  1844. * RCU implementation is broken.
  1845. */
  1846. static int
  1847. rcu_torture_reader(void *arg)
  1848. {
  1849. unsigned long lastsleep = jiffies;
  1850. long myid = (long)arg;
  1851. int mynumonline = myid;
  1852. DEFINE_TORTURE_RANDOM(rand);
  1853. struct timer_list t;
  1854. VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
  1855. set_user_nice(current, MAX_NICE);
  1856. if (irqreader && cur_ops->irq_capable)
  1857. timer_setup_on_stack(&t, rcu_torture_timer, 0);
  1858. tick_dep_set_task(current, TICK_DEP_BIT_RCU);
  1859. do {
  1860. if (irqreader && cur_ops->irq_capable) {
  1861. if (!timer_pending(&t))
  1862. mod_timer(&t, jiffies + 1);
  1863. }
  1864. if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop())
  1865. schedule_timeout_interruptible(HZ);
  1866. if (time_after(jiffies, lastsleep) && !torture_must_stop()) {
  1867. torture_hrtimeout_us(500, 1000, &rand);
  1868. lastsleep = jiffies + 10;
  1869. }
  1870. while (torture_num_online_cpus() < mynumonline && !torture_must_stop())
  1871. schedule_timeout_interruptible(HZ / 5);
  1872. stutter_wait("rcu_torture_reader");
  1873. } while (!torture_must_stop());
  1874. if (irqreader && cur_ops->irq_capable) {
  1875. del_timer_sync(&t);
  1876. destroy_timer_on_stack(&t);
  1877. }
  1878. tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
  1879. torture_kthread_stopping("rcu_torture_reader");
  1880. return 0;
  1881. }
  1882. /*
1883. * Randomly toggle CPUs' callback-offload state. This uses hrtimers to
1884. * increase race probabilities and fuzzes the interval between toggles.
  1885. */
  1886. static int rcu_nocb_toggle(void *arg)
  1887. {
  1888. int cpu;
  1889. int maxcpu = -1;
  1890. int oldnice = task_nice(current);
  1891. long r;
  1892. DEFINE_TORTURE_RANDOM(rand);
  1893. ktime_t toggle_delay;
  1894. unsigned long toggle_fuzz;
  1895. ktime_t toggle_interval = ms_to_ktime(nocbs_toggle);
  1896. VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started");
  1897. while (!rcu_inkernel_boot_has_ended())
  1898. schedule_timeout_interruptible(HZ / 10);
  1899. for_each_online_cpu(cpu)
  1900. maxcpu = cpu;
  1901. WARN_ON(maxcpu < 0);
  1902. if (toggle_interval > ULONG_MAX)
  1903. toggle_fuzz = ULONG_MAX >> 3;
  1904. else
  1905. toggle_fuzz = toggle_interval >> 3;
  1906. if (toggle_fuzz <= 0)
  1907. toggle_fuzz = NSEC_PER_USEC;
  1908. do {
  1909. r = torture_random(&rand);
  1910. cpu = (r >> 4) % (maxcpu + 1);
  1911. if (r & 0x1) {
  1912. rcu_nocb_cpu_offload(cpu);
  1913. atomic_long_inc(&n_nocb_offload);
  1914. } else {
  1915. rcu_nocb_cpu_deoffload(cpu);
  1916. atomic_long_inc(&n_nocb_deoffload);
  1917. }
  1918. toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval;
  1919. set_current_state(TASK_INTERRUPTIBLE);
  1920. schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL);
  1921. if (stutter_wait("rcu_nocb_toggle"))
  1922. sched_set_normal(current, oldnice);
  1923. } while (!torture_must_stop());
  1924. torture_kthread_stopping("rcu_nocb_toggle");
  1925. return 0;
  1926. }
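/*
 * Worked example of the fuzz arithmetic (assuming nocbs_toggle=1000
 * milliseconds on a 64-bit kernel): toggle_interval is 1,000,000,000ns,
 * toggle_fuzz is one eighth of that (125ms), so each pass sleeps for a
 * randomly fuzzed interval in [1000ms, 1125ms) before toggling the
 * offload state of another randomly chosen CPU.
 */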
  1927. /*
  1928. * Print torture statistics. Caller must ensure that there is only
  1929. * one call to this function at a given time!!! This is normally
  1930. * accomplished by relying on the module system to only have one copy
  1931. * of the module loaded, and then by giving the rcu_torture_stats
  1932. * kthread full control (or the init/cleanup functions when rcu_torture_stats
  1933. * thread is not running).
  1934. */
  1935. static void
  1936. rcu_torture_stats_print(void)
  1937. {
  1938. int cpu;
  1939. int i;
  1940. long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
  1941. long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
  1942. struct rcu_torture *rtcp;
  1943. static unsigned long rtcv_snap = ULONG_MAX;
  1944. static bool splatted;
  1945. struct task_struct *wtp;
  1946. for_each_possible_cpu(cpu) {
  1947. for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
  1948. pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]);
  1949. batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]);
  1950. }
  1951. }
  1952. for (i = RCU_TORTURE_PIPE_LEN; i >= 0; i--) {
  1953. if (pipesummary[i] != 0)
  1954. break;
  1955. }
  1956. pr_alert("%s%s ", torture_type, TORTURE_FLAG);
  1957. rtcp = rcu_access_pointer(rcu_torture_current);
  1958. pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
  1959. rtcp,
  1960. rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER",
  1961. rcu_torture_current_version,
  1962. list_empty(&rcu_torture_freelist),
  1963. atomic_read(&n_rcu_torture_alloc),
  1964. atomic_read(&n_rcu_torture_alloc_fail),
  1965. atomic_read(&n_rcu_torture_free));
  1966. pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld rtbre: %ld ",
  1967. atomic_read(&n_rcu_torture_mberror),
  1968. atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries),
  1969. n_rcu_torture_barrier_error,
  1970. n_rcu_torture_boost_ktrerror,
  1971. n_rcu_torture_boost_rterror);
  1972. pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
  1973. n_rcu_torture_boost_failure,
  1974. n_rcu_torture_boosts,
  1975. atomic_long_read(&n_rcu_torture_timers));
  1976. torture_onoff_stats();
  1977. pr_cont("barrier: %ld/%ld:%ld ",
  1978. data_race(n_barrier_successes),
  1979. data_race(n_barrier_attempts),
  1980. data_race(n_rcu_torture_barrier_error));
  1981. pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic.
  1982. pr_cont("nocb-toggles: %ld:%ld\n",
  1983. atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload));
  1984. pr_alert("%s%s ", torture_type, TORTURE_FLAG);
  1985. if (atomic_read(&n_rcu_torture_mberror) ||
  1986. atomic_read(&n_rcu_torture_mbchk_fail) ||
  1987. n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror ||
  1988. n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure ||
  1989. i > 1) {
  1990. pr_cont("%s", "!!! ");
  1991. atomic_inc(&n_rcu_torture_error);
  1992. WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror));
  1993. WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail));
  1994. WARN_ON_ONCE(n_rcu_torture_barrier_error); // rcu_barrier()
  1995. WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread
  1996. WARN_ON_ONCE(n_rcu_torture_boost_rterror); // can't set RT prio
  1997. WARN_ON_ONCE(n_rcu_torture_boost_failure); // boost failed (TIMER_SOFTIRQ RT prio?)
  1998. WARN_ON_ONCE(i > 1); // Too-short grace period
  1999. }
  2000. pr_cont("Reader Pipe: ");
  2001. for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
  2002. pr_cont(" %ld", pipesummary[i]);
  2003. pr_cont("\n");
  2004. pr_alert("%s%s ", torture_type, TORTURE_FLAG);
  2005. pr_cont("Reader Batch: ");
  2006. for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
  2007. pr_cont(" %ld", batchsummary[i]);
  2008. pr_cont("\n");
  2009. pr_alert("%s%s ", torture_type, TORTURE_FLAG);
  2010. pr_cont("Free-Block Circulation: ");
  2011. for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
  2012. pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
  2013. }
  2014. pr_cont("\n");
  2015. if (cur_ops->stats)
  2016. cur_ops->stats();
  2017. if (rtcv_snap == rcu_torture_current_version &&
  2018. rcu_access_pointer(rcu_torture_current) &&
  2019. !rcu_stall_is_suppressed()) {
  2020. int __maybe_unused flags = 0;
  2021. unsigned long __maybe_unused gp_seq = 0;
  2022. rcutorture_get_gp_data(cur_ops->ttype,
  2023. &flags, &gp_seq);
  2024. srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
  2025. &flags, &gp_seq);
  2026. wtp = READ_ONCE(writer_task);
  2027. pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n",
  2028. rcu_torture_writer_state_getname(),
  2029. rcu_torture_writer_state, gp_seq, flags,
  2030. wtp == NULL ? ~0U : wtp->__state,
  2031. wtp == NULL ? -1 : (int)task_cpu(wtp));
  2032. if (!splatted && wtp) {
  2033. sched_show_task(wtp);
  2034. splatted = true;
  2035. }
  2036. if (cur_ops->gp_kthread_dbg)
  2037. cur_ops->gp_kthread_dbg();
  2038. rcu_ftrace_dump(DUMP_ALL);
  2039. }
  2040. rtcv_snap = rcu_torture_current_version;
  2041. }
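/*
 * Informal guide to the output: in a healthy run the "Reader Pipe"
 * histogram has non-zero counts only in its first two buckets, because a
 * structure observed more than one grace period after removal implies a
 * too-short grace period.  That is exactly the "i > 1" case above, which
 * prepends "!!!" to the line and fires WARN_ON_ONCE().
 */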
  2042. /*
  2043. * Periodically prints torture statistics, if periodic statistics printing
  2044. * was specified via the stat_interval module parameter.
  2045. */
  2046. static int
  2047. rcu_torture_stats(void *arg)
  2048. {
  2049. VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
  2050. do {
  2051. schedule_timeout_interruptible(stat_interval * HZ);
  2052. rcu_torture_stats_print();
  2053. torture_shutdown_absorb("rcu_torture_stats");
  2054. } while (!torture_must_stop());
  2055. torture_kthread_stopping("rcu_torture_stats");
  2056. return 0;
  2057. }
  2058. /* Test mem_dump_obj() and friends. */
  2059. static void rcu_torture_mem_dump_obj(void)
  2060. {
  2061. struct rcu_head *rhp;
  2062. struct kmem_cache *kcp;
  2063. static int z;
  2064. kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL);
  2065. if (WARN_ON_ONCE(!kcp))
  2066. return;
  2067. rhp = kmem_cache_alloc(kcp, GFP_KERNEL);
  2068. if (WARN_ON_ONCE(!rhp)) {
  2069. kmem_cache_destroy(kcp);
  2070. return;
  2071. }
  2072. pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z);
  2073. pr_alert("mem_dump_obj(ZERO_SIZE_PTR):");
  2074. mem_dump_obj(ZERO_SIZE_PTR);
  2075. pr_alert("mem_dump_obj(NULL):");
  2076. mem_dump_obj(NULL);
  2077. pr_alert("mem_dump_obj(%px):", &rhp);
  2078. mem_dump_obj(&rhp);
  2079. pr_alert("mem_dump_obj(%px):", rhp);
  2080. mem_dump_obj(rhp);
  2081. pr_alert("mem_dump_obj(%px):", &rhp->func);
  2082. mem_dump_obj(&rhp->func);
  2083. pr_alert("mem_dump_obj(%px):", &z);
  2084. mem_dump_obj(&z);
  2085. kmem_cache_free(kcp, rhp);
  2086. kmem_cache_destroy(kcp);
  2087. rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
  2088. if (WARN_ON_ONCE(!rhp))
  2089. return;
  2090. pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
  2091. pr_alert("mem_dump_obj(kmalloc %px):", rhp);
  2092. mem_dump_obj(rhp);
  2093. pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func);
  2094. mem_dump_obj(&rhp->func);
  2095. kfree(rhp);
  2096. rhp = vmalloc(4096);
  2097. if (WARN_ON_ONCE(!rhp))
  2098. return;
  2099. pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
  2100. pr_alert("mem_dump_obj(vmalloc %px):", rhp);
  2101. mem_dump_obj(rhp);
  2102. pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func);
  2103. mem_dump_obj(&rhp->func);
  2104. vfree(rhp);
  2105. }
  2106. static void
  2107. rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
  2108. {
  2109. pr_alert("%s" TORTURE_FLAG
  2110. "--- %s: nreaders=%d nfakewriters=%d "
  2111. "stat_interval=%d verbose=%d test_no_idle_hz=%d "
  2112. "shuffle_interval=%d stutter=%d irqreader=%d "
  2113. "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
  2114. "test_boost=%d/%d test_boost_interval=%d "
  2115. "test_boost_duration=%d shutdown_secs=%d "
  2116. "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
  2117. "stall_cpu_block=%d "
  2118. "n_barrier_cbs=%d "
  2119. "onoff_interval=%d onoff_holdoff=%d "
  2120. "read_exit_delay=%d read_exit_burst=%d "
  2121. "nocbs_nthreads=%d nocbs_toggle=%d\n",
  2122. torture_type, tag, nrealreaders, nfakewriters,
  2123. stat_interval, verbose, test_no_idle_hz, shuffle_interval,
  2124. stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
  2125. test_boost, cur_ops->can_boost,
  2126. test_boost_interval, test_boost_duration, shutdown_secs,
  2127. stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
  2128. stall_cpu_block,
  2129. n_barrier_cbs,
  2130. onoff_interval, onoff_holdoff,
  2131. read_exit_delay, read_exit_burst,
  2132. nocbs_nthreads, nocbs_toggle);
  2133. }
  2134. static int rcutorture_booster_cleanup(unsigned int cpu)
  2135. {
  2136. struct task_struct *t;
  2137. if (boost_tasks[cpu] == NULL)
  2138. return 0;
  2139. mutex_lock(&boost_mutex);
  2140. t = boost_tasks[cpu];
  2141. boost_tasks[cpu] = NULL;
  2142. rcu_torture_enable_rt_throttle();
  2143. mutex_unlock(&boost_mutex);
2144. /* This must be outside the mutex, otherwise deadlock! */
  2145. torture_stop_kthread(rcu_torture_boost, t);
  2146. return 0;
  2147. }
  2148. static int rcutorture_booster_init(unsigned int cpu)
  2149. {
  2150. int retval;
  2151. if (boost_tasks[cpu] != NULL)
  2152. return 0; /* Already created, nothing more to do. */
2153. // Testing RCU priority boosting requires that rcutorture do
  2154. // some serious abuse. Counter this by running ksoftirqd
  2155. // at higher priority.
  2156. if (IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) {
  2157. struct sched_param sp;
  2158. struct task_struct *t;
  2159. t = per_cpu(ksoftirqd, cpu);
  2160. WARN_ON_ONCE(!t);
  2161. sp.sched_priority = 2;
  2162. sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
  2163. }
  2164. /* Don't allow time recalculation while creating a new task. */
  2165. mutex_lock(&boost_mutex);
  2166. rcu_torture_disable_rt_throttle();
  2167. VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
  2168. boost_tasks[cpu] = kthread_run_on_cpu(rcu_torture_boost, NULL,
  2169. cpu, "rcu_torture_boost_%u");
  2170. if (IS_ERR(boost_tasks[cpu])) {
  2171. retval = PTR_ERR(boost_tasks[cpu]);
  2172. VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
  2173. n_rcu_torture_boost_ktrerror++;
  2174. boost_tasks[cpu] = NULL;
  2175. mutex_unlock(&boost_mutex);
  2176. return retval;
  2177. }
  2178. mutex_unlock(&boost_mutex);
  2179. return 0;
  2180. }
  2181. /*
  2182. * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then
  2183. * induces a CPU stall for the time specified by stall_cpu.
  2184. */
  2185. static int rcu_torture_stall(void *args)
  2186. {
  2187. int idx;
  2188. unsigned long stop_at;
  2189. VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
  2190. if (stall_cpu_holdoff > 0) {
  2191. VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
  2192. schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
  2193. VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
  2194. }
  2195. if (!kthread_should_stop() && stall_gp_kthread > 0) {
  2196. VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall");
  2197. rcu_gp_set_torture_wait(stall_gp_kthread * HZ);
  2198. for (idx = 0; idx < stall_gp_kthread + 2; idx++) {
  2199. if (kthread_should_stop())
  2200. break;
  2201. schedule_timeout_uninterruptible(HZ);
  2202. }
  2203. }
  2204. if (!kthread_should_stop() && stall_cpu > 0) {
  2205. VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall");
  2206. stop_at = ktime_get_seconds() + stall_cpu;
2207. /* RCU CPU stall is expected behavior in the following code. */
  2208. idx = cur_ops->readlock();
  2209. if (stall_cpu_irqsoff)
  2210. local_irq_disable();
  2211. else if (!stall_cpu_block)
  2212. preempt_disable();
  2213. pr_alert("%s start on CPU %d.\n",
  2214. __func__, raw_smp_processor_id());
  2215. while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
  2216. stop_at))
  2217. if (stall_cpu_block) {
  2218. #ifdef CONFIG_PREEMPTION
  2219. preempt_schedule();
  2220. #else
  2221. schedule_timeout_uninterruptible(HZ);
  2222. #endif
  2223. } else if (stall_no_softlockup) {
  2224. touch_softlockup_watchdog();
  2225. }
  2226. if (stall_cpu_irqsoff)
  2227. local_irq_enable();
  2228. else if (!stall_cpu_block)
  2229. preempt_enable();
  2230. cur_ops->readunlock(idx);
  2231. }
  2232. pr_alert("%s end.\n", __func__);
  2233. torture_shutdown_absorb("rcu_torture_stall");
  2234. while (!kthread_should_stop())
  2235. schedule_timeout_interruptible(10 * HZ);
  2236. return 0;
  2237. }
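/*
 * Illustrative usage (hypothetical values): booting with
 *
 *	rcutorture.stall_cpu=22 rcutorture.stall_cpu_holdoff=30
 *
 * makes this kthread wait thirty seconds after startup and then spin for
 * twenty-two seconds inside a cur_ops->readlock() critical section, which
 * is intended to provoke RCU's CPU stall-warning machinery.  Adding
 * stall_cpu_irqsoff=1 also disables interrupts for the duration, while
 * stall_gp_kthread instead stalls the grace-period kthread via
 * rcu_gp_set_torture_wait().
 */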
  2238. /* Spawn CPU-stall kthread, if stall_cpu specified. */
  2239. static int __init rcu_torture_stall_init(void)
  2240. {
  2241. if (stall_cpu <= 0 && stall_gp_kthread <= 0)
  2242. return 0;
  2243. return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
  2244. }
  2245. /* State structure for forward-progress self-propagating RCU callback. */
  2246. struct fwd_cb_state {
  2247. struct rcu_head rh;
  2248. int stop;
  2249. };
  2250. /*
  2251. * Forward-progress self-propagating RCU callback function. Because
  2252. * callbacks run from softirq, this function is an implicit RCU read-side
  2253. * critical section.
  2254. */
  2255. static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
  2256. {
  2257. struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);
  2258. if (READ_ONCE(fcsp->stop)) {
  2259. WRITE_ONCE(fcsp->stop, 2);
  2260. return;
  2261. }
  2262. cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
  2263. }
  2264. /* State for continuous-flood RCU callbacks. */
  2265. struct rcu_fwd_cb {
  2266. struct rcu_head rh;
  2267. struct rcu_fwd_cb *rfc_next;
  2268. struct rcu_fwd *rfc_rfp;
  2269. int rfc_gps;
  2270. };
  2271. #define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */
  2272. #define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */
  2273. #define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */
  2274. #define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. */
  2275. #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
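/*
 * Worked example of the bucket count: MAX_FWD_CB_JIFFIES is 8 * HZ and
 * each histogram bucket covers HZ / FWD_CBS_HIST_DIV = HZ / 10 jiffies
 * (a tenth of a second), so N_LAUNDERS_HIST = 2 * 8 * HZ / (HZ / 10) =
 * 160 buckets regardless of HZ, twice the nominal eight-second test
 * duration, presumably to leave headroom for overrun.
 */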
  2276. struct rcu_launder_hist {
  2277. long n_launders;
  2278. unsigned long launder_gp_seq;
  2279. };
  2280. struct rcu_fwd {
  2281. spinlock_t rcu_fwd_lock;
  2282. struct rcu_fwd_cb *rcu_fwd_cb_head;
  2283. struct rcu_fwd_cb **rcu_fwd_cb_tail;
  2284. long n_launders_cb;
  2285. unsigned long rcu_fwd_startat;
  2286. struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
  2287. unsigned long rcu_launder_gp_seq_start;
  2288. int rcu_fwd_id;
  2289. };
  2290. static DEFINE_MUTEX(rcu_fwd_mutex);
  2291. static struct rcu_fwd *rcu_fwds;
  2292. static unsigned long rcu_fwd_seq;
  2293. static atomic_long_t rcu_fwd_max_cbs;
  2294. static bool rcu_fwd_emergency_stop;
  2295. static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
  2296. {
  2297. unsigned long gps;
  2298. unsigned long gps_old;
  2299. int i;
  2300. int j;
  2301. for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--)
  2302. if (rfp->n_launders_hist[i].n_launders > 0)
  2303. break;
  2304. pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):",
  2305. __func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat);
  2306. gps_old = rfp->rcu_launder_gp_seq_start;
  2307. for (j = 0; j <= i; j++) {
  2308. gps = rfp->n_launders_hist[j].launder_gp_seq;
  2309. pr_cont(" %ds/%d: %ld:%ld",
  2310. j + 1, FWD_CBS_HIST_DIV,
  2311. rfp->n_launders_hist[j].n_launders,
  2312. rcutorture_seq_diff(gps, gps_old));
  2313. gps_old = gps;
  2314. }
  2315. pr_cont("\n");
  2316. }
  2317. /* Callback function for continuous-flood RCU callbacks. */
  2318. static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
  2319. {
  2320. unsigned long flags;
  2321. int i;
  2322. struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
  2323. struct rcu_fwd_cb **rfcpp;
  2324. struct rcu_fwd *rfp = rfcp->rfc_rfp;
  2325. rfcp->rfc_next = NULL;
  2326. rfcp->rfc_gps++;
  2327. spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
  2328. rfcpp = rfp->rcu_fwd_cb_tail;
  2329. rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;
  2330. WRITE_ONCE(*rfcpp, rfcp);
  2331. WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1);
  2332. i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
  2333. if (i >= ARRAY_SIZE(rfp->n_launders_hist))
  2334. i = ARRAY_SIZE(rfp->n_launders_hist) - 1;
  2335. rfp->n_launders_hist[i].n_launders++;
  2336. rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
  2337. spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
  2338. }
  2339. // Give the scheduler a chance, even on nohz_full CPUs.
  2340. static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
  2341. {
  2342. if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
  2343. // Real call_rcu() floods hit userspace, so emulate that.
  2344. if (need_resched() || (iter & 0xfff))
  2345. schedule();
  2346. return;
  2347. }
  2348. // No userspace emulation: CB invocation throttles call_rcu()
  2349. cond_resched();
  2350. }
  2351. /*
  2352. * Free all callbacks on the rcu_fwd_cb_head list, either because the
  2353. * test is over or because we hit an OOM event.
  2354. */
  2355. static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp)
  2356. {
  2357. unsigned long flags;
  2358. unsigned long freed = 0;
  2359. struct rcu_fwd_cb *rfcp;
  2360. for (;;) {
  2361. spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
  2362. rfcp = rfp->rcu_fwd_cb_head;
  2363. if (!rfcp) {
  2364. spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
  2365. break;
  2366. }
  2367. rfp->rcu_fwd_cb_head = rfcp->rfc_next;
  2368. if (!rfp->rcu_fwd_cb_head)
  2369. rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
  2370. spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
  2371. kfree(rfcp);
  2372. freed++;
  2373. rcu_torture_fwd_prog_cond_resched(freed);
  2374. if (tick_nohz_full_enabled()) {
  2375. local_irq_save(flags);
  2376. rcu_momentary_dyntick_idle();
  2377. local_irq_restore(flags);
  2378. }
  2379. }
  2380. return freed;
  2381. }

/* Carry out need_resched()/cond_resched() forward-progress testing. */
static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
				    int *tested, int *tested_tries)
{
	unsigned long cver;
	unsigned long dur;
	struct fwd_cb_state fcs;
	unsigned long gps;
	int idx;
	int sd;
	int sd4;
	bool selfpropcb = false;
	unsigned long stopat;
	static DEFINE_TORTURE_RANDOM(trs);

	pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
	if (!cur_ops->sync)
		return; // Cannot do need_resched() forward progress testing without ->sync.
	if (cur_ops->call && cur_ops->cb_barrier) {
		init_rcu_head_on_stack(&fcs.rh);
		selfpropcb = true;
	}

	/* Tight loop containing cond_resched(). */
	atomic_inc(&rcu_fwd_cb_nodelay);
	cur_ops->sync(); /* Later readers see above write. */
	if (selfpropcb) {
		WRITE_ONCE(fcs.stop, 0);
		cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
	}
	cver = READ_ONCE(rcu_torture_current_version);
	gps = cur_ops->get_gp_seq();
	sd = cur_ops->stall_dur() + 1;
	sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
	dur = sd4 + torture_random(&trs) % (sd - sd4);
	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
	stopat = rfp->rcu_fwd_startat + dur;
	while (time_before(jiffies, stopat) &&
	       !shutdown_time_arrived() &&
	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		idx = cur_ops->readlock();
		udelay(10);
		cur_ops->readunlock(idx);
		if (!fwd_progress_need_resched || need_resched())
			cond_resched();
	}
	(*tested_tries)++;
	if (!time_before(jiffies, stopat) &&
	    !shutdown_time_arrived() &&
	    !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		(*tested)++;
		cver = READ_ONCE(rcu_torture_current_version) - cver;
		gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
		WARN_ON(!cver && gps < 2);
		pr_alert("%s: %d Duration %ld cver %ld gps %ld\n", __func__,
			 rfp->rcu_fwd_id, dur, cver, gps);
	}
	if (selfpropcb) {
		WRITE_ONCE(fcs.stop, 1);
		cur_ops->sync(); /* Wait for running CB to complete. */
		pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id);
		cur_ops->cb_barrier(); /* Wait for queued callbacks. */
	}
	if (selfpropcb) {
		WARN_ON(READ_ONCE(fcs.stop) != 2);
		destroy_rcu_head_on_stack(&fcs.rh);
	}
	schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
	atomic_dec(&rcu_fwd_cb_nodelay);
}

/* Carry out call_rcu() forward-progress testing. */
static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
{
	unsigned long cver;
	unsigned long flags;
	unsigned long gps;
	int i;
	long n_launders;
	long n_launders_cb_snap;
	long n_launders_sa;
	long n_max_cbs;
	long n_max_gps;
	struct rcu_fwd_cb *rfcp;
	struct rcu_fwd_cb *rfcpn;
	unsigned long stopat;
	unsigned long stoppedat;

	pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
	if (READ_ONCE(rcu_fwd_emergency_stop))
		return; /* Get out of the way quickly, no GP wait! */
	if (!cur_ops->call)
		return; /* Can't do call_rcu() fwd prog without ->call. */

	/* Loop continuously posting RCU callbacks. */
	atomic_inc(&rcu_fwd_cb_nodelay);
	cur_ops->sync(); /* Later readers see above write. */
	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
	stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
	n_launders = 0;
	rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread
	n_launders_sa = 0;
	n_max_cbs = 0;
	n_max_gps = 0;
	for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++)
		rfp->n_launders_hist[i].n_launders = 0;
	cver = READ_ONCE(rcu_torture_current_version);
	gps = cur_ops->get_gp_seq();
	rfp->rcu_launder_gp_seq_start = gps;
	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
	while (time_before(jiffies, stopat) &&
	       !shutdown_time_arrived() &&
	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		rfcp = READ_ONCE(rfp->rcu_fwd_cb_head);
		rfcpn = NULL;
		if (rfcp)
			rfcpn = READ_ONCE(rfcp->rfc_next);
		if (rfcpn) {
			if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
			    ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
				break;
			rfp->rcu_fwd_cb_head = rfcpn;
			n_launders++;
			n_launders_sa++;
		} else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) {
			rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
			if (WARN_ON_ONCE(!rfcp)) {
				schedule_timeout_interruptible(1);
				continue;
			}
			n_max_cbs++;
			n_launders_sa = 0;
			rfcp->rfc_gps = 0;
			rfcp->rfc_rfp = rfp;
		} else {
			rfcp = NULL;
		}
		if (rfcp)
			cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
		rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
		if (tick_nohz_full_enabled()) {
			local_irq_save(flags);
			rcu_momentary_dyntick_idle();
			local_irq_restore(flags);
		}
	}
	stoppedat = jiffies;
	n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb);
	cver = READ_ONCE(rcu_torture_current_version) - cver;
	gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
	pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id);
	cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
	(void)rcu_torture_fwd_prog_cbfree(rfp);
	if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) &&
	    !shutdown_time_arrived()) {
		WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
		pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
			 __func__,
			 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat,
			 n_launders + n_max_cbs - n_launders_cb_snap,
			 n_launders, n_launders_sa,
			 n_max_gps, n_max_cbs, cver, gps);
		atomic_long_add(n_max_cbs, &rcu_fwd_max_cbs);
		mutex_lock(&rcu_fwd_mutex); // Serialize histograms.
		rcu_torture_fwd_cb_hist(rfp);
		mutex_unlock(&rcu_fwd_mutex);
	}
	schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
	atomic_dec(&rcu_fwd_cb_nodelay);
}

/*
 * OOM notifier, but this only prints diagnostic information for the
 * current forward-progress test.
 */
static int rcutorture_oom_notify(struct notifier_block *self,
				 unsigned long notused, void *nfreed)
{
	int i;
	long ncbs;
	struct rcu_fwd *rfp;

	mutex_lock(&rcu_fwd_mutex);
	rfp = rcu_fwds;
	if (!rfp) {
		mutex_unlock(&rcu_fwd_mutex);
		return NOTIFY_OK;
	}
	WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
	     __func__);
	for (i = 0; i < fwd_progress; i++) {
		rcu_torture_fwd_cb_hist(&rfp[i]);
		rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp[i].rcu_fwd_startat)) / 2);
	}
	WRITE_ONCE(rcu_fwd_emergency_stop, true);
	smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
	ncbs = 0;
	for (i = 0; i < fwd_progress; i++)
		ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
	pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
	cur_ops->cb_barrier();
	ncbs = 0;
	for (i = 0; i < fwd_progress; i++)
		ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
	pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
	cur_ops->cb_barrier();
	ncbs = 0;
	for (i = 0; i < fwd_progress; i++)
		ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
	pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
	smp_mb(); /* Frees before return to avoid redoing OOM. */
	(*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
	pr_info("%s returning after OOM processing.\n", __func__);
	mutex_unlock(&rcu_fwd_mutex);
	return NOTIFY_OK;
}

static struct notifier_block rcutorture_oom_nb = {
	.notifier_call = rcutorture_oom_notify
};

/* Carry out grace-period forward-progress testing. */
static int rcu_torture_fwd_prog(void *args)
{
	bool firsttime = true;
	long max_cbs;
	int oldnice = task_nice(current);
	unsigned long oldseq = READ_ONCE(rcu_fwd_seq);
	struct rcu_fwd *rfp = args;
	int tested = 0;
	int tested_tries = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
	rcu_bind_current_to_nocb();
	if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
		set_user_nice(current, MAX_NICE);
	do {
		if (!rfp->rcu_fwd_id) {
			schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
			WRITE_ONCE(rcu_fwd_emergency_stop, false);
			if (!firsttime) {
				max_cbs = atomic_long_xchg(&rcu_fwd_max_cbs, 0);
				pr_alert("%s n_max_cbs: %ld\n", __func__, max_cbs);
			}
			firsttime = false;
			WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1);
		} else {
			while (READ_ONCE(rcu_fwd_seq) == oldseq && !torture_must_stop())
				schedule_timeout_interruptible(1);
			oldseq = READ_ONCE(rcu_fwd_seq);
		}
		pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
		if (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id)
			rcu_torture_fwd_prog_cr(rfp);
		if ((cur_ops->stall_dur && cur_ops->stall_dur() > 0) &&
		    (!IS_ENABLED(CONFIG_TINY_RCU) ||
		     (rcu_inkernel_boot_has_ended() &&
		      torture_num_online_cpus() > rfp->rcu_fwd_id)))
			rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries);

		/* Avoid slow periods, better to test when busy. */
		if (stutter_wait("rcu_torture_fwd_prog"))
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	/* Short runs might not contain a valid forward-progress attempt. */
	if (!rfp->rcu_fwd_id) {
		WARN_ON(!tested && tested_tries >= 5);
		pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
	}
	torture_kthread_stopping("rcu_torture_fwd_prog");
	return 0;
}

/* If forward-progress checking is requested and feasible, spawn the thread. */
static int __init rcu_torture_fwd_prog_init(void)
{
	int i;
	int ret = 0;
	struct rcu_fwd *rfp;

	if (!fwd_progress)
		return 0; /* Not requested, so don't do it. */
	if (fwd_progress >= nr_cpu_ids) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Limiting fwd_progress to # CPUs.\n");
		fwd_progress = nr_cpu_ids;
	} else if (fwd_progress < 0) {
		fwd_progress = nr_cpu_ids;
	}
	if ((!cur_ops->sync && !cur_ops->call) ||
	    (!cur_ops->cbflood_max && (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0)) ||
	    cur_ops == &rcu_busted_ops) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
		fwd_progress = 0;
		return 0;
	}
	if (stall_cpu > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
		fwd_progress = 0;
		if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
			return -EINVAL; /* In module, can fail back to user. */
		WARN_ON(1); /* Make sure rcutorture notices conflict. */
		return 0;
	}
	if (fwd_progress_holdoff <= 0)
		fwd_progress_holdoff = 1;
	if (fwd_progress_div <= 0)
		fwd_progress_div = 4;
	rfp = kcalloc(fwd_progress, sizeof(*rfp), GFP_KERNEL);
	fwd_prog_tasks = kcalloc(fwd_progress, sizeof(*fwd_prog_tasks), GFP_KERNEL);
	if (!rfp || !fwd_prog_tasks) {
		kfree(rfp);
		kfree(fwd_prog_tasks);
		fwd_prog_tasks = NULL;
		fwd_progress = 0;
		return -ENOMEM;
	}
	for (i = 0; i < fwd_progress; i++) {
		spin_lock_init(&rfp[i].rcu_fwd_lock);
		rfp[i].rcu_fwd_cb_tail = &rfp[i].rcu_fwd_cb_head;
		rfp[i].rcu_fwd_id = i;
	}
	mutex_lock(&rcu_fwd_mutex);
	rcu_fwds = rfp;
	mutex_unlock(&rcu_fwd_mutex);
	register_oom_notifier(&rcutorture_oom_nb);
	for (i = 0; i < fwd_progress; i++) {
		ret = torture_create_kthread(rcu_torture_fwd_prog, &rcu_fwds[i], fwd_prog_tasks[i]);
		if (ret) {
			fwd_progress = i;
			return ret;
		}
	}
	return 0;
}
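
/* Stop the forward-progress kthreads and free their per-kthread state. */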
static void rcu_torture_fwd_prog_cleanup(void)
{
	int i;
	struct rcu_fwd *rfp;

	if (!rcu_fwds || !fwd_prog_tasks)
		return;
	for (i = 0; i < fwd_progress; i++)
		torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_tasks[i]);
	unregister_oom_notifier(&rcutorture_oom_nb);
	mutex_lock(&rcu_fwd_mutex);
	rfp = rcu_fwds;
	rcu_fwds = NULL;
	mutex_unlock(&rcu_fwd_mutex);
	kfree(rfp);
	kfree(fwd_prog_tasks);
	fwd_prog_tasks = NULL;
}

/* Callback function for RCU barrier testing. */
static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
{
	atomic_inc(&barrier_cbs_invoked);
}

/* IPI handler to get callback posted on desired CPU, if online. */
static void rcu_torture_barrier1cb(void *rcu_void)
{
	struct rcu_head *rhp = rcu_void;

	cur_ops->call(rhp, rcu_torture_barrier_cbf);
}

/* kthread function to register callbacks used to test RCU barriers. */
static int rcu_torture_barrier_cbs(void *arg)
{
	long myid = (long)arg;
	bool lastphase = false;
	bool newphase;
	struct rcu_head rcu;

	init_rcu_head_on_stack(&rcu);
	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
	set_user_nice(current, MAX_NICE);
	do {
		wait_event(barrier_cbs_wq[myid],
			   (newphase =
			    smp_load_acquire(&barrier_phase)) != lastphase ||
			   torture_must_stop());
		lastphase = newphase;
		if (torture_must_stop())
			break;
		/*
		 * The above smp_load_acquire() ensures barrier_phase load
		 * is ordered before the following ->call().
		 */
		if (smp_call_function_single(myid, rcu_torture_barrier1cb,
					     &rcu, 1)) {
			// IPI failed, so use direct call from current CPU.
			cur_ops->call(&rcu, rcu_torture_barrier_cbf);
		}
		if (atomic_dec_and_test(&barrier_cbs_count))
			wake_up(&barrier_wq);
	} while (!torture_must_stop());
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();
	destroy_rcu_head_on_stack(&rcu);
	torture_kthread_stopping("rcu_torture_barrier_cbs");
	return 0;
}

/* kthread function to drive and coordinate RCU barrier testing. */
static int rcu_torture_barrier(void *arg)
{
	int i;

	VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
	do {
		atomic_set(&barrier_cbs_invoked, 0);
		atomic_set(&barrier_cbs_count, n_barrier_cbs);
		/* Ensure barrier_phase ordered after prior assignments. */
		smp_store_release(&barrier_phase, !barrier_phase);
		for (i = 0; i < n_barrier_cbs; i++)
			wake_up(&barrier_cbs_wq[i]);
		wait_event(barrier_wq,
			   atomic_read(&barrier_cbs_count) == 0 ||
			   torture_must_stop());
		if (torture_must_stop())
			break;
		n_barrier_attempts++;
		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
			n_rcu_torture_barrier_error++;
			pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
			       atomic_read(&barrier_cbs_invoked),
			       n_barrier_cbs);
			WARN_ON(1);
			// Wait manually for the remaining callbacks
			i = 0;
			do {
				if (WARN_ON(i++ > HZ))
					i = INT_MIN;
				schedule_timeout_interruptible(1);
				cur_ops->cb_barrier();
			} while (atomic_read(&barrier_cbs_invoked) !=
				 n_barrier_cbs &&
				 !torture_must_stop());
			smp_mb(); // Can't trust ordering if broken.
			if (!torture_must_stop())
				pr_err("Recovered: barrier_cbs_invoked = %d\n",
				       atomic_read(&barrier_cbs_invoked));
		} else {
			n_barrier_successes++;
		}
		schedule_timeout_interruptible(HZ / 10);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_barrier");
	return 0;
}

/* Initialize RCU barrier testing. */
static int rcu_torture_barrier_init(void)
{
	int i;
	int ret;

	if (n_barrier_cbs <= 0)
		return 0;
	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
		pr_alert("%s" TORTURE_FLAG
			 " Call or barrier ops missing for %s,\n",
			 torture_type, cur_ops->name);
		pr_alert("%s" TORTURE_FLAG
			 " RCU barrier testing omitted from run.\n",
			 torture_type);
		return 0;
	}
	atomic_set(&barrier_cbs_count, 0);
	atomic_set(&barrier_cbs_invoked, 0);
	barrier_cbs_tasks =
		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
			GFP_KERNEL);
	barrier_cbs_wq =
		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
	if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
		return -ENOMEM;
	for (i = 0; i < n_barrier_cbs; i++) {
		init_waitqueue_head(&barrier_cbs_wq[i]);
		ret = torture_create_kthread(rcu_torture_barrier_cbs,
					     (void *)(long)i,
					     barrier_cbs_tasks[i]);
		if (ret)
			return ret;
	}
	return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
}

/* Clean up after RCU barrier testing. */
static void rcu_torture_barrier_cleanup(void)
{
	int i;

	torture_stop_kthread(rcu_torture_barrier, barrier_task);
	if (barrier_cbs_tasks != NULL) {
		for (i = 0; i < n_barrier_cbs; i++)
			torture_stop_kthread(rcu_torture_barrier_cbs,
					     barrier_cbs_tasks[i]);
		kfree(barrier_cbs_tasks);
		barrier_cbs_tasks = NULL;
	}
	if (barrier_cbs_wq != NULL) {
		kfree(barrier_cbs_wq);
		barrier_cbs_wq = NULL;
	}
}
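
/* Is real-time priority boosting of RCU readers testable in this configuration? */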
static bool rcu_torture_can_boost(void)
{
	static int boost_warn_once;
	int prio;

	if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
		return false;
	if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)
		return false;
	prio = rcu_get_gp_kthreads_prio();
	if (!prio)
		return false;
	if (prio < 2) {
		if (boost_warn_once == 1)
			return false;
		pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
		boost_warn_once = 1;
		return false;
	}
	return true;
}
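
// State shared between the read-exit kthread and its cleanup path: stop
// request, stop acknowledgment, and the waitqueue used to signal it.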
static bool read_exit_child_stop;
static bool read_exit_child_stopped;
static wait_queue_head_t read_exit_wq;

// Child kthread which just does an rcutorture reader and exits.
static int rcu_torture_read_exit_child(void *trsp_in)
{
	struct torture_random_state *trsp = trsp_in;

	set_user_nice(current, MAX_NICE);
	// Minimize time between reading and exiting.
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	(void)rcu_torture_one_read(trsp, -1);
	return 0;
}

// Parent kthread which creates and destroys read-exit child kthreads.
static int rcu_torture_read_exit(void *unused)
{
	bool errexit = false;
	int i;
	struct task_struct *tsp;
	DEFINE_TORTURE_RANDOM(trs);

	// Allocate and initialize.
	set_user_nice(current, MAX_NICE);
	VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test");

	// Each pass through this loop does one read-exit episode.
	do {
		VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode");
		for (i = 0; i < read_exit_burst; i++) {
			if (READ_ONCE(read_exit_child_stop))
				break;
			stutter_wait("rcu_torture_read_exit");
			// Spawn child.
			tsp = kthread_run(rcu_torture_read_exit_child,
					  &trs, "%s", "rcu_torture_read_exit_child");
			if (IS_ERR(tsp)) {
				TOROUT_ERRSTRING("out of memory");
				errexit = true;
				break;
			}
			cond_resched();
			kthread_stop(tsp);
			n_read_exits++;
		}
		VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode");
		rcu_barrier(); // Wait for task_struct free, avoid OOM.
		i = 0;
		for (; !errexit && !READ_ONCE(read_exit_child_stop) && i < read_exit_delay; i++)
			schedule_timeout_uninterruptible(HZ);
	} while (!errexit && !READ_ONCE(read_exit_child_stop));

	// Clean up and exit.
	smp_store_release(&read_exit_child_stopped, true); // After reaping.
	smp_mb(); // Store before wakeup.
	wake_up(&read_exit_wq);
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(1);
	torture_kthread_stopping("rcu_torture_read_exit");
	return 0;
}
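
// Spawn the read-exit kthread, but only if read_exit_burst testing was requested.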
static int rcu_torture_read_exit_init(void)
{
	if (read_exit_burst <= 0)
		return 0;
	init_waitqueue_head(&read_exit_wq);
	read_exit_child_stop = false;
	read_exit_child_stopped = false;
	return torture_create_kthread(rcu_torture_read_exit, NULL,
				      read_exit_task);
}
static void rcu_torture_read_exit_cleanup(void)
{
	if (!read_exit_task)
		return;
	WRITE_ONCE(read_exit_child_stop, true);
	smp_mb(); // Above write before wait.
	wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped));
	torture_stop_kthread(rcu_torture_read_exit, read_exit_task);
}

static enum cpuhp_state rcutor_hp;
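
/*
 * Shut down the test: stop all kthreads, wait for any remaining callbacks,
 * and print end-of-test grace-period state and reader-segment diagnostics.
 */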
static void
rcu_torture_cleanup(void)
{
	int firsttime;
	int flags = 0;
	unsigned long gp_seq = 0;
	int i;

	if (torture_cleanup_begin()) {
		if (cur_ops->cb_barrier != NULL) {
			pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
			cur_ops->cb_barrier();
		}
		rcu_gp_slow_unregister(NULL);
		return;
	}
	if (!cur_ops) {
		torture_cleanup_end();
		rcu_gp_slow_unregister(NULL);
		return;
	}

	if (cur_ops->gp_kthread_dbg)
		cur_ops->gp_kthread_dbg();
	rcu_torture_read_exit_cleanup();
	rcu_torture_barrier_cleanup();
	rcu_torture_fwd_prog_cleanup();
	torture_stop_kthread(rcu_torture_stall, stall_task);
	torture_stop_kthread(rcu_torture_writer, writer_task);

	if (nocb_tasks) {
		for (i = 0; i < nrealnocbers; i++)
			torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]);
		kfree(nocb_tasks);
		nocb_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}
	kfree(rcu_torture_reader_mbchk);
	rcu_torture_reader_mbchk = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++)
			torture_stop_kthread(rcu_torture_fakewriter,
					     fakewriter_tasks[i]);
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
	pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n",
		 cur_ops->name, (long)gp_seq, flags,
		 rcutorture_seq_diff(gp_seq, start_gp_seq));
	torture_stop_kthread(rcu_torture_stats, stats_task);
	torture_stop_kthread(rcu_torture_fqs, fqs_task);
	if (rcu_torture_can_boost() && rcutor_hp >= 0)
		cpuhp_remove_state(rcutor_hp);

	/*
	 * Wait for all RCU callbacks to fire, then do torture-type-specific
	 * cleanup operations.
	 */
	if (cur_ops->cb_barrier != NULL) {
		pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
		cur_ops->cb_barrier();
	}
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	rcu_torture_mem_dump_obj();

	rcu_torture_stats_print(); /* -After- the stats thread is stopped! */

	if (err_segs_recorded) {
		pr_alert("Failure/close-call rcutorture reader segments:\n");
		if (rt_read_nsegs == 0)
			pr_alert("\t: No segments recorded!!!\n");
		firsttime = 1;
		for (i = 0; i < rt_read_nsegs; i++) {
			pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
			if (err_segs[i].rt_delay_jiffies != 0) {
				pr_cont("%s%ldjiffies", firsttime ? "" : "+",
					err_segs[i].rt_delay_jiffies);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_ms != 0) {
				pr_cont("%s%ldms", firsttime ? "" : "+",
					err_segs[i].rt_delay_ms);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_us != 0) {
				pr_cont("%s%ldus", firsttime ? "" : "+",
					err_segs[i].rt_delay_us);
				firsttime = 0;
			}
			pr_cont("%s\n",
				err_segs[i].rt_preempted ? "preempted" : "");
		}
	}
	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
	else if (torture_onoff_failures())
		rcu_torture_print_module_parms(cur_ops,
					       "End of test: RCU_HOTPLUG");
	else
		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
	torture_cleanup_end();
	rcu_gp_slow_unregister(&rcu_fwd_cb_nodelay);
}

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
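// Empty callback: used by rcu_test_debug_objects() as the first, legitimate
// callback of each duplicate-callback pair.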
static void rcu_torture_leak_cb(struct rcu_head *rhp)
{
}

static void rcu_torture_err_cb(struct rcu_head *rhp)
{
	/*
	 * This -might- happen due to race conditions, but is unlikely.
	 * The scenario that leads to this happening is that the
	 * first of the pair of duplicate callbacks is queued,
	 * someone else starts a grace period that includes that
	 * callback, then the second of the pair must wait for the
	 * next grace period. Unlikely, but can happen. If it
	 * does happen, the debug-objects subsystem won't have splatted.
	 */
	pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
}
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

/*
 * Verify that double-free causes debug-objects to complain, but only
 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test
 * cannot be carried out.
 */
static void rcu_test_debug_objects(void)
{
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
	struct rcu_head rh1;
	struct rcu_head rh2;
	struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);

	init_rcu_head_on_stack(&rh1);
	init_rcu_head_on_stack(&rh2);
	pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);

	/* Try to queue the rh2 pair of callbacks for the same grace period. */
	preempt_disable(); /* Prevent preemption from interrupting test. */
	rcu_read_lock(); /* Make it impossible to finish a grace period. */
	call_rcu_hurry(&rh1, rcu_torture_leak_cb); /* Start grace period. */
	local_irq_disable(); /* Make it harder to start a new grace period. */
	call_rcu_hurry(&rh2, rcu_torture_leak_cb);
	call_rcu_hurry(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
	if (rhp) {
		call_rcu_hurry(rhp, rcu_torture_leak_cb);
		call_rcu_hurry(rhp, rcu_torture_err_cb); /* Another duplicate callback. */
	}
	local_irq_enable();
	rcu_read_unlock();
	preempt_enable();

	/* Wait for them all to get done so we can safely return. */
	rcu_barrier();
	pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
	destroy_rcu_head_on_stack(&rh1);
	destroy_rcu_head_on_stack(&rh2);
	kfree(rhp);
#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
	pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
}
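
/* Occasional synchronization, used as the sync hook passed to torture_onoff_init(). */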
static void rcutorture_sync(void)
{
	static unsigned long n;

	if (cur_ops->sync && !(++n & 0xfff))
		cur_ops->sync();
}
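
/*
 * Module init: select the RCU flavor named by the torture_type parameter,
 * initialize statistics and the object freelist, and spawn the reader,
 * writer, and housekeeping kthreads.
 */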
static int __init
rcu_torture_init(void)
{
	long i;
	int cpu;
	int firsterr = 0;
	int flags = 0;
	unsigned long gp_seq = 0;
	static struct rcu_torture_ops *torture_ops[] = {
		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, &busted_srcud_ops,
		TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS
		&trivial_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("rcu-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_cont(" %s", torture_ops[i]->name);
		pr_cont("\n");
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->fqs == NULL && fqs_duration != 0) {
		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
		fqs_duration = 0;
	}
	if (cur_ops->init)
		cur_ops->init();

	if (nreaders >= 0) {
		nrealreaders = nreaders;
	} else {
		nrealreaders = num_online_cpus() - 2 - nreaders;
		if (nrealreaders <= 0)
			nrealreaders = 1;
	}
	rcu_torture_print_module_parms(cur_ops, "Start of test");
	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
	start_gp_seq = gp_seq;
	pr_alert("%s: Start-test grace-period state: g%ld f%#x\n",
		 cur_ops->name, (long)gp_seq, flags);

	/* Set up the freelist. */
	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */
	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_mbchk_fail, 0);
	atomic_set(&n_rcu_torture_mbchk_tries, 0);
	atomic_set(&n_rcu_torture_error, 0);
	n_rcu_torture_barrier_error = 0;
	n_rcu_torture_boost_ktrerror = 0;
	n_rcu_torture_boost_rterror = 0;
	n_rcu_torture_boost_failure = 0;
	n_rcu_torture_boosts = 0;
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}
	err_segs_recorded = 0;
	rt_read_nsegs = 0;

	/* Start up the kthreads. */
	rcu_torture_write_types();
	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
					  writer_task);
	if (torture_init_error(firsterr))
		goto unwind;
	if (nfakewriters > 0) {
		fakewriter_tasks = kcalloc(nfakewriters,
					   sizeof(fakewriter_tasks[0]),
					   GFP_KERNEL);
		if (fakewriter_tasks == NULL) {
			TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}
	for (i = 0; i < nfakewriters; i++) {
		firsterr = torture_create_kthread(rcu_torture_fakewriter,
						  NULL, fakewriter_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk),
					   GFP_KERNEL);
	if (!reader_tasks || !rcu_torture_reader_mbchk) {
		TOROUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		rcu_torture_reader_mbchk[i].rtc_chkrdr = -1;
		firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
						  reader_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	nrealnocbers = nocbs_nthreads;
	if (WARN_ON(nrealnocbers < 0))
		nrealnocbers = 1;
	if (WARN_ON(nocbs_toggle < 0))
		nocbs_toggle = HZ;
	if (nrealnocbers > 0) {
		nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL);
		if (nocb_tasks == NULL) {
			TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	} else {
		nocb_tasks = NULL;
	}
	for (i = 0; i < nrealnocbers; i++) {
		firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
						  stats_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (test_no_idle_hz && shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval * HZ);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		int t;

		t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
		firsterr = torture_stutter_init(stutter * HZ, t);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_duration) {
		/* Create the fqs thread */
		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
						  fqs_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (test_boost_interval < 1)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
	if (rcu_torture_can_boost()) {
		boost_starttime = jiffies + test_boost_interval * HZ;
		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
					     rcutorture_booster_init,
					     rcutorture_booster_cleanup);
		rcutor_hp = firsterr;
		if (torture_init_error(firsterr))
			goto unwind;
	}
	shutdown_jiffies = jiffies + shutdown_secs * HZ;
	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
				      rcutorture_sync);
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_stall_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_fwd_prog_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_barrier_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_read_exit_init();
	if (torture_init_error(firsterr))
		goto unwind;
	if (object_debug)
		rcu_test_debug_objects();
	torture_init_end();
	rcu_gp_slow_register(&rcu_fwd_cb_nodelay);
	return 0;

unwind:
	torture_init_end();
	rcu_torture_cleanup();
	if (shutdown_secs) {
		WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);