
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rcu

#if !defined(_TRACE_RCU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RCU_H

#include <linux/tracepoint.h>

#ifdef CONFIG_RCU_TRACE
#define TRACE_EVENT_RCU TRACE_EVENT
#else
#define TRACE_EVENT_RCU TRACE_EVENT_NOP
#endif

/*
 * Tracepoint for start/end markers used for utilization calculations.
 * By convention, the string is of the following forms:
 *
 * "Start <activity>" -- Mark the start of the specified activity,
 *                       such as "context switch". Nesting is permitted.
 * "End <activity>" -- Mark the end of the specified activity.
 *
 * An "@" character within "<activity>" is a comment character: Data
 * reduction scripts will ignore the "@" and the remainder of the line.
 */
TRACE_EVENT(rcu_utilization,
        TP_PROTO(const char *s),
        TP_ARGS(s),
        TP_STRUCT__entry(
                __field(const char *, s)
        ),
        TP_fast_assign(
                __entry->s = s;
        ),
        TP_printk("%s", __entry->s)
);
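
/*
 * Illustrative sketch, not a quote of kernel code: a caller would emit
 * this event through the generated trace_rcu_utilization() stub, for
 * example to bracket a context switch for utilization post-processing.
 * The TPS() wrapper (tracepoint_string()) is assumed here as the usual
 * way constant strings are passed to RCU tracepoints:
 *
 *      trace_rcu_utilization(TPS("Start context switch"));
 *      ...
 *      trace_rcu_utilization(TPS("End context switch"));
 */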

#if defined(CONFIG_TREE_RCU)

/*
 * Tracepoint for grace-period events. Takes a string identifying the
 * RCU flavor, the grace-period number, and a string identifying the
 * grace-period-related event as follows:
 *
 * "AccReadyCB": CPU accelerates new callbacks to RCU_NEXT_READY_TAIL.
 * "AccWaitCB": CPU accelerates new callbacks to RCU_WAIT_TAIL.
 * "newreq": Request a new grace period.
 * "start": Start a grace period.
 * "cpustart": CPU first notices a grace-period start.
 * "cpuqs": CPU passes through a quiescent state.
 * "cpuonl": CPU comes online.
 * "cpuofl": CPU goes offline.
 * "cpuofl-bgp": CPU goes offline while blocking a grace period.
 * "reqwait": GP kthread sleeps waiting for grace-period request.
 * "reqwaitsig": GP kthread awakened by signal from reqwait state.
 * "fqswait": GP kthread waiting until time to force quiescent states.
 * "fqsstart": GP kthread starts forcing quiescent states.
 * "fqsend": GP kthread done forcing quiescent states.
 * "fqswaitsig": GP kthread awakened by signal from fqswait state.
 * "end": End a grace period.
 * "cpuend": CPU first notices a grace-period end.
 */
TRACE_EVENT_RCU(rcu_grace_period,
        TP_PROTO(const char *rcuname, unsigned long gp_seq, const char *gpevent),
        TP_ARGS(rcuname, gp_seq, gpevent),
        TP_STRUCT__entry(
                __field(const char *, rcuname)
                __field(long, gp_seq)
                __field(const char *, gpevent)
        ),
        TP_fast_assign(
                __entry->rcuname = rcuname;
                __entry->gp_seq = (long)gp_seq;
                __entry->gpevent = gpevent;
        ),
        TP_printk("%s %ld %s",
                  __entry->rcuname, __entry->gp_seq, __entry->gpevent)
);
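
/*
 * Illustrative sketch, not a quote of kernel code: the grace-period
 * kthread would typically report these phases with calls such as the
 * following, where rcu_state.name and rcu_state.gp_seq are assumed to
 * hold the flavor name and the current grace-period sequence number:
 *
 *      trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
 *      ...
 *      trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
 */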

/*
 * Tracepoint for future grace-period events. The caller should pull
 * the data from the rcu_node structure, other than rcuname, which comes
 * from the rcu_state structure, and event, which is one of the following:
 *
 * "Cleanup": Clean up rcu_node structure after previous GP.
 * "CleanupMore": Clean up, and another GP is needed.
 * "EndWait": Complete wait.
 * "NoGPkthread": The RCU grace-period kthread has not yet started.
 * "Prestarted": Someone beat us to the request
 * "Startedleaf": Leaf node marked for future GP.
 * "Startedleafroot": All nodes from leaf to root marked for future GP.
 * "Startedroot": Requested a nocb grace period based on root-node data.
 * "Startleaf": Request a grace period based on leaf-node data.
 * "StartWait": Start waiting for the requested grace period.
 */
TRACE_EVENT_RCU(rcu_future_grace_period,
        TP_PROTO(const char *rcuname, unsigned long gp_seq,
                 unsigned long gp_seq_req, u8 level, int grplo, int grphi,
                 const char *gpevent),
        TP_ARGS(rcuname, gp_seq, gp_seq_req, level, grplo, grphi, gpevent),
        TP_STRUCT__entry(
                __field(const char *, rcuname)
                __field(long, gp_seq)
                __field(long, gp_seq_req)
                __field(u8, level)
                __field(int, grplo)
                __field(int, grphi)
                __field(const char *, gpevent)
        ),
        TP_fast_assign(
                __entry->rcuname = rcuname;
                __entry->gp_seq = (long)gp_seq;
                __entry->gp_seq_req = (long)gp_seq_req;
                __entry->level = level;
                __entry->grplo = grplo;
                __entry->grphi = grphi;
                __entry->gpevent = gpevent;
        ),
        TP_printk("%s %ld %ld %u %d %d %s",
                  __entry->rcuname, (long)__entry->gp_seq, (long)__entry->gp_seq_req, __entry->level,
                  __entry->grplo, __entry->grphi, __entry->gpevent)
);

/*
 * Tracepoint for grace-period-initialization events. These are
 * distinguished by the type of RCU, the new grace-period number, the
 * rcu_node structure level, the starting and ending CPU covered by the
 * rcu_node structure, and the mask of CPUs that will be waited for.
 * All but the type of RCU are extracted from the rcu_node structure.
 */
TRACE_EVENT_RCU(rcu_grace_period_init,
        TP_PROTO(const char *rcuname, unsigned long gp_seq, u8 level,
                 int grplo, int grphi, unsigned long qsmask),
        TP_ARGS(rcuname, gp_seq, level, grplo, grphi, qsmask),
        TP_STRUCT__entry(
                __field(const char *, rcuname)
                __field(long, gp_seq)
                __field(u8, level)
                __field(int, grplo)
                __field(int, grphi)
                __field(unsigned long, qsmask)
        ),
        TP_fast_assign(
                __entry->rcuname = rcuname;
                __entry->gp_seq = (long)gp_seq;
                __entry->level = level;
                __entry->grplo = grplo;
                __entry->grphi = grphi;
                __entry->qsmask = qsmask;
        ),
        TP_printk("%s %ld %u %d %d %lx",
                  __entry->rcuname, __entry->gp_seq, __entry->level,
                  __entry->grplo, __entry->grphi, __entry->qsmask)
);

/*
 * Tracepoint for expedited grace-period events. Takes a string identifying
 * the RCU flavor, the expedited grace-period sequence number, and a string
 * identifying the grace-period-related event as follows:
 *
 * "snap": Captured snapshot of expedited grace period sequence number.
 * "start": Started a real expedited grace period.
 * "reset": Started resetting the tree
 * "select": Started selecting the CPUs to wait on.
 * "selectofl": Selected CPU partially offline.
 * "startwait": Started waiting on selected CPUs.
 * "end": Ended a real expedited grace period.
 * "endwake": Woke piggybackers up.
 * "done": Someone else did the expedited grace period for us.
 */
TRACE_EVENT_RCU(rcu_exp_grace_period,
        TP_PROTO(const char *rcuname, unsigned long gpseq, const char *gpevent),
        TP_ARGS(rcuname, gpseq, gpevent),
        TP_STRUCT__entry(
                __field(const char *, rcuname)
                __field(long, gpseq)
                __field(const char *, gpevent)
        ),
        TP_fast_assign(
                __entry->rcuname = rcuname;
                __entry->gpseq = (long)gpseq;
                __entry->gpevent = gpevent;
        ),
        TP_printk("%s %ld %s",
                  __entry->rcuname, __entry->gpseq, __entry->gpevent)
);

/*
 * Tracepoint for expedited grace-period funnel-locking events. Takes a
 * string identifying the RCU flavor, an integer identifying the rcu_node
 * combining-tree level, another pair of integers identifying the lowest-
 * and highest-numbered CPU associated with the current rcu_node structure,
 * and a string identifying the grace-period-related event as follows:
 *
 * "nxtlvl": Advance to next level of rcu_node funnel
 * "wait": Wait for someone else to do expedited GP
 */
TRACE_EVENT_RCU(rcu_exp_funnel_lock,
        TP_PROTO(const char *rcuname, u8 level, int grplo, int grphi,
                 const char *gpevent),
        TP_ARGS(rcuname, level, grplo, grphi, gpevent),
        TP_STRUCT__entry(
                __field(const char *, rcuname)
                __field(u8, level)
                __field(int, grplo)
                __field(int, grphi)
                __field(const char *, gpevent)
        ),
        TP_fast_assign(
                __entry->rcuname = rcuname;
                __entry->level = level;
                __entry->grplo = grplo;
                __entry->grphi = grphi;
                __entry->gpevent = gpevent;
        ),
        TP_printk("%s %d %d %d %s",
                  __entry->rcuname, __entry->level, __entry->grplo,
                  __entry->grphi, __entry->gpevent)
);

#ifdef CONFIG_RCU_NOCB_CPU
/*
 * Tracepoint for RCU no-CBs CPU callback handoffs. This event is intended
 * to assist debugging of these handoffs.
 *
 * The first argument is the name of the RCU flavor, and the second is
 * the number of the offloaded CPU. The third and final argument is a
 * string as follows:
 *
 * "AlreadyAwake": The to-be-awakened rcuo kthread is already awake.
 * "Bypass": rcuo GP kthread sees non-empty ->nocb_bypass.
 * "CBSleep": rcuo CB kthread sleeping waiting for CBs.
 * "Check": rcuo GP kthread checking specified CPU for work.
 * "DeferredWake": Timer expired or polled check, time to wake.
 * "DoWake": The to-be-awakened rcuo kthread needs to be awakened.
 * "EndSleep": Done waiting for GP for !rcu_nocb_poll.
 * "FirstBQ": New CB to empty ->nocb_bypass (->cblist maybe non-empty).
 * "FirstBQnoWake": FirstBQ plus rcuo kthread need not be awakened.
 * "FirstBQwake": FirstBQ plus rcuo kthread must be awakened.
 * "FirstQ": New CB to empty ->cblist (->nocb_bypass maybe non-empty).
 * "NeedWaitGP": rcuo GP kthread must wait on a grace period.
 * "Poll": Start of new polling cycle for rcu_nocb_poll.
 * "Sleep": Sleep waiting for GP for !rcu_nocb_poll.
 * "Timer": Deferred-wake timer expired.
 * "WakeEmptyIsDeferred": Wake rcuo kthread later, first CB to empty list.
 * "WakeEmpty": Wake rcuo kthread, first CB to empty list.
 * "WakeNot": Don't wake rcuo kthread.
 * "WakeNotPoll": Don't wake rcuo kthread because it is polling.
 * "WakeOvfIsDeferred": Wake rcuo kthread later, CB list is huge.
 * "WakeBypassIsDeferred": Wake rcuo kthread later, bypass list is contended.
 * "WokeEmpty": rcuo CB kthread woke to find empty list.
 */
TRACE_EVENT_RCU(rcu_nocb_wake,
        TP_PROTO(const char *rcuname, int cpu, const char *reason),
        TP_ARGS(rcuname, cpu, reason),
        TP_STRUCT__entry(
                __field(const char *, rcuname)
                __field(int, cpu)
                __field(const char *, reason)
        ),
        TP_fast_assign(
                __entry->rcuname = rcuname;
                __entry->cpu = cpu;
                __entry->reason = reason;
        ),
        TP_printk("%s %d %s", __entry->rcuname, __entry->cpu, __entry->reason)
);
#endif

/*
 * Tracepoint for tasks blocking within preemptible-RCU read-side
 * critical sections. Track the type of RCU (which one day might
 * include SRCU), the grace-period number that the task is blocking
 * (the current or the next), and the task's PID.
 */
TRACE_EVENT_RCU(rcu_preempt_task,
        TP_PROTO(const char *rcuname, int pid, unsigned long gp_seq),
        TP_ARGS(rcuname, pid, gp_seq),
        TP_STRUCT__entry(
                __field(const char *, rcuname)
                __field(long, gp_seq)
                __field(int, pid)
        ),
        TP_fast_assign(
                __entry->rcuname = rcuname;
                __entry->gp_seq = (long)gp_seq;
                __entry->pid = pid;
        ),
        TP_printk("%s %ld %d",
                  __entry->rcuname, __entry->gp_seq, __entry->pid)
);

/*
 * Tracepoint for tasks that blocked within a given preemptible-RCU
 * read-side critical section exiting that critical section. Track the
 * type of RCU (which one day might include SRCU) and the task's PID.
 */
TRACE_EVENT_RCU(rcu_unlock_preempted_task,
        TP_PROTO(const char *rcuname, unsigned long gp_seq, int pid),
        TP_ARGS(rcuname, gp_seq, pid),
        TP_STRUCT__entry(
                __field(const char *, rcuname)
                __field(long, gp_seq)
                __field(int, pid)
        ),
        TP_fast_assign(
                __entry->rcuname = rcuname;
                __entry->gp_seq = (long)gp_seq;
                __entry->pid = pid;
        ),
        TP_printk("%s %ld %d", __entry->rcuname, __entry->gp_seq, __entry->pid)
);

/*
 * Tracepoint for quiescent-state-reporting events. These are
 * distinguished by the type of RCU, the grace-period number, the
 * mask of quiescent lower-level entities, the rcu_node structure level,
 * the starting and ending CPU covered by the rcu_node structure, and
 * whether there are any blocked tasks blocking the current grace period.
 * All but the type of RCU are extracted from the rcu_node structure.
 */
TRACE_EVENT_RCU(rcu_quiescent_state_report,
        TP_PROTO(const char *rcuname, unsigned long gp_seq,
                 unsigned long mask, unsigned long qsmask,
                 u8 level, int grplo, int grphi, int gp_tasks),
        TP_ARGS(rcuname, gp_seq, mask, qsmask, level, grplo, grphi, gp_tasks),
        TP_STRUCT__entry(
                __field(const char *, rcuname)
                __field(long, gp_seq)
                __field(unsigned long, mask)
                __field(unsigned long, qsmask)
                __field(u8, level)
                __field(int, grplo)
                __field(int, grphi)
                __field(u8, gp_tasks)
        ),
        TP_fast_assign(
                __entry->rcuname = rcuname;
                __entry->gp_seq = (long)gp_seq;
                __entry->mask = mask;
                __entry->qsmask = qsmask;
                __entry->level = level;
                __entry->grplo = grplo;
                __entry->grphi = grphi;
                __entry->gp_tasks = gp_tasks;
        ),
        TP_printk("%s %ld %lx>%lx %u %d %d %u",
                  __entry->rcuname, __entry->gp_seq,
                  __entry->mask, __entry->qsmask, __entry->level,
                  __entry->grplo, __entry->grphi, __entry->gp_tasks)
);
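
/*
 * Illustrative reading of the TP_printk() output above, based only on
 * the format string; the concrete values are made up. A line such as
 * "rcu_preempt 4056 1>0 0 0 7 0" would mean that during grace period
 * 4056 the bit in "mask" being reported quiescent leaves "qsmask" empty
 * on a level-0 rcu_node structure covering CPUs 0-7, with no blocked
 * tasks still holding up the grace period.
 */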

/*
 * Tracepoint for quiescent states detected by force_quiescent_state().
 * These trace events include the type of RCU, the grace-period number
 * that was blocked by the CPU, the CPU itself, and the type of quiescent
 * state, which can be "dti" for dyntick-idle mode or "kick" when kicking
 * a CPU that has been in dyntick-idle mode for too long.
 */
TRACE_EVENT_RCU(rcu_fqs,
        TP_PROTO(const char *rcuname, unsigned long gp_seq, int cpu, const char *qsevent),
        TP_ARGS(rcuname, gp_seq, cpu, qsevent),
        TP_STRUCT__entry(
                __field(const char *, rcuname)
                __field(long, gp_seq)
                __field(int, cpu)
                __field(const char *, qsevent)
        ),
        TP_fast_assign(
                __entry->rcuname = rcuname;
                __entry->gp_seq = (long)gp_seq;
                __entry->cpu = cpu;
                __entry->qsevent = qsevent;
        ),
        TP_printk("%s %ld %d %s",
                  __entry->rcuname, __entry->gp_seq,
                  __entry->cpu, __entry->qsevent)
);

/*
 * Tracepoint for RCU stall events. Takes a string identifying the RCU flavor
 * and a string identifying which function detected the RCU stall as follows:
 *
 * "StallDetected": Scheduler-tick detects other CPU's stalls.
 * "SelfDetected": Scheduler-tick detects a current CPU's stall.
 * "ExpeditedStall": Expedited grace period detects stalls.
 */
TRACE_EVENT(rcu_stall_warning,
        TP_PROTO(const char *rcuname, const char *msg),
        TP_ARGS(rcuname, msg),
        TP_STRUCT__entry(
                __field(const char *, rcuname)
                __field(const char *, msg)
        ),
        TP_fast_assign(
                __entry->rcuname = rcuname;
                __entry->msg = msg;
        ),
        TP_printk("%s %s",
                  __entry->rcuname, __entry->msg)
);

#endif /* #if defined(CONFIG_TREE_RCU) */

/*
 * Tracepoint for dyntick-idle entry/exit events. These take 2 strings
 * as argument:
 * polarity: "Start", "End", "StillNonIdle" for entering, exiting or still not
 * being in dyntick-idle mode.
 * context: "USER" or "IDLE" or "IRQ".
 * NMIs nested in IRQs are inferred with dynticks_nesting > 1 in IRQ context.
 *
 * These events also take a pair of numbers, which indicate the nesting
 * depth before and after the event of interest, and a third number that is
 * the ->dynticks counter. Note that task-related and interrupt-related
 * events use two separate counters, and that the "++=" and "--=" events
 * for irq/NMI will change the counter by two, otherwise by one.
 */
TRACE_EVENT_RCU(rcu_dyntick,
        TP_PROTO(const char *polarity, long oldnesting, long newnesting, int dynticks),
        TP_ARGS(polarity, oldnesting, newnesting, dynticks),
        TP_STRUCT__entry(
                __field(const char *, polarity)
                __field(long, oldnesting)
                __field(long, newnesting)
                __field(int, dynticks)
        ),
        TP_fast_assign(
                __entry->polarity = polarity;
                __entry->oldnesting = oldnesting;
                __entry->newnesting = newnesting;
                __entry->dynticks = dynticks;
        ),
        TP_printk("%s %lx %lx %#3x", __entry->polarity,
                  __entry->oldnesting, __entry->newnesting,
                  __entry->dynticks & 0xfff)
);
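
/*
 * Illustrative reading of the TP_printk() output above; the numbers are
 * made up. A line such as "Start 1 0 0x2e1" would mean the nesting depth
 * dropped from 1 to 0 (entering dyntick-idle) with the low 12 bits of the
 * ->dynticks counter at 0x2e1; the counter is masked with 0xfff purely to
 * keep the trace output compact.
 */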

/*
 * Tracepoint for the registration of a single RCU callback function.
 * The first argument is the type of RCU, the second argument is
 * a pointer to the RCU callback itself, and the third argument is
 * the total number of callbacks queued.
 */
TRACE_EVENT_RCU(rcu_callback,
        TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen),
        TP_ARGS(rcuname, rhp, qlen),
        TP_STRUCT__entry(
                __field(const char *, rcuname)
                __field(void *, rhp)
                __field(void *, func)
                __field(long, qlen)
        ),
        TP_fast_assign(
                __entry->rcuname = rcuname;
                __entry->rhp = rhp;
                __entry->func = rhp->func;
                __entry->qlen = qlen;
        ),
        TP_printk("%s rhp=%p func=%ps %ld",
                  __entry->rcuname, __entry->rhp, __entry->func,
                  __entry->qlen)
);
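
/*
 * Illustrative sketch, not a quote of kernel code: the queuing path of
 * call_rcu() would emit this event roughly as follows, where "head" is
 * the caller's rcu_head and "len" is assumed to be the length of the
 * local callback list after enqueuing:
 *
 *      trace_rcu_callback(rcu_state.name, head, len);
 */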

TRACE_EVENT_RCU(rcu_segcb_stats,
        TP_PROTO(struct rcu_segcblist *rs, const char *ctx),
        TP_ARGS(rs, ctx),
        TP_STRUCT__entry(
                __field(const char *, ctx)
                __array(unsigned long, gp_seq, RCU_CBLIST_NSEGS)
                __array(long, seglen, RCU_CBLIST_NSEGS)
        ),
        TP_fast_assign(
                __entry->ctx = ctx;
                memcpy(__entry->seglen, rs->seglen, RCU_CBLIST_NSEGS * sizeof(long));
                memcpy(__entry->gp_seq, rs->gp_seq, RCU_CBLIST_NSEGS * sizeof(unsigned long));
        ),
        TP_printk("%s seglen: (DONE=%ld, WAIT=%ld, NEXT_READY=%ld, NEXT=%ld) "
                  "gp_seq: (DONE=%lu, WAIT=%lu, NEXT_READY=%lu, NEXT=%lu)", __entry->ctx,
                  __entry->seglen[0], __entry->seglen[1], __entry->seglen[2], __entry->seglen[3],
                  __entry->gp_seq[0], __entry->gp_seq[1], __entry->gp_seq[2], __entry->gp_seq[3])
);

/*
 * Tracepoint for the registration of a single RCU callback of the special
 * kvfree() form. The first argument is the RCU type, the second argument
 * is a pointer to the RCU callback, the third argument is the offset
 * of the callback within the enclosing RCU-protected data structure,
 * and the fourth argument is the total number of callbacks queued.
 */
TRACE_EVENT_RCU(rcu_kvfree_callback,
        TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset,
                 long qlen),
        TP_ARGS(rcuname, rhp, offset, qlen),
        TP_STRUCT__entry(
                __field(const char *, rcuname)
                __field(void *, rhp)
                __field(unsigned long, offset)
                __field(long, qlen)
        ),
        TP_fast_assign(
                __entry->rcuname = rcuname;
                __entry->rhp = rhp;
                __entry->offset = offset;
                __entry->qlen = qlen;
        ),
        TP_printk("%s rhp=%p func=%ld %ld",
                  __entry->rcuname, __entry->rhp, __entry->offset,
                  __entry->qlen)
);

/*
 * Tracepoint for marking the beginning of rcu_do_batch, performed to start
 * RCU callback invocation. The first argument is the RCU flavor, the
 * second is the total number of callbacks queued, and the third is
 * the current RCU-callback batch limit.
 */
TRACE_EVENT_RCU(rcu_batch_start,
        TP_PROTO(const char *rcuname, long qlen, long blimit),
        TP_ARGS(rcuname, qlen, blimit),
        TP_STRUCT__entry(
                __field(const char *, rcuname)
                __field(long, qlen)
                __field(long, blimit)
        ),
        TP_fast_assign(
                __entry->rcuname = rcuname;
                __entry->qlen = qlen;
                __entry->blimit = blimit;
        ),
        TP_printk("%s CBs=%ld bl=%ld",
                  __entry->rcuname, __entry->qlen, __entry->blimit)
);

/*
 * Tracepoint for the invocation of a single RCU callback function.
 * The first argument is the type of RCU, and the second argument is
 * a pointer to the RCU callback itself.
 */
TRACE_EVENT_RCU(rcu_invoke_callback,
        TP_PROTO(const char *rcuname, struct rcu_head *rhp),
        TP_ARGS(rcuname, rhp),
        TP_STRUCT__entry(
                __field(const char *, rcuname)
                __field(void *, rhp)
                __field(void *, func)
        ),
        TP_fast_assign(
                __entry->rcuname = rcuname;
                __entry->rhp = rhp;
                __entry->func = rhp->func;
        ),
        TP_printk("%s rhp=%p func=%ps",
                  __entry->rcuname, __entry->rhp, __entry->func)
);

/*
 * Tracepoint for the invocation of a single RCU callback of the special
 * kvfree() form. The first argument is the RCU flavor, the second
 * argument is a pointer to the RCU callback, and the third argument
 * is the offset of the callback within the enclosing RCU-protected
 * data structure.
 */
TRACE_EVENT_RCU(rcu_invoke_kvfree_callback,
        TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset),
        TP_ARGS(rcuname, rhp, offset),
        TP_STRUCT__entry(
                __field(const char *, rcuname)
                __field(void *, rhp)
                __field(unsigned long, offset)
        ),
        TP_fast_assign(
                __entry->rcuname = rcuname;
                __entry->rhp = rhp;
                __entry->offset = offset;
        ),
        TP_printk("%s rhp=%p func=%ld",
                  __entry->rcuname, __entry->rhp, __entry->offset)
);

/*
 * Tracepoint for the invocation of a single RCU callback of the special
 * kfree_bulk() form. The first argument is the RCU flavor, the second
 * argument is the number of elements in the array to free, and the third
 * is the address of the array holding nr_records entries.
 */
TRACE_EVENT_RCU(rcu_invoke_kfree_bulk_callback,
        TP_PROTO(const char *rcuname, unsigned long nr_records, void **p),
        TP_ARGS(rcuname, nr_records, p),
        TP_STRUCT__entry(
                __field(const char *, rcuname)
                __field(unsigned long, nr_records)
                __field(void **, p)
        ),
        TP_fast_assign(
                __entry->rcuname = rcuname;
                __entry->nr_records = nr_records;
                __entry->p = p;
        ),
        TP_printk("%s bulk=0x%p nr_records=%lu",
                  __entry->rcuname, __entry->p, __entry->nr_records)
);

/*
 * Tracepoint for exiting rcu_do_batch after RCU callbacks have been
 * invoked. The first argument is the name of the RCU flavor,
 * the second argument is the number of callbacks actually invoked,
 * the third argument (cb) is whether or not any of the callbacks that
 * were ready to invoke at the beginning of this batch are still
 * queued, the fourth argument (nr) is the return value of need_resched(),
 * the fifth argument (iit) is 1 if the current task is the idle task,
 * and the sixth argument (risk) is the return value from
 * rcu_is_callbacks_kthread().
 */
TRACE_EVENT_RCU(rcu_batch_end,
        TP_PROTO(const char *rcuname, int callbacks_invoked,
                 char cb, char nr, char iit, char risk),
        TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),
        TP_STRUCT__entry(
                __field(const char *, rcuname)
                __field(int, callbacks_invoked)
                __field(char, cb)
                __field(char, nr)
                __field(char, iit)
                __field(char, risk)
        ),
        TP_fast_assign(
                __entry->rcuname = rcuname;
                __entry->callbacks_invoked = callbacks_invoked;
                __entry->cb = cb;
                __entry->nr = nr;
                __entry->iit = iit;
                __entry->risk = risk;
        ),
        TP_printk("%s CBs-invoked=%d idle=%c%c%c%c",
                  __entry->rcuname, __entry->callbacks_invoked,
                  __entry->cb ? 'C' : '.',
                  __entry->nr ? 'S' : '.',
                  __entry->iit ? 'I' : '.',
                  __entry->risk ? 'R' : '.')
);
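
/*
 * Illustrative reading of the TP_printk() output above; the values are
 * made up. Each of the four flag positions prints its letter (C, S, I, R)
 * when the corresponding argument is set and '.' otherwise, so a line such
 * as "rcu_sched CBs-invoked=10 idle=...R" would mean ten callbacks were
 * invoked, no ready callbacks remained queued, need_resched() was clear,
 * the current task was not the idle task, and the batch ran in the
 * callbacks kthread.
 */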

/*
 * Tracepoint for rcutorture readers. The first argument is the name
 * of the RCU flavor from rcutorture's viewpoint and the second argument
 * is the callback address. The third argument is the start time in
 * seconds, and the last two arguments are the grace period numbers
 * at the beginning and end of the read, respectively. Note that the
 * callback address can be NULL.
 */
#define RCUTORTURENAME_LEN 8
TRACE_EVENT_RCU(rcu_torture_read,
        TP_PROTO(const char *rcutorturename, struct rcu_head *rhp,
                 unsigned long secs, unsigned long c_old, unsigned long c),
        TP_ARGS(rcutorturename, rhp, secs, c_old, c),
        TP_STRUCT__entry(
                __array(char, rcutorturename, RCUTORTURENAME_LEN)
                __field(struct rcu_head *, rhp)
                __field(unsigned long, secs)
                __field(unsigned long, c_old)
                __field(unsigned long, c)
        ),
        TP_fast_assign(
                strncpy(__entry->rcutorturename, rcutorturename,
                        RCUTORTURENAME_LEN);
                __entry->rcutorturename[RCUTORTURENAME_LEN - 1] = 0;
                __entry->rhp = rhp;
                __entry->secs = secs;
                __entry->c_old = c_old;
                __entry->c = c;
        ),
        TP_printk("%s torture read %p %luus c: %lu %lu",
                  __entry->rcutorturename, __entry->rhp,
                  __entry->secs, __entry->c_old, __entry->c)
);

/*
 * Tracepoint for rcu_barrier() execution. The string "s" describes
 * the rcu_barrier phase:
 * "Begin": rcu_barrier() started.
 * "CB": An rcu_barrier_callback() invoked a callback, not the last.
 * "EarlyExit": rcu_barrier() piggybacked, thus early exit.
 * "Inc1": rcu_barrier() piggyback check counter incremented.
 * "Inc2": rcu_barrier() piggyback check counter incremented.
 * "IRQ": An rcu_barrier_callback() callback posted on remote CPU.
 * "IRQNQ": An rcu_barrier_callback() callback found no callbacks.
 * "LastCB": An rcu_barrier_callback() invoked the last callback.
 * "NQ": rcu_barrier() found a CPU with no callbacks.
 * "OnlineQ": rcu_barrier() found online CPU with callbacks.
 * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
 * is the count of remaining callbacks, and "done" is the piggybacking count.
 */
TRACE_EVENT_RCU(rcu_barrier,
        TP_PROTO(const char *rcuname, const char *s, int cpu, int cnt, unsigned long done),
        TP_ARGS(rcuname, s, cpu, cnt, done),
        TP_STRUCT__entry(
                __field(const char *, rcuname)
                __field(const char *, s)
                __field(int, cpu)
                __field(int, cnt)
                __field(unsigned long, done)
        ),
        TP_fast_assign(
                __entry->rcuname = rcuname;
                __entry->s = s;
                __entry->cpu = cpu;
                __entry->cnt = cnt;
                __entry->done = done;
        ),
        TP_printk("%s %s cpu %d remaining %d # %lu",
                  __entry->rcuname, __entry->s, __entry->cpu, __entry->cnt,
                  __entry->done)
);
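
/*
 * Illustrative reading of the TP_printk() output above; the values are
 * made up. A trace such as "rcu_preempt Begin cpu -1 remaining 0 # 4"
 * followed later by "rcu_preempt LastCB cpu -1 remaining 0 # 5" would
 * bracket one rcu_barrier() instance, with the trailing "# N"
 * piggybacking count distinguishing overlapping callers.
 */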

#endif /* _TRACE_RCU_H */

/* This part must be outside protection */
#include <trace/define_trace.h>