sched.h

/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#include <linux/kthread.h>
#include <linux/sched/numa_balancing.h>
#include <linux/tracepoint.h>
#include <linux/binfmts.h>
/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,
	TP_PROTO(struct task_struct *t),
	TP_ARGS(t),
	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
	),
	TP_fast_assign(
		memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
		__entry->pid = t->pid;
	),
	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
/*
 * Tracepoint for the return value of the kthread stopping:
 */
TRACE_EVENT(sched_kthread_stop_ret,
	TP_PROTO(int ret),
	TP_ARGS(ret),
	TP_STRUCT__entry(
		__field( int, ret )
	),
	TP_fast_assign(
		__entry->ret = ret;
	),
	TP_printk("ret=%d", __entry->ret)
);
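/*
 * Usage sketch: each TRACE_EVENT(name, ...) in this header expands into an
 * inline trace_<name>() function that the instrumented code calls. For the
 * two events above, the natural call sites are in kthread_stop(); roughly
 * (simplified from kernel/kthread.c, details elided):
 *
 *	int kthread_stop(struct task_struct *k)
 *	{
 *		int ret;
 *
 *		trace_sched_kthread_stop(k);
 *		... wake the kthread and wait for it to exit ...
 *		ret = k->exit_code;
 *		trace_sched_kthread_stop_ret(ret);
 *		return ret;
 *	}
 */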
/**
 * sched_kthread_work_queue_work - called when a work gets queued
 * @worker: pointer to the kthread_worker
 * @work: pointer to struct kthread_work
 *
 * This event occurs when a work is queued immediately or once a
 * delayed work is actually queued (i.e. once the delay has been
 * reached).
 */
TRACE_EVENT(sched_kthread_work_queue_work,
	TP_PROTO(struct kthread_worker *worker,
		 struct kthread_work *work),
	TP_ARGS(worker, work),
	TP_STRUCT__entry(
		__field( void *, work )
		__field( void *, function )
		__field( void *, worker )
	),
	TP_fast_assign(
		__entry->work = work;
		__entry->function = work->func;
		__entry->worker = worker;
	),
	TP_printk("work struct=%p function=%ps worker=%p",
		  __entry->work, __entry->function, __entry->worker)
);
/**
 * sched_kthread_work_execute_start - called immediately before the work callback
 * @work: pointer to struct kthread_work
 *
 * Allows tracking of kthread work execution.
 */
TRACE_EVENT(sched_kthread_work_execute_start,
	TP_PROTO(struct kthread_work *work),
	TP_ARGS(work),
	TP_STRUCT__entry(
		__field( void *, work )
		__field( void *, function )
	),
	TP_fast_assign(
		__entry->work = work;
		__entry->function = work->func;
	),
	TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
);
/**
 * sched_kthread_work_execute_end - called immediately after the work callback
 * @work: pointer to struct kthread_work
 * @function: pointer to the worker function
 *
 * Allows tracking of kthread work execution.
 */
TRACE_EVENT(sched_kthread_work_execute_end,
	TP_PROTO(struct kthread_work *work, kthread_work_func_t function),
	TP_ARGS(work, function),
	TP_STRUCT__entry(
		__field( void *, work )
		__field( void *, function )
	),
	TP_fast_assign(
		__entry->work = work;
		__entry->function = function;
	),
	TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
);
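/*
 * The three kthread work events above all record the work pointer, so a
 * trace can correlate them: queue_work -> execute_start gives the queueing
 * latency, and execute_start -> execute_end gives the callback duration.
 */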
/*
 * Tracepoint for waking up a task:
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(__perf_task(p)),
	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, prio )
		__field( int, target_cpu )
	),
	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid = p->pid;
		__entry->prio = p->prio; /* XXX SCHED_DEADLINE */
		__entry->target_cpu = task_cpu(p);
	),
	TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->target_cpu)
);
/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
 * called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_waking,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p));

/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p));

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p));
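/*
 * sched_waking, sched_wakeup and sched_wakeup_new are DEFINE_EVENT()
 * instances of the single class above: they share its field layout,
 * fast-assign code and print format, so each extra event adds very little
 * code. Illustrative output (example values, format taken from the class's
 * TP_printk):
 *
 *	sched_waking: comm=kworker/0:1 pid=17 prio=120 target_cpu=000
 */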
#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(bool preempt,
					      unsigned int prev_state,
					      struct task_struct *p)
{
	unsigned int state;

#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	if (preempt)
		return TASK_REPORT_MAX;

	/*
	 * task_state_index() uses fls() and returns a value in the 0-8 range.
	 * Decrement it by 1 (except for the TASK_RUNNING state, i.e. 0) before
	 * using it in the left shift, to get the correct task->state mapping.
	 */
	state = __task_state_index(prev_state, p->exit_state);
	return state ? (1 << (state - 1)) : state;
}
#endif /* CREATE_TRACE_POINTS */
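/*
 * Worked example of the mapping above (standard task-state bit values, shown
 * for illustration): a task that blocked in TASK_UNINTERRUPTIBLE (0x0002) has
 * __task_state_index() return fls(0x0002) == 2, so the helper records
 * 1 << 1 == 0x0002 again, which sched_switch's __print_flags() below renders
 * as "D". TASK_RUNNING maps to 0 and prints as "R", while a preempted task
 * records TASK_REPORT_MAX, printed as the "+" suffix.
 */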
/*
 * Tracepoint for task switches, performed by the scheduler:
 */
TRACE_EVENT(sched_switch,
	TP_PROTO(bool preempt,
		 struct task_struct *prev,
		 struct task_struct *next,
		 unsigned int prev_state),
	TP_ARGS(preempt, prev, next, prev_state),
	TP_STRUCT__entry(
		__array( char, prev_comm, TASK_COMM_LEN )
		__field( pid_t, prev_pid )
		__field( int, prev_prio )
		__field( long, prev_state )
		__array( char, next_comm, TASK_COMM_LEN )
		__field( pid_t, next_pid )
		__field( int, next_prio )
	),
	TP_fast_assign(
		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
		__entry->prev_pid = prev->pid;
		__entry->prev_prio = prev->prio;
		__entry->prev_state = __trace_sched_switch_state(preempt, prev_state, prev);
		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
		__entry->next_pid = next->pid;
		__entry->next_prio = next->prio;
		/* XXX SCHED_DEADLINE */
	),
	TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
		  __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
		  (__entry->prev_state & (TASK_REPORT_MAX - 1)) ?
		  __print_flags(__entry->prev_state & (TASK_REPORT_MAX - 1), "|",
				{ TASK_INTERRUPTIBLE, "S" },
				{ TASK_UNINTERRUPTIBLE, "D" },
				{ __TASK_STOPPED, "T" },
				{ __TASK_TRACED, "t" },
				{ EXIT_DEAD, "X" },
				{ EXIT_ZOMBIE, "Z" },
				{ TASK_PARKED, "P" },
				{ TASK_DEAD, "I" }) :
		  "R",
		  __entry->prev_state & TASK_REPORT_MAX ? "+" : "",
		  __entry->next_comm, __entry->next_pid, __entry->next_prio)
);
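/*
 * Illustrative trace output for sched_switch (field values are examples; the
 * layout follows the TP_printk format string above, shown wrapped here):
 *
 *	sched_switch: prev_comm=bash prev_pid=1234 prev_prio=120 prev_state=S
 *		==> next_comm=swapper/0 next_pid=0 next_prio=120
 */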
/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,
	TP_PROTO(struct task_struct *p, int dest_cpu),
	TP_ARGS(p, dest_cpu),
	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, prio )
		__field( int, orig_cpu )
		__field( int, dest_cpu )
	),
	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid = p->pid;
		__entry->prio = p->prio; /* XXX SCHED_DEADLINE */
		__entry->orig_cpu = task_cpu(p);
		__entry->dest_cpu = dest_cpu;
	),
	TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->orig_cpu, __entry->dest_cpu)
);
DECLARE_EVENT_CLASS(sched_process_template,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p),
	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, prio )
	),
	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid = p->pid;
		__entry->prio = p->prio; /* XXX SCHED_DEADLINE */
	),
	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);
/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p));

/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p));

/*
 * Tracepoint for waiting on task to unschedule:
 */
DEFINE_EVENT(sched_process_template, sched_wait_task,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p));
/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,
	TP_PROTO(struct pid *pid),
	TP_ARGS(pid),
	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, prio )
	),
	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
		__entry->pid = pid_nr(pid);
		__entry->prio = current->prio; /* XXX SCHED_DEADLINE */
	),
	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);
/*
 * Tracepoint for kernel_clone:
 */
TRACE_EVENT(sched_process_fork,
	TP_PROTO(struct task_struct *parent, struct task_struct *child),
	TP_ARGS(parent, child),
	TP_STRUCT__entry(
		__array( char, parent_comm, TASK_COMM_LEN )
		__field( pid_t, parent_pid )
		__array( char, child_comm, TASK_COMM_LEN )
		__field( pid_t, child_pid )
	),
	TP_fast_assign(
		memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
		__entry->parent_pid = parent->pid;
		memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
		__entry->child_pid = child->pid;
	),
	TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
		  __entry->parent_comm, __entry->parent_pid,
		  __entry->child_comm, __entry->child_pid)
);
/*
 * Tracepoint for exec:
 */
TRACE_EVENT(sched_process_exec,
	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),
	TP_ARGS(p, old_pid, bprm),
	TP_STRUCT__entry(
		__string( filename, bprm->filename )
		__field( pid_t, pid )
		__field( pid_t, old_pid )
	),
	TP_fast_assign(
		__assign_str(filename, bprm->filename);
		__entry->pid = p->pid;
		__entry->old_pid = old_pid;
	),
	TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
		  __entry->pid, __entry->old_pid)
);
#ifdef CONFIG_SCHEDSTATS
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT
#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS
#else
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT_NOP
#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS_NOP
#endif
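/*
 * When CONFIG_SCHEDSTATS is disabled, the *_NOP variants still provide empty
 * static inline trace_sched_stat_*() stubs, so the scheduler call sites need
 * no #ifdefs of their own; the events below simply do not appear in tracefs.
 */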
/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
 *     tasks; adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
DECLARE_EVENT_CLASS_SCHEDSTAT(sched_stat_template,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(__perf_task(tsk), __perf_count(delay)),
	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( u64, delay )
	),
	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
		__entry->delay = delay;
	),
	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
		  __entry->comm, __entry->pid,
		  (unsigned long long)__entry->delay)
);
/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_wait,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_sleep,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_iowait,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting blocked time (time the task is in uninterruptible
 * sleep).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_blocked,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay));
/*
 * Tracepoint for recording the cause of uninterruptible sleep.
 */
TRACE_EVENT(sched_blocked_reason,
	TP_PROTO(struct task_struct *tsk),
	TP_ARGS(tsk),
	TP_STRUCT__entry(
		__field( pid_t, pid )
		__field( void *, caller )
		__field( bool, io_wait )
	),
	TP_fast_assign(
		__entry->pid = tsk->pid;
		__entry->caller = (void *)__get_wchan(tsk);
		__entry->io_wait = tsk->in_iowait;
	),
	TP_printk("pid=%d iowait=%d caller=%pS", __entry->pid, __entry->io_wait, __entry->caller)
);
/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
DECLARE_EVENT_CLASS(sched_stat_runtime,
	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
	TP_ARGS(tsk, __perf_count(runtime), vruntime),
	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( u64, runtime )
		__field( u64, vruntime )
	),
	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
		__entry->runtime = runtime;
		__entry->vruntime = vruntime;
	),
	TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
		  __entry->comm, __entry->pid,
		  (unsigned long long)__entry->runtime,
		  (unsigned long long)__entry->vruntime)
);

DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
	TP_ARGS(tsk, runtime, vruntime));
/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
TRACE_EVENT(sched_pi_setprio,
	TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),
	TP_ARGS(tsk, pi_task),
	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, oldprio )
		__field( int, newprio )
	),
	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
		__entry->oldprio = tsk->prio;
		__entry->newprio = pi_task ?
				min(tsk->normal_prio, pi_task->prio) :
				tsk->normal_prio;
		/* XXX SCHED_DEADLINE bits missing */
	),
	TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
		  __entry->comm, __entry->pid,
		  __entry->oldprio, __entry->newprio)
);
#ifdef CONFIG_DETECT_HUNG_TASK
TRACE_EVENT(sched_process_hang,
	TP_PROTO(struct task_struct *tsk),
	TP_ARGS(tsk),
	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
	),
	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
	),
	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
#endif /* CONFIG_DETECT_HUNG_TASK */
/*
 * Tracks migration of tasks from one runqueue to another. Can be used to
 * detect if automatic NUMA balancing is bouncing between nodes.
 */
TRACE_EVENT(sched_move_numa,
	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),
	TP_ARGS(tsk, src_cpu, dst_cpu),
	TP_STRUCT__entry(
		__field( pid_t, pid )
		__field( pid_t, tgid )
		__field( pid_t, ngid )
		__field( int, src_cpu )
		__field( int, src_nid )
		__field( int, dst_cpu )
		__field( int, dst_nid )
	),
	TP_fast_assign(
		__entry->pid = task_pid_nr(tsk);
		__entry->tgid = task_tgid_nr(tsk);
		__entry->ngid = task_numa_group_id(tsk);
		__entry->src_cpu = src_cpu;
		__entry->src_nid = cpu_to_node(src_cpu);
		__entry->dst_cpu = dst_cpu;
		__entry->dst_nid = cpu_to_node(dst_cpu);
	),
	TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d",
		  __entry->pid, __entry->tgid, __entry->ngid,
		  __entry->src_cpu, __entry->src_nid,
		  __entry->dst_cpu, __entry->dst_nid)
);
DECLARE_EVENT_CLASS(sched_numa_pair_template,
	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),
	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu),
	TP_STRUCT__entry(
		__field( pid_t, src_pid )
		__field( pid_t, src_tgid )
		__field( pid_t, src_ngid )
		__field( int, src_cpu )
		__field( int, src_nid )
		__field( pid_t, dst_pid )
		__field( pid_t, dst_tgid )
		__field( pid_t, dst_ngid )
		__field( int, dst_cpu )
		__field( int, dst_nid )
	),
	TP_fast_assign(
		__entry->src_pid = task_pid_nr(src_tsk);
		__entry->src_tgid = task_tgid_nr(src_tsk);
		__entry->src_ngid = task_numa_group_id(src_tsk);
		__entry->src_cpu = src_cpu;
		__entry->src_nid = cpu_to_node(src_cpu);
		__entry->dst_pid = dst_tsk ? task_pid_nr(dst_tsk) : 0;
		__entry->dst_tgid = dst_tsk ? task_tgid_nr(dst_tsk) : 0;
		__entry->dst_ngid = dst_tsk ? task_numa_group_id(dst_tsk) : 0;
		__entry->dst_cpu = dst_cpu;
		__entry->dst_nid = dst_cpu >= 0 ? cpu_to_node(dst_cpu) : -1;
	),
	TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
		  __entry->src_pid, __entry->src_tgid, __entry->src_ngid,
		  __entry->src_cpu, __entry->src_nid,
		  __entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid,
		  __entry->dst_cpu, __entry->dst_nid)
);
DEFINE_EVENT(sched_numa_pair_template, sched_stick_numa,
	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),
	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);

DEFINE_EVENT(sched_numa_pair_template, sched_swap_numa,
	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),
	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);
/*
 * Tracepoint for waking a polling cpu without an IPI.
 */
TRACE_EVENT(sched_wake_idle_without_ipi,
	TP_PROTO(int cpu),
	TP_ARGS(cpu),
	TP_STRUCT__entry(
		__field( int, cpu )
	),
	TP_fast_assign(
		__entry->cpu = cpu;
	),
	TP_printk("cpu=%d", __entry->cpu)
);
/*
 * Following tracepoints are not exported in tracefs and provide hooking
 * mechanisms only for testing and debugging purposes.
 *
 * Postfixed with _tp to make them easily identifiable in the code.
 */
DECLARE_TRACE(pelt_cfs_tp,
	TP_PROTO(struct cfs_rq *cfs_rq),
	TP_ARGS(cfs_rq));

DECLARE_TRACE(pelt_rt_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_dl_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_thermal_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_irq_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_se_tp,
	TP_PROTO(struct sched_entity *se),
	TP_ARGS(se));

DECLARE_TRACE(sched_cpu_capacity_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(sched_overutilized_tp,
	TP_PROTO(struct root_domain *rd, bool overutilized),
	TP_ARGS(rd, overutilized));

DECLARE_TRACE(sched_util_est_cfs_tp,
	TP_PROTO(struct cfs_rq *cfs_rq),
	TP_ARGS(cfs_rq));

DECLARE_TRACE(sched_util_est_se_tp,
	TP_PROTO(struct sched_entity *se),
	TP_ARGS(se));

DECLARE_TRACE(sched_update_nr_running_tp,
	TP_PROTO(struct rq *rq, int change),
	TP_ARGS(rq, change));
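/*
 * Sketch of how these bare tracepoints are typically consumed (generic
 * DECLARE_TRACE machinery, not something defined in this header): a module
 * attaches a probe via the generated register/unregister helpers, e.g.
 *
 *	static void probe_pelt_cfs(void *data, struct cfs_rq *cfs_rq)
 *	{
 *		pr_info("cfs_rq PELT update\n");
 *	}
 *
 *	register_trace_pelt_cfs_tp(probe_pelt_cfs, NULL);
 *	...
 *	unregister_trace_pelt_cfs_tp(probe_pelt_cfs, NULL);
 */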
#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
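/*
 * How this header is instantiated (illustrative; in mainline the definitions
 * are emitted from kernel/sched/core.c): exactly one compilation unit defines
 * CREATE_TRACE_POINTS before including the header, so the tracepoints and
 * their tracefs events are generated once, while every other user includes it
 * plainly and only gets the inline trace_*() callers:
 *
 *	#define CREATE_TRACE_POINTS
 *	#include <trace/events/sched.h>
 */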