// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include "runqslower.h"

#define TASK_RUNNING 0
#define BPF_F_CURRENT_CPU 0xffffffffULL

/* Tunables set by userspace before load (read-only from BPF side). */
const volatile __u64 min_us = 0;   /* report only delays strictly greater than this */
const volatile pid_t targ_pid = 0; /* 0 = trace all tasks, else this pid only */
/*
 * Per-task storage: holds the timestamp (ns) recorded when a task was
 * enqueued (woken up / preempted while runnable). Keyed implicitly by
 * task; freed automatically when the task exits.
 */
struct {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, u64);
} start SEC(".maps");
/* Perf ring buffer used to stream runq_event records to userspace. */
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(u32));
} events SEC(".maps");
  21. /* record enqueue timestamp */
  22. __always_inline
  23. static int trace_enqueue(struct task_struct *t)
  24. {
  25. u32 pid = t->pid;
  26. u64 *ptr;
  27. if (!pid || (targ_pid && targ_pid != pid))
  28. return 0;
  29. ptr = bpf_task_storage_get(&start, t, 0,
  30. BPF_LOCAL_STORAGE_GET_F_CREATE);
  31. if (!ptr)
  32. return 0;
  33. *ptr = bpf_ktime_get_ns();
  34. return 0;
  35. }
  36. SEC("tp_btf/sched_wakeup")
  37. int handle__sched_wakeup(u64 *ctx)
  38. {
  39. /* TP_PROTO(struct task_struct *p) */
  40. struct task_struct *p = (void *)ctx[0];
  41. return trace_enqueue(p);
  42. }
  43. SEC("tp_btf/sched_wakeup_new")
  44. int handle__sched_wakeup_new(u64 *ctx)
  45. {
  46. /* TP_PROTO(struct task_struct *p) */
  47. struct task_struct *p = (void *)ctx[0];
  48. return trace_enqueue(p);
  49. }
  50. SEC("tp_btf/sched_switch")
  51. int handle__sched_switch(u64 *ctx)
  52. {
  53. /* TP_PROTO(bool preempt, struct task_struct *prev,
  54. * struct task_struct *next)
  55. */
  56. struct task_struct *prev = (struct task_struct *)ctx[1];
  57. struct task_struct *next = (struct task_struct *)ctx[2];
  58. struct runq_event event = {};
  59. u64 *tsp, delta_us;
  60. long state;
  61. u32 pid;
  62. /* ivcsw: treat like an enqueue event and store timestamp */
  63. if (prev->__state == TASK_RUNNING)
  64. trace_enqueue(prev);
  65. pid = next->pid;
  66. /* For pid mismatch, save a bpf_task_storage_get */
  67. if (!pid || (targ_pid && targ_pid != pid))
  68. return 0;
  69. /* fetch timestamp and calculate delta */
  70. tsp = bpf_task_storage_get(&start, next, 0, 0);
  71. if (!tsp)
  72. return 0; /* missed enqueue */
  73. delta_us = (bpf_ktime_get_ns() - *tsp) / 1000;
  74. if (min_us && delta_us <= min_us)
  75. return 0;
  76. event.pid = pid;
  77. event.delta_us = delta_us;
  78. bpf_get_current_comm(&event.task, sizeof(event.task));
  79. /* output */
  80. bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
  81. &event, sizeof(event));
  82. bpf_task_storage_delete(&start, next);
  83. return 0;
  84. }
/* A GPL-compatible license is required to use GPL-only BPF helpers. */
char LICENSE[] SEC("license") = "GPL";