walt_tp.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 */

#include <linux/cpu.h>
#include <linux/tracepoint.h>
#include <trace/hooks/sched.h>
#include "trace.h"

#define CREATE_TRACE_POINTS
#include "perf_trace_counters.h"

unsigned int sysctl_sched_dynamic_tp_enable;

#define USE_CPUHP_STATE CPUHP_AP_ONLINE_DYN
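
/*
 * Per-CPU bookkeeping for the context-switch counter tracepoints: the
 * last enabled-counter mask, the counter snapshots taken at the previous
 * switch (the cycle counter and L1 PMU event counters here; the AMU
 * snapshots are presumably consumed by the events in
 * perf_trace_counters.h), the PID last scheduled on the CPU, a flag set
 * when the CPU comes back online, and the last time the counter
 * configuration was traced.
 */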
DEFINE_PER_CPU(u32, cntenset_val);
DEFINE_PER_CPU(unsigned long, previous_ccnt);
DEFINE_PER_CPU(unsigned long[NUM_L1_CTRS], previous_l1_cnts);
DEFINE_PER_CPU(unsigned long[NUM_AMU_CTRS], previous_amu_cnts);
DEFINE_PER_CPU(u32, old_pid);
DEFINE_PER_CPU(u32, hotplug_flag);
DEFINE_PER_CPU(u64, prev_time);
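
/*
 * CPU hotplug "online" callback: flag the CPU so that the next context
 * switch re-seeds its counter baselines instead of tracing deltas
 * against values from before the CPU went down.
 */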
static int tracectr_cpu_hotplug_coming_up(unsigned int cpu)
{
	per_cpu(hotplug_flag, cpu) = 1;
	return 0;
}
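
/*
 * Re-read the cycle counter and every enabled L1 PMU event counter to
 * re-establish this CPU's baseline values.
 */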
static void setup_prev_cnts(u32 cpu, u32 cnten_val)
{
	int i;

	if (cnten_val & CC)
		per_cpu(previous_ccnt, cpu) = read_sysreg(pmccntr_el0);

	for (i = 0; i < NUM_L1_CTRS; i++) {
		if (cnten_val & (1 << i)) {
			/* Select */
			write_sysreg(i, pmselr_el0);
			isb();
			/* Read value */
			per_cpu(previous_l1_cnts[i], cpu) =
				read_sysreg(pmxevcntr_el0);
		}
	}
}
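
/*
 * sched_switch probe: briefly disable the enabled PMU counters, then
 * either re-seed the baselines after a hotplug or emit the per-switch
 * counter trace event (and the counter configuration at most once per
 * second per CPU), and finally re-enable the counters.
 */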
void tracectr_notifier(void *ignore, bool preempt,
		       struct task_struct *prev, struct task_struct *next,
		       unsigned int prev_state)
{
	u32 cnten_val;
	int current_pid;
	u32 cpu = task_cpu(next);
	u64 now;

	if (!trace_sched_switch_with_ctrs_enabled())
		return;

	current_pid = next->pid;
	if (per_cpu(old_pid, cpu) != -1) {
		cnten_val = read_sysreg(pmcntenset_el0);
		per_cpu(cntenset_val, cpu) = cnten_val;
		/* Disable all the counters that were enabled */
		write_sysreg(cnten_val, pmcntenclr_el0);

		if (per_cpu(hotplug_flag, cpu) == 1) {
			per_cpu(hotplug_flag, cpu) = 0;
			setup_prev_cnts(cpu, cnten_val);
		} else {
			trace_sched_switch_with_ctrs(preempt, prev, next);

			now = sched_clock();
			if ((now - per_cpu(prev_time, cpu)) > NSEC_PER_SEC) {
				trace_sched_switch_ctrs_cfg(cpu);
				per_cpu(prev_time, cpu) = now;
			}
		}

		/* Enable all the counters that were disabled */
		write_sysreg(cnten_val, pmcntenset_el0);
	}
	per_cpu(old_pid, cpu) = current_pid;
}
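
/*
 * Reset old_pid for every possible CPU, install the hotplug callback
 * and, if that succeeds, attach the sched_switch probe.
 */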
static void register_sched_switch_ctrs(void)
{
	int cpu, rc;

	for_each_possible_cpu(cpu)
		per_cpu(old_pid, cpu) = -1;

	rc = cpuhp_setup_state_nocalls(USE_CPUHP_STATE, "tracectr_cpu_hotplug",
				       tracectr_cpu_hotplug_coming_up, NULL);
	if (rc >= 0)
		register_trace_sched_switch(tracectr_notifier, NULL);
}

static void unregister_sched_switch_ctrs(void)
{
	unregister_trace_sched_switch(tracectr_notifier, NULL);
	cpuhp_remove_state_nocalls(USE_CPUHP_STATE);
}
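
/* Return the root domain's CPU span, or NULL when it is unavailable. */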
const struct cpumask *sched_trace_rd_span(struct root_domain *rd)
{
#ifdef CONFIG_SMP
	return rd ? rd->span : NULL;
#else
	return NULL;
#endif
}
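
/*
 * sched_overutilized_tp probe: format the root domain's span as a
 * cpumask string and emit the local sched_overutilized trace event.
 */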
static void sched_overutilized(void *data, struct root_domain *rd,
			       bool overutilized)
{
	if (trace_sched_overutilized_enabled()) {
		char span[SPAN_SIZE];

		cpumap_print_to_pagebuf(false, span, sched_trace_rd_span(rd));
		trace_sched_overutilized(overutilized, span);
	}
}
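
/* Register/unregister all of the dynamic tracepoint probes as a group. */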
static void walt_register_dynamic_tp_events(void)
{
	register_trace_sched_overutilized_tp(sched_overutilized, NULL);
	register_sched_switch_ctrs();
}

static void walt_unregister_dynamic_tp_events(void)
{
	unregister_trace_sched_overutilized_tp(sched_overutilized, NULL);
	unregister_sched_switch_ctrs();
}
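
/*
 * sysctl handler for sched_dynamic_tp_enable.  The ctl_table that wires
 * this up lives elsewhere; purely as an illustration, an entry using
 * this handler would look roughly like:
 *
 *	{
 *		.procname	= "sched_dynamic_tp_enable",
 *		.data		= &sysctl_sched_dynamic_tp_enable,
 *		.maxlen		= sizeof(unsigned int),
 *		.mode		= 0644,
 *		.proc_handler	= sched_dynamic_tp_handler,
 *		.extra1		= SYSCTL_ZERO,
 *		.extra2		= SYSCTL_ONE,
 *	},
 *
 * Writing 1 registers the dynamic tracepoint probes, writing 0 removes
 * them; the mutex serializes concurrent writers.
 */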
int sched_dynamic_tp_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp, loff_t *ppos)
{
	static DEFINE_MUTEX(mutex);
	int ret = 0, *val = (int *)table->data;
	unsigned int old_val;

	mutex_lock(&mutex);
	old_val = sysctl_sched_dynamic_tp_enable;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret || !write || (old_val == sysctl_sched_dynamic_tp_enable))
		goto done;

	if (*val)
		walt_register_dynamic_tp_events();
	else
		walt_unregister_dynamic_tp_events();

done:
	mutex_unlock(&mutex);
	return ret;
}