// SPDX-License-Identifier: GPL-2.0
/*
 * /proc/schedstat implementation
 */
#include "sched.h"
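
/*
 * Begin tracking how long @p waits on @rq's runqueue.
 *
 * If @p is migrating in, wait_start already holds the wait time it
 * accumulated on its previous runqueue (stashed there by
 * __update_stats_wait_end()), so it is subtracted from the new clock
 * reading; a later __update_stats_wait_end() then reports the total
 * wait across the migration.
 */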
void __update_stats_wait_start(struct rq *rq, struct task_struct *p,
			       struct sched_statistics *stats)
{
	u64 wait_start, prev_wait_start;

	wait_start = rq_clock(rq);
	prev_wait_start = schedstat_val(stats->wait_start);

	if (p && likely(wait_start > prev_wait_start))
		wait_start -= prev_wait_start;

	__schedstat_set(stats->wait_start, wait_start);
}
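
/*
 * End of a wait period: fold the elapsed time into wait_max, wait_count
 * and wait_sum (the sched_statistics fields exposed through
 * /proc/<pid>/sched when schedstats are enabled), then clear wait_start
 * for the next period.
 */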
void __update_stats_wait_end(struct rq *rq, struct task_struct *p,
			     struct sched_statistics *stats)
{
	u64 delta = rq_clock(rq) - schedstat_val(stats->wait_start);

	if (p) {
		if (task_on_rq_migrating(p)) {
			/*
			 * Preserve migrating task's wait time so wait_start
			 * time stamp can be adjusted to accumulate wait time
			 * prior to migration.
			 */
			__schedstat_set(stats->wait_start, delta);
			return;
		}

		trace_sched_stat_wait(p, delta);
	}

	__schedstat_set(stats->wait_max,
			max(schedstat_val(stats->wait_max), delta));
	__schedstat_inc(stats->wait_count);
	__schedstat_add(stats->wait_sum, delta);
	__schedstat_set(stats->wait_start, 0);
}
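
/*
 * A waking task accrued time in sleep_start (interruptible sleep) or
 * block_start (uninterruptible sleep, e.g. I/O wait); the dequeue path
 * sets whichever matches the task state. rq clocks are per-CPU, so a
 * task that slept on one CPU and wakes on another can observe a
 * negative delta; clamp it to zero rather than corrupting the sums.
 */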
void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
				    struct sched_statistics *stats)
{
	u64 sleep_start, block_start;

	sleep_start = schedstat_val(stats->sleep_start);
	block_start = schedstat_val(stats->block_start);

	if (sleep_start) {
		u64 delta = rq_clock(rq) - sleep_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > schedstat_val(stats->sleep_max)))
			__schedstat_set(stats->sleep_max, delta);

		__schedstat_set(stats->sleep_start, 0);
		__schedstat_add(stats->sum_sleep_runtime, delta);

		if (p) {
			account_scheduler_latency(p, delta >> 10, 1);
			trace_sched_stat_sleep(p, delta);
		}
	}

	if (block_start) {
		u64 delta = rq_clock(rq) - block_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > schedstat_val(stats->block_max)))
			__schedstat_set(stats->block_max, delta);

		__schedstat_set(stats->block_start, 0);
		__schedstat_add(stats->sum_sleep_runtime, delta);
		__schedstat_add(stats->sum_block_runtime, delta);

		if (p) {
			if (p->in_iowait) {
				__schedstat_add(stats->iowait_sum, delta);
				__schedstat_inc(stats->iowait_count);
				trace_sched_stat_iowait(p, delta);
			}

			trace_sched_stat_blocked(p, delta);

			/*
			 * Blocking time is in units of nanosecs, so shift by
			 * 20 to get a milliseconds-range estimation of the
			 * amount of time that the task spent sleeping:
			 */
			if (unlikely(prof_on == SLEEP_PROFILING)) {
				profile_hits(SLEEP_PROFILING,
					     (void *)get_wchan(p),
					     delta >> 20);
			}
			account_scheduler_latency(p, delta >> 10, 0);
		}
	}
}
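
/*
 * Shift arithmetic above: delta is in nanoseconds, so delta >> 20
 * divides by 2^20 = 1,048,576, i.e. roughly one profile hit per
 * 1.05 ms of blocking, while delta >> 10 divides by 1,024 to give an
 * approximate microsecond value for account_scheduler_latency().
 */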
/*
 * Current schedstat API version.
 *
 * Bump this up when changing the output format or the meaning of an existing
 * format, so that tools can adapt (or abort)
 */
#define SCHEDSTAT_VERSION 15
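
/*
 * Per-CPU output layout, matching the seq_printf() below:
 *
 *   cpu<N> yld_count 0 sched_count sched_goidle ttwu_count ttwu_local
 *          rq_cpu_time run_delay pcount
 *
 * The literal 0 stands in for a legacy field that is always zero, kept
 * so that existing parsers do not see their columns shift.
 */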
static int show_schedstat(struct seq_file *seq, void *v)
{
	int cpu;

	if (v == (void *)1) {
		seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
		seq_printf(seq, "timestamp %lu\n", jiffies);
	} else {
		struct rq *rq;
#ifdef CONFIG_SMP
		struct sched_domain *sd;
		int dcount = 0;
#endif
		cpu = (unsigned long)(v - 2);
		rq = cpu_rq(cpu);

		/* runqueue-specific stats */
		seq_printf(seq,
		    "cpu%d %u 0 %u %u %u %u %llu %llu %lu",
		    cpu, rq->yld_count,
		    rq->sched_count, rq->sched_goidle,
		    rq->ttwu_count, rq->ttwu_local,
		    rq->rq_cpu_time,
		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);

		seq_printf(seq, "\n");

#ifdef CONFIG_SMP
		/* domain-specific stats */
		rcu_read_lock();
		for_each_domain(cpu, sd) {
			enum cpu_idle_type itype;

			seq_printf(seq, "domain%d %*pb", dcount++,
				   cpumask_pr_args(sched_domain_span(sd)));
			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
			     itype++) {
				seq_printf(seq, " %u %u %u %u %u %u %u %u",
				    sd->lb_count[itype],
				    sd->lb_balanced[itype],
				    sd->lb_failed[itype],
				    sd->lb_imbalance[itype],
				    sd->lb_gained[itype],
				    sd->lb_hot_gained[itype],
				    sd->lb_nobusyq[itype],
				    sd->lb_nobusyg[itype]);
			}
			seq_printf(seq,
				   " %u %u %u %u %u %u %u %u %u %u %u %u\n",
				   sd->alb_count, sd->alb_failed, sd->alb_pushed,
				   sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
				   sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
				   sd->ttwu_wake_remote, sd->ttwu_move_affine,
				   sd->ttwu_move_balance);
		}
		rcu_read_unlock();
#endif
	}
	return 0;
}
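
/*
 * Example read (values illustrative, not from a real machine):
 *
 *   $ cat /proc/schedstat
 *   version 15
 *   timestamp 4294892426
 *   cpu0 455 0 2250 549 6793 2795 86120794276 51854296 2231
 *   domain0 003 2 2 0 0 0 0 2 0 ...
 */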
/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some CPUs, including cpu 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *schedstat_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}
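
/*
 * Cookie decoding, e.g. with CPUs {0, 2} online: offset 0 yields the
 * header cookie (void *)1; later offsets walk the online mask, so cpu 0
 * is returned as (void *)2 and cpu 2 as (void *)4, while offline cpu 1
 * is skipped. show_schedstat() recovers the CPU number as
 * (unsigned long)(v - 2).
 */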
static void *schedstat_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;

	return schedstat_start(file, offset);
}

static void schedstat_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations schedstat_sops = {
	.start = schedstat_start,
	.next  = schedstat_next,
	.stop  = schedstat_stop,
	.show  = show_schedstat,
};

static int __init proc_schedstat_init(void)
{
	proc_create_seq("schedstat", 0, NULL, &schedstat_sops);

	return 0;
}
subsys_initcall(proc_schedstat_init);