stat.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/cpumask.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/sched/stat.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/time_namespace.h>
#include <linux/irqnr.h>
#include <linux/sched/cputime.h>
#include <linux/tick.h>

#ifndef arch_irq_stat_cpu
#define arch_irq_stat_cpu(cpu) 0
#endif
#ifndef arch_irq_stat
#define arch_irq_stat() 0
#endif
#ifdef arch_idle_time

u64 get_idle_time(struct kernel_cpustat *kcs, int cpu)
{
	u64 idle;

	idle = kcs->cpustat[CPUTIME_IDLE];
	if (cpu_online(cpu) && !nr_iowait_cpu(cpu))
		idle += arch_idle_time(cpu);
	return idle;
}

static u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu)
{
	u64 iowait;

	iowait = kcs->cpustat[CPUTIME_IOWAIT];
	if (cpu_online(cpu) && nr_iowait_cpu(cpu))
		iowait += arch_idle_time(cpu);
	return iowait;
}

#else

u64 get_idle_time(struct kernel_cpustat *kcs, int cpu)
{
	u64 idle, idle_usecs = -1ULL;

	if (cpu_online(cpu))
		idle_usecs = get_cpu_idle_time_us(cpu, NULL);

	if (idle_usecs == -1ULL)
		/* !NO_HZ or cpu offline so we can rely on cpustat.idle */
		idle = kcs->cpustat[CPUTIME_IDLE];
	else
		idle = idle_usecs * NSEC_PER_USEC;

	return idle;
}

static u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu)
{
	u64 iowait, iowait_usecs = -1ULL;

	if (cpu_online(cpu))
		iowait_usecs = get_cpu_iowait_time_us(cpu, NULL);

	if (iowait_usecs == -1ULL)
		/* !NO_HZ or cpu offline so we can rely on cpustat.iowait */
		iowait = kcs->cpustat[CPUTIME_IOWAIT];
	else
		iowait = iowait_usecs * NSEC_PER_USEC;

	return iowait;
}

#endif
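
/*
 * Helpers for the "intr" line: show_irq_gap() emits a run of " 0" entries
 * for interrupt numbers that were never allocated, and show_all_irqs()
 * walks the active IRQs, filling the gaps between them with zeros.
 */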
static void show_irq_gap(struct seq_file *p, unsigned int gap)
{
	static const char zeros[] = " 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0";

	while (gap > 0) {
		unsigned int inc;

		inc = min_t(unsigned int, gap, ARRAY_SIZE(zeros) / 2);
		seq_write(p, zeros, 2 * inc);
		gap -= inc;
	}
}

static void show_all_irqs(struct seq_file *p)
{
	unsigned int i, next = 0;

	for_each_active_irq(i) {
		show_irq_gap(p, i - next);
		seq_put_decimal_ull(p, " ", kstat_irqs_usr(i));
		next = i + 1;
	}
	show_irq_gap(p, nr_irqs - next);
}
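
/*
 * show_stat() produces the whole of /proc/stat: the aggregate "cpu" line,
 * one "cpuN" line per online CPU, the "intr" and "softirq" totals, plus
 * ctxt, btime, processes, procs_running and procs_blocked.
 */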
static int show_stat(struct seq_file *p, void *v)
{
	int i, j;
	u64 user, nice, system, idle, iowait, irq, softirq, steal;
	u64 guest, guest_nice;
	u64 sum = 0;
	u64 sum_softirq = 0;
	unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
	struct timespec64 boottime;

	user = nice = system = idle = iowait =
		irq = softirq = steal = 0;
	guest = guest_nice = 0;
	getboottime64(&boottime);
	/* shift boot timestamp according to the timens offset */
	timens_sub_boottime(&boottime);

	for_each_possible_cpu(i) {
		struct kernel_cpustat kcpustat;
		u64 *cpustat = kcpustat.cpustat;

		kcpustat_cpu_fetch(&kcpustat, i);

		user += cpustat[CPUTIME_USER];
		nice += cpustat[CPUTIME_NICE];
		system += cpustat[CPUTIME_SYSTEM];
		idle += get_idle_time(&kcpustat, i);
		iowait += get_iowait_time(&kcpustat, i);
		irq += cpustat[CPUTIME_IRQ];
		softirq += cpustat[CPUTIME_SOFTIRQ];
		steal += cpustat[CPUTIME_STEAL];
		guest += cpustat[CPUTIME_GUEST];
		guest_nice += cpustat[CPUTIME_GUEST_NICE];
		sum += kstat_cpu_irqs_sum(i);
		sum += arch_irq_stat_cpu(i);

		for (j = 0; j < NR_SOFTIRQS; j++) {
			unsigned int softirq_stat = kstat_softirqs_cpu(j, i);

			per_softirq_sums[j] += softirq_stat;
			sum_softirq += softirq_stat;
		}
	}
	sum += arch_irq_stat();

	seq_put_decimal_ull(p, "cpu ", nsec_to_clock_t(user));
	seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice));
	seq_put_decimal_ull(p, " ", nsec_to_clock_t(system));
	seq_put_decimal_ull(p, " ", nsec_to_clock_t(idle));
	seq_put_decimal_ull(p, " ", nsec_to_clock_t(iowait));
	seq_put_decimal_ull(p, " ", nsec_to_clock_t(irq));
	seq_put_decimal_ull(p, " ", nsec_to_clock_t(softirq));
	seq_put_decimal_ull(p, " ", nsec_to_clock_t(steal));
	seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest));
	seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest_nice));
	seq_putc(p, '\n');

	for_each_online_cpu(i) {
		struct kernel_cpustat kcpustat;
		u64 *cpustat = kcpustat.cpustat;

		kcpustat_cpu_fetch(&kcpustat, i);

		/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
		user = cpustat[CPUTIME_USER];
		nice = cpustat[CPUTIME_NICE];
		system = cpustat[CPUTIME_SYSTEM];
		idle = get_idle_time(&kcpustat, i);
		iowait = get_iowait_time(&kcpustat, i);
		irq = cpustat[CPUTIME_IRQ];
		softirq = cpustat[CPUTIME_SOFTIRQ];
		steal = cpustat[CPUTIME_STEAL];
		guest = cpustat[CPUTIME_GUEST];
		guest_nice = cpustat[CPUTIME_GUEST_NICE];
		seq_printf(p, "cpu%d", i);
		seq_put_decimal_ull(p, " ", nsec_to_clock_t(user));
		seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice));
		seq_put_decimal_ull(p, " ", nsec_to_clock_t(system));
		seq_put_decimal_ull(p, " ", nsec_to_clock_t(idle));
		seq_put_decimal_ull(p, " ", nsec_to_clock_t(iowait));
		seq_put_decimal_ull(p, " ", nsec_to_clock_t(irq));
		seq_put_decimal_ull(p, " ", nsec_to_clock_t(softirq));
		seq_put_decimal_ull(p, " ", nsec_to_clock_t(steal));
		seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest));
		seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest_nice));
		seq_putc(p, '\n');
	}
	seq_put_decimal_ull(p, "intr ", (unsigned long long)sum);

	show_all_irqs(p);

	seq_printf(p,
		"\nctxt %llu\n"
		"btime %llu\n"
		"processes %lu\n"
		"procs_running %u\n"
		"procs_blocked %u\n",
		nr_context_switches(),
		(unsigned long long)boottime.tv_sec,
		total_forks,
		nr_running(),
		nr_iowait());

	seq_put_decimal_ull(p, "softirq ", (unsigned long long)sum_softirq);

	for (i = 0; i < NR_SOFTIRQS; i++)
		seq_put_decimal_ull(p, " ", per_softirq_sums[i]);
	seq_putc(p, '\n');

	return 0;
}
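
/*
 * Open /proc/stat with a seq_file buffer pre-sized from the online CPU
 * and IRQ counts, so the whole report fits in a single pass.
 */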
static int stat_open(struct inode *inode, struct file *file)
{
	unsigned int size = 1024 + 128 * num_online_cpus();

	/* minimum size to display an interrupt count : 2 bytes */
	size += 2 * nr_irqs;

	return single_open_size(file, show_stat, NULL, size);
}

static const struct proc_ops stat_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_open	= stat_open,
	.proc_read_iter	= seq_read_iter,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

static int __init proc_stat_init(void)
{
	proc_create("stat", 0, NULL, &stat_proc_ops);
	return 0;
}
fs_initcall(proc_stat_init);