idle.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Idle functions for s390.
 *
 * Copyright IBM Corp. 2014
 *
 * Author(s): Martin Schwidefsky <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/sched/cputime.h>
#include <trace/events/power.h>
#include <asm/cpu_mf.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include "entry.h"

static DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
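
/*
 * Called when an interrupt ends an enabled wait: clear the wait flag,
 * record the exit timestamps taken at interrupt entry and fold the
 * pending clock and CPU-timer deltas into steal and system time.
 */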
void account_idle_time_irq(void)
{
        struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
        u64 cycles_new[8];
        int i;

        clear_cpu_flag(CIF_ENABLED_WAIT);
        if (smp_cpu_mtid) {
                stcctm(MT_DIAG, smp_cpu_mtid, cycles_new);
                for (i = 0; i < smp_cpu_mtid; i++)
                        this_cpu_add(mt_cycles[i], cycles_new[i] - idle->mt_cycles_enter[i]);
        }

        idle->clock_idle_exit = S390_lowcore.int_clock;
        idle->timer_idle_exit = S390_lowcore.sys_enter_timer;

        S390_lowcore.steal_timer += idle->clock_idle_enter - S390_lowcore.last_update_clock;
        S390_lowcore.last_update_clock = idle->clock_idle_exit;

        S390_lowcore.system_timer += S390_lowcore.last_update_timer - idle->timer_idle_enter;
        S390_lowcore.last_update_timer = idle->timer_idle_exit;
}
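
/*
 * Arch hook for the generic idle loop: load an enabled wait PSW and,
 * once an interrupt has woken the CPU, account the wait as idle time.
 */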
void noinstr arch_cpu_idle(void)
{
        struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
        unsigned long idle_time;
        unsigned long psw_mask;

        /* Wait for external, I/O or machine check interrupt. */
        psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT |
                   PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
        clear_cpu_flag(CIF_NOHZ_DELAY);

        /* psw_idle() returns with interrupts disabled. */
        psw_idle(idle, psw_mask);

        /* Account time spent with enabled wait psw loaded as idle time. */
        raw_write_seqcount_begin(&idle->seqcount);
        idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
        idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
        idle->idle_time += idle_time;
        idle->idle_count++;
        account_idle_time(cputime_to_nsecs(idle_time));
        raw_write_seqcount_end(&idle->seqcount);
        raw_local_irq_enable();
}
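
/*
 * Per-cpu sysfs attribute: number of idle periods; an idle period that
 * is still in flight is counted as well.
 */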
static ssize_t show_idle_count(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
        unsigned long idle_count;
        unsigned int seq;

        do {
                seq = read_seqcount_begin(&idle->seqcount);
                idle_count = READ_ONCE(idle->idle_count);
                if (READ_ONCE(idle->clock_idle_enter))
                        idle_count++;
        } while (read_seqcount_retry(&idle->seqcount, seq));
        return sprintf(buf, "%lu\n", idle_count);
}
DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
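
/*
 * Per-cpu sysfs attribute: cumulative idle time. The TOD clock delta is
 * shifted right by 12 bits to convert it to microseconds (TOD bit 51
 * increments once per microsecond), hence the name "idle_time_us".
 */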
static ssize_t show_idle_time(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        unsigned long now, idle_time, idle_enter, idle_exit, in_idle;
        struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
        unsigned int seq;

        do {
                seq = read_seqcount_begin(&idle->seqcount);
                idle_time = READ_ONCE(idle->idle_time);
                idle_enter = READ_ONCE(idle->clock_idle_enter);
                idle_exit = READ_ONCE(idle->clock_idle_exit);
        } while (read_seqcount_retry(&idle->seqcount, seq));
        in_idle = 0;
        now = get_tod_clock();
        if (idle_enter) {
                if (idle_exit) {
                        in_idle = idle_exit - idle_enter;
                } else if (now > idle_enter) {
                        in_idle = now - idle_enter;
                }
        }
        idle_time += in_idle;
        return sprintf(buf, "%lu\n", idle_time >> 12);
}
DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
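
/*
 * Return the not-yet-accounted idle time of @cpu in nanoseconds, i.e.
 * the length of the idle period currently in progress (or just ended
 * but not yet folded into idle_time).
 */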
u64 arch_cpu_idle_time(int cpu)
{
        struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
        unsigned long now, idle_enter, idle_exit, in_idle;
        unsigned int seq;

        do {
                seq = read_seqcount_begin(&idle->seqcount);
                idle_enter = READ_ONCE(idle->clock_idle_enter);
                idle_exit = READ_ONCE(idle->clock_idle_exit);
        } while (read_seqcount_retry(&idle->seqcount, seq));
        in_idle = 0;
        now = get_tod_clock();
        if (idle_enter) {
                if (idle_exit) {
                        in_idle = idle_exit - idle_enter;
                } else if (now > idle_enter) {
                        in_idle = now - idle_enter;
                }
        }
        return cputime_to_nsecs(in_idle);
}
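
/* Nothing to do when entering or leaving the generic idle loop on s390. */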
void arch_cpu_idle_enter(void)
{
}

void arch_cpu_idle_exit(void)
{
}

void arch_cpu_idle_dead(void)
{
        cpu_die();
}