idle.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Idle daemon for PowerPC. The idle daemon handles any action
 * that needs to be taken when the system becomes idle.
 *
 * Originally written by Cort Dougan ([email protected]).
 * Subsequent 32-bit hacking by Tom Rini, Armin Kuster,
 * Paul Mackerras and others.
 *
 * iSeries support added by Mike Corrigan <[email protected]>
 *
 * Additional shared processor, SMT, and firmware support
 *    Copyright (c) 2003 Dave Engebretsen <[email protected]>
 *
 * 32-bit and 64-bit versions merged by Paul Mackerras <[email protected]>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sysctl.h>
#include <linux/tick.h>

#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/runlatch.h>
#include <asm/smp.h>

unsigned long cpuidle_disable = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(cpuidle_disable);

static int __init powersave_off(char *arg)
{
        ppc_md.power_save = NULL;
        cpuidle_disable = IDLE_POWERSAVE_OFF;
        return 1;
}
__setup("powersave=off", powersave_off);
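
/*
 * arch_cpu_idle() is the architecture hook invoked from the generic idle
 * loop when this CPU has nothing to run. It defers to the platform's
 * power_save() method when one is registered, otherwise it simply drops
 * the SMT thread priority, and it returns with interrupts enabled.
 */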
void arch_cpu_idle(void)
{
        ppc64_runlatch_off();

        if (ppc_md.power_save) {
                ppc_md.power_save();
                /*
                 * Some power_save functions return with
                 * interrupts enabled, some don't.
                 */
                if (irqs_disabled())
                        raw_local_irq_enable();
        } else {
                raw_local_irq_enable();
                /*
                 * Go into low thread priority and possibly
                 * low power mode.
                 */
                HMT_low();
                HMT_very_low();
        }

        HMT_medium();
        ppc64_runlatch_on();
}
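
/*
 * Non-zero when the idle loop is allowed to nap the CPU. Exposed via the
 * "powersave-nap" sysctl registered below and checked by power4_idle().
 */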
int powersave_nap;

#ifdef CONFIG_PPC_970_NAP
void power4_idle(void)
{
        if (!cpu_has_feature(CPU_FTR_CAN_NAP))
                return;

        if (!powersave_nap)
                return;

        if (!prep_irq_for_idle())
                return;

        if (cpu_has_feature(CPU_FTR_ALTIVEC))
                asm volatile(PPC_DSSALL " ; sync" ::: "memory");

        power4_idle_nap();

        /*
         * power4_idle_nap() returns to our caller with interrupts enabled
         * (soft and hard). Our caller can cope with interrupts either
         * disabled or enabled upon return.
         */
}
#endif
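
/*
 * With CONFIG_SYSCTL enabled, the knob below appears as
 * /proc/sys/kernel/powersave-nap, so nap mode can be toggled at run time,
 * e.g. "echo 1 > /proc/sys/kernel/powersave-nap".
 */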
#ifdef CONFIG_SYSCTL
/*
 * Register the sysctl to set/clear powersave_nap.
 */
static struct ctl_table powersave_nap_ctl_table[] = {
        {
                .procname       = "powersave-nap",
                .data           = &powersave_nap,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {}
};
static struct ctl_table powersave_nap_sysctl_root[] = {
        {
                .procname       = "kernel",
                .mode           = 0555,
                .child          = powersave_nap_ctl_table,
        },
        {}
};

static int __init
register_powersave_nap_sysctl(void)
{
        register_sysctl_table(powersave_nap_sysctl_root);
        return 0;
}
__initcall(register_powersave_nap_sysctl);
#endif