// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP4+ CPU idle Routines
 *
 * Copyright (C) 2011-2013 Texas Instruments, Inc.
 * Santosh Shilimkar <[email protected]>
 * Rajendra Nayak <[email protected]>
 */

#include <linux/sched.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/tick.h>

#include <asm/cpuidle.h>

#include "common.h"
#include "pm.h"
#include "prm.h"
#include "soc.h"
#include "clockdomain.h"

#define MAX_CPUS	2
/* Machine specific information */
struct idle_statedata {
	u32 cpu_state;		/* target CPUx power domain state */
	u32 mpu_logic_state;	/* MPUSS logic state while in retention */
	u32 mpu_state;		/* target MPUSS power domain state */
	u32 mpu_state_vote;	/* CPUs voting for this MPU state; guarded by mpu_lock */
};
/*
 * OMAP4 idle state table, indexed by the cpuidle state index.
 * Entries correspond 1:1 to the C1/C2/C3 states in omap4_idle_driver.
 */
static struct idle_statedata omap4_idle_data[] = {
	{
		/* C1: CPUx ON, MPUSS ON */
		.cpu_state = PWRDM_POWER_ON,
		.mpu_state = PWRDM_POWER_ON,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
	{
		/* C2: CPUx OFF, MPUSS CSWR (RET with logic retained) */
		.cpu_state = PWRDM_POWER_OFF,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
	{
		/* C3: CPUx OFF, MPUSS OSWR (RET with logic off) */
		.cpu_state = PWRDM_POWER_OFF,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_OFF,
	},
};
/*
 * OMAP5 idle state table, indexed by the cpuidle state index.
 * Entries correspond 1:1 to the C1/C2 states in omap5_idle_driver.
 */
static struct idle_statedata omap5_idle_data[] = {
	{
		/* C1: CPUx ON, MPUSS ON */
		.cpu_state = PWRDM_POWER_ON,
		.mpu_state = PWRDM_POWER_ON,
		.mpu_logic_state = PWRDM_POWER_ON,
	},
	{
		/* C2: CPUx CSWR, MPUSS CSWR */
		.cpu_state = PWRDM_POWER_RET,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
};
/* Power/clock domain handles resolved once in omap4_idle_init() */
static struct powerdomain *mpu_pd, *cpu_pd[MAX_CPUS];
static struct clockdomain *cpu_clkdm[MAX_CPUS];

/* Barrier used to resynchronize CPUs that abort a coupled transition */
static atomic_t abort_barrier;
/* Per-CPU flag: CPUx has completed its low-power entry attempt */
static bool cpu_done[MAX_CPUS];
/* Idle state table for the running SoC; repointed in omap4_idle_init() */
static struct idle_statedata *state_ptr = &omap4_idle_data[0];
/* Serializes MPU power-state voting in omap_enter_idle_smp() */
static DEFINE_RAW_SPINLOCK(mpu_lock);
/* Private functions */

/**
 * omap_enter_idle_[simple/coupled] - OMAP4PLUS cpuidle entry functions
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @index: the index of state to be entered
 *
 * Called from the CPUidle framework to program the device to the
 * specified low power state selected by the governor.
 * Returns the index of the low power state that was entered.
 */
static int omap_enter_idle_simple(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	/* C1: plain WFI, no power domain reprogramming needed */
	omap_do_wfi();
	return index;
}
  81. static int omap_enter_idle_smp(struct cpuidle_device *dev,
  82. struct cpuidle_driver *drv,
  83. int index)
  84. {
  85. struct idle_statedata *cx = state_ptr + index;
  86. unsigned long flag;
  87. raw_spin_lock_irqsave(&mpu_lock, flag);
  88. cx->mpu_state_vote++;
  89. if (cx->mpu_state_vote == num_online_cpus()) {
  90. pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
  91. omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
  92. }
  93. raw_spin_unlock_irqrestore(&mpu_lock, flag);
  94. omap4_enter_lowpower(dev->cpu, cx->cpu_state);
  95. raw_spin_lock_irqsave(&mpu_lock, flag);
  96. if (cx->mpu_state_vote == num_online_cpus())
  97. omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
  98. cx->mpu_state_vote--;
  99. raw_spin_unlock_irqrestore(&mpu_lock, flag);
  100. return index;
  101. }
/*
 * omap_enter_idle_coupled - coupled entry for OMAP4 C2/C3.
 * Both CPUs enter together; CPU0 programs the shared MPUSS domain and
 * handles cluster save/restore when context can be lost (OSWR).
 */
static int omap_enter_idle_coupled(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	struct idle_statedata *cx = state_ptr + index;
	u32 mpuss_can_lose_context = 0;
	int error;

	/*
	 * CPU0 has to wait and stay ON until CPU1 is OFF state.
	 * This is necessary to honour hardware recommendation
	 * of triggering all the possible low power modes once CPU1 is
	 * out of coherency and in OFF mode.
	 */
	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
		while (pwrdm_read_pwrst(cpu_pd[1]) != PWRDM_POWER_OFF) {
			cpu_relax();

			/*
			 * CPU1 could have already entered & exited idle
			 * without hitting off because of a wakeup
			 * or a failed attempt to hit off mode. Check for
			 * that here, otherwise we could spin forever
			 * waiting for CPU1 off.
			 */
			if (cpu_done[1])
				goto fail;

		}
	}

	/* MPU RET with logic OFF (OSWR) means GIC/wakeupgen context is lost */
	mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) &&
				 (cx->mpu_logic_state == PWRDM_POWER_OFF);

	/* Enter broadcast mode for periodic timers */
	RCU_NONIDLE(tick_broadcast_enable());

	/* Enter broadcast mode for one-shot timers */
	RCU_NONIDLE(tick_broadcast_enter());

	/*
	 * Call idle CPU PM enter notifier chain so that
	 * VFP and per CPU interrupt context is saved.
	 */
	error = cpu_pm_enter();
	if (error)
		goto cpu_pm_out;

	if (dev->cpu == 0) {
		pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
		RCU_NONIDLE(omap_set_pwrdm_state(mpu_pd, cx->mpu_state));

		/*
		 * Call idle CPU cluster PM enter notifier chain
		 * to save GIC and wakeupgen context.
		 */
		if (mpuss_can_lose_context) {
			error = cpu_cluster_pm_enter();
			if (error) {
				/* Notifier failed: fall back to the safe C1 state */
				index = 0;
				cx = state_ptr + index;
				pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
				RCU_NONIDLE(omap_set_pwrdm_state(mpu_pd, cx->mpu_state));
				mpuss_can_lose_context = 0;
			}
		}
	}

	omap4_enter_lowpower(dev->cpu, cx->cpu_state);
	cpu_done[dev->cpu] = true;

	/* Wakeup CPU1 only if it is not offlined */
	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {

		/*
		 * GICD erratum workaround: keep the distributor disabled
		 * across CPU1 wakeup so the ROM code does not re-enable it
		 * prematurely — TODO confirm against errata i625 notes.
		 */
		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
		    mpuss_can_lose_context)
			gic_dist_disable();

		RCU_NONIDLE(clkdm_deny_idle(cpu_clkdm[1]));
		RCU_NONIDLE(omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON));
		RCU_NONIDLE(clkdm_allow_idle(cpu_clkdm[1]));

		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
		    mpuss_can_lose_context) {
			/* Wait for CPU1's wakeup path to re-enable the GIC */
			while (gic_dist_disabled()) {
				udelay(1);
				cpu_relax();
			}
			gic_timer_retrigger();
		}
	}

	/*
	 * Call idle CPU cluster PM exit notifier chain
	 * to restore GIC and wakeupgen context.
	 */
	if (dev->cpu == 0 && mpuss_can_lose_context)
		cpu_cluster_pm_exit();

	/*
	 * Call idle CPU PM exit notifier chain to restore
	 * VFP and per CPU IRQ context.
	 */
	cpu_pm_exit();

cpu_pm_out:
	RCU_NONIDLE(tick_broadcast_exit());

fail:
	/* Both CPUs rendezvous here whether the attempt succeeded or not */
	cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
	cpu_done[dev->cpu] = false;

	return index;
}
/*
 * OMAP4 cpuidle driver. State indices match omap4_idle_data[];
 * latencies/residencies are in microseconds.
 */
static struct cpuidle_driver omap4_idle_driver = {
	.name				= "omap4_idle",
	.owner				= THIS_MODULE,
	.states = {
		{
			/* C1 - CPU0 ON + CPU1 ON + MPU ON */
			.exit_latency = 2 + 2,
			.target_residency = 5,
			.enter = omap_enter_idle_simple,
			.name = "C1",
			.desc = "CPUx ON, MPUSS ON"
		},
		{
			/* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
			.exit_latency = 328 + 440,
			.target_residency = 960,
			.flags = CPUIDLE_FLAG_COUPLED,
			.enter = omap_enter_idle_coupled,
			.name = "C2",
			.desc = "CPUx OFF, MPUSS CSWR",
		},
		{
			/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
			.exit_latency = 460 + 518,
			.target_residency = 1100,
			.flags = CPUIDLE_FLAG_COUPLED,
			.enter = omap_enter_idle_coupled,
			.name = "C3",
			.desc = "CPUx OFF, MPUSS OSWR",
		},
	},
	.state_count = ARRAY_SIZE(omap4_idle_data),
	.safe_state_index = 0,
};
/*
 * OMAP5 cpuidle driver. State indices match omap5_idle_data[];
 * latencies/residencies are in microseconds.
 */
static struct cpuidle_driver omap5_idle_driver = {
	.name				= "omap5_idle",
	.owner				= THIS_MODULE,
	.states = {
		{
			/* C1 - CPU0 ON + CPU1 ON + MPU ON */
			.exit_latency = 2 + 2,
			.target_residency = 5,
			.enter = omap_enter_idle_simple,
			.name = "C1",
			.desc = "CPUx WFI, MPUSS ON"
		},
		{
			/* C2 - CPU0 RET + CPU1 RET + MPU CSWR */
			.exit_latency = 48 + 60,
			.target_residency = 100,
			.flags = CPUIDLE_FLAG_TIMER_STOP,
			.enter = omap_enter_idle_smp,
			.name = "C2",
			.desc = "CPUx CSWR, MPUSS CSWR",
		},
	},
	.state_count = ARRAY_SIZE(omap5_idle_data),
	.safe_state_index = 0,
};
  256. /* Public functions */
  257. /**
  258. * omap4_idle_init - Init routine for OMAP4+ idle
  259. *
  260. * Registers the OMAP4+ specific cpuidle driver to the cpuidle
  261. * framework with the valid set of states.
  262. */
  263. int __init omap4_idle_init(void)
  264. {
  265. struct cpuidle_driver *idle_driver;
  266. if (soc_is_omap54xx()) {
  267. state_ptr = &omap5_idle_data[0];
  268. idle_driver = &omap5_idle_driver;
  269. } else {
  270. state_ptr = &omap4_idle_data[0];
  271. idle_driver = &omap4_idle_driver;
  272. }
  273. mpu_pd = pwrdm_lookup("mpu_pwrdm");
  274. cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm");
  275. cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm");
  276. if ((!mpu_pd) || (!cpu_pd[0]) || (!cpu_pd[1]))
  277. return -ENODEV;
  278. cpu_clkdm[0] = clkdm_lookup("mpu0_clkdm");
  279. cpu_clkdm[1] = clkdm_lookup("mpu1_clkdm");
  280. if (!cpu_clkdm[0] || !cpu_clkdm[1])
  281. return -ENODEV;
  282. return cpuidle_register(idle_driver, cpu_online_mask);
  283. }