omap-mpuss-lowpower.c 13 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * OMAP MPUSS low power code
  4. *
  5. * Copyright (C) 2011 Texas Instruments, Inc.
  6. * Santosh Shilimkar <[email protected]>
  7. *
  8. * OMAP4430 MPUSS mainly consists of dual Cortex-A9 with per-CPU
  9. * Local timer and Watchdog, GIC, SCU, PL310 L2 cache controller,
  10. * CPU0 and CPU1 LPRM modules.
* CPU0, CPU1 and MPUSS each have their own power domain and
  12. * hence multiple low power combinations of MPUSS are possible.
  13. *
  14. * The CPU0 and CPU1 can't support Closed switch Retention (CSWR)
  15. * because the mode is not supported by hw constraints of dormant
  16. * mode. While waking up from the dormant mode, a reset signal
  17. * to the Cortex-A9 processor must be asserted by the external
  18. * power controller.
  19. *
  20. * With architectural inputs and hardware recommendations, only
  21. * below modes are supported from power gain vs latency point of view.
  22. *
  23. * CPU0 CPU1 MPUSS
  24. * ----------------------------------------------
  25. * ON ON ON
  26. * ON(Inactive) OFF ON(Inactive)
  27. * OFF OFF CSWR
  28. * OFF OFF OSWR
  29. * OFF OFF OFF(Device OFF *TBD)
  30. * ----------------------------------------------
  31. *
  32. * Note: CPU0 is the master core and it is the last CPU to go down
* and first to wake-up when MPUSS low power states are exercised
  34. */
  35. #include <linux/kernel.h>
  36. #include <linux/io.h>
  37. #include <linux/errno.h>
  38. #include <linux/linkage.h>
  39. #include <linux/smp.h>
  40. #include <asm/cacheflush.h>
  41. #include <asm/tlbflush.h>
  42. #include <asm/smp_scu.h>
  43. #include <asm/suspend.h>
  44. #include <asm/virt.h>
  45. #include <asm/hardware/cache-l2x0.h>
  46. #include "soc.h"
  47. #include "common.h"
  48. #include "omap44xx.h"
  49. #include "omap4-sar-layout.h"
  50. #include "pm.h"
  51. #include "prcm_mpu44xx.h"
  52. #include "prcm_mpu54xx.h"
  53. #include "prminst44xx.h"
  54. #include "prcm44xx.h"
  55. #include "prm44xx.h"
  56. #include "prm-regbits-44xx.h"
/* SAR RAM base; set in omap4_mpuss_early_init() on OMAP4/5 class SoCs */
static void __iomem *sar_base;
/* CPU1 wakeup address found in SAR RAM at early init; kept for kexec checks */
static u32 old_cpu1_ns_pa_addr;
  59. #if defined(CONFIG_PM) && defined(CONFIG_SMP)
/**
 * struct omap4_cpu_pm_info - per-CPU low power bookkeeping
 * @pwrdm:		CPU power domain
 * @scu_sar_addr:	SAR slot for the SCU power status value
 * @wkup_sar_addr:	SAR slot for the CPU wakeup (resume) address
 * @l2x0_sar_addr:	SAR slot for the L2X0 save state
 *
 * Any of the SAR addresses may be NULL when no SAR RAM base is available;
 * the accessors below check before writing.
 */
struct omap4_cpu_pm_info {
	struct powerdomain *pwrdm;
	void __iomem *scu_sar_addr;
	void __iomem *wkup_sar_addr;
	void __iomem *l2x0_sar_addr;
};
/**
 * struct cpu_pm_ops - CPU pm operations
 * @finish_suspend:	CPU suspend finisher function pointer
 * @resume:		CPU resume function pointer
 * @scu_prepare:	CPU Snoop Control Unit programming function pointer
 * @hotplug_restart:	CPU restart function pointer
 *
 * Structure holds function pointers for CPU low power operations like
 * suspend, resume and SCU programming.
 */
struct cpu_pm_ops {
	int (*finish_suspend)(unsigned long cpu_state);
	void (*resume)(void);
	void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
	void (*hotplug_restart)(void);
};
static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
/* MPUSS power domain, looked up in omap4_mpuss_init() */
static struct powerdomain *mpuss_pd;
/* Per-SoC CPUx context register offset (OMAP4 vs OMAP5/DRA7) */
static u32 cpu_context_offset;
/* Default suspend finisher: plain WFI, no state is lost so nothing is saved */
static int default_finish_suspend(unsigned long cpu_state)
{
	omap_do_wfi();
	return 0;
}
/* No-op resume hook, used until SoC-specific handlers are installed */
static void dummy_cpu_resume(void)
{}

/* No-op SCU prepare hook, used until SoC-specific handlers are installed */
static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
{}
/*
 * Safe defaults; omap4_mpuss_init() replaces these with the real
 * handlers when running on an OMAP44xx SoC.
 */
static struct cpu_pm_ops omap_pm_ops = {
	.finish_suspend		= default_finish_suspend,
	.resume			= dummy_cpu_resume,
	.scu_prepare		= dummy_scu_prepare,
	.hotplug_restart	= dummy_cpu_resume,
};
  100. /*
  101. * Program the wakeup routine address for the CPU0 and CPU1
  102. * used for OFF or DORMANT wakeup.
  103. */
  104. static inline void set_cpu_wakeup_addr(unsigned int cpu_id, u32 addr)
  105. {
  106. struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
  107. if (pm_info->wkup_sar_addr)
  108. writel_relaxed(addr, pm_info->wkup_sar_addr);
  109. }
  110. /*
  111. * Store the SCU power status value to scratchpad memory
  112. */
  113. static void scu_pwrst_prepare(unsigned int cpu_id, unsigned int cpu_state)
  114. {
  115. struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
  116. u32 scu_pwr_st;
  117. switch (cpu_state) {
  118. case PWRDM_POWER_RET:
  119. scu_pwr_st = SCU_PM_DORMANT;
  120. break;
  121. case PWRDM_POWER_OFF:
  122. scu_pwr_st = SCU_PM_POWEROFF;
  123. break;
  124. case PWRDM_POWER_ON:
  125. case PWRDM_POWER_INACTIVE:
  126. default:
  127. scu_pwr_st = SCU_PM_NORMAL;
  128. break;
  129. }
  130. if (pm_info->scu_sar_addr)
  131. writel_relaxed(scu_pwr_st, pm_info->scu_sar_addr);
  132. }
  133. /* Helper functions for MPUSS OSWR */
  134. static inline void mpuss_clear_prev_logic_pwrst(void)
  135. {
  136. u32 reg;
  137. reg = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
  138. OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
  139. omap4_prminst_write_inst_reg(reg, OMAP4430_PRM_PARTITION,
  140. OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
  141. }
  142. static inline void cpu_clear_prev_logic_pwrst(unsigned int cpu_id)
  143. {
  144. u32 reg;
  145. if (cpu_id) {
  146. reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU1_INST,
  147. cpu_context_offset);
  148. omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU1_INST,
  149. cpu_context_offset);
  150. } else {
  151. reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU0_INST,
  152. cpu_context_offset);
  153. omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU0_INST,
  154. cpu_context_offset);
  155. }
  156. }
  157. /*
  158. * Store the CPU cluster state for L2X0 low power operations.
  159. */
  160. static void l2x0_pwrst_prepare(unsigned int cpu_id, unsigned int save_state)
  161. {
  162. struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
  163. if (pm_info->l2x0_sar_addr)
  164. writel_relaxed(save_state, pm_info->l2x0_sar_addr);
  165. }
/*
 * Save the L2X0 AUXCTRL and prefetch control values to SAR memory. They
 * are used by the low level code on every MPUSS OFF restore path.
 */
#ifdef CONFIG_CACHE_L2X0
static void __init save_l2x0_context(void)
{
	void __iomem *l2x0_base = omap4_get_l2cache_base();

	/* Only save when both the L2 cache and the SAR RAM are present */
	if (l2x0_base && sar_base) {
		writel_relaxed(l2x0_saved_regs.aux_ctrl,
			       sar_base + L2X0_AUXCTRL_OFFSET);
		writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
			       sar_base + L2X0_PREFETCH_CTRL_OFFSET);
	}
}
#else
static void __init save_l2x0_context(void)
{}
#endif
/**
 * omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function
 * The purpose of this function is to manage low power programming
 * of OMAP4 MPUSS subsystem
 * @cpu : CPU ID
 * @power_state: Low power state.
 *
 * MPUSS states for the context save:
 * save_state =
 *	0 - Nothing lost and no need to save: MPUSS INACTIVE
 *	1 - CPUx L1 and logic lost: MPUSS CSWR
 *	2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
 *	3 - CPUx L1 and logic lost + GIC + L2 lost: DEVICE OFF
 */
int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
{
	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);
	unsigned int save_state = 0, cpu_logic_state = PWRDM_POWER_RET;

	/* Power management is not supported on OMAP4430 ES1.0 */
	if (omap_rev() == OMAP4430_REV_ES1_0)
		return -ENXIO;

	switch (power_state) {
	case PWRDM_POWER_ON:
	case PWRDM_POWER_INACTIVE:
		save_state = 0;
		break;
	case PWRDM_POWER_OFF:
		cpu_logic_state = PWRDM_POWER_OFF;
		save_state = 1;
		break;
	case PWRDM_POWER_RET:
		if (IS_PM44XX_ERRATUM(PM_OMAP4_CPU_OSWR_DISABLE))
			save_state = 0;
		break;
	default:
		/*
		 * CPUx CSWR is invalid hardware state. Also CPUx OSWR
		 * doesn't make much sense, since logic is lost and $L1
		 * needs to be cleaned because of coherency. This makes
		 * CPUx OSWR equivalent to CPUx OFF and hence not supported
		 */
		WARN_ON(1);
		return -ENXIO;
	}

	pwrdm_pre_transition(NULL);

	/*
	 * Check MPUSS next state and save interrupt controller if needed.
	 * In MPUSS OSWR or device OFF, interrupt controller context is lost.
	 */
	mpuss_clear_prev_logic_pwrst();
	if ((pwrdm_read_next_pwrst(mpuss_pd) == PWRDM_POWER_RET) &&
	    (pwrdm_read_logic_retst(mpuss_pd) == PWRDM_POWER_OFF))
		save_state = 2;

	cpu_clear_prev_logic_pwrst(cpu);
	pwrdm_set_next_pwrst(pm_info->pwrdm, power_state);
	pwrdm_set_logic_retst(pm_info->pwrdm, cpu_logic_state);
	set_cpu_wakeup_addr(cpu, __pa_symbol(omap_pm_ops.resume));
	omap_pm_ops.scu_prepare(cpu, power_state);
	l2x0_pwrst_prepare(cpu, save_state);

	/*
	 * Call low level function with targeted low power state. Only take
	 * the cpu_suspend() context-save path when state will actually be
	 * lost; otherwise run the finisher (WFI) directly.
	 */
	if (save_state)
		cpu_suspend(save_state, omap_pm_ops.finish_suspend);
	else
		omap_pm_ops.finish_suspend(save_state);

	if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) && cpu)
		gic_dist_enable();

	/*
	 * Restore the CPUx power state to ON otherwise CPUx
	 * power domain can transition to programmed low power
	 * state while doing WFI outside the low power code. On
	 * secure devices, CPUx does WFI which can result in
	 * domain transition
	 */
	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);
	pwrdm_post_transition(NULL);

	return 0;
}
/**
 * omap4_hotplug_cpu: OMAP4 CPU hotplug entry
 * @cpu : CPU ID
 * @power_state: CPU low power state.
 */
int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)
{
	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);
	unsigned int cpu_state = 0;

	/* Power management is not supported on OMAP4430 ES1.0 */
	if (omap_rev() == OMAP4430_REV_ES1_0)
		return -ENXIO;

	/* Use the achievable power state for the domain */
	power_state = pwrdm_get_valid_lp_state(pm_info->pwrdm,
					       false, power_state);

	if (power_state == PWRDM_POWER_OFF)
		cpu_state = 1;

	pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
	pwrdm_set_next_pwrst(pm_info->pwrdm, power_state);
	set_cpu_wakeup_addr(cpu, __pa_symbol(omap_pm_ops.hotplug_restart));
	omap_pm_ops.scu_prepare(cpu, power_state);

	/*
	 * CPU never returns back if targeted power state is OFF mode.
	 * CPU ONLINE follows normal CPU ONLINE path via
	 * omap4_secondary_startup().
	 */
	omap_pm_ops.finish_suspend(cpu_state);

	/* Re-arm the next power state to ON for when the CPU comes back */
	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);
	return 0;
}
  292. /*
  293. * Enable Mercury Fast HG retention mode by default.
  294. */
  295. static void enable_mercury_retention_mode(void)
  296. {
  297. u32 reg;
  298. reg = omap4_prcm_mpu_read_inst_reg(OMAP54XX_PRCM_MPU_DEVICE_INST,
  299. OMAP54XX_PRCM_MPU_PRM_PSCON_COUNT_OFFSET);
  300. /* Enable HG_EN, HG_RAMPUP = fast mode */
  301. reg |= BIT(24) | BIT(25);
  302. omap4_prcm_mpu_write_inst_reg(reg, OMAP54XX_PRCM_MPU_DEVICE_INST,
  303. OMAP54XX_PRCM_MPU_PRM_PSCON_COUNT_OFFSET);
  304. }
/*
 * Initialise OMAP4 MPUSS: per-CPU SAR slots and power domains, the MPUSS
 * power domain, and the SoC-specific low power handlers.
 */
int __init omap4_mpuss_init(void)
{
	struct omap4_cpu_pm_info *pm_info;

	if (omap_rev() == OMAP4430_REV_ES1_0) {
		WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
		return -ENODEV;
	}

	/* Initialise per CPU PM information */
	pm_info = &per_cpu(omap4_pm_info, 0x0);
	if (sar_base) {
		pm_info->scu_sar_addr = sar_base + SCU_OFFSET0;
		/* Wakeup address slot differs between OMAP4 and OMAP5 SAR layouts */
		if (cpu_is_omap44xx())
			pm_info->wkup_sar_addr = sar_base +
					CPU0_WAKEUP_NS_PA_ADDR_OFFSET;
		else
			pm_info->wkup_sar_addr = sar_base +
					OMAP5_CPU0_WAKEUP_NS_PA_ADDR_OFFSET;
		pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET0;
	}
	pm_info->pwrdm = pwrdm_lookup("cpu0_pwrdm");
	if (!pm_info->pwrdm) {
		pr_err("Lookup failed for CPU0 pwrdm\n");
		return -ENODEV;
	}

	/* Clear CPU previous power domain state */
	pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
	cpu_clear_prev_logic_pwrst(0);

	/* Initialise CPU0 power domain state to ON */
	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);

	pm_info = &per_cpu(omap4_pm_info, 0x1);
	if (sar_base) {
		pm_info->scu_sar_addr = sar_base + SCU_OFFSET1;
		if (cpu_is_omap44xx())
			pm_info->wkup_sar_addr = sar_base +
					CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
		else
			pm_info->wkup_sar_addr = sar_base +
					OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
		pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET1;
	}
	pm_info->pwrdm = pwrdm_lookup("cpu1_pwrdm");
	if (!pm_info->pwrdm) {
		pr_err("Lookup failed for CPU1 pwrdm\n");
		return -ENODEV;
	}

	/* Clear CPU previous power domain state */
	pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
	cpu_clear_prev_logic_pwrst(1);

	/* Initialise CPU1 power domain state to ON */
	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);

	mpuss_pd = pwrdm_lookup("mpu_pwrdm");
	if (!mpuss_pd) {
		pr_err("Failed to lookup MPUSS power domain\n");
		return -ENODEV;
	}
	pwrdm_clear_all_prev_pwrst(mpuss_pd);
	mpuss_clear_prev_logic_pwrst();

	if (sar_base) {
		/* Save device type on scratchpad for low level code to use */
		writel_relaxed((omap_type() != OMAP2_DEVICE_TYPE_GP) ? 1 : 0,
			       sar_base + OMAP_TYPE_OFFSET);
		save_l2x0_context();
	}

	/* Install the SoC-specific low power handlers and context offset */
	if (cpu_is_omap44xx()) {
		omap_pm_ops.finish_suspend = omap4_finish_suspend;
		omap_pm_ops.resume = omap4_cpu_resume;
		omap_pm_ops.scu_prepare = scu_pwrst_prepare;
		omap_pm_ops.hotplug_restart = omap4_secondary_startup;
		cpu_context_offset = OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET;
	} else if (soc_is_omap54xx() || soc_is_dra7xx()) {
		cpu_context_offset = OMAP54XX_RM_CPU0_CPU0_CONTEXT_OFFSET;
		enable_mercury_retention_mode();
	}

	/* OMAP4460 uses its own dedicated secondary startup entry */
	if (cpu_is_omap446x())
		omap_pm_ops.hotplug_restart = omap4460_secondary_startup;

	return 0;
}
  385. #endif
/* Return the CPU1 wakeup address found in SAR RAM before we overwrote it */
u32 omap4_get_cpu1_ns_pa_addr(void)
{
	return old_cpu1_ns_pa_addr;
}
  390. /*
  391. * For kexec, we must set CPU1_WAKEUP_NS_PA_ADDR to point to
  392. * current kernel's secondary_startup() early before
  393. * clockdomains_init(). Otherwise clockdomain_init() can
  394. * wake CPU1 and cause a hang.
  395. */
  396. void __init omap4_mpuss_early_init(void)
  397. {
  398. unsigned long startup_pa;
  399. void __iomem *ns_pa_addr;
  400. if (!(soc_is_omap44xx() || soc_is_omap54xx()))
  401. return;
  402. sar_base = omap4_get_sar_ram_base();
  403. /* Save old NS_PA_ADDR for validity checks later on */
  404. if (soc_is_omap44xx())
  405. ns_pa_addr = sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
  406. else
  407. ns_pa_addr = sar_base + OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
  408. old_cpu1_ns_pa_addr = readl_relaxed(ns_pa_addr);
  409. if (soc_is_omap443x())
  410. startup_pa = __pa_symbol(omap4_secondary_startup);
  411. else if (soc_is_omap446x())
  412. startup_pa = __pa_symbol(omap4460_secondary_startup);
  413. else if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
  414. startup_pa = __pa_symbol(omap5_secondary_hyp_startup);
  415. else
  416. startup_pa = __pa_symbol(omap5_secondary_startup);
  417. if (soc_is_omap44xx())
  418. writel_relaxed(startup_pa, sar_base +
  419. CPU1_WAKEUP_NS_PA_ADDR_OFFSET);
  420. else
  421. writel_relaxed(startup_pa, sar_base +
  422. OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET);
  423. }