tc2_pm.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Created by: Nicolas Pitre, October 2012
 * Copyright:  (C) 2012-2013 Linaro Limited
 *
 * Some portions of this file were originally written by Achin Gupta
 * Copyright:  (C) 2012 ARM Limited
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/errno.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/mcpm.h>
#include <asm/proc-fns.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/cp15.h>

#include <linux/arm-cci.h>

#include "spc.h"

/* SCC conf registers */
#define RESET_CTRL                      0x018
#define RESET_A15_NCORERESET(cpu)       (1 << (2 + (cpu)))
#define RESET_A7_NCORERESET(cpu)        (1 << (16 + (cpu)))

#define A15_CONF                        0x400
#define A7_CONF                         0x500
#define SYS_INFO                        0x700
#define SPC_BASE                        0xb00

static void __iomem *scc;

#define TC2_CLUSTERS                    2
#define TC2_MAX_CPUS_PER_CLUSTER        3

static unsigned int tc2_nr_cpus[TC2_CLUSTERS];
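
/*
 * mcpm_platform_ops .cpu_powerup method: program the SPC so this core
 * resumes at the MCPM entry point, then arm its wakeup IRQ so the
 * power controller releases it.
 */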
static int tc2_pm_cpu_powerup(unsigned int cpu, unsigned int cluster)
{
        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
        if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster])
                return -EINVAL;
        ve_spc_set_resume_addr(cluster, cpu,
                               __pa_symbol(mcpm_entry_point));
        ve_spc_cpu_wakeup_irq(cluster, cpu, true);
        return 0;
}
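
/*
 * .cluster_powerup method: powering a cluster back up only requires
 * withdrawing any pending cluster powerdown request from the SPC.
 */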
static int tc2_pm_cluster_powerup(unsigned int cluster)
{
        pr_debug("%s: cluster %u\n", __func__, cluster);
        if (cluster >= TC2_CLUSTERS)
                return -EINVAL;
        ve_spc_powerdown(cluster, false);
        return 0;
}
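
/*
 * .cpu_powerdown_prepare method, run on the CPU that is about to power
 * itself down: its wakeup IRQ is armed in the SPC before the GIC CPU
 * interface is cut below.
 */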
static void tc2_pm_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
{
        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
        BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
        ve_spc_cpu_wakeup_irq(cluster, cpu, true);
        /*
         * If the CPU is committed to powering down, make sure the
         * power controller is in charge of waking it up again upon
         * IRQ: cut the IRQ lines from the GIC CPU interface to the CPU
         * by disabling that interface, so that wfi cannot complete
         * behind the power controller's back.
         */
        gic_cpu_if_down(0);
}
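
/*
 * .cluster_powerdown_prepare method: request a full cluster powerdown
 * from the SPC and enable the global wakeup IRQ so any interrupt can
 * bring the cluster back up.
 */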
static void tc2_pm_cluster_powerdown_prepare(unsigned int cluster)
{
        pr_debug("%s: cluster %u\n", __func__, cluster);
        BUG_ON(cluster >= TC2_CLUSTERS);
        ve_spc_powerdown(cluster, true);
        ve_spc_global_wakeup_irq(true);
}
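
/*
 * .cpu_cache_disable method: flush this CPU's private cache levels (up
 * to the Level of Unification Inner Shareable) and take it out of
 * coherency; v7_exit_coherency_flush() turns off the data cache and
 * the SMP coherency bit as part of the same sequence.
 */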
static void tc2_pm_cpu_cache_disable(void)
{
        v7_exit_coherency_flush(louis);
}
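
/*
 * .cluster_cache_disable method, run by the last CPU in the cluster:
 * on the A15 cluster L2 prefetching is disabled first (see the inline
 * comment below), the entire cache hierarchy is flushed, and the
 * cluster's CCI slave port is disabled so it leaves system coherency.
 */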
static void tc2_pm_cluster_cache_disable(void)
{
        if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
                /*
                 * On the Cortex-A15 we need to disable
                 * L2 prefetching before flushing the cache.
                 */
                asm volatile(
                "mcr    p15, 1, %0, c15, c0, 3 \n\t"
                "isb    \n\t"
                "dsb    "
                : : "r" (0x400) );
        }
        v7_exit_coherency_flush(all);
        cci_disable_port_by_cpu(read_cpuid_mpidr());
}
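
/*
 * The 'n' in nCORERESET denotes an active-low reset line: a clear bit
 * in RESET_CTRL means the core is held in reset, hence the negation.
 */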
static int tc2_core_in_reset(unsigned int cpu, unsigned int cluster)
{
        u32 mask = cluster ?
                  RESET_A7_NCORERESET(cpu)
                : RESET_A15_NCORERESET(cpu);
        return !(readl_relaxed(scc + RESET_CTRL) & mask);
}

#define POLL_MSEC       10
#define TIMEOUT_MSEC    1000
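
/*
 * .wait_for_powerdown method: poll until the CPU is safely parked,
 * i.e. either held in reset or sitting in wfi with STANDBYWFI
 * asserted, retrying every POLL_MSEC for up to TIMEOUT_MSEC.
 */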
static int tc2_pm_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
{
        unsigned tries;

        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
        BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);

        for (tries = 0; tries < TIMEOUT_MSEC / POLL_MSEC; ++tries) {
                pr_debug("%s(cpu=%u, cluster=%u): RESET_CTRL = 0x%08X\n",
                         __func__, cpu, cluster,
                         readl_relaxed(scc + RESET_CTRL));

                /*
                 * We need the CPU to reach WFI, but the power
                 * controller may put the cluster in reset and
                 * power it off as soon as that happens, before
                 * we have a chance to see STANDBYWFI.
                 *
                 * So we need to check for both conditions:
                 */
                if (tc2_core_in_reset(cpu, cluster) ||
                    ve_spc_cpu_in_wfi(cpu, cluster))
                        return 0; /* success: the CPU is halted */

                /* Otherwise, wait and retry: */
                msleep(POLL_MSEC);
        }

        return -ETIMEDOUT; /* timeout */
}
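
/*
 * .cpu_suspend_prepare method: point the SPC resume address at the
 * MCPM entry point so the core re-enters the MCPM state machine on
 * wakeup.
 */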
static void tc2_pm_cpu_suspend_prepare(unsigned int cpu, unsigned int cluster)
{
        ve_spc_set_resume_addr(cluster, cpu, __pa_symbol(mcpm_entry_point));
}
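
/*
 * .cpu_is_up method: once the CPU is known to be up, its wakeup IRQ
 * and resume address are no longer needed and are cleared here.
 */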
static void tc2_pm_cpu_is_up(unsigned int cpu, unsigned int cluster)
{
        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
        BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
        ve_spc_cpu_wakeup_irq(cluster, cpu, false);
        ve_spc_set_resume_addr(cluster, cpu, 0);
}
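
/*
 * .cluster_is_up method: withdraw any powerdown request and turn the
 * global wakeup IRQ back off now that the cluster is running.
 */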
static void tc2_pm_cluster_is_up(unsigned int cluster)
{
        pr_debug("%s: cluster %u\n", __func__, cluster);
        BUG_ON(cluster >= TC2_CLUSTERS);
        ve_spc_powerdown(cluster, false);
        ve_spc_global_wakeup_irq(false);
}
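
/*
 * The methods above form the TC2 backend for the MCPM layer, which
 * invokes them under its own cluster/CPU state machine.
 */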
static const struct mcpm_platform_ops tc2_pm_power_ops = {
        .cpu_powerup            = tc2_pm_cpu_powerup,
        .cluster_powerup        = tc2_pm_cluster_powerup,
        .cpu_suspend_prepare    = tc2_pm_cpu_suspend_prepare,
        .cpu_powerdown_prepare  = tc2_pm_cpu_powerdown_prepare,
        .cluster_powerdown_prepare = tc2_pm_cluster_powerdown_prepare,
        .cpu_cache_disable      = tc2_pm_cpu_cache_disable,
        .cluster_cache_disable  = tc2_pm_cluster_cache_disable,
        .wait_for_powerdown     = tc2_pm_wait_for_powerdown,
        .cpu_is_up              = tc2_pm_cpu_is_up,
        .cluster_is_up          = tc2_pm_cluster_is_up,
};

/*
 * Enable cluster-level coherency, in preparation for turning on the MMU.
 *
 * This runs on the first CPU of an incoming cluster, before the MMU,
 * caches and a stack are available, so it must be a __naked function
 * written entirely in assembly: it tail-calls cci_enable_port_for_self
 * only when entered with affinity level 1 (cluster level) in r0.
 */
static void __naked tc2_pm_power_up_setup(unsigned int affinity_level)
{
        asm volatile (" \n"
"       cmp     r0, #1 \n"
"       bxne    lr \n"
"       b       cci_enable_port_for_self ");
}
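
/*
 * Probe-time setup: locate the SCC, discover the cluster ids and the
 * number of CPUs per cluster, initialize the SPC driver, check that
 * the CCI has been probed, and register the MCPM platform operations.
 */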
static int __init tc2_pm_init(void)
{
        unsigned int mpidr, cpu, cluster;
        int ret, irq;
        u32 a15_cluster_id, a7_cluster_id, sys_info;
        struct device_node *np;

        /*
         * The power management-related features are hidden behind
         * SCC registers. We need to extract runtime information like
         * cluster ids and number of CPUs really available in clusters.
         */
        np = of_find_compatible_node(NULL, NULL,
                                     "arm,vexpress-scc,v2p-ca15_a7");
        scc = of_iomap(np, 0);
        if (!scc)
                return -ENODEV;

        a15_cluster_id = readl_relaxed(scc + A15_CONF) & 0xf;
        a7_cluster_id = readl_relaxed(scc + A7_CONF) & 0xf;
        if (a15_cluster_id >= TC2_CLUSTERS || a7_cluster_id >= TC2_CLUSTERS)
                return -EINVAL;

        sys_info = readl_relaxed(scc + SYS_INFO);
        tc2_nr_cpus[a15_cluster_id] = (sys_info >> 16) & 0xf;
        tc2_nr_cpus[a7_cluster_id] = (sys_info >> 20) & 0xf;

        irq = irq_of_parse_and_map(np, 0);

        /*
         * A subset of the SCC registers is also used to communicate
         * with the SPC (power controller). We need to be able to
         * drive it very early in the boot process to power up
         * processors, so we initialize the SPC driver here.
         */
        ret = ve_spc_init(scc + SPC_BASE, a15_cluster_id, irq);
        if (ret)
                return ret;

        if (!cci_probed())
                return -ENODEV;

        mpidr = read_cpuid_mpidr();
        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
        if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) {
                pr_err("%s: boot CPU is out of bounds!\n", __func__);
                return -EINVAL;
        }

        ret = mcpm_platform_register(&tc2_pm_power_ops);
        if (!ret) {
                mcpm_sync_init(tc2_pm_power_up_setup);
                /* test if we can (re)enable the CCI on our own */
                BUG_ON(mcpm_loopback(tc2_pm_cluster_cache_disable) != 0);
                pr_info("TC2 power management initialized\n");
        }

        return ret;
}
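
/*
 * early_initcall runs from do_pre_smp_initcalls(), before secondary
 * CPUs are brought online, so the MCPM backend is in place by the
 * time SMP boot goes through mcpm_entry_point.
 */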
early_initcall(tc2_pm_init);