sleep44xx.S

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * OMAP44xx sleep code.
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *	Santosh Shilimkar <[email protected]>
 */

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/smp_scu.h>
#include <asm/memory.h>
#include <asm/hardware/cache-l2x0.h>

#include "omap-secure.h"
#include "common.h"
#include "omap44xx.h"
#include "omap4-sar-layout.h"

	.arch armv7-a

#if defined(CONFIG_SMP) && defined(CONFIG_PM)

.arch_extension sec
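
/*
 * DO_SMC: issue a secure monitor call. The surrounding DSBs ensure
 * that outstanding memory transactions complete on either side of
 * the world switch; callers load the monitor/PPA service index into
 * r12 before invoking the macro.
 */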
.macro	DO_SMC
	dsb
	smc	#0
	dsb
.endm
#ifdef CONFIG_ARCH_OMAP4

/*
 * =============================
 * == CPU suspend finisher ==
 * =============================
 *
 * void omap4_finish_suspend(unsigned long cpu_state)
 *
 * This function saves the CPU context and performs the CPU power-down
 * sequence. Calling WFI effectively changes the CPU power domain state
 * to the desired target power state.
 *
 * @cpu_state : contains context save state (r0)
 *	0 - No context lost
 *	1 - CPUx L1 and logic lost: MPUSS CSWR
 *	2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
 *	3 - CPUx L1 and logic lost + GIC + L2 lost: MPUSS OFF
 * @return: This function never returns for the CPU OFF and DORMANT power
 * states. Post WFI, the CPU transitions to DORMANT or OFF and, on wake-up,
 * follows a full CPU reset path via ROM code to the CPU restore code.
 * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
 * The function returns to the caller for the CPU INACTIVE and ON power
 * states, or if the CPU fails to transition to the targeted OFF/DORMANT
 * state.
 *
 * omap4_finish_suspend() calls v7_flush_dcache_all(), which doesn't save
 * its stack frame and expects the caller to take care of it. Hence the
 * entire register frame is saved here to avoid possible stack corruption.
 */
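
/*
 * A minimal sketch of how this entry point is reached from C, assuming
 * the usual OMAP4 MPUSS low-power flow (exact caller names may differ
 * between kernel versions):
 *
 *	omap4_enter_lowpower(cpu, power_state)
 *	    -> cpu_suspend(save_state, omap4_finish_suspend)
 *	        -> omap4_finish_suspend(save_state)	@ this code
 *
 * cpu_suspend() stacks the core context first, so when no power
 * transition happens the final ldmfd below returns straight to C.
 */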
ENTRY(omap4_finish_suspend)
	stmfd	sp!, {r4-r12, lr}
	cmp	r0, #0x0
	beq	do_WFI			@ No low-power state, jump to WFI

	/*
	 * Flush all data from the L1 data cache before disabling
	 * SCTLR.C bit.
	 */
	bl	omap4_get_sar_ram_base
	ldr	r9, [r0, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1		@ Check for HS device
	bne	skip_secure_l1_clean
	mov	r0, #SCU_PM_NORMAL
	mov	r1, #0xFF		@ clean secure L1
	stmfd	r13!, {r4-r12, r14}
	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX
	DO_SMC
	ldmfd	r13!, {r4-r12, r14}
skip_secure_l1_clean:
	bl	v7_flush_dcache_all

	/*
	 * Clear the SCTLR.C bit to prevent further data cache
	 * allocation. Clearing SCTLR.C would make all the data accesses
	 * strongly ordered and would not hit the cache.
	 */
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, r0, #(1 << 2)	@ Disable the C bit
	mcr	p15, 0, r0, c1, c0, 0
	isb

	bl	v7_invalidate_l1

	/*
	 * Switch the CPU from Symmetric Multiprocessing (SMP) mode
	 * to Asymmetric Multiprocessing (AMP) mode by programming
	 * the SCU power status to DORMANT or OFF mode.
	 * This enables the CPU to be taken out of coherency by
	 * preventing the CPU from receiving cache, TLB, or BTB
	 * maintenance operations broadcast by other CPUs in the cluster.
	 */
	bl	omap4_get_sar_ram_base
	mov	r8, r0
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1		@ Check for HS device
	bne	scu_gp_set
	mrc	p15, 0, r0, c0, c0, 5	@ Read MPIDR
	ands	r0, r0, #0x0f
	ldreq	r0, [r8, #SCU_OFFSET0]
	ldrne	r0, [r8, #SCU_OFFSET1]
	mov	r1, #0x00
	stmfd	r13!, {r4-r12, r14}
	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX
	DO_SMC
	ldmfd	r13!, {r4-r12, r14}
	b	skip_scu_gp_set
scu_gp_set:
	mrc	p15, 0, r0, c0, c0, 5	@ Read MPIDR
	ands	r0, r0, #0x0f
	ldreq	r1, [r8, #SCU_OFFSET0]
	ldrne	r1, [r8, #SCU_OFFSET1]
	bl	omap4_get_scu_base
	bl	scu_power_mode
skip_scu_gp_set:
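	/*
	 * NSACR bit 18 (NS_SMP) indicates whether non-secure writes to
	 * ACTLR are permitted; if they are, clear ACTLR.SMP directly to
	 * take this CPU out of coherency.
	 */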
	mrc	p15, 0, r0, c1, c1, 2	@ Read NSACR data
	tst	r0, #(1 << 18)
	mrcne	p15, 0, r0, c1, c0, 1
	bicne	r0, r0, #(1 << 6)	@ Disable SMP bit
	mcrne	p15, 0, r0, c1, c0, 1
	isb
	dsb
#ifdef CONFIG_CACHE_L2X0
	/*
	 * Clean and invalidate the L2 cache.
	 * The common cache-l2x0.c functions can't be used here since
	 * they take spinlocks. We are out of coherency here with the
	 * data cache disabled. The spinlock implementation uses
	 * exclusive load/store instructions, which can fail without
	 * the data cache being enabled. OMAP4 hardware doesn't provide
	 * an external exclusive monitor that could make such accesses
	 * succeed, so taking a spinlock here could deadlock the CPU.
	 */
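	/*
	 * For reference, the open-coded sequence below is roughly the
	 * following C (0xffff selects all 16 ways of the PL310):
	 *
	 *	writel_relaxed(0xffff, l2_base + L2X0_CLEAN_INV_WAY);
	 *	while (readl_relaxed(l2_base + L2X0_CLEAN_INV_WAY) & 0xffff)
	 *		cpu_relax();
	 *
	 * The way bits self-clear as each way finishes its maintenance.
	 */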
	bl	omap4_get_sar_ram_base
	mov	r8, r0
	mrc	p15, 0, r5, c0, c0, 5	@ Read MPIDR
	ands	r5, r5, #0x0f
	ldreq	r0, [r8, #L2X0_SAVE_OFFSET0] @ Retrieve L2 state from SAR
	ldrne	r0, [r8, #L2X0_SAVE_OFFSET1] @ memory.
	cmp	r0, #3
	bne	do_WFI
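	/*
	 * PL310 erratum 727915: a background clean-and-invalidate by
	 * way can corrupt data if linefills happen concurrently. The
	 * workaround brackets the operation with writes to the PL310
	 * debug control register, done through the secure monitor
	 * because that register is secure-only on OMAP4.
	 */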
#ifdef CONFIG_PL310_ERRATA_727915
	mov	r0, #0x03
	mov	r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
	DO_SMC
#endif
	bl	omap4_get_l2cache_base
	mov	r2, r0
	ldr	r0, =0xffff
	str	r0, [r2, #L2X0_CLEAN_INV_WAY]
wait:
	ldr	r0, [r2, #L2X0_CLEAN_INV_WAY]
	ldr	r1, =0xffff
	ands	r0, r0, r1
	bne	wait
#ifdef CONFIG_PL310_ERRATA_727915
	mov	r0, #0x00
	mov	r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
	DO_SMC
#endif
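	/*
	 * Drain the PL310 buffers with a cache sync: write the sync
	 * register, then poll bit 0 until the operation completes.
	 */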
l2x_sync:
	bl	omap4_get_l2cache_base
	mov	r2, r0
	mov	r0, #0x0
	str	r0, [r2, #L2X0_CACHE_SYNC]
sync:
	ldr	r0, [r2, #L2X0_CACHE_SYNC]
	ands	r0, r0, #0x1
	bne	sync
#endif

do_WFI:
	bl	omap_do_wfi

	/*
	 * The CPU is here when it failed to enter OFF/DORMANT or
	 * no low-power state was attempted.
	 */
	mrc	p15, 0, r0, c1, c0, 0
	tst	r0, #(1 << 2)		@ Check C bit enabled?
	orreq	r0, r0, #(1 << 2)	@ Enable the C bit
	mcreq	p15, 0, r0, c1, c0, 0
	isb

	/*
	 * Ensure the SCU power state is set back to NORMAL so that
	 * the CPU rejoins coherency. In non-coherent mode the CPU
	 * can lock up and deadlock the system.
	 */
	mrc	p15, 0, r0, c1, c0, 1
	tst	r0, #(1 << 6)		@ Check SMP bit enabled?
	orreq	r0, r0, #(1 << 6)
	mcreq	p15, 0, r0, c1, c0, 1
	isb
	bl	omap4_get_sar_ram_base
	mov	r8, r0
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1		@ Check for HS device
	bne	scu_gp_clear
	mov	r0, #SCU_PM_NORMAL
	mov	r1, #0x00
	stmfd	r13!, {r4-r12, r14}
	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX
	DO_SMC
	ldmfd	r13!, {r4-r12, r14}
	b	skip_scu_gp_clear
scu_gp_clear:
	bl	omap4_get_scu_base
	mov	r1, #SCU_PM_NORMAL
	bl	scu_power_mode
skip_scu_gp_clear:
	isb
	dsb
	ldmfd	sp!, {r4-r12, pc}
ENDPROC(omap4_finish_suspend)

/*
 * ============================
 * == CPU resume entry point ==
 * ============================
 *
 * void omap4_cpu_resume(void)
 *
 * ROM code jumps to this function while waking up from CPU
 * OFF or DORMANT state. The physical address of the function is
 * stored in the SAR RAM while entering OFF or DORMANT mode; the
 * restore function pointer lives at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
 */
ENTRY(omap4_cpu_resume)
	/*
	 * Configure ACTLR and enable NS SMP bit access on CPU1 on HS device.
	 * OMAP44XX EMU/HS devices - CPU0 SMP bit access is enabled in PPA
	 * init; for CPU1, a secure PPA API is provided. CPU0 must be ON
	 * while executing the NS_SMP API on CPU1, and the PPA version must
	 * be 1.4.0 or later.
	 * OMAP443X GP devices - SMP bit isn't accessible.
	 * OMAP446X GP devices - SMP bit access is enabled on both CPUs.
	 */
	ldr	r8, =OMAP44XX_SAR_RAM_BASE
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1		@ Skip if GP device
	bne	skip_ns_smp_enable
	mrc	p15, 0, r0, c0, c0, 5
	ands	r0, r0, #0x0f
	beq	skip_ns_smp_enable
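	/*
	 * PPA call convention used below: r12 carries the secure
	 * service ID, r0 the PPA service index and r3 a pointer to the
	 * parameter list. The pointer is computed PC-relative from the
	 * ppa_zero_params_offset literal so that it is correct at the
	 * physical address this code runs from on wakeup, before the
	 * MMU state is restored. The API returns 0 in r0 on success
	 * and is simply retried until it does.
	 */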
ppa_actrl_retry:
	mov	r0, #OMAP4_PPA_CPU_ACTRL_SMP_INDEX
	adr	r1, ppa_zero_params_offset
	ldr	r3, [r1]
	add	r3, r3, r1		@ Pointer to ppa_zero_params
	mov	r1, #0x0		@ Process ID
	mov	r2, #0x4		@ Flag
	mov	r6, #0xff
	mov	r12, #0x00		@ Secure Service ID
	DO_SMC
	cmp	r0, #0x0		@ API returns 0 on success.
	beq	enable_smp_bit
	b	ppa_actrl_retry
enable_smp_bit:
	mrc	p15, 0, r0, c1, c0, 1
	tst	r0, #(1 << 6)		@ Check SMP bit enabled?
	orreq	r0, r0, #(1 << 6)
	mcreq	p15, 0, r0, c1, c0, 1
	isb
skip_ns_smp_enable:
#ifdef CONFIG_CACHE_L2X0
	/*
	 * Restore the L2 AUXCTRL and enable the L2 cache.
	 * OMAP4_MON_L2X0_AUXCTRL_INDEX = Program the L2X0 AUXCTRL
	 * OMAP4_MON_L2X0_CTRL_INDEX = Enable the L2 using the L2X0 CTRL
	 * register; r0 contains the value to be programmed.
	 * The L2 cache is already invalidated by ROM code as part
	 * of the MPUSS OFF wakeup path.
	 */
	ldr	r2, =OMAP44XX_L2CACHE_BASE
	ldr	r0, [r2, #L2X0_CTRL]
	and	r0, #0x0f
	cmp	r0, #1
	beq	skip_l2en		@ Skip if already enabled
	ldr	r3, =OMAP44XX_SAR_RAM_BASE
	ldr	r1, [r3, #OMAP_TYPE_OFFSET]
	cmp	r1, #0x1		@ Check for HS device
	bne	set_gp_por
	ldr	r0, =OMAP4_PPA_L2_POR_INDEX
	ldr	r1, =OMAP44XX_SAR_RAM_BASE
	ldr	r4, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
	adr	r1, ppa_por_params_offset
	ldr	r3, [r1]
	add	r3, r3, r1		@ Pointer to ppa_por_params
	str	r4, [r3, #0x04]
	mov	r1, #0x0		@ Process ID
	mov	r2, #0x4		@ Flag
	mov	r6, #0xff
	mov	r12, #0x00		@ Secure Service ID
	DO_SMC
	b	set_aux_ctrl
set_gp_por:
	ldr	r1, =OMAP44XX_SAR_RAM_BASE
	ldr	r0, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
	ldr	r12, =OMAP4_MON_L2X0_PREFETCH_INDEX @ Setup L2 PREFETCH
	DO_SMC
set_aux_ctrl:
	ldr	r1, =OMAP44XX_SAR_RAM_BASE
	ldr	r0, [r1, #L2X0_AUXCTRL_OFFSET]
	ldr	r12, =OMAP4_MON_L2X0_AUXCTRL_INDEX @ Setup L2 AUXCTRL
	DO_SMC
	mov	r0, #0x1
	ldr	r12, =OMAP4_MON_L2X0_CTRL_INDEX	@ Enable L2 cache
	DO_SMC
skip_l2en:
#endif
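	/*
	 * cpu_resume is the generic ARM resume trampoline
	 * (arch/arm/kernel/sleep.S); it restores the context stacked
	 * by cpu_suspend() and resumes execution in the caller of
	 * omap4_finish_suspend().
	 */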
	b	cpu_resume		@ Jump to generic resume
ppa_por_params_offset:
	.long	ppa_por_params - .
ENDPROC(omap4_cpu_resume)
#endif	/* CONFIG_ARCH_OMAP4 */

#endif	/* defined(CONFIG_SMP) && defined(CONFIG_PM) */
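
/*
 * void omap_do_wfi(void)
 *
 * Drain the interconnect write buffers (when enabled), issue the
 * barriers needed for a clean entry to WFI, then wait for interrupt.
 * Placed outside the SMP/PM guards above so it can also be used by
 * other OMAP power-management paths.
 */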
ENTRY(omap_do_wfi)
	stmfd	sp!, {lr}
#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
	/* Drain interconnect write buffers. */
	bl	omap_interconnect_sync
#endif

	/*
	 * Execute an ISB instruction to ensure that all of the
	 * CP15 register changes have been committed.
	 */
	isb

	/*
	 * Execute a barrier instruction to ensure that all cache,
	 * TLB and branch predictor maintenance operations issued
	 * by any CPU in the cluster have completed.
	 */
	dsb
	dmb

	/*
	 * Execute a WFI instruction and wait until the
	 * STANDBYWFI output is asserted to indicate that the
	 * CPU is in an idle, low-power state. The CPU can
	 * speculatively prefetch instructions, so add NOPs after
	 * the WFI: sixteen, as per the Cortex-A9 pipeline.
	 */
	wfi				@ Wait For Interrupt
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	ldmfd	sp!, {pc}
ppa_zero_params_offset:
	.long	ppa_zero_params - .
ENDPROC(omap_do_wfi)

	.data
	.align	2
ppa_zero_params:
	.word	0

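/*
 * ppa_por_params: a minimal two-word parameter block for the HS L2
 * POR call above. The first word is assumed to be the parameter count
 * passed to the PPA; the second is filled in at runtime with the saved
 * L2 PREFETCH_CTRL value (see the "str r4, [r3, #0x04]" above).
 */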
ppa_por_params:
	.word	1, 0