/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <[email protected]>
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/el2_setup.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

	.text
	.pushsection	.idmap.text, "ax"

	.align	11
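
/*
 * The vector table below has 16 slots of 0x80 bytes each (0x800 bytes in
 * total), and VBAR_EL2 requires a 2KB-aligned base, hence the .align 11.
 */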
SYM_CODE_START(__kvm_hyp_init)
	ventry	__invalid		// Synchronous EL2t
	ventry	__invalid		// IRQ EL2t
	ventry	__invalid		// FIQ EL2t
	ventry	__invalid		// Error EL2t

	ventry	__invalid		// Synchronous EL2h
	ventry	__invalid		// IRQ EL2h
	ventry	__invalid		// FIQ EL2h
	ventry	__invalid		// Error EL2h

	ventry	__do_hyp_init		// Synchronous 64-bit EL1
	ventry	__invalid		// IRQ 64-bit EL1
	ventry	__invalid		// FIQ 64-bit EL1
	ventry	__invalid		// Error 64-bit EL1

	ventry	__invalid		// Synchronous 32-bit EL1
	ventry	__invalid		// IRQ 32-bit EL1
	ventry	__invalid		// FIQ 32-bit EL1
	ventry	__invalid		// Error 32-bit EL1

__invalid:
	b	.
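
	/*
	 * Only the Synchronous 64-bit EL1 slot is populated: this table
	 * exists solely to receive the init HVC issued by the host while it
	 * still runs at EL1. Any other exception here is a bug, so we
	 * simply hang.
	 */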

	/*
	 * Only uses x0..x3 so as to not clobber callee-saved SMCCC registers.
	 *
	 * x0: SMCCC function ID
	 * x1: struct kvm_nvhe_init_params PA
	 */
__do_hyp_init:
	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.lo	__kvm_handle_stub_hvc
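
	/*
	 * Hyp-stub hypercalls use small immediate IDs (below
	 * HVC_STUB_HCALL_NR), so they cannot collide with the large
	 * SMCCC-style function IDs used for KVM's own hypercalls,
	 * checked next.
	 */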
	mov	x3, #KVM_HOST_SMCCC_FUNC(__kvm_hyp_init)
	cmp	x0, x3
	b.eq	1f

	mov	x0, #SMCCC_RET_NOT_SUPPORTED
	eret
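
	/*
	 * Stash lr in x3 across the call below: bl clobbers lr, and there
	 * is no stack to save it on until ___kvm_hyp_init has set one up.
	 */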
1:	mov	x0, x1
	mov	x3, lr
	bl	___kvm_hyp_init			// Clobbers x0..x2
	mov	lr, x3

	/* Hello, World! */
	mov	x0, #SMCCC_RET_SUCCESS
	eret
SYM_CODE_END(__kvm_hyp_init)

/*
 * Initialize the hypervisor in EL2.
 *
 * Only uses x0..x2 so as to not clobber callee-saved SMCCC registers
 * and leave x3 for the caller.
 *
 * x0: struct kvm_nvhe_init_params PA
 */
SYM_CODE_START_LOCAL(___kvm_hyp_init)
	ldr	x1, [x0, #NVHE_INIT_TPIDR_EL2]
	msr	tpidr_el2, x1

	ldr	x1, [x0, #NVHE_INIT_STACK_HYP_VA]
	mov	sp, x1

	ldr	x1, [x0, #NVHE_INIT_MAIR_EL2]
	msr	mair_el2, x1

	ldr	x1, [x0, #NVHE_INIT_HCR_EL2]
	msr	hcr_el2, x1
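
	/*
	 * All of the above values come from the kvm_nvhe_init_params struct
	 * prepared by the host; tpidr_el2 notably carries the per-CPU
	 * offset that hyp's per-CPU accessors rely on.
	 */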

	mrs	x1, ID_AA64MMFR0_EL1
	and	x1, x1, #(0xf << ID_AA64MMFR0_EL1_FGT_SHIFT)
	cbz	x1, 1f

	ldr	x1, [x0, #NVHE_INIT_HFGWTR_EL2]
	msr_s	SYS_HFGWTR_EL2, x1
1:
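	/*
	 * HFGWTR_EL2 only exists when FEAT_FGT is implemented; programming
	 * it on a CPU without fine-grained traps would UNDEF, hence the ID
	 * register check above.
	 */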

	ldr	x1, [x0, #NVHE_INIT_VTTBR]
	msr	vttbr_el2, x1

	ldr	x1, [x0, #NVHE_INIT_VTCR]
	msr	vtcr_el2, x1

	ldr	x1, [x0, #NVHE_INIT_PGD_PA]
	phys_to_ttbr x2, x1
alternative_if ARM64_HAS_CNP
	orr	x2, x2, #TTBR_CNP_BIT
alternative_else_nop_endif
	msr	ttbr0_el2, x2
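
	/*
	 * With FEAT_TTCNP, the CNP bit marks these tables as Common not
	 * Private, letting CPUs share TLB entries for page tables that are
	 * identical across cores.
	 */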

	/*
	 * Set the PS bits in TCR_EL2.
	 */
	ldr	x0, [x0, #NVHE_INIT_TCR_EL2]
	tcr_compute_pa_size x0, #TCR_EL2_PS_SHIFT, x1, x2
	msr	tcr_el2, x0

	isb
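
	/*
	 * tcr_compute_pa_size (see asm/assembler.h) derives the PS field
	 * from ID_AA64MMFR0_EL1.PARange, so the output address size never
	 * claims more than the CPU implements. The isb synchronizes the
	 * context-changing system register writes above before the TLB
	 * maintenance that follows.
	 */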

	/* Invalidate the stale TLBs from Bootloader */
	tlbi	alle2
	tlbi	vmalls12e1
	dsb	sy
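
	/*
	 * alle2 drops any stale EL2 stage-1 entries, vmalls12e1 drops the
	 * stage-1/stage-2 entries for the current VMID; dsb sy makes sure
	 * both have completed before the MMU is enabled.
	 */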

	mov_q	x0, INIT_SCTLR_EL2_MMU_ON
alternative_if ARM64_HAS_ADDRESS_AUTH
	mov_q	x1, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
		     SCTLR_ELx_ENDA | SCTLR_ELx_ENDB)
	orr	x0, x0, x1
alternative_else_nop_endif
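
	/*
	 * The EnIA/EnIB/EnDA/EnDB bits ORed in above enable the four
	 * pointer authentication keys at EL2, but only on CPUs where
	 * address auth was detected.
	 */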
	msr	sctlr_el2, x0
	isb
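
	/*
	 * The MMU is now on: literal-pool loads of hyp symbols, like the
	 * host vector below, yield usable VAs from this point.
	 */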
	/* Set the host vector */
	ldr	x0, =__kvm_hyp_host_vector
	msr	vbar_el2, x0

	ret
SYM_CODE_END(___kvm_hyp_init)

/*
 * PSCI CPU_ON entry point
 *
 * x0: struct kvm_nvhe_init_params PA
 */
SYM_CODE_START(kvm_hyp_cpu_entry)
	mov	x1, #1				// is_cpu_on = true
	b	__kvm_hyp_init_cpu
SYM_CODE_END(kvm_hyp_cpu_entry)

/*
 * PSCI CPU_SUSPEND / SYSTEM_SUSPEND entry point
 *
 * x0: struct kvm_nvhe_init_params PA
 */
SYM_CODE_START(kvm_hyp_cpu_resume)
	mov	x1, #0				// is_cpu_on = false
	b	__kvm_hyp_init_cpu
SYM_CODE_END(kvm_hyp_cpu_resume)
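
/*
 * PSCI hands control to both entry points above with the MMU and caches
 * off, which is why they live in the idmap section and funnel into the
 * common MMU-enabling path below.
 */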

/*
 * Common code for CPU entry points. Initializes EL2 state and
 * installs the hypervisor before handing over to a C handler.
 *
 * x0: struct kvm_nvhe_init_params PA
 * x1: bool is_cpu_on
 */
SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
	mov	x28, x0				// Stash arguments
	mov	x29, x1

	/* Check that the core was booted in EL2. */
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL2
	b.eq	2f

	/* The core booted in EL1. KVM cannot be initialized on it. */
1:	wfe
	wfi
	b	1b
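
	/*
	 * The WFE/WFI loop above parks the core in low power: without EL2
	 * access there is nothing useful this CPU can do for KVM.
	 */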

2:	msr	SPsel, #1			// We want to use SP_EL{1,2}

	/* Initialize EL2 CPU state to sane values. */
	init_el2_state				// Clobbers x0..x2
	finalise_el2_state
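
	/*
	 * init_el2_state/finalise_el2_state (from asm/el2_setup.h) program
	 * the generic EL2 configuration (timers, debug, traps and friends)
	 * that ___kvm_hyp_init does not set up itself.
	 */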

	/* Enable MMU, set vectors and stack. */
	mov	x0, x28
	bl	___kvm_hyp_init			// Clobbers x0..x2

	/* Leave idmap. */
	mov	x0, x29
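
	/*
	 * The literal-pool load yields the hyp VA of the C handler, so the
	 * branch below takes us out of the identity map, with x0 carrying
	 * is_cpu_on.
	 */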
	ldr	x1, =kvm_host_psci_cpu_entry
	br	x1
SYM_CODE_END(__kvm_hyp_init_cpu)

SYM_CODE_START(__kvm_handle_stub_hvc)
	cmp	x0, #HVC_SOFT_RESTART
	b.ne	1f

	/* This is where we're about to jump, staying at EL2 */
	msr	elr_el2, x1
	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT | PSR_MODE_EL2h)
	msr	spsr_el2, x0
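
	/*
	 * The SPSR above targets EL2h with all of DAIF masked, so the final
	 * eret lands at the address in x1 with exceptions disabled.
	 */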

	/* Shuffle the arguments, and don't come back */
	mov	x0, x2
	mov	x1, x3
	mov	x2, x4
	b	reset

1:	cmp	x0, #HVC_RESET_VECTORS
	b.ne	1f

	/*
	 * Set the HVC_RESET_VECTORS return code before entering the common
	 * path so that we do not clobber x0-x2 in case we are coming via
	 * HVC_SOFT_RESTART.
	 */
	mov	x0, xzr
reset:
	/* Reset kvm back to the hyp stub. */
	mov_q	x5, INIT_SCTLR_EL2_MMU_OFF
	pre_disable_mmu_workaround
	msr	sctlr_el2, x5
	isb

alternative_if ARM64_KVM_PROTECTED_MODE
	mov_q	x5, HCR_HOST_NVHE_FLAGS
	msr	hcr_el2, x5
alternative_else_nop_endif
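
	/*
	 * In protected mode, EL2 has presumably been running with hyp's own
	 * HCR_EL2 configuration; restore the host's nVHE flags before
	 * handing EL2 back to the stub.
	 */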

	/* Install stub vectors */
	adr_l	x5, __hyp_stub_vectors
	msr	vbar_el2, x5
	eret

1:	/* Bad stub call */
	mov_q	x0, HVC_STUB_ERR
	eret
SYM_CODE_END(__kvm_handle_stub_hvc)

SYM_FUNC_START(__pkvm_init_switch_pgd)
	/* Turn the MMU off */
	pre_disable_mmu_workaround
	mrs	x2, sctlr_el2
	bic	x3, x2, #SCTLR_ELx_M
	msr	sctlr_el2, x3
	isb

	tlbi	alle2
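
	/*
	 * With the MMU off and the EL2 TLB clean, it is now safe to swap
	 * TTBR0_EL2 to the new page tables.
	 */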

	/* Install the new pgtables */
	ldr	x3, [x0, #NVHE_INIT_PGD_PA]
	phys_to_ttbr x4, x3
alternative_if ARM64_HAS_CNP
	orr	x4, x4, #TTBR_CNP_BIT
alternative_else_nop_endif
	msr	ttbr0_el2, x4

	/* Set the new stack pointer */
	ldr	x0, [x0, #NVHE_INIT_STACK_HYP_VA]
	mov	sp, x0
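
	/*
	 * x2 still holds the SCTLR_EL2 value saved on entry, and x1 the
	 * address the caller wants us to "return" to once the new tables
	 * are live (hence the ret x1 instead of a plain ret).
	 */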
	/* And turn the MMU back on! */
	set_sctlr_el2	x2
	ret	x1
SYM_FUNC_END(__pkvm_init_switch_pgd)

	.popsection