
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020 - Google Inc
 * Author: Andrew Scull <[email protected]>
 */

#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

	.text
SYM_FUNC_START(__host_exit)
	get_host_ctxt	x0, x1

	/* Store the host regs x2 and x3 */
	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(2)]

	/* Retrieve the host regs x0-x1 from the stack */
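	/* (they were pushed there by the host_el1_sync_vect vector entry) */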
	ldp	x2, x3, [sp], #16	// x0, x1

	/* Store the host regs x0-x1 and x4-x17 */
	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(0)]
	stp	x4, x5,   [x0, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x0, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x0, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x0, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x0, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x0, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x0, #CPU_XREG_OFFSET(16)]

	/* Store the host regs x18-x29, lr */
	save_callee_saved_regs x0

	/* Save the host context pointer in x29 across the function call */
	mov	x29, x0
	bl	handle_trap

	/* Restore host regs x0-x17 */
__host_enter_restore_full:
	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x29, #CPU_XREG_OFFSET(6)]

	/* x0-7 are used for panic arguments */
__host_enter_for_panic:
	ldp	x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x29, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x29, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x29, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x29, #CPU_XREG_OFFSET(16)]

	/* Restore host regs x18-x29, lr */
	restore_callee_saved_regs x29

	/* Do not touch any register after this! */
__host_enter_without_restoring:
	eret
	sb
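	/* The sb above is a speculation barrier preventing straight-line speculation past the eret */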
SYM_FUNC_END(__host_exit)

/*
 * void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
 */
SYM_FUNC_START(__host_enter)
	mov	x29, x0
	b	__host_enter_restore_full
SYM_FUNC_END(__host_enter)
/*
 * void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
 *				   u64 elr, u64 par);
 */
SYM_FUNC_START(__hyp_do_panic)
	/* Prepare and exit to the host's panic function. */
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	adr_l	lr, nvhe_hyp_panic_handler
	hyp_kimg_va lr, x6
	msr	elr_el2, lr
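	/*
	 * The eret in __host_enter_* will now return to nvhe_hyp_panic_handler()
	 * at EL1h with all DAIF exceptions masked.
	 */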
	mov	x29, x0

#ifdef CONFIG_NVHE_EL2_DEBUG
	/* Ensure host stage-2 is disabled */
	mrs	x0, hcr_el2
	bic	x0, x0, #HCR_VM
	msr	hcr_el2, x0
	isb
	tlbi	vmalls12e1
	dsb	nsh
#endif

	/* Load the panic arguments into x0-7 */
	mrs	x0, esr_el2
	mov	x4, x3
	mov	x3, x2
	hyp_pa	x3, x6
	get_vcpu_ptr x5, x6
	mrs	x6, far_el2
	mrs	x7, hpfar_el2
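	/*
	 * At this point (per the shuffling above): x0 = ESR_EL2, x1 = spsr,
	 * x2 = elr, x3 = physical address of elr, x4 = par, x5 = loaded vcpu
	 * (or NULL), x6 = FAR_EL2, x7 = HPFAR_EL2.
	 */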
	/* Enter the host, conditionally restoring the host context. */
	cbz	x29, __host_enter_without_restoring
	b	__host_enter_for_panic
SYM_FUNC_END(__hyp_do_panic)

SYM_FUNC_START(__host_hvc)
	ldp	x0, x1, [sp]		// Don't fixup the stack yet

	/* No stub for you, sonny Jim */
alternative_if ARM64_KVM_PROTECTED_MODE
	b	__host_exit
alternative_else_nop_endif

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	__host_exit
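	/* This is a stub hcall: drop the x0/x1 frame pushed at vector entry */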
	add	sp, sp, #16

	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there.
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
	adr_l	x5, __kvm_handle_stub_hvc
	hyp_pa	x5, x6
	br	x5
SYM_FUNC_END(__host_hvc)

.macro host_el1_sync_vect
	.align 7
.L__vect_start\@:
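	/* Stash x0/x1; __host_exit and __host_hvc expect them at the top of the stack */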
	stp	x0, x1, [sp, #-16]!
	mrs	x0, esr_el2
	ubfx	x0, x0, #ESR_ELx_EC_SHIFT, #ESR_ELx_EC_WIDTH
	cmp	x0, #ESR_ELx_EC_HVC64
	b.eq	__host_hvc
	b	__host_exit
.L__vect_end\@:
.if ((.L__vect_end\@ - .L__vect_start\@) > 0x80)
	.error "host_el1_sync_vect larger than vector entry"
.endif
.endm

.macro invalid_host_el2_vect
	.align 7

	/*
	 * Test whether the SP has overflowed, without corrupting a GPR.
	 * nVHE hypervisor stacks are aligned so that the NVHE_STACK_SHIFT bit
	 * of SP should always be 1.
	 */
	add	sp, sp, x0			// sp' = sp + x0
	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
	tbz	x0, #NVHE_STACK_SHIFT, .L__hyp_sp_overflow\@
	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp

	/* If a guest is loaded, panic out of it. */
	stp	x0, x1, [sp, #-16]!
	get_loaded_vcpu x0, x1
	cbnz	x0, __guest_exit_panic
	add	sp, sp, #16

	/*
	 * The panic may not be clean if the exception is taken before the host
	 * context has been saved by __host_exit or after the hyp context has
	 * been partially clobbered by __host_enter.
	 */
	b	hyp_panic

.L__hyp_sp_overflow\@:
	/* Switch to the overflow stack */
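	/* (stacks grow down, so sp is set to the end of this CPU's overflow stack) */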
	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0

	b	hyp_panic_bad_stack
	ASM_BUG()
.endm

.macro invalid_host_el1_vect
	.align 7
	mov	x0, xzr		/* restore_host = false */
	mrs	x1, spsr_el2
	mrs	x2, elr_el2
	mrs	x3, par_el1
	b	__hyp_do_panic
.endm

/*
 * The host vector does not use an ESB instruction in order to avoid consuming
 * SErrors that should only be consumed by the host. Guest entry is deferred by
 * __guest_enter if there are any pending asynchronous exceptions so hyp will
 * always return to the host without having consumed host SErrors.
 *
 * CONFIG_KVM_INDIRECT_VECTORS is not applied to the host vectors because the
 * host knows about the EL2 vectors already, and there is no point in hiding
 * them.
 */
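	/* 16 entries of 0x80 bytes each: the table must be 2KB aligned for VBAR_EL2 */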
	.align 11
SYM_CODE_START(__kvm_hyp_host_vector)
	invalid_host_el2_vect			// Synchronous EL2t
	invalid_host_el2_vect			// IRQ EL2t
	invalid_host_el2_vect			// FIQ EL2t
	invalid_host_el2_vect			// Error EL2t

	invalid_host_el2_vect			// Synchronous EL2h
	invalid_host_el2_vect			// IRQ EL2h
	invalid_host_el2_vect			// FIQ EL2h
	invalid_host_el2_vect			// Error EL2h

	host_el1_sync_vect			// Synchronous 64-bit EL1/EL0
	invalid_host_el1_vect			// IRQ 64-bit EL1/EL0
	invalid_host_el1_vect			// FIQ 64-bit EL1/EL0
	invalid_host_el1_vect			// Error 64-bit EL1/EL0

	host_el1_sync_vect			// Synchronous 32-bit EL1/EL0
	invalid_host_el1_vect			// IRQ 32-bit EL1/EL0
	invalid_host_el1_vect			// FIQ 32-bit EL1/EL0
	invalid_host_el1_vect			// Error 32-bit EL1/EL0
SYM_CODE_END(__kvm_hyp_host_vector)

/*
 * Forward SMC with arguments in struct kvm_cpu_context, and
 * store the result into the same struct. Assumes SMCCC 1.2 or older.
 *
 * x0: struct kvm_cpu_context*
 */
SYM_CODE_START(__kvm_hyp_host_forward_smc)
	/*
	 * Use x18 to keep the pointer to the host context because
	 * x18 is callee-saved in SMCCC but not in AAPCS64.
	 */
	mov	x18, x0

	ldp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
	ldp	x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x18, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x18, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x18, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x18, #CPU_XREG_OFFSET(16)]

	smc	#0
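	/* SMCCC v1.2 allows results in x0-x17: copy them all back into the host context */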
	stp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
	stp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
	stp	x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x18, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x18, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x18, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x18, #CPU_XREG_OFFSET(16)]

	ret
SYM_CODE_END(__kvm_hyp_host_forward_smc)