hyp-entry.S

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015-2018 - ARM Ltd
 * Author: Marc Zyngier <[email protected]>
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/mmu.h>
#include <asm/spectre.h>
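
/* Helpers to spill/reload the caller-saved registers around calls into C code */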
.macro save_caller_saved_regs_vect
	/* x0 and x1 were saved in the vector entry */
	stp	x2, x3,   [sp, #-16]!
	stp	x4, x5,   [sp, #-16]!
	stp	x6, x7,   [sp, #-16]!
	stp	x8, x9,   [sp, #-16]!
	stp	x10, x11, [sp, #-16]!
	stp	x12, x13, [sp, #-16]!
	stp	x14, x15, [sp, #-16]!
	stp	x16, x17, [sp, #-16]!
.endm

.macro restore_caller_saved_regs_vect
	ldp	x16, x17, [sp], #16
	ldp	x14, x15, [sp], #16
	ldp	x12, x13, [sp], #16
	ldp	x10, x11, [sp], #16
	ldp	x8, x9,   [sp], #16
	ldp	x6, x7,   [sp], #16
	ldp	x4, x5,   [sp], #16
	ldp	x2, x3,   [sp], #16
	ldp	x0, x1,   [sp], #16
.endm

	.text

el1_sync:				// Guest trapped into EL2

	mrs	x0, esr_el2
	ubfx	x0, x0, #ESR_ELx_EC_SHIFT, #ESR_ELx_EC_WIDTH
	cmp	x0, #ESR_ELx_EC_HVC64
	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
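	/* b.ne is taken only if the EC is neither HVC64 nor HVC32 */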
	b.ne	el1_trap

	/*
	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
	 * The workaround has already been applied on the host,
	 * so let's quickly get back to the guest. We don't bother
	 * restoring x1, as it can be clobbered anyway.
	 */
	ldr	x1, [sp]				// Guest's x0
	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
	cbz	w1, wa_epilogue

	/* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
			  ARM_SMCCC_ARCH_WORKAROUND_2)
	cbz	w1, wa_epilogue

	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_2 ^ \
			  ARM_SMCCC_ARCH_WORKAROUND_3)
	cbnz	w1, el1_trap
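
/* Return SMCCC_RET_SUCCESS (0) in x0, drop the saved x0/x1 and go back to the guest */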
wa_epilogue:
	mov	x0, xzr
	add	sp, sp, #16
	eret
	sb
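
/* __guest_exit expects the exit code in x0 and the vcpu pointer in x1 */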
el1_trap:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_TRAP
	b	__guest_exit

el1_irq:
el1_fiq:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IRQ
	b	__guest_exit

el1_error:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_EL1_SERROR
	b	__guest_exit

el2_sync:
	/* Check for illegal exception return */
	mrs	x0, spsr_el2
	tbnz	x0, #20, 1f

	save_caller_saved_regs_vect
	stp	x29, x30, [sp, #-16]!
	bl	kvm_unexpected_el2_exception
	ldp	x29, x30, [sp], #16
	restore_caller_saved_regs_vect

	eret

1:
	/* Let's attempt a recovery from the illegal exception return */
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IL
	b	__guest_exit

el2_error:
	save_caller_saved_regs_vect
	stp	x29, x30, [sp, #-16]!
	bl	kvm_unexpected_el2_exception
	ldp	x29, x30, [sp], #16
	restore_caller_saved_regs_vect

	eret
	sb

.macro invalid_vector	label, target = __guest_exit_panic
	.align	2
SYM_CODE_START_LOCAL(\label)
	b	\target
SYM_CODE_END(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid
	invalid_vector	el2t_irq_invalid
	invalid_vector	el2t_fiq_invalid
	invalid_vector	el2t_error_invalid
	invalid_vector	el2h_irq_invalid
	invalid_vector	el2h_fiq_invalid

	.ltorg

	.align 11

.macro check_preamble_length start, end
/* kvm_patch_vector_branch() generates code that jumps over the preamble. */
.if ((\end-\start) != KVM_VECTOR_PREAMBLE)
	.error "KVM vector preamble length mismatch"
.endif
.endm
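
/*
 * Each vector entry begins with a fixed-length preamble (an ESB or NOP plus
 * saving x0/x1); patched vector branches jump past it.
 */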

.macro valid_vect target
	.align 7
661:
	esb
	stp	x0, x1, [sp, #-16]!
662:
	b	\target

check_preamble_length 661b, 662b
.endm

.macro invalid_vect target
	.align 7
661:
	nop
	stp	x0, x1, [sp, #-16]!
662:
	b	\target

check_preamble_length 661b, 662b
.endm

SYM_CODE_START(__kvm_hyp_vector)
	invalid_vect	el2t_sync_invalid	// Synchronous EL2t
	invalid_vect	el2t_irq_invalid	// IRQ EL2t
	invalid_vect	el2t_fiq_invalid	// FIQ EL2t
	invalid_vect	el2t_error_invalid	// Error EL2t

	valid_vect	el2_sync		// Synchronous EL2h
	invalid_vect	el2h_irq_invalid	// IRQ EL2h
	invalid_vect	el2h_fiq_invalid	// FIQ EL2h
	valid_vect	el2_error		// Error EL2h

	valid_vect	el1_sync		// Synchronous 64-bit EL1
	valid_vect	el1_irq			// IRQ 64-bit EL1
	valid_vect	el1_fiq			// FIQ 64-bit EL1
	valid_vect	el1_error		// Error 64-bit EL1

	valid_vect	el1_sync		// Synchronous 32-bit EL1
	valid_vect	el1_irq			// IRQ 32-bit EL1
	valid_vect	el1_fiq			// FIQ 32-bit EL1
	valid_vect	el1_error		// Error 32-bit EL1
SYM_CODE_END(__kvm_hyp_vector)

.macro spectrev2_smccc_wa1_smc
	sub	sp, sp, #(8 * 4)
	stp	x2, x3, [sp, #(8 * 0)]
	stp	x0, x1, [sp, #(8 * 2)]
	alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_wa3
	/* Patched to mov WA3 when supported */
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_1
	alternative_cb_end
	smc	#0
	ldp	x2, x3, [sp, #(8 * 0)]
	add	sp, sp, #(8 * 2)
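	/* x0/x1 are left on the stack, in the same position as the plain stp in the non-spectrev2 path */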
.endm

.macro hyp_ventry	indirect, spectrev2
	.align 7
1:	esb
	.if \spectrev2 != 0
	spectrev2_smccc_wa1_smc
	.else
	stp	x0, x1, [sp, #-16]!
	mitigate_spectre_bhb_loop	x0
	mitigate_spectre_bhb_clear_insn
	.endif
	.if \indirect != 0
	alternative_cb ARM64_ALWAYS_SYSTEM, kvm_patch_vector_branch
	/*
	 * For ARM64_SPECTRE_V3A configurations, these NOPs get replaced with:
	 *
	 * movz	x0, #(addr & 0xffff)
	 * movk	x0, #((addr >> 16) & 0xffff), lsl #16
	 * movk	x0, #((addr >> 32) & 0xffff), lsl #32
	 * br	x0
	 *
	 * Where:
	 * addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + KVM_VECTOR_PREAMBLE.
	 * See kvm_patch_vector_branch for details.
	 */
	nop
	nop
	nop
	nop
	alternative_cb_end
	.endif
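	/*
	 * 0b is defined in generate_vectors: branch to the same offset in
	 * __kvm_hyp_vector, just past its preamble.
	 */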
	b	__kvm_hyp_vector + (1b - 0b + KVM_VECTOR_PREAMBLE)
.endm

.macro generate_vectors	indirect, spectrev2
0:
	.rept 16
	hyp_ventry	\indirect, \spectrev2
	.endr
	.org 0b + SZ_2K		// Safety measure
.endm

	.align	11
SYM_CODE_START(__bp_harden_hyp_vecs)
	generate_vectors indirect = 0, spectrev2 = 1 // HYP_VECTOR_SPECTRE_DIRECT
	generate_vectors indirect = 1, spectrev2 = 0 // HYP_VECTOR_INDIRECT
	generate_vectors indirect = 1, spectrev2 = 1 // HYP_VECTOR_SPECTRE_INDIRECT
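	/* The two .org directives below fail the build unless the vectors fill __BP_HARDEN_HYP_VECS_SZ exactly */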
1:	.org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
	.org 1b
SYM_CODE_END(__bp_harden_hyp_vecs)