/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <[email protected]>
 */

#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_mte.h>
#include <asm/kvm_ptrauth.h>

	.text

/*
 * u64 __guest_enter(struct kvm_vcpu *vcpu);
 */
SYM_FUNC_START(__guest_enter)
	// x0: vcpu
	// x1-x17: clobbered by macros
	// x29: guest context

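	// Get a pointer to this CPU's hyp context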
	adr_this_cpu x1, kvm_hyp_ctxt, x2

	// Store the hyp regs
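	// save_callee_saved_regs is a macro (see asm/kvm_asm.h); roughly:
	//	str	x18,      [x1, #CPU_XREG_OFFSET(18)]
	//	stp	x19, x20, [x1, #CPU_XREG_OFFSET(19)]
	//	...
	//	stp	x29, lr,  [x1, #CPU_XREG_OFFSET(29)]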
	save_callee_saved_regs x1

	// Save hyp's sp_el0
	save_sp_el0	x1, x2

	// Now the hyp state is stored. If we have a pending RAS SError it
	// must affect the host or hyp. If any asynchronous exception is
	// pending we defer the guest entry. The DSB isn't necessary before
	// v8.2 as any SError would be fatal.
alternative_if ARM64_HAS_RAS_EXTN
	dsb	nshst
	isb
alternative_else_nop_endif

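	// A non-zero ISR_EL1 means an interrupt or SError is pending;
	// return ARM_EXCEPTION_IRQ so the host handles it before entry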
	mrs	x1, isr_el1
	cbz	x1, 1f
	mov	x0, #ARM_EXCEPTION_IRQ
	ret

1:
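	// Record this vcpu as the one currently loaded on this CPU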
	set_loaded_vcpu x0, x1, x2

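	// Point x29 at the guest's register context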
	add	x29, x0, #VCPU_CONTEXT

	// mte_switch_to_guest(g_ctxt, h_ctxt, tmp1)
	mte_switch_to_guest x29, x1, x2

	// Macro ptrauth_switch_to_guest format:
	//	ptrauth_switch_to_guest(guest cxt, tmp1, tmp2, tmp3)
	// The below macro to restore guest keys is not implemented in C code
	// as it may cause Pointer Authentication key signing mismatch errors
	// when this feature is enabled for kernel code.
	ptrauth_switch_to_guest x29, x0, x1, x2

	// Restore the guest's sp_el0
	restore_sp_el0 x29, x0

	// Restore guest regs x0-x17
	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x29, #CPU_XREG_OFFSET(6)]
	ldp	x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x29, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x29, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x29, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x29, #CPU_XREG_OFFSET(16)]

	// Restore guest regs x18-x29, lr
	restore_callee_saved_regs x29

	// Do not touch any register after this!
	eret
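	// Speculation barrier: nothing should execute speculatively past eret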
	sb

SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)
	// x2-x29,lr: vcpu regs
	// vcpu x0-x1 on the stack

	// If the hyp context is loaded, go straight to hyp_panic
	get_loaded_vcpu x0, x1
	cbnz	x0, 1f
	b	hyp_panic

1:
	// The hyp context is saved so make sure it is restored to allow
	// hyp_panic to run at hyp and, subsequently, panic to run in the
	// host. This makes use of __guest_exit to avoid duplication but
	// sets the return address to tail call into hyp_panic. As a side
	// effect, the current state is saved to the guest context but it
	// will only be accurate if the guest had been completely restored.
	adr_this_cpu x0, kvm_hyp_ctxt, x1
	adr_l	x1, hyp_panic
	str	x1, [x0, #CPU_XREG_OFFSET(30)]

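	// Reload the vcpu pointer so __guest_exit saves the current state
	// into the guest context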
	get_vcpu_ptr	x1, x0

SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
	// x0: return code
	// x1: vcpu
	// x2-x29,lr: vcpu regs
	// vcpu x0-x1 on the stack

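	// Point x1 at the guest's register context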
	add	x1, x1, #VCPU_CONTEXT

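	// Re-enable PAN if the CPU has it (patched in via alternatives)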
	ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)

	// Store the guest regs x2 and x3
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(2)]

	// Retrieve the guest regs x0-x1 from the stack
	ldp	x2, x3, [sp], #16	// x0, x1

	// Store the guest regs x0-x1 and x4-x17
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(0)]
	stp	x4, x5,   [x1, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x1, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x1, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x1, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x1, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x1, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x1, #CPU_XREG_OFFSET(16)]

	// Store the guest regs x18-x29, lr
	save_callee_saved_regs x1

	// Store the guest's sp_el0
	save_sp_el0	x1, x2

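	// Get a pointer to this CPU's hyp context so we can restore hyp state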
	adr_this_cpu x2, kvm_hyp_ctxt, x3

	// Macro ptrauth_switch_to_hyp format:
	//	ptrauth_switch_to_hyp(guest cxt, host cxt, tmp1, tmp2, tmp3)
	// The below macro to save/restore keys is not implemented in C code
	// as it may cause Pointer Authentication key signing mismatch errors
	// when this feature is enabled for kernel code.
	ptrauth_switch_to_hyp x1, x2, x3, x4, x5

	// mte_switch_to_hyp(g_ctxt, h_ctxt, reg1)
	mte_switch_to_hyp x1, x2, x3

	// Restore hyp's sp_el0
	restore_sp_el0 x2, x3

	// Now restore the hyp regs
	restore_callee_saved_regs x2

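	// No vcpu is loaded on this CPU any more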
	set_loaded_vcpu xzr, x2, x3

alternative_if ARM64_HAS_RAS_EXTN
	// If we have the RAS extensions we can consume a pending error
	// without an unmask-SError and isb. The ESB-instruction consumed any
	// pending guest error when we took the exception from the guest.
	mrs_s	x2, SYS_DISR_EL1
	str	x2, [x1, #(VCPU_FAULT_DISR - VCPU_CONTEXT)]
	cbz	x2, 1f
	msr_s	SYS_DISR_EL1, xzr
	orr	x0, x0, #(1<<ARM_EXIT_WITH_SERROR_BIT)
1:	ret
alternative_else
	dsb	sy		// Synchronize against in-flight ld/st
	isb			// Prevent an early read of side-effect free ISR
	mrs	x2, isr_el1
	tbnz	x2, #8, 2f	// ISR_EL1.A
	ret
	nop
2:
alternative_endif
	// We know we have a pending asynchronous abort, now is the
	// time to flush it out. From your VAXorcist book, page 666:
	// "Threaten me not, oh Evil one! For I speak with
	// the power of DEC, and I command thee to show thyself!"
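	// Stash the EL2 exception context first; taking the SError will
	// overwrite ELR_EL2, ESR_EL2 and SPSR_EL2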
	mrs	x2, elr_el2
	mrs	x3, esr_el2
	mrs	x4, spsr_el2
	mov	x5, x0

	msr	daifclr, #4	// Unmask aborts

	// This is our single instruction exception window. A pending
	// SError is guaranteed to occur at the earliest when we unmask
	// it, and at the latest just after the ISB.
abort_guest_exit_start:

	isb

abort_guest_exit_end:

	msr	daifset, #4	// Mask aborts
	ret

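	// If the SError is taken inside the window above, the fixup entries
	// below divert the exception to the 9997 handler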
	_kvm_extable	abort_guest_exit_start, 9997f
	_kvm_extable	abort_guest_exit_end, 9997f
9997:
	msr	daifset, #4	// Mask aborts
	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)

	// Restore the EL2 exception context so that we can report some
	// information. Merge the exception code with the SError pending bit.
	msr	elr_el2, x2
	msr	esr_el2, x3
	msr	spsr_el2, x4
	orr	x0, x0, x5
1:	ret
SYM_FUNC_END(__guest_enter)