vmenter.S
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include "kvm-asm-offsets.h"

#define WORD_SIZE (BITS_PER_LONG / 8)

/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX (SVM_vcpu_arch_regs + __VCPU_REGS_RCX * WORD_SIZE)
#define VCPU_RDX (SVM_vcpu_arch_regs + __VCPU_REGS_RDX * WORD_SIZE)
#define VCPU_RBX (SVM_vcpu_arch_regs + __VCPU_REGS_RBX * WORD_SIZE)
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP (SVM_vcpu_arch_regs + __VCPU_REGS_RBP * WORD_SIZE)
#define VCPU_RSI (SVM_vcpu_arch_regs + __VCPU_REGS_RSI * WORD_SIZE)
#define VCPU_RDI (SVM_vcpu_arch_regs + __VCPU_REGS_RDI * WORD_SIZE)

#ifdef CONFIG_X86_64
#define VCPU_R8  (SVM_vcpu_arch_regs + __VCPU_REGS_R8  * WORD_SIZE)
#define VCPU_R9  (SVM_vcpu_arch_regs + __VCPU_REGS_R9  * WORD_SIZE)
#define VCPU_R10 (SVM_vcpu_arch_regs + __VCPU_REGS_R10 * WORD_SIZE)
#define VCPU_R11 (SVM_vcpu_arch_regs + __VCPU_REGS_R11 * WORD_SIZE)
#define VCPU_R12 (SVM_vcpu_arch_regs + __VCPU_REGS_R12 * WORD_SIZE)
#define VCPU_R13 (SVM_vcpu_arch_regs + __VCPU_REGS_R13 * WORD_SIZE)
#define VCPU_R14 (SVM_vcpu_arch_regs + __VCPU_REGS_R14 * WORD_SIZE)
#define VCPU_R15 (SVM_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE)
#endif
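
/*
 * SVM_vmcb01_pa(%reg) below resolves to svm->vmcb01.pa, i.e. the
 * physical address of vmcb01 that is handed to VMLOAD and VMSAVE.
 */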
#define SVM_vmcb01_pa (SVM_vmcb01 + KVM_VMCB_pa)

.section .noinstr.text, "ax"

.macro RESTORE_GUEST_SPEC_CTRL
	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
	ALTERNATIVE_2 "", \
		"jmp 800f", X86_FEATURE_MSR_SPEC_CTRL, \
		"", X86_FEATURE_V_SPEC_CTRL
801:
.endm

.macro RESTORE_GUEST_SPEC_CTRL_BODY
800:
	/*
	 * SPEC_CTRL handling: if the guest's SPEC_CTRL value differs from the
	 * host's, write the MSR. This is kept out-of-line so that the common
	 * case does not have to jump.
	 *
	 * IMPORTANT: To avoid RSB underflow attacks and any other nastiness,
	 * there must not be any returns or indirect branches between this code
	 * and vmentry.
	 */
	movl SVM_spec_ctrl(%_ASM_DI), %eax
	cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
	je 801b
	mov $MSR_IA32_SPEC_CTRL, %ecx
	xor %edx, %edx
	wrmsr
	jmp 801b
.endm

.macro RESTORE_HOST_SPEC_CTRL
	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
	ALTERNATIVE_2 "", \
		"jmp 900f", X86_FEATURE_MSR_SPEC_CTRL, \
		"", X86_FEATURE_V_SPEC_CTRL
901:
.endm

.macro RESTORE_HOST_SPEC_CTRL_BODY
900:
	/* Same for after vmexit. */
	mov $MSR_IA32_SPEC_CTRL, %ecx

	/*
	 * Load the value that the guest had written into MSR_IA32_SPEC_CTRL,
	 * if it was not intercepted during guest execution.
	 */
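	/*
	 * Both callers arrange for @spec_ctrl_intercepted to be at the top
	 * of the stack at this point.
	 */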
	cmpb $0, (%_ASM_SP)
	jnz 998f
	rdmsr
	movl %eax, SVM_spec_ctrl(%_ASM_DI)
998:

	/* Now restore the host value of the MSR if different from the guest's. */
	movl PER_CPU_VAR(x86_spec_ctrl_current), %eax
	cmp SVM_spec_ctrl(%_ASM_DI), %eax
	je 901b
	xor %edx, %edx
	wrmsr
	jmp 901b
.endm

/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @svm: struct vcpu_svm *
 * @spec_ctrl_intercepted: bool
 */
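/*
 * The corresponding C declaration (in svm.h) is effectively:
 *
 *	void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
 */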
SYM_FUNC_START(__svm_vcpu_run)
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/*
	 * Save variables needed after vmexit on the stack, in inverse
	 * order compared to when they are needed.
	 */

	/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL. */
	push %_ASM_ARG2

	/* Needed to restore access to percpu variables. */
	__ASM_SIZE(push) PER_CPU_VAR(svm_data + SD_save_area_pa)
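	/*
	 * The value pushed above is the physical address of this CPU's host
	 * save area; it is popped and VMLOADed at label 7 after vmexit.
	 */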

	/* Finally save @svm. */
	push %_ASM_ARG1

.ifnc _ASM_ARG1, _ASM_DI
	/*
	 * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
	 * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
	 */
	mov %_ASM_ARG1, %_ASM_DI
.endif

	/* Clobbers RAX, RCX, RDX. */
	RESTORE_GUEST_SPEC_CTRL

	/*
	 * Use a single vmcb (vmcb01 because it's always valid) for
	 * context switching guest state via VMLOAD/VMSAVE, that way
	 * the state doesn't need to be copied between vmcb01 and
	 * vmcb02 when switching vmcbs for nested virtualization.
	 */
	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
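	/*
	 * VMLOAD pulls in the guest state that VMRUN does not switch: FS, GS,
	 * TR and LDTR, plus KERNEL_GS_BASE and the SYSCALL/SYSENTER MSRs.
	 */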
1:	vmload %_ASM_AX
2:

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX

	/* Load guest registers. */
	mov VCPU_RCX(%_ASM_DI), %_ASM_CX
	mov VCPU_RDX(%_ASM_DI), %_ASM_DX
	mov VCPU_RBX(%_ASM_DI), %_ASM_BX
	mov VCPU_RBP(%_ASM_DI), %_ASM_BP
	mov VCPU_RSI(%_ASM_DI), %_ASM_SI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_DI), %r8
	mov VCPU_R9 (%_ASM_DI), %r9
	mov VCPU_R10(%_ASM_DI), %r10
	mov VCPU_R11(%_ASM_DI), %r11
	mov VCPU_R12(%_ASM_DI), %r12
	mov VCPU_R13(%_ASM_DI), %r13
	mov VCPU_R14(%_ASM_DI), %r14
	mov VCPU_R15(%_ASM_DI), %r15
#endif
	mov VCPU_RDI(%_ASM_DI), %_ASM_DI

	/* Enter guest mode */
	sti
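	/*
	 * The one-instruction STI shadow keeps interrupts blocked until VMRUN
	 * has entered the guest; physical interrupts are then taken as #VMEXITs.
	 */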
3:	vmrun %_ASM_AX
4:
	cli

	/* Pop @svm to RAX while it's the only available register. */
	pop %_ASM_AX

	/* Save all guest registers. */
	mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/* @svm can stay in RDI from now on. */
	mov %_ASM_AX, %_ASM_DI

	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
5:	vmsave %_ASM_AX
6:

	/* Restores GSBASE among other things, allowing access to percpu data. */
	pop %_ASM_AX
7:	vmload %_ASM_AX
8:

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/* Clobbers RAX, RCX, RDX. */
	RESTORE_HOST_SPEC_CTRL

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize 'ret' if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET

	/* SRSO */
	ALTERNATIVE "", "call entry_ibpb", X86_FEATURE_IBPB_ON_VMEXIT

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack. In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free. RSP and RAX are exempt as they are restored by hardware
	 * during VM-Exit.
	 */
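	/* 32-bit XORs also clear the upper halves of the 64-bit registers. */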
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebx, %ebx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif

	/* "Pop" @spec_ctrl_intercepted. */
	pop %_ASM_BX
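	/* Restore the callee-saved registers pushed on entry. */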
	pop %_ASM_BX
#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

	RESTORE_GUEST_SPEC_CTRL_BODY
	RESTORE_HOST_SPEC_CTRL_BODY
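
	/*
	 * Fault fixups for VMLOAD (1), VMRUN (3), VMSAVE (5) and VMLOAD (7),
	 * reached via the exception table entries below. A fault is tolerated
	 * only while KVM is rebooting; otherwise it is fatal.
	 */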
10:	cmpb $0, kvm_rebooting
	jne 2b
	ud2
30:	cmpb $0, kvm_rebooting
	jne 4b
	ud2
50:	cmpb $0, kvm_rebooting
	jne 6b
	ud2
70:	cmpb $0, kvm_rebooting
	jne 8b
	ud2

	_ASM_EXTABLE(1b, 10b)
	_ASM_EXTABLE(3b, 30b)
	_ASM_EXTABLE(5b, 50b)
	_ASM_EXTABLE(7b, 70b)

SYM_FUNC_END(__svm_vcpu_run)

/**
 * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
 * @svm: struct vcpu_svm *
 * @spec_ctrl_intercepted: bool
 */
SYM_FUNC_START(__svm_sev_es_vcpu_run)
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/*
	 * Save variables needed after vmexit on the stack, in inverse
	 * order compared to when they are needed.
	 */

	/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL. */
	push %_ASM_ARG2

	/* Save @svm. */
	push %_ASM_ARG1

.ifnc _ASM_ARG1, _ASM_DI
	/*
	 * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
	 * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
	 */
	mov %_ASM_ARG1, %_ASM_DI
.endif

	/* Clobbers RAX, RCX, RDX. */
	RESTORE_GUEST_SPEC_CTRL

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX
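
	/*
	 * A SEV-ES guest's register state lives in its encrypted VMSA and is
	 * context switched by hardware on VMRUN/#VMEXIT, so unlike
	 * __svm_vcpu_run() there is no GPR load/save around VMRUN here.
	 */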
	/* Enter guest mode */
	sti

1:	vmrun %_ASM_AX
2:	cli

	/* Pop @svm to RDI, guest registers have been saved already. */
	pop %_ASM_DI

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/* Clobbers RAX, RCX, RDX. */
	RESTORE_HOST_SPEC_CTRL

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize RET if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET

	/* "Pop" @spec_ctrl_intercepted. */
	pop %_ASM_BX

	pop %_ASM_BX
#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

	RESTORE_GUEST_SPEC_CTRL_BODY
	RESTORE_HOST_SPEC_CTRL_BODY
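
	/* VMRUN fault fixup, reached via the exception table entry below. */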
3:	cmpb $0, kvm_rebooting
	jne 2b
	ud2

	_ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_sev_es_vcpu_run)