svm_ops.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_SVM_OPS_H
#define __KVM_X86_SVM_OPS_H

#include <linux/compiler_types.h>

#include "x86.h"
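
/*
 * The svm_asm*() macros emit a single SVM instruction via asm goto.  The
 * _ASM_EXTABLE entry sends any fault taken on the instruction to the local
 * "fault" label, which forwards the error to kvm_spurious_fault().
 */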
#define svm_asm(insn, clobber...)                               \
do {                                                            \
        asm_volatile_goto("1: " __stringify(insn) "\n\t"        \
                          _ASM_EXTABLE(1b, %l[fault])           \
                          ::: clobber : fault);                 \
        return;                                                 \
fault:                                                          \
        kvm_spurious_fault();                                   \
} while (0)

#define svm_asm1(insn, op1, clobber...)                                 \
do {                                                                    \
        asm_volatile_goto("1: " __stringify(insn) " %0\n\t"             \
                          _ASM_EXTABLE(1b, %l[fault])                   \
                          :: op1 : clobber : fault);                    \
        return;                                                         \
fault:                                                                  \
        kvm_spurious_fault();                                           \
} while (0)

#define svm_asm2(insn, op1, op2, clobber...)                            \
do {                                                                    \
        asm_volatile_goto("1: " __stringify(insn) " %1, %0\n\t"         \
                          _ASM_EXTABLE(1b, %l[fault])                   \
                          :: op1, op2 : clobber : fault);               \
        return;                                                         \
fault:                                                                  \
        kvm_spurious_fault();                                           \
} while (0)
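
/*
 * Clear the Global Interrupt Flag (GIF).  While GIF=0 the CPU holds
 * interrupts, NMIs and SMIs pending.
 */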
static inline void clgi(void)
{
        svm_asm(clgi);
}
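
/* Set GIF, re-enabling event delivery after a prior CLGI. */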
static inline void stgi(void)
{
        svm_asm(stgi);
}
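
/*
 * INVLPGA invalidates the TLB mapping for the virtual address in rAX under
 * the ASID in ECX, matching the "a" and "c" register constraints below.
 */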
static inline void invlpga(unsigned long addr, u32 asid)
{
        svm_asm2(invlpga, "c"(asid), "a"(addr));
}

/*
 * Despite being a physical address, the portion of rAX that is consumed by
 * VMSAVE, VMLOAD, etc... is still controlled by the effective address size,
 * hence 'unsigned long' instead of 'hpa_t'.
 */
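/*
 * VMSAVE stores a subset of host/guest state (FS/GS/TR/LDTR segment state
 * and a handful of MSRs) into the VMCB at the given physical address,
 * hence the "memory" clobber.
 */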
static __always_inline void vmsave(unsigned long pa)
{
        svm_asm1(vmsave, "a" (pa), "memory");
}

#endif /* __KVM_X86_SVM_OPS_H */