kvm_para.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_KVM_PARA_H
#define _ASM_X86_KVM_PARA_H

#include <asm/processor.h>
#include <asm/alternative.h>
#include <linux/interrupt.h>
#include <uapi/asm/kvm_para.h>
#include <asm/tdx.h>

#ifdef CONFIG_KVM_GUEST
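/*
 * Returns true if the host paused the guest (the PVCLOCK_GUEST_STOPPED flag
 * was set in the pvclock structure) and clears the flag. Watchdogs use this
 * to suppress false soft-lockup reports after a live migration or an
 * extended host-side pause.
 */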
bool kvm_check_and_clear_guest_paused(void);
#else
static inline bool kvm_check_and_clear_guest_paused(void)
{
        return false;
}
#endif /* CONFIG_KVM_GUEST */

#define KVM_HYPERCALL \
        ALTERNATIVE("vmcall", "vmmcall", X86_FEATURE_VMMCALL)
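
/*
 * ALTERNATIVE() emits the Intel vmcall encoding (0f 01 c1) by default and
 * patches it to the AMD vmmcall encoding (0f 01 d9) at boot on CPUs that set
 * X86_FEATURE_VMMCALL. Both encodings are three bytes, so the patch is an
 * in-place byte swap and the same kernel image runs on either vendor's
 * hardware.
 */
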
/* For KVM hypercalls, a three-byte sequence of either the vmcall or the vmmcall
 * instruction. The hypervisor may replace it with something else but only the
 * instructions are guaranteed to be supported.
 *
 * Up to four arguments may be passed in rbx, rcx, rdx, and rsi respectively.
 * The hypercall number should be placed in rax and the return value will be
 * placed in rax. No other registers will be clobbered unless explicitly
 * noted by the particular hypercall.
 */
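
/*
 * TDX guests cannot hand vmcall/vmmcall to the host directly; when
 * X86_FEATURE_TDX_GUEST is set, each helper below instead routes the
 * hypercall through the TDX module via tdx_kvm_hypercall(), keeping the
 * same register-based argument convention.
 */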
static inline long kvm_hypercall0(unsigned int nr)
{
        long ret;

        if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
                return tdx_kvm_hypercall(nr, 0, 0, 0, 0);

        asm volatile(KVM_HYPERCALL
                     : "=a"(ret)
                     : "a"(nr)
                     : "memory");
        return ret;
}

static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
{
        long ret;

        if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
                return tdx_kvm_hypercall(nr, p1, 0, 0, 0);

        asm volatile(KVM_HYPERCALL
                     : "=a"(ret)
                     : "a"(nr), "b"(p1)
                     : "memory");
        return ret;
}

static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
                                  unsigned long p2)
{
        long ret;

        if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
                return tdx_kvm_hypercall(nr, p1, p2, 0, 0);

        asm volatile(KVM_HYPERCALL
                     : "=a"(ret)
                     : "a"(nr), "b"(p1), "c"(p2)
                     : "memory");
        return ret;
}

static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
                                  unsigned long p2, unsigned long p3)
{
        long ret;

        if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
                return tdx_kvm_hypercall(nr, p1, p2, p3, 0);

        asm volatile(KVM_HYPERCALL
                     : "=a"(ret)
                     : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
                     : "memory");
        return ret;
}

static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
                                  unsigned long p2, unsigned long p3,
                                  unsigned long p4)
{
        long ret;

        if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
                return tdx_kvm_hypercall(nr, p1, p2, p3, p4);

        asm volatile(KVM_HYPERCALL
                     : "=a"(ret)
                     : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4)
                     : "memory");
        return ret;
}
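
/*
 * Illustrative sketch (not part of this header): how a caller uses these
 * helpers. It mirrors the PV unlock path in arch/x86/kernel/kvm.c, which
 * kicks a halted vCPU with KVM_HC_KICK_CPU (from uapi/linux/kvm_para.h);
 * x86_cpu_to_apicid comes from <asm/smp.h>. Treat the details as a sketch,
 * not the canonical implementation.
 */
static inline void example_kvm_kick_cpu(int cpu)
{
        unsigned long flags = 0;        /* no flags defined for this call */
        int apicid = per_cpu(x86_cpu_to_apicid, cpu);

        kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

/*
 * SEV guests only ever run on AMD hardware, so the variant below emits
 * vmmcall directly rather than the patched KVM_HYPERCALL alternative, and
 * it takes no TDX detour.
 */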
static inline long kvm_sev_hypercall3(unsigned int nr, unsigned long p1,
                                      unsigned long p2, unsigned long p3)
{
        long ret;

        asm volatile("vmmcall"
                     : "=a"(ret)
                     : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
                     : "memory");
        return ret;
}

#ifdef CONFIG_KVM_GUEST
void kvmclock_init(void);
void kvmclock_disable(void);
bool kvm_para_available(void);
unsigned int kvm_arch_para_features(void);
unsigned int kvm_arch_para_hints(void);
void kvm_async_pf_task_wait_schedule(u32 token);
void kvm_async_pf_task_wake(u32 token);
u32 kvm_read_and_reset_apf_flags(void);
bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token);
DECLARE_STATIC_KEY_FALSE(kvm_async_pf_enabled);
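
/*
 * kvm_async_pf_enabled is a static key: it defaults to false and is flipped
 * once at boot when the guest negotiates async page faults with the host.
 * Because the wrapper below is __always_inline, the disabled case compiles
 * down to a single patchable nop in the page-fault path.
 */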
static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
        if (static_branch_unlikely(&kvm_async_pf_enabled))
                return __kvm_handle_async_pf(regs, token);
        else
                return false;
}

#ifdef CONFIG_PARAVIRT_SPINLOCKS
void __init kvm_spinlock_init(void);
#else /* !CONFIG_PARAVIRT_SPINLOCKS */
static inline void kvm_spinlock_init(void)
{
}
#endif /* CONFIG_PARAVIRT_SPINLOCKS */
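
/*
 * Illustrative sketch (not part of this header): a typical guest-side probe
 * before enabling a paravirtual feature. kvm_para_has_feature() comes from
 * <linux/kvm_para.h> and KVM_FEATURE_PV_UNHALT from <uapi/asm/kvm_para.h>;
 * this mirrors the check kvm_spinlock_init() performs before switching to
 * PV spinlocks.
 */
static inline bool example_pv_unhalt_usable(void)
{
        return kvm_para_available() &&
               kvm_para_has_feature(KVM_FEATURE_PV_UNHALT);
}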

#else /* CONFIG_KVM_GUEST */
#define kvm_async_pf_task_wait_schedule(T) do {} while(0)
#define kvm_async_pf_task_wake(T) do {} while(0)

static inline bool kvm_para_available(void)
{
        return false;
}

static inline unsigned int kvm_arch_para_features(void)
{
        return 0;
}

static inline unsigned int kvm_arch_para_hints(void)
{
        return 0;
}

static inline u32 kvm_read_and_reset_apf_flags(void)
{
        return 0;
}

static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
        return false;
}
#endif

#endif /* _ASM_X86_KVM_PARA_H */