evmcs.h 6.3 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef __KVM_X86_VMX_EVMCS_H
  3. #define __KVM_X86_VMX_EVMCS_H
  4. #include <linux/jump_label.h>
  5. #include <asm/hyperv-tlfs.h>
  6. #include <asm/mshyperv.h>
  7. #include <asm/vmx.h>
  8. #include "capabilities.h"
  9. #include "vmcs.h"
  10. #include "vmcs12.h"
struct vmcs_config;

DECLARE_STATIC_KEY_FALSE(enable_evmcs);

/*
 * The per-cpu current VMCS, reinterpreted as an enlightened VMCS.  Only
 * meaningful when enable_evmcs is true.
 */
#define current_evmcs ((struct hv_enlightened_vmcs *)this_cpu_read(current_vmcs))

/* Enlightened VMCS version supported/advertised by KVM. */
#define KVM_EVMCS_VERSION 1

/*
 * Enlightened VMCSv1 doesn't support these:
 *
 *	POSTED_INTR_NV                  = 0x00000002,
 *	GUEST_INTR_STATUS               = 0x00000810,
 *	APIC_ACCESS_ADDR		= 0x00002014,
 *	POSTED_INTR_DESC_ADDR           = 0x00002016,
 *	EOI_EXIT_BITMAP0                = 0x0000201c,
 *	EOI_EXIT_BITMAP1                = 0x0000201e,
 *	EOI_EXIT_BITMAP2                = 0x00002020,
 *	EOI_EXIT_BITMAP3                = 0x00002022,
 *	GUEST_PML_INDEX			= 0x00000812,
 *	PML_ADDRESS			= 0x0000200e,
 *	VM_FUNCTION_CONTROL             = 0x00002018,
 *	EPTP_LIST_ADDRESS               = 0x00002024,
 *	VMREAD_BITMAP                   = 0x00002026,
 *	VMWRITE_BITMAP                  = 0x00002028,
 *
 *	TSC_MULTIPLIER                  = 0x00002032,
 *	PLE_GAP                         = 0x00004020,
 *	PLE_WINDOW                      = 0x00004022,
 *	VMX_PREEMPTION_TIMER_VALUE      = 0x0000482E,
 *
 * Currently unsupported in KVM:
 *	GUEST_IA32_RTIT_CTL		= 0x00002814,
 */

/* VMX control bits with no eVMCSv1 representation (see list above). */
#define EVMCS1_UNSUPPORTED_PINCTRL (PIN_BASED_POSTED_INTR | \
				    PIN_BASED_VMX_PREEMPTION_TIMER)
#define EVMCS1_UNSUPPORTED_EXEC_CTRL (CPU_BASED_ACTIVATE_TERTIARY_CONTROLS)
#define EVMCS1_UNSUPPORTED_2NDEXEC					\
	(SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |				\
	 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |			\
	 SECONDARY_EXEC_APIC_REGISTER_VIRT |				\
	 SECONDARY_EXEC_ENABLE_PML |					\
	 SECONDARY_EXEC_ENABLE_VMFUNC |					\
	 SECONDARY_EXEC_SHADOW_VMCS |					\
	 SECONDARY_EXEC_TSC_SCALING |					\
	 SECONDARY_EXEC_PAUSE_LOOP_EXITING)
#define EVMCS1_UNSUPPORTED_VMEXIT_CTRL					\
	(VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
#define EVMCS1_UNSUPPORTED_VMENTRY_CTRL (0)
#define EVMCS1_UNSUPPORTED_VMFUNC (VMX_VMFUNC_EPTP_SWITCHING)
/*
 * Maps one VMCS12 field encoding to its location in the enlightened VMCS.
 * An offset of 0 means the field has no eVMCS equivalent (a "hole" — see
 * evmcs_field_offset()).
 */
struct evmcs_field {
	u16 offset;		/* byte offset into struct hv_enlightened_vmcs */
	u16 clean_field;	/* hv_clean_fields bit(s) cleared on write */
};

/* Lookup table indexed by ROL16(field, 6); defined out of line. */
extern const struct evmcs_field vmcs_field_to_evmcs_1[];
extern const unsigned int nr_evmcs_1_fields;
  63. static __always_inline int evmcs_field_offset(unsigned long field,
  64. u16 *clean_field)
  65. {
  66. unsigned int index = ROL16(field, 6);
  67. const struct evmcs_field *evmcs_field;
  68. if (unlikely(index >= nr_evmcs_1_fields))
  69. return -ENOENT;
  70. evmcs_field = &vmcs_field_to_evmcs_1[index];
  71. /*
  72. * Use offset=0 to detect holes in eVMCS. This offset belongs to
  73. * 'revision_id' but this field has no encoding and is supposed to
  74. * be accessed directly.
  75. */
  76. if (unlikely(!evmcs_field->offset))
  77. return -ENOENT;
  78. if (clean_field)
  79. *clean_field = evmcs_field->clean_field;
  80. return evmcs_field->offset;
  81. }
  82. static inline u64 evmcs_read_any(struct hv_enlightened_vmcs *evmcs,
  83. unsigned long field, u16 offset)
  84. {
  85. /*
  86. * vmcs12_read_any() doesn't care whether the supplied structure
  87. * is 'struct vmcs12' or 'struct hv_enlightened_vmcs' as it takes
  88. * the exact offset of the required field, use it for convenience
  89. * here.
  90. */
  91. return vmcs12_read_any((void *)evmcs, field, offset);
  92. }
  93. #if IS_ENABLED(CONFIG_HYPERV)
  94. static __always_inline int get_evmcs_offset(unsigned long field,
  95. u16 *clean_field)
  96. {
  97. int offset = evmcs_field_offset(field, clean_field);
  98. WARN_ONCE(offset < 0, "KVM: accessing unsupported EVMCS field %lx\n",
  99. field);
  100. return offset;
  101. }
  102. static __always_inline void evmcs_write64(unsigned long field, u64 value)
  103. {
  104. u16 clean_field;
  105. int offset = get_evmcs_offset(field, &clean_field);
  106. if (offset < 0)
  107. return;
  108. *(u64 *)((char *)current_evmcs + offset) = value;
  109. current_evmcs->hv_clean_fields &= ~clean_field;
  110. }
  111. static inline void evmcs_write32(unsigned long field, u32 value)
  112. {
  113. u16 clean_field;
  114. int offset = get_evmcs_offset(field, &clean_field);
  115. if (offset < 0)
  116. return;
  117. *(u32 *)((char *)current_evmcs + offset) = value;
  118. current_evmcs->hv_clean_fields &= ~clean_field;
  119. }
  120. static inline void evmcs_write16(unsigned long field, u16 value)
  121. {
  122. u16 clean_field;
  123. int offset = get_evmcs_offset(field, &clean_field);
  124. if (offset < 0)
  125. return;
  126. *(u16 *)((char *)current_evmcs + offset) = value;
  127. current_evmcs->hv_clean_fields &= ~clean_field;
  128. }
  129. static inline u64 evmcs_read64(unsigned long field)
  130. {
  131. int offset = get_evmcs_offset(field, NULL);
  132. if (offset < 0)
  133. return 0;
  134. return *(u64 *)((char *)current_evmcs + offset);
  135. }
  136. static inline u32 evmcs_read32(unsigned long field)
  137. {
  138. int offset = get_evmcs_offset(field, NULL);
  139. if (offset < 0)
  140. return 0;
  141. return *(u32 *)((char *)current_evmcs + offset);
  142. }
  143. static inline u16 evmcs_read16(unsigned long field)
  144. {
  145. int offset = get_evmcs_offset(field, NULL);
  146. if (offset < 0)
  147. return 0;
  148. return *(u16 *)((char *)current_evmcs + offset);
  149. }
/*
 * Make @phys_addr the current enlightened VMCS by publishing it in this
 * CPU's Hyper-V VP assist page and requesting an enlightened vmentry.
 *
 * NOTE(review): vp_ap is used without a NULL check — presumably the VP
 * assist page is guaranteed to exist whenever eVMCS is in use; confirm
 * against the enablement path.
 */
static inline void evmcs_load(u64 phys_addr)
{
	struct hv_vp_assist_page *vp_ap =
		hv_get_vp_assist_page(smp_processor_id());

	/* Propagate L1's request for the direct-flush hypercall, if set. */
	if (current_evmcs->hv_enlightenments_control.nested_flush_hypercall)
		vp_ap->nested_control.features.directhypercall = 1;
	vp_ap->current_nested_vmcs = phys_addr;
	vp_ap->enlighten_vmentry = 1;
}
#else /* !IS_ENABLED(CONFIG_HYPERV) */
/* No-op stubs: without Hyper-V support there is never a current eVMCS. */
static __always_inline void evmcs_write64(unsigned long field, u64 value) {}
static inline void evmcs_write32(unsigned long field, u32 value) {}
static inline void evmcs_write16(unsigned long field, u16 value) {}
static inline u64 evmcs_read64(unsigned long field) { return 0; }
static inline u32 evmcs_read32(unsigned long field) { return 0; }
static inline u16 evmcs_read16(unsigned long field) { return 0; }
static inline void evmcs_load(u64 phys_addr) {}
#endif /* IS_ENABLED(CONFIG_HYPERV) */
  168. #define EVMPTR_INVALID (-1ULL)
  169. #define EVMPTR_MAP_PENDING (-2ULL)
  170. static inline bool evmptr_is_valid(u64 evmptr)
  171. {
  172. return evmptr != EVMPTR_INVALID && evmptr != EVMPTR_MAP_PENDING;
  173. }
/* Outcome of an attempted enlightened VMPTRLD. */
enum nested_evmptrld_status {
	EVMPTRLD_DISABLED,
	EVMPTRLD_SUCCEEDED,
	EVMPTRLD_VMFAIL,
	EVMPTRLD_ERROR,
};

bool nested_enlightened_vmentry(struct kvm_vcpu *vcpu, u64 *evmcs_gpa);
uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu);
int nested_enable_evmcs(struct kvm_vcpu *vcpu,
			uint16_t *vmcs_version);
void nested_evmcs_filter_control_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
int nested_evmcs_check_controls(struct vmcs12 *vmcs12);

#endif /* __KVM_X86_VMX_EVMCS_H */