vmcs.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_VMCS_H
#define __KVM_X86_VMX_VMCS_H

#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/nospec.h>

#include <asm/kvm.h>
#include <asm/vmx.h>

#include "capabilities.h"

#define ROL16(val, n) ((u16)(((u16)(val) << (n)) | ((u16)(val) >> (16 - (n)))))

struct vmcs_hdr {
	u32 revision_id:31;
	u32 shadow_vmcs:1;
};
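
/*
 * Software view of a VMCS region: the header (revision identifier and
 * shadow-VMCS indicator), the VMX-abort indicator, and the rest of the
 * region, whose layout is implementation-specific.
 */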
struct vmcs {
	struct vmcs_hdr hdr;
	u32 abort;
	char data[];
};

DECLARE_PER_CPU(struct vmcs *, current_vmcs);

/*
 * vmcs_host_state tracks registers that are loaded from the VMCS on VMEXIT
 * and whose values change infrequently, but are not constant.  I.e. this is
 * used as a write-through cache of the corresponding VMCS fields.
 */
struct vmcs_host_state {
	unsigned long cr3;	/* May not match real cr3 */
	unsigned long cr4;	/* May not match real cr4 */
	unsigned long gs_base;
	unsigned long fs_base;
	unsigned long rsp;

	u16 fs_sel, gs_sel, ldt_sel;
#ifdef CONFIG_X86_64
	u16 ds_sel, es_sel;
#endif
};
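
/*
 * Shadow copies of the VMCS control fields.  Caching the last written
 * value here lets the current setting be queried, and redundant VMWRITEs
 * be skipped, without doing a VMREAD on every access.
 */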
struct vmcs_controls_shadow {
	u32 vm_entry;
	u32 vm_exit;
	u32 pin;
	u32 exec;
	u32 secondary_exec;
	u64 tertiary_exec;
};

/*
 * Track a VMCS that may be loaded on a certain CPU. If it is (cpu != -1), also
 * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
 * loaded on this CPU (so we can clear them if the CPU goes down).
 */
struct loaded_vmcs {
	struct vmcs *vmcs;
	struct vmcs *shadow_vmcs;
	int cpu;
	bool launched;
	bool nmi_known_unmasked;
	bool hv_timer_soft_disabled;
	/* Support for vnmi-less CPUs */
	int soft_vnmi_blocked;
	ktime_t entry_time;
	s64 vnmi_blocked_time;
	unsigned long *msr_bitmap;
	struct list_head loaded_vmcss_on_cpu_link;
	struct vmcs_host_state host_state;
	struct vmcs_controls_shadow controls_shadow;
};
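
/*
 * The helpers below decode the interruption-information field format
 * defined by the Intel SDM: bits 7:0 hold the vector, bits 10:8 the event
 * type, bit 11 the "error code delivered" flag, and bit 31 the valid bit
 * (see the INTR_INFO_* masks in <asm/vmx.h>).  For example, a delivered
 * page fault (#PF, vector 14, hardware exception type 3, error code
 * valid) encodes as 0x80000b0e.
 */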
static inline bool is_intr_type(u32 intr_info, u32 type)
{
	const u32 mask = INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK;

	return (intr_info & mask) == (INTR_INFO_VALID_MASK | type);
}

static inline bool is_intr_type_n(u32 intr_info, u32 type, u8 vector)
{
	const u32 mask = INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK |
			 INTR_INFO_VECTOR_MASK;

	return (intr_info & mask) == (INTR_INFO_VALID_MASK | type | vector);
}

static inline bool is_exception_n(u32 intr_info, u8 vector)
{
	return is_intr_type_n(intr_info, INTR_TYPE_HARD_EXCEPTION, vector);
}

static inline bool is_debug(u32 intr_info)
{
	return is_exception_n(intr_info, DB_VECTOR);
}

static inline bool is_breakpoint(u32 intr_info)
{
	return is_exception_n(intr_info, BP_VECTOR);
}

static inline bool is_double_fault(u32 intr_info)
{
	return is_exception_n(intr_info, DF_VECTOR);
}

static inline bool is_page_fault(u32 intr_info)
{
	return is_exception_n(intr_info, PF_VECTOR);
}

static inline bool is_invalid_opcode(u32 intr_info)
{
	return is_exception_n(intr_info, UD_VECTOR);
}

static inline bool is_gp_fault(u32 intr_info)
{
	return is_exception_n(intr_info, GP_VECTOR);
}

static inline bool is_alignment_check(u32 intr_info)
{
	return is_exception_n(intr_info, AC_VECTOR);
}

static inline bool is_machine_check(u32 intr_info)
{
	return is_exception_n(intr_info, MC_VECTOR);
}

static inline bool is_nm_fault(u32 intr_info)
{
	return is_exception_n(intr_info, NM_VECTOR);
}

/* Undocumented: icebp/int1 */
static inline bool is_icebp(u32 intr_info)
{
	return is_intr_type(intr_info, INTR_TYPE_PRIV_SW_EXCEPTION);
}

static inline bool is_nmi(u32 intr_info)
{
	return is_intr_type(intr_info, INTR_TYPE_NMI_INTR);
}

static inline bool is_external_intr(u32 intr_info)
{
	return is_intr_type(intr_info, INTR_TYPE_EXT_INTR);
}

static inline bool is_exception_with_error_code(u32 intr_info)
{
	const u32 mask = INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK;

	return (intr_info & mask) == mask;
}
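
/*
 * Illustrative use only (not part of this header): a VM-exit handler would
 * typically fetch the field with something like vmcs_read32(VM_EXIT_INTR_INFO)
 * and dispatch on the predicates above, e.g.:
 *
 *	u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
 *
 *	if (is_nmi(intr_info))
 *		handle_nmi();
 *	else if (is_page_fault(intr_info))
 *		handle_page_fault();
 *
 * where handle_nmi()/handle_page_fault() stand in for the real handlers.
 */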
enum vmcs_field_width {
	VMCS_FIELD_WIDTH_U16 = 0,
	VMCS_FIELD_WIDTH_U64 = 1,
	VMCS_FIELD_WIDTH_U32 = 2,
	VMCS_FIELD_WIDTH_NATURAL_WIDTH = 3
};
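
/*
 * VMCS field encodings pack their metadata into the field number itself:
 * bit 0 selects the high half of a 64-bit field, bits 9:1 hold the index,
 * bits 11:10 the field type (1 == read-only VM-exit information), and
 * bits 14:13 the width decoded below.  For example, GUEST_RIP (0x681e)
 * yields width 3 (natural width), type 2 (guest state) and index 15.
 */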
static inline int vmcs_field_width(unsigned long field)
{
	if (0x1 & field)	/* the *_HIGH fields are all 32 bit */
		return VMCS_FIELD_WIDTH_U32;
	return (field >> 13) & 0x3;
}

static inline int vmcs_field_readonly(unsigned long field)
{
	return (((field >> 10) & 0x3) == 1);
}

#define VMCS_FIELD_INDEX_SHIFT		(1)
#define VMCS_FIELD_INDEX_MASK		GENMASK(9, 1)

static inline unsigned int vmcs_field_index(unsigned long field)
{
	return (field & VMCS_FIELD_INDEX_MASK) >> VMCS_FIELD_INDEX_SHIFT;
}

#endif /* __KVM_X86_VMX_VMCS_H */