kvm_cache_regs.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

#include <linux/kvm_host.h>

/* CR0/CR4 bits that KVM may allow the guest to own (no VM-exit on writes). */
#define KVM_POSSIBLE_CR0_GUEST_BITS	(X86_CR0_TS | X86_CR0_WP)
#define KVM_POSSIBLE_CR4_GUEST_BITS				  \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)

/*
 * CR0/CR4 bits whose modification may require reloading the PDPTEs or
 * flushing the TLB.
 */
#define X86_CR0_PDPTR_BITS    (X86_CR0_CD | X86_CR0_NW | X86_CR0_PG)
#define X86_CR4_TLBFLUSH_BITS (X86_CR4_PGE | X86_CR4_PCIDE | X86_CR4_PAE | X86_CR4_SMEP)
#define X86_CR4_PDPTR_BITS    (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_SMEP)

static_assert(!(KVM_POSSIBLE_CR0_GUEST_BITS & X86_CR0_PDPTR_BITS));
#define BUILD_KVM_GPR_ACCESSORS(lname, uname)				      \
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
{									      \
	return vcpu->arch.regs[VCPU_REGS_##uname];			      \
}									      \
static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,	      \
						unsigned long val)	      \
{									      \
	vcpu->arch.regs[VCPU_REGS_##uname] = val;			      \
}
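/*
 * Illustrative expansion, not part of the upstream header: the invocation
 * BUILD_KVM_GPR_ACCESSORS(rax, RAX) below generates
 *
 *	static __always_inline unsigned long kvm_rax_read(struct kvm_vcpu *vcpu)
 *	{
 *		return vcpu->arch.regs[VCPU_REGS_RAX];
 *	}
 *	static __always_inline void kvm_rax_write(struct kvm_vcpu *vcpu,
 *						  unsigned long val)
 *	{
 *		vcpu->arch.regs[VCPU_REGS_RAX] = val;
 *	}
 *
 * i.e. direct, uncached access to the architectural GPR array.
 */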
BUILD_KVM_GPR_ACCESSORS(rax, RAX)
BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
BUILD_KVM_GPR_ACCESSORS(rdx, RDX)
BUILD_KVM_GPR_ACCESSORS(rbp, RBP)
BUILD_KVM_GPR_ACCESSORS(rsi, RSI)
BUILD_KVM_GPR_ACCESSORS(rdi, RDI)
#ifdef CONFIG_X86_64
BUILD_KVM_GPR_ACCESSORS(r8,  R8)
BUILD_KVM_GPR_ACCESSORS(r9,  R9)
BUILD_KVM_GPR_ACCESSORS(r10, R10)
BUILD_KVM_GPR_ACCESSORS(r11, R11)
BUILD_KVM_GPR_ACCESSORS(r12, R12)
BUILD_KVM_GPR_ACCESSORS(r13, R13)
BUILD_KVM_GPR_ACCESSORS(r14, R14)
BUILD_KVM_GPR_ACCESSORS(r15, R15)
#endif
/*
 * avail  dirty
 * 0	  0	  register in VMCS/VMCB
 * 0	  1	  *INVALID*
 * 1	  0	  register in vcpu->arch
 * 1	  1	  register in vcpu->arch, needs to be stored back
 */
static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
					     enum kvm_reg reg)
{
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
					 enum kvm_reg reg)
{
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
					   enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}
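/*
 * Illustrative sketch, not part of the upstream header: how a vendor module
 * might drive the avail/dirty state machine above.  vmcs_readl()/GUEST_RIP
 * are used purely as an example of a VMX-style cache fill.
 *
 *	static void example_cache_rip(struct kvm_vcpu *vcpu)
 *	{
 *		// avail=0, dirty=0: the value lives only in the VMCS/VMCB,
 *		// so fetch it into the software cache ...
 *		vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
 *		// ... and transition to avail=1, dirty=0.
 *		kvm_register_mark_available(vcpu, VCPU_REGS_RIP);
 *	}
 *
 * A subsequent kvm_register_write_raw(vcpu, VCPU_REGS_RIP, val) moves the
 * register to avail=1, dirty=1, signalling that the cached value must be
 * written back to the VMCS/VMCB before the next VM-entry.
 */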
/*
 * The "raw" register helpers are only for cases where the full 64 bits of a
 * register are read/written irrespective of current vCPU mode.  In other
 * words, odds are good you shouldn't be using the raw variants.
 */
static inline unsigned long kvm_register_read_raw(struct kvm_vcpu *vcpu, int reg)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return 0;

	if (!kvm_register_is_available(vcpu, reg))
		static_call(kvm_x86_cache_reg)(vcpu, reg);

	return vcpu->arch.regs[reg];
}

static inline void kvm_register_write_raw(struct kvm_vcpu *vcpu, int reg,
					  unsigned long val)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return;

	vcpu->arch.regs[reg] = val;
	kvm_register_mark_dirty(vcpu, reg);
}
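/*
 * For contrast, a sketch of the mode-aware (non-raw) read wrapper that KVM
 * layers on top of kvm_register_read_raw() in arch/x86/kvm/x86.h; the exact
 * location and helper name (is_64_bit_mode()) may differ by kernel version:
 *
 *	static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
 *						      int reg)
 *	{
 *		unsigned long val = kvm_register_read_raw(vcpu, reg);
 *
 *		// Outside 64-bit mode the upper 32 bits are not
 *		// architecturally visible, so truncate them away instead of
 *		// handing stale high bits to the caller.
 *		return is_64_bit_mode(vcpu) ? val : (u32)val;
 *	}
 */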
static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read_raw(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write_raw(vcpu, VCPU_REGS_RIP, val);
}

static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read_raw(vcpu, VCPU_REGS_RSP);
}

static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write_raw(vcpu, VCPU_REGS_RSP, val);
}
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* on svm */
	if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}

static inline void kvm_pdptr_write(struct kvm_vcpu *vcpu, int index, u64 value)
{
	vcpu->arch.walk_mmu->pdptrs[index] = value;
}
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
	if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR0);
	return vcpu->arch.cr0 & mask;
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}

static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
	if ((tmask & vcpu->arch.cr4_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR4))
		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR4);
	return vcpu->arch.cr4 & mask;
}

static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR3);
	return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}
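/*
 * Illustrative usage, not part of the upstream header: querying a single
 * control-register bit through the masked readers above.  Only a guest-owned
 * bit whose cached value may be stale forces a decache via
 * kvm_x86_cache_reg(); bits KVM intercepts are always current in vcpu->arch.
 *
 *	// e.g. checking whether the guest currently has CR0.TS set:
 *	if (kvm_read_cr0_bits(vcpu, X86_CR0_TS))
 *		// ... react to the guest's TS=1 state ...
 */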
/* Assemble the guest's EDX:EAX register pair into a single 64-bit value. */
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
	return (kvm_rax_read(vcpu) & -1u)
		| ((u64)(kvm_rdx_read(vcpu) & -1u) << 32);
}
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
	vcpu->stat.guest_mode = 1;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;

	if (vcpu->arch.load_eoi_exitmap_pending) {
		vcpu->arch.load_eoi_exitmap_pending = false;
		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
	}

	vcpu->stat.guest_mode = 0;
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}

static inline bool is_smm(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_SMM_MASK;
}

#endif