  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
  4. * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
  5. *
  6. * KVM Xen emulation
  7. */
  8. #ifndef __ARCH_X86_KVM_XEN_H__
  9. #define __ARCH_X86_KVM_XEN_H__
  10. #ifdef CONFIG_KVM_XEN
  11. #include <linux/jump_label_ratelimit.h>
  12. extern struct static_key_false_deferred kvm_xen_enabled;
  13. int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu);
  14. void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu);
  15. int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
  16. int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
  17. int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
  18. int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
  19. int kvm_xen_hvm_evtchn_send(struct kvm *kvm, struct kvm_irq_routing_xen_evtchn *evt);
  20. int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data);
  21. int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc);
  22. void kvm_xen_init_vm(struct kvm *kvm);
  23. void kvm_xen_destroy_vm(struct kvm *kvm);
  24. void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu);
  25. void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu);
  26. int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe,
  27. struct kvm *kvm);
  28. int kvm_xen_setup_evtchn(struct kvm *kvm,
  29. struct kvm_kernel_irq_routing_entry *e,
  30. const struct kvm_irq_routing_entry *ue);
  31. static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
  32. {
  33. return static_branch_unlikely(&kvm_xen_enabled.key) &&
  34. kvm->arch.xen_hvm_config.msr;
  35. }
  36. static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
  37. {
  38. return static_branch_unlikely(&kvm_xen_enabled.key) &&
  39. (kvm->arch.xen_hvm_config.flags &
  40. KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL);
  41. }
  42. static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
  43. {
  44. if (static_branch_unlikely(&kvm_xen_enabled.key) &&
  45. vcpu->arch.xen.vcpu_info_cache.active &&
  46. vcpu->kvm->arch.xen.upcall_vector)
  47. return __kvm_xen_has_interrupt(vcpu);
  48. return 0;
  49. }
  50. static inline bool kvm_xen_has_pending_events(struct kvm_vcpu *vcpu)
  51. {
  52. return static_branch_unlikely(&kvm_xen_enabled.key) &&
  53. vcpu->arch.xen.evtchn_pending_sel;
  54. }
  55. static inline bool kvm_xen_timer_enabled(struct kvm_vcpu *vcpu)
  56. {
  57. return !!vcpu->arch.xen.timer_virq;
  58. }
  59. static inline int kvm_xen_has_pending_timer(struct kvm_vcpu *vcpu)
  60. {
  61. if (kvm_xen_hypercall_enabled(vcpu->kvm) && kvm_xen_timer_enabled(vcpu))
  62. return atomic_read(&vcpu->arch.xen.timer_pending);
  63. return 0;
  64. }
  65. void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu);
  66. #else
/*
 * CONFIG_KVM_XEN=n: no-op stubs so callers don't need #ifdefs of their own.
 * The hypercall-page stub returns 1 ("not handled") so the write is treated
 * as an unhandled MSR access; the predicates all report "nothing pending".
 */
static inline int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
	return 1;
}

static inline void kvm_xen_init_vm(struct kvm *kvm)
{
}

static inline void kvm_xen_destroy_vm(struct kvm *kvm)
{
}

static inline void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu)
{
}

static inline void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
{
}

static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
	return false;
}

static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
	return false;
}

static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu)
{
}

static inline bool kvm_xen_has_pending_events(struct kvm_vcpu *vcpu)
{
	return false;
}

static inline int kvm_xen_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu)
{
}

static inline bool kvm_xen_timer_enabled(struct kvm_vcpu *vcpu)
{
	return false;
}
  113. #endif
  114. int kvm_xen_hypercall(struct kvm_vcpu *vcpu);
  115. #include <asm/pvclock-abi.h>
  116. #include <asm/xen/interface.h>
  117. #include <xen/interface/vcpu.h>
  118. void kvm_xen_update_runstate_guest(struct kvm_vcpu *vcpu, int state);
/* Mark the vCPU RUNSTATE_running in the guest-visible runstate area. */
static inline void kvm_xen_runstate_set_running(struct kvm_vcpu *vcpu)
{
	kvm_xen_update_runstate_guest(vcpu, RUNSTATE_running);
}
/*
 * Called on sched-out: account the vCPU as RUNSTATE_runnable, but only
 * when it was genuinely preempted (the WARN guards against callers
 * reaching here on a voluntary exit).
 */
static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu)
{
	/*
	 * If the vCPU wasn't preempted but took a normal exit for
	 * some reason (hypercalls, I/O, etc.), that is accounted as
	 * still RUNSTATE_running, as the VMM is still operating on
	 * behalf of the vCPU. Only if the VMM does actually block
	 * does it need to enter RUNSTATE_blocked.
	 */
	if (WARN_ON_ONCE(!vcpu->preempted))
		return;

	kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable);
}
  136. /* 32-bit compatibility definitions, also used natively in 32-bit build */
/*
 * 32-bit layout of Xen's arch_vcpu_info. Guest-visible ABI — field types,
 * order and padding must not change.
 */
struct compat_arch_vcpu_info {
	unsigned int cr2;
	unsigned int pad[5];
};
/*
 * 32-bit layout of Xen's vcpu_info. Guest-visible ABI — field types,
 * order and padding must not change.
 */
struct compat_vcpu_info {
	uint8_t evtchn_upcall_pending;
	uint8_t evtchn_upcall_mask;
	uint16_t pad;
	uint32_t evtchn_pending_sel;
	struct compat_arch_vcpu_info arch;
	struct pvclock_vcpu_time_info time;
}; /* 64 bytes (x86) */
/*
 * 32-bit layout of Xen's arch_shared_info. Guest-visible ABI — field
 * types and order must not change.
 */
struct compat_arch_shared_info {
	unsigned int max_pfn;
	unsigned int pfn_to_mfn_frame_list_list;
	unsigned int nmi_reason;
	unsigned int p2m_cr3;
	unsigned int p2m_vaddr;
	unsigned int p2m_generation;
	uint32_t wc_sec_hi;
};
/*
 * 32-bit layout of Xen's shared_info page. Guest-visible ABI — field
 * types and order must not change.
 */
struct compat_shared_info {
	struct compat_vcpu_info vcpu_info[MAX_VIRT_CPUS];
	uint32_t evtchn_pending[32];
	uint32_t evtchn_mask[32];
	struct pvclock_wall_clock wc;
	struct compat_arch_shared_info arch;
};

/* Number of 2-level event channels a 32-bit shared_info bitmap can hold. */
#define COMPAT_EVTCHN_2L_NR_CHANNELS (8 * \
				      sizeof_field(struct compat_shared_info, \
						   evtchn_pending))
/*
 * 32-bit layout of Xen's vcpu_runstate_info. Packed because the 32-bit
 * guest ABI has no alignment padding after 'state' — do not change.
 */
struct compat_vcpu_runstate_info {
	int state;
	uint64_t state_entry_time;
	uint64_t time[4];
} __attribute__((packed));
  173. #endif /* __ARCH_X86_KVM_XEN_H__ */