  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef _ASM_X86_MSHYPER_H
  3. #define _ASM_X86_MSHYPER_H
  4. #include <linux/types.h>
  5. #include <linux/nmi.h>
  6. #include <linux/msi.h>
  7. #include <asm/io.h>
  8. #include <asm/hyperv-tlfs.h>
  9. #include <asm/nospec-branch.h>
  10. #include <asm/paravirt.h>
  11. #include <asm/mshyperv.h>
  12. union hv_ghcb;
  13. DECLARE_STATIC_KEY_FALSE(isolation_type_snp);
  14. typedef int (*hyperv_fill_flush_list_func)(
  15. struct hv_guest_mapping_flush_list *flush,
  16. void *data);
  17. #define hv_get_raw_timer() rdtsc_ordered()
  18. void hyperv_vector_handler(struct pt_regs *regs);
  19. #if IS_ENABLED(CONFIG_HYPERV)
  20. extern int hyperv_init_cpuhp;
  21. extern void *hv_hypercall_pg;
  22. extern u64 hv_current_partition_id;
  23. extern union hv_ghcb * __percpu *hv_ghcb_pg;
  24. int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages);
  25. int hv_call_add_logical_proc(int node, u32 lp_index, u32 acpi_id);
  26. int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags);
/*
 * hv_do_hypercall - issue a standard (memory-based) Hyper-V hypercall.
 *
 * @control: hypercall control word (call code plus flags)
 * @input:   virtual address of the input block, or NULL
 * @output:  virtual address of the output block, or NULL
 *
 * Input/output addresses are converted to physical addresses before the
 * call.  Returns the hypervisor status word, or U64_MAX when the
 * hypercall page has not been set up.
 */
static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
{
	u64 input_address = input ? virt_to_phys(input) : 0;
	u64 output_address = output ? virt_to_phys(output) : 0;
	u64 hv_status;

#ifdef CONFIG_X86_64
	if (!hv_hypercall_pg)
		return U64_MAX;

	/*
	 * x86_64 convention per the constraints below: RCX = control,
	 * RDX = input PA, R8 = output PA; status returns in RAX.
	 * CALL_NOSPEC performs a retpoline-safe indirect call through
	 * the hypercall page.
	 */
	__asm__ __volatile__("mov %4, %%r8\n"
			     CALL_NOSPEC
			     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
			       "+c" (control), "+d" (input_address)
			     : "r" (output_address),
			       THUNK_TARGET(hv_hypercall_pg)
			     : "cc", "memory", "r8", "r9", "r10", "r11");
#else
	/* 32-bit: 64-bit quantities are split across register pairs. */
	u32 input_address_hi = upper_32_bits(input_address);
	u32 input_address_lo = lower_32_bits(input_address);
	u32 output_address_hi = upper_32_bits(output_address);
	u32 output_address_lo = lower_32_bits(output_address);

	if (!hv_hypercall_pg)
		return U64_MAX;

	/*
	 * "A" binds a 64-bit value to the EDX:EAX pair; input goes in
	 * EBX:ECX and output in EDI:ESI per the constraints below.
	 */
	__asm__ __volatile__(CALL_NOSPEC
			     : "=A" (hv_status),
			       "+c" (input_address_lo), ASM_CALL_CONSTRAINT
			     : "A" (control),
			       "b" (input_address_hi),
			       "D"(output_address_hi), "S"(output_address_lo),
			       THUNK_TARGET(hv_hypercall_pg)
			     : "cc", "memory");
#endif /* !x86_64 */
	return hv_status;
}
/* Fast hypercall with 8 bytes of input and no output */
static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
{
	/* Fast-call bit: input is passed in registers rather than memory. */
	u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT;

#ifdef CONFIG_X86_64
	{
		/* RCX = control, RDX = input1; status returns in RAX. */
		__asm__ __volatile__(CALL_NOSPEC
				     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
				       "+c" (control), "+d" (input1)
				     : THUNK_TARGET(hv_hypercall_pg)
				     : "cc", "r8", "r9", "r10", "r11");
	}
#else
	{
		/*
		 * 32-bit: control rides in EDX:EAX ("A"), input1 is split
		 * into EBX (high) and ECX (low).
		 */
		u32 input1_hi = upper_32_bits(input1);
		u32 input1_lo = lower_32_bits(input1);

		__asm__ __volatile__ (CALL_NOSPEC
				      : "=A"(hv_status),
					"+c"(input1_lo),
					ASM_CALL_CONSTRAINT
				      :	"A" (control),
					"b" (input1_hi),
					THUNK_TARGET(hv_hypercall_pg)
				      : "cc", "edi", "esi");
	}
#endif
	return hv_status;
}
/* Fast hypercall with 16 bytes of input */
static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2)
{
	/* Fast-call bit: both inputs are passed in registers. */
	u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT;

#ifdef CONFIG_X86_64
	{
		/*
		 * RCX = control, RDX = input1, R8 = input2; status
		 * returns in RAX.
		 */
		__asm__ __volatile__("mov %4, %%r8\n"
				     CALL_NOSPEC
				     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
				       "+c" (control), "+d" (input1)
				     : "r" (input2),
				       THUNK_TARGET(hv_hypercall_pg)
				     : "cc", "r8", "r9", "r10", "r11");
	}
#else
	{
		/*
		 * 32-bit: control in EDX:EAX ("A"), input1 split into
		 * EBX:ECX, input2 split into EDI:ESI.
		 */
		u32 input1_hi = upper_32_bits(input1);
		u32 input1_lo = lower_32_bits(input1);
		u32 input2_hi = upper_32_bits(input2);
		u32 input2_lo = lower_32_bits(input2);

		__asm__ __volatile__ (CALL_NOSPEC
				      : "=A"(hv_status),
					"+c"(input1_lo), ASM_CALL_CONSTRAINT
				      :	"A" (control), "b" (input1_hi),
					"D"(input2_hi), "S"(input2_lo),
					THUNK_TARGET(hv_hypercall_pg)
				      : "cc");
	}
#endif
	return hv_status;
}
  119. extern struct hv_vp_assist_page **hv_vp_assist_page;
  120. static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
  121. {
  122. if (!hv_vp_assist_page)
  123. return NULL;
  124. return hv_vp_assist_page[cpu];
  125. }
  126. void __init hyperv_init(void);
  127. void hyperv_setup_mmu_ops(void);
  128. void set_hv_tscchange_cb(void (*cb)(void));
  129. void clear_hv_tscchange_cb(void);
  130. void hyperv_stop_tsc_emulation(void);
  131. int hyperv_flush_guest_mapping(u64 as);
  132. int hyperv_flush_guest_mapping_range(u64 as,
  133. hyperv_fill_flush_list_func fill_func, void *data);
  134. int hyperv_fill_flush_guest_mapping_list(
  135. struct hv_guest_mapping_flush_list *flush,
  136. u64 start_gfn, u64 end_gfn);
  137. #ifdef CONFIG_X86_64
  138. void hv_apic_init(void);
  139. void __init hv_init_spinlocks(void);
  140. bool hv_vcpu_is_preempted(int vcpu);
  141. #else
  142. static inline void hv_apic_init(void) {}
  143. #endif
  144. struct irq_domain *hv_create_pci_msi_domain(void);
  145. int hv_map_ioapic_interrupt(int ioapic_id, bool level, int vcpu, int vector,
  146. struct hv_interrupt_entry *entry);
  147. int hv_unmap_ioapic_interrupt(int ioapic_id, struct hv_interrupt_entry *entry);
  148. int hv_set_mem_host_visibility(unsigned long addr, int numpages, bool visible);
  149. #ifdef CONFIG_AMD_MEM_ENCRYPT
  150. void hv_ghcb_msr_write(u64 msr, u64 value);
  151. void hv_ghcb_msr_read(u64 msr, u64 *value);
  152. bool hv_ghcb_negotiate_protocol(void);
  153. void hv_ghcb_terminate(unsigned int set, unsigned int reason);
  154. #else
/* Stubs for !CONFIG_AMD_MEM_ENCRYPT builds: no GHCB support. */
static inline void hv_ghcb_msr_write(u64 msr, u64 value) {}
static inline void hv_ghcb_msr_read(u64 msr, u64 *value) {}
static inline bool hv_ghcb_negotiate_protocol(void) { return false; }
static inline void hv_ghcb_terminate(unsigned int set, unsigned int reason) {}
  159. #endif
  160. extern bool hv_isolation_type_snp(void);
  161. static inline bool hv_is_synic_reg(unsigned int reg)
  162. {
  163. if ((reg >= HV_REGISTER_SCONTROL) &&
  164. (reg <= HV_REGISTER_SINT15))
  165. return true;
  166. return false;
  167. }
  168. static inline u64 hv_get_register(unsigned int reg)
  169. {
  170. u64 value;
  171. if (hv_is_synic_reg(reg) && hv_isolation_type_snp())
  172. hv_ghcb_msr_read(reg, &value);
  173. else
  174. rdmsrl(reg, value);
  175. return value;
  176. }
  177. static inline void hv_set_register(unsigned int reg, u64 value)
  178. {
  179. if (hv_is_synic_reg(reg) && hv_isolation_type_snp()) {
  180. hv_ghcb_msr_write(reg, value);
  181. /* Write proxy bit via wrmsl instruction */
  182. if (reg >= HV_REGISTER_SINT0 &&
  183. reg <= HV_REGISTER_SINT15)
  184. wrmsrl(reg, value | 1 << 20);
  185. } else {
  186. wrmsrl(reg, value);
  187. }
  188. }
  189. #else /* CONFIG_HYPERV */
  190. static inline void hyperv_init(void) {}
  191. static inline void hyperv_setup_mmu_ops(void) {}
  192. static inline void set_hv_tscchange_cb(void (*cb)(void)) {}
  193. static inline void clear_hv_tscchange_cb(void) {}
  194. static inline void hyperv_stop_tsc_emulation(void) {};
  195. static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
  196. {
  197. return NULL;
  198. }
  199. static inline int hyperv_flush_guest_mapping(u64 as) { return -1; }
  200. static inline int hyperv_flush_guest_mapping_range(u64 as,
  201. hyperv_fill_flush_list_func fill_func, void *data)
  202. {
  203. return -1;
  204. }
  205. static inline void hv_set_register(unsigned int reg, u64 value) { }
  206. static inline u64 hv_get_register(unsigned int reg) { return 0; }
  207. static inline int hv_set_mem_host_visibility(unsigned long addr, int numpages,
  208. bool visible)
  209. {
  210. return -1;
  211. }
  212. #endif /* CONFIG_HYPERV */
  213. #include <asm-generic/mshyperv.h>
  214. #endif