kvm_host.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Anup Patel <[email protected]>
 */

#ifndef __RISCV_KVM_HOST_H__
#define __RISCV_KVM_HOST_H__

#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/spinlock.h>
#include <asm/csr.h>
#include <asm/hwcap.h>
#include <asm/kvm_vcpu_fp.h>
#include <asm/kvm_vcpu_insn.h>
#include <asm/kvm_vcpu_timer.h>

#define KVM_MAX_VCPUS			1024

#define KVM_HALT_POLL_NS_DEFAULT	500000

#define KVM_VCPU_MAX_FEATURES		0

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_VCPU_RESET		KVM_ARCH_REQ(1)
#define KVM_REQ_UPDATE_HGATP		KVM_ARCH_REQ(2)
#define KVM_REQ_FENCE_I \
	KVM_ARCH_REQ_FLAGS(3, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HFENCE_GVMA_VMID_ALL	KVM_REQ_TLB_FLUSH
#define KVM_REQ_HFENCE_VVMA_ALL \
	KVM_ARCH_REQ_FLAGS(4, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HFENCE \
	KVM_ARCH_REQ_FLAGS(5, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
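
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * the KVM_REQ_* values above are raised and consumed with the generic
 * request helpers from <linux/kvm_host.h>.
 */
#if 0	/* example only */
/* producer side, any context: */
kvm_make_request(KVM_REQ_SLEEP, vcpu);
kvm_vcpu_kick(vcpu);			/* make sure the vCPU notices */

/* consumer side, on the vCPU before guest entry: */
if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
	kvm_vcpu_halt(vcpu);		/* block until woken */
#endif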

enum kvm_riscv_hfence_type {
	KVM_RISCV_HFENCE_UNKNOWN = 0,
	KVM_RISCV_HFENCE_GVMA_VMID_GPA,
	KVM_RISCV_HFENCE_VVMA_ASID_GVA,
	KVM_RISCV_HFENCE_VVMA_ASID_ALL,
	KVM_RISCV_HFENCE_VVMA_GVA,
};

struct kvm_riscv_hfence {
	enum kvm_riscv_hfence_type type;
	unsigned long asid;	/* ASID, used by the VVMA_ASID_* types */
	unsigned long order;	/* page order of the flush granularity */
	gpa_t addr;		/* base address (GPA or GVA, per type) */
	gpa_t size;		/* size of the range in bytes */
};

#define KVM_RISCV_VCPU_MAX_HFENCE	64

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 ecall_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 csr_exit_user;
	u64 csr_exit_kernel;
	u64 signal_exits;
	u64 exits;
};

struct kvm_arch_memory_slot {
};

struct kvm_vmid {
	/*
	 * Writes to vmid_version and vmid happen with vmid_lock held,
	 * whereas reads happen without any lock held (see the sketch
	 * after this struct).
	 */
	unsigned long vmid_version;
	unsigned long vmid;
};
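
/*
 * Illustrative sketch (an assumption, not the original source): the
 * updater publishes vmid/vmid_version under the allocator's lock, while
 * the lockless read side pairs with it via READ_ONCE()/WRITE_ONCE().
 * "vmid_lock", "new_vmid", and "current_version" below are hypothetical
 * names for illustration only; they are not declared in this header.
 */
#if 0	/* example only */
/* writer, with vmid_lock held: */
WRITE_ONCE(vmid->vmid, new_vmid);
WRITE_ONCE(vmid->vmid_version, current_version);

/* lockless reader, e.g. on vCPU run-loop entry: */
if (READ_ONCE(vmid->vmid_version) != current_version)
	kvm_make_request(KVM_REQ_UPDATE_HGATP, vcpu);
#endif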

struct kvm_arch {
	/* G-stage vmid */
	struct kvm_vmid vmid;

	/* G-stage page table */
	pgd_t *pgd;
	phys_addr_t pgd_phys;

	/* Guest Timer */
	struct kvm_guest_timer timer;
};

struct kvm_sbi_context {
	int return_handled;
};

struct kvm_cpu_trap {
	unsigned long sepc;	/* PC at the time of the trap */
	unsigned long scause;	/* trap cause */
	unsigned long stval;	/* supervisor trap value */
	unsigned long htval;	/* hypervisor trap value (guest-physical info) */
	unsigned long htinst;	/* hypervisor trap instruction (transformed) */
};

struct kvm_cpu_context {
	unsigned long zero;
	unsigned long ra;
	unsigned long sp;
	unsigned long gp;
	unsigned long tp;
	unsigned long t0;
	unsigned long t1;
	unsigned long t2;
	unsigned long s0;
	unsigned long s1;
	unsigned long a0;
	unsigned long a1;
	unsigned long a2;
	unsigned long a3;
	unsigned long a4;
	unsigned long a5;
	unsigned long a6;
	unsigned long a7;
	unsigned long s2;
	unsigned long s3;
	unsigned long s4;
	unsigned long s5;
	unsigned long s6;
	unsigned long s7;
	unsigned long s8;
	unsigned long s9;
	unsigned long s10;
	unsigned long s11;
	unsigned long t3;
	unsigned long t4;
	unsigned long t5;
	unsigned long t6;
	unsigned long sepc;
	unsigned long sstatus;
	unsigned long hstatus;
	union __riscv_fp_state fp;
};

struct kvm_vcpu_csr {
	unsigned long vsstatus;
	unsigned long vsie;
	unsigned long vstvec;
	unsigned long vsscratch;
	unsigned long vsepc;
	unsigned long vscause;
	unsigned long vstval;
	unsigned long hvip;
	unsigned long vsatp;
	unsigned long scounteren;
};

struct kvm_vcpu_arch {
	/* VCPU ran at least once */
	bool ran_atleast_once;

	/* Last Host CPU on which Guest VCPU exited */
	int last_exit_cpu;

	/* ISA feature bits (similar to MISA) */
	DECLARE_BITMAP(isa, RISCV_ISA_EXT_MAX);

	/* SSCRATCH, STVEC, and SCOUNTEREN of Host */
	unsigned long host_sscratch;
	unsigned long host_stvec;
	unsigned long host_scounteren;

	/* CPU context of Host */
	struct kvm_cpu_context host_context;

	/* CPU context of Guest VCPU */
	struct kvm_cpu_context guest_context;

	/* CPU CSR context of Guest VCPU */
	struct kvm_vcpu_csr guest_csr;

	/* CPU context upon Guest VCPU reset */
	struct kvm_cpu_context guest_reset_context;

	/* CPU CSR context upon Guest VCPU reset */
	struct kvm_vcpu_csr guest_reset_csr;

	/*
	 * VCPU interrupts
	 *
	 * We have a lockless approach for tracking pending VCPU interrupts,
	 * implemented using atomic bitops. The irqs_pending bitmap represents
	 * pending interrupts, whereas irqs_pending_mask represents the bits
	 * that changed in irqs_pending. Our approach is modeled on the
	 * multiple-producer/single-consumer problem, where the consumer is
	 * the VCPU itself (see the sketch after this struct).
	 */
	unsigned long irqs_pending;
	unsigned long irqs_pending_mask;

	/* VCPU Timer */
	struct kvm_vcpu_timer timer;

	/* HFENCE request queue */
	spinlock_t hfence_lock;
	unsigned long hfence_head;
	unsigned long hfence_tail;
	struct kvm_riscv_hfence hfence_queue[KVM_RISCV_VCPU_MAX_HFENCE];

	/* MMIO instruction details */
	struct kvm_mmio_decode mmio_decode;

	/* CSR instruction details */
	struct kvm_csr_decode csr_decode;

	/* SBI context */
	struct kvm_sbi_context sbi_context;

	/* Cache pages needed to program page tables with spinlock held */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* VCPU power-off state */
	bool power_off;

	/* Don't run the VCPU (blocked) */
	bool pause;
};
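
/*
 * Illustrative sketch (an assumption, not the original source) of the
 * multiple-producer/single-consumer interrupt scheme described in
 * struct kvm_vcpu_arch above.
 */
#if 0	/* example only */
/* producer, any context: set the pending bit, then flag the change */
set_bit(irq, &vcpu->arch.irqs_pending);
smp_mb__before_atomic();
set_bit(irq, &vcpu->arch.irqs_pending_mask);
kvm_vcpu_kick(vcpu);

/* consumer (the vCPU itself): fold flagged bits into the hvip image */
unsigned long mask = xchg_acquire(&vcpu->arch.irqs_pending_mask, 0);
unsigned long val = READ_ONCE(vcpu->arch.irqs_pending) & mask;

vcpu->arch.guest_csr.hvip &= ~mask;
vcpu->arch.guest_csr.hvip |= val;
#endif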

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}

#define KVM_ARCH_WANT_MMU_NOTIFIER

#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER		12

void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
					  gpa_t gpa, gpa_t gpsz,
					  unsigned long order);
void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid);
void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
				     unsigned long order);
void kvm_riscv_local_hfence_gvma_all(void);
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
					  unsigned long asid,
					  unsigned long gva,
					  unsigned long gvsz,
					  unsigned long order);
void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
					  unsigned long asid);
void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
				     unsigned long gva, unsigned long gvsz,
				     unsigned long order);
void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);

void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu);

void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu);
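
/*
 * Illustrative sketch (an assumption, not the kernel's actual code):
 * conceptually, kvm_riscv_hfence_process() drains one entry at a time
 * from hfence_queue under hfence_lock and applies it with the matching
 * local hfence helper declared above.
 */
#if 0	/* example only */
struct kvm_riscv_hfence d;
unsigned long vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);

spin_lock(&vcpu->arch.hfence_lock);
d = vcpu->arch.hfence_queue[vcpu->arch.hfence_head];	/* dequeue at head */
vcpu->arch.hfence_head = (vcpu->arch.hfence_head + 1) %
			 KVM_RISCV_VCPU_MAX_HFENCE;
spin_unlock(&vcpu->arch.hfence_lock);

switch (d.type) {
case KVM_RISCV_HFENCE_GVMA_VMID_GPA:
	kvm_riscv_local_hfence_gvma_vmid_gpa(vmid, d.addr, d.size, d.order);
	break;
case KVM_RISCV_HFENCE_VVMA_ASID_GVA:
	kvm_riscv_local_hfence_vvma_asid_gva(vmid, d.asid, d.addr,
					     d.size, d.order);
	break;
/* ... remaining KVM_RISCV_HFENCE_* types handled likewise ... */
}
#endif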

void kvm_riscv_fence_i(struct kvm *kvm,
		       unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    gpa_t gpa, gpa_t gpsz,
				    unsigned long order);
void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long gva, unsigned long gvsz,
				    unsigned long order, unsigned long asid);
void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long asid);
void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask,
			       unsigned long gva, unsigned long gvsz,
			       unsigned long order);
void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask);

int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
			     phys_addr_t hpa, unsigned long size,
			     bool writable, bool in_atomic);
void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa,
			      unsigned long size);
int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
			 struct kvm_memory_slot *memslot,
			 gpa_t gpa, unsigned long hva, bool is_write);
int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm);
void kvm_riscv_gstage_free_pgd(struct kvm *kvm);
void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu);
void kvm_riscv_gstage_mode_detect(void);
unsigned long kvm_riscv_gstage_mode(void);
int kvm_riscv_gstage_gpa_bits(void);

void kvm_riscv_gstage_vmid_detect(void);
unsigned long kvm_riscv_gstage_vmid_bits(void);
int kvm_riscv_gstage_vmid_init(struct kvm *kvm);
bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid);
void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu);

void __kvm_riscv_unpriv_trap(void);

unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
					 bool read_insn,
					 unsigned long guest_addr,
					 struct kvm_cpu_trap *trap);
void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
				  struct kvm_cpu_trap *trap);
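
/*
 * Illustrative sketch (an assumption, not the original source): a
 * typical use of the unprivileged-read helper is fetching the
 * instruction the guest trapped on; if that read itself faults, the
 * fault is bounced back into the guest.
 */
#if 0	/* example only */
struct kvm_cpu_trap utrap = { 0 };
unsigned long insn;

insn = kvm_riscv_vcpu_unpriv_read(vcpu, true,
				  vcpu->arch.guest_context.sepc, &utrap);
if (utrap.scause) {
	utrap.sepc = vcpu->arch.guest_context.sepc;
	kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
}
#endif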

int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
			struct kvm_cpu_trap *trap);

void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch);

int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu);
bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask);
void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);

int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run);

#endif /* __RISCV_KVM_HOST_H__ */