kvm_hyp.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <[email protected]>
 */

#ifndef __ARM64_KVM_HYP_H__
#define __ARM64_KVM_HYP_H__

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/alternative.h>
#include <asm/sysreg.h>

DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DECLARE_PER_CPU(unsigned long, kvm_hyp_vector);
DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
DECLARE_PER_CPU(int, hyp_cpu_number);

#define hyp_smp_processor_id() (__this_cpu_read(hyp_cpu_number))
#define read_sysreg_elx(r,nvh,vh)					\
	({								\
		u64 reg;						\
		asm volatile(ALTERNATIVE(__mrs_s("%0", r##nvh),		\
					 __mrs_s("%0", r##vh),		\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
			     : "=r" (reg));				\
		reg;							\
	})

#define write_sysreg_elx(v,r,nvh,vh)					\
	do {								\
		u64 __val = (u64)(v);					\
		asm volatile(ALTERNATIVE(__msr_s(r##nvh, "%x0"),	\
					 __msr_s(r##vh, "%x0"),		\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
			     : : "rZ" (__val));				\
	} while (0)
/*
 * Unified accessors for registers that have a different encoding
 * between VHE and non-VHE. They must be specified without their "ELx"
 * encoding, but with the SYS_ prefix, as defined in asm/sysreg.h.
 */

#define read_sysreg_el0(r)	read_sysreg_elx(r, _EL0, _EL02)
#define write_sysreg_el0(v,r)	write_sysreg_elx(v, r, _EL0, _EL02)
#define read_sysreg_el1(r)	read_sysreg_elx(r, _EL1, _EL12)
#define write_sysreg_el1(v,r)	write_sysreg_elx(v, r, _EL1, _EL12)
#define read_sysreg_el2(r)	read_sysreg_elx(r, _EL2, _EL1)
#define write_sysreg_el2(v,r)	write_sysreg_elx(v, r, _EL2, _EL1)
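/*
 * Illustrative sketch (not part of the original header): the unified
 * accessors take the SYS_ register name without an ELx suffix, e.g.
 * SYS_SCTLR, and the ALTERNATIVE picks the _EL1 or _EL12 encoding at
 * runtime depending on ARM64_HAS_VIRT_HOST_EXTN. The helpers below are
 * hypothetical and only demonstrate the calling convention.
 */
#if 0	/* example only, never compiled */
static inline u64 __hyp_read_guest_sctlr(void)
{
	/* Reads SCTLR_EL1 on nVHE, SCTLR_EL12 on VHE. */
	return read_sysreg_el1(SYS_SCTLR);
}

static inline void __hyp_write_guest_sctlr(u64 val)
{
	/* Writes SCTLR_EL1 on nVHE, SCTLR_EL12 on VHE. */
	write_sysreg_el1(val, SYS_SCTLR);
}
#endif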
/*
 * Without an __arch_swab32(), we fall back to ___constant_swab32(), but the
 * static inline can allow the compiler to out-of-line this. KVM always wants
 * the macro version as it's always inlined.
 */
#define __kvm_swab32(x) ___constant_swab32(x)
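/*
 * Example (illustrative only): __kvm_swab32(0x12345678) evaluates to
 * 0x78563412; for constant input the shift-and-mask expansion is folded
 * at compile time.
 */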
int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);

void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_save_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_restore_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if);
int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);

#ifdef __KVM_NVHE_HYPERVISOR__
void __timer_enable_traps(struct kvm_vcpu *vcpu);
void __timer_disable_traps(struct kvm_vcpu *vcpu);
#endif
#ifdef __KVM_NVHE_HYPERVISOR__
void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt);
void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt);
#else
void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
#endif

void __debug_switch_to_guest(struct kvm_vcpu *vcpu);
void __debug_switch_to_host(struct kvm_vcpu *vcpu);

#ifdef __KVM_NVHE_HYPERVISOR__
void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu);
void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
#endif

void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
void __sve_save_state(void *sve_pffr, u32 *fpsr);
void __sve_restore_state(void *sve_pffr, u32 *fpsr);

#ifndef __KVM_NVHE_HYPERVISOR__
void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu);
#endif
u64 __guest_enter(struct kvm_vcpu *vcpu);

bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt);

#ifdef __KVM_NVHE_HYPERVISOR__
void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
			       u64 elr, u64 par);
#endif

#ifdef __KVM_NVHE_HYPERVISOR__
void __pkvm_init_switch_pgd(phys_addr_t phys, unsigned long size,
			    phys_addr_t pgd, void *sp, void *cont_fn);
int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
		unsigned long *per_cpu_base, u32 hyp_va_bits);
void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
#endif

#ifdef __KVM_NVHE_HYPERVISOR__
struct user_fpsimd_state *get_host_fpsimd_state(struct kvm_vcpu *vcpu);
struct kvm_host_sve_state *get_host_sve_state(struct kvm_vcpu *vcpu);
#endif
extern u64 kvm_nvhe_sym(id_aa64pfr0_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64pfr1_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64isar0_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64isar1_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64isar2_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64smfr0_el1_sys_val);
extern unsigned long kvm_nvhe_sym(__icache_flags);
extern unsigned int kvm_nvhe_sym(kvm_arm_vmid_bits);
extern bool kvm_nvhe_sym(smccc_trng_available);
extern unsigned int kvm_nvhe_sym(kvm_host_sve_max_vl);

struct kvm_nvhe_clock_data {
	u32 mult;
	u32 shift;
	u64 epoch_ns;
	u64 epoch_cyc;
};
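/*
 * Illustrative sketch (not part of the original header): a mult/shift
 * clock description of this shape is conventionally applied as
 * ns = epoch_ns + ((cyc - epoch_cyc) * mult) >> shift. The helper below
 * is hypothetical and only shows that conversion.
 */
#if 0	/* example only, never compiled */
static inline u64 __hyp_clock_cyc_to_ns(const struct kvm_nvhe_clock_data *data,
					u64 cyc)
{
	/* Standard clocksource-style cycles-to-nanoseconds conversion. */
	return data->epoch_ns +
	       (((cyc - data->epoch_cyc) * data->mult) >> data->shift);
}
#endif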
#endif /* __ARM64_KVM_HYP_H__ */