/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include <asm/mce.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"

struct kvm_caps {
	/* control of guest tsc rate supported? */
	bool has_tsc_control;
	/* maximum supported tsc_khz for guests */
	u32 max_guest_tsc_khz;
	/* number of bits of the fractional part of the TSC scaling ratio */
	u8 tsc_scaling_ratio_frac_bits;
	/* maximum allowed value of TSC scaling ratio */
	u64 max_tsc_scaling_ratio;
	/* 1ull << kvm_caps.tsc_scaling_ratio_frac_bits */
	u64 default_tsc_scaling_ratio;
	/* bus lock detection supported? */
	bool has_bus_lock_exit;
	/* notify VM exit supported? */
	bool has_notify_vmexit;

	u64 supported_mce_cap;
	u64 supported_xcr0;
	u64 supported_xss;
	u64 supported_perf_cap;
};

void kvm_spurious_fault(void);

#define KVM_NESTED_VMENTER_CONSISTENCY_CHECK(consistency_check) \
({ \
	bool failed = (consistency_check); \
	if (failed) \
		trace_kvm_nested_vmenter_failed(#consistency_check, 0); \
	failed; \
})
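
/*
 * Illustrative usage (the alias and the condition are examples, not
 * mandated by this header): nested-virt code typically wraps the macro
 * in a short local alias so checks stay on one line, e.g.
 *
 *	#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK
 *
 *	if (CC(vmcs12->guest_cr0 & ~reserved_cr0_bits))
 *		return -EINVAL;
 *
 * Pass the raw expression rather than a precomputed bool: the condition
 * itself is stringified into the tracepoint.
 */
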
#define KVM_DEFAULT_PLE_GAP		128
#define KVM_VMX_DEFAULT_PLE_WINDOW	4096
#define KVM_DEFAULT_PLE_WINDOW_GROW	2
#define KVM_DEFAULT_PLE_WINDOW_SHRINK	0
#define KVM_VMX_DEFAULT_PLE_WINDOW_MAX	UINT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW_MAX	USHRT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW	3000

static inline unsigned int __grow_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int max)
{
	u64 ret = val;

	if (modifier < 1)
		return base;

	if (modifier < base)
		ret *= modifier;
	else
		ret += modifier;

	return min(ret, (u64)max);
}
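
/*
 * Worked example for __grow_ple_window() above: with val = 4096,
 * base = 4096, modifier = KVM_DEFAULT_PLE_WINDOW_GROW (2) and
 * max = UINT_MAX, the modifier is below the base, so growth is
 * multiplicative: 4096 * 2 = 8192. A modifier >= base grows additively
 * instead, and the u64 intermediate plus the min() clamp keep the
 * result from overflowing the u32 range.
 */
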
static inline unsigned int __shrink_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int min)
{
	if (modifier < 1)
		return base;

	if (modifier < base)
		val /= modifier;
	else
		val -= modifier;

	return max(val, min);
}
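
/*
 * Shrinking mirrors growing: a modifier below the base divides the
 * window (e.g. 8192 / 2 = 4096), a larger one subtracts, and the result
 * is clamped to the given floor. Note that the default shrink modifier
 * of 0 takes the "modifier < 1" path and resets the window to the base.
 */
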
#define MSR_IA32_CR_PAT_DEFAULT	0x0007040600070406ULL

void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu);
int kvm_check_nested_events(struct kvm_vcpu *vcpu);

static inline bool kvm_is_exception_pending(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.pending ||
	       vcpu->arch.exception_vmexit.pending ||
	       kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu);
}

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
	vcpu->arch.exception.injected = false;
	vcpu->arch.exception_vmexit.pending = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
				       bool soft)
{
	vcpu->arch.interrupt.injected = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.injected = false;
}

static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.injected || vcpu->arch.interrupt.injected ||
	       vcpu->arch.nmi_injected;
}

static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	return 0;
#endif
}

static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	WARN_ON_ONCE(vcpu->arch.guest_state_protected);

	if (!is_long_mode(vcpu))
		return false;
	static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
	return cs_l;
}

static inline bool is_64_bit_hypercall(struct kvm_vcpu *vcpu)
{
	/*
	 * If running with protected guest state, the CS register is not
	 * accessible. The hypercall register values will have had to be
	 * provided in 64-bit mode, so assume the guest is in 64-bit.
	 */
	return vcpu->arch.guest_state_protected || is_64_bit_mode(vcpu);
}

static inline bool x86_exception_has_error_code(unsigned int vector)
{
	static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
			BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
			BIT(PF_VECTOR) | BIT(AC_VECTOR);

	return (1U << vector) & exception_has_error_code;
}

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}

static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
{
	return !is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu);
}

static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
}

static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
{
	return !__is_canonical_address(la, vcpu_virt_addr_bits(vcpu));
}
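
/*
 * Worked example: with 48 virtual address bits, bits 63:48 must be
 * copies of bit 47, so 0xffff800000000000 is canonical while
 * 0x0000800000000000 is not. When CR4.LA57 is set, the check widens to
 * 57 bits, i.e. bits 63:57 must match bit 56.
 */
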
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	u64 gen = kvm_memslots(vcpu->kvm)->generation;

	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
		return;

	/*
	 * If this is a shadow nested page table, the "GVA" is
	 * actually a nGPA.
	 */
	vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
	vcpu->arch.mmio_access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = gen;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	    vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	    vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}

static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
	unsigned long val = kvm_register_read_raw(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_write(struct kvm_vcpu *vcpu,
				      int reg, unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	return kvm_register_write_raw(vcpu, reg, val);
}
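
/*
 * Outside of 64-bit mode the architectural GPR width is 32 bits, so the
 * two wrappers above truncate on both read and write; e.g. writing
 * 0x100000001 through kvm_register_write() in a non-64-bit mode stores
 * 0x00000001 (illustrative value). Callers that need the untruncated
 * 64-bit value must use the _raw() variants directly.
 */
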
static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
	return !(kvm->arch.disabled_quirks & quirk);
}

void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

u64 get_kvmclock_ns(struct kvm *kvm);

int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
			gva_t addr, void *val, unsigned int bytes,
			struct x86_exception *exception);

int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
				gva_t addr, void *val, unsigned int bytes,
				struct x86_exception *exception);

int handle_ud(struct kvm_vcpu *vcpu);

void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu,
				   struct kvm_queued_exception *ex);

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num);
bool kvm_vector_hashing_enabled(void);
void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
				    void *insn, int insn_len);
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			    int emulation_type, void *insn, int insn_len);
fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);

extern u64 host_xcr0;
extern u64 host_xss;

extern struct kvm_caps kvm_caps;

extern bool enable_pmu;

static inline bool kvm_mpx_supported(void)
{
	return (kvm_caps.supported_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
		== (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
}

extern unsigned int min_timer_period_us;

extern bool enable_vmware_backdoor;

extern int pi_inject_timer;

extern bool report_ignored_msrs;

extern bool eager_page_split;

static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
				   vcpu->arch.virtual_tsc_shift);
}

/* Same "calling convention" as do_div:
 * - divide (n << 32) by base
 * - put result in n
 * - return remainder
 */
#define do_shl32_div32(n, base) \
({ \
	u32 __quot, __rem; \
	asm("divl %2" : "=a" (__quot), "=d" (__rem) \
	    : "rm" (base), "0" (0), "1" ((u32) n)); \
	n = __quot; \
	__rem; \
})
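
/*
 * Example of the convention above: with n = 1 and base = 4 the dividend
 * is 1 << 32, so the macro leaves n = 0x40000000 and evaluates to the
 * remainder, 0. As with do_div(), the quotient must fit in 32 bits or
 * the divl instruction faults with #DE.
 */
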
static inline bool kvm_mwait_in_guest(struct kvm *kvm)
{
	return kvm->arch.mwait_in_guest;
}

static inline bool kvm_hlt_in_guest(struct kvm *kvm)
{
	return kvm->arch.hlt_in_guest;
}

static inline bool kvm_pause_in_guest(struct kvm *kvm)
{
	return kvm->arch.pause_in_guest;
}

static inline bool kvm_cstate_in_guest(struct kvm *kvm)
{
	return kvm->arch.cstate_in_guest;
}

static inline bool kvm_notify_vmexit_enabled(struct kvm *kvm)
{
	return kvm->arch.notify_vmexit_flags & KVM_X86_NOTIFY_VMEXIT_ENABLED;
}

enum kvm_intr_type {
	/* Values are arbitrary, but must be non-zero. */
	KVM_HANDLING_IRQ = 1,
	KVM_HANDLING_NMI,
};

static inline void kvm_before_interrupt(struct kvm_vcpu *vcpu,
					enum kvm_intr_type intr)
{
	WRITE_ONCE(vcpu->arch.handling_intr_from_guest, (u8)intr);
}

static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
{
	WRITE_ONCE(vcpu->arch.handling_intr_from_guest, 0);
}

static inline bool kvm_handling_nmi_from_guest(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.handling_intr_from_guest == KVM_HANDLING_NMI;
}

static inline bool kvm_pat_valid(u64 data)
{
	if (data & 0xF8F8F8F8F8F8F8F8ull)
		return false;

	/* 0, 1, 4, 5, 6, 7 are valid values. */
	return (data | ((data & 0x0202020202020202ull) << 1)) == data;
}
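
/*
 * The second check rejects the reserved per-byte encodings 2 and 3: if
 * bit 1 of a PAT entry is set, bit 2 must also be set for the OR to be
 * a no-op. E.g. an entry of 2 (0b010) becomes 6 (0b110) after the OR
 * and fails the comparison, whereas 6 itself passes unchanged.
 */
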
static inline bool kvm_dr7_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}

static inline bool kvm_dr6_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}

/*
 * Trigger machine check on the host. We assume all the MSRs are already set up
 * by the CPU and that we still run on the same CPU as the MCE occurred on.
 * We pass a fake environment to the machine check handler because we want
 * the guest to be always treated like user space, no matter what context
 * it used internally.
 */
static inline void kvm_machine_check(void)
{
#if defined(CONFIG_X86_MCE)
	struct pt_regs regs = {
		.cs = 3, /* Fake ring 3 no matter what the guest ran on */
		.flags = X86_EFLAGS_IF,
	};

	do_machine_check(&regs);
#endif
}

void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
int kvm_spec_ctrl_test_value(u64 value);
bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
			      struct x86_exception *e);
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);

/*
 * Internal error codes that are used to indicate that MSR emulation
 * encountered an error that should result in #GP in the guest, unless
 * userspace handles it.
 */
#define KVM_MSR_RET_INVALID	2	/* in-kernel MSR emulation #GP condition */
#define KVM_MSR_RET_FILTERED	3	/* #GP due to userspace MSR filter */

#define __cr4_reserved_bits(__cpu_has, __c) \
({ \
	u64 __reserved_bits = CR4_RESERVED_BITS; \
 \
	if (!__cpu_has(__c, X86_FEATURE_XSAVE)) \
		__reserved_bits |= X86_CR4_OSXSAVE; \
	if (!__cpu_has(__c, X86_FEATURE_SMEP)) \
		__reserved_bits |= X86_CR4_SMEP; \
	if (!__cpu_has(__c, X86_FEATURE_SMAP)) \
		__reserved_bits |= X86_CR4_SMAP; \
	if (!__cpu_has(__c, X86_FEATURE_FSGSBASE)) \
		__reserved_bits |= X86_CR4_FSGSBASE; \
	if (!__cpu_has(__c, X86_FEATURE_PKU)) \
		__reserved_bits |= X86_CR4_PKE; \
	if (!__cpu_has(__c, X86_FEATURE_LA57)) \
		__reserved_bits |= X86_CR4_LA57; \
	if (!__cpu_has(__c, X86_FEATURE_UMIP)) \
		__reserved_bits |= X86_CR4_UMIP; \
	if (!__cpu_has(__c, X86_FEATURE_VMX)) \
		__reserved_bits |= X86_CR4_VMXE; \
	if (!__cpu_has(__c, X86_FEATURE_PCID)) \
		__reserved_bits |= X86_CR4_PCIDE; \
	__reserved_bits; \
})
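
/*
 * Illustrative callers (signatures assumed, not defined here): the same
 * macro can compute reserved bits for the host, e.g.
 * __cr4_reserved_bits(cpu_has, &boot_cpu_data), or for a guest by
 * pairing it with a CPUID-based predicate such as guest_cpuid_has().
 * Any CR4 value that intersects the returned mask is invalid in that
 * context.
 */
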
int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes,
			  void *dst);
int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes,
			 void *dst);
int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
			 unsigned int port, void *data, unsigned int count,
			 int in);

#endif