Merge tag 'kvm-4.13-2' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull more KVM updates from Radim Krčmář:
 "Second batch of KVM updates for v4.13

  Common:
   - add uevents for VM creation/destruction
   - annotate and properly access RCU-protected objects

  s390:
   - rename IOCTL added in the first v4.13 merge

  x86:
   - emulate VMLOAD VMSAVE feature in SVM
   - support paravirtual asynchronous page fault while nested
   - add Hyper-V userspace interfaces for better migration
   - improve master clock corner cases
   - extend internal error reporting after EPT misconfig
   - correct single-stepping of emulated instructions in SVM
   - handle MCE during VM entry
   - fix nVMX VM entry checks and nVMX VMCS shadowing"

* tag 'kvm-4.13-2' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (28 commits)
  kvm: x86: hyperv: make VP_INDEX managed by userspace
  KVM: async_pf: Let guest support delivery of async_pf from guest mode
  KVM: async_pf: Force a nested vmexit if the injected #PF is async_pf
  KVM: async_pf: Add L1 guest async_pf #PF vmexit handler
  KVM: x86: Simplify kvm_x86_ops->queue_exception parameter list
  kvm: x86: hyperv: add KVM_CAP_HYPERV_SYNIC2
  KVM: x86: make backwards_tsc_observed a per-VM variable
  KVM: trigger uevents when creating or destroying a VM
  KVM: SVM: Enable Virtual VMLOAD VMSAVE feature
  KVM: SVM: Add Virtual VMLOAD VMSAVE feature definition
  KVM: SVM: Rename lbr_ctl field in the vmcb control area
  KVM: SVM: Prepare for new bit definition in lbr_ctl
  KVM: SVM: handle singlestep exception when skipping emulated instructions
  KVM: x86: take slots_lock in kvm_free_pit
  KVM: s390: Fix KVM_S390_GET_CMMA_BITS ioctl definition
  kvm: vmx: Properly handle machine check during VM-entry
  KVM: x86: update master clock before computing kvmclock_offset
  kvm: nVMX: Shadow "high" parts of shadowed 64-bit VMCS fields
  kvm: nVMX: Fix nested_vmx_check_msr_bitmap_controls
  kvm: nVMX: Validate the I/O bitmaps on nested VM-entry
  ...
@@ -286,6 +286,7 @@
 #define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
 #define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
 #define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */
+#define X86_FEATURE_VIRTUAL_VMLOAD_VMSAVE (15*32+15) /* Virtual VMLOAD VMSAVE */

 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */
 #define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
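Note: the new CPUID bit is what lets the SVM code decide at init time whether the hardware can virtualize VMLOAD/VMSAVE for the guest. A minimal sketch of that gating, assuming a module-level knob named "vls" (the knob name is an assumption for illustration, not taken from this diff):

/* Sketch only: gate an assumed "vls" knob on the new CPU feature bit. */
static int vls = true;

static void svm_check_virtual_vmload_vmsave(void)
{
        if (!boot_cpu_has(X86_FEATURE_VIRTUAL_VMLOAD_VMSAVE))
                vls = false;    /* hardware does not advertise the feature */
}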
@@ -23,6 +23,7 @@ struct x86_exception {
         u16 error_code;
         bool nested_page_fault;
         u64 address; /* cr2 or nested page fault gpa */
+        u8 async_page_fault;
 };

 /*
@@ -462,10 +462,12 @@ struct kvm_vcpu_hv_synic {
         DECLARE_BITMAP(auto_eoi_bitmap, 256);
         DECLARE_BITMAP(vec_bitmap, 256);
         bool active;
+        bool dont_zero_synic_pages;
 };

 /* Hyper-V per vcpu emulation context */
 struct kvm_vcpu_hv {
+        u32 vp_index;
         u64 hv_vapic;
         s64 runtime_offset;
         struct kvm_vcpu_hv_synic synic;
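Note: with vp_index now a field that userspace manages (see "kvm: x86: hyperv: make VP_INDEX managed by userspace"), a VMM can pin a vCPU's Hyper-V VP index by writing the VP_INDEX MSR through the regular KVM_SET_MSRS ioctl. A hedged userspace sketch, assuming an already-created vCPU fd and the Hyper-V TLFS value 0x40000002 for the MSR index; error handling omitted:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define HV_X64_MSR_VP_INDEX 0x40000002  /* assumed TLFS value, not defined in <linux/kvm.h> */

static int set_vp_index(int vcpu_fd, unsigned int vp_index)
{
        struct {
                struct kvm_msrs hdr;
                struct kvm_msr_entry entry;
        } msrs;

        memset(&msrs, 0, sizeof(msrs));
        msrs.hdr.nmsrs = 1;
        msrs.entry.index = HV_X64_MSR_VP_INDEX;
        msrs.entry.data = vp_index;

        /* KVM_SET_MSRS returns the number of MSRs actually set (1 on success). */
        return ioctl(vcpu_fd, KVM_SET_MSRS, &msrs);
}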
@@ -549,6 +551,7 @@ struct kvm_vcpu_arch {
                 bool reinject;
                 u8 nr;
                 u32 error_code;
+                u8 nested_apf;
         } exception;

         struct kvm_queued_interrupt {
@@ -649,6 +652,9 @@ struct kvm_vcpu_arch {
                 u64 msr_val;
                 u32 id;
                 bool send_user_only;
+                u32 host_apf_reason;
+                unsigned long nested_apf_token;
+                bool delivery_as_pf_vmexit;
         } apf;

         /* OSVW MSRs (AMD only) */
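Note: the new apf fields carry the extra state needed for paravirtual async page faults under nesting; in particular, delivery_as_pf_vmexit records that the L1 guest asked for async #PFs hitting L2 to be delivered as a #PF vmexit rather than injected directly. A simplified, illustrative check (not the exact code from this merge):

/*
 * Illustrative only: deliver the async #PF as a vmexit to L1 when a
 * nested guest is running and L1 opted in via delivery_as_pf_vmexit.
 */
static bool apf_wants_pf_vmexit(struct kvm_vcpu *vcpu)
{
        return is_guest_mode(vcpu) &&
               vcpu->arch.apf.delivery_as_pf_vmexit;
}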
@@ -803,6 +809,7 @@ struct kvm_arch {
         int audit_point;
 #endif

+        bool backwards_tsc_observed;
         bool boot_vcpu_runs_old_kvmclock;
         u32 bsp_vcpu_id;

@@ -952,9 +959,7 @@ struct kvm_x86_ops {
                                 unsigned char *hypercall_addr);
         void (*set_irq)(struct kvm_vcpu *vcpu);
         void (*set_nmi)(struct kvm_vcpu *vcpu);
-        void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
-                                bool has_error_code, u32 error_code,
-                                bool reinject);
+        void (*queue_exception)(struct kvm_vcpu *vcpu);
         void (*cancel_injection)(struct kvm_vcpu *vcpu);
         int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
         int (*nmi_allowed)(struct kvm_vcpu *vcpu);
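Note: the narrowed queue_exception callback works because the exception being injected is already recorded in vcpu->arch.exception (see the kvm_vcpu_arch hunks above), so a backend can read the vector and error code from there instead of receiving them as arguments. A simplified sketch of what an implementation might look like after this change (nr and error_code appear in the hunks above; the body is illustrative):

/* Sketch: read the queued exception out of vcpu->arch.exception. */
static void example_queue_exception(struct kvm_vcpu *vcpu)
{
        unsigned nr = vcpu->arch.exception.nr;
        u32 error_code = vcpu->arch.exception.error_code;

        /* ... program the hardware event-injection fields from nr and
         * error_code ... */
}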
@@ -83,7 +83,7 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
         u32 event_inj;
         u32 event_inj_err;
         u64 nested_cr3;
-        u64 lbr_ctl;
+        u64 virt_ext;
         u32 clean;
         u32 reserved_5;
         u64 next_rip;
@@ -119,6 +119,9 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 #define AVIC_ENABLE_SHIFT 31
 #define AVIC_ENABLE_MASK (1 << AVIC_ENABLE_SHIFT)

+#define LBR_CTL_ENABLE_MASK BIT_ULL(0)
+#define VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK BIT_ULL(1)
+
 #define SVM_INTERRUPT_SHADOW_MASK 1

 #define SVM_IOIO_STR_SHIFT 2
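Note: with lbr_ctl renamed to virt_ext, the LBR-virtualization enable bit and the new Virtual VMLOAD/VMSAVE enable bit share one VMCB field, so turning the feature on for a guest reduces to setting the new mask. A minimal sketch (the helper name is illustrative; the CPUID gate from the cpufeatures.h hunk above is omitted):

/* Sketch: enable virtualized VMLOAD/VMSAVE handling for this guest's VMCB. */
static void enable_virtual_vmload_vmsave(struct vmcb *vmcb)
{
        vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
}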