Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:

 "PPC:
   - Better machine check handling for HV KVM
   - Ability to support guests with threads=2, 4 or 8 on POWER9
   - Fix for a race that could cause delayed recognition of signals
   - Fix for a bug where POWER9 guests could sleep with interrupts pending

  ARM:
   - VCPU request overhaul
   - allow timer and PMU to have their interrupt number selected from
     userspace
   - workaround for Cavium erratum 30115
   - handling of memory poisoning
   - the usual crop of fixes and cleanups

  s390:
   - initial machine check forwarding
   - migration support for the CMMA page hinting information
   - cleanups and fixes

  x86:
   - nested VMX bugfixes and improvements
   - more reliable NMI window detection on AMD
   - APIC timer optimizations

  Generic:
   - VCPU request overhaul + documentation of common code patterns
   - kvm_stat improvements"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (124 commits)
  Update my email address
  kvm: vmx: allow host to access guest MSR_IA32_BNDCFGS
  x86: kvm: mmu: use ept a/d in vmcs02 iff used in vmcs12
  kvm: x86: mmu: allow A/D bits to be disabled in an mmu
  x86: kvm: mmu: make spte mmio mask more explicit
  x86: kvm: mmu: dead code thanks to access tracking
  KVM: PPC: Book3S: Fix typo in XICS-on-XIVE state saving code
  KVM: PPC: Book3S HV: Close race with testing for signals on guest entry
  KVM: PPC: Book3S HV: Simplify dynamic micro-threading code
  KVM: x86: remove ignored type attribute
  KVM: LAPIC: Fix lapic timer injection delay
  KVM: lapic: reorganize restart_apic_timer
  KVM: lapic: reorganize start_hv_timer
  kvm: nVMX: Check memory operand to INVVPID
  KVM: s390: Inject machine check into the nested guest
  KVM: s390: Inject machine check into the guest
  tools/kvm_stat: add new interactive command 'b'
  tools/kvm_stat: add new command line switch '-i'
  tools/kvm_stat: fix error on interactive command 'g'
  KVM: SVM: suppress unnecessary NMI singlestep on GIF=0 and nested exit
  ...
arch/x86/kvm/svm.c
@@ -190,6 +190,7 @@ struct vcpu_svm {
 	struct nested_state nested;
 
 	bool nmi_singlestep;
+	u64 nmi_singlestep_guest_rflags;
 
 	unsigned int3_injected;
 	unsigned long int3_rip;
@@ -964,6 +965,18 @@ static void svm_disable_lbrv(struct vcpu_svm *svm)
 	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
 }
 
+static void disable_nmi_singlestep(struct vcpu_svm *svm)
+{
+	svm->nmi_singlestep = false;
+	if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
+		/* Clear our flags if they were not set by the guest */
+		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
+			svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
+		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
+			svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
+	}
+}
+
 /* Note:
  * This hash table is used to map VM_ID to a struct kvm_arch,
  * when handling AMD IOMMU GALOG notification to schedule in
@@ -1713,11 +1726,24 @@ static void svm_vcpu_unblocking(struct kvm_vcpu *vcpu)
 
 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
 {
-	return to_svm(vcpu)->vmcb->save.rflags;
+	struct vcpu_svm *svm = to_svm(vcpu);
+	unsigned long rflags = svm->vmcb->save.rflags;
+
+	if (svm->nmi_singlestep) {
+		/* Hide our flags if they were not set by the guest */
+		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
+			rflags &= ~X86_EFLAGS_TF;
+		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
+			rflags &= ~X86_EFLAGS_RF;
+	}
+	return rflags;
 }
 
 static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 {
+	if (to_svm(vcpu)->nmi_singlestep)
+		rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
+
 	/*
 	 * Any change of EFLAGS.VM is accompanied by a reload of SS
 	 * (caused by either a task switch or an inter-privilege IRET),
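Note: taken together, the two hunks above virtualize TF and RF during NMI-window single-stepping. The snapshot in nmi_singlestep_guest_rflags records which of the two flags the guest itself had set; the getter then hides the bits KVM forced on, and the setter re-forces them so a guest write cannot cancel the single step. Below is a minimal user-space sketch of that masking logic, assuming nothing beyond the hunks themselves: struct fake_vcpu and get_guest_visible_rflags() are invented names for illustration, and only the flag constants mirror the kernel's values.

/* Sketch only: fake_vcpu stands in for vcpu_svm + vmcb state. */
#include <stdbool.h>
#include <stdio.h>

#define X86_EFLAGS_TF 0x0100UL  /* trap flag: #DB after each instruction */
#define X86_EFLAGS_RF 0x10000UL /* resume flag: suppress #DB once */

struct fake_vcpu {
	unsigned long rflags;       /* what the CPU actually runs with */
	unsigned long guest_rflags; /* snapshot taken when singlestep began */
	bool nmi_singlestep;
};

/* Mirrors svm_get_rflags(): hide TF/RF bits the hypervisor set itself. */
static unsigned long get_guest_visible_rflags(struct fake_vcpu *v)
{
	unsigned long rflags = v->rflags;

	if (v->nmi_singlestep) {
		if (!(v->guest_rflags & X86_EFLAGS_TF))
			rflags &= ~X86_EFLAGS_TF;
		if (!(v->guest_rflags & X86_EFLAGS_RF))
			rflags &= ~X86_EFLAGS_RF;
	}
	return rflags;
}

int main(void)
{
	struct fake_vcpu v = { .rflags = 0x2, .guest_rflags = 0x2,
			       .nmi_singlestep = true };

	/* the hypervisor forces TF|RF on, as enable_nmi_window() does */
	v.rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF;

	/* the guest still sees its own flags, with TF and RF hidden */
	printf("guest sees rflags=%#lx\n", get_guest_visible_rflags(&v));
	return 0;
}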
@@ -2112,10 +2138,7 @@ static int db_interception(struct vcpu_svm *svm)
 	}
 
 	if (svm->nmi_singlestep) {
-		svm->nmi_singlestep = false;
-		if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
-			svm->vmcb->save.rflags &=
-				~(X86_EFLAGS_TF | X86_EFLAGS_RF);
+		disable_nmi_singlestep(svm);
 	}
 
 	if (svm->vcpu.guest_debug &
@@ -2370,8 +2393,8 @@ static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
 
 static int nested_svm_check_permissions(struct vcpu_svm *svm)
 {
-	if (!(svm->vcpu.arch.efer & EFER_SVME)
-	    || !is_paging(&svm->vcpu)) {
+	if (!(svm->vcpu.arch.efer & EFER_SVME) ||
+	    !is_paging(&svm->vcpu)) {
 		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
 		return 1;
 	}
@@ -2381,7 +2404,7 @@ static int nested_svm_check_permissions(struct vcpu_svm *svm)
 		return 1;
 	}
 
-        return 0;
+	return 0;
 }
 
 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
@@ -2534,6 +2557,31 @@ static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
 	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
 }
 
+/* DB exceptions for our internal use must not cause vmexit */
+static int nested_svm_intercept_db(struct vcpu_svm *svm)
+{
+	unsigned long dr6;
+
+	/* if we're not singlestepping, it's not ours */
+	if (!svm->nmi_singlestep)
+		return NESTED_EXIT_DONE;
+
+	/* if it's not a singlestep exception, it's not ours */
+	if (kvm_get_dr(&svm->vcpu, 6, &dr6))
+		return NESTED_EXIT_DONE;
+	if (!(dr6 & DR6_BS))
+		return NESTED_EXIT_DONE;
+
+	/* if the guest is singlestepping, it should get the vmexit */
+	if (svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF) {
+		disable_nmi_singlestep(svm);
+		return NESTED_EXIT_DONE;
+	}
+
+	/* it's ours, the nested hypervisor must not see this one */
+	return NESTED_EXIT_HOST;
+}
+
 static int nested_svm_exit_special(struct vcpu_svm *svm)
 {
 	u32 exit_code = svm->vmcb->control.exit_code;
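Note: the new nested_svm_intercept_db() decides whether a #DB belongs to L0's own single-stepping or must be reflected to the nested hypervisor. A pure-function restatement of that routing, runnable in user space, is sketched below; route_db() and enum nested_exit are hypothetical names, and the sketch deliberately omits the disable_nmi_singlestep() side effect taken in the guest-TF case.

#include <assert.h>
#include <stdbool.h>

enum nested_exit { NESTED_EXIT_HOST, NESTED_EXIT_DONE };

/* A #DB is swallowed by L0 (NESTED_EXIT_HOST) only if our NMI singlestep
 * is active, DR6.BS says it was a single-step trap, and the guest had not
 * set TF itself; every other #DB goes to the nested hypervisor. */
static enum nested_exit route_db(bool nmi_singlestep, bool dr6_bs,
				 bool guest_tf)
{
	if (!nmi_singlestep)
		return NESTED_EXIT_DONE;	/* not ours */
	if (!dr6_bs)
		return NESTED_EXIT_DONE;	/* not a single-step trap */
	if (guest_tf)
		return NESTED_EXIT_DONE;	/* guest wanted this one too */
	return NESTED_EXIT_HOST;		/* ours alone; hide from L1 */
}

int main(void)
{
	/* only the all-conditions case is hidden from the nested hypervisor */
	assert(route_db(true, true, false) == NESTED_EXIT_HOST);
	assert(route_db(true, true, true) == NESTED_EXIT_DONE);
	assert(route_db(false, true, false) == NESTED_EXIT_DONE);
	return 0;
}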
@@ -2589,8 +2637,12 @@ static int nested_svm_intercept(struct vcpu_svm *svm)
 	}
 	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
 		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
-		if (svm->nested.intercept_exceptions & excp_bits)
-			vmexit = NESTED_EXIT_DONE;
+		if (svm->nested.intercept_exceptions & excp_bits) {
+			if (exit_code == SVM_EXIT_EXCP_BASE + DB_VECTOR)
+				vmexit = nested_svm_intercept_db(svm);
+			else
+				vmexit = NESTED_EXIT_DONE;
+		}
 		/* async page fault always cause vmexit */
 		else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
 			 svm->apf_reason != 0)
@@ -4627,10 +4679,17 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu)
 	    == HF_NMI_MASK)
 		return; /* IRET will cause a vm exit */
 
+	if ((svm->vcpu.arch.hflags & HF_GIF_MASK) == 0)
+		return; /* STGI will cause a vm exit */
+
+	if (svm->nested.exit_required)
+		return; /* we're not going to run the guest yet */
+
 	/*
 	 * Something prevents NMI from been injected. Single step over possible
 	 * problem (IRET or exception injection or interrupt shadow)
 	 */
+	svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu);
 	svm->nmi_singlestep = true;
 	svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
 }
@@ -4771,6 +4830,22 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	if (unlikely(svm->nested.exit_required))
 		return;
 
+	/*
+	 * Disable singlestep if we're injecting an interrupt/exception.
+	 * We don't want our modified rflags to be pushed on the stack where
+	 * we might not be able to easily reset them if we disabled NMI
+	 * singlestep later.
+	 */
+	if (svm->nmi_singlestep && svm->vmcb->control.event_inj) {
+		/*
+		 * Event injection happens before external interrupts cause a
+		 * vmexit and interrupts are disabled here, so smp_send_reschedule
+		 * is enough to force an immediate vmexit.
+		 */
+		disable_nmi_singlestep(svm);
+		smp_send_reschedule(vcpu->cpu);
+	}
+
 	pre_svm_run(svm);
 
 	sync_lapic_to_cr8(vcpu);
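Note: the last hunk guards against a subtle interaction. If an event is injected while TF|RF are still forced on, the CPU pushes the doctored RFLAGS onto the guest stack during delivery, and the bits KVM set can no longer be cleanly taken back once the guest IRETs from that frame. A toy user-space model of that hazard follows; struct frame and deliver_event() are invented for the sketch and only stand in for the hardware's frame push.

#include <assert.h>

#define X86_EFLAGS_TF 0x0100UL

struct frame { unsigned long saved_rflags; };

/* models the CPU pushing RFLAGS while delivering an injected event */
static struct frame deliver_event(unsigned long rflags)
{
	return (struct frame){ .saved_rflags = rflags };
}

int main(void)
{
	unsigned long guest_rflags = 0x2;	/* the guest's own flags */
	unsigned long running = guest_rflags | X86_EFLAGS_TF; /* forced by us */

	/* injecting while TF is still forced bakes our bit into the frame
	 * the guest will later IRET from */
	struct frame f = deliver_event(running);
	assert(f.saved_rflags & X86_EFLAGS_TF);	/* polluted frame */

	/* disabling singlestep first keeps the pushed image clean */
	f = deliver_event(guest_rflags);
	assert(!(f.saved_rflags & X86_EFLAGS_TF));
	return 0;
}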