Merge tag 'v4.6-rc4' into x86/asm, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -700,7 +700,6 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 		if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
 			return 1;
 	}
-	kvm_put_guest_xcr0(vcpu);
 	vcpu->arch.xcr0 = xcr0;
 
 	if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
@@ -6095,12 +6094,10 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
 	}
 
 	/* try to inject new event if pending */
-	if (vcpu->arch.nmi_pending) {
-		if (kvm_x86_ops->nmi_allowed(vcpu)) {
-			--vcpu->arch.nmi_pending;
-			vcpu->arch.nmi_injected = true;
-			kvm_x86_ops->set_nmi(vcpu);
-		}
+	if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
+		--vcpu->arch.nmi_pending;
+		vcpu->arch.nmi_injected = true;
+		kvm_x86_ops->set_nmi(vcpu);
 	} else if (kvm_cpu_has_injectable_intr(vcpu)) {
 		/*
 		 * Because interrupts can be injected asynchronously, we are
@@ -6569,10 +6566,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	if (inject_pending_event(vcpu, req_int_win) != 0)
 		req_immediate_exit = true;
 	/* enable NMI/IRQ window open exits if needed */
-	else if (vcpu->arch.nmi_pending)
-		kvm_x86_ops->enable_nmi_window(vcpu);
-	else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
-		kvm_x86_ops->enable_irq_window(vcpu);
+	else {
+		if (vcpu->arch.nmi_pending)
+			kvm_x86_ops->enable_nmi_window(vcpu);
+		if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
+			kvm_x86_ops->enable_irq_window(vcpu);
+	}
 
 	if (kvm_lapic_enabled(vcpu)) {
 		update_cr8_intercept(vcpu);
@@ -6590,8 +6589,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->prepare_guest_switch(vcpu);
 	if (vcpu->fpu_active)
 		kvm_load_guest_fpu(vcpu);
-	kvm_load_guest_xcr0(vcpu);
-
 	vcpu->mode = IN_GUEST_MODE;
 
 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
@@ -6618,6 +6615,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		goto cancel_injection;
 	}
 
+	kvm_load_guest_xcr0(vcpu);
+
 	if (req_immediate_exit)
 		smp_send_reschedule(vcpu->cpu);
 
@@ -6667,6 +6666,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 	smp_wmb();
 
+	kvm_put_guest_xcr0(vcpu);
+
 	/* Interrupt is enabled by handle_external_intr() */
 	kvm_x86_ops->handle_external_intr(vcpu);
 
@@ -7314,7 +7315,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
	 * and assume host would use all available bits.
	 * Guest xcr0 would be loaded later.
	 */
-	kvm_put_guest_xcr0(vcpu);
 	vcpu->guest_fpu_loaded = 1;
 	__kernel_fpu_begin();
 	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
@@ -7323,8 +7323,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 {
-	kvm_put_guest_xcr0(vcpu);
-
 	if (!vcpu->guest_fpu_loaded) {
 		vcpu->fpu_counter = 0;
 		return;