KVM: x86: Move vendor CR4 validity check to dedicated kvm_x86_ops hook
[ Upstream commit c2fe3cd4604ac87c587db05d41843d667dc43815 ]
Split out VMX's checks on CR4.VMXE to a dedicated hook, .is_valid_cr4(),
and invoke the new hook from kvm_valid_cr4(). This fixes an issue where
KVM_SET_SREGS would return success while failing to actually set CR4.
Fixing the issue by explicitly checking kvm_x86_ops.set_cr4()'s return
in __set_sregs() is not a viable option as KVM has already stuffed a
variety of vCPU state.
Note, kvm_valid_cr4() and is_valid_cr4() have different return types and
inverted semantics. This will be remedied in a future patch.
Fixes: 5e1746d620 ("KVM: nVMX: Allow setting the VMXE bit in CR4")
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20201007014417.29276-5-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Committed by: Greg Kroah-Hartman
Parent: 2f04a04d06
Commit: c72a9b1d0d
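
For context, a rough sketch of why the CR4 check must run up front, rather than in .set_cr4() itself, is shown before the diff below. It is a simplified illustration, not verbatim kernel code: the __set_sregs_sketch() name and the exact ordering of helpers are illustrative assumptions, while kvm_valid_cr4(), kvm_x86_ops.is_valid_cr4() and kvm_x86_ops.set_cr4() are the functions actually touched by this patch.

/* Simplified, illustrative sketch -- not verbatim kernel code. */
static int __set_sregs_sketch(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
        /*
         * After this patch, the vendor check runs here via
         * kvm_valid_cr4() -> kvm_x86_ops.is_valid_cr4(), before any vCPU
         * state is modified, so an invalid CR4 fails cleanly with -EINVAL.
         */
        if (kvm_valid_cr4(vcpu, sregs->cr4))
                return -EINVAL;

        /* From here on, KVM has already stuffed a variety of vCPU state... */
        kvm_x86_ops.set_efer(vcpu, sregs->efer);
        kvm_x86_ops.set_cr0(vcpu, sregs->cr0);

        /*
         * ...so a late failure from the old int-returning .set_cr4() could
         * not be reported without leaving the vCPU half-updated.  With this
         * patch, .set_cr4() is void and cannot fail.
         */
        kvm_x86_ops.set_cr4(vcpu, sregs->cr4);
        return 0;
}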
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1117,7 +1117,8 @@ struct kvm_x86_ops {
                            struct kvm_segment *var, int seg);
        void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
        void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
-       int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
+       bool (*is_valid_cr4)(struct kvm_vcpu *vcpu, unsigned long cr0);
+       void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
        int (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
        void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1692,7 +1692,12 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
        update_cr0_intercept(svm);
 }
 
-int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+static bool svm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+{
+       return true;
+}
+
+void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
        unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
        unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
@@ -1706,7 +1711,6 @@ int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
        cr4 |= host_cr4_mce;
        to_svm(vcpu)->vmcb->save.cr4 = cr4;
        vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
-       return 0;
 }
 
 static void svm_set_segment(struct kvm_vcpu *vcpu,
@@ -4238,6 +4242,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
        .get_cpl = svm_get_cpl,
        .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
        .set_cr0 = svm_set_cr0,
+       .is_valid_cr4 = svm_is_valid_cr4,
        .set_cr4 = svm_set_cr4,
        .set_efer = svm_set_efer,
        .get_idt = svm_get_idt,
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -355,7 +355,7 @@ void svm_vcpu_free_msrpm(u32 *msrpm);
 
 int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
 void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
-int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
+void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 void svm_flush_tlb(struct kvm_vcpu *vcpu);
 void disable_nmi_singlestep(struct vcpu_svm *svm);
 bool svm_smi_blocked(struct kvm_vcpu *vcpu);
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4879,7 +4879,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
        /*
         * The Intel VMX Instruction Reference lists a bunch of bits that are
         * prerequisite to running VMXON, most notably cr4.VMXE must be set to
-        * 1 (see vmx_set_cr4() for when we allow the guest to set this).
+        * 1 (see vmx_is_valid_cr4() for when we allow the guest to set this).
         * Otherwise, we should fail with #UD.  But most faulting conditions
         * have already been checked by hardware, prior to the VM-exit for
         * VMXON.  We do test guest cr4.VMXE because processor CR4 always has
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -3183,7 +3183,23 @@ static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long pgd,
        vmcs_writel(GUEST_CR3, guest_cr3);
 }
 
-int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+static bool vmx_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+{
+       /*
+        * We operate under the default treatment of SMM, so VMX cannot be
+        * enabled under SMM.  Note, whether or not VMXE is allowed at all is
+        * handled by kvm_valid_cr4().
+        */
+       if ((cr4 & X86_CR4_VMXE) && is_smm(vcpu))
+               return false;
+
+       if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4))
+               return false;
+
+       return true;
+}
+
+void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        /*
@@ -3211,17 +3227,6 @@ int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
                }
        }
 
-       /*
-        * We operate under the default treatment of SMM, so VMX cannot be
-        * enabled under SMM.  Note, whether or not VMXE is allowed at all is
-        * handled by kvm_valid_cr4().
-        */
-       if ((cr4 & X86_CR4_VMXE) && is_smm(vcpu))
-               return 1;
-
-       if (vmx->nested.vmxon && !nested_cr4_valid(vcpu, cr4))
-               return 1;
-
        vcpu->arch.cr4 = cr4;
        kvm_register_mark_available(vcpu, VCPU_EXREG_CR4);
 
@@ -3252,7 +3257,6 @@ int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 
        vmcs_writel(CR4_READ_SHADOW, cr4);
        vmcs_writel(GUEST_CR4, hw_cr4);
-       return 0;
 }
 
 void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
@@ -7748,6 +7752,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
        .get_cpl = vmx_get_cpl,
        .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
        .set_cr0 = vmx_set_cr0,
+       .is_valid_cr4 = vmx_is_valid_cr4,
        .set_cr4 = vmx_set_cr4,
        .set_efer = vmx_set_efer,
        .get_idt = vmx_get_idt,
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -347,7 +347,7 @@ u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
 void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
 int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
 void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
-int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
+void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
 void ept_save_pdptrs(struct kvm_vcpu *vcpu);
 void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -986,6 +986,9 @@ int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
        if (cr4 & vcpu->arch.cr4_guest_rsvd_bits)
                return -EINVAL;
 
+       if (!kvm_x86_ops.is_valid_cr4(vcpu, cr4))
+               return -EINVAL;
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_valid_cr4);
@@ -1020,8 +1023,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
                return 1;
        }
 
-       if (kvm_x86_ops.set_cr4(vcpu, cr4))
-               return 1;
+       kvm_x86_ops.set_cr4(vcpu, cr4);
 
        if (((cr4 ^ old_cr4) & mmu_role_bits) ||
            (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))