@@ -19,6 +19,14 @@ module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
 static bool __read_mostly nested_early_check = 0;
 module_param(nested_early_check, bool, S_IRUGO);
 
+#define CC(consistency_check)						\
+({									\
+	bool failed = (consistency_check);				\
+	if (failed)							\
+		trace_kvm_nested_vmenter_failed(#consistency_check, 0);	\
+	failed;								\
+})
+
 /*
  * Hyper-V requires all of these, so mark them as supported even though
  * they are just treated the same as all-context.
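
The new CC() macro is a GNU C statement expression: it evaluates the consistency check exactly once, fires the tracepoint with the stringified source of the check when it fails, and yields the boolean result so wrapped clauses still compose with || at the call sites converted below. A minimal userspace sketch of the same pattern; trace_failed() is a stand-in for the real tracepoint and is an assumption of this example:

	#include <stdbool.h>
	#include <stdio.h>

	static void trace_failed(const char *check)
	{
		fprintf(stderr, "consistency check failed: %s\n", check);
	}

	/* Same shape as CC() above: evaluate once, report on failure,
	 * yield the result so clauses still compose with ||. */
	#define CC(consistency_check)				\
	({							\
		bool failed = (consistency_check);		\
		if (failed)					\
			trace_failed(#consistency_check);	\
		failed;						\
	})

	static int check_controls(unsigned int ctl, unsigned int allowed)
	{
		if (CC(ctl & ~allowed) || CC(ctl == 0))
			return -1;
		return 0;
	}

	int main(void)
	{
		return check_controls(0x3, 0x1) ? 1 : 0; /* traces "ctl & ~allowed" */
	}
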
@@ -430,8 +438,8 @@ static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
 	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
 		return 0;
 
-	if (!page_address_valid(vcpu, vmcs12->io_bitmap_a) ||
-	    !page_address_valid(vcpu, vmcs12->io_bitmap_b))
+	if (CC(!page_address_valid(vcpu, vmcs12->io_bitmap_a)) ||
+	    CC(!page_address_valid(vcpu, vmcs12->io_bitmap_b)))
 		return -EINVAL;
 
 	return 0;
@@ -443,7 +451,7 @@ static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
 	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
 		return 0;
 
-	if (!page_address_valid(vcpu, vmcs12->msr_bitmap))
+	if (CC(!page_address_valid(vcpu, vmcs12->msr_bitmap)))
 		return -EINVAL;
 
 	return 0;
@@ -455,7 +463,7 @@ static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
 	if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
 		return 0;
 
-	if (!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr))
+	if (CC(!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr)))
 		return -EINVAL;
 
 	return 0;
@@ -688,7 +696,7 @@ static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
 					  struct vmcs12 *vmcs12)
 {
 	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
-	    !page_address_valid(vcpu, vmcs12->apic_access_addr))
+	    CC(!page_address_valid(vcpu, vmcs12->apic_access_addr)))
 		return -EINVAL;
 	else
 		return 0;
@@ -707,16 +715,15 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
 	 * If virtualize x2apic mode is enabled,
 	 * virtualize apic access must be disabled.
 	 */
-	if (nested_cpu_has_virt_x2apic_mode(vmcs12) &&
-	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
+	if (CC(nested_cpu_has_virt_x2apic_mode(vmcs12) &&
+	       nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)))
 		return -EINVAL;
 
 	/*
 	 * If virtual interrupt delivery is enabled,
 	 * we must exit on external interrupts.
 	 */
-	if (nested_cpu_has_vid(vmcs12) &&
-	    !nested_exit_on_intr(vcpu))
+	if (CC(nested_cpu_has_vid(vmcs12) && !nested_exit_on_intr(vcpu)))
 		return -EINVAL;
 
 	/*
@@ -727,15 +734,15 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
 	 * bits 5:0 of posted_intr_desc_addr should be zero.
 	 */
 	if (nested_cpu_has_posted_intr(vmcs12) &&
-	   (!nested_cpu_has_vid(vmcs12) ||
-	    !nested_exit_intr_ack_set(vcpu) ||
-	    (vmcs12->posted_intr_nv & 0xff00) ||
-	    (vmcs12->posted_intr_desc_addr & 0x3f) ||
-	    (vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu))))
+	   (CC(!nested_cpu_has_vid(vmcs12)) ||
+	    CC(!nested_exit_intr_ack_set(vcpu)) ||
+	    CC((vmcs12->posted_intr_nv & 0xff00)) ||
+	    CC((vmcs12->posted_intr_desc_addr & 0x3f)) ||
+	    CC((vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu)))))
 		return -EINVAL;
 
 	/* tpr shadow is needed by all apicv features. */
-	if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
+	if (CC(!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)))
 		return -EINVAL;
 
 	return 0;
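
Design note on the conversions above: each clause is wrapped in its own CC() rather than wrapping the whole condition, so the stringified expression handed to the tracepoint names the exact clause that failed; because || short-circuits, evaluation stops at the first failing clause and only that one is traced. A sketch of what the preprocessor produces, with trace() abbreviating the tracepoint call:

	/* CC(x) || CC(y) expands to two statement expressions; once x
	 * fails, || never evaluates (or traces) y. */
	if (({ bool f = (x); if (f) trace(#x); f; }) ||
	    ({ bool f = (y); if (f) trace(#y); f; }))
		return -EINVAL;
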
@@ -759,10 +766,12 @@ static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
 static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
 						     struct vmcs12 *vmcs12)
 {
-	if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_load_count,
-					vmcs12->vm_exit_msr_load_addr) ||
-	    nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_store_count,
-					vmcs12->vm_exit_msr_store_addr))
+	if (CC(nested_vmx_check_msr_switch(vcpu,
+					   vmcs12->vm_exit_msr_load_count,
+					   vmcs12->vm_exit_msr_load_addr)) ||
+	    CC(nested_vmx_check_msr_switch(vcpu,
+					   vmcs12->vm_exit_msr_store_count,
+					   vmcs12->vm_exit_msr_store_addr)))
 		return -EINVAL;
 
 	return 0;
@@ -771,8 +780,9 @@ static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
 static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu,
 						      struct vmcs12 *vmcs12)
 {
-	if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_entry_msr_load_count,
-					vmcs12->vm_entry_msr_load_addr))
+	if (CC(nested_vmx_check_msr_switch(vcpu,
+					   vmcs12->vm_entry_msr_load_count,
+					   vmcs12->vm_entry_msr_load_addr)))
 		return -EINVAL;
 
 	return 0;
@@ -784,8 +794,8 @@ static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
 	if (!nested_cpu_has_pml(vmcs12))
 		return 0;
 
-	if (!nested_cpu_has_ept(vmcs12) ||
-	    !page_address_valid(vcpu, vmcs12->pml_address))
+	if (CC(!nested_cpu_has_ept(vmcs12)) ||
+	    CC(!page_address_valid(vcpu, vmcs12->pml_address)))
 		return -EINVAL;
 
 	return 0;
@@ -794,8 +804,8 @@ static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
 static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu,
 							struct vmcs12 *vmcs12)
 {
-	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) &&
-	    !nested_cpu_has_ept(vmcs12))
+	if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) &&
+	       !nested_cpu_has_ept(vmcs12)))
 		return -EINVAL;
 	return 0;
 }
@@ -803,8 +813,8 @@ static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu,
 static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu,
 							 struct vmcs12 *vmcs12)
 {
-	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) &&
-	    !nested_cpu_has_ept(vmcs12))
+	if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) &&
+	       !nested_cpu_has_ept(vmcs12)))
 		return -EINVAL;
 	return 0;
 }
@@ -815,8 +825,8 @@ static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
 	if (!nested_cpu_has_shadow_vmcs(vmcs12))
 		return 0;
 
-	if (!page_address_valid(vcpu, vmcs12->vmread_bitmap) ||
-	    !page_address_valid(vcpu, vmcs12->vmwrite_bitmap))
+	if (CC(!page_address_valid(vcpu, vmcs12->vmread_bitmap)) ||
+	    CC(!page_address_valid(vcpu, vmcs12->vmwrite_bitmap)))
 		return -EINVAL;
 
 	return 0;
@@ -826,12 +836,12 @@ static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
 				       struct vmx_msr_entry *e)
 {
 	/* x2APIC MSR accesses are not allowed */
-	if (vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8)
+	if (CC(vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8))
 		return -EINVAL;
-	if (e->index == MSR_IA32_UCODE_WRITE || /* SDM Table 35-2 */
-	    e->index == MSR_IA32_UCODE_REV)
+	if (CC(e->index == MSR_IA32_UCODE_WRITE) || /* SDM Table 35-2 */
+	    CC(e->index == MSR_IA32_UCODE_REV))
 		return -EINVAL;
-	if (e->reserved != 0)
+	if (CC(e->reserved != 0))
 		return -EINVAL;
 	return 0;
 }
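
The x2APIC filter above leans on the fact that the x2APIC register space occupies MSR indices 0x800 through 0x8ff, so one shift-and-compare covers the entire range. A self-contained sketch:

	#include <stdbool.h>

	/* MSRs 0x800-0x8ff map the x2APIC registers, so (index >> 8) == 0x8
	 * tests membership in the whole range with a single comparison. */
	static bool is_x2apic_msr(unsigned int index)
	{
		return (index >> 8) == 0x8;
	}
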
@@ -839,9 +849,9 @@ static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
 static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
 				     struct vmx_msr_entry *e)
 {
-	if (e->index == MSR_FS_BASE ||
-	    e->index == MSR_GS_BASE ||
-	    e->index == MSR_IA32_SMM_MONITOR_CTL || /* SMM is not supported */
+	if (CC(e->index == MSR_FS_BASE) ||
+	    CC(e->index == MSR_GS_BASE) ||
+	    CC(e->index == MSR_IA32_SMM_MONITOR_CTL) || /* SMM is not supported */
 	    nested_vmx_msr_check_common(vcpu, e))
 		return -EINVAL;
 	return 0;
@@ -850,7 +860,7 @@ static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
 static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
 				      struct vmx_msr_entry *e)
 {
-	if (e->index == MSR_IA32_SMBASE || /* SMM is not supported */
+	if (CC(e->index == MSR_IA32_SMBASE) || /* SMM is not supported */
 	    nested_vmx_msr_check_common(vcpu, e))
 		return -EINVAL;
 	return 0;
@@ -864,9 +874,7 @@ static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 {
 	u32 i;
 	struct vmx_msr_entry e;
-	struct msr_data msr;
 
-	msr.host_initiated = false;
 	for (i = 0; i < count; i++) {
 		if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
 					&e, sizeof(e))) {
@@ -881,9 +889,7 @@ static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 				__func__, i, e.index, e.reserved);
 			goto fail;
 		}
-		msr.index = e.index;
-		msr.data = e.value;
-		if (kvm_set_msr(vcpu, &msr)) {
+		if (kvm_set_msr(vcpu, e.index, e.value)) {
 			pr_debug_ratelimited(
 				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
 				__func__, i, e.index, e.value);
@@ -897,11 +903,11 @@ fail:
 
 static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 {
+	u64 data;
 	u32 i;
 	struct vmx_msr_entry e;
 
 	for (i = 0; i < count; i++) {
-		struct msr_data msr_info;
 		if (kvm_vcpu_read_guest(vcpu,
 					gpa + i * sizeof(e),
 					&e, 2 * sizeof(u32))) {
@@ -916,9 +922,7 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 				__func__, i, e.index, e.reserved);
 			return -EINVAL;
 		}
-		msr_info.host_initiated = false;
-		msr_info.index = e.index;
-		if (kvm_get_msr(vcpu, &msr_info)) {
+		if (kvm_get_msr(vcpu, e.index, &data)) {
 			pr_debug_ratelimited(
 				"%s cannot read MSR (%u, 0x%x)\n",
 				__func__, i, e.index);
@@ -927,10 +931,10 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 		if (kvm_vcpu_write_guest(vcpu,
 					 gpa + i * sizeof(e) +
 					     offsetof(struct vmx_msr_entry, value),
-					 &msr_info.data, sizeof(msr_info.data))) {
+					 &data, sizeof(data))) {
 			pr_debug_ratelimited(
 				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
-				__func__, i, e.index, msr_info.data);
+				__func__, i, e.index, data);
 			return -EINVAL;
 		}
 	}
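
The nested_vmx_{load,store}_msr() hunks above also adopt the reworked kvm_set_msr()/kvm_get_msr() calling convention: instead of the caller assembling an on-stack struct msr_data, the MSR index and value are passed directly, and reads return the value through a plain u64. The contrast, drawn from the lines above (that the new wrappers mark the access as guest-initiated internally is an inference, not shown in this diff):

	/* Old style: caller builds a struct msr_data by hand. */
	msr.host_initiated = false;
	msr.index = e.index;
	msr.data = e.value;
	if (kvm_set_msr(vcpu, &msr))
		goto fail;

	/* New style, as used above: index and value passed directly. */
	if (kvm_set_msr(vcpu, e.index, e.value))
		goto fail;
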
@@ -955,7 +959,7 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne
 			       u32 *entry_failure_code)
 {
 	if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) {
-		if (!nested_cr3_valid(vcpu, cr3)) {
+		if (CC(!nested_cr3_valid(vcpu, cr3))) {
 			*entry_failure_code = ENTRY_FAIL_DEFAULT;
 			return -EINVAL;
 		}
@@ -965,7 +969,7 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne
 		 * must not be dereferenced.
 		 */
 		if (is_pae_paging(vcpu) && !nested_ept) {
-			if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) {
+			if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))) {
 				*entry_failure_code = ENTRY_FAIL_PDPTE;
 				return -EINVAL;
 			}
@@ -2411,12 +2415,12 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 
 static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
 {
-	if (!nested_cpu_has_nmi_exiting(vmcs12) &&
-	    nested_cpu_has_virtual_nmis(vmcs12))
+	if (CC(!nested_cpu_has_nmi_exiting(vmcs12) &&
+	       nested_cpu_has_virtual_nmis(vmcs12)))
 		return -EINVAL;
 
-	if (!nested_cpu_has_virtual_nmis(vmcs12) &&
-	    nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING))
+	if (CC(!nested_cpu_has_virtual_nmis(vmcs12) &&
+	       nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING)))
 		return -EINVAL;
 
 	return 0;
@@ -2430,11 +2434,11 @@ static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
 	/* Check for memory type validity */
 	switch (address & VMX_EPTP_MT_MASK) {
 	case VMX_EPTP_MT_UC:
-		if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT))
+		if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT)))
 			return false;
 		break;
 	case VMX_EPTP_MT_WB:
-		if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT))
+		if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT)))
 			return false;
 		break;
 	default:
@@ -2442,16 +2446,16 @@ static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
 	}
 
 	/* only 4 levels page-walk length are valid */
-	if ((address & VMX_EPTP_PWL_MASK) != VMX_EPTP_PWL_4)
+	if (CC((address & VMX_EPTP_PWL_MASK) != VMX_EPTP_PWL_4))
 		return false;
 
 	/* Reserved bits should not be set */
-	if (address >> maxphyaddr || ((address >> 7) & 0x1f))
+	if (CC(address >> maxphyaddr || ((address >> 7) & 0x1f)))
 		return false;
 
 	/* AD, if set, should be supported */
 	if (address & VMX_EPTP_AD_ENABLE_BIT) {
-		if (!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT))
+		if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT)))
 			return false;
 	}
 
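
The checks above walk the EPTP field layout: bits 2:0 select the memory type, bits 5:3 encode the page-walk length minus one, bit 6 enables accessed/dirty flags, bits 11:7 are reserved, and the upper bits hold the PML4 table address. A sketch of the accessors these checks imply; the macro names are illustrative, not KVM's:

	#include <stdbool.h>
	#include <stdint.h>

	#define EPTP_MT(eptp)		((eptp) & 0x7)		/* bits 2:0 */
	#define EPTP_PWL(eptp)		(((eptp) >> 3) & 0x7)	/* bits 5:3 */
	#define EPTP_AD(eptp)		(((eptp) >> 6) & 0x1)	/* bit 6 */
	#define EPTP_RSVD(eptp)		(((eptp) >> 7) & 0x1f)	/* bits 11:7 */

	static bool eptp_reserved_bits_clear(uint64_t eptp, unsigned int maxphyaddr)
	{
		/* Mirrors the "address >> maxphyaddr" and bits-11:7 tests above. */
		return !(eptp >> maxphyaddr) && !EPTP_RSVD(eptp);
	}
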
@@ -2466,21 +2470,21 @@ static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-	if (!vmx_control_verify(vmcs12->pin_based_vm_exec_control,
-				vmx->nested.msrs.pinbased_ctls_low,
-				vmx->nested.msrs.pinbased_ctls_high) ||
-	    !vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
-				vmx->nested.msrs.procbased_ctls_low,
-				vmx->nested.msrs.procbased_ctls_high))
+	if (CC(!vmx_control_verify(vmcs12->pin_based_vm_exec_control,
+				   vmx->nested.msrs.pinbased_ctls_low,
+				   vmx->nested.msrs.pinbased_ctls_high)) ||
+	    CC(!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
+				   vmx->nested.msrs.procbased_ctls_low,
+				   vmx->nested.msrs.procbased_ctls_high)))
 		return -EINVAL;
 
 	if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
-	    !vmx_control_verify(vmcs12->secondary_vm_exec_control,
-				vmx->nested.msrs.secondary_ctls_low,
-				vmx->nested.msrs.secondary_ctls_high))
+	    CC(!vmx_control_verify(vmcs12->secondary_vm_exec_control,
+				   vmx->nested.msrs.secondary_ctls_low,
+				   vmx->nested.msrs.secondary_ctls_high)))
 		return -EINVAL;
 
-	if (vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu) ||
+	if (CC(vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu)) ||
 	    nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) ||
 	    nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) ||
 	    nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) ||
@@ -2491,7 +2495,7 @@ static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
 	    nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) ||
 	    nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) ||
 	    nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) ||
-	    (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id))
+	    CC(nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id))
 		return -EINVAL;
 
 	if (!nested_cpu_has_preemption_timer(vmcs12) &&
@@ -2499,17 +2503,17 @@ static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
 		return -EINVAL;
 
 	if (nested_cpu_has_ept(vmcs12) &&
-	    !valid_ept_address(vcpu, vmcs12->ept_pointer))
+	    CC(!valid_ept_address(vcpu, vmcs12->ept_pointer)))
 		return -EINVAL;
 
 	if (nested_cpu_has_vmfunc(vmcs12)) {
-		if (vmcs12->vm_function_control &
-		    ~vmx->nested.msrs.vmfunc_controls)
+		if (CC(vmcs12->vm_function_control &
+		       ~vmx->nested.msrs.vmfunc_controls))
 			return -EINVAL;
 
 		if (nested_cpu_has_eptp_switching(vmcs12)) {
-			if (!nested_cpu_has_ept(vmcs12) ||
-			    !page_address_valid(vcpu, vmcs12->eptp_list_address))
+			if (CC(!nested_cpu_has_ept(vmcs12)) ||
+			    CC(!page_address_valid(vcpu, vmcs12->eptp_list_address)))
 				return -EINVAL;
 		}
 	}
@@ -2525,10 +2529,10 @@ static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu,
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-	if (!vmx_control_verify(vmcs12->vm_exit_controls,
-				vmx->nested.msrs.exit_ctls_low,
-				vmx->nested.msrs.exit_ctls_high) ||
-	    nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12))
+	if (CC(!vmx_control_verify(vmcs12->vm_exit_controls,
+				   vmx->nested.msrs.exit_ctls_low,
+				   vmx->nested.msrs.exit_ctls_high)) ||
+	    CC(nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12)))
 		return -EINVAL;
 
 	return 0;
@@ -2542,9 +2546,9 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-	if (!vmx_control_verify(vmcs12->vm_entry_controls,
-				vmx->nested.msrs.entry_ctls_low,
-				vmx->nested.msrs.entry_ctls_high))
+	if (CC(!vmx_control_verify(vmcs12->vm_entry_controls,
+				   vmx->nested.msrs.entry_ctls_low,
+				   vmx->nested.msrs.entry_ctls_high)))
 		return -EINVAL;
 
 	/*
@@ -2564,31 +2568,31 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
 		bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
 
 		/* VM-entry interruption-info field: interruption type */
-		if (intr_type == INTR_TYPE_RESERVED ||
-		   (intr_type == INTR_TYPE_OTHER_EVENT &&
-		    !nested_cpu_supports_monitor_trap_flag(vcpu)))
+		if (CC(intr_type == INTR_TYPE_RESERVED) ||
+		    CC(intr_type == INTR_TYPE_OTHER_EVENT &&
+		       !nested_cpu_supports_monitor_trap_flag(vcpu)))
 			return -EINVAL;
 
 		/* VM-entry interruption-info field: vector */
-		if ((intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
-		    (intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
-		    (intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
+		if (CC(intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
+		    CC(intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
+		    CC(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
 			return -EINVAL;
 
 		/* VM-entry interruption-info field: deliver error code */
 		should_have_error_code =
 			intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
 			x86_exception_has_error_code(vector);
-		if (has_error_code != should_have_error_code)
+		if (CC(has_error_code != should_have_error_code))
 			return -EINVAL;
 
 		/* VM-entry exception error code */
-		if (has_error_code &&
-		    vmcs12->vm_entry_exception_error_code & GENMASK(31, 15))
+		if (CC(has_error_code &&
+		       vmcs12->vm_entry_exception_error_code & GENMASK(31, 15)))
 			return -EINVAL;
 
 		/* VM-entry interruption-info field: reserved bits */
-		if (intr_info & INTR_INFO_RESVD_BITS_MASK)
+		if (CC(intr_info & INTR_INFO_RESVD_BITS_MASK))
 			return -EINVAL;
 
 		/* VM-entry instruction length */
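
The should_have_error_code computation above hinges on which exception vectors architecturally push an error code. A self-contained sketch of that predicate; the in-kernel x86_exception_has_error_code() lives in KVM's x86 headers, and this version spells the vectors out:

	#include <stdbool.h>

	/* Only these exception vectors deliver an error code. */
	static bool x86_exception_has_error_code(unsigned int vector)
	{
		const unsigned int has_error_code =
			(1u << 8)  |	/* #DF double fault */
			(1u << 10) |	/* #TS invalid TSS */
			(1u << 11) |	/* #NP segment not present */
			(1u << 12) |	/* #SS stack-segment fault */
			(1u << 13) |	/* #GP general protection */
			(1u << 14) |	/* #PF page fault */
			(1u << 17);	/* #AC alignment check */

		return vector < 32 && ((1u << vector) & has_error_code);
	}
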
@@ -2596,9 +2600,9 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
 		case INTR_TYPE_SOFT_EXCEPTION:
 		case INTR_TYPE_SOFT_INTR:
 		case INTR_TYPE_PRIV_SW_EXCEPTION:
-			if ((vmcs12->vm_entry_instruction_len > 15) ||
-			    (vmcs12->vm_entry_instruction_len == 0 &&
-			     !nested_cpu_has_zero_length_injection(vcpu)))
+			if (CC(vmcs12->vm_entry_instruction_len > 15) ||
+			    CC(vmcs12->vm_entry_instruction_len == 0 &&
+			    CC(!nested_cpu_has_zero_length_injection(vcpu))))
 				return -EINVAL;
 		}
 	}
@@ -2625,40 +2629,40 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
 {
 	bool ia32e;
 
-	if (!nested_host_cr0_valid(vcpu, vmcs12->host_cr0) ||
-	    !nested_host_cr4_valid(vcpu, vmcs12->host_cr4) ||
-	    !nested_cr3_valid(vcpu, vmcs12->host_cr3))
+	if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) ||
+	    CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) ||
+	    CC(!nested_cr3_valid(vcpu, vmcs12->host_cr3)))
 		return -EINVAL;
 
-	if (is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu) ||
-	    is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu))
+	if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) ||
+	    CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu)))
 		return -EINVAL;
 
 	if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) &&
-	    !kvm_pat_valid(vmcs12->host_ia32_pat))
+	    CC(!kvm_pat_valid(vmcs12->host_ia32_pat)))
 		return -EINVAL;
 
 	ia32e = (vmcs12->vm_exit_controls &
 		 VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
 
-	if (vmcs12->host_cs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK) ||
-	    vmcs12->host_ss_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK) ||
-	    vmcs12->host_ds_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK) ||
-	    vmcs12->host_es_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK) ||
-	    vmcs12->host_fs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK) ||
-	    vmcs12->host_gs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK) ||
-	    vmcs12->host_tr_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK) ||
-	    vmcs12->host_cs_selector == 0 ||
-	    vmcs12->host_tr_selector == 0 ||
-	   (vmcs12->host_ss_selector == 0 && !ia32e))
+	if (CC(vmcs12->host_cs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
+	    CC(vmcs12->host_ss_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
+	    CC(vmcs12->host_ds_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
+	    CC(vmcs12->host_es_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
+	    CC(vmcs12->host_fs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
+	    CC(vmcs12->host_gs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
+	    CC(vmcs12->host_tr_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
+	    CC(vmcs12->host_cs_selector == 0) ||
+	    CC(vmcs12->host_tr_selector == 0) ||
+	    CC(vmcs12->host_ss_selector == 0 && !ia32e))
 		return -EINVAL;
 
 #ifdef CONFIG_X86_64
-	if (is_noncanonical_address(vmcs12->host_fs_base, vcpu) ||
-	    is_noncanonical_address(vmcs12->host_gs_base, vcpu) ||
-	    is_noncanonical_address(vmcs12->host_gdtr_base, vcpu) ||
-	    is_noncanonical_address(vmcs12->host_idtr_base, vcpu) ||
-	    is_noncanonical_address(vmcs12->host_tr_base, vcpu))
+	if (CC(is_noncanonical_address(vmcs12->host_fs_base, vcpu)) ||
+	    CC(is_noncanonical_address(vmcs12->host_gs_base, vcpu)) ||
+	    CC(is_noncanonical_address(vmcs12->host_gdtr_base, vcpu)) ||
+	    CC(is_noncanonical_address(vmcs12->host_idtr_base, vcpu)) ||
+	    CC(is_noncanonical_address(vmcs12->host_tr_base, vcpu)))
 		return -EINVAL;
 #endif
 
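
The selector tests above examine the low three bits of each host selector: bits 1:0 are the requested privilege level (RPL) and bit 2 is the table indicator, and host selectors loaded on VM-exit must reference the GDT at RPL 0. For reference, the masks match the x86 segment definitions:

	#define SEGMENT_RPL_MASK	0x3	/* requested privilege level, bits 1:0 */
	#define SEGMENT_TI_MASK		0x4	/* table indicator, bit 2 (0 = GDT) */
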
@@ -2669,9 +2673,9 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
 	 * the host address-space size VM-exit control.
 	 */
 	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
-		if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) ||
-		    ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) ||
-		    ia32e != !!(vmcs12->host_ia32_efer & EFER_LME))
+		if (CC(!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer)) ||
+		    CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA)) ||
+		    CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)))
 			return -EINVAL;
 	}
 
@@ -2688,16 +2692,16 @@ static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
 	if (vmcs12->vmcs_link_pointer == -1ull)
 		return 0;
 
-	if (!page_address_valid(vcpu, vmcs12->vmcs_link_pointer))
+	if (CC(!page_address_valid(vcpu, vmcs12->vmcs_link_pointer)))
 		return -EINVAL;
 
-	if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))
+	if (CC(kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map)))
 		return -EINVAL;
 
 	shadow = map.hva;
 
-	if (shadow->hdr.revision_id != VMCS12_REVISION ||
-	    shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12))
+	if (CC(shadow->hdr.revision_id != VMCS12_REVISION) ||
+	    CC(shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12)))
 		r = -EINVAL;
 
 	kvm_vcpu_unmap(vcpu, &map, false);
@@ -2709,8 +2713,8 @@ static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
  */
 static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12)
 {
-	if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
-	    vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)
+	if (CC(vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
+	       vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT))
 		return -EINVAL;
 
 	return 0;
@@ -2724,12 +2728,12 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
 
 	*exit_qual = ENTRY_FAIL_DEFAULT;
 
-	if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) ||
-	    !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4))
+	if (CC(!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0)) ||
+	    CC(!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)))
 		return -EINVAL;
 
 	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) &&
-	    !kvm_pat_valid(vmcs12->guest_ia32_pat))
+	    CC(!kvm_pat_valid(vmcs12->guest_ia32_pat)))
 		return -EINVAL;
 
 	if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) {
@@ -2749,16 +2753,16 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
 	if (to_vmx(vcpu)->nested.nested_run_pending &&
 	    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
 		ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
-		if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) ||
-		    ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) ||
-		    ((vmcs12->guest_cr0 & X86_CR0_PG) &&
-		     ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME)))
+		if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) ||
+		    CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) ||
+		    CC(((vmcs12->guest_cr0 & X86_CR0_PG) &&
+		        ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))))
 			return -EINVAL;
 	}
 
 	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
-	    (is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) ||
-	     (vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))
+	    (CC(is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu)) ||
+	     CC((vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD))))
 		return -EINVAL;
 
 	if (nested_check_guest_non_reg_state(vmcs12))
@@ -2841,9 +2845,13 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
 	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
 
 	if (vm_fail) {
+		u32 error = vmcs_read32(VM_INSTRUCTION_ERROR);
+
 		preempt_enable();
-		WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
-			     VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+
+		trace_kvm_nested_vmenter_failed(
+			"early hardware check VM-instruction error: ", error);
+		WARN_ON_ONCE(error != VMXERR_ENTRY_INVALID_CONTROL_FIELD);
 		return 1;
 	}
 
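
The reshuffle above is not just cosmetic: VMCS accessors are only valid on the CPU where the current VMCS is loaded, so VM_INSTRUCTION_ERROR has to be captured while preemption is still disabled; after preempt_enable() the task could migrate. That rationale is inferred from the ordering the hunk makes explicit:

	u32 error = vmcs_read32(VM_INSTRUCTION_ERROR);	/* preemption still off */

	preempt_enable();				/* may migrate from here on */

	trace_kvm_nested_vmenter_failed(
		"early hardware check VM-instruction error: ", error);
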
@@ -3401,6 +3409,15 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
 	unsigned long exit_qual;
 	bool block_nested_events =
 		vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
+	struct kvm_lapic *apic = vcpu->arch.apic;
+
+	if (lapic_in_kernel(vcpu) &&
+	    test_bit(KVM_APIC_INIT, &apic->pending_events)) {
+		if (block_nested_events)
+			return -EBUSY;
+		nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0);
+		return 0;
+	}
 
 	if (vcpu->arch.exception.pending &&
 	    nested_vmx_check_exception(vcpu, &exit_qual)) {
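
This hunk also folds in nested INIT handling: an INIT signal that arrives while the vCPU is in VMX non-root operation (running L2) must cause a VM-exit to L1 with EXIT_REASON_INIT_SIGNAL rather than being delivered directly, and is deferred with -EBUSY while a VM-entry is pending or an event awaits reinjection.
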
@@ -3889,7 +3906,6 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct vmx_msr_entry g, h;
-	struct msr_data msr;
 	gpa_t gpa;
 	u32 i, j;
 
@@ -3949,7 +3965,6 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
 	 * from the guest value. The intent is to stuff host state as
 	 * silently as possible, not to fully process the exit load list.
 	 */
-	msr.host_initiated = false;
 	for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) {
 		gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g));
 		if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) {
@@ -3979,9 +3994,7 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
 			goto vmabort;
 		}
 
-		msr.index = h.index;
-		msr.data = h.value;
-		if (kvm_set_msr(vcpu, &msr)) {
+		if (kvm_set_msr(vcpu, h.index, h.value)) {
 			pr_debug_ratelimited(
 				"%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
 				__func__, j, h.index, h.value);
@@ -4466,7 +4479,12 @@ static int handle_vmoff(struct kvm_vcpu *vcpu)
 {
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
+
 	free_nested(vcpu);
+
+	/* Process a latched INIT during time CPU was in VMX operation */
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
+
 	return nested_vmx_succeed(vcpu);
 }
 
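
This is the root-mode counterpart of the INIT change above: INIT is blocked (latched) while the CPU is in VMX operation, so once VMXOFF has torn down the nested state, KVM_REQ_EVENT is raised to process any INIT that was latched in the meantime.
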
@@ -5261,8 +5279,9 @@ bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
 		return false;
 
 	if (unlikely(vmx->fail)) {
-		pr_info_ratelimited("%s failed vm entry %x\n", __func__,
-				    vmcs_read32(VM_INSTRUCTION_ERROR));
+		trace_kvm_nested_vmenter_failed(
+			"hardware VM-instruction error: ",
+			vmcs_read32(VM_INSTRUCTION_ERROR));
 		return true;
 	}
 
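
The final hunk retires the ratelimited printk in favor of the same tracepoint, so hardware-detected VM-entry failures stop landing in dmesg and become observable alongside the software consistency checks; assuming the standard tracefs layout, that is the kvm:kvm_nested_vmenter_failed event.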