Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull more KVM updates from Paolo Bonzini:
 "The guest side of the asynchronous page fault work has been delayed to
  5.9 in order to sync with Thomas's interrupt entry rework, but here's
  the rest of the KVM updates for this merge window.

  MIPS:
   - Loongson port

  PPC:
   - Fixes

  ARM:
   - Fixes

  x86:
   - KVM_SET_USER_MEMORY_REGION optimizations
   - Fixes
   - Selftest fixes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (62 commits)
  KVM: x86: do not pass poisoned hva to __kvm_set_memory_region
  KVM: selftests: fix sync_with_host() in smm_test
  KVM: async_pf: Inject 'page ready' event only if 'page not present' was previously injected
  KVM: async_pf: Cleanup kvm_setup_async_pf()
  kvm: i8254: remove redundant assignment to pointer s
  KVM: x86: respect singlestep when emulating instruction
  KVM: selftests: Don't probe KVM_CAP_HYPERV_ENLIGHTENED_VMCS when nested VMX is unsupported
  KVM: selftests: do not substitute SVM/VMX check with KVM_CAP_NESTED_STATE check
  KVM: nVMX: Consult only the "basic" exit reason when routing nested exit
  KVM: arm64: Move hyp_symbol_addr() to kvm_asm.h
  KVM: arm64: Synchronize sysreg state on injecting an AArch32 exception
  KVM: arm64: Make vcpu_cp1x() work on Big Endian hosts
  KVM: arm64: Remove host_cpu_context member from vcpu structure
  KVM: arm64: Stop sparse from moaning at __hyp_this_cpu_ptr
  KVM: arm64: Handle PtrAuth traps early
  KVM: x86: Unexport x86_fpu_cache and make it static
  KVM: selftests: Ignore KVM 5-level paging support for VM_MODE_PXXV48_4K
  KVM: arm64: Save the host's PtrAuth keys in non-preemptible context
  KVM: arm64: Stop save/restoring ACTLR_EL1
  KVM: arm64: Add emulation for 32bit guests accessing ACTLR2
  ...
Committed by Linus Torvalds on 2020-06-12 11:05:52 -07:00
83 changed files with 1801 additions and 740 deletions

tools/testing/selftests/kvm/.gitignore

@@ -3,6 +3,7 @@
 /s390x/resets
 /s390x/sync_regs_test
 /x86_64/cr4_cpuid_sync_test
+/x86_64/debug_regs
 /x86_64/evmcs_test
 /x86_64/hyperv_cpuid
 /x86_64/mmio_warning_test

tools/testing/selftests/kvm/Makefile

@@ -83,7 +83,11 @@ LIBKVM += $(LIBKVM_$(UNAME_M))
 INSTALL_HDR_PATH = $(top_srcdir)/usr
 LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
 LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include
+ifeq ($(ARCH),x86_64)
+LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/x86/include
+else
 LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/$(ARCH)/include
+endif
 CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
 	-fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \
 	-I$(LINUX_TOOL_ARCH_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude \

tools/testing/selftests/kvm/include/x86_64/svm_util.h

@@ -33,6 +33,7 @@ struct svm_test_data {
 struct svm_test_data *vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva);
 void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp);
 void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa);
+bool nested_svm_supported(void);
 void nested_svm_check_supported(void);
 
 static inline bool cpu_has_svm(void)

tools/testing/selftests/kvm/include/x86_64/vmx.h

@@ -598,15 +598,12 @@ union vmx_ctrl_msr {
 	};
 };
 
-union vmx_basic basic;
-union vmx_ctrl_msr ctrl_pin_rev;
-union vmx_ctrl_msr ctrl_exit_rev;
-
 struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva);
 bool prepare_for_vmx_operation(struct vmx_pages *vmx);
 void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
 bool load_vmcs(struct vmx_pages *vmx);
+bool nested_vmx_supported(void);
 void nested_vmx_check_supported(void);
 void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,

tools/testing/selftests/kvm/lib/kvm_util.c

@@ -195,11 +195,18 @@ struct kvm_vm *_vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
 	case VM_MODE_PXXV48_4K:
 #ifdef __x86_64__
 		kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits);
-		TEST_ASSERT(vm->va_bits == 48, "Linear address width "
-			    "(%d bits) not supported", vm->va_bits);
+		/*
+		 * Ignore KVM support for 5-level paging (vm->va_bits == 57),
+		 * it doesn't take effect unless a CR4.LA57 is set, which it
+		 * isn't for this VM_MODE.
+		 */
+		TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57,
+			    "Linear address width (%d bits) not supported",
+			    vm->va_bits);
 		pr_debug("Guest physical address width detected: %d\n",
 			 vm->pa_bits);
 		vm->pgtable_levels = 4;
+		vm->va_bits = 48;
 #else
 		TEST_FAIL("VM_MODE_PXXV48_4K not supported on non-x86 platforms");
 #endif
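A note on the hunk above: with CR4.LA57 clear, the guest walks 4-level page tables and only 48 virtual-address bits are usable, even on hosts whose CPUID reports 57 bits; hence the assertion now accepts both widths and then forces vm->va_bits back to 48. A minimal standalone sketch of that invariant (the helper and program below are hypothetical, not part of the selftests):

#include <assert.h>
#include <stdint.h>

/* Hypothetical helper mirroring the hunk above: a VM_MODE_PXXV48_4K
 * guest always runs 4-level paging (CR4.LA57 clear), so host support
 * for 57 VA bits (5-level paging) does not take effect and the guest
 * still sees 48 usable bits. */
static uint8_t effective_guest_va_bits(uint8_t host_va_bits)
{
	/* x86-64 only ever reports one of these two widths. */
	assert(host_va_bits == 48 || host_va_bits == 57);
	return 48;
}

int main(void)
{
	assert(effective_guest_va_bits(48) == 48);
	assert(effective_guest_va_bits(57) == 48);
	return 0;
}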

tools/testing/selftests/kvm/lib/x86_64/svm.c

@@ -148,14 +148,18 @@ void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa)
: "r15", "memory");
}
void nested_svm_check_supported(void)
bool nested_svm_supported(void)
{
struct kvm_cpuid_entry2 *entry =
kvm_get_supported_cpuid_entry(0x80000001);
if (!(entry->ecx & CPUID_SVM)) {
return entry->ecx & CPUID_SVM;
}
void nested_svm_check_supported(void)
{
if (!nested_svm_supported()) {
print_skip("nested SVM not enabled");
exit(KSFT_SKIP);
}
}

tools/testing/selftests/kvm/lib/x86_64/vmx.c

@@ -379,11 +379,16 @@ void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp)
 	init_vmcs_guest_state(guest_rip, guest_rsp);
 }
 
-void nested_vmx_check_supported(void)
+bool nested_vmx_supported(void)
 {
 	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
 
-	if (!(entry->ecx & CPUID_VMX)) {
+	return entry->ecx & CPUID_VMX;
+}
+
+void nested_vmx_check_supported(void)
+{
+	if (!nested_vmx_supported()) {
 		print_skip("nested VMX not enabled");
 		exit(KSFT_SKIP);
 	}
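The SVM and VMX refactors above follow the same pattern: a pure predicate (nested_svm_supported()/nested_vmx_supported()) is split out of the old check-or-skip helper, which now simply wraps it. A hedged usage sketch, with a stand-in probe in place of the real CPUID query:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define KSFT_SKIP 4	/* kselftest "skip" exit code */

static bool nested_vmx_supported(void)
{
	return false;	/* stand-in probe; the real one checks CPUID.1:ECX for VMX */
}

static void nested_vmx_check_supported(void)
{
	if (!nested_vmx_supported()) {
		puts("nested VMX not enabled");
		exit(KSFT_SKIP);
	}
}

int main(void)
{
	/* A VMX-only test skips up front; a test that merely prefers
	 * VMX would branch on nested_vmx_supported() instead. */
	nested_vmx_check_supported();
	return 0;
}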

tools/testing/selftests/kvm/x86_64/evmcs_test.c

@@ -94,9 +94,10 @@ int main(int argc, char *argv[])
 	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
 
-	if (!kvm_check_cap(KVM_CAP_NESTED_STATE) ||
+	if (!nested_vmx_supported() ||
+	    !kvm_check_cap(KVM_CAP_NESTED_STATE) ||
 	    !kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
-		print_skip("capabilities not available");
+		print_skip("Enlightened VMCS is unsupported");
 		exit(KSFT_SKIP);
 	}

tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c

@@ -170,7 +170,8 @@ int main(int argc, char *argv[])
 	case 1:
 		break;
 	case 2:
-		if (!kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
+		if (!nested_vmx_supported() ||
+		    !kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
 			print_skip("Enlightened VMCS is unsupported");
 			continue;
 		}

tools/testing/selftests/kvm/x86_64/smm_test.c

@@ -47,10 +47,10 @@ uint8_t smi_handler[] = {
 	0x0f, 0xaa,		/* rsm */
 };
 
-void sync_with_host(uint64_t phase)
+static inline void sync_with_host(uint64_t phase)
 {
 	asm volatile("in $" XSTR(SYNC_PORT)", %%al \n"
-		     : : "a" (phase));
+		     : "+a" (phase));
 }
 
 void self_smi(void)
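Two things change in sync_with_host() above: it becomes static inline, and the operand list is corrected. The IN instruction loads %al from the port, so binding phase as an input-only "a" operand let the compiler assume %al survived the asm; "+a" declares it read-write. A compile-only sketch of the corrected constraint (port number and helper name are illustrative; userspace port I/O would fault, so this is for inspection, not execution):

#include <stdint.h>

static inline uint64_t sync_port_sketch(uint64_t phase)
{
	/* IN overwrites %al with the byte read from the port, so the
	 * variable living in %rax must be a read-write operand. */
	asm volatile("in $0xe0, %%al"
		     : "+a" (phase));
	return phase;
}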
@@ -118,16 +118,17 @@ int main(int argc, char *argv[])
 	vcpu_set_msr(vm, VCPU_ID, MSR_IA32_SMBASE, SMRAM_GPA);
 
 	if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
-		if (kvm_get_supported_cpuid_entry(0x80000001)->ecx & CPUID_SVM)
+		if (nested_svm_supported())
 			vcpu_alloc_svm(vm, &nested_gva);
-		else
+		else if (nested_vmx_supported())
 			vcpu_alloc_vmx(vm, &nested_gva);
-		vcpu_args_set(vm, VCPU_ID, 1, nested_gva);
-	} else {
-		pr_info("will skip SMM test with VMX enabled\n");
-		vcpu_args_set(vm, VCPU_ID, 1, 0);
 	}
 
+	if (!nested_gva)
+		pr_info("will skip SMM test with VMX enabled\n");
+
+	vcpu_args_set(vm, VCPU_ID, 1, nested_gva);
+
 	for (stage = 1;; stage++) {
 		_vcpu_run(vm, VCPU_ID);
 		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,

tools/testing/selftests/kvm/x86_64/state_test.c

@@ -171,16 +171,17 @@ int main(int argc, char *argv[])
 	vcpu_regs_get(vm, VCPU_ID, &regs1);
 
 	if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
-		if (kvm_get_supported_cpuid_entry(0x80000001)->ecx & CPUID_SVM)
+		if (nested_svm_supported())
 			vcpu_alloc_svm(vm, &nested_gva);
-		else
+		else if (nested_vmx_supported())
 			vcpu_alloc_vmx(vm, &nested_gva);
-		vcpu_args_set(vm, VCPU_ID, 1, nested_gva);
-	} else {
-		pr_info("will skip nested state checks\n");
-		vcpu_args_set(vm, VCPU_ID, 1, 0);
 	}
 
+	if (!nested_gva)
+		pr_info("will skip nested state checks\n");
+
+	vcpu_args_set(vm, VCPU_ID, 1, nested_gva);
+
 	for (stage = 1;; stage++) {
 		_vcpu_run(vm, VCPU_ID);
 		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
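smm_test and state_test get the same treatment above: allocation is attempted for SVM first, then VMX, and nested_gva doubles as the sentinel, so a zero value tells the guest to skip the nested stages instead of taking the old else branch unconditionally. A hedged distillation of that flow (all names below are stand-ins, not the selftest API):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long vm_vaddr_t;

static bool svm_ok;	/* stand-in for nested_svm_supported() */
static bool vmx_ok;	/* stand-in for nested_vmx_supported() */

static vm_vaddr_t alloc_svm_pages(void) { return 0x1000; }
static vm_vaddr_t alloc_vmx_pages(void) { return 0x2000; }

int main(void)
{
	vm_vaddr_t nested_gva = 0;

	if (svm_ok)
		nested_gva = alloc_svm_pages();
	else if (vmx_ok)
		nested_gva = alloc_vmx_pages();

	if (!nested_gva)
		puts("will skip nested state checks");

	/* The guest receives nested_gva either way; 0 means "skip". */
	return 0;
}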

tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c

@@ -31,6 +31,10 @@ bool l2_save_restore_done;
static u64 l2_vmx_pt_start;
volatile u64 l2_vmx_pt_finish;
union vmx_basic basic;
union vmx_ctrl_msr ctrl_pin_rev;
union vmx_ctrl_msr ctrl_exit_rev;
void l2_guest_code(void)
{
u64 vmx_pt_delta;