Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:
 "ARM:
   - support for SVE and Pointer Authentication in guests
   - PMU improvements

  POWER:
   - support for direct access to the POWER9 XIVE interrupt controller
   - memory and performance optimizations

  x86:
   - support for accessing memory not backed by struct page
   - fixes and refactoring

  Generic:
   - dirty page tracking improvements"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (155 commits)
  kvm: fix compilation on aarch64
  Revert "KVM: nVMX: Expose RDPMC-exiting only when guest supports PMU"
  kvm: x86: Fix L1TF mitigation for shadow MMU
  KVM: nVMX: Disable intercept for FS/GS base MSRs in vmcs02 when possible
  KVM: PPC: Book3S: Remove useless checks in 'release' method of KVM device
  KVM: PPC: Book3S HV: XIVE: Fix spelling mistake "acessing" -> "accessing"
  KVM: PPC: Book3S HV: Make sure to load LPID for radix VCPUs
  kvm: nVMX: Set nested_run_pending in vmx_set_nested_state after checks complete
  tests: kvm: Add tests for KVM_SET_NESTED_STATE
  KVM: nVMX: KVM_SET_NESTED_STATE - Tear down old EVMCS state before setting new state
  tests: kvm: Add tests for KVM_CAP_MAX_VCPUS and KVM_CAP_MAX_CPU_ID
  tests: kvm: Add tests to .gitignore
  KVM: Introduce KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
  KVM: Fix kvm_clear_dirty_log_protect off-by-(minus-)one
  KVM: Fix the bitmap range to copy during clear dirty
  KVM: arm64: Fix ptrauth ID register masking logic
  KVM: x86: use direct accessors for RIP and RSP
  KVM: VMX: Use accessors for GPRs outside of dedicated caching logic
  KVM: x86: Omit caching logic for always-available GPRs
  kvm, x86: Properly check whether a pfn is an MMIO or not
  ...
tools/testing/selftests/kvm/.gitignore (vendored, 7 lines changed)

@@ -1,9 +1,14 @@
 /x86_64/cr4_cpuid_sync_test
 /x86_64/evmcs_test
 /x86_64/hyperv_cpuid
+/x86_64/kvm_create_max_vcpus
 /x86_64/platform_info_test
 /x86_64/set_sregs_test
+/x86_64/smm_test
+/x86_64/state_test
 /x86_64/sync_regs_test
+/x86_64/vmx_close_while_nested_test
+/x86_64/vmx_set_nested_state_test
 /x86_64/vmx_tsc_adjust_test
-/x86_64/state_test
+/clear_dirty_log_test
 /dirty_log_test
tools/testing/selftests/kvm/Makefile

@@ -20,6 +20,8 @@ TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
 TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
 TEST_GEN_PROGS_x86_64 += x86_64/smm_test
+TEST_GEN_PROGS_x86_64 += x86_64/kvm_create_max_vcpus
+TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test
 TEST_GEN_PROGS_x86_64 += dirty_log_test
 TEST_GEN_PROGS_x86_64 += clear_dirty_log_test

tools/testing/selftests/kvm/dirty_log_test.c

@@ -314,7 +314,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
 #ifdef USE_CLEAR_DIRTY_LOG
 	struct kvm_enable_cap cap = {};
 
-	cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT;
+	cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
 	cap.args[0] = 1;
 	vm_enable_cap(vm, &cap);
 #endif

@@ -430,7 +430,7 @@ int main(int argc, char *argv[])
 	int opt, i;
 
 #ifdef USE_CLEAR_DIRTY_LOG
-	if (!kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT)) {
+	if (!kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2)) {
 		fprintf(stderr, "KVM_CLEAR_DIRTY_LOG not available, skipping tests\n");
 		exit(KSFT_SKIP);
 	}
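The only functional change in dirty_log_test.c is the rename of the capability to KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2. For context, a minimal sketch of what the manual dirty-log flow looks like from plain userspace once that capability is enabled; this is illustrative only, not part of the patch, and vm_fd, the memslot number, the page count and the bitmap buffer are placeholders:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/*
 * Illustrative sketch, not part of the patch: enable manual dirty-log
 * protection on a VM fd, then fetch the dirty bitmap and re-protect a
 * range of pages explicitly. Error handling is omitted.
 */
static void manual_dirty_log_example(int vm_fd, void *bitmap)
{
	struct kvm_enable_cap cap = {};
	struct kvm_dirty_log log = {};
	struct kvm_clear_dirty_log clear = {};

	cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
	cap.args[0] = 1;	/* GET no longer write-protects/clears the log */
	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);

	log.slot = 0;
	log.dirty_bitmap = bitmap;
	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);

	clear.slot = 0;
	clear.first_page = 0;
	clear.num_pages = 256;
	clear.dirty_bitmap = bitmap;
	ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &clear);	/* re-protect only these pages */
}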
tools/testing/selftests/kvm/include/kvm_util.h

@@ -118,6 +118,10 @@ void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
 		     struct kvm_vcpu_events *events);
 void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
 		     struct kvm_vcpu_events *events);
+void vcpu_nested_state_get(struct kvm_vm *vm, uint32_t vcpuid,
+			   struct kvm_nested_state *state);
+int vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid,
+			  struct kvm_nested_state *state, bool ignore_error);
 
 const char *exit_reason_str(unsigned int exit_reason);
 
tools/testing/selftests/kvm/lib/kvm_util.c

@@ -1250,6 +1250,38 @@ void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
 		ret, errno);
 }
 
+void vcpu_nested_state_get(struct kvm_vm *vm, uint32_t vcpuid,
+			   struct kvm_nested_state *state)
+{
+	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
+	int ret;
+
+	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
+
+	ret = ioctl(vcpu->fd, KVM_GET_NESTED_STATE, state);
+	TEST_ASSERT(ret == 0,
+		"KVM_GET_NESTED_STATE failed, ret: %i errno: %i",
+		ret, errno);
+}
+
+int vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid,
+			  struct kvm_nested_state *state, bool ignore_error)
+{
+	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
+	int ret;
+
+	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
+
+	ret = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, state);
+	if (!ignore_error) {
+		TEST_ASSERT(ret == 0,
+			"KVM_SET_NESTED_STATE failed, ret: %i errno: %i",
+			ret, errno);
+	}
+
+	return ret;
+}
+
 /*
  * VM VCPU System Regs Get
  *
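The new accessors follow the existing vcpu_events_get()/vcpu_events_set() pattern, except that the set path can hand back the raw ioctl result so a test can check for an expected errno instead of dying in TEST_ASSERT. A sketch of the intended calling convention, with buffer sizing borrowed from the test added below; the helper name is illustrative and not part of the patch:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <linux/kvm.h>

#include "kvm_util.h"

/*
 * Illustrative sketch, not part of the patch. KVM_GET_NESTED_STATE reads
 * state->size to learn the buffer capacity, so the allocation leaves room
 * for the vmcs12 blob behind the header.
 */
static void nested_state_roundtrip(struct kvm_vm *vm, uint32_t vcpuid)
{
	int state_sz = sizeof(struct kvm_nested_state) + getpagesize();
	struct kvm_nested_state *state = malloc(state_sz);
	int rv;

	memset(state, 0, state_sz);
	state->size = state_sz;

	/* Asserts internally on failure. */
	vcpu_nested_state_get(vm, vcpuid, state);

	/* ignore_error == true returns the raw ioctl result instead. */
	rv = vcpu_nested_state_set(vm, vcpuid, state, true);
	if (rv == -1)
		fprintf(stderr, "KVM_SET_NESTED_STATE rejected: errno %d\n", errno);

	free(state);
}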
tools/testing/selftests/kvm/x86_64/kvm_create_max_vcpus.c (new file, 70 lines)
@@ -0,0 +1,70 @@
/*
 * kvm_create_max_vcpus
 *
 * Copyright (C) 2019, Google LLC.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Test for KVM_CAP_MAX_VCPUS and KVM_CAP_MAX_VCPU_ID.
 */

#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "test_util.h"

#include "kvm_util.h"
#include "asm/kvm.h"
#include "linux/kvm.h"

void test_vcpu_creation(int first_vcpu_id, int num_vcpus)
{
	struct kvm_vm *vm;
	int i;

	printf("Testing creating %d vCPUs, with IDs %d...%d.\n",
	       num_vcpus, first_vcpu_id, first_vcpu_id + num_vcpus - 1);

	vm = vm_create(VM_MODE_P52V48_4K, DEFAULT_GUEST_PHY_PAGES, O_RDWR);

	for (i = 0; i < num_vcpus; i++) {
		int vcpu_id = first_vcpu_id + i;

		/* This asserts that the vCPU was created. */
		vm_vcpu_add(vm, vcpu_id, 0, 0);
	}

	kvm_vm_free(vm);
}

int main(int argc, char *argv[])
{
	int kvm_max_vcpu_id = kvm_check_cap(KVM_CAP_MAX_VCPU_ID);
	int kvm_max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);

	printf("KVM_CAP_MAX_VCPU_ID: %d\n", kvm_max_vcpu_id);
	printf("KVM_CAP_MAX_VCPUS: %d\n", kvm_max_vcpus);

	/*
	 * Upstream KVM prior to 4.8 does not support KVM_CAP_MAX_VCPU_ID.
	 * Userspace is supposed to use KVM_CAP_MAX_VCPUS as the maximum ID
	 * in this case.
	 */
	if (!kvm_max_vcpu_id)
		kvm_max_vcpu_id = kvm_max_vcpus;

	TEST_ASSERT(kvm_max_vcpu_id >= kvm_max_vcpus,
		    "KVM_MAX_VCPU_ID (%d) must be at least as large as KVM_MAX_VCPUS (%d).",
		    kvm_max_vcpu_id, kvm_max_vcpus);

	test_vcpu_creation(0, kvm_max_vcpus);

	if (kvm_max_vcpu_id > kvm_max_vcpus)
		test_vcpu_creation(
			kvm_max_vcpu_id - kvm_max_vcpus, kvm_max_vcpus);

	return 0;
}
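kvm_check_cap() in the selftest library is a thin wrapper around KVM_CHECK_EXTENSION on /dev/kvm, so the limits this test prints can also be probed without the harness. A rough stand-alone equivalent, for illustration only:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative sketch: query the same limits the selftest prints. */
int main(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);
	int max_vcpus, max_vcpu_id;

	if (kvm_fd < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	max_vcpus = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);
	max_vcpu_id = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPU_ID);

	/* Older kernels report 0 for KVM_CAP_MAX_VCPU_ID; fall back as the test does. */
	if (!max_vcpu_id)
		max_vcpu_id = max_vcpus;

	printf("KVM_CAP_MAX_VCPUS: %d, KVM_CAP_MAX_VCPU_ID: %d\n",
	       max_vcpus, max_vcpu_id);
	return 0;
}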
tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c (new file, 280 lines)
@@ -0,0 +1,280 @@
/*
 * vmx_set_nested_state_test
 *
 * Copyright (C) 2019, Google LLC.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * This test verifies the integrity of calling the ioctl KVM_SET_NESTED_STATE.
 */

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

#include <errno.h>
#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

/*
 * Mirror of VMCS12_REVISION in arch/x86/kvm/vmx/vmcs12.h. If that value
 * changes this should be updated.
 */
#define VMCS12_REVISION 0x11e57ed0
#define VCPU_ID 5

void test_nested_state(struct kvm_vm *vm, struct kvm_nested_state *state)
{
	volatile struct kvm_run *run;

	vcpu_nested_state_set(vm, VCPU_ID, state, false);
	run = vcpu_state(vm, VCPU_ID);
	vcpu_run(vm, VCPU_ID);
	TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
		"Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s),\n",
		run->exit_reason,
		exit_reason_str(run->exit_reason));
}

void test_nested_state_expect_errno(struct kvm_vm *vm,
				    struct kvm_nested_state *state,
				    int expected_errno)
{
	volatile struct kvm_run *run;
	int rv;

	rv = vcpu_nested_state_set(vm, VCPU_ID, state, true);
	TEST_ASSERT(rv == -1 && errno == expected_errno,
		"Expected %s (%d) from vcpu_nested_state_set but got rv: %i errno: %s (%d)",
		strerror(expected_errno), expected_errno, rv, strerror(errno),
		errno);
	run = vcpu_state(vm, VCPU_ID);
	vcpu_run(vm, VCPU_ID);
	TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
		"Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s),\n",
		run->exit_reason,
		exit_reason_str(run->exit_reason));
}

void test_nested_state_expect_einval(struct kvm_vm *vm,
				     struct kvm_nested_state *state)
{
	test_nested_state_expect_errno(vm, state, EINVAL);
}

void test_nested_state_expect_efault(struct kvm_vm *vm,
				     struct kvm_nested_state *state)
{
	test_nested_state_expect_errno(vm, state, EFAULT);
}

void set_revision_id_for_vmcs12(struct kvm_nested_state *state,
				u32 vmcs12_revision)
{
	/* Set revision_id in vmcs12 to vmcs12_revision. */
	*(u32 *)(state->data) = vmcs12_revision;
}

void set_default_state(struct kvm_nested_state *state)
{
	memset(state, 0, sizeof(*state));
	state->flags = KVM_STATE_NESTED_RUN_PENDING |
		       KVM_STATE_NESTED_GUEST_MODE;
	state->format = 0;
	state->size = sizeof(*state);
}

void set_default_vmx_state(struct kvm_nested_state *state, int size)
{
	memset(state, 0, size);
	state->flags = KVM_STATE_NESTED_GUEST_MODE |
		       KVM_STATE_NESTED_RUN_PENDING |
		       KVM_STATE_NESTED_EVMCS;
	state->format = 0;
	state->size = size;
	state->vmx.vmxon_pa = 0x1000;
	state->vmx.vmcs_pa = 0x2000;
	state->vmx.smm.flags = 0;
	set_revision_id_for_vmcs12(state, VMCS12_REVISION);
}

void test_vmx_nested_state(struct kvm_vm *vm)
{
	/* Add a page for VMCS12. */
	const int state_sz = sizeof(struct kvm_nested_state) + getpagesize();
	struct kvm_nested_state *state =
		(struct kvm_nested_state *)malloc(state_sz);

	/* The format must be set to 0. 0 for VMX, 1 for SVM. */
	set_default_vmx_state(state, state_sz);
	state->format = 1;
	test_nested_state_expect_einval(vm, state);

	/*
	 * We cannot virtualize anything if the guest does not have VMX
	 * enabled.
	 */
	set_default_vmx_state(state, state_sz);
	test_nested_state_expect_einval(vm, state);

	/*
	 * We cannot virtualize anything if the guest does not have VMX
	 * enabled. We expect KVM_SET_NESTED_STATE to return 0 if vmxon_pa
	 * is set to -1ull.
	 */
	set_default_vmx_state(state, state_sz);
	state->vmx.vmxon_pa = -1ull;
	test_nested_state(vm, state);

	/* Enable VMX in the guest CPUID. */
	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());

	/* It is invalid to have vmxon_pa == -1ull and SMM flags non-zero. */
	set_default_vmx_state(state, state_sz);
	state->vmx.vmxon_pa = -1ull;
	state->vmx.smm.flags = 1;
	test_nested_state_expect_einval(vm, state);

	/* It is invalid to have vmxon_pa == -1ull and vmcs_pa != -1ull. */
	set_default_vmx_state(state, state_sz);
	state->vmx.vmxon_pa = -1ull;
	state->vmx.vmcs_pa = 0;
	test_nested_state_expect_einval(vm, state);

	/*
	 * Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without
	 * setting the nested state.
	 */
	set_default_vmx_state(state, state_sz);
	state->vmx.vmxon_pa = -1ull;
	state->vmx.vmcs_pa = -1ull;
	test_nested_state(vm, state);

	/* It is invalid to have vmxon_pa set to a non-page aligned address. */
	set_default_vmx_state(state, state_sz);
	state->vmx.vmxon_pa = 1;
	test_nested_state_expect_einval(vm, state);

	/*
	 * It is invalid to have KVM_STATE_NESTED_SMM_GUEST_MODE and
	 * KVM_STATE_NESTED_GUEST_MODE set together.
	 */
	set_default_vmx_state(state, state_sz);
	state->flags = KVM_STATE_NESTED_GUEST_MODE |
		       KVM_STATE_NESTED_RUN_PENDING;
	state->vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
	test_nested_state_expect_einval(vm, state);

	/*
	 * It is invalid to have any of the SMM flags set besides:
	 *	KVM_STATE_NESTED_SMM_GUEST_MODE
	 *	KVM_STATE_NESTED_SMM_VMXON
	 */
	set_default_vmx_state(state, state_sz);
	state->vmx.smm.flags = ~(KVM_STATE_NESTED_SMM_GUEST_MODE |
				 KVM_STATE_NESTED_SMM_VMXON);
	test_nested_state_expect_einval(vm, state);

	/* Outside SMM, SMM flags must be zero. */
	set_default_vmx_state(state, state_sz);
	state->flags = 0;
	state->vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
	test_nested_state_expect_einval(vm, state);

	/* Size must be large enough to fit kvm_nested_state and vmcs12. */
	set_default_vmx_state(state, state_sz);
	state->size = sizeof(*state);
	test_nested_state(vm, state);

	/* vmxon_pa cannot be the same address as vmcs_pa. */
	set_default_vmx_state(state, state_sz);
	state->vmx.vmxon_pa = 0;
	state->vmx.vmcs_pa = 0;
	test_nested_state_expect_einval(vm, state);

	/* The revision id for vmcs12 must be VMCS12_REVISION. */
	set_default_vmx_state(state, state_sz);
	set_revision_id_for_vmcs12(state, 0);
	test_nested_state_expect_einval(vm, state);

	/*
	 * Test that if we leave nesting the state reflects that when we get
	 * it again.
	 */
	set_default_vmx_state(state, state_sz);
	state->vmx.vmxon_pa = -1ull;
	state->vmx.vmcs_pa = -1ull;
	state->flags = 0;
	test_nested_state(vm, state);
	vcpu_nested_state_get(vm, VCPU_ID, state);
	TEST_ASSERT(state->size >= sizeof(*state) && state->size <= state_sz,
		    "Size must be between %d and %d. The size returned was %d.",
		    sizeof(*state), state_sz, state->size);
	TEST_ASSERT(state->vmx.vmxon_pa == -1ull, "vmxon_pa must be -1ull.");
	TEST_ASSERT(state->vmx.vmcs_pa == -1ull, "vmcs_pa must be -1ull.");

	free(state);
}

int main(int argc, char *argv[])
{
	struct kvm_vm *vm;
	struct kvm_nested_state state;
	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);

	if (!kvm_check_cap(KVM_CAP_NESTED_STATE)) {
		printf("KVM_CAP_NESTED_STATE not available, skipping test\n");
		exit(KSFT_SKIP);
	}

	/*
	 * AMD currently does not implement set_nested_state, so for now we
	 * just early out.
	 */
	if (!(entry->ecx & CPUID_VMX)) {
		fprintf(stderr, "nested VMX not enabled, skipping test\n");
		exit(KSFT_SKIP);
	}

	vm = vm_create_default(VCPU_ID, 0, 0);

	/* Passing a NULL kvm_nested_state causes a EFAULT. */
	test_nested_state_expect_efault(vm, NULL);

	/* 'size' cannot be smaller than sizeof(kvm_nested_state). */
	set_default_state(&state);
	state.size = 0;
	test_nested_state_expect_einval(vm, &state);

	/*
	 * Setting the flags 0xf fails the flags check. The only flags that
	 * can be used are:
	 *     KVM_STATE_NESTED_GUEST_MODE
	 *     KVM_STATE_NESTED_RUN_PENDING
	 *     KVM_STATE_NESTED_EVMCS
	 */
	set_default_state(&state);
	state.flags = 0xf;
	test_nested_state_expect_einval(vm, &state);

	/*
	 * If KVM_STATE_NESTED_RUN_PENDING is set then
	 * KVM_STATE_NESTED_GUEST_MODE has to be set as well.
	 */
	set_default_state(&state);
	state.flags = KVM_STATE_NESTED_RUN_PENDING;
	test_nested_state_expect_einval(vm, &state);

	/*
	 * TODO: When SVM support is added for KVM_SET_NESTED_STATE
	 *       add tests here to support it like VMX.
	 */
	if (entry->ecx & CPUID_VMX)
		test_vmx_nested_state(vm);

	kvm_vm_free(vm);
	return 0;
}
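Outside the harness, the "leave nested mode" case the test ends with maps onto a small raw-ioctl sequence: flags cleared and both vmxon_pa and vmcs_pa set to -1ull. A minimal sketch under that assumption; vcpu_fd is a placeholder for an already-created vCPU file descriptor, and the field names follow the struct layout used by the test above:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/*
 * Illustrative sketch, not part of the patch: ask KVM to drop any nested
 * VMX state on a vCPU, mirroring the vmxon_pa == vmcs_pa == -1ull case
 * exercised by the test. Returns the raw ioctl result.
 */
static int clear_nested_state(int vcpu_fd)
{
	struct kvm_nested_state state;

	memset(&state, 0, sizeof(state));
	state.format = 0;		/* 0 == VMX */
	state.size = sizeof(state);	/* no vmcs12 payload needed */
	state.flags = 0;		/* not in guest mode, nothing pending */
	state.vmx.vmxon_pa = -1ull;
	state.vmx.vmcs_pa = -1ull;

	return ioctl(vcpu_fd, KVM_SET_NESTED_STATE, &state);
}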