Merge tag 'kvm-4.20-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Radim Krčmář:

 "ARM:
   - Improved guest IPA space support (32 to 52 bits)
   - RAS event delivery for 32bit
   - PMU fixes
   - Guest entry hardening
   - Various cleanups
   - Port of dirty_log_test selftest

  PPC:
   - Nested HV KVM support for radix guests on POWER9. The performance
     is much better than with PR KVM. Migration and arbitrary level of
     nesting is supported.
   - Disable nested HV-KVM on early POWER9 chips that need a particular
     hardware bug workaround
   - One VM per core mode to prevent potential data leaks
   - PCI pass-through optimization
   - merge ppc-kvm topic branch and kvm-ppc-fixes to get a better base

  s390:
   - Initial version of AP crypto virtualization via vfio-mdev
   - Improvement for vfio-ap
   - Set the host program identifier
   - Optimize page table locking

  x86:
   - Enable nested virtualization by default
   - Implement Hyper-V IPI hypercalls
   - Improve #PF and #DB handling
   - Allow guests to use Enlightened VMCS
   - Add migration selftests for VMCS and Enlightened VMCS
   - Allow coalesced PIO accesses
   - Add an option to perform nested VMCS host state consistency check
     through hardware
   - Automatic tuning of lapic_timer_advance_ns
   - Many fixes, minor improvements, and cleanups"

* tag 'kvm-4.20-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (204 commits)
  KVM/nVMX: Do not validate that posted_intr_desc_addr is page aligned
  Revert "kvm: x86: optimize dr6 restore"
  KVM: PPC: Optimize clearing TCEs for sparse tables
  x86/kvm/nVMX: tweak shadow fields
  selftests/kvm: add missing executables to .gitignore
  KVM: arm64: Safety check PSTATE when entering guest and handle IL
  KVM: PPC: Book3S HV: Don't use streamlined entry path on early POWER9 chips
  arm/arm64: KVM: Enable 32 bits kvm vcpu events support
  arm/arm64: KVM: Rename function kvm_arch_dev_ioctl_check_extension()
  KVM: arm64: Fix caching of host MDCR_EL2 value
  KVM: VMX: enable nested virtualization by default
  KVM/x86: Use 32bit xor to clear registers in svm.c
  kvm: x86: Introduce KVM_CAP_EXCEPTION_PAYLOAD
  kvm: vmx: Defer setting of DR6 until #DB delivery
  kvm: x86: Defer setting of CR2 until #PF delivery
  kvm: x86: Add payload operands to kvm_multiple_exception
  kvm: x86: Add exception payload fields to kvm_vcpu_events
  kvm: x86: Add has_payload and payload to kvm_queued_exception
  KVM: Documentation: Fix omission in struct kvm_vcpu_events
  KVM: selftests: add Enlightened VMCS test
  ...
tools/testing/selftests/kvm/.gitignore | 14
@@ -1,6 +1,8 @@
-cr4_cpuid_sync_test
-platform_info_test
-set_sregs_test
-sync_regs_test
-vmx_tsc_adjust_test
-state_test
+/x86_64/cr4_cpuid_sync_test
+/x86_64/evmcs_test
+/x86_64/platform_info_test
+/x86_64/set_sregs_test
+/x86_64/sync_regs_test
+/x86_64/vmx_tsc_adjust_test
+/x86_64/state_test
+/dirty_log_test
tools/testing/selftests/kvm/Makefile

@@ -1,26 +1,30 @@
 all:

-top_srcdir = ../../../../
+top_srcdir = ../../../..
 UNAME_M := $(shell uname -m)

-LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c
-LIBKVM_x86_64 = lib/x86.c lib/vmx.c
+LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/ucall.c lib/sparsebit.c
+LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c
+LIBKVM_aarch64 = lib/aarch64/processor.c

-TEST_GEN_PROGS_x86_64 = platform_info_test
-TEST_GEN_PROGS_x86_64 += set_sregs_test
-TEST_GEN_PROGS_x86_64 += sync_regs_test
-TEST_GEN_PROGS_x86_64 += vmx_tsc_adjust_test
-TEST_GEN_PROGS_x86_64 += cr4_cpuid_sync_test
-TEST_GEN_PROGS_x86_64 += state_test
+TEST_GEN_PROGS_x86_64 = x86_64/platform_info_test
+TEST_GEN_PROGS_x86_64 += x86_64/set_sregs_test
+TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
+TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
+TEST_GEN_PROGS_x86_64 += x86_64/cr4_cpuid_sync_test
+TEST_GEN_PROGS_x86_64 += x86_64/state_test
+TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
 TEST_GEN_PROGS_x86_64 += dirty_log_test

+TEST_GEN_PROGS_aarch64 += dirty_log_test
+
 TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M))
 LIBKVM += $(LIBKVM_$(UNAME_M))

 INSTALL_HDR_PATH = $(top_srcdir)/usr
 LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
-LINUX_TOOL_INCLUDE = $(top_srcdir)tools/include
-CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -I..
+LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include
+CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I..
 LDFLAGS += -pthread

 # After inclusion, $(OUTPUT) is defined and
@@ -29,7 +33,7 @@ include ../lib.mk

 STATIC_LIBS := $(OUTPUT)/libkvm.a
 LIBKVM_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM))
-EXTRA_CLEAN += $(LIBKVM_OBJ) $(STATIC_LIBS)
+EXTRA_CLEAN += $(LIBKVM_OBJ) $(STATIC_LIBS) cscope.*

 x := $(shell mkdir -p $(sort $(dir $(LIBKVM_OBJ))))
 $(LIBKVM_OBJ): $(OUTPUT)/%.o: %.c
@@ -41,3 +45,12 @@ $(OUTPUT)/libkvm.a: $(LIBKVM_OBJ)
 all: $(STATIC_LIBS)
 $(TEST_GEN_PROGS): $(STATIC_LIBS)
 $(STATIC_LIBS):| khdr
+
+cscope: include_paths = $(LINUX_TOOL_INCLUDE) $(LINUX_HDR_PATH) include lib ..
+cscope:
+	$(RM) cscope.*
+	(find $(include_paths) -name '*.h' \
+		-exec realpath --relative-base=$(PWD) {} \;; \
+	find . -name '*.c' \
+		-exec realpath --relative-base=$(PWD) {} \;) | sort -u > cscope.files
+	cscope -b
tools/testing/selftests/kvm/dirty_log_test.c

@@ -5,6 +5,8 @@
  * Copyright (C) 2018, Red Hat, Inc.
  */

+#define _GNU_SOURCE /* for program_invocation_name */
+
 #include <stdio.h>
 #include <stdlib.h>
 #include <unistd.h>
@@ -15,76 +17,78 @@

 #include "test_util.h"
 #include "kvm_util.h"
+#include "processor.h"

 #define DEBUG printf

 #define VCPU_ID 1

 /* The memory slot index to track dirty pages */
 #define TEST_MEM_SLOT_INDEX 1
-/*
- * GPA offset of the testing memory slot. Must be bigger than the
- * default vm mem slot, which is DEFAULT_GUEST_PHY_PAGES.
- */
-#define TEST_MEM_OFFSET (1ULL << 30) /* 1G */
-/* Size of the testing memory slot */
-#define TEST_MEM_PAGES (1ULL << 18) /* 1G for 4K pages */
+
+/* Default guest test memory offset, 1G */
+#define DEFAULT_GUEST_TEST_MEM 0x40000000

 /* How many pages to dirty for each guest loop */
 #define TEST_PAGES_PER_LOOP 1024

 /* How many host loops to run (one KVM_GET_DIRTY_LOG for each loop) */
 #define TEST_HOST_LOOP_N 32

 /* Interval for each host loop (ms) */
 #define TEST_HOST_LOOP_INTERVAL 10

 /*
- * Guest variables. We use these variables to share data between host
- * and guest. There are two copies of the variables, one in host memory
- * (which is unused) and one in guest memory. When the host wants to
- * access these variables, it needs to call addr_gva2hva() to access the
- * guest copy.
+ * Guest/Host shared variables. Ensure addr_gva2hva() and/or
+ * sync_global_to/from_guest() are used when accessing from
+ * the host. READ/WRITE_ONCE() should also be used with anything
+ * that may change.
  */
-uint64_t guest_random_array[TEST_PAGES_PER_LOOP];
-uint64_t guest_iteration;
-uint64_t guest_page_size;
+static uint64_t host_page_size;
+static uint64_t guest_page_size;
+static uint64_t guest_num_pages;
+static uint64_t random_array[TEST_PAGES_PER_LOOP];
+static uint64_t iteration;

 /*
- * Writes to the first byte of a random page within the testing memory
- * region continuously.
+ * GPA offset of the testing memory slot. Must be bigger than
+ * DEFAULT_GUEST_PHY_PAGES.
  */
-void guest_code(void)
+static uint64_t guest_test_mem = DEFAULT_GUEST_TEST_MEM;
+
+/*
+ * Continuously write to the first 8 bytes of a random pages within
+ * the testing memory region.
+ */
+static void guest_code(void)
 {
-	int i = 0;
-	uint64_t volatile *array = guest_random_array;
-	uint64_t volatile *guest_addr;
+	int i;

 	while (true) {
 		for (i = 0; i < TEST_PAGES_PER_LOOP; i++) {
-			/*
-			 * Write to the first 8 bytes of a random page
-			 * on the testing memory region.
-			 */
-			guest_addr = (uint64_t *)
-				(TEST_MEM_OFFSET +
-				 (array[i] % TEST_MEM_PAGES) * guest_page_size);
-			*guest_addr = guest_iteration;
+			uint64_t addr = guest_test_mem;
+			addr += (READ_ONCE(random_array[i]) % guest_num_pages)
+				* guest_page_size;
+			addr &= ~(host_page_size - 1);
+			*(uint64_t *)addr = READ_ONCE(iteration);
 		}
+
 		/* Tell the host that we need more random numbers */
 		GUEST_SYNC(1);
 	}
 }

-/*
- * Host variables. These variables should only be used by the host
- * rather than the guest.
- */
-bool host_quit;
+/* Host variables */
+static bool host_quit;

 /* Points to the test VM memory region on which we track dirty logs */
-void *host_test_mem;
+static void *host_test_mem;
+static uint64_t host_num_pages;

 /* For statistics only */
-uint64_t host_dirty_count;
-uint64_t host_clear_count;
-uint64_t host_track_next_count;
+static uint64_t host_dirty_count;
+static uint64_t host_clear_count;
+static uint64_t host_track_next_count;

 /*
  * We use this bitmap to track some pages that should have its dirty
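To make the shared-variable scheme above concrete: the test binary doubles as the guest image, so a global like these ends up at the same virtual address on both sides, and the host can reach the guest's copy either through addr_gva2hva() or with the sync_global_to/from_guest() helpers this series adds to kvm_util.h. A minimal illustrative sketch, not part of the commit (`counter` is a made-up global):

static uint64_t counter;	/* one host copy, one guest copy */

static void example_host_access(struct kvm_vm *vm)
{
	/* Write the guest's copy directly through its HVA mapping. */
	uint64_t *guest_copy = addr_gva2hva(vm, (vm_vaddr_t)&counter);
	*guest_copy = 42;

	/* Equivalent: update the host copy, then push it to the guest. */
	counter = 42;
	sync_global_to_guest(vm, counter);

	/* Pull the guest's value back after the vcpu has run. */
	sync_global_from_guest(vm, counter);
}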
@@ -93,40 +97,34 @@ uint64_t host_track_next_count;
  * page bit is cleared in the latest bitmap, then the system must
  * report that write in the next get dirty log call.
  */
-unsigned long *host_bmap_track;
+static unsigned long *host_bmap_track;

-void generate_random_array(uint64_t *guest_array, uint64_t size)
+static void generate_random_array(uint64_t *guest_array, uint64_t size)
 {
 	uint64_t i;

-	for (i = 0; i < size; i++) {
+	for (i = 0; i < size; i++)
 		guest_array[i] = random();
-	}
 }

-void *vcpu_worker(void *data)
+static void *vcpu_worker(void *data)
 {
 	int ret;
-	uint64_t loops, *guest_array, pages_count = 0;
 	struct kvm_vm *vm = data;
+	uint64_t *guest_array;
+	uint64_t pages_count = 0;
 	struct kvm_run *run;
-	struct guest_args args;
+	struct ucall uc;

 	run = vcpu_state(vm, VCPU_ID);

-	/* Retrieve the guest random array pointer and cache it */
-	guest_array = addr_gva2hva(vm, (vm_vaddr_t)guest_random_array);
-
-	DEBUG("VCPU starts\n");
-
+	guest_array = addr_gva2hva(vm, (vm_vaddr_t)random_array);
 	generate_random_array(guest_array, TEST_PAGES_PER_LOOP);

 	while (!READ_ONCE(host_quit)) {
-		/* Let the guest to dirty these random pages */
+		/* Let the guest dirty the random pages */
 		ret = _vcpu_run(vm, VCPU_ID);
-		guest_args_read(vm, VCPU_ID, &args);
-		if (run->exit_reason == KVM_EXIT_IO &&
-		    args.port == GUEST_PORT_SYNC) {
+		if (get_ucall(vm, VCPU_ID, &uc) == UCALL_SYNC) {
 			pages_count += TEST_PAGES_PER_LOOP;
 			generate_random_array(guest_array, TEST_PAGES_PER_LOOP);
 		} else {
@@ -137,18 +135,20 @@ void *vcpu_worker(void *data)
 		}
 	}

-	DEBUG("VCPU exits, dirtied %"PRIu64" pages\n", pages_count);
+	DEBUG("Dirtied %"PRIu64" pages\n", pages_count);

 	return NULL;
 }

-void vm_dirty_log_verify(unsigned long *bmap, uint64_t iteration)
+static void vm_dirty_log_verify(unsigned long *bmap)
 {
 	uint64_t page;
-	uint64_t volatile *value_ptr;
+	uint64_t *value_ptr;
+	uint64_t step = host_page_size >= guest_page_size ? 1 :
+			guest_page_size / host_page_size;

-	for (page = 0; page < TEST_MEM_PAGES; page++) {
-		value_ptr = host_test_mem + page * getpagesize();
+	for (page = 0; page < host_num_pages; page += step) {
+		value_ptr = host_test_mem + page * host_page_size;

 		/* If this is a special page that we were tracking... */
 		if (test_and_clear_bit(page, host_bmap_track)) {
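The step computation above is easiest to check with numbers. guest_code() writes 8 bytes at a guest-page-aligned address, so when guest pages are bigger than host pages only the first host-sized piece of each guest page is ever dirtied. A worked case, assuming a 4K host running a 64K-page guest:

/*
 *   host_page_size  = 0x1000   (4K)
 *   guest_page_size = 0x10000  (64K)
 *   step = guest_page_size / host_page_size = 16
 *
 * so vm_dirty_log_verify() only inspects host pages 0, 16, 32, ...,
 * i.e. the first 4K of every 64K guest page, which is exactly where
 * the guest's 8-byte write landed. With host pages >= guest pages,
 * step stays 1 and every host page is checked.
 */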
@@ -208,88 +208,117 @@ void vm_dirty_log_verify(unsigned long *bmap, uint64_t iteration)
 	}
 }

-void help(char *name)
+static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid,
+				uint64_t extra_mem_pages, void *guest_code)
 {
-	puts("");
-	printf("usage: %s [-i iterations] [-I interval] [-h]\n", name);
-	puts("");
-	printf(" -i: specify iteration counts (default: %"PRIu64")\n",
-	       TEST_HOST_LOOP_N);
-	printf(" -I: specify interval in ms (default: %"PRIu64" ms)\n",
-	       TEST_HOST_LOOP_INTERVAL);
-	puts("");
-	exit(0);
+	struct kvm_vm *vm;
+	uint64_t extra_pg_pages = extra_mem_pages / 512 * 2;
+
+	vm = vm_create(mode, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);
+	kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
+#ifdef __x86_64__
+	vm_create_irqchip(vm);
+#endif
+	vm_vcpu_add_default(vm, vcpuid, guest_code);
+	return vm;
 }

-int main(int argc, char *argv[])
+static void run_test(enum vm_guest_mode mode, unsigned long iterations,
+		     unsigned long interval, bool top_offset)
 {
+	unsigned int guest_pa_bits, guest_page_shift;
 	pthread_t vcpu_thread;
 	struct kvm_vm *vm;
-	uint64_t volatile *psize, *iteration;
-	unsigned long *bmap, iterations = TEST_HOST_LOOP_N,
-	    interval = TEST_HOST_LOOP_INTERVAL;
-	int opt;
+	uint64_t max_gfn;
+	unsigned long *bmap;

-	while ((opt = getopt(argc, argv, "hi:I:")) != -1) {
-		switch (opt) {
-		case 'i':
-			iterations = strtol(optarg, NULL, 10);
-			break;
-		case 'I':
-			interval = strtol(optarg, NULL, 10);
-			break;
-		case 'h':
-		default:
-			help(argv[0]);
-			break;
-		}
+	switch (mode) {
+	case VM_MODE_P52V48_4K:
+		guest_pa_bits = 52;
+		guest_page_shift = 12;
+		break;
+	case VM_MODE_P52V48_64K:
+		guest_pa_bits = 52;
+		guest_page_shift = 16;
+		break;
+	case VM_MODE_P40V48_4K:
+		guest_pa_bits = 40;
+		guest_page_shift = 12;
+		break;
+	case VM_MODE_P40V48_64K:
+		guest_pa_bits = 40;
+		guest_page_shift = 16;
+		break;
+	default:
+		TEST_ASSERT(false, "Unknown guest mode, mode: 0x%x", mode);
 	}

-	TEST_ASSERT(iterations > 2, "Iteration must be bigger than zero\n");
-	TEST_ASSERT(interval > 0, "Interval must be bigger than zero");
+	DEBUG("Testing guest mode: %s\n", vm_guest_mode_string(mode));

-	DEBUG("Test iterations: %"PRIu64", interval: %"PRIu64" (ms)\n",
-	      iterations, interval);
+	max_gfn = (1ul << (guest_pa_bits - guest_page_shift)) - 1;
+	guest_page_size = (1ul << guest_page_shift);
+	/* 1G of guest page sized pages */
+	guest_num_pages = (1ul << (30 - guest_page_shift));
+	host_page_size = getpagesize();
+	host_num_pages = (guest_num_pages * guest_page_size) / host_page_size +
+			 !!((guest_num_pages * guest_page_size) % host_page_size);

-	srandom(time(0));
+	if (top_offset) {
+		guest_test_mem = (max_gfn - guest_num_pages) * guest_page_size;
+		guest_test_mem &= ~(host_page_size - 1);
+	}

-	bmap = bitmap_alloc(TEST_MEM_PAGES);
-	host_bmap_track = bitmap_alloc(TEST_MEM_PAGES);
+	DEBUG("guest test mem offset: 0x%lx\n", guest_test_mem);

-	vm = vm_create_default(VCPU_ID, TEST_MEM_PAGES, guest_code);
+	bmap = bitmap_alloc(host_num_pages);
+	host_bmap_track = bitmap_alloc(host_num_pages);
+
+	vm = create_vm(mode, VCPU_ID, guest_num_pages, guest_code);

 	/* Add an extra memory slot for testing dirty logging */
 	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
-				    TEST_MEM_OFFSET,
+				    guest_test_mem,
 				    TEST_MEM_SLOT_INDEX,
-				    TEST_MEM_PAGES,
+				    guest_num_pages,
 				    KVM_MEM_LOG_DIRTY_PAGES);
-	/* Cache the HVA pointer of the region */
-	host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)TEST_MEM_OFFSET);

 	/* Do 1:1 mapping for the dirty track memory slot */
-	virt_map(vm, TEST_MEM_OFFSET, TEST_MEM_OFFSET,
-		 TEST_MEM_PAGES * getpagesize(), 0);
+	virt_map(vm, guest_test_mem, guest_test_mem,
+		 guest_num_pages * guest_page_size, 0);

+	/* Cache the HVA pointer of the region */
+	host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_mem);
+
+#ifdef __x86_64__
 	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+#endif
+#ifdef __aarch64__
+	ucall_init(vm, UCALL_MMIO, NULL);
+#endif

-	/* Tell the guest about the page size on the system */
-	psize = addr_gva2hva(vm, (vm_vaddr_t)&guest_page_size);
-	*psize = getpagesize();
+	/* Export the shared variables to the guest */
+	sync_global_to_guest(vm, host_page_size);
+	sync_global_to_guest(vm, guest_page_size);
+	sync_global_to_guest(vm, guest_test_mem);
+	sync_global_to_guest(vm, guest_num_pages);

 	/* Start the iterations */
-	iteration = addr_gva2hva(vm, (vm_vaddr_t)&guest_iteration);
-	*iteration = 1;
+	iteration = 1;
+	sync_global_to_guest(vm, iteration);
+	host_quit = false;
+	host_dirty_count = 0;
+	host_clear_count = 0;
+	host_track_next_count = 0;

 	/* Start dirtying pages */
 	pthread_create(&vcpu_thread, NULL, vcpu_worker, vm);

-	while (*iteration < iterations) {
+	while (iteration < iterations) {
 		/* Give the vcpu thread some time to dirty some pages */
 		usleep(interval * 1000);
 		kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
-		vm_dirty_log_verify(bmap, *iteration);
-		(*iteration)++;
+		vm_dirty_log_verify(bmap);
+		iteration++;
+		sync_global_to_guest(vm, iteration);
 	}

 	/* Tell the vcpu thread to quit */
@@ -302,7 +331,118 @@ int main(int argc, char *argv[])

 	free(bmap);
 	free(host_bmap_track);
+	ucall_uninit(vm);
 	kvm_vm_free(vm);
+}
+
+static struct vm_guest_modes {
+	enum vm_guest_mode mode;
+	bool supported;
+	bool enabled;
+} vm_guest_modes[NUM_VM_MODES] = {
+#if defined(__x86_64__)
+	{ VM_MODE_P52V48_4K,	1, 1, },
+	{ VM_MODE_P52V48_64K,	0, 0, },
+	{ VM_MODE_P40V48_4K,	0, 0, },
+	{ VM_MODE_P40V48_64K,	0, 0, },
+#elif defined(__aarch64__)
+	{ VM_MODE_P52V48_4K,	0, 0, },
+	{ VM_MODE_P52V48_64K,	0, 0, },
+	{ VM_MODE_P40V48_4K,	1, 1, },
+	{ VM_MODE_P40V48_64K,	1, 1, },
+#endif
+};
+
+static void help(char *name)
+{
+	int i;
+
+	puts("");
+	printf("usage: %s [-h] [-i iterations] [-I interval] "
+	       "[-o offset] [-t] [-m mode]\n", name);
+	puts("");
+	printf(" -i: specify iteration counts (default: %"PRIu64")\n",
+	       TEST_HOST_LOOP_N);
+	printf(" -I: specify interval in ms (default: %"PRIu64" ms)\n",
+	       TEST_HOST_LOOP_INTERVAL);
+	printf(" -o: guest test memory offset (default: 0x%lx)\n",
+	       DEFAULT_GUEST_TEST_MEM);
+	printf(" -t: map guest test memory at the top of the allowed "
+	       "physical address range\n");
+	printf(" -m: specify the guest mode ID to test "
+	       "(default: test all supported modes)\n"
+	       "     This option may be used multiple times.\n"
+	       "     Guest mode IDs:\n");
+	for (i = 0; i < NUM_VM_MODES; ++i) {
+		printf("         %d:    %s%s\n",
+		       vm_guest_modes[i].mode,
+		       vm_guest_mode_string(vm_guest_modes[i].mode),
+		       vm_guest_modes[i].supported ? " (supported)" : "");
+	}
+	puts("");
+	exit(0);
+}
+
+int main(int argc, char *argv[])
+{
+	unsigned long iterations = TEST_HOST_LOOP_N;
+	unsigned long interval = TEST_HOST_LOOP_INTERVAL;
+	bool mode_selected = false;
+	bool top_offset = false;
+	unsigned int mode;
+	int opt, i;
+
+	while ((opt = getopt(argc, argv, "hi:I:o:tm:")) != -1) {
+		switch (opt) {
+		case 'i':
+			iterations = strtol(optarg, NULL, 10);
+			break;
+		case 'I':
+			interval = strtol(optarg, NULL, 10);
+			break;
+		case 'o':
+			guest_test_mem = strtoull(optarg, NULL, 0);
+			break;
+		case 't':
+			top_offset = true;
+			break;
+		case 'm':
+			if (!mode_selected) {
+				for (i = 0; i < NUM_VM_MODES; ++i)
+					vm_guest_modes[i].enabled = 0;
+				mode_selected = true;
+			}
+			mode = strtoul(optarg, NULL, 10);
+			TEST_ASSERT(mode < NUM_VM_MODES,
+				    "Guest mode ID %d too big", mode);
+			vm_guest_modes[mode].enabled = 1;
+			break;
+		case 'h':
+		default:
+			help(argv[0]);
+			break;
+		}
+	}
+
+	TEST_ASSERT(iterations > 2, "Iterations must be greater than two");
+	TEST_ASSERT(interval > 0, "Interval must be greater than zero");
+	TEST_ASSERT(!top_offset || guest_test_mem == DEFAULT_GUEST_TEST_MEM,
+		    "Cannot use both -o [offset] and -t at the same time");
+
+	DEBUG("Test iterations: %"PRIu64", interval: %"PRIu64" (ms)\n",
+	      iterations, interval);
+
+	srandom(time(0));
+
+	for (i = 0; i < NUM_VM_MODES; ++i) {
+		if (!vm_guest_modes[i].enabled)
+			continue;
+		TEST_ASSERT(vm_guest_modes[i].supported,
+			    "Guest mode ID %d (%s) not supported.",
+			    vm_guest_modes[i].mode,
+			    vm_guest_mode_string(vm_guest_modes[i].mode));
+		run_test(vm_guest_modes[i].mode, iterations, interval, top_offset);
+	}

 	return 0;
 }
tools/testing/selftests/kvm/include/aarch64/processor.h | 55 (new file)

@@ -0,0 +1,55 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * AArch64 processor specific defines
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */
#ifndef SELFTEST_KVM_PROCESSOR_H
#define SELFTEST_KVM_PROCESSOR_H

#include "kvm_util.h"


#define ARM64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
			   KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define CPACR_EL1 3, 0, 1, 0, 2
#define TCR_EL1   3, 0, 2, 0, 2
#define MAIR_EL1  3, 0, 10, 2, 0
#define TTBR0_EL1 3, 0, 2, 0, 0
#define SCTLR_EL1 3, 0, 1, 0, 0

/*
 * Default MAIR
 *                  index   attribute
 * DEVICE_nGnRnE      0     0000:0000
 * DEVICE_nGnRE       1     0000:0100
 * DEVICE_GRE         2     0000:1100
 * NORMAL_NC          3     0100:0100
 * NORMAL             4     1111:1111
 * NORMAL_WT          5     1011:1011
 */
#define DEFAULT_MAIR_EL1 ((0x00ul << (0 * 8)) | \
			  (0x04ul << (1 * 8)) | \
			  (0x0cul << (2 * 8)) | \
			  (0x44ul << (3 * 8)) | \
			  (0xfful << (4 * 8)) | \
			  (0xbbul << (5 * 8)))

static inline void get_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id, uint64_t *addr)
{
	struct kvm_one_reg reg;
	reg.id = id;
	reg.addr = (uint64_t)addr;
	vcpu_ioctl(vm, vcpuid, KVM_GET_ONE_REG, &reg);
}

static inline void set_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg;
	reg.id = id;
	reg.addr = (uint64_t)&val;
	vcpu_ioctl(vm, vcpuid, KVM_SET_ONE_REG, &reg);
}

#endif /* SELFTEST_KVM_PROCESSOR_H */
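For a sense of how these helpers get used, here is a condensed sketch in the style of lib/aarch64/processor.c further down this diff. ARM64_SYS_REG() (used there to encode the sysreg coordinates defined above) and TEST_ASSERT() come from the selftests library; `entry` is a made-up guest entry point:

static void example_set_entry(struct kvm_vm *vm, uint32_t vcpuid, void *entry)
{
	uint64_t pc;

	/* Core registers are addressed via ARM64_CORE_REG(). */
	set_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), (uint64_t)entry);
	get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), &pc);
	TEST_ASSERT(pc == (uint64_t)entry, "PC readback mismatch: 0x%lx", pc);
}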
tools/testing/selftests/kvm/include/evmcs.h | 1098 (new file)
[File diff suppressed because it is too large]
tools/testing/selftests/kvm/include/kvm_util.h

@@ -7,7 +7,7 @@
  *
  */
 #ifndef SELFTEST_KVM_UTIL_H
-#define SELFTEST_KVM_UTIL_H 1
+#define SELFTEST_KVM_UTIL_H

 #include "test_util.h"

@@ -17,12 +17,6 @@

 #include "sparsebit.h"

-/*
- * Memslots can't cover the gfn starting at this gpa otherwise vCPUs can't be
- * created. Only applies to VMs using EPT.
- */
-#define KVM_DEFAULT_IDENTITY_MAP_ADDRESS 0xfffbc000ul
-

 /* Callers of kvm_util only have an incomplete/opaque description of the
  * structure kvm_util is using to maintain the state of a VM.
@@ -33,16 +27,23 @@ typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */
 typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */

 /* Minimum allocated guest virtual and physical addresses */
 #define KVM_UTIL_MIN_VADDR 0x2000

 #define DEFAULT_GUEST_PHY_PAGES 512
 #define DEFAULT_GUEST_STACK_VADDR_MIN 0xab6000
 #define DEFAULT_STACK_PGS 5

 enum vm_guest_mode {
-	VM_MODE_FLAT48PG,
+	VM_MODE_P52V48_4K,
+	VM_MODE_P52V48_64K,
+	VM_MODE_P40V48_4K,
+	VM_MODE_P40V48_64K,
+	NUM_VM_MODES,
 };

+#define vm_guest_mode_string(m) vm_guest_mode_string[m]
+extern const char * const vm_guest_mode_string[];
+
 enum vm_mem_backing_src_type {
 	VM_MEM_SRC_ANONYMOUS,
 	VM_MEM_SRC_ANONYMOUS_THP,
@@ -58,15 +59,15 @@ void kvm_vm_restart(struct kvm_vm *vmp, int perm);
 void kvm_vm_release(struct kvm_vm *vmp);
 void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log);

-int kvm_memcmp_hva_gva(void *hva,
-		       struct kvm_vm *vm, const vm_vaddr_t gva, size_t len);
+int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
+		       size_t len);

 void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename,
-	uint32_t data_memslot, uint32_t pgd_memslot);
+		     uint32_t data_memslot, uint32_t pgd_memslot);

 void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
-void vcpu_dump(FILE *stream, struct kvm_vm *vm,
-	       uint32_t vcpuid, uint8_t indent);
+void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid,
+	       uint8_t indent);

 void vm_create_irqchip(struct kvm_vm *vm);

@@ -75,13 +76,14 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
 	uint64_t guest_paddr, uint32_t slot, uint64_t npages,
 	uint32_t flags);

-void vcpu_ioctl(struct kvm_vm *vm,
-		uint32_t vcpuid, unsigned long ioctl, void *arg);
+void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long ioctl,
+		void *arg);
 void vm_ioctl(struct kvm_vm *vm, unsigned long ioctl, void *arg);
 void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
-void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, int pgd_memslot, int gdt_memslot);
+void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, int pgd_memslot,
+		 int gdt_memslot);
 vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
-	uint32_t data_memslot, uint32_t pgd_memslot);
+			  uint32_t data_memslot, uint32_t pgd_memslot);
 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
 	      size_t size, uint32_t pgd_memslot);
 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
@@ -93,56 +95,35 @@ struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid);
 void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
 int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
 void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
 		       struct kvm_mp_state *mp_state);
-void vcpu_regs_get(struct kvm_vm *vm,
-		   uint32_t vcpuid, struct kvm_regs *regs);
-void vcpu_regs_set(struct kvm_vm *vm,
-		   uint32_t vcpuid, struct kvm_regs *regs);
+void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs);
+void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs);
 void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...);
-void vcpu_sregs_get(struct kvm_vm *vm,
-		    uint32_t vcpuid, struct kvm_sregs *sregs);
-void vcpu_sregs_set(struct kvm_vm *vm,
-		    uint32_t vcpuid, struct kvm_sregs *sregs);
-int _vcpu_sregs_set(struct kvm_vm *vm,
-		    uint32_t vcpuid, struct kvm_sregs *sregs);
+void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid,
+		    struct kvm_sregs *sregs);
+void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid,
+		    struct kvm_sregs *sregs);
+int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid,
+		    struct kvm_sregs *sregs);
 void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
 		     struct kvm_vcpu_events *events);
 void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
 		     struct kvm_vcpu_events *events);
-uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index);
-void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
-		  uint64_t msr_value);

 const char *exit_reason_str(unsigned int exit_reason);

 void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot);
 void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
-	uint32_t pgd_memslot);
-vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm,
-	vm_paddr_t paddr_min, uint32_t memslot);
-
-struct kvm_cpuid2 *kvm_get_supported_cpuid(void);
-void vcpu_set_cpuid(
-	struct kvm_vm *vm, uint32_t vcpuid, struct kvm_cpuid2 *cpuid);
-
-struct kvm_cpuid_entry2 *
-kvm_get_supported_cpuid_index(uint32_t function, uint32_t index);
-
-static inline struct kvm_cpuid_entry2 *
-kvm_get_supported_cpuid_entry(uint32_t function)
-{
-	return kvm_get_supported_cpuid_index(function, 0);
-}
+		 uint32_t pgd_memslot);
+vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
+			     uint32_t memslot);
+vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+			      vm_paddr_t paddr_min, uint32_t memslot);

 struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_size,
 				 void *guest_code);
 void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code);

-typedef void (*vmx_guest_code_t)(vm_vaddr_t vmxon_vaddr,
-				 vm_paddr_t vmxon_paddr,
-				 vm_vaddr_t vmcs_vaddr,
-				 vm_paddr_t vmcs_paddr);
-
 struct kvm_userspace_memory_region *
 kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
 				 uint64_t end);
@@ -152,43 +133,49 @@ allocate_kvm_dirty_log(struct kvm_userspace_memory_region *region);

 int vm_create_device(struct kvm_vm *vm, struct kvm_create_device *cd);

-#define GUEST_PORT_SYNC		0x1000
-#define GUEST_PORT_ABORT	0x1001
-#define GUEST_PORT_DONE		0x1002
-
-static inline void __exit_to_l0(uint16_t port, uint64_t arg0, uint64_t arg1)
-{
-	__asm__ __volatile__("in %[port], %%al"
-		:
-		: [port]"d"(port), "D"(arg0), "S"(arg1)
-		: "rax");
-}
-
-/*
- * Allows to pass three arguments to the host: port is 16bit wide,
- * arg0 & arg1 are 64bit wide
- */
-#define GUEST_SYNC_ARGS(_port, _arg0, _arg1) \
-	__exit_to_l0(_port, (uint64_t) (_arg0), (uint64_t) (_arg1))
-
-#define GUEST_ASSERT(_condition) do { \
-	if (!(_condition)) \
-		GUEST_SYNC_ARGS(GUEST_PORT_ABORT, \
-				"Failed guest assert: " \
-				#_condition, __LINE__); \
-} while (0)
-
-#define GUEST_SYNC(stage) GUEST_SYNC_ARGS(GUEST_PORT_SYNC, "hello", stage)
-
-#define GUEST_DONE() GUEST_SYNC_ARGS(GUEST_PORT_DONE, 0, 0)
-
-struct guest_args {
-	uint64_t arg0;
-	uint64_t arg1;
-	uint16_t port;
-} __attribute__ ((packed));
-
-void guest_args_read(struct kvm_vm *vm, uint32_t vcpu_id,
-		     struct guest_args *args);
+#define sync_global_to_guest(vm, g) ({				\
+	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
+	memcpy(_p, &(g), sizeof(g));				\
+})
+
+#define sync_global_from_guest(vm, g) ({			\
+	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
+	memcpy(&(g), _p, sizeof(g));				\
+})
+
+/* ucall implementation types */
+typedef enum {
+	UCALL_PIO,
+	UCALL_MMIO,
+} ucall_type_t;
+
+/* Common ucalls */
+enum {
+	UCALL_NONE,
+	UCALL_SYNC,
+	UCALL_ABORT,
+	UCALL_DONE,
+};
+
+#define UCALL_MAX_ARGS 6
+
+struct ucall {
+	uint64_t cmd;
+	uint64_t args[UCALL_MAX_ARGS];
+};
+
+void ucall_init(struct kvm_vm *vm, ucall_type_t type, void *arg);
+void ucall_uninit(struct kvm_vm *vm);
+void ucall(uint64_t cmd, int nargs, ...);
+uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc);
+
+#define GUEST_SYNC(stage)	ucall(UCALL_SYNC, 2, "hello", stage)
+#define GUEST_DONE()		ucall(UCALL_DONE, 0)
+#define GUEST_ASSERT(_condition) do {			\
+	if (!(_condition))				\
+		ucall(UCALL_ABORT, 2,			\
+		      "Failed guest assert: "		\
+		      #_condition, __LINE__);		\
+} while (0)

 #endif /* SELFTEST_KVM_UTIL_H */
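A minimal end-to-end sketch of the ucall flow declared above (the host loop mirrors vcpu_worker() in dirty_log_test.c; VCPU_ID is whatever the test chose):

static void guest_main(void)
{
	int stage;

	for (stage = 0; stage < 3; stage++)
		GUEST_SYNC(stage);	/* each call exits to the host */
	GUEST_DONE();
}

static void host_loop(struct kvm_vm *vm)
{
	struct ucall uc;

	for (;;) {
		vcpu_run(vm, VCPU_ID);
		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_SYNC:
			/* uc.args[1] holds the stage from GUEST_SYNC() */
			break;
		case UCALL_ABORT:
			TEST_ASSERT(false, "%s", (const char *)uc.args[0]);
		case UCALL_DONE:
			return;
		}
	}
}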
tools/testing/selftests/kvm/include/sparsebit.h

@@ -15,8 +15,8 @@
  * even in the case where most bits are set.
  */

-#ifndef _TEST_SPARSEBIT_H_
-#define _TEST_SPARSEBIT_H_
+#ifndef SELFTEST_KVM_SPARSEBIT_H
+#define SELFTEST_KVM_SPARSEBIT_H

 #include <stdbool.h>
 #include <stdint.h>
@@ -72,4 +72,4 @@ void sparsebit_validate_internal(struct sparsebit *sbit);
 }
 #endif

-#endif /* _TEST_SPARSEBIT_H_ */
+#endif /* SELFTEST_KVM_SPARSEBIT_H */
tools/testing/selftests/kvm/include/test_util.h

@@ -7,8 +7,8 @@
  *
  */

-#ifndef TEST_UTIL_H
-#define TEST_UTIL_H 1
+#ifndef SELFTEST_KVM_TEST_UTIL_H
+#define SELFTEST_KVM_TEST_UTIL_H

 #include <stdlib.h>
 #include <stdarg.h>
@@ -41,4 +41,4 @@ void test_assert(bool exp, const char *exp_str,
 		#a, #b, #a, (unsigned long) __a, #b, (unsigned long) __b); \
 } while (0)

-#endif /* TEST_UTIL_H */
+#endif /* SELFTEST_KVM_TEST_UTIL_H */
tools/testing/selftests/kvm/include/x86_64/processor.h (renamed from include/x86.h)

@@ -1,5 +1,5 @@
 /*
- * tools/testing/selftests/kvm/include/x86.h
+ * tools/testing/selftests/kvm/include/x86_64/processor.h
  *
  * Copyright (C) 2018, Google LLC.
  *
@@ -7,8 +7,8 @@
  *
  */

-#ifndef SELFTEST_KVM_X86_H
-#define SELFTEST_KVM_X86_H
+#ifndef SELFTEST_KVM_PROCESSOR_H
+#define SELFTEST_KVM_PROCESSOR_H

 #include <assert.h>
 #include <stdint.h>
@@ -305,7 +305,25 @@ static inline unsigned long get_xmm(int n)

 struct kvm_x86_state;
 struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid);
-void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *state);
+void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid,
+		     struct kvm_x86_state *state);
+
+struct kvm_cpuid2 *kvm_get_supported_cpuid(void);
+void vcpu_set_cpuid(struct kvm_vm *vm, uint32_t vcpuid,
+		    struct kvm_cpuid2 *cpuid);
+
+struct kvm_cpuid_entry2 *
+kvm_get_supported_cpuid_index(uint32_t function, uint32_t index);
+
+static inline struct kvm_cpuid_entry2 *
+kvm_get_supported_cpuid_entry(uint32_t function)
+{
+	return kvm_get_supported_cpuid_index(function, 0);
+}
+
+uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index);
+void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
+		  uint64_t msr_value);

 /*
  * Basic CPU control in CR0
@@ -1044,4 +1062,4 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
 #define MSR_VM_IGNNE		0xc0010115
 #define MSR_VM_HSAVE_PA		0xc0010117

-#endif /* !SELFTEST_KVM_X86_H */
+#endif /* SELFTEST_KVM_PROCESSOR_H */
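The vcpu_save_state()/vcpu_load_state() pair above is what the new VMCS and Enlightened VMCS migration selftests build on. A condensed sketch of that save/restore pattern, following the shape state_test uses (VCPU_ID is the test's vcpu id):

static struct kvm_vm *example_migrate(struct kvm_vm *vm)
{
	struct kvm_x86_state *state;

	/* "Source" side: capture the full vcpu state. */
	state = vcpu_save_state(vm, VCPU_ID);

	/* "Destination" side: rebuild the vcpu and replay the state. */
	kvm_vm_release(vm);
	kvm_vm_restart(vm, O_RDWR);
	vm_vcpu_add(vm, VCPU_ID, 0, 0);
	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
	vcpu_load_state(vm, VCPU_ID, state);

	return vm;
}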
tools/testing/selftests/kvm/include/x86_64/vmx.h (renamed from include/vmx.h)

@@ -1,5 +1,5 @@
 /*
- * tools/testing/selftests/kvm/include/vmx.h
+ * tools/testing/selftests/kvm/include/x86_64/vmx.h
  *
  * Copyright (C) 2018, Google LLC.
  *
@@ -11,7 +11,7 @@
 #define SELFTEST_KVM_VMX_H

 #include <stdint.h>
-#include "x86.h"
+#include "processor.h"

 #define CPUID_VMX_BIT 5

@@ -339,6 +339,8 @@ struct vmx_msr_entry {
 	uint64_t value;
 } __attribute__ ((aligned(16)));

+#include "evmcs.h"
+
 static inline int vmxon(uint64_t phys)
 {
 	uint8_t ret;
@@ -372,6 +374,9 @@ static inline int vmptrld(uint64_t vmcs_pa)
 {
 	uint8_t ret;

+	if (enable_evmcs)
+		return -1;
+
 	__asm__ __volatile__ ("vmptrld %[pa]; setna %[ret]"
 		: [ret]"=rm"(ret)
 		: [pa]"m"(vmcs_pa)
@@ -385,6 +390,9 @@ static inline int vmptrst(uint64_t *value)
 	uint64_t tmp;
 	uint8_t ret;

+	if (enable_evmcs)
+		return evmcs_vmptrst(value);
+
 	__asm__ __volatile__("vmptrst %[value]; setna %[ret]"
 		: [value]"=m"(tmp), [ret]"=rm"(ret)
 		: : "cc", "memory");
@@ -411,6 +419,9 @@ static inline int vmlaunch(void)
 {
 	int ret;

+	if (enable_evmcs)
+		return evmcs_vmlaunch();
+
 	__asm__ __volatile__("push %%rbp;"
 			     "push %%rcx;"
 			     "push %%rdx;"
@@ -443,6 +454,9 @@ static inline int vmresume(void)
 {
 	int ret;

+	if (enable_evmcs)
+		return evmcs_vmresume();
+
 	__asm__ __volatile__("push %%rbp;"
 			     "push %%rcx;"
 			     "push %%rdx;"
@@ -482,6 +496,9 @@ static inline int vmread(uint64_t encoding, uint64_t *value)
 	uint64_t tmp;
 	uint8_t ret;

+	if (enable_evmcs)
+		return evmcs_vmread(encoding, value);
+
 	__asm__ __volatile__("vmread %[encoding], %[value]; setna %[ret]"
 		: [value]"=rm"(tmp), [ret]"=rm"(ret)
 		: [encoding]"r"(encoding)
@@ -506,6 +523,9 @@ static inline int vmwrite(uint64_t encoding, uint64_t value)
 {
 	uint8_t ret;

+	if (enable_evmcs)
+		return evmcs_vmwrite(encoding, value);
+
 	__asm__ __volatile__ ("vmwrite %[value], %[encoding]; setna %[ret]"
 		: [ret]"=rm"(ret)
 		: [value]"rm"(value), [encoding]"r"(encoding)
@@ -543,10 +563,19 @@ struct vmx_pages {
 	void *vmwrite_hva;
 	uint64_t vmwrite_gpa;
 	void *vmwrite;
+
+	void *vp_assist_hva;
+	uint64_t vp_assist_gpa;
+	void *vp_assist;
+
+	void *enlightened_vmcs_hva;
+	uint64_t enlightened_vmcs_gpa;
+	void *enlightened_vmcs;
 };

 struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva);
 bool prepare_for_vmx_operation(struct vmx_pages *vmx);
 void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
 bool load_vmcs(struct vmx_pages *vmx);

-#endif /* !SELFTEST_KVM_VMX_H */
+#endif /* SELFTEST_KVM_VMX_H */
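For context, a rough sketch of how a guest drives these wrappers (the shape vmx_tsc_adjust_test and the new evmcs_test follow; the L2 code, stack, and sizes here are made up for illustration). When enable_evmcs is set, the same calls transparently route through the evmcs_* variants guarded above:

#define L2_GUEST_STACK_SIZE 64

static unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

static void l2_guest_code(void)
{
	GUEST_DONE();
}

static void l1_guest_code(struct vmx_pages *vmx_pages)
{
	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_ASSERT(load_vmcs(vmx_pages));
	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
	GUEST_ASSERT(!vmlaunch());
}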
tools/testing/selftests/kvm/lib/aarch64/processor.c | 311 (new file)

@@ -0,0 +1,311 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * AArch64 code
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */

#define _GNU_SOURCE /* for program_invocation_name */

#include "kvm_util.h"
#include "../kvm_util_internal.h"
#include "processor.h"

#define KVM_GUEST_PAGE_TABLE_MIN_PADDR		0x180000
#define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN	0xac0000

static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
{
	return (v + vm->page_size) & ~(vm->page_size - 1);
}

static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->va_bits - shift)) - 1;

	return (gva >> shift) & mask;
}

static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;

	TEST_ASSERT(vm->pgtable_levels == 4,
		"Mode %d does not have 4 page table levels", vm->mode);

	return (gva >> shift) & mask;
}

static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;

	TEST_ASSERT(vm->pgtable_levels >= 3,
		"Mode %d does not have >= 3 page table levels", vm->mode);

	return (gva >> shift) & mask;
}

static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
	return (gva >> vm->page_shift) & mask;
}

static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
{
	uint64_t mask = ((1UL << (vm->va_bits - vm->page_shift)) - 1) << vm->page_shift;
	return entry & mask;
}

static uint64_t ptrs_per_pgd(struct kvm_vm *vm)
{
	unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
	return 1 << (vm->va_bits - shift);
}

static uint64_t ptrs_per_pte(struct kvm_vm *vm)
{
	return 1 << (vm->page_shift - 3);
}
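A quick sanity check of the shift arithmetic above, for the common 4-level, 4K-granule case (page_shift = 12, so page_shift - 3 = 9 index bits per level, va_bits = 48):

/*
 *   pte_index: GVA bits [20:12]  (shift = 12)
 *   pmd_index: GVA bits [29:21]  (shift =   9 + 12 = 21)
 *   pud_index: GVA bits [38:30]  (shift = 2*9 + 12 = 30)
 *   pgd_index: GVA bits [47:39]  (shift = 3*9 + 12 = 39,
 *              masked to va_bits - 39 = 9 bits)
 *
 * and ptrs_per_pte() = 1 << 9 = 512 entries per 4K table page.
 */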

void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot)
{
	int rc;

	if (!vm->pgd_created) {
		vm_paddr_t paddr = vm_phy_pages_alloc(vm,
			page_align(vm, ptrs_per_pgd(vm) * 8) / vm->page_size,
			KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
		vm->pgd = paddr;
		vm->pgd_created = true;
	}
}

void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
		  uint32_t pgd_memslot, uint64_t flags)
{
	uint8_t attr_idx = flags & 7;
	uint64_t *ptep;

	TEST_ASSERT((vaddr % vm->page_size) == 0,
		"Virtual address not on page boundary,\n"
		"  vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
		(vaddr >> vm->page_shift)),
		"Invalid virtual address, vaddr: 0x%lx", vaddr);
	TEST_ASSERT((paddr % vm->page_size) == 0,
		"Physical address not on page boundary,\n"
		"  paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
		"Physical address beyond beyond maximum supported,\n"
		"  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		paddr, vm->max_gfn, vm->page_size);

	ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8;
	if (!*ptep) {
		*ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
		*ptep |= 3;
	}

	switch (vm->pgtable_levels) {
	case 4:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
		if (!*ptep) {
			*ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
			*ptep |= 3;
		}
		/* fall through */
	case 3:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
		if (!*ptep) {
			*ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
			*ptep |= 3;
		}
		/* fall through */
	case 2:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8;
		break;
	default:
		TEST_ASSERT(false, "Page table levels must be 2, 3, or 4");
	}

	*ptep = paddr | 3;
	*ptep |= (attr_idx << 2) | (1 << 10) /* Access Flag */;
}

void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
		 uint32_t pgd_memslot)
{
	uint64_t attr_idx = 4; /* NORMAL (See DEFAULT_MAIR_EL1) */

	_virt_pg_map(vm, vaddr, paddr, pgd_memslot, attr_idx);
}

vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t *ptep;

	if (!vm->pgd_created)
		goto unmapped_gva;

	ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8;
	if (!ptep)
		goto unmapped_gva;

	switch (vm->pgtable_levels) {
	case 4:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8;
		if (!ptep)
			goto unmapped_gva;
		/* fall through */
	case 3:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8;
		if (!ptep)
			goto unmapped_gva;
		/* fall through */
	case 2:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8;
		if (!ptep)
			goto unmapped_gva;
		break;
	default:
		TEST_ASSERT(false, "Page table levels must be 2, 3, or 4");
	}

	return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));

unmapped_gva:
	TEST_ASSERT(false, "No mapping for vm virtual address, "
		    "gva: 0x%lx", gva);
}

static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level)
{
#ifdef DEBUG_VM
	static const char * const type[] = { "", "pud", "pmd", "pte" };
	uint64_t pte, *ptep;

	if (level == 4)
		return;

	for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
		ptep = addr_gpa2hva(vm, pte);
		if (!*ptep)
			continue;
		printf("%*s%s: %lx: %lx at %p\n", indent, "", type[level], pte, *ptep, ptep);
		pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level + 1);
	}
#endif
}

void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	int level = 4 - (vm->pgtable_levels - 1);
	uint64_t pgd, *ptep;

	if (!vm->pgd_created)
		return;

	for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pgd(vm) * 8; pgd += 8) {
		ptep = addr_gpa2hva(vm, pgd);
		if (!*ptep)
			continue;
		printf("%*spgd: %lx: %lx at %p\n", indent, "", pgd, *ptep, ptep);
		pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level);
	}
}

struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
				 void *guest_code)
{
	uint64_t ptrs_per_4k_pte = 512;
	uint64_t extra_pg_pages = (extra_mem_pages / ptrs_per_4k_pte) * 2;
	struct kvm_vm *vm;

	vm = vm_create(VM_MODE_P52V48_4K, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);

	kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
	vm_vcpu_add_default(vm, vcpuid, guest_code);

	return vm;
}

void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
{
	size_t stack_size = vm->page_size == 4096 ?
					DEFAULT_STACK_PGS * vm->page_size :
					vm->page_size;
	uint64_t stack_vaddr = vm_vaddr_alloc(vm, stack_size,
			DEFAULT_ARM64_GUEST_STACK_VADDR_MIN, 0, 0);

	vm_vcpu_add(vm, vcpuid, 0, 0);

	set_reg(vm, vcpuid, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size);
	set_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
}

void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot, int gdt_memslot)
{
	struct kvm_vcpu_init init;
	uint64_t sctlr_el1, tcr_el1;

	memset(&init, 0, sizeof(init));
	init.target = KVM_ARM_TARGET_GENERIC_V8;
	vcpu_ioctl(vm, vcpuid, KVM_ARM_VCPU_INIT, &init);

	/*
	 * Enable FP/ASIMD to avoid trapping when accessing Q0-Q15
	 * registers, which the variable argument list macros do.
	 */
	set_reg(vm, vcpuid, ARM64_SYS_REG(CPACR_EL1), 3 << 20);

	get_reg(vm, vcpuid, ARM64_SYS_REG(SCTLR_EL1), &sctlr_el1);
	get_reg(vm, vcpuid, ARM64_SYS_REG(TCR_EL1), &tcr_el1);

	switch (vm->mode) {
	case VM_MODE_P52V48_4K:
		tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
		tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
		break;
	case VM_MODE_P52V48_64K:
		tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
		tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
		break;
	case VM_MODE_P40V48_4K:
		tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
		tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
		break;
	case VM_MODE_P40V48_64K:
		tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
		tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
		break;
	default:
		TEST_ASSERT(false, "Unknown guest mode, mode: 0x%x", vm->mode);
	}

	sctlr_el1 |= (1 << 0) | (1 << 2) | (1 << 12) /* M | C | I */;
	/* TCR_EL1 |= IRGN0:WBWA | ORGN0:WBWA | SH0:Inner-Shareable */;
	tcr_el1 |= (1 << 8) | (1 << 10) | (3 << 12);
	tcr_el1 |= (64 - vm->va_bits) /* T0SZ */;

	set_reg(vm, vcpuid, ARM64_SYS_REG(SCTLR_EL1), sctlr_el1);
	set_reg(vm, vcpuid, ARM64_SYS_REG(TCR_EL1), tcr_el1);
	set_reg(vm, vcpuid, ARM64_SYS_REG(MAIR_EL1), DEFAULT_MAIR_EL1);
	set_reg(vm, vcpuid, ARM64_SYS_REG(TTBR0_EL1), vm->pgd);
}

void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
{
	uint64_t pstate, pc;

	get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pstate), &pstate);
	get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), &pc);

	fprintf(stream, "%*spstate: 0x%.16llx pc: 0x%.16llx\n",
		indent, "", pstate, pc);

}
tools/testing/selftests/kvm/lib/assert.c

@@ -13,7 +13,7 @@
 #include <execinfo.h>
 #include <sys/syscall.h>

-#include "../../kselftest.h"
+#include "kselftest.h"

 /* Dumps the current stack trace to stderr. */
 static void __attribute__((noinline)) test_dump_stack(void);
[File diff suppressed because it is too large]
tools/testing/selftests/kvm/lib/kvm_util_internal.h

@@ -1,28 +1,29 @@
 /*
- * tools/testing/selftests/kvm/lib/kvm_util.c
+ * tools/testing/selftests/kvm/lib/kvm_util_internal.h
  *
  * Copyright (C) 2018, Google LLC.
  *
  * This work is licensed under the terms of the GNU GPL, version 2.
  */

-#ifndef KVM_UTIL_INTERNAL_H
-#define KVM_UTIL_INTERNAL_H 1
+#ifndef SELFTEST_KVM_UTIL_INTERNAL_H
+#define SELFTEST_KVM_UTIL_INTERNAL_H

 #include "sparsebit.h"

 #define KVM_DEV_PATH "/dev/kvm"

 #ifndef BITS_PER_BYTE
 #define BITS_PER_BYTE 8
 #endif

 #ifndef BITS_PER_LONG
 #define BITS_PER_LONG (BITS_PER_BYTE * sizeof(long))
 #endif

 #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
 #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_LONG)

+/* Concrete definition of struct kvm_vm. */
 struct userspace_mem_region {
 	struct userspace_mem_region *next, *prev;
 	struct kvm_userspace_memory_region region;
@@ -45,14 +46,16 @@ struct kvm_vm {
 	int mode;
 	int kvm_fd;
 	int fd;
+	unsigned int pgtable_levels;
 	unsigned int page_size;
 	unsigned int page_shift;
+	unsigned int pa_bits;
+	unsigned int va_bits;
 	uint64_t max_gfn;
 	struct vcpu *vcpu_head;
 	struct userspace_mem_region *userspace_mem_region_head;
 	struct sparsebit *vpages_valid;
 	struct sparsebit *vpages_mapped;

 	bool has_irqchip;
 	bool pgd_created;
 	vm_paddr_t pgd;
@@ -60,13 +63,11 @@ struct kvm_vm {
 	vm_vaddr_t tss;
 };

-struct vcpu *vcpu_find(struct kvm_vm *vm,
-		       uint32_t vcpuid);
-void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot, int gdt_memslot);
+struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid);
+void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot,
+		int gdt_memslot);
 void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
-void regs_dump(FILE *stream, struct kvm_regs *regs,
-	       uint8_t indent);
-void sregs_dump(FILE *stream, struct kvm_sregs *sregs,
-		uint8_t indent);
+void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent);
+void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent);

-#endif
+#endif /* SELFTEST_KVM_UTIL_INTERNAL_H */
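The bit-array macros above follow the kernel's conventions; DIV_ROUND_UP() is plain round-up integer division, so BITS_TO_LONGS() gives the number of longs backing an n-bit bitmap:

/*
 * On an LP64 host, BITS_PER_LONG = 8 * sizeof(long) = 64, so:
 *   BITS_TO_LONGS(1)  = DIV_ROUND_UP(1, 64)  = 1
 *   BITS_TO_LONGS(64) = 1
 *   BITS_TO_LONGS(65) = 2
 * This is the sizing rule behind the dirty-log bitmaps the tests allocate.
 */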
144
tools/testing/selftests/kvm/lib/ucall.c
Normal file
144
tools/testing/selftests/kvm/lib/ucall.c
Normal file
@@ -0,0 +1,144 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * ucall support. A ucall is a "hypercall to userspace".
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */
#include "kvm_util.h"
#include "kvm_util_internal.h"

#define UCALL_PIO_PORT ((uint16_t)0x1000)

static ucall_type_t ucall_type;
static vm_vaddr_t *ucall_exit_mmio_addr;

static bool ucall_mmio_init(struct kvm_vm *vm, vm_paddr_t gpa)
{
	if (kvm_userspace_memory_region_find(vm, gpa, gpa + 1))
		return false;

	virt_pg_map(vm, gpa, gpa, 0);

	ucall_exit_mmio_addr = (vm_vaddr_t *)gpa;
	sync_global_to_guest(vm, ucall_exit_mmio_addr);

	return true;
}

void ucall_init(struct kvm_vm *vm, ucall_type_t type, void *arg)
{
	ucall_type = type;
	sync_global_to_guest(vm, ucall_type);

	if (type == UCALL_PIO)
		return;

	if (type == UCALL_MMIO) {
		vm_paddr_t gpa, start, end, step;
		bool ret;

		if (arg) {
			gpa = (vm_paddr_t)arg;
			ret = ucall_mmio_init(vm, gpa);
			TEST_ASSERT(ret, "Can't set ucall mmio address to %lx", gpa);
			return;
		}

		/*
		 * Find an address within the allowed virtual address space,
		 * that does _not_ have a KVM memory region associated with it.
		 * Identity mapping an address like this allows the guest to
		 * access it, but as KVM doesn't know what to do with it, it
		 * will assume it's something userspace handles and exit with
		 * KVM_EXIT_MMIO. Well, at least that's how it works for AArch64.
		 * Here we start with a guess that the addresses around two
		 * thirds of the VA space are unmapped and then work both down
		 * and up from there in 1/6 VA space sized steps.
		 */
		start = 1ul << (vm->va_bits * 2 / 3);
		end = 1ul << vm->va_bits;
		step = 1ul << (vm->va_bits / 6);
		for (gpa = start; gpa >= 0; gpa -= step) {
			if (ucall_mmio_init(vm, gpa & ~(vm->page_size - 1)))
				return;
		}
		for (gpa = start + step; gpa < end; gpa += step) {
			if (ucall_mmio_init(vm, gpa & ~(vm->page_size - 1)))
				return;
		}
		TEST_ASSERT(false, "Can't find a ucall mmio address");
	}
}

void ucall_uninit(struct kvm_vm *vm)
{
	ucall_type = 0;
	sync_global_to_guest(vm, ucall_type);
	ucall_exit_mmio_addr = 0;
	sync_global_to_guest(vm, ucall_exit_mmio_addr);
}

static void ucall_pio_exit(struct ucall *uc)
{
#ifdef __x86_64__
	asm volatile("in %[port], %%al"
		: : [port] "d" (UCALL_PIO_PORT), "D" (uc) : "rax");
#endif
}

static void ucall_mmio_exit(struct ucall *uc)
{
	*ucall_exit_mmio_addr = (vm_vaddr_t)uc;
}

void ucall(uint64_t cmd, int nargs, ...)
{
	struct ucall uc = {
		.cmd = cmd,
	};
	va_list va;
	int i;

	nargs = nargs <= UCALL_MAX_ARGS ? nargs : UCALL_MAX_ARGS;

	va_start(va, nargs);
	for (i = 0; i < nargs; ++i)
		uc.args[i] = va_arg(va, uint64_t);
	va_end(va);

	switch (ucall_type) {
	case UCALL_PIO:
		ucall_pio_exit(&uc);
		break;
	case UCALL_MMIO:
		ucall_mmio_exit(&uc);
		break;
	};
}

uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
{
	struct kvm_run *run = vcpu_state(vm, vcpu_id);

	memset(uc, 0, sizeof(*uc));

#ifdef __x86_64__
	if (ucall_type == UCALL_PIO && run->exit_reason == KVM_EXIT_IO &&
	    run->io.port == UCALL_PIO_PORT) {
		struct kvm_regs regs;
		vcpu_regs_get(vm, vcpu_id, &regs);
		memcpy(uc, addr_gva2hva(vm, (vm_vaddr_t)regs.rdi), sizeof(*uc));
		return uc->cmd;
	}
#endif
	if (ucall_type == UCALL_MMIO && run->exit_reason == KVM_EXIT_MMIO &&
	    run->mmio.phys_addr == (uint64_t)ucall_exit_mmio_addr) {
		vm_vaddr_t gva;
		TEST_ASSERT(run->mmio.is_write && run->mmio.len == 8,
			    "Unexpected ucall exit mmio address access");
		gva = *(vm_vaddr_t *)run->mmio.data;
		memcpy(uc, addr_gva2hva(vm, gva), sizeof(*uc));
	}

	return uc->cmd;
}
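For orientation, here is a minimal sketch (not part of this commit) of how the two halves of the ucall API above pair up in a test. It assumes the usual selftest harness helpers (vcpu_run, TEST_ASSERT) and uses the UCALL_* commands directly; the tests converted later in this series wrap the guest side in GUEST_SYNC()/GUEST_DONE() macros. The helper name run_to_done is hypothetical.

/* Guest side: report progress, then signal completion. */
static void guest_code(void)
{
	ucall(UCALL_SYNC, 2, "hello", 1);	/* stage 1 */
	ucall(UCALL_DONE, 0);
}

/* Host side (hypothetical helper): run the vcpu and decode each exit. */
static void run_to_done(struct kvm_vm *vm, uint32_t vcpu_id)
{
	struct ucall uc;

	for (;;) {
		vcpu_run(vm, vcpu_id);
		switch (get_ucall(vm, vcpu_id, &uc)) {
		case UCALL_SYNC:
			/* uc.args[0] carries the sync string, uc.args[1] the stage */
			break;
		case UCALL_DONE:
			return;
		default:
			TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
		}
	}
}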
@@ -1,5 +1,5 @@
/*
 * tools/testing/selftests/kvm/lib/x86.c
 * tools/testing/selftests/kvm/lib/x86_64/processor.c
 *
 * Copyright (C) 2018, Google LLC.
 *
@@ -10,8 +10,8 @@

#include "test_util.h"
#include "kvm_util.h"
#include "kvm_util_internal.h"
#include "x86.h"
#include "../kvm_util_internal.h"
#include "processor.h"

/* Minimum physical address used for virtual translation tables. */
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
@@ -231,7 +231,7 @@ void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot)
{
	int rc;

	TEST_ASSERT(vm->mode == VM_MODE_FLAT48PG, "Attempt to use "
	TEST_ASSERT(vm->mode == VM_MODE_P52V48_4K, "Attempt to use "
		"unknown or unsupported guest mode, mode: 0x%x", vm->mode);

	/* If needed, create page map l4 table. */
@@ -264,7 +264,7 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
	uint16_t index[4];
	struct pageMapL4Entry *pml4e;

	TEST_ASSERT(vm->mode == VM_MODE_FLAT48PG, "Attempt to use "
	TEST_ASSERT(vm->mode == VM_MODE_P52V48_4K, "Attempt to use "
		"unknown or unsupported guest mode, mode: 0x%x", vm->mode);

	TEST_ASSERT((vaddr % vm->page_size) == 0,
@@ -551,7 +551,7 @@ vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
	struct pageTableEntry *pte;
	void *hva;

	TEST_ASSERT(vm->mode == VM_MODE_FLAT48PG, "Attempt to use "
	TEST_ASSERT(vm->mode == VM_MODE_P52V48_4K, "Attempt to use "
		"unknown or unsupported guest mode, mode: 0x%x", vm->mode);

	index[0] = (gva >> 12) & 0x1ffu;
@@ -624,9 +624,9 @@ void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot, int gdt_memslot)
	kvm_setup_gdt(vm, &sregs.gdt, gdt_memslot, pgd_memslot);

	switch (vm->mode) {
	case VM_MODE_FLAT48PG:
	case VM_MODE_P52V48_4K:
		sregs.cr0 = X86_CR0_PE | X86_CR0_NE | X86_CR0_PG;
		sregs.cr4 |= X86_CR4_PAE;
		sregs.cr4 |= X86_CR4_PAE | X86_CR4_OSFXSR;
		sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX);

		kvm_seg_set_unusable(&sregs.ldt);
@@ -672,6 +672,102 @@ void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
	vcpu_set_mp_state(vm, vcpuid, &mp_state);
}

/* Allocate an instance of struct kvm_cpuid2
 *
 * Input Args: None
 *
 * Output Args: None
 *
 * Return: A pointer to the allocated struct. The caller is responsible
 * for freeing this struct.
 *
 * Since kvm_cpuid2 uses a 0-length array to allow the size of the
 * array to be decided at allocation time, allocation is slightly
 * complicated. This function uses a reasonable default length for
 * the array and performs the appropriate allocation.
 */
static struct kvm_cpuid2 *allocate_kvm_cpuid2(void)
{
	struct kvm_cpuid2 *cpuid;
	int nent = 100;
	size_t size;

	size = sizeof(*cpuid);
	size += nent * sizeof(struct kvm_cpuid_entry2);
	cpuid = malloc(size);
	if (!cpuid) {
		perror("malloc");
		abort();
	}

	cpuid->nent = nent;

	return cpuid;
}

/* KVM Supported CPUID Get
 *
 * Input Args: None
 *
 * Output Args:
 *
 * Return: The supported KVM CPUID
 *
 * Get the guest CPUID supported by KVM.
 */
struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
{
	static struct kvm_cpuid2 *cpuid;
	int ret;
	int kvm_fd;

	if (cpuid)
		return cpuid;

	cpuid = allocate_kvm_cpuid2();
	kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
	if (kvm_fd < 0)
		exit(KSFT_SKIP);

	ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);
	TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_CPUID failed %d %d\n",
		    ret, errno);

	close(kvm_fd);
	return cpuid;
}

/* Locate a cpuid entry.
 *
 * Input Args:
 *   cpuid: The cpuid.
 *   function: The function of the cpuid entry to find.
 *
 * Output Args: None
 *
 * Return: A pointer to the cpuid entry. Never returns NULL.
 */
struct kvm_cpuid_entry2 *
kvm_get_supported_cpuid_index(uint32_t function, uint32_t index)
{
	struct kvm_cpuid2 *cpuid;
	struct kvm_cpuid_entry2 *entry = NULL;
	int i;

	cpuid = kvm_get_supported_cpuid();
	for (i = 0; i < cpuid->nent; i++) {
		if (cpuid->entries[i].function == function &&
		    cpuid->entries[i].index == index) {
			entry = &cpuid->entries[i];
			break;
		}
	}

	TEST_ASSERT(entry, "Guest CPUID entry not found: (EAX=%x, ECX=%x).",
		    function, index);
	return entry;
}
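As a sketch of a typical caller (not part of this commit): a test gates itself on a CPUID feature bit before touching the corresponding state. It assumes the kvm_get_supported_cpuid_entry(function) convenience wrapper used by the tests below, and the X86_FEATURE_* bit masks defined there.

	/* Skip the test if the host's KVM does not report XSAVE. */
	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);

	if (!(entry->ecx & X86_FEATURE_XSAVE)) {
		printf("XSAVE feature not supported, skipping test\n");
		exit(KSFT_SKIP);
	}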

/* VM VCPU CPUID Set
 *
 * Input Args:
@@ -698,6 +794,7 @@ void vcpu_set_cpuid(struct kvm_vm *vm,
		    rc, errno);

}

/* Create a VM with reasonable defaults
 *
 * Input Args:
@@ -726,7 +823,7 @@ struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
	uint64_t extra_pg_pages = extra_mem_pages / 512 * 2;

	/* Create VM */
	vm = vm_create(VM_MODE_FLAT48PG,
	vm = vm_create(VM_MODE_P52V48_4K,
		       DEFAULT_GUEST_PHY_PAGES + extra_pg_pages,
		       O_RDWR);

@@ -742,6 +839,154 @@ struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
	return vm;
}

/* VCPU Get MSR
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   msr_index - Index of MSR
 *
 * Output Args: None
 *
 * Return: On success, value of the MSR. On failure a TEST_ASSERT is produced.
 *
 * Get value of MSR for VCPU.
 */
uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	struct {
		struct kvm_msrs header;
		struct kvm_msr_entry entry;
	} buffer = {};
	int r;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
	buffer.header.nmsrs = 1;
	buffer.entry.index = msr_index;
	r = ioctl(vcpu->fd, KVM_GET_MSRS, &buffer.header);
	TEST_ASSERT(r == 1, "KVM_GET_MSRS IOCTL failed,\n"
		    "  rc: %i errno: %i", r, errno);

	return buffer.entry.data;
}

/* VCPU Set MSR
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   msr_index - Index of MSR
 *   msr_value - New value of MSR
 *
 * Output Args: None
 *
 * Return: On success, nothing. On failure a TEST_ASSERT is produced.
 *
 * Set value of MSR for VCPU.
 */
void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
		  uint64_t msr_value)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	struct {
		struct kvm_msrs header;
		struct kvm_msr_entry entry;
	} buffer = {};
	int r;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
	memset(&buffer, 0, sizeof(buffer));
	buffer.header.nmsrs = 1;
	buffer.entry.index = msr_index;
	buffer.entry.data = msr_value;
	r = ioctl(vcpu->fd, KVM_SET_MSRS, &buffer.header);
	TEST_ASSERT(r == 1, "KVM_SET_MSRS IOCTL failed,\n"
		    "  rc: %i errno: %i", r, errno);
}
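A minimal usage sketch (not part of this commit), assuming the MSR_IA32_TSC_ADJUST index and TSC_ADJUST_VALUE constant defined by vmx_tsc_adjust_test later in this series: the pair of accessors lets a host round-trip an MSR through KVM_SET_MSRS/KVM_GET_MSRS in one line each.

	vcpu_set_msr(vm, VCPU_ID, MSR_IA32_TSC_ADJUST, TSC_ADJUST_VALUE);
	TEST_ASSERT(vcpu_get_msr(vm, VCPU_ID, MSR_IA32_TSC_ADJUST) ==
		    TSC_ADJUST_VALUE,
		    "MSR_IA32_TSC_ADJUST did not round-trip");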

/* VM VCPU Args Set
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   num - number of arguments
 *   ... - arguments, each of type uint64_t
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the first num function input arguments to the values
 * given as variable args. Each of the variable args is expected to
 * be of type uint64_t.
 */
void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
{
	va_list ap;
	struct kvm_regs regs;

	TEST_ASSERT(num >= 1 && num <= 6, "Unsupported number of args,\n"
		    "  num: %u\n",
		    num);

	va_start(ap, num);
	vcpu_regs_get(vm, vcpuid, &regs);

	if (num >= 1)
		regs.rdi = va_arg(ap, uint64_t);

	if (num >= 2)
		regs.rsi = va_arg(ap, uint64_t);

	if (num >= 3)
		regs.rdx = va_arg(ap, uint64_t);

	if (num >= 4)
		regs.rcx = va_arg(ap, uint64_t);

	if (num >= 5)
		regs.r8 = va_arg(ap, uint64_t);

	if (num >= 6)
		regs.r9 = va_arg(ap, uint64_t);

	vcpu_regs_set(vm, vcpuid, &regs);
	va_end(ap);
}
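Because the arguments land in rdi, rsi, rdx, rcx, r8 and r9, in the order of the System V AMD64 calling convention, a guest entry point declared with matching C parameters receives them directly. A sketch, mirroring how evmcs_test below passes its vmx pages:

/* Guest entry point: the single argument arrives in %rdi. */
void guest_code(struct vmx_pages *vmx_pages);

	/* Host side: hand the guest the GVA of its vmx pages. */
	vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);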

/*
 * VM VCPU Dump
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   indent - Left margin indent amount
 *
 * Output Args:
 *   stream - Output FILE stream
 *
 * Return: None
 *
 * Dumps the current state of the VCPU specified by vcpuid, within the VM
 * given by vm, to the FILE stream given by stream.
 */
void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
{
	struct kvm_regs regs;
	struct kvm_sregs sregs;

	fprintf(stream, "%*scpuid: %u\n", indent, "", vcpuid);

	fprintf(stream, "%*sregs:\n", indent + 2, "");
	vcpu_regs_get(vm, vcpuid, &regs);
	regs_dump(stream, &regs, indent + 4);

	fprintf(stream, "%*ssregs:\n", indent + 2, "");
	vcpu_sregs_get(vm, vcpuid, &sregs);
	sregs_dump(stream, &sregs, indent + 4);
}

struct kvm_x86_state {
	struct kvm_vcpu_events events;
	struct kvm_mp_state mp_state;
@@ -1,5 +1,5 @@
/*
 * tools/testing/selftests/kvm/lib/x86.c
 * tools/testing/selftests/kvm/lib/x86_64/vmx.c
 *
 * Copyright (C) 2018, Google LLC.
 *
@@ -10,9 +10,11 @@

#include "test_util.h"
#include "kvm_util.h"
#include "x86.h"
#include "processor.h"
#include "vmx.h"

bool enable_evmcs;

/* Allocate memory regions for nested VMX tests.
 *
 * Input Args:
@@ -62,6 +64,20 @@ vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva)
	vmx->vmwrite_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmwrite);
	memset(vmx->vmwrite_hva, 0, getpagesize());

	/* Setup of a region of guest memory for the VP Assist page. */
	vmx->vp_assist = (void *)vm_vaddr_alloc(vm, getpagesize(),
						0x10000, 0, 0);
	vmx->vp_assist_hva = addr_gva2hva(vm, (uintptr_t)vmx->vp_assist);
	vmx->vp_assist_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vp_assist);

	/* Setup of a region of guest memory for the enlightened VMCS. */
	vmx->enlightened_vmcs = (void *)vm_vaddr_alloc(vm, getpagesize(),
						       0x10000, 0, 0);
	vmx->enlightened_vmcs_hva =
		addr_gva2hva(vm, (uintptr_t)vmx->enlightened_vmcs);
	vmx->enlightened_vmcs_gpa =
		addr_gva2gpa(vm, (uintptr_t)vmx->enlightened_vmcs);

	*p_vmx_gva = vmx_gva;
	return vmx;
}
@@ -107,18 +123,31 @@ bool prepare_for_vmx_operation(struct vmx_pages *vmx)
	if (vmxon(vmx->vmxon_gpa))
		return false;

	/* Load a VMCS. */
	*(uint32_t *)(vmx->vmcs) = vmcs_revision();
	if (vmclear(vmx->vmcs_gpa))
		return false;
	return true;
}

	if (vmptrld(vmx->vmcs_gpa))
		return false;
bool load_vmcs(struct vmx_pages *vmx)
{
	if (!enable_evmcs) {
		/* Load a VMCS. */
		*(uint32_t *)(vmx->vmcs) = vmcs_revision();
		if (vmclear(vmx->vmcs_gpa))
			return false;

	/* Setup shadow VMCS, do not load it yet. */
	*(uint32_t *)(vmx->shadow_vmcs) = vmcs_revision() | 0x80000000ul;
	if (vmclear(vmx->shadow_vmcs_gpa))
		return false;
		if (vmptrld(vmx->vmcs_gpa))
			return false;

		/* Setup shadow VMCS, do not load it yet. */
		*(uint32_t *)(vmx->shadow_vmcs) =
			vmcs_revision() | 0x80000000ul;
		if (vmclear(vmx->shadow_vmcs_gpa))
			return false;
	} else {
		if (evmcs_vmptrld(vmx->enlightened_vmcs_gpa,
				  vmx->enlightened_vmcs))
			return false;
		current_evmcs->revision_id = vmcs_revision();
	}

	return true;
}
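With the VMCS-load step split out of prepare_for_vmx_operation() into load_vmcs(), an L1 guest now follows the call order below; this is a sketch distilled from the converted tests later in this diff (state_test, vmx_tsc_adjust_test), not new code in this commit.

	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));	/* VMXON */
	GUEST_ASSERT(load_vmcs(vmx_pages));	/* VMCLEAR+VMPTRLD, or evmcs_vmptrld when enable_evmcs */
	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
	GUEST_ASSERT(!vmlaunch());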
@@ -17,7 +17,7 @@
#include "test_util.h"

#include "kvm_util.h"
#include "x86.h"
#include "processor.h"

#define X86_FEATURE_XSAVE (1<<26)
#define X86_FEATURE_OSXSAVE (1<<27)
@@ -67,6 +67,7 @@ int main(int argc, char *argv[])
	struct kvm_vm *vm;
	struct kvm_sregs sregs;
	struct kvm_cpuid_entry2 *entry;
	struct ucall uc;
	int rc;

	entry = kvm_get_supported_cpuid_entry(1);
@@ -87,21 +88,20 @@ int main(int argc, char *argv[])
	rc = _vcpu_run(vm, VCPU_ID);

	if (run->exit_reason == KVM_EXIT_IO) {
		switch (run->io.port) {
		case GUEST_PORT_SYNC:
		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_SYNC:
			/* emulate hypervisor clearing CR4.OSXSAVE */
			vcpu_sregs_get(vm, VCPU_ID, &sregs);
			sregs.cr4 &= ~X86_CR4_OSXSAVE;
			vcpu_sregs_set(vm, VCPU_ID, &sregs);
			break;
		case GUEST_PORT_ABORT:
		case UCALL_ABORT:
			TEST_ASSERT(false, "Guest CR4 bit (OSXSAVE) unsynchronized with CPUID bit.");
			break;
		case GUEST_PORT_DONE:
		case UCALL_DONE:
			goto done;
		default:
			TEST_ASSERT(false, "Unknown port 0x%x.",
				    run->io.port);
			TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
		}
	}
}
tools/testing/selftests/kvm/x86_64/evmcs_test.c (new file, 160 lines)
@@ -0,0 +1,160 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018, Red Hat, Inc.
 *
 * Tests for Enlightened VMCS, including nested guest state.
 */
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"

#include "kvm_util.h"

#include "vmx.h"

#define VCPU_ID 5

static bool have_nested_state;

void l2_guest_code(void)
{
	GUEST_SYNC(6);

	GUEST_SYNC(7);

	/* Done, exit to L1 and never come back. */
	vmcall();
}

void l1_guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

	enable_vp_assist(vmx_pages->vp_assist_gpa, vmx_pages->vp_assist);

	GUEST_ASSERT(vmx_pages->vmcs_gpa);
	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_SYNC(3);
	GUEST_ASSERT(load_vmcs(vmx_pages));
	GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);

	GUEST_SYNC(4);
	GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);

	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	GUEST_SYNC(5);
	GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);
	GUEST_SYNC(8);
	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
	GUEST_SYNC(9);
}

void guest_code(struct vmx_pages *vmx_pages)
{
	GUEST_SYNC(1);
	GUEST_SYNC(2);

	if (vmx_pages)
		l1_guest_code(vmx_pages);

	GUEST_DONE();
}

int main(int argc, char *argv[])
{
	struct vmx_pages *vmx_pages = NULL;
	vm_vaddr_t vmx_pages_gva = 0;

	struct kvm_regs regs1, regs2;
	struct kvm_vm *vm;
	struct kvm_run *run;
	struct kvm_x86_state *state;
	struct ucall uc;
	int stage;
	uint16_t evmcs_ver;
	struct kvm_enable_cap enable_evmcs_cap = {
		.cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
		.args[0] = (unsigned long)&evmcs_ver
	};

	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);

	/* Create VM */
	vm = vm_create_default(VCPU_ID, 0, guest_code);

	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());

	if (!kvm_check_cap(KVM_CAP_NESTED_STATE) ||
	    !kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
		printf("capabilities not available, skipping test\n");
		exit(KSFT_SKIP);
	}

	vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);

	run = vcpu_state(vm, VCPU_ID);

	vcpu_regs_get(vm, VCPU_ID, &regs1);

	vmx_pages = vcpu_alloc_vmx(vm, &vmx_pages_gva);
	vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);

	for (stage = 1;; stage++) {
		_vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Unexpected exit reason: %u (%s),\n",
			    run->exit_reason,
			    exit_reason_str(run->exit_reason));

		memset(&regs1, 0, sizeof(regs1));
		vcpu_regs_get(vm, VCPU_ID, &regs1);
		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_ABORT:
			TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0],
				    __FILE__, uc.args[1]);
			/* NOT REACHED */
		case UCALL_SYNC:
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
		}

		/* UCALL_SYNC is handled here. */
		TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
			    uc.args[1] == stage, "Unexpected register values vmexit #%lx, got %lx",
			    stage, (ulong)uc.args[1]);

		state = vcpu_save_state(vm, VCPU_ID);
		kvm_vm_release(vm);

		/* Restore state in a new VM. */
		kvm_vm_restart(vm, O_RDWR);
		vm_vcpu_add(vm, VCPU_ID, 0, 0);
		vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
		vcpu_load_state(vm, VCPU_ID, state);
		run = vcpu_state(vm, VCPU_ID);
		free(state);

		memset(&regs2, 0, sizeof(regs2));
		vcpu_regs_get(vm, VCPU_ID, &regs2);
		TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
			    "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
			    (ulong) regs2.rdi, (ulong) regs2.rsi);
	}

done:
	kvm_vm_free(vm);
}
@@ -19,7 +19,7 @@

#include "test_util.h"
#include "kvm_util.h"
#include "x86.h"
#include "processor.h"

#define VCPU_ID 0
#define MSR_PLATFORM_INFO_MAX_TURBO_RATIO 0xff00
@@ -48,7 +48,7 @@ static void set_msr_platform_info_enabled(struct kvm_vm *vm, bool enable)
static void test_msr_platform_info_enabled(struct kvm_vm *vm)
{
	struct kvm_run *run = vcpu_state(vm, VCPU_ID);
	struct guest_args args;
	struct ucall uc;

	set_msr_platform_info_enabled(vm, true);
	vcpu_run(vm, VCPU_ID);
@@ -56,11 +56,11 @@ static void test_msr_platform_info_enabled(struct kvm_vm *vm)
		    "Exit_reason other than KVM_EXIT_IO: %u (%s),\n",
		    run->exit_reason,
		    exit_reason_str(run->exit_reason));
	guest_args_read(vm, VCPU_ID, &args);
	TEST_ASSERT(args.port == GUEST_PORT_SYNC,
		    "Received IO from port other than PORT_HOST_SYNC: %u\n",
		    run->io.port);
	TEST_ASSERT((args.arg1 & MSR_PLATFORM_INFO_MAX_TURBO_RATIO) ==
	get_ucall(vm, VCPU_ID, &uc);
	TEST_ASSERT(uc.cmd == UCALL_SYNC,
		    "Received ucall other than UCALL_SYNC: %u\n",
		    uc.cmd);
	TEST_ASSERT((uc.args[1] & MSR_PLATFORM_INFO_MAX_TURBO_RATIO) ==
		    MSR_PLATFORM_INFO_MAX_TURBO_RATIO,
		    "Expected MSR_PLATFORM_INFO to have max turbo ratio mask: %i.",
		    MSR_PLATFORM_INFO_MAX_TURBO_RATIO);
@@ -22,7 +22,7 @@
#include "test_util.h"

#include "kvm_util.h"
#include "x86.h"
#include "processor.h"

#define VCPU_ID 5

@@ -17,7 +17,7 @@
#include "test_util.h"

#include "kvm_util.h"
#include "x86.h"
#include "processor.h"
#include "vmx.h"

#define VCPU_ID 5
@@ -26,20 +26,20 @@ static bool have_nested_state;

void l2_guest_code(void)
{
	GUEST_SYNC(5);
	GUEST_SYNC(6);

	/* Exit to L1 */
	vmcall();

	/* L1 has now set up a shadow VMCS for us. */
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
	GUEST_SYNC(9);
	GUEST_SYNC(10);
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
	GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0fffee));
	GUEST_SYNC(10);
	GUEST_SYNC(11);
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0fffee);
	GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0ffffee));
	GUEST_SYNC(11);
	GUEST_SYNC(12);

	/* Done, exit to L1 and never come back. */
	vmcall();
@@ -52,15 +52,17 @@ void l1_guest_code(struct vmx_pages *vmx_pages)

	GUEST_ASSERT(vmx_pages->vmcs_gpa);
	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_SYNC(3);
	GUEST_ASSERT(load_vmcs(vmx_pages));
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);

	GUEST_SYNC(3);
	GUEST_SYNC(4);
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);

	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	GUEST_SYNC(4);
	GUEST_SYNC(5);
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
@@ -72,7 +74,7 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	GUEST_SYNC(6);
	GUEST_SYNC(7);
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	GUEST_ASSERT(!vmresume());
@@ -85,12 +87,12 @@ void l1_guest_code(struct vmx_pages *vmx_pages)

	GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa));
	GUEST_ASSERT(vmlaunch());
	GUEST_SYNC(7);
	GUEST_SYNC(8);
	GUEST_ASSERT(vmlaunch());
	GUEST_ASSERT(vmresume());

	vmwrite(GUEST_RIP, 0xc0ffee);
	GUEST_SYNC(8);
	GUEST_SYNC(9);
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);

	GUEST_ASSERT(!vmptrld(vmx_pages->vmcs_gpa));
@@ -101,7 +103,7 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
	GUEST_ASSERT(vmlaunch());
	GUEST_ASSERT(vmresume());
	GUEST_SYNC(12);
	GUEST_SYNC(13);
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
	GUEST_ASSERT(vmlaunch());
	GUEST_ASSERT(vmresume());
@@ -127,6 +129,7 @@ int main(int argc, char *argv[])
	struct kvm_vm *vm;
	struct kvm_run *run;
	struct kvm_x86_state *state;
	struct ucall uc;
	int stage;

	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
@@ -155,23 +158,23 @@ int main(int argc, char *argv[])

	memset(&regs1, 0, sizeof(regs1));
	vcpu_regs_get(vm, VCPU_ID, &regs1);
	switch (run->io.port) {
	case GUEST_PORT_ABORT:
		TEST_ASSERT(false, "%s at %s:%d", (const char *) regs1.rdi,
			    __FILE__, regs1.rsi);
	switch (get_ucall(vm, VCPU_ID, &uc)) {
	case UCALL_ABORT:
		TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0],
			    __FILE__, uc.args[1]);
		/* NOT REACHED */
	case GUEST_PORT_SYNC:
	case UCALL_SYNC:
		break;
	case GUEST_PORT_DONE:
	case UCALL_DONE:
		goto done;
	default:
		TEST_ASSERT(false, "Unknown port 0x%x.", run->io.port);
		TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
	}

	/* PORT_SYNC is handled here. */
	TEST_ASSERT(!strcmp((const char *)regs1.rdi, "hello") &&
		    regs1.rsi == stage, "Unexpected register values vmexit #%lx, got %lx",
		    stage, (ulong) regs1.rsi);
	/* UCALL_SYNC is handled here. */
	TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
		    uc.args[1] == stage, "Unexpected register values vmexit #%lx, got %lx",
		    stage, (ulong)uc.args[1]);

	state = vcpu_save_state(vm, VCPU_ID);
	kvm_vm_release(vm);
@@ -19,7 +19,7 @@

#include "test_util.h"
#include "kvm_util.h"
#include "x86.h"
#include "processor.h"

#define VCPU_ID 5

@@ -1,5 +1,5 @@
/*
 * gtests/tests/vmx_tsc_adjust_test.c
 * vmx_tsc_adjust_test
 *
 * Copyright (C) 2018, Google LLC.
 *
@@ -22,13 +22,13 @@

#include "test_util.h"
#include "kvm_util.h"
#include "x86.h"
#include "processor.h"
#include "vmx.h"

#include <string.h>
#include <sys/ioctl.h>

#include "../kselftest.h"
#include "kselftest.h"

#ifndef MSR_IA32_TSC_ADJUST
#define MSR_IA32_TSC_ADJUST 0x3b
@@ -94,6 +94,7 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
	check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);

	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_ASSERT(load_vmcs(vmx_pages));

	/* Prepare the VMCS for L2 execution. */
	prepare_vmcs(vmx_pages, l2_guest_code,
@@ -146,26 +147,25 @@ int main(int argc, char *argv[])

	for (;;) {
		volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
		struct guest_args args;
		struct ucall uc;

		vcpu_run(vm, VCPU_ID);
		guest_args_read(vm, VCPU_ID, &args);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
			    run->exit_reason,
			    exit_reason_str(run->exit_reason));

		switch (args.port) {
		case GUEST_PORT_ABORT:
			TEST_ASSERT(false, "%s", (const char *) args.arg0);
		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_ABORT:
			TEST_ASSERT(false, "%s", (const char *)uc.args[0]);
			/* NOT REACHED */
		case GUEST_PORT_SYNC:
			report(args.arg1);
		case UCALL_SYNC:
			report(uc.args[1]);
			break;
		case GUEST_PORT_DONE:
		case UCALL_DONE:
			goto done;
		default:
			TEST_ASSERT(false, "Unknown port 0x%x.", args.port);
			TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
		}
	}