Merge tag 'kvm-arm-for-4.8' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into next

KVM/ARM changes for Linux 4.8

- GICv3 ITS emulation
- Simpler idmap management that fixes potential TLB conflicts
- Honor the kernel protection in HYP mode
- Removal of the old vgic implementation
Radim Krčmář · 2016-07-22 20:27:26 +02:00
54 changed files with 2709 additions and 6076 deletions
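The "kernel protection" item above is directly visible in the hunks below: create_hyp_mappings() grows a pgprot_t argument, so each HYP mapping now carries permissions matching its payload instead of a blanket PAGE_HYP. A condensed sketch of the resulting init_hyp_mode() calls (error handling trimmed; in this series PAGE_HYP_EXEC is executable/read-only, PAGE_HYP_RO read-only, and PAGE_HYP read-write but non-executable):

	/* HYP text: may be executed, may no longer be rewritten */
	create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
			    kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC);

	/* rodata: readable and nothing else */
	create_hyp_mappings(kvm_ksym_ref(__start_rodata),
			    kvm_ksym_ref(__end_rodata), PAGE_HYP_RO);

	/* stacks and per-CPU state: writable, never executed */
	create_hyp_mappings(stack_page, stack_page + PAGE_SIZE, PAGE_HYP);

Plain data structures (the kvm and vcpu mappings in arm.c) keep the old read-write behaviour by passing PAGE_HYP explicitly.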

arch/arm/kvm/Kconfig

@@ -46,13 +46,6 @@ config KVM_ARM_HOST
 	---help---
 	  Provides host support for ARM processors.
 
-config KVM_NEW_VGIC
-	bool "New VGIC implementation"
-	depends on KVM
-	default y
-	---help---
-	  uses the new VGIC implementation
-
 source drivers/vhost/Kconfig
 
 endif # VIRTUALIZATION

arch/arm/kvm/Makefile

@@ -22,7 +22,6 @@ obj-y += kvm-arm.o init.o interrupts.o
 obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
 obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
-ifeq ($(CONFIG_KVM_NEW_VGIC),y)
 obj-y += $(KVM)/arm/vgic/vgic.o
 obj-y += $(KVM)/arm/vgic/vgic-init.o
 obj-y += $(KVM)/arm/vgic/vgic-irqfd.o
@@ -30,9 +29,4 @@ obj-y += $(KVM)/arm/vgic/vgic-v2.o
 obj-y += $(KVM)/arm/vgic/vgic-mmio.o
 obj-y += $(KVM)/arm/vgic/vgic-mmio-v2.o
 obj-y += $(KVM)/arm/vgic/vgic-kvm-device.o
-else
-obj-y += $(KVM)/arm/vgic.o
-obj-y += $(KVM)/arm/vgic-v2.o
-obj-y += $(KVM)/arm/vgic-v2-emul.o
-endif
 obj-y += $(KVM)/arm/arch_timer.o

arch/arm/kvm/arm.c

@@ -20,6 +20,7 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/kvm_host.h>
+#include <linux/list.h>
 #include <linux/module.h>
 #include <linux/vmalloc.h>
 #include <linux/fs.h>
@@ -122,7 +123,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	if (ret)
 		goto out_fail_alloc;
 
-	ret = create_hyp_mappings(kvm, kvm + 1);
+	ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
 	if (ret)
 		goto out_free_stage2_pgd;
 
@@ -201,7 +202,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		r = KVM_MAX_VCPUS;
 		break;
 	default:
-		r = kvm_arch_dev_ioctl_check_extension(ext);
+		r = kvm_arch_dev_ioctl_check_extension(kvm, ext);
 		break;
 	}
 	return r;
@@ -239,7 +240,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 	if (err)
 		goto free_vcpu;
 
-	err = create_hyp_mappings(vcpu, vcpu + 1);
+	err = create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
 	if (err)
 		goto vcpu_uninit;
 
@@ -1038,7 +1039,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
 static void cpu_init_hyp_mode(void *dummy)
 {
-	phys_addr_t boot_pgd_ptr;
 	phys_addr_t pgd_ptr;
 	unsigned long hyp_stack_ptr;
 	unsigned long stack_page;
@@ -1047,13 +1047,12 @@ static void cpu_init_hyp_mode(void *dummy)
 	/* Switch from the HYP stub to our own HYP init vector */
 	__hyp_set_vectors(kvm_get_idmap_vector());
 
-	boot_pgd_ptr = kvm_mmu_get_boot_httbr();
 	pgd_ptr = kvm_mmu_get_httbr();
 	stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
 	hyp_stack_ptr = stack_page + PAGE_SIZE;
 	vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
 
-	__cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr);
+	__cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
 	__cpu_init_stage2();
 
 	kvm_arm_init_debug();
@@ -1075,15 +1074,9 @@ static void cpu_hyp_reinit(void)
 static void cpu_hyp_reset(void)
 {
-	phys_addr_t boot_pgd_ptr;
-	phys_addr_t phys_idmap_start;
-
-	if (!is_kernel_in_hyp_mode()) {
-		boot_pgd_ptr = kvm_mmu_get_boot_httbr();
-		phys_idmap_start = kvm_get_idmap_start();
-
-		__cpu_reset_hyp_mode(boot_pgd_ptr, phys_idmap_start);
-	}
+	if (!is_kernel_in_hyp_mode())
+		__cpu_reset_hyp_mode(hyp_default_vectors,
+				     kvm_get_idmap_start());
 }
 
 static void _kvm_arch_hardware_enable(void *discard)
@@ -1293,14 +1286,14 @@ static int init_hyp_mode(void)
 	 * Map the Hyp-code called directly from the host
 	 */
 	err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
-				  kvm_ksym_ref(__hyp_text_end));
+				  kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC);
 	if (err) {
 		kvm_err("Cannot map world-switch code\n");
 		goto out_err;
 	}
 
 	err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
-				  kvm_ksym_ref(__end_rodata));
+				  kvm_ksym_ref(__end_rodata), PAGE_HYP_RO);
 	if (err) {
 		kvm_err("Cannot map rodata section\n");
 		goto out_err;
@@ -1311,7 +1304,8 @@ static int init_hyp_mode(void)
 	 */
 	for_each_possible_cpu(cpu) {
 		char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
-		err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE);
+		err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE,
+					  PAGE_HYP);
 
 		if (err) {
 			kvm_err("Cannot map hyp stack\n");
@@ -1323,7 +1317,7 @@ static int init_hyp_mode(void)
 		kvm_cpu_context_t *cpu_ctxt;
 
 		cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu);
-		err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1);
+		err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1, PAGE_HYP);
 
 		if (err) {
 			kvm_err("Cannot map host CPU state: %d\n", err);
@@ -1331,10 +1325,6 @@ static int init_hyp_mode(void)
 		}
 	}
 
-#ifndef CONFIG_HOTPLUG_CPU
-	free_boot_hyp_pgd();
-#endif
-
 	/* set size of VMID supported by CPU */
 	kvm_vmid_bits = kvm_get_vmid_bits();
 	kvm_info("%d-bit VMID\n", kvm_vmid_bits);

arch/arm/kvm/init.S

@@ -32,23 +32,13 @@
  * r2,r3 = Hypervisor pgd pointer
  *
  * The init scenario is:
- * - We jump in HYP with four parameters: boot HYP pgd, runtime HYP pgd,
- *   runtime stack, runtime vectors
- * - Enable the MMU with the boot pgd
- * - Jump to a target into the trampoline page (remember, this is the same
- *   physical page!)
- * - Now switch to the runtime pgd (same VA, and still the same physical
- *   page!)
+ * - We jump in HYP with 3 parameters: runtime HYP pgd, runtime stack,
+ *   runtime vectors
  * - Invalidate TLBs
  * - Set stack and vectors
+ * - Setup the page tables
+ * - Enable the MMU
  * - Profit! (or eret, if you only care about the code).
- *
- * As we only have four registers available to pass parameters (and we
- * need six), we split the init in two phases:
- * - Phase 1: r0 = 0, r1 = 0, r2,r3 contain the boot PGD.
- *   Provides the basic HYP init, and enable the MMU.
- * - Phase 2: r0 = ToS, r1 = vectors, r2,r3 contain the runtime PGD.
- *   Switches to the runtime PGD, set stack and vectors.
  */
 
	.text
@@ -68,8 +58,11 @@ __kvm_hyp_init:
 	W(b)	.
 
 __do_hyp_init:
-	cmp	r0, #0			@ We have a SP?
-	bne	phase2			@ Yes, second stage init
+	@ Set stack pointer
+	mov	sp, r0
+
+	@ Set HVBAR to point to the HYP vectors
+	mcr	p15, 4, r1, c12, c0, 0	@ HVBAR
 
 	@ Set the HTTBR to point to the hypervisor PGD pointer passed
 	mcrr	p15, 4, rr_lo_hi(r2, r3), c2
@@ -114,34 +107,25 @@ __do_hyp_init:
 THUMB(	ldr	r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE)	)
 	orr	r1, r1, r2
 	orr	r0, r0, r1
 	isb
 	mcr	p15, 4, r0, c1, c0, 0	@ HSCR
 
-	@ End of init phase-1
 	eret
 
-phase2:
-	@ Set stack pointer
-	mov	sp, r0
-
-	@ Set HVBAR to point to the HYP vectors
-	mcr	p15, 4, r1, c12, c0, 0	@ HVBAR
-
-	@ Jump to the trampoline page
-	ldr	r0, =TRAMPOLINE_VA
-	adr	r1, target
-	bfi	r0, r1, #0, #PAGE_SHIFT
-	ret	r0
-
-target:	@ We're now in the trampoline code, switch page tables
-	mcrr	p15, 4, rr_lo_hi(r2, r3), c2
-	isb
-
-	@ Invalidate the old TLBs
-	mcr	p15, 4, r0, c8, c7, 0	@ TLBIALLH
-	dsb	ish
-
-	eret
+	@ r0 : stub vectors address
+ENTRY(__kvm_hyp_reset)
+	/* We're now in idmap, disable MMU */
+	mrc	p15, 4, r1, c1, c0, 0	@ HSCTLR
+	ldr	r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I)
+	bic	r1, r1, r2
+	mcr	p15, 4, r1, c1, c0, 0	@ HSCTLR
+
+	/* Install stub vectors */
+	mcr	p15, 4, r0, c12, c0, 0	@ HVBAR
+	isb
+	eret
+ENDPROC(__kvm_hyp_reset)
 
 	.ltorg

arch/arm/kvm/mmu.c

@@ -32,8 +32,6 @@
 #include "trace.h"
 
-extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
-
 static pgd_t *boot_hyp_pgd;
 static pgd_t *hyp_pgd;
 static pgd_t *merged_hyp_pgd;
@@ -483,28 +481,6 @@ static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size)
 	} while (pgd++, addr = next, addr != end);
 }
 
-/**
- * free_boot_hyp_pgd - free HYP boot page tables
- *
- * Free the HYP boot page tables. The bounce page is also freed.
- */
-void free_boot_hyp_pgd(void)
-{
-	mutex_lock(&kvm_hyp_pgd_mutex);
-
-	if (boot_hyp_pgd) {
-		unmap_hyp_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
-		unmap_hyp_range(boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
-		free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
-		boot_hyp_pgd = NULL;
-	}
-
-	if (hyp_pgd)
-		unmap_hyp_range(hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
-
-	mutex_unlock(&kvm_hyp_pgd_mutex);
-}
-
 /**
  * free_hyp_pgds - free Hyp-mode page tables
  *
@@ -519,15 +495,20 @@ void free_hyp_pgds(void)
 {
 	unsigned long addr;
 
-	free_boot_hyp_pgd();
-
 	mutex_lock(&kvm_hyp_pgd_mutex);
 
+	if (boot_hyp_pgd) {
+		unmap_hyp_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
+		free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
+		boot_hyp_pgd = NULL;
+	}
+
 	if (hyp_pgd) {
+		unmap_hyp_range(hyp_pgd, hyp_idmap_start, PAGE_SIZE);
 		for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
-			unmap_hyp_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+			unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
 		for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
-			unmap_hyp_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+			unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
 
 		free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
 		hyp_pgd = NULL;
@@ -679,17 +660,18 @@ static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
 /**
  * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
  * @from:	The virtual kernel start address of the range
 * @to:		The virtual kernel end address of the range (exclusive)
+ * @prot:	The protection to be applied to this range
 *
 * The same virtual address as the kernel virtual address is also used
 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
 * physical pages.
 */
-int create_hyp_mappings(void *from, void *to)
+int create_hyp_mappings(void *from, void *to, pgprot_t prot)
 {
 	phys_addr_t phys_addr;
 	unsigned long virt_addr;
-	unsigned long start = KERN_TO_HYP((unsigned long)from);
-	unsigned long end = KERN_TO_HYP((unsigned long)to);
+	unsigned long start = kern_hyp_va((unsigned long)from);
+	unsigned long end = kern_hyp_va((unsigned long)to);
 
 	if (is_kernel_in_hyp_mode())
 		return 0;
@@ -704,7 +686,7 @@ int create_hyp_mappings(void *from, void *to)
 		err = __create_hyp_mappings(hyp_pgd, virt_addr,
 					    virt_addr + PAGE_SIZE,
 					    __phys_to_pfn(phys_addr),
-					    PAGE_HYP);
+					    prot);
 		if (err)
 			return err;
 	}
@@ -723,8 +705,8 @@ int create_hyp_mappings(void *from, void *to)
 */
 int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
 {
-	unsigned long start = KERN_TO_HYP((unsigned long)from);
-	unsigned long end = KERN_TO_HYP((unsigned long)to);
+	unsigned long start = kern_hyp_va((unsigned long)from);
+	unsigned long end = kern_hyp_va((unsigned long)to);
 
 	if (is_kernel_in_hyp_mode())
 		return 0;
@@ -1687,14 +1669,6 @@ phys_addr_t kvm_mmu_get_httbr(void)
 	return virt_to_phys(hyp_pgd);
 }
 
-phys_addr_t kvm_mmu_get_boot_httbr(void)
-{
-	if (__kvm_cpu_uses_extended_idmap())
-		return virt_to_phys(merged_hyp_pgd);
-	else
-		return virt_to_phys(boot_hyp_pgd);
-}
-
 phys_addr_t kvm_get_idmap_vector(void)
 {
 	return hyp_idmap_vector;
@@ -1705,6 +1679,22 @@ phys_addr_t kvm_get_idmap_start(void)
 	return hyp_idmap_start;
 }
 
+static int kvm_map_idmap_text(pgd_t *pgd)
+{
+	int err;
+
+	/* Create the idmap in the boot page tables */
+	err = __create_hyp_mappings(pgd,
+				    hyp_idmap_start, hyp_idmap_end,
+				    __phys_to_pfn(hyp_idmap_start),
+				    PAGE_HYP_EXEC);
+	if (err)
+		kvm_err("Failed to idmap %lx-%lx\n",
+			hyp_idmap_start, hyp_idmap_end);
+
+	return err;
+}
+
 int kvm_mmu_init(void)
 {
 	int err;
@@ -1719,28 +1709,41 @@ int kvm_mmu_init(void)
 	 */
 	BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
 
-	hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
-	boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
+	kvm_info("IDMAP page: %lx\n", hyp_idmap_start);
+	kvm_info("HYP VA range: %lx:%lx\n",
+		 kern_hyp_va(PAGE_OFFSET), kern_hyp_va(~0UL));
 
-	if (!hyp_pgd || !boot_hyp_pgd) {
+	if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
+	    hyp_idmap_start < kern_hyp_va(~0UL)) {
+		/*
+		 * The idmap page is intersecting with the VA space,
+		 * it is not safe to continue further.
+		 */
+		kvm_err("IDMAP intersecting with HYP VA, unable to continue\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
+	if (!hyp_pgd) {
 		kvm_err("Hyp mode PGD not allocated\n");
 		err = -ENOMEM;
 		goto out;
 	}
 
-	/* Create the idmap in the boot page tables */
-	err = __create_hyp_mappings(boot_hyp_pgd,
-				    hyp_idmap_start, hyp_idmap_end,
-				    __phys_to_pfn(hyp_idmap_start),
-				    PAGE_HYP);
-
-	if (err) {
-		kvm_err("Failed to idmap %lx-%lx\n",
-			hyp_idmap_start, hyp_idmap_end);
-		goto out;
-	}
-
 	if (__kvm_cpu_uses_extended_idmap()) {
+		boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+							 hyp_pgd_order);
+		if (!boot_hyp_pgd) {
+			kvm_err("Hyp boot PGD not allocated\n");
+			err = -ENOMEM;
+			goto out;
+		}
+
+		err = kvm_map_idmap_text(boot_hyp_pgd);
+		if (err)
+			goto out;
+
 		merged_hyp_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 		if (!merged_hyp_pgd) {
 			kvm_err("Failed to allocate extra HYP pgd\n");
@@ -1748,29 +1751,10 @@ int kvm_mmu_init(void)
 		}
 		__kvm_extend_hypmap(boot_hyp_pgd, hyp_pgd, merged_hyp_pgd,
 				    hyp_idmap_start);
-		return 0;
-	}
-
-	/* Map the very same page at the trampoline VA */
-	err = __create_hyp_mappings(boot_hyp_pgd,
-				    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
-				    __phys_to_pfn(hyp_idmap_start),
-				    PAGE_HYP);
-	if (err) {
-		kvm_err("Failed to map trampoline @%lx into boot HYP pgd\n",
-			TRAMPOLINE_VA);
-		goto out;
-	}
-
-	/* Map the same page again into the runtime page tables */
-	err = __create_hyp_mappings(hyp_pgd,
-				    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
-				    __phys_to_pfn(hyp_idmap_start),
-				    PAGE_HYP);
-	if (err) {
-		kvm_err("Failed to map trampoline @%lx into runtime HYP pgd\n",
-			TRAMPOLINE_VA);
-		goto out;
+	} else {
+		err = kvm_map_idmap_text(hyp_pgd);
+		if (err)
+			goto out;
 	}
 
 	return 0;
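The mmu.c rework above is what backs the "fixes potential TLB conflicts" bullet: instead of a boot PGD plus a trampoline page that the runtime tables must also carry, kvm_mmu_init() now refuses to run when the idmap page would alias the runtime HYP VA range, and only allocates a separate boot PGD when the CPU needs the extended idmap. A tiny standalone sketch of that intersection test; the kern_hyp_va() here is an illustrative stand-in that masks the top VA bit, not the kernel's arch-specific definition, and the addresses are made-up examples:

	#include <stdio.h>

	#define PAGE_OFFSET	0xc0000000UL	/* example kernel VA base */
	#define HYP_VA_MASK	0x7fffffffUL	/* illustrative top-bit mask */

	static unsigned long kern_hyp_va(unsigned long kva)
	{
		return kva & HYP_VA_MASK;
	}

	int main(void)
	{
		unsigned long hyp_idmap_start = 0x40201000UL;	/* example idmap page */

		/*
		 * Same test as the new kvm_mmu_init(): if the idmap page sits
		 * inside the VA range HYP uses for runtime mappings, one VA
		 * would need two different translations, so give up rather
		 * than risk conflicting TLB entries.
		 */
		if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
		    hyp_idmap_start < kern_hyp_va(~0UL))
			printf("IDMAP intersecting with HYP VA, unable to continue\n");
		else
			printf("idmap outside the HYP VA range, a single PGD suffices\n");
		return 0;
	}

When the test passes and no extended idmap is needed, kvm_map_idmap_text() installs the idmap directly into the runtime hyp_pgd (the else branch above), so boot and runtime share one set of page tables and the trampoline remapping disappears entirely.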