Merge tag 'kvm-4.9-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Radim Krčmář:
 "All architectures:
   - move `make kvmconfig` stubs from x86
   - use 64 bits for debugfs stats

  ARM:
   - Important fixes for not using an in-kernel irqchip
   - handle SError exceptions and present them to guests if appropriate
   - proxying of GICV access at EL2 if guest mappings are unsafe
   - GICv3 on AArch32 on ARMv8
   - preparations for GICv3 save/restore, including ABI docs
   - cleanups and a bit of optimizations

  MIPS:
   - A couple of fixes in preparation for supporting MIPS EVA host kernels
   - MIPS SMP host & TLB invalidation fixes

  PPC:
   - Fix the bug which caused guests to falsely report lockups
   - other minor fixes
   - a small optimization

  s390:
   - Lazy enablement of runtime instrumentation
   - up to 255 CPUs for nested guests
   - rework of machine check delivery
   - cleanups and fixes

  x86:
   - IOMMU part of AMD's AVIC for vmexit-less interrupt delivery
   - Hyper-V TSC page
   - per-vcpu tsc_offset in debugfs
   - accelerated INS/OUTS in nVMX
   - cleanups and fixes"

* tag 'kvm-4.9-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (140 commits)
  KVM: MIPS: Drop dubious EntryHi optimisation
  KVM: MIPS: Invalidate TLB by regenerating ASIDs
  KVM: MIPS: Split kernel/user ASID regeneration
  KVM: MIPS: Drop other CPU ASIDs on guest MMU changes
  KVM: arm/arm64: vgic: Don't flush/sync without a working vgic
  KVM: arm64: Require in-kernel irqchip for PMU support
  KVM: PPC: Book3s PR: Allow access to unprivileged MMCR2 register
  KVM: PPC: Book3S PR: Support 64kB page size on POWER8E and POWER8NVL
  KVM: PPC: Book3S: Remove duplicate setting of the B field in tlbie
  KVM: PPC: BookE: Fix a sanity check
  KVM: PPC: Book3S HV: Take out virtual core piggybacking code
  KVM: PPC: Book3S: Treat VTB as a per-subcore register, not per-thread
  ARM: gic-v3: Work around definition of gic_write_bpr1
  KVM: nVMX: Fix the NMI IDT-vectoring handling
  KVM: VMX: Enable MSR-BASED TPR shadow even if APICv is inactive
  KVM: nVMX: Fix reload apic access page warning
  kvmconfig: add virtio-gpu to config fragment
  config: move x86 kvm_guest.config to a common location
  arm64: KVM: Remove duplicating init code for setting VMID
  ARM: KVM: Support vgic-v3
  ...
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -22,9 +22,7 @@
 #include <linux/io.h>
 #include <asm/barrier.h>
-
-#define __ACCESS_CP15(CRn, Op1, CRm, Op2)  p15, Op1, %0, CRn, CRm, Op2
-#define __ACCESS_CP15_64(Op1, CRm)         p15, Op1, %Q0, %R0, CRm
+#include <asm/cp15.h>
 
 #define ICC_EOIR1                       __ACCESS_CP15(c12, 0, c12, 1)
 #define ICC_DIR                         __ACCESS_CP15(c12, 0, c11, 1)
@@ -99,68 +97,129 @@
 #define ICH_AP1R2                       __AP1Rx(2)
 #define ICH_AP1R3                       __AP1Rx(3)
 
+/* A32-to-A64 mappings used by VGIC save/restore */
+
+#define CPUIF_MAP(a32, a64)                     \
+static inline void write_ ## a64(u32 val)       \
+{                                               \
+        write_sysreg(val, a32);                 \
+}                                               \
+static inline u32 read_ ## a64(void)            \
+{                                               \
+        return read_sysreg(a32);                \
+}                                               \
+
+#define CPUIF_MAP_LO_HI(a32lo, a32hi, a64)      \
+static inline void write_ ## a64(u64 val)       \
+{                                               \
+        write_sysreg(lower_32_bits(val), a32lo);\
+        write_sysreg(upper_32_bits(val), a32hi);\
+}                                               \
+static inline u64 read_ ## a64(void)            \
+{                                               \
+        u64 val = read_sysreg(a32lo);           \
+                                                \
+        val |= (u64)read_sysreg(a32hi) << 32;   \
+                                                \
+        return val;                             \
+}
+
+CPUIF_MAP(ICH_HCR, ICH_HCR_EL2)
+CPUIF_MAP(ICH_VTR, ICH_VTR_EL2)
+CPUIF_MAP(ICH_MISR, ICH_MISR_EL2)
+CPUIF_MAP(ICH_EISR, ICH_EISR_EL2)
+CPUIF_MAP(ICH_ELSR, ICH_ELSR_EL2)
+CPUIF_MAP(ICH_VMCR, ICH_VMCR_EL2)
+CPUIF_MAP(ICH_AP0R3, ICH_AP0R3_EL2)
+CPUIF_MAP(ICH_AP0R2, ICH_AP0R2_EL2)
+CPUIF_MAP(ICH_AP0R1, ICH_AP0R1_EL2)
+CPUIF_MAP(ICH_AP0R0, ICH_AP0R0_EL2)
+CPUIF_MAP(ICH_AP1R3, ICH_AP1R3_EL2)
+CPUIF_MAP(ICH_AP1R2, ICH_AP1R2_EL2)
+CPUIF_MAP(ICH_AP1R1, ICH_AP1R1_EL2)
+CPUIF_MAP(ICH_AP1R0, ICH_AP1R0_EL2)
+CPUIF_MAP(ICC_HSRE, ICC_SRE_EL2)
+CPUIF_MAP(ICC_SRE, ICC_SRE_EL1)
+
+CPUIF_MAP_LO_HI(ICH_LR15, ICH_LRC15, ICH_LR15_EL2)
+CPUIF_MAP_LO_HI(ICH_LR14, ICH_LRC14, ICH_LR14_EL2)
+CPUIF_MAP_LO_HI(ICH_LR13, ICH_LRC13, ICH_LR13_EL2)
+CPUIF_MAP_LO_HI(ICH_LR12, ICH_LRC12, ICH_LR12_EL2)
+CPUIF_MAP_LO_HI(ICH_LR11, ICH_LRC11, ICH_LR11_EL2)
+CPUIF_MAP_LO_HI(ICH_LR10, ICH_LRC10, ICH_LR10_EL2)
+CPUIF_MAP_LO_HI(ICH_LR9, ICH_LRC9, ICH_LR9_EL2)
+CPUIF_MAP_LO_HI(ICH_LR8, ICH_LRC8, ICH_LR8_EL2)
+CPUIF_MAP_LO_HI(ICH_LR7, ICH_LRC7, ICH_LR7_EL2)
+CPUIF_MAP_LO_HI(ICH_LR6, ICH_LRC6, ICH_LR6_EL2)
+CPUIF_MAP_LO_HI(ICH_LR5, ICH_LRC5, ICH_LR5_EL2)
+CPUIF_MAP_LO_HI(ICH_LR4, ICH_LRC4, ICH_LR4_EL2)
+CPUIF_MAP_LO_HI(ICH_LR3, ICH_LRC3, ICH_LR3_EL2)
+CPUIF_MAP_LO_HI(ICH_LR2, ICH_LRC2, ICH_LR2_EL2)
+CPUIF_MAP_LO_HI(ICH_LR1, ICH_LRC1, ICH_LR1_EL2)
+CPUIF_MAP_LO_HI(ICH_LR0, ICH_LRC0, ICH_LR0_EL2)
+
+#define read_gicreg(r)          read_##r()
+#define write_gicreg(v, r)      write_##r(v)
+
 /* Low-level accessors */
 
 static inline void gic_write_eoir(u32 irq)
 {
-        asm volatile("mcr " __stringify(ICC_EOIR1) : : "r" (irq));
+        write_sysreg(irq, ICC_EOIR1);
         isb();
 }
 
 static inline void gic_write_dir(u32 val)
 {
-        asm volatile("mcr " __stringify(ICC_DIR) : : "r" (val));
+        write_sysreg(val, ICC_DIR);
         isb();
 }
 
 static inline u32 gic_read_iar(void)
 {
-        u32 irqstat;
+        u32 irqstat = read_sysreg(ICC_IAR1);
 
-        asm volatile("mrc " __stringify(ICC_IAR1) : "=r" (irqstat));
         dsb(sy);
+
         return irqstat;
 }
 
 static inline void gic_write_pmr(u32 val)
 {
-        asm volatile("mcr " __stringify(ICC_PMR) : : "r" (val));
+        write_sysreg(val, ICC_PMR);
 }
 
 static inline void gic_write_ctlr(u32 val)
 {
-        asm volatile("mcr " __stringify(ICC_CTLR) : : "r" (val));
+        write_sysreg(val, ICC_CTLR);
         isb();
 }
 
 static inline void gic_write_grpen1(u32 val)
 {
-        asm volatile("mcr " __stringify(ICC_IGRPEN1) : : "r" (val));
+        write_sysreg(val, ICC_IGRPEN1);
         isb();
 }
 
 static inline void gic_write_sgi1r(u64 val)
 {
-        asm volatile("mcrr " __stringify(ICC_SGI1R) : : "r" (val));
+        write_sysreg(val, ICC_SGI1R);
 }
 
 static inline u32 gic_read_sre(void)
 {
-        u32 val;
-
-        asm volatile("mrc " __stringify(ICC_SRE) : "=r" (val));
-        return val;
+        return read_sysreg(ICC_SRE);
 }
 
 static inline void gic_write_sre(u32 val)
 {
-        asm volatile("mcr " __stringify(ICC_SRE) : : "r" (val));
+        write_sysreg(val, ICC_SRE);
         isb();
 }
 
 static inline void gic_write_bpr1(u32 val)
 {
-        asm volatile("mcr " __stringify(ICC_BPR1) : : "r" (val));
+        write_sysreg(val, ICC_BPR1);
 }
 
 /*
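CPUIF_MAP() is pure token pasting, so the AArch64-named accessors are thin wrappers around the cp15 ones. As a rough illustration (expanded by hand, not part of the patch), CPUIF_MAP(ICH_HCR, ICH_HCR_EL2) produces:

    static inline void write_ICH_HCR_EL2(u32 val)
    {
            write_sysreg(val, ICH_HCR);     /* mcr into the cp15 ICH_HCR encoding */
    }
    static inline u32 read_ICH_HCR_EL2(void)
    {
            return read_sysreg(ICH_HCR);    /* mrc from the same encoding */
    }

This is what lets the VGIC save/restore code, written against the arm64 register names, compile unchanged on 32-bit via read_gicreg(ICH_HCR_EL2) and write_gicreg(v, ICH_HCR_EL2).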
--- a/arch/arm/include/asm/cp15.h
+++ b/arch/arm/include/asm/cp15.h
@@ -49,6 +49,21 @@
 
 #ifdef CONFIG_CPU_CP15
 
+#define __ACCESS_CP15(CRn, Op1, CRm, Op2)       \
+        "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
+#define __ACCESS_CP15_64(Op1, CRm)              \
+        "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64
+
+#define __read_sysreg(r, w, c, t) ({            \
+        t __val;                                \
+        asm volatile(r " " c : "=r" (__val));   \
+        __val;                                  \
+})
+#define read_sysreg(...)        __read_sysreg(__VA_ARGS__)
+
+#define __write_sysreg(v, r, w, c, t)   asm volatile(w " " c : : "r" ((t)(v)))
+#define write_sysreg(v, ...)    __write_sysreg(v, __VA_ARGS__)
+
 extern unsigned long cr_alignment;      /* defined in entry-armv.S */
 
 static inline unsigned long get_cr(void)
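The two-step __VA_ARGS__ indirection is what makes these work with the register-name macros: a name such as ICC_SRE (defined in arch_gicv3.h as __ACCESS_CP15(c12, 0, c12, 5)) expands into the four arguments ("mrc", "mcr", encoding string, type) that __read_sysreg()/__write_sysreg() expect. Expanded by hand for illustration:

    /* read_sysreg(ICC_SRE) after preprocessing (illustrative): */
    ({
            u32 __val;
            asm volatile("mrc " "p15, 0, %0, c12, c12, 5" : "=r" (__val));
            __val;
    })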
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -55,6 +55,7 @@
 
 #define MPIDR_LEVEL_BITS 8
 #define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
+#define MPIDR_LEVEL_SHIFT(level) (MPIDR_LEVEL_BITS * level)
 
 #define MPIDR_AFFINITY_LEVEL(mpidr, level) \
         ((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)
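Each affinity level occupies one byte of MPIDR, so these macros are plain shift-and-mask arithmetic. A worked example with a made-up value:

    unsigned long mpidr = 0x00020103;    /* example value only */
    MPIDR_AFFINITY_LEVEL(mpidr, 0);      /* (mpidr >> 0)  & 0xff == 3  (Aff0) */
    MPIDR_AFFINITY_LEVEL(mpidr, 1);      /* (mpidr >> 8)  & 0xff == 1  (Aff1) */
    MPIDR_AFFINITY_LEVEL(mpidr, 2);      /* (mpidr >> 16) & 0xff == 2  (Aff2) */

The new MPIDR_LEVEL_SHIFT(level) just gives the MPIDR_LEVEL_BITS * level shift a name, presumably so the GICv3 code added by this series can use it without open-coding the multiply.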
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -21,6 +21,10 @@
 
 #include <asm/virt.h>
 
+#define ARM_EXIT_WITH_ABORT_BIT  31
+#define ARM_EXCEPTION_CODE(x)     ((x) & ~(1U << ARM_EXIT_WITH_ABORT_BIT))
+#define ARM_ABORT_PENDING(x)      !!((x) & (1U << ARM_EXIT_WITH_ABORT_BIT))
+
 #define ARM_EXCEPTION_RESET       0
 #define ARM_EXCEPTION_UNDEFINED   1
 #define ARM_EXCEPTION_SOFTWARE    2
@@ -68,6 +72,9 @@ extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 extern void __init_stage2_translation(void);
 
 extern void __kvm_hyp_reset(unsigned long);
+
+extern u64 __vgic_v3_get_ich_vtr_el2(void);
+extern void __vgic_v3_init_lrs(void);
 #endif
 
 #endif /* __ARM_KVM_ASM_H__ */
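With bit 31 used as a flag, a single exit code can carry both an exception class and a "virtual abort pending" marker: ARM_EXCEPTION_CODE() strips the flag, ARM_ABORT_PENDING() tests it. A sketch of the shape of a consumer (illustrative only, not the kernel's actual handler; example_handle_exit is an invented name):

    static int example_handle_exit(struct kvm_vcpu *vcpu, int exit_code)
    {
            if (ARM_ABORT_PENDING(exit_code))        /* bit 31 set by hyp code */
                    kvm_inject_vabt(vcpu);           /* declared in kvm_emulate.h below */

            switch (ARM_EXCEPTION_CODE(exit_code)) { /* bit 31 masked off */
            case ARM_EXCEPTION_SOFTWARE:
                    return 1;
            default:
                    return 0;
            }
    }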
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -40,18 +40,29 @@ static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
         *vcpu_reg(vcpu, reg_num) = val;
 }
 
-bool kvm_condition_valid(struct kvm_vcpu *vcpu);
-void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
+bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
+void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
+void kvm_inject_vabt(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
 
+static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
+{
+        return kvm_condition_valid32(vcpu);
+}
+
+static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
+{
+        kvm_skip_instr32(vcpu, is_wide_instr);
+}
+
 static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 {
         vcpu->arch.hcr = HCR_GUEST_MASK;
 }
 
-static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
+static inline unsigned long vcpu_get_hcr(const struct kvm_vcpu *vcpu)
 {
         return vcpu->arch.hcr;
 }
@@ -61,7 +72,7 @@ static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
         vcpu->arch.hcr = hcr;
 }
 
-static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
+static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
 {
         return 1;
 }
@@ -71,9 +82,9 @@ static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
         return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_pc;
 }
 
-static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu)
+static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
 {
-        return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
+        return (unsigned long *)&vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
 }
 
 static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
@@ -93,11 +104,21 @@ static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
         return cpsr_mode > USR_MODE;
 }
 
-static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu)
+static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
 {
         return vcpu->arch.fault.hsr;
 }
 
+static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
+{
+        u32 hsr = kvm_vcpu_get_hsr(vcpu);
+
+        if (hsr & HSR_CV)
+                return (hsr & HSR_COND) >> HSR_COND_SHIFT;
+
+        return -1;
+}
+
 static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
 {
         return vcpu->arch.fault.hxfar;
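kvm_vcpu_get_condition() returns the trapped instruction's condition code straight from HSR when the CV bit says the COND field is valid, and -1 when the caller must decode the instruction itself. A sketch of the kind of caller this serves (invented names: example_condition_passed and check_condition are hypothetical, not from this patch):

    static bool example_condition_passed(const struct kvm_vcpu *vcpu)
    {
            int cond = kvm_vcpu_get_condition(vcpu);

            if (cond < 0)            /* HSR.CV clear: go decode the instruction */
                    return kvm_condition_valid32(vcpu);
            if (cond == 0xe)         /* AL condition: always executes */
                    return true;
            /* otherwise evaluate 'cond' against the guest's CPSR flags */
            return check_condition(cond, *vcpu_cpsr(vcpu)); /* hypothetical helper */
    }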
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -39,7 +39,12 @@
 
 #include <kvm/arm_vgic.h>
 
+
+#ifdef CONFIG_ARM_GIC_V3
+#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
+#else
 #define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
+#endif
 
 #define KVM_REQ_VCPU_EXIT       8
@@ -183,15 +188,15 @@ struct kvm_vcpu_arch {
 };
 
 struct kvm_vm_stat {
-        u32 remote_tlb_flush;
+        ulong remote_tlb_flush;
 };
 
 struct kvm_vcpu_stat {
-        u32 halt_successful_poll;
-        u32 halt_attempted_poll;
-        u32 halt_poll_invalid;
-        u32 halt_wakeup;
-        u32 hvc_exit_stat;
+        u64 halt_successful_poll;
+        u64 halt_attempted_poll;
+        u64 halt_poll_invalid;
+        u64 halt_wakeup;
+        u64 hvc_exit_stat;
         u64 wfe_exit_stat;
         u64 wfi_exit_stat;
         u64 mmio_exit_user;
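The u32 to u64 widening here is the ARM side of the cross-architecture "use 64 bits for debugfs stats" item in the merge message: a 32-bit counter wraps after 2^32 events, which a long-lived, busy vCPU can plausibly reach, and debugfs would then report values that appear to go backwards. The accounting itself is unchanged; an increment such as vcpu->stat.halt_wakeup++ now simply targets a u64.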
--- a/arch/arm/include/asm/kvm_hyp.h
+++ b/arch/arm/include/asm/kvm_hyp.h
@@ -20,28 +20,15 @@
 
 #include <linux/compiler.h>
 #include <linux/kvm_host.h>
+#include <asm/cp15.h>
 #include <asm/kvm_mmu.h>
 #include <asm/vfp.h>
 
 #define __hyp_text __section(.hyp.text) notrace
 
-#define __ACCESS_CP15(CRn, Op1, CRm, Op2)       \
-        "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
-#define __ACCESS_CP15_64(Op1, CRm)              \
-        "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64
 #define __ACCESS_VFP(CRn)                       \
         "mrc", "mcr", __stringify(p10, 7, %0, CRn, cr0, 0), u32
 
-#define __write_sysreg(v, r, w, c, t)   asm volatile(w " " c : : "r" ((t)(v)))
-#define write_sysreg(v, ...)    __write_sysreg(v, __VA_ARGS__)
-
-#define __read_sysreg(r, w, c, t) ({            \
-        t __val;                                \
-        asm volatile(r " " c : "=r" (__val));   \
-        __val;                                  \
-})
-#define read_sysreg(...)        __read_sysreg(__VA_ARGS__)
-
 #define write_special(v, r)                                     \
         asm volatile("msr " __stringify(r) ", %0" : : "r" (v))
 #define read_special(r) ({                                      \
@@ -119,6 +106,9 @@ void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
 void __sysreg_save_state(struct kvm_cpu_context *ctxt);
 void __sysreg_restore_state(struct kvm_cpu_context *ctxt);
 
+void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
+void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
+
 void asmlinkage __vfp_save_state(struct vfp_hard_struct *vfp);
 void asmlinkage __vfp_restore_state(struct vfp_hard_struct *vfp);
 static inline bool __vfp_enabled(void)
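This hunk removes kvm_hyp.h's private copies of __ACCESS_CP15 and the sysreg accessors now that cp15.h provides them. Only __ACCESS_VFP() stays behind, because it encodes coprocessor p10 rather than p15, but it keeps the same four-item shape, so the shared read_sysreg()/write_sysreg() consume it unchanged. Expanded by hand (illustrative; cr8 is an arbitrary example register, not taken from the patch):

    /* write_sysreg(val, __ACCESS_VFP(cr8)) after preprocessing: */
    asm volatile("mcr " "p10, 7, %0, cr8, cr0, 0" : : "r" ((u32)(val)));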
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -63,37 +63,13 @@ void kvm_clear_hyp_idmap(void);
 static inline void kvm_set_pmd(pmd_t *pmd, pmd_t new_pmd)
 {
         *pmd = new_pmd;
-        flush_pmd_entry(pmd);
+        dsb(ishst);
 }
 
 static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
 {
         *pte = new_pte;
-        /*
-         * flush_pmd_entry just takes a void pointer and cleans the necessary
-         * cache entries, so we can reuse the function for ptes.
-         */
-        flush_pmd_entry(pte);
-}
-
-static inline void kvm_clean_pgd(pgd_t *pgd)
-{
-        clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
-}
-
-static inline void kvm_clean_pmd(pmd_t *pmd)
-{
-        clean_dcache_area(pmd, PTRS_PER_PMD * sizeof(pmd_t));
-}
-
-static inline void kvm_clean_pmd_entry(pmd_t *pmd)
-{
-        clean_pmd_entry(pmd);
-}
-
-static inline void kvm_clean_pte(pte_t *pte)
-{
-        clean_pte_table(pte);
+        dsb(ishst);
 }
 
 static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
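Replacing flush_pmd_entry() and the kvm_clean_*() helpers with a bare dsb(ishst) is consistent with treating the stage-2 page-table walker as cache-coherent: publishing an entry then only requires the store to be ordered before any subsequent TLB maintenance, not a D-cache clean. The resulting idiom, spelled out as an illustrative sketch under that assumption:

    *pte = new_pte;    /* plain store to the page table */
    dsb(ishst);        /* inner-shareable store barrier: the update is visible
                          to all observers before any later TLB invalidation */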