Merge branch 'errata/tx2-219' into for-next/fixes
Workaround for Cavium/Marvell ThunderX2 erratum #219.

* errata/tx2-219:
  arm64: Allow CAVIUM_TX2_ERRATUM_219 to be selected
  arm64: Avoid Cavium TX2 erratum 219 when switching TTBR
  arm64: Enable workaround for Cavium TX2 erratum 219 when running SMT
  arm64: KVM: Trap VM ops when ARM64_WORKAROUND_CAVIUM_TX2_219_TVM is set
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
@@ -616,6 +616,23 @@ config CAVIUM_ERRATUM_30115
 
 	  If unsure, say Y.
 
+config CAVIUM_TX2_ERRATUM_219
+	bool "Cavium ThunderX2 erratum 219: PRFM between TTBR change and ISB fails"
+	default y
+	help
+	  On Cavium ThunderX2, a load, store or prefetch instruction between a
+	  TTBR update and the corresponding context synchronizing operation can
+	  cause a spurious Data Abort to be delivered to any hardware thread in
+	  the CPU core.
+
+	  Work around the issue by avoiding the problematic code sequence and
+	  trapping KVM guest TTBRx_EL1 writes to EL2 when SMT is enabled. The
+	  trap handler performs the corresponding register access, skips the
+	  instruction and ensures context synchronization by virtue of the
+	  exception return.
+
+	  If unsure, say Y.
+
 config QCOM_FALKOR_ERRATUM_1003
 	bool "Falkor E1003: Incorrect translation due to ASID change"
 	default y
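To make the help text above concrete, here is a minimal sketch of the fault window it describes — hypothetical helper names, AArch64-only inline asm that must run at EL1, not code taken from the patch. The erratum fires when a memory-touching instruction sits between the TTBR write and the ISB, so the fix is simply to keep that window empty:

/*
 * Illustration only (AArch64, EL1): the erratum window is any load,
 * store or prefetch between the TTBR write and the ISB.
 */
static inline void switch_ttbr0_unsafe(unsigned long ttbr)
{
	asm volatile("msr ttbr0_el1, %0\n\t"
		     "prfm pldl1keep, [sp]\n\t"	/* inside the window: may fault spuriously */
		     "isb"
		     : : "r" (ttbr) : "memory");
}

static inline void switch_ttbr0_safe(unsigned long ttbr)
{
	asm volatile("msr ttbr0_el1, %0\n\t"
		     "isb"			/* synchronize before any memory access */
		     : : "r" (ttbr) : "memory");
}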
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
@@ -723,7 +723,7 @@ CONFIG_TEGRA_IOMMU_SMMU=y
 CONFIG_ARM_SMMU=y
 CONFIG_ARM_SMMU_V3=y
 CONFIG_QCOM_IOMMU=y
-CONFIG_REMOTEPROC=m
+CONFIG_REMOTEPROC=y
 CONFIG_QCOM_Q6V5_MSS=m
 CONFIG_QCOM_Q6V5_PAS=m
 CONFIG_QCOM_SYSMON=m
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
@@ -52,7 +52,9 @@
 #define ARM64_HAS_IRQ_PRIO_MASKING		42
 #define ARM64_HAS_DCPODP			43
 #define ARM64_WORKAROUND_1463225		44
+#define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM	45
+#define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM	46
 
-#define ARM64_NCAPS				45
+#define ARM64_NCAPS				47
 
 #endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
@@ -47,30 +47,6 @@
 #define read_sysreg_el2(r)	read_sysreg_elx(r, _EL2, _EL1)
 #define write_sysreg_el2(v,r)	write_sysreg_elx(v, r, _EL2, _EL1)
 
-/**
- * hyp_alternate_select - Generates patchable code sequences that are
- * used to switch between two implementations of a function, depending
- * on the availability of a feature.
- *
- * @fname: a symbol name that will be defined as a function returning a
- * function pointer whose type will match @orig and @alt
- * @orig: A pointer to the default function, as returned by @fname when
- * @cond doesn't hold
- * @alt: A pointer to the alternate function, as returned by @fname
- * when @cond holds
- * @cond: a CPU feature (as described in asm/cpufeature.h)
- */
-#define hyp_alternate_select(fname, orig, alt, cond)			\
-typeof(orig) * __hyp_text fname(void)					\
-{									\
-	typeof(alt) *val = orig;					\
-	asm volatile(ALTERNATIVE("nop		\n",			\
-				 "mov	%0, %1	\n",			\
-				 cond)					\
-		     : "+r" (val) : "r" (alt));				\
-	return val;							\
-}
-
 int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);
 
 void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/include/asm/xen/xen-ops.h b/arch/arm64/include/asm/xen/xen-ops.h
deleted file
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_XEN_OPS_H
-#define _ASM_XEN_OPS_H
-
-void xen_efi_runtime_setup(void);
-
-#endif /* _ASM_XEN_OPS_H */
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
@@ -12,6 +12,7 @@
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/cpufeature.h>
+#include <asm/smp_plat.h>
 
 static bool __maybe_unused
 is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
@@ -623,6 +624,30 @@ check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
 	return (need_wa > 0);
 }
 
+static const __maybe_unused struct midr_range tx2_family_cpus[] = {
+	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
+	{},
+};
+
+static bool __maybe_unused
+needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
+			 int scope)
+{
+	int i;
+
+	if (!is_affected_midr_range_list(entry, scope) ||
+	    !is_hyp_mode_available())
+		return false;
+
+	for_each_possible_cpu(i) {
+		if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
+			return true;
+	}
+
+	return false;
+}
+
 #ifdef CONFIG_HARDEN_EL2_VECTORS
 
 static const struct midr_range arm64_harden_el2_vectors[] = {
@@ -851,6 +876,19 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 		.matches = has_cortex_a76_erratum_1463225,
 	},
 #endif
+#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
+	{
+		.desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
+		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
+		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
+		.matches = needs_tx2_tvm_workaround,
+	},
+	{
+		.desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
+		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
+		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
+	},
+#endif
 	{
 	}
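needs_tx2_tvm_workaround() above decides "SMT is enabled" by scanning every possible CPU for a nonzero affinity level 0 in its MPIDR value: on an SMT part, Aff0 carries the hardware-thread ID, so any nonzero value means a sibling thread exists. A standalone sketch of the same bit extraction — hypothetical names; the kernel does the equivalent with MPIDR_AFFINITY_LEVEL() and cpu_logical_map():

#include <stdbool.h>
#include <stdint.h>

/*
 * MPIDR_EL1 affinity fields (architectural): Aff0 bits[7:0],
 * Aff1 bits[15:8], Aff2 bits[23:16], Aff3 bits[39:32].
 */
static uint64_t mpidr_affinity_level(uint64_t mpidr, unsigned int level)
{
	unsigned int shift = (level == 3) ? 32 : level * 8;

	return (mpidr >> shift) & 0xff;
}

/* SMT is in use if any CPU reports a nonzero hardware-thread ID (Aff0). */
static bool any_smt_sibling(const uint64_t *mpidrs, int ncpus)
{
	for (int i = 0; i < ncpus; i++)
		if (mpidr_affinity_level(mpidrs[i], 0) != 0)
			return true;

	return false;
}

int main(void)
{
	/* two cores, two threads each: Aff1 = core, Aff0 = thread */
	const uint64_t mpidrs[] = { 0x0000, 0x0001, 0x0100, 0x0101 };

	return any_smt_sibling(mpidrs, 4) ? 0 : 1;
}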
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
@@ -1071,7 +1071,9 @@ alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
 #else
 	ldr	x30, =vectors
 #endif
+alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
 	prfm	plil1strm, [x30, #(1b - tramp_vectors)]
+alternative_else_nop_endif
 	msr	vbar_el1, x30
 	add	x30, x30, #(1b - tramp_vectors)
 	isb
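The alternative_if_not/alternative_else_nop_endif pair means the arm64 alternatives framework rewrites this spot at boot: on CPUs with ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM the PRFM becomes a NOP and is never executed, while unaffected CPUs keep the prefetch hint. A C-level sketch of the effect — illustration only, with hypothetical names and a runtime branch standing in for boot-time instruction patching:

/*
 * What the patched entry.S achieves: affected CPUs simply never
 * execute the PRFM between the vector-base setup and the ISB.
 */
static inline void prefetch_tramp_vector(const void *entry, int has_tx2_219_prfm)
{
	if (!has_tx2_219_prfm)	/* alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM */
		asm volatile("prfm plil1strm, [%0]" : : "r" (entry));
				/* alternative_else_nop_endif */
}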
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
@@ -124,6 +124,9 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
 {
 	u64 hcr = vcpu->arch.hcr_el2;
 
+	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
+		hcr |= HCR_TVM;
+
 	write_sysreg(hcr, hcr_el2);
 
 	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
@@ -174,8 +177,10 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
 	 * the crucial bit is "On taking a vSError interrupt,
 	 * HCR_EL2.VSE is cleared to 0."
 	 */
-	if (vcpu->arch.hcr_el2 & HCR_VSE)
-		vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);
+	if (vcpu->arch.hcr_el2 & HCR_VSE) {
+		vcpu->arch.hcr_el2 &= ~HCR_VSE;
+		vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
+	}
 
 	if (has_vhe())
 		deactivate_traps_vhe();
@@ -229,20 +234,6 @@ static void __hyp_text __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
 	}
 }
 
-static bool __hyp_text __true_value(void)
-{
-	return true;
-}
-
-static bool __hyp_text __false_value(void)
-{
-	return false;
-}
-
-static hyp_alternate_select(__check_arm_834220,
-			    __false_value, __true_value,
-			    ARM64_WORKAROUND_834220);
-
 static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
 {
 	u64 par, tmp;
@@ -298,7 +289,8 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
 	 * resolve the IPA using the AT instruction.
 	 */
 	if (!(esr & ESR_ELx_S1PTW) &&
-	    (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
+	    (cpus_have_const_cap(ARM64_WORKAROUND_834220) ||
+	     (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
 		if (!__translate_far_to_hpfar(far, &hpfar))
 			return false;
 	} else {
@@ -393,6 +385,61 @@ static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 	return true;
 }
 
+static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
+{
+	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_hsr(vcpu));
+	int rt = kvm_vcpu_sys_get_rt(vcpu);
+	u64 val = vcpu_get_reg(vcpu, rt);
+
+	/*
+	 * The normal sysreg handling code expects to see the traps,
+	 * let's not do anything here.
+	 */
+	if (vcpu->arch.hcr_el2 & HCR_TVM)
+		return false;
+
+	switch (sysreg) {
+	case SYS_SCTLR_EL1:
+		write_sysreg_el1(val, SYS_SCTLR);
+		break;
+	case SYS_TTBR0_EL1:
+		write_sysreg_el1(val, SYS_TTBR0);
+		break;
+	case SYS_TTBR1_EL1:
+		write_sysreg_el1(val, SYS_TTBR1);
+		break;
+	case SYS_TCR_EL1:
+		write_sysreg_el1(val, SYS_TCR);
+		break;
+	case SYS_ESR_EL1:
+		write_sysreg_el1(val, SYS_ESR);
+		break;
+	case SYS_FAR_EL1:
+		write_sysreg_el1(val, SYS_FAR);
+		break;
+	case SYS_AFSR0_EL1:
+		write_sysreg_el1(val, SYS_AFSR0);
+		break;
+	case SYS_AFSR1_EL1:
+		write_sysreg_el1(val, SYS_AFSR1);
+		break;
+	case SYS_MAIR_EL1:
+		write_sysreg_el1(val, SYS_MAIR);
+		break;
+	case SYS_AMAIR_EL1:
+		write_sysreg_el1(val, SYS_AMAIR);
+		break;
+	case SYS_CONTEXTIDR_EL1:
+		write_sysreg_el1(val, SYS_CONTEXTIDR);
+		break;
+	default:
+		return false;
+	}
+
+	__kvm_skip_instr(vcpu);
+	return true;
+}
+
 /*
  * Return true when we were able to fixup the guest exit and should return to
  * the guest, false when we should restore the host state and return to the
@@ -412,6 +459,11 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 	if (*exit_code != ARM_EXCEPTION_TRAP)
 		goto exit;
 
+	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
+	    kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
+	    handle_tx2_tvm(vcpu))
+		return true;
+
 	/*
 	 * We trap the first access to the FP/SIMD to save the host context
 	 * and restore the guest context lazily.
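The KVM half of the workaround hinges on HCR_EL2.TVM: with that bit set, guest writes to the EL1 virtual-memory control registers trap to EL2, where handle_tx2_tvm() performs the write on the guest's behalf and skips the trapped instruction; the eventual exception return back to the guest is context-synchronizing, which closes the erratum window. A toy model of that trap-emulate-skip flow — hypothetical types and names, deliberately not the kernel API:

#include <stdbool.h>
#include <stdint.h>

struct toy_vcpu {
	uint64_t pc;		/* guest program counter */
	uint64_t ttbr0_el1;	/* emulated guest register */
};

enum toy_sysreg { TOY_TTBR0_EL1, TOY_UNHANDLED };

bool toy_handle_tvm_trap(struct toy_vcpu *vcpu, enum toy_sysreg sr, uint64_t val)
{
	switch (sr) {
	case TOY_TTBR0_EL1:
		vcpu->ttbr0_el1 = val;	/* perform the write on the guest's behalf */
		break;
	default:
		return false;		/* defer to the normal sysreg handler */
	}

	vcpu->pc += 4;			/* skip the trapped MSR instruction */
	return true;			/* the eventual ERET is context-synchronizing */
}

int main(void)
{
	struct toy_vcpu vcpu = { .pc = 0x1000, .ttbr0_el1 = 0 };

	/* as if the guest executed "msr ttbr0_el1, x0" with x0 == 0xdead0000 */
	bool handled = toy_handle_tvm_trap(&vcpu, TOY_TTBR0_EL1, 0xdead0000);

	return (handled && vcpu.pc == 0x1004) ? 0 : 1;
}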
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
@@ -67,10 +67,14 @@ static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
 	isb();
 }
 
-static hyp_alternate_select(__tlb_switch_to_guest,
-			    __tlb_switch_to_guest_nvhe,
-			    __tlb_switch_to_guest_vhe,
-			    ARM64_HAS_VIRT_HOST_EXTN);
+static void __hyp_text __tlb_switch_to_guest(struct kvm *kvm,
+					     struct tlb_inv_context *cxt)
+{
+	if (has_vhe())
+		__tlb_switch_to_guest_vhe(kvm, cxt);
+	else
+		__tlb_switch_to_guest_nvhe(kvm, cxt);
+}
 
 static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
 						struct tlb_inv_context *cxt)
@@ -98,10 +102,14 @@ static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
 	write_sysreg(0, vttbr_el2);
 }
 
-static hyp_alternate_select(__tlb_switch_to_host,
-			    __tlb_switch_to_host_nvhe,
-			    __tlb_switch_to_host_vhe,
-			    ARM64_HAS_VIRT_HOST_EXTN);
+static void __hyp_text __tlb_switch_to_host(struct kvm *kvm,
+					    struct tlb_inv_context *cxt)
+{
+	if (has_vhe())
+		__tlb_switch_to_host_vhe(kvm, cxt);
+	else
+		__tlb_switch_to_host_nvhe(kvm, cxt);
+}
 
 void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
@@ -111,7 +119,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 
 	/* Switch to requested VMID */
 	kvm = kern_hyp_va(kvm);
-	__tlb_switch_to_guest()(kvm, &cxt);
+	__tlb_switch_to_guest(kvm, &cxt);
 
 	/*
 	 * We could do so much better if we had the VA as well.
@@ -154,7 +162,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 	if (!has_vhe() && icache_is_vpipt())
 		__flush_icache_all();
 
-	__tlb_switch_to_host()(kvm, &cxt);
+	__tlb_switch_to_host(kvm, &cxt);
 }
 
 void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
@@ -165,13 +173,13 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
 
 	/* Switch to requested VMID */
 	kvm = kern_hyp_va(kvm);
-	__tlb_switch_to_guest()(kvm, &cxt);
+	__tlb_switch_to_guest(kvm, &cxt);
 
 	__tlbi(vmalls12e1is);
 	dsb(ish);
 	isb();
 
-	__tlb_switch_to_host()(kvm, &cxt);
+	__tlb_switch_to_host(kvm, &cxt);
 }
 
 void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
@@ -180,13 +188,13 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
 	struct tlb_inv_context cxt;
 
 	/* Switch to requested VMID */
-	__tlb_switch_to_guest()(kvm, &cxt);
+	__tlb_switch_to_guest(kvm, &cxt);
 
 	__tlbi(vmalle1);
 	dsb(nsh);
 	isb();
 
-	__tlb_switch_to_host()(kvm, &cxt);
+	__tlb_switch_to_host(kvm, &cxt);
 }
 
 void __hyp_text __kvm_flush_vm_context(void)
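A side note on the refactoring running through the last three files: the removed hyp_alternate_select macro picked between two implementations by boot-patching a function pointer, which forced the awkward double-call syntax seen in the old lines (e.g. __tlb_switch_to_guest()(kvm, &cxt)). Since has_vhe() and cpus_have_const_cap() already fold to constant branches after boot, the pointer indirection buys nothing. A minimal before/after sketch of the calling convention change — plain C with hypothetical names, standing in for the patched capability check:

#include <stdbool.h>

static bool have_workaround_834220;	/* stands in for the boot-detected capability bit */

/* Before: a selector returning a function pointer, invoked as check()() */
static bool check_false(void) { return false; }
static bool check_true(void) { return true; }

typedef bool (*check_fn)(void);

static check_fn check_arm_834220(void)
{
	return have_workaround_834220 ? check_true : check_false;
}

/* After: a direct predicate, invoked as a plain call */
static bool cpus_have_834220(void)
{
	return have_workaround_834220;
}

int main(void)
{
	bool old_style = check_arm_834220()();	/* the double call the old macro required */
	bool new_style = cpus_have_834220();	/* the straight branch used now */

	return (old_style == new_style) ? 0 : 1;
}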
diff --git a/arch/arm64/xen/Makefile b/arch/arm64/xen/Makefile
@@ -1,4 +1,3 @@
 # SPDX-License-Identifier: GPL-2.0-only
 xen-arm-y	+= $(addprefix ../../arm/xen/, enlighten.o grant-table.o p2m.o mm.o)
 obj-y		:= xen-arm.o hypercall.o
-obj-$(CONFIG_XEN_EFI) += $(addprefix ../../arm/xen/, efi.o)