arm64: Rename WORKAROUND_1319367 to SPECULATIVE_AT_NVHE
To match SPECULATIVE_AT_VHE let's also have a generic name for the NVHE variant.

Acked-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Steven Price <steven.price@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
parent e85d68faed
commit db0d46a58d (committed by Will Deacon)
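For orientation, this is purely a rename: the erratum-specific Kconfig option still selects the capability, and callers still gate the workaround on the same runtime check, only under the new generic name. A minimal sketch of that calling pattern follows (kernel-style illustration, not part of this commit; apply_speculative_at_nvhe_workaround() is a hypothetical helper):

#include <asm/cpucaps.h>	/* ARM64_WORKAROUND_SPECULATIVE_AT_NVHE */
#include <asm/cpufeature.h>	/* cpus_have_const_cap() */

static void apply_speculative_at_nvhe_workaround(void)
{
	/* Hypothetical placeholder for the erratum-specific handling. */
}

static void example_caller(void)
{
	/*
	 * ARM64_ERRATUM_1319367 (and any future erratum with the same
	 * behaviour) selects ARM64_WORKAROUND_SPECULATIVE_AT_NVHE, so
	 * callers only test the generic capability.
	 */
	if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE))
		apply_speculative_at_nvhe_workaround();
}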
@@ -546,9 +546,13 @@ config ARM64_ERRATUM_1286807
 	  invalidated has been observed by other observers. The
 	  workaround repeats the TLBI+DSB operation.
 
+config ARM64_WORKAROUND_SPECULATIVE_AT_NVHE
+	bool
+
 config ARM64_ERRATUM_1319367
 	bool "Cortex-A57/A72: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
 	default y
+	select ARM64_WORKAROUND_SPECULATIVE_AT_NVHE
 	help
 	  This option adds work arounds for ARM Cortex-A57 erratum 1319537
 	  and A72 erratum 1319367
@@ -55,7 +55,7 @@
 #define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM	45
 #define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM	46
 #define ARM64_WORKAROUND_1542419		47
-#define ARM64_WORKAROUND_1319367		48
+#define ARM64_WORKAROUND_SPECULATIVE_AT_NVHE	48
 
 #define ARM64_NCAPS				49
 
@@ -934,7 +934,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 #ifdef CONFIG_ARM64_ERRATUM_1319367
 	{
 		.desc = "ARM erratum 1319367",
-		.capability = ARM64_WORKAROUND_1319367,
+		.capability = ARM64_WORKAROUND_SPECULATIVE_AT_NVHE,
 		ERRATA_MIDR_RANGE_LIST(ca57_a72),
 	},
 #endif
@@ -119,7 +119,7 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
 
 	write_sysreg(val, cptr_el2);
 
-	if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+	if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
 		struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
 
 		isb();
@@ -173,7 +173,7 @@ static void __hyp_text __deactivate_traps_nvhe(void)
 {
 	u64 mdcr_el2 = read_sysreg(mdcr_el2);
 
-	if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+	if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
 		u64 val;
 
 		/*
@@ -118,7 +118,7 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
 	write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2);
 	write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1);
 
-	if (!cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+	if (!cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
 		write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR);
 		write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR);
 	} else if (!ctxt->__hyp_running_vcpu) {
@@ -149,7 +149,7 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
 	write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
 	write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
 
-	if (cpus_have_const_cap(ARM64_WORKAROUND_1319367) &&
+	if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE) &&
 	    ctxt->__hyp_running_vcpu) {
 		/*
 		 * Must only be done for host registers, hence the context
@@ -63,7 +63,7 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
 static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
 						  struct tlb_inv_context *cxt)
 {
-	if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+	if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
 		u64 val;
 
 		/*
@@ -117,7 +117,7 @@ static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
 {
 	write_sysreg(0, vttbr_el2);
 
-	if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+	if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
 		/* Ensure write of the host VMID */
 		isb();
 		/* Restore the host's TCR_EL1 */