KVM: arm64: Allow indirect vectors to be used without SPECTRE_V3A

commit 5bdf3437603d4af87f9c7f424b0c8aeed2420745 upstream.

CPUs vulnerable to Spectre-BHB either need to make an SMC-CC firmware
call from the vectors, or run a sequence of branches. This gets added
to the hyp vectors. If there is no support for arch-workaround-1 in
firmware, the indirect vector will be used.
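
The diff below adds both kinds of sequence as templates. As a rough, illustrative sketch only (not code from this patch: pick_bhb_template(), fw_supports_wa3 and max_bhb_k are invented names, only the template symbols come from the series), selection between them could look like:

/*
 * Illustrative sketch: choose a Spectre-BHB mitigation template once
 * firmware support has been probed. Hypothetical helper and parameters.
 */
static const char *pick_bhb_template(bool fw_supports_wa3,
				     unsigned int max_bhb_k)
{
	if (fw_supports_wa3)
		return __smccc_workaround_3_smc;	/* SMC call to firmware */

	switch (max_bhb_k) {				/* loop of taken branches */
	case 8:
		return __spectre_bhb_loop_k8;
	case 24:
		return __spectre_bhb_loop_k24;
	default:
		return __spectre_bhb_loop_k32;
	}
}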

kvm_init_vector_slots() only initialises the two indirect slots if
the platform is vulnerable to Spectre-v3a. pKVM's hyp_map_vectors()
only initialises __hyp_bp_vect_base if the platform is vulnerable to
Spectre-v3a.

As there are about to be more users of the indirect vectors, ensure
their entries in hyp_spectre_vector_selector[] are always initialised,
and __hyp_bp_vect_base defaults to the regular VA mapping.

The Spectre-v3a check is moved to a helper
kvm_system_needs_idmapped_vectors(), and merged with the code
that creates the hyp mappings.
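
A minimal sketch of the helper named above, assuming it simply wraps the existing Spectre-v3a capability check (its exact upstream placement is not part of this backport):

/* Sketch: id-mapped (indirect) vectors are only needed on Spectre-v3a parts. */
static inline bool kvm_system_needs_idmapped_vectors(void)
{
	return cpus_have_const_cap(ARM64_SPECTRE_V3A);
}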

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Author: James Morse <james.morse@arm.com>
Date: 2021-11-16 15:06:19 +00:00
Committed by: Greg Kroah-Hartman
Parent: 13a807a0a0
Commit: 192023e6ba
7 changed files with 130 additions and 4 deletions

@@ -66,7 +66,8 @@
 #define ARM64_HAS_TLB_RANGE 56
 #define ARM64_MTE 57
 #define ARM64_WORKAROUND_1508412 58
+#define ARM64_SPECTRE_BHB 59
 
-#define ARM64_NCAPS 59
+#define ARM64_NCAPS 60
 
 #endif /* __ASM_CPUCAPS_H */

@@ -35,6 +35,8 @@
 #define KVM_VECTOR_PREAMBLE (2 * AARCH64_INSN_SIZE)
 
 #define __SMCCC_WORKAROUND_1_SMC_SZ 36
+#define __SMCCC_WORKAROUND_3_SMC_SZ 36
+#define __SPECTRE_BHB_LOOP_SZ 44
 
 #define KVM_HOST_SMCCC_ID(id) \
 	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
@@ -199,6 +201,10 @@ extern void __vgic_v3_init_lrs(void);
 extern u32 __kvm_get_mdcr_el2(void);
 
 extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
+extern char __smccc_workaround_3_smc[__SMCCC_WORKAROUND_3_SMC_SZ];
+extern char __spectre_bhb_loop_k8[__SPECTRE_BHB_LOOP_SZ];
+extern char __spectre_bhb_loop_k24[__SPECTRE_BHB_LOOP_SZ];
+extern char __spectre_bhb_loop_k32[__SPECTRE_BHB_LOOP_SZ];
 
 /*
  * Obtain the PC-relative address of a kernel symbol
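
Each size constant must match the byte length of its template: the SMC sequence is 9 instructions and the loop sequence is 11, at 4 bytes (AARCH64_INSN_SIZE) each, giving 36 and 44 bytes. The patch enforces this with the .org padding in the assembly further down; a C-side check of the same arithmetic, shown only for illustration (it is not in the patch, and would have to sit inside a function), would be:

	/* Illustration only: the C equivalent of the .org padding checks. */
	BUILD_BUG_ON(__SMCCC_WORKAROUND_3_SMC_SZ != 9 * AARCH64_INSN_SIZE);
	BUILD_BUG_ON(__SPECTRE_BHB_LOOP_SZ != 11 * AARCH64_INSN_SIZE);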

@@ -237,7 +237,8 @@ static inline void *kvm_get_hyp_vector(void)
 	void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
 	int slot = -1;
 
-	if (cpus_have_const_cap(ARM64_SPECTRE_V2) && data->fn) {
+	if ((cpus_have_const_cap(ARM64_SPECTRE_V2) ||
+	     cpus_have_const_cap(ARM64_SPECTRE_BHB)) && data->template_start) {
 		vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
 		slot = data->hyp_vectors_slot;
 	}
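
The remainder of kvm_get_hyp_vector() is unchanged and therefore not in the hunk; for context, once a slot has been chosen the vector base is offset into __bp_harden_hyp_vecs, which holds one 2KB copy of the vector page per slot. Roughly (a context sketch, not part of this diff):

	/* Sketch of how the chosen slot is applied further down the function. */
	if (slot != -1)
		vect += slot * SZ_2K;

	return vect;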

@@ -67,6 +67,12 @@ typedef void (*bp_hardening_cb_t)(void);
 struct bp_hardening_data {
 	int hyp_vectors_slot;
 	bp_hardening_cb_t fn;
+
+	/*
+	 * template_start is only used by the BHB mitigation to identify the
+	 * hyp_vectors_slot sequence.
+	 */
+	const char *template_start;
 };
 
 DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
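
For context, this per-CPU structure is what exception entry already consults for the Spectre-v2 callback, and what kvm_setup_bhb_slot() below reads back to discover which template a CPU's vector slot holds. A simplified sketch of the existing consumer (not part of this patch, and possibly differing in detail from the real helper):

/* Simplified sketch of the existing Spectre-v2 consumer of this data. */
static inline void arm64_apply_bp_hardening(void)
{
	struct bp_hardening_data *d;

	if (!cpus_have_const_cap(ARM64_SPECTRE_V2))
		return;

	d = this_cpu_ptr(&bp_hardening_data);
	if (d->fn)
		d->fn();
}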

@@ -220,9 +220,9 @@ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
 	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
 }
 
+static DEFINE_RAW_SPINLOCK(bp_lock);
 static void install_bp_hardening_cb(bp_hardening_cb_t fn)
 {
-	static DEFINE_RAW_SPINLOCK(bp_lock);
 	int cpu, slot = -1;
 	const char *hyp_vecs_start = __smccc_workaround_1_smc;
 	const char *hyp_vecs_end = __smccc_workaround_1_smc +
@@ -253,6 +253,7 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn)
 
 	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
 	__this_cpu_write(bp_hardening_data.fn, fn);
+	__this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
 	raw_spin_unlock(&bp_lock);
 }
 #else
@@ -819,3 +820,47 @@ enum mitigation_state arm64_get_spectre_bhb_state(void)
 {
 	return spectre_bhb_state;
 }
+
+static int kvm_bhb_get_vecs_size(const char *start)
+{
+	if (start == __smccc_workaround_3_smc)
+		return __SMCCC_WORKAROUND_3_SMC_SZ;
+	else if (start == __spectre_bhb_loop_k8 ||
+		 start == __spectre_bhb_loop_k24 ||
+		 start == __spectre_bhb_loop_k32)
+		return __SPECTRE_BHB_LOOP_SZ;
+
+	return 0;
+}
+
+void kvm_setup_bhb_slot(const char *hyp_vecs_start)
+{
+	int cpu, slot = -1, size;
+	const char *hyp_vecs_end;
+
+	if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
+		return;
+
+	size = kvm_bhb_get_vecs_size(hyp_vecs_start);
+	if (WARN_ON_ONCE(!hyp_vecs_start || !size))
+		return;
+	hyp_vecs_end = hyp_vecs_start + size;
+
+	raw_spin_lock(&bp_lock);
+	for_each_possible_cpu(cpu) {
+		if (per_cpu(bp_hardening_data.template_start, cpu) == hyp_vecs_start) {
+			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
+			break;
+		}
+	}
+
+	if (slot == -1) {
+		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
+		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
+		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
+	}
+
+	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
+	__this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
+	raw_spin_unlock(&bp_lock);
+}
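
kvm_setup_bhb_slot() is called from elsewhere in the series (not shown in this diff) once the right template for the current CPU is known; an illustrative call for a part that needs the 8-iteration loop would simply be:

	/* Illustrative usage: install (or reuse) a hyp vector slot holding
	 * the k=8 branch loop for the current CPU. */
	kvm_setup_bhb_slot(__spectre_bhb_loop_k8);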

@@ -1337,7 +1337,8 @@ static int kvm_map_vectors(void)
 	 * !SV2 + HEL2 -> allocate one vector slot and use exec mapping
 	 *  SV2 + HEL2 -> use hardened vectors and use exec mapping
 	 */
-	if (cpus_have_const_cap(ARM64_SPECTRE_V2)) {
+	if (cpus_have_const_cap(ARM64_SPECTRE_V2) ||
+	    cpus_have_const_cap(ARM64_SPECTRE_BHB)) {
 		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
 		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
 	}

@@ -30,3 +30,69 @@ SYM_DATA_START(__smccc_workaround_1_smc)
 1:	.org __smccc_workaround_1_smc + __SMCCC_WORKAROUND_1_SMC_SZ
 	.org 1b
 SYM_DATA_END(__smccc_workaround_1_smc)
+
+	.global	__smccc_workaround_3_smc
+SYM_DATA_START(__smccc_workaround_3_smc)
+	esb
+	sub	sp, sp, #(8 * 4)
+	stp	x2, x3, [sp, #(8 * 0)]
+	stp	x0, x1, [sp, #(8 * 2)]
+	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_3
+	smc	#0
+	ldp	x2, x3, [sp, #(8 * 0)]
+	ldp	x0, x1, [sp, #(8 * 2)]
+	add	sp, sp, #(8 * 4)
+1:	.org __smccc_workaround_3_smc + __SMCCC_WORKAROUND_3_SMC_SZ
+	.org 1b
+SYM_DATA_END(__smccc_workaround_3_smc)
+
+	.global	__spectre_bhb_loop_k8
+SYM_DATA_START(__spectre_bhb_loop_k8)
+	esb
+	sub	sp, sp, #(8 * 2)
+	stp	x0, x1, [sp, #(8 * 0)]
+	mov	x0, #8
+2:	b	. + 4
+	subs	x0, x0, #1
+	b.ne	2b
+	dsb	nsh
+	isb
+	ldp	x0, x1, [sp, #(8 * 0)]
+	add	sp, sp, #(8 * 2)
+1:	.org __spectre_bhb_loop_k8 + __SPECTRE_BHB_LOOP_SZ
+	.org 1b
+SYM_DATA_END(__spectre_bhb_loop_k8)
+
+	.global	__spectre_bhb_loop_k24
+SYM_DATA_START(__spectre_bhb_loop_k24)
+	esb
+	sub	sp, sp, #(8 * 2)
+	stp	x0, x1, [sp, #(8 * 0)]
+	mov	x0, #24
+2:	b	. + 4
+	subs	x0, x0, #1
+	b.ne	2b
+	dsb	nsh
+	isb
+	ldp	x0, x1, [sp, #(8 * 0)]
+	add	sp, sp, #(8 * 2)
+1:	.org __spectre_bhb_loop_k24 + __SPECTRE_BHB_LOOP_SZ
+	.org 1b
+SYM_DATA_END(__spectre_bhb_loop_k24)
+
+	.global	__spectre_bhb_loop_k32
+SYM_DATA_START(__spectre_bhb_loop_k32)
+	esb
+	sub	sp, sp, #(8 * 2)
+	stp	x0, x1, [sp, #(8 * 0)]
+	mov	x0, #32
+2:	b	. + 4
+	subs	x0, x0, #1
+	b.ne	2b
+	dsb	nsh
+	isb
+	ldp	x0, x1, [sp, #(8 * 0)]
+	add	sp, sp, #(8 * 2)
+1:	.org __spectre_bhb_loop_k32 + __SPECTRE_BHB_LOOP_SZ
+	.org 1b
+SYM_DATA_END(__spectre_bhb_loop_k32)