
Add prctl based control for Speculative Store Bypass mitigation and make it
the default mitigation for Intel and AMD.

Andi Kleen provided the following rationale (slightly redacted):

 There are multiple levels of impact of Speculative Store Bypass:

 1) JITed sandbox. It cannot invoke system calls, but can do PRIME+PROBE
    and may have call interfaces to other code.

 2) Native code process. No protection inside the process at this level.

 3) Kernel.

 4) Between processes.

 The prctl tries to protect against case (1) doing attacks.

 If the untrusted code can do random system calls then control is already
 lost in a much worse way. So there needs to be system call protection in
 some way (using a JIT that does not allow them, or seccomp). Or rather,
 if the process can subvert its environment somehow to do the prctl it
 can already execute arbitrary code, which is much worse than SSB.

 To put it differently, the point of the prctl is to not allow JITed code
 to read data it shouldn't read from its JITed sandbox. If it already has
 escaped its sandbox then it can already read everything it wants in its
 address space, and do much worse.

The ability to control Speculative Store Bypass allows enabling the
protection selectively without affecting overall system performance.

Based on an initial patch from Tim Chen. Completely rewritten.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
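
For context, a minimal userspace sketch of how a sandbox host could use the
new interface; it assumes a kernel with this series applied, and falls back
to defining the PR_* constants from include/uapi/linux/prctl.h if the libc
headers do not yet carry them:

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_SPECULATION_CTRL
    # define PR_GET_SPECULATION_CTRL  52
    # define PR_SET_SPECULATION_CTRL  53
    # define PR_SPEC_STORE_BYPASS     0
    # define PR_SPEC_PRCTL            (1UL << 0)
    # define PR_SPEC_ENABLE           (1UL << 1)
    # define PR_SPEC_DISABLE          (1UL << 2)
    #endif

    int main(void)
    {
            /* Returns a PR_SPEC_* bitmask on success, -1 on error. */
            int state = prctl(PR_GET_SPECULATION_CTRL,
                              PR_SPEC_STORE_BYPASS, 0, 0, 0);

            if (state < 0 || !(state & PR_SPEC_PRCTL))
                    return 1;       /* no per-task control available */

            /* Disable Speculative Store Bypass for this task only. */
            if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                      PR_SPEC_DISABLE, 0, 0))
                    return 1;

            puts("SSB mitigation enabled for this task");
            return 0;
    }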
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_NOSPEC_BRANCH_H_
#define _ASM_X86_NOSPEC_BRANCH_H_

#include <asm/alternative.h>
#include <asm/alternative-asm.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>

/*
 * Fill the CPU return stack buffer.
 *
 * Each entry in the RSB, if used for a speculative 'ret', contains an
 * infinite 'pause; lfence; jmp' loop to capture speculative execution.
 *
 * This is required in various cases for retpoline and IBRS-based
 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
 * eliminate potentially bogus entries from the RSB, and sometimes
 * purely to ensure that it doesn't get empty, which on some CPUs would
 * allow predictions from other (unwanted!) sources to be used.
 *
 * We define a CPP macro such that it can be used from both .S files and
 * inline assembly. It's possible to do a .macro and then include that
 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
 */

#define RSB_CLEAR_LOOPS         32      /* To forcibly overwrite all entries */
#define RSB_FILL_LOOPS          16      /* To avoid underflow */

/*
 * Google experimented with loop-unrolling and this turned out to be
 * the optimal version - two calls, each with their own speculation
 * trap should their return address end up getting used, in a loop.
 */
#define __FILL_RETURN_BUFFER(reg, nr, sp)       \
        mov     $(nr/2), reg;                   \
771:                                            \
        call    772f;                           \
773:    /* speculation trap */                  \
        pause;                                  \
        lfence;                                 \
        jmp     773b;                           \
772:                                            \
        call    774f;                           \
775:    /* speculation trap */                  \
        pause;                                  \
        lfence;                                 \
        jmp     775b;                           \
774:                                            \
        dec     reg;                            \
        jnz     771b;                           \
        add     $(BITS_PER_LONG/8) * nr, sp;
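
/*
 * Worked example of the arithmetic above (an illustration, not part of
 * the original header): with nr == RSB_CLEAR_LOOPS == 32, the loop runs
 * nr/2 == 16 iterations of two calls each, stuffing all 32 RSB entries.
 * Every call also pushed a return address, so the trailing add pops
 * nr * (BITS_PER_LONG/8) == 32 * 8 == 256 bytes of stack on x86-64 to
 * rebalance sp.
 */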

#ifdef __ASSEMBLY__

/*
 * This should be used immediately before a retpoline alternative. It tells
 * objtool where the retpolines are so that it can make sense of the control
 * flow by just reading the original instruction(s) and ignoring the
 * alternatives.
 */
.macro ANNOTATE_NOSPEC_ALTERNATIVE
        .Lannotate_\@:
        .pushsection .discard.nospec
        .long .Lannotate_\@ - .
        .popsection
.endm

/*
 * This should be used immediately before an indirect jump/call. It tells
 * objtool the subsequent indirect jump/call is vouched safe for retpoline
 * builds.
 */
.macro ANNOTATE_RETPOLINE_SAFE
        .Lannotate_\@:
        .pushsection .discard.retpoline_safe
        _ASM_PTR .Lannotate_\@
        .popsection
.endm

/*
 * These are the bare retpoline primitives for indirect jmp and call.
 * Do not use these directly; they only exist to make the ALTERNATIVE
 * invocation below less ugly.
 */
.macro RETPOLINE_JMP reg:req
        call    .Ldo_rop_\@
.Lspec_trap_\@:
        pause
        lfence
        jmp     .Lspec_trap_\@
.Ldo_rop_\@:
        mov     \reg, (%_ASM_SP)
        ret
.endm

/*
 * This is a wrapper around RETPOLINE_JMP so the called function in reg
 * returns to the instruction after the macro.
 */
.macro RETPOLINE_CALL reg:req
        jmp     .Ldo_call_\@
.Ldo_retpoline_jmp_\@:
        RETPOLINE_JMP   \reg
.Ldo_call_\@:
        call    .Ldo_retpoline_jmp_\@
.endm

/*
 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
 * indirect jmp/call which may be susceptible to the Spectre variant 2
 * attack.
 */
.macro JMP_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
        ANNOTATE_NOSPEC_ALTERNATIVE
        ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *\reg),  \
                __stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \
                __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
#else
        jmp     *\reg
#endif
.endm

.macro CALL_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
        ANNOTATE_NOSPEC_ALTERNATIVE
        ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *\reg), \
                __stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE, \
                __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *\reg), X86_FEATURE_RETPOLINE_AMD
#else
        call    *\reg
#endif
.endm
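
/*
 * Usage sketch (illustrative, not part of the original header): in a .S
 * file the macros substitute directly for the plain indirect forms:
 *
 *      CALL_NOSPEC %rdi        # instead of: call *%rdi
 *      JMP_NOSPEC  %rax        # instead of: jmp  *%rax
 *
 * With CONFIG_RETPOLINE=n both degrade to the bare instruction, so call
 * sites need no #ifdef of their own.
 */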

/*
 * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
 * monstrosity above, manually.
 */
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
#ifdef CONFIG_RETPOLINE
        ANNOTATE_NOSPEC_ALTERNATIVE
        ALTERNATIVE "jmp .Lskip_rsb_\@",                                \
                __stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP))    \
                \ftr
.Lskip_rsb_\@:
#endif
.endm
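
/*
 * Usage sketch (illustrative): context switch code can stuff the RSB
 * through a scratch register, gated on a feature bit, e.g.
 *
 *      FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
 *
 * This expands to a jmp over the fill sequence unless the named feature
 * bit is set, and compiles away entirely without CONFIG_RETPOLINE.
 */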

#else /* __ASSEMBLY__ */

#define ANNOTATE_NOSPEC_ALTERNATIVE                             \
        "999:\n\t"                                              \
        ".pushsection .discard.nospec\n\t"                      \
        ".long 999b - .\n\t"                                    \
        ".popsection\n\t"

#define ANNOTATE_RETPOLINE_SAFE                                 \
        "999:\n\t"                                              \
        ".pushsection .discard.retpoline_safe\n\t"              \
        _ASM_PTR " 999b\n\t"                                    \
        ".popsection\n\t"

#if defined(CONFIG_X86_64) && defined(RETPOLINE)

/*
 * Since the inline asm uses the %V modifier which is only in newer GCC,
 * the 64-bit one is dependent on RETPOLINE not CONFIG_RETPOLINE.
 */
# define CALL_NOSPEC                                            \
        ANNOTATE_NOSPEC_ALTERNATIVE                             \
        ALTERNATIVE(                                            \
        ANNOTATE_RETPOLINE_SAFE                                 \
        "call *%[thunk_target]\n",                              \
        "call __x86_indirect_thunk_%V[thunk_target]\n",         \
        X86_FEATURE_RETPOLINE)
# define THUNK_TARGET(addr) [thunk_target] "r" (addr)

#elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE)
/*
 * For i386 we use the original ret-equivalent retpoline, because
 * otherwise we'll run out of registers. We don't care about CET
 * here, anyway.
 */
# define CALL_NOSPEC                                            \
        ALTERNATIVE(                                            \
        ANNOTATE_RETPOLINE_SAFE                                 \
        "call *%[thunk_target]\n",                              \
        "       jmp    904f;\n"                                 \
        "       .align 16\n"                                    \
        "901:   call   903f;\n"                                 \
        "902:   pause;\n"                                       \
        "       lfence;\n"                                      \
        "       jmp    902b;\n"                                 \
        "       .align 16\n"                                    \
        "903:   addl   $4, %%esp;\n"                            \
        "       pushl  %[thunk_target];\n"                      \
        "       ret;\n"                                         \
        "       .align 16\n"                                    \
        "904:   call   901b;\n",                                \
        X86_FEATURE_RETPOLINE)

# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#else /* No retpoline for C / inline asm */
# define CALL_NOSPEC "call *%[thunk_target]\n"
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
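
/*
 * Usage sketch for the C variant (illustrative): calling through a
 * function pointer 'fn' with the retpoline-safe helpers looks like
 *
 *      asm volatile(CALL_NOSPEC
 *                   : "=a" (ret)
 *                   : THUNK_TARGET(fn)
 *                   : "memory");
 *
 * THUNK_TARGET() binds the [thunk_target] operand with the matching
 * constraint ("r" for the 64-bit thunk, "rm" otherwise).
 */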

/* The Spectre V2 mitigation variants */
enum spectre_v2_mitigation {
        SPECTRE_V2_NONE,
        SPECTRE_V2_RETPOLINE_MINIMAL,
        SPECTRE_V2_RETPOLINE_MINIMAL_AMD,
        SPECTRE_V2_RETPOLINE_GENERIC,
        SPECTRE_V2_RETPOLINE_AMD,
        SPECTRE_V2_IBRS,
};

/*
 * The Intel specification for the SPEC_CTRL MSR requires that we
 * preserve any already set reserved bits at boot time (e.g. for
 * future additions that this kernel is not currently aware of).
 * We then set any additional mitigation bits that we want
 * ourselves and always use this as the base for SPEC_CTRL.
 * We also use this when handling guest entry/exit as below.
 */
extern void x86_spec_ctrl_set(u64);
extern u64 x86_spec_ctrl_get_default(void);

/* The Speculative Store Bypass disable variants */
enum ssb_mitigation {
        SPEC_STORE_BYPASS_NONE,
        SPEC_STORE_BYPASS_DISABLE,
        SPEC_STORE_BYPASS_PRCTL,
};

extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];

/*
 * On VMEXIT we must ensure that no RSB predictions learned in the guest
 * can be followed in the host, by overwriting the RSB completely. Both
 * retpoline and IBRS mitigations for Spectre v2 need this; only on future
 * CPUs with IBRS_ALL *might* it be avoided.
 */
static inline void vmexit_fill_RSB(void)
{
#ifdef CONFIG_RETPOLINE
        unsigned long loops;

        asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
                      ALTERNATIVE("jmp 910f",
                                  __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
                                  X86_FEATURE_RETPOLINE)
                      "910:"
                      : "=r" (loops), ASM_CALL_CONSTRAINT
                      : : "memory" );
#endif
}
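
/*
 * Usage sketch (illustrative): a hypervisor calls this immediately
 * after the hardware VM exit, before any host 'ret' could consume a
 * guest-planted RSB entry:
 *
 *      (VMLAUNCH/VMRESUME or VMRUN returns)
 *      vmexit_fill_RSB();
 */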

static __always_inline
void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
{
        asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
                : : "c" (msr),
                    "a" ((u32)val),
                    "d" ((u32)(val >> 32)),
                    [feature] "i" (feature)
                : "memory");
}

static inline void indirect_branch_prediction_barrier(void)
{
        u64 val = PRED_CMD_IBPB;

        alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
}
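
/*
 * Usage sketch (illustrative, simplified): the context switch path
 * issues the barrier when changing to a different address space, so one
 * task cannot steer another's indirect branch predictions:
 *
 *      if (next_mm != prev_mm)
 *              indirect_branch_prediction_barrier();
 *
 * The wrmsr is patched in only when X86_FEATURE_USE_IBPB is set, so
 * unmitigated systems pay nothing.
 */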

/*
 * With retpoline, we must use IBRS to restrict branch prediction
 * before calling into firmware.
 *
 * (Implemented as CPP macros due to header hell.)
 */
#define firmware_restrict_branch_speculation_start()                    \
do {                                                                    \
        u64 val = x86_spec_ctrl_get_default() | SPEC_CTRL_IBRS;         \
                                                                        \
        preempt_disable();                                              \
        alternative_msr_write(MSR_IA32_SPEC_CTRL, val,                  \
                              X86_FEATURE_USE_IBRS_FW);                 \
} while (0)

#define firmware_restrict_branch_speculation_end()                      \
do {                                                                    \
        u64 val = x86_spec_ctrl_get_default();                          \
                                                                        \
        alternative_msr_write(MSR_IA32_SPEC_CTRL, val,                  \
                              X86_FEATURE_USE_IBRS_FW);                 \
        preempt_enable();                                               \
} while (0)
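
/*
 * Usage sketch (illustrative; efi_call stands in for whatever firmware
 * entry point is being invoked): bracket the call with the pair above:
 *
 *      firmware_restrict_branch_speculation_start();
 *      status = efi_call(...);
 *      firmware_restrict_branch_speculation_end();
 *
 * start() raises IBRS with preemption disabled so the elevated
 * SPEC_CTRL value cannot leak into an unrelated task; end() restores
 * the default value and re-enables preemption.
 */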

#endif /* __ASSEMBLY__ */

/*
 * Below is used in the eBPF JIT compiler and emits the byte sequence
 * for the following assembly:
 *
 * With retpolines configured:
 *
 *    callq do_rop
 *  spec_trap:
 *    pause
 *    lfence
 *    jmp spec_trap
 *  do_rop:
 *    mov %rax,(%rsp)
 *    retq
 *
 * Without retpolines configured:
 *
 *    jmp *%rax
 */
#ifdef CONFIG_RETPOLINE
# define RETPOLINE_RAX_BPF_JIT_SIZE     17
# define RETPOLINE_RAX_BPF_JIT()                                \
        EMIT1_off32(0xE8, 7);    /* callq do_rop */             \
        /* spec_trap: */                                        \
        EMIT2(0xF3, 0x90);       /* pause */                    \
        EMIT3(0x0F, 0xAE, 0xE8); /* lfence */                   \
        EMIT2(0xEB, 0xF9);       /* jmp spec_trap */            \
        /* do_rop: */                                           \
        EMIT4(0x48, 0x89, 0x04, 0x24); /* mov %rax,(%rsp) */    \
        EMIT1(0xC3);             /* retq */
#else
# define RETPOLINE_RAX_BPF_JIT_SIZE     2
# define RETPOLINE_RAX_BPF_JIT()                                \
        EMIT2(0xFF, 0xE0);       /* jmp *%rax */
#endif
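
/*
 * Usage sketch (illustrative): the x86-64 eBPF JIT's tail call path,
 * which would otherwise end in an indirect 'jmp *%rax', emits the
 * retpoline bytes instead:
 *
 *      RETPOLINE_RAX_BPF_JIT();
 *
 * and accounts for RETPOLINE_RAX_BPF_JIT_SIZE when computing the jump
 * offsets around the emitted sequence.
 */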

#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */