Merge 5.10.133 into android12-5.10-lts

Changes in 5.10.133
	KVM/VMX: Use TEST %REG,%REG instead of CMP $0,%REG in vmenter.S
	KVM/nVMX: Use __vmx_vcpu_run in nested_vmx_check_vmentry_hw
	objtool: Refactor ORC section generation
	objtool: Add 'alt_group' struct
	objtool: Support stack layout changes in alternatives
	objtool: Support retpoline jump detection for vmlinux.o
	objtool: Assume only ELF functions do sibling calls
	objtool: Combine UNWIND_HINT_RET_OFFSET and UNWIND_HINT_FUNC
	x86/xen: Support objtool validation in xen-asm.S
	x86/xen: Support objtool vmlinux.o validation in xen-head.S
	x86/alternative: Merge include files
	x86/alternative: Support not-feature
	x86/alternative: Support ALTERNATIVE_TERNARY
	x86/alternative: Use ALTERNATIVE_TERNARY() in _static_cpu_has()
	x86/insn: Rename insn_decode() to insn_decode_from_regs()
	x86/insn: Add a __ignore_sync_check__ marker
	x86/insn: Add an insn_decode() API
	x86/insn-eval: Handle return values from the decoder
	x86/alternative: Use insn_decode()
	x86: Add insn_decode_kernel()
	x86/alternatives: Optimize optimize_nops()
	x86/retpoline: Simplify retpolines
	objtool: Correctly handle retpoline thunk calls
	objtool: Handle per arch retpoline naming
	objtool: Rework the elf_rebuild_reloc_section() logic
	objtool: Add elf_create_reloc() helper
	objtool: Create reloc sections implicitly
	objtool: Extract elf_strtab_concat()
	objtool: Extract elf_symbol_add()
	objtool: Add elf_create_undef_symbol()
	objtool: Keep track of retpoline call sites
	objtool: Cache instruction relocs
	objtool: Skip magical retpoline .altinstr_replacement
	objtool/x86: Rewrite retpoline thunk calls
	objtool: Support asm jump tables
	x86/alternative: Optimize single-byte NOPs at an arbitrary position
	objtool: Fix .symtab_shndx handling for elf_create_undef_symbol()
	objtool: Only rewrite unconditional retpoline thunk calls
	objtool/x86: Ignore __x86_indirect_alt_* symbols
	objtool: Don't make .altinstructions writable
	objtool: Teach get_alt_entry() about more relocation types
	objtool: print out the symbol type when complaining about it
	objtool: Remove reloc symbol type checks in get_alt_entry()
	objtool: Make .altinstructions section entry size consistent
	objtool: Introduce CFI hash
	objtool: Handle __sanitize_cov*() tail calls
	objtool: Classify symbols
	objtool: Explicitly avoid self modifying code in .altinstr_replacement
	objtool,x86: Replace alternatives with .retpoline_sites
	x86/retpoline: Remove unused replacement symbols
	x86/asm: Fix register order
	x86/asm: Fixup odd GEN-for-each-reg.h usage
	x86/retpoline: Move the retpoline thunk declarations to nospec-branch.h
	x86/retpoline: Create a retpoline thunk array
	x86/alternative: Implement .retpoline_sites support
	x86/alternative: Handle Jcc __x86_indirect_thunk_\reg
	x86/alternative: Try inline spectre_v2=retpoline,amd
	x86/alternative: Add debug prints to apply_retpolines()
	bpf,x86: Simplify computing label offsets
	bpf,x86: Respect X86_FEATURE_RETPOLINE*
	x86/lib/atomic64_386_32: Rename things
	x86: Prepare asm files for straight-line-speculation
	x86: Prepare inline-asm for straight-line-speculation
	x86/alternative: Relax text_poke_bp() constraint
	objtool: Add straight-line-speculation validation
	x86: Add straight-line-speculation mitigation
	tools arch: Update arch/x86/lib/mem{cpy,set}_64.S copies used in 'perf bench mem memcpy'
	kvm/emulate: Fix SETcc emulation function offsets with SLS
	objtool: Default ignore INT3 for unreachable
	crypto: x86/poly1305 - Fixup SLS
	objtool: Fix SLS validation for kcov tail-call replacement
	objtool: Fix code relocs vs weak symbols
	objtool: Fix type of reloc::addend
	objtool: Fix symbol creation
	x86/entry: Remove skip_r11rcx
	objtool: Fix objtool regression on x32 systems
	x86/realmode: build with -D__DISABLE_EXPORTS
	x86/kvm/vmx: Make noinstr clean
	x86/cpufeatures: Move RETPOLINE flags to word 11
	x86/retpoline: Cleanup some #ifdefery
	x86/retpoline: Swizzle retpoline thunk
	Makefile: Set retpoline cflags based on CONFIG_CC_IS_{CLANG,GCC}
	x86/retpoline: Use -mfunction-return
	x86: Undo return-thunk damage
	x86,objtool: Create .return_sites
	objtool: skip non-text sections when adding return-thunk sites
	x86,static_call: Use alternative RET encoding
	x86/ftrace: Use alternative RET encoding
	x86/bpf: Use alternative RET encoding
	x86/kvm: Fix SETcc emulation for return thunks
	x86/vsyscall_emu/64: Don't use RET in vsyscall emulation
	x86/sev: Avoid using __x86_return_thunk
	x86: Use return-thunk in asm code
	objtool: Treat .text.__x86.* as noinstr
	x86: Add magic AMD return-thunk
	x86/bugs: Report AMD retbleed vulnerability
	x86/bugs: Add AMD retbleed= boot parameter
	x86/bugs: Enable STIBP for JMP2RET
	x86/bugs: Keep a per-CPU IA32_SPEC_CTRL value
	x86/entry: Add kernel IBRS implementation
	x86/bugs: Optimize SPEC_CTRL MSR writes
	x86/speculation: Add spectre_v2=ibrs option to support Kernel IBRS
	x86/bugs: Split spectre_v2_select_mitigation() and spectre_v2_user_select_mitigation()
	x86/bugs: Report Intel retbleed vulnerability
	intel_idle: Disable IBRS during long idle
	objtool: Update Retpoline validation
	x86/xen: Rename SYS* entry points
	x86/bugs: Add retbleed=ibpb
	x86/bugs: Do IBPB fallback check only once
	objtool: Add entry UNRET validation
	x86/cpu/amd: Add Spectral Chicken
	x86/speculation: Fix RSB filling with CONFIG_RETPOLINE=n
	x86/speculation: Fix firmware entry SPEC_CTRL handling
	x86/speculation: Fix SPEC_CTRL write on SMT state change
	x86/speculation: Use cached host SPEC_CTRL value for guest entry/exit
	x86/speculation: Remove x86_spec_ctrl_mask
	objtool: Re-add UNWIND_HINT_{SAVE_RESTORE}
	KVM: VMX: Flatten __vmx_vcpu_run()
	KVM: VMX: Convert launched argument to flags
	KVM: VMX: Prevent guest RSB poisoning attacks with eIBRS
	KVM: VMX: Fix IBRS handling after vmexit
	x86/speculation: Fill RSB on vmexit for IBRS
	x86/common: Stamp out the stepping madness
	x86/cpu/amd: Enumerate BTC_NO
	x86/retbleed: Add fine grained Kconfig knobs
	x86/bugs: Add Cannon lake to RETBleed affected CPU list
	x86/bugs: Do not enable IBPB-on-entry when IBPB is not supported
	x86/kexec: Disable RET on kexec
	x86/speculation: Disable RRSBA behavior
	x86/static_call: Serialize __static_call_fixup() properly
	tools/insn: Restore the relative include paths for cross building
	x86, kvm: use proper ASM macros for kvm_vcpu_is_preempted
	x86/xen: Fix initialisation in hypercall_page after rethunk
	x86/ftrace: Add UNWIND_HINT_FUNC annotation for ftrace_stub
	x86/asm/32: Fix ANNOTATE_UNRET_SAFE use on 32-bit
	x86/speculation: Use DECLARE_PER_CPU for x86_spec_ctrl_current
	efi/x86: use naked RET on mixed mode call wrapper
	x86/kvm: fix FASTOP_SIZE when return thunks are enabled
	KVM: emulate: do not adjust size of fastop and setcc subroutines
	tools arch x86: Sync the msr-index.h copy with the kernel sources
	tools headers cpufeatures: Sync with the kernel sources
	x86/bugs: Remove apostrophe typo
	um: Add missing apply_returns()
	x86: Use -mindirect-branch-cs-prefix for RETPOLINE builds
	kvm: fix objtool relocation warning
	objtool: Fix elf_create_undef_symbol() endianness
	tools arch: Update arch/x86/lib/mem{cpy,set}_64.S copies used in 'perf bench mem memcpy' - again
	tools headers: Remove broken definition of __LITTLE_ENDIAN
	Linux 5.10.133

Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
Change-Id: I7e23843058c509562ae3f3a68e0710f31249a087
@@ -4751,6 +4751,30 @@
 
 	retain_initrd	[RAM] Keep initrd memory after extraction
 
+	retbleed=	[X86] Control mitigation of RETBleed (Arbitrary
+			Speculative Code Execution with Return Instructions)
+			vulnerability.
+
+			off          - no mitigation
+			auto         - automatically select a mitigation
+			auto,nosmt   - automatically select a mitigation,
+			               disabling SMT if necessary for
+			               the full mitigation (only on Zen1
+			               and older without STIBP).
+			ibpb         - mitigate short speculation windows on
+			               basic block boundaries too. Safe, highest
+			               perf impact.
+			unret        - force enable untrained return thunks,
+			               only effective on AMD f15h-f17h
+			               based systems.
+			unret,nosmt  - like unret, will disable SMT when STIBP
+			               is not available.
+
+			Selecting 'auto' will choose a mitigation method at run
+			time according to the CPU.
+
+			Not specifying this option is equivalent to retbleed=auto.
+
 	rfkill.default_state=
 	0	"airplane mode".  All wifi, bluetooth, wimax, gps, fm,
 		etc. communication is blocked by default.
@@ -5100,6 +5124,7 @@
 			eibrs             - enhanced IBRS
 			eibrs,retpoline   - enhanced IBRS + Retpolines
 			eibrs,lfence      - enhanced IBRS + LFENCE
+			ibrs              - use IBRS to protect kernel
 
 			Not specifying this option is equivalent to
 			spectre_v2=auto.
Makefile | 23
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 132
+SUBLEVEL = 133
 EXTRAVERSION =
 NAME = Dare mighty things
 
@@ -688,12 +688,21 @@ ifdef CONFIG_FUNCTION_TRACER
 CC_FLAGS_FTRACE := -pg
 endif
 
-RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register
-RETPOLINE_VDSO_CFLAGS_GCC := -mindirect-branch=thunk-inline -mindirect-branch-register
-RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk
-RETPOLINE_VDSO_CFLAGS_CLANG := -mretpoline
-RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG)))
-RETPOLINE_VDSO_CFLAGS := $(call cc-option,$(RETPOLINE_VDSO_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_VDSO_CFLAGS_CLANG)))
+ifdef CONFIG_CC_IS_GCC
+RETPOLINE_CFLAGS := $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
+RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch-cs-prefix)
+RETPOLINE_VDSO_CFLAGS := $(call cc-option,-mindirect-branch=thunk-inline -mindirect-branch-register)
+endif
+ifdef CONFIG_CC_IS_CLANG
+RETPOLINE_CFLAGS := -mretpoline-external-thunk
+RETPOLINE_VDSO_CFLAGS := -mretpoline
+endif
+
+ifdef CONFIG_RETHUNK
+RETHUNK_CFLAGS := -mfunction-return=thunk-extern
+RETPOLINE_CFLAGS += $(RETHUNK_CFLAGS)
+endif
+
 export RETPOLINE_CFLAGS
 export RETPOLINE_VDSO_CFLAGS
 
@@ -358,6 +358,14 @@ void __init check_bugs(void)
 	os_check_bugs();
 }
 
+void apply_retpolines(s32 *start, s32 *end)
+{
+}
+
+void apply_returns(s32 *start, s32 *end)
+{
+}
+
 void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
 {
 }
@@ -460,15 +460,6 @@ config GOLDFISH
 	def_bool y
 	depends on X86_GOLDFISH
 
-config RETPOLINE
-	bool "Avoid speculative indirect branches in kernel"
-	default y
-	help
-	  Compile kernel with the retpoline compiler options to guard against
-	  kernel-to-user data leaks by avoiding speculative indirect
-	  branches. Requires a compiler with -mindirect-branch=thunk-extern
-	  support for full protection. The kernel may run slower.
-
 config X86_CPU_RESCTRL
 	bool "x86 CPU resource control support"
 	depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD)
@@ -2422,6 +2413,88 @@ source "kernel/livepatch/Kconfig"
 
 endmenu
 
+config CC_HAS_SLS
+	def_bool $(cc-option,-mharden-sls=all)
+
+config CC_HAS_RETURN_THUNK
+	def_bool $(cc-option,-mfunction-return=thunk-extern)
+
+menuconfig SPECULATION_MITIGATIONS
+	bool "Mitigations for speculative execution vulnerabilities"
+	default y
+	help
+	  Say Y here to enable options which enable mitigations for
+	  speculative execution hardware vulnerabilities.
+
+	  If you say N, all mitigations will be disabled. You really
+	  should know what you are doing to say so.
+
+if SPECULATION_MITIGATIONS
+
+config PAGE_TABLE_ISOLATION
+	bool "Remove the kernel mapping in user mode"
+	default y
+	depends on (X86_64 || X86_PAE)
+	help
+	  This feature reduces the number of hardware side channels by
+	  ensuring that the majority of kernel addresses are not mapped
+	  into userspace.
+
+	  See Documentation/x86/pti.rst for more details.
+
+config RETPOLINE
+	bool "Avoid speculative indirect branches in kernel"
+	default y
+	help
+	  Compile kernel with the retpoline compiler options to guard against
+	  kernel-to-user data leaks by avoiding speculative indirect
+	  branches. Requires a compiler with -mindirect-branch=thunk-extern
+	  support for full protection. The kernel may run slower.
+
+config RETHUNK
+	bool "Enable return-thunks"
+	depends on RETPOLINE && CC_HAS_RETURN_THUNK
+	default y
+	help
+	  Compile the kernel with the return-thunks compiler option to guard
+	  against kernel-to-user data leaks by avoiding return speculation.
+	  Requires a compiler with -mfunction-return=thunk-extern
+	  support for full protection. The kernel may run slower.
+
+config CPU_UNRET_ENTRY
+	bool "Enable UNRET on kernel entry"
+	depends on CPU_SUP_AMD && RETHUNK
+	default y
+	help
+	  Compile the kernel with support for the retbleed=unret mitigation.
+
+config CPU_IBPB_ENTRY
+	bool "Enable IBPB on kernel entry"
+	depends on CPU_SUP_AMD
+	default y
+	help
+	  Compile the kernel with support for the retbleed=ibpb mitigation.
+
+config CPU_IBRS_ENTRY
+	bool "Enable IBRS on kernel entry"
+	depends on CPU_SUP_INTEL
+	default y
+	help
+	  Compile the kernel with support for the spectre_v2=ibrs mitigation.
+	  This mitigates both spectre_v2 and retbleed at great cost to
+	  performance.
+
+config SLS
+	bool "Mitigate Straight-Line-Speculation"
+	depends on CC_HAS_SLS && X86_64
+	default n
+	help
+	  Compile the kernel with straight-line-speculation options to guard
+	  against straight line speculation. The kernel image might be slightly
+	  larger.
+
+endif
+
 config ARCH_HAS_ADD_PAGES
 	def_bool y
 	depends on X86_64 && ARCH_ENABLE_MEMORY_HOTPLUG
@@ -31,7 +31,7 @@ endif
 CODE16GCC_CFLAGS := -m32 -Wa,$(srctree)/arch/x86/boot/code16gcc.h
 M16_CFLAGS := $(call cc-option, -m16, $(CODE16GCC_CFLAGS))
 
-REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -DDISABLE_BRANCH_PROFILING \
+REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -DDISABLE_BRANCH_PROFILING -D__DISABLE_EXPORTS \
 		   -Wall -Wstrict-prototypes -march=i386 -mregparm=3 \
 		   -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
 		   -mno-mmx -mno-sse $(call cc-option,-fcf-protection=none)
@@ -196,6 +196,10 @@ ifdef CONFIG_RETPOLINE
 endif
 endif
 
+ifdef CONFIG_SLS
+  KBUILD_CFLAGS += -mharden-sls=all
+endif
+
 KBUILD_LDFLAGS += -m elf_$(UTS_MACHINE)
 
 ifdef CONFIG_LTO_CLANG
@@ -89,7 +89,7 @@ SYM_FUNC_START(__efi64_thunk)
 
 	pop %rbx
 	pop %rbp
-	ret
+	RET
 SYM_FUNC_END(__efi64_thunk)
 
 	.code32
@@ -786,7 +786,7 @@ SYM_FUNC_START(efi32_pe_entry)
 2:	popl %edi		// restore callee-save registers
 	popl %ebx
 	leave
-	ret
+	RET
 SYM_FUNC_END(efi32_pe_entry)
 
 	.section ".rodata"
@@ -868,7 +868,7 @@ SYM_FUNC_START(startup32_check_sev_cbit)
 	popl %ebx
 	popl %eax
 #endif
-	ret
+	RET
 SYM_FUNC_END(startup32_check_sev_cbit)
 
 /*
@@ -58,7 +58,7 @@ SYM_FUNC_START(get_sev_encryption_bit)
 
 #endif /* CONFIG_AMD_MEM_ENCRYPT */
 
-	ret
+	RET
 SYM_FUNC_END(get_sev_encryption_bit)
 
 	.code64
@@ -99,7 +99,7 @@ SYM_FUNC_START(set_sev_encryption_mask)
 #endif
 
 	xor %rax, %rax
-	ret
+	RET
 SYM_FUNC_END(set_sev_encryption_mask)
 
 	.data
@@ -122,7 +122,7 @@ SYM_FUNC_START_LOCAL(__load_partial)
 	pxor T0, MSG
 
 .Lld_partial_8:
-	ret
+	RET
 SYM_FUNC_END(__load_partial)
 
 /*
@@ -180,7 +180,7 @@ SYM_FUNC_START_LOCAL(__store_partial)
 	mov %r10b, (%r9)
 
 .Lst_partial_1:
-	ret
+	RET
 SYM_FUNC_END(__store_partial)
 
 /*
@@ -225,7 +225,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_init)
 	movdqu STATE4, 0x40(STATEP)
 
 	FRAME_END
-	ret
+	RET
 SYM_FUNC_END(crypto_aegis128_aesni_init)
 
 /*
@@ -337,7 +337,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad)
 	movdqu STATE3, 0x30(STATEP)
 	movdqu STATE4, 0x40(STATEP)
 	FRAME_END
-	ret
+	RET
 
 .Lad_out_1:
 	movdqu STATE4, 0x00(STATEP)
@@ -346,7 +346,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad)
 	movdqu STATE2, 0x30(STATEP)
 	movdqu STATE3, 0x40(STATEP)
 	FRAME_END
-	ret
+	RET
 
 .Lad_out_2:
 	movdqu STATE3, 0x00(STATEP)
@@ -355,7 +355,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad)
 	movdqu STATE1, 0x30(STATEP)
 	movdqu STATE2, 0x40(STATEP)
 	FRAME_END
-	ret
+	RET
 
 .Lad_out_3:
 	movdqu STATE2, 0x00(STATEP)
@@ -364,7 +364,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad)
 	movdqu STATE0, 0x30(STATEP)
 	movdqu STATE1, 0x40(STATEP)
 	FRAME_END
-	ret
+	RET
 
 .Lad_out_4:
 	movdqu STATE1, 0x00(STATEP)
@@ -373,11 +373,11 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad)
 	movdqu STATE4, 0x30(STATEP)
 	movdqu STATE0, 0x40(STATEP)
 	FRAME_END
-	ret
+	RET
 
 .Lad_out:
 	FRAME_END
-	ret
+	RET
 SYM_FUNC_END(crypto_aegis128_aesni_ad)
 
 .macro encrypt_block a s0 s1 s2 s3 s4 i
@@ -452,7 +452,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc)
 	movdqu STATE2, 0x30(STATEP)
 	movdqu STATE3, 0x40(STATEP)
 	FRAME_END
-	ret
+	RET
 
 .Lenc_out_1:
 	movdqu STATE3, 0x00(STATEP)
@@ -461,7 +461,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc)
 	movdqu STATE1, 0x30(STATEP)
 	movdqu STATE2, 0x40(STATEP)
 	FRAME_END
-	ret
+	RET
 
 .Lenc_out_2:
 	movdqu STATE2, 0x00(STATEP)
@@ -470,7 +470,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc)
 	movdqu STATE0, 0x30(STATEP)
 	movdqu STATE1, 0x40(STATEP)
 	FRAME_END
-	ret
+	RET
 
 .Lenc_out_3:
 	movdqu STATE1, 0x00(STATEP)
@@ -479,7 +479,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc)
 	movdqu STATE4, 0x30(STATEP)
 	movdqu STATE0, 0x40(STATEP)
 	FRAME_END
-	ret
+	RET
 
 .Lenc_out_4:
 	movdqu STATE0, 0x00(STATEP)
@@ -488,11 +488,11 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc)
 	movdqu STATE3, 0x30(STATEP)
 	movdqu STATE4, 0x40(STATEP)
 	FRAME_END
-	ret
+	RET
 
 .Lenc_out:
 	FRAME_END
-	ret
+	RET
 SYM_FUNC_END(crypto_aegis128_aesni_enc)
 
 /*
@@ -532,7 +532,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc_tail)
 	movdqu STATE3, 0x40(STATEP)
 
 	FRAME_END
-	ret
+	RET
 SYM_FUNC_END(crypto_aegis128_aesni_enc_tail)
 
 .macro decrypt_block a s0 s1 s2 s3 s4 i
@@ -606,7 +606,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec)
 	movdqu STATE2, 0x30(STATEP)
 	movdqu STATE3, 0x40(STATEP)
 	FRAME_END
-	ret
+	RET
 
 .Ldec_out_1:
 	movdqu STATE3, 0x00(STATEP)
@@ -615,7 +615,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec)
 	movdqu STATE1, 0x30(STATEP)
 	movdqu STATE2, 0x40(STATEP)
 	FRAME_END
-	ret
+	RET
 
 .Ldec_out_2:
 	movdqu STATE2, 0x00(STATEP)
@@ -624,7 +624,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec)
 	movdqu STATE0, 0x30(STATEP)
 	movdqu STATE1, 0x40(STATEP)
 	FRAME_END
-	ret
+	RET
 
 .Ldec_out_3:
 	movdqu STATE1, 0x00(STATEP)
@@ -633,7 +633,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec)
 	movdqu STATE4, 0x30(STATEP)
 	movdqu STATE0, 0x40(STATEP)
 	FRAME_END
-	ret
+	RET
 
 .Ldec_out_4:
 	movdqu STATE0, 0x00(STATEP)
@@ -642,11 +642,11 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec)
 	movdqu STATE3, 0x30(STATEP)
 	movdqu STATE4, 0x40(STATEP)
 	FRAME_END
-	ret
+	RET
 
 .Ldec_out:
 	FRAME_END
-	ret
+	RET
 SYM_FUNC_END(crypto_aegis128_aesni_dec)
 
 /*
@@ -696,7 +696,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec_tail)
 	movdqu STATE3, 0x40(STATEP)
 
 	FRAME_END
-	ret
+	RET
 SYM_FUNC_END(crypto_aegis128_aesni_dec_tail)
 
 /*
@@ -743,5 +743,5 @@ SYM_FUNC_START(crypto_aegis128_aesni_final)
 	movdqu MSG, (%rsi)
 
 	FRAME_END
-	ret
+	RET
 SYM_FUNC_END(crypto_aegis128_aesni_final)
@@ -525,7 +525,7 @@ ddq_add_8:
 	/* return updated IV */
 	vpshufb xbyteswap, xcounter, xcounter
 	vmovdqu xcounter, (p_iv)
-	ret
+	RET
 .endm
 
 /*
@@ -1598,7 +1598,7 @@ SYM_FUNC_START(aesni_gcm_dec)
 	GCM_ENC_DEC dec
 	GCM_COMPLETE arg10, arg11
 	FUNC_RESTORE
-	ret
+	RET
 SYM_FUNC_END(aesni_gcm_dec)
 
 
@@ -1687,7 +1687,7 @@ SYM_FUNC_START(aesni_gcm_enc)
 
 	GCM_COMPLETE arg10, arg11
 	FUNC_RESTORE
-	ret
+	RET
 SYM_FUNC_END(aesni_gcm_enc)
 
 /*****************************************************************************
@@ -1705,7 +1705,7 @@ SYM_FUNC_START(aesni_gcm_init)
 	FUNC_SAVE
 	GCM_INIT %arg3, %arg4,%arg5, %arg6
 	FUNC_RESTORE
-	ret
+	RET
 SYM_FUNC_END(aesni_gcm_init)
 
 /*****************************************************************************
@@ -1720,7 +1720,7 @@ SYM_FUNC_START(aesni_gcm_enc_update)
 	FUNC_SAVE
 	GCM_ENC_DEC enc
 	FUNC_RESTORE
-	ret
+	RET
 SYM_FUNC_END(aesni_gcm_enc_update)
 
 /*****************************************************************************
@@ -1735,7 +1735,7 @@ SYM_FUNC_START(aesni_gcm_dec_update)
 	FUNC_SAVE
 	GCM_ENC_DEC dec
 	FUNC_RESTORE
-	ret
+	RET
 SYM_FUNC_END(aesni_gcm_dec_update)
 
 /*****************************************************************************
@@ -1750,7 +1750,7 @@ SYM_FUNC_START(aesni_gcm_finalize)
 	FUNC_SAVE
 	GCM_COMPLETE %arg3 %arg4
 	FUNC_RESTORE
-	ret
+	RET
 SYM_FUNC_END(aesni_gcm_finalize)
 
 #endif
@@ -1766,7 +1766,7 @@ SYM_FUNC_START_LOCAL(_key_expansion_256a)
 	pxor %xmm1, %xmm0
 	movaps %xmm0, (TKEYP)
 	add $0x10, TKEYP
-	ret
+	RET
 SYM_FUNC_END(_key_expansion_256a)
 SYM_FUNC_END_ALIAS(_key_expansion_128)
 
@@ -1791,7 +1791,7 @@ SYM_FUNC_START_LOCAL(_key_expansion_192a)
 	shufps $0b01001110, %xmm2, %xmm1
 	movaps %xmm1, 0x10(TKEYP)
 	add $0x20, TKEYP
-	ret
+	RET
 SYM_FUNC_END(_key_expansion_192a)
 
 SYM_FUNC_START_LOCAL(_key_expansion_192b)
@@ -1810,7 +1810,7 @@ SYM_FUNC_START_LOCAL(_key_expansion_192b)
 
 	movaps %xmm0, (TKEYP)
 	add $0x10, TKEYP
-	ret
+	RET
 SYM_FUNC_END(_key_expansion_192b)
 
 SYM_FUNC_START_LOCAL(_key_expansion_256b)
@@ -1822,7 +1822,7 @@ SYM_FUNC_START_LOCAL(_key_expansion_256b)
 	pxor %xmm1, %xmm2
 	movaps %xmm2, (TKEYP)
 	add $0x10, TKEYP
-	ret
+	RET
 SYM_FUNC_END(_key_expansion_256b)
 
 /*
@@ -1937,7 +1937,7 @@ SYM_FUNC_START(aesni_set_key)
 	popl KEYP
 #endif
 	FRAME_END
-	ret
+	RET
 SYM_FUNC_END(aesni_set_key)
 
 /*
@@ -1961,7 +1961,7 @@ SYM_FUNC_START(aesni_enc)
 	popl KEYP
 #endif
 	FRAME_END
-	ret
+	RET
 SYM_FUNC_END(aesni_enc)
 
 /*
@@ -2018,7 +2018,7 @@ SYM_FUNC_START_LOCAL(_aesni_enc1)
 	aesenc KEY, STATE
 	movaps 0x70(TKEYP), KEY
 	aesenclast KEY, STATE
-	ret
+	RET
 SYM_FUNC_END(_aesni_enc1)
 
 /*
@@ -2126,7 +2126,7 @@ SYM_FUNC_START_LOCAL(_aesni_enc4)
 	aesenclast KEY, STATE2
 	aesenclast KEY, STATE3
 	aesenclast KEY, STATE4
-	ret
+	RET
 SYM_FUNC_END(_aesni_enc4)
 
 /*
@@ -2151,7 +2151,7 @@ SYM_FUNC_START(aesni_dec)
 	popl KEYP
 #endif
 	FRAME_END
-	ret
+	RET
 SYM_FUNC_END(aesni_dec)
 
 /*
@@ -2208,7 +2208,7 @@ SYM_FUNC_START_LOCAL(_aesni_dec1)
 	aesdec KEY, STATE
 	movaps 0x70(TKEYP), KEY
 	aesdeclast KEY, STATE
-	ret
+	RET
 SYM_FUNC_END(_aesni_dec1)
 
 /*
@@ -2316,7 +2316,7 @@ SYM_FUNC_START_LOCAL(_aesni_dec4)
 	aesdeclast KEY, STATE2
 	aesdeclast KEY, STATE3
 	aesdeclast KEY, STATE4
-	ret
+	RET
 SYM_FUNC_END(_aesni_dec4)
 
 /*
@@ -2376,7 +2376,7 @@ SYM_FUNC_START(aesni_ecb_enc)
 	popl LEN
 #endif
 	FRAME_END
-	ret
+	RET
 SYM_FUNC_END(aesni_ecb_enc)
 
 /*
@@ -2437,7 +2437,7 @@ SYM_FUNC_START(aesni_ecb_dec)
 	popl LEN
 #endif
 	FRAME_END
-	ret
+	RET
 SYM_FUNC_END(aesni_ecb_dec)
 
 /*
@@ -2481,7 +2481,7 @@ SYM_FUNC_START(aesni_cbc_enc)
 	popl IVP
 #endif
 	FRAME_END
-	ret
+	RET
 SYM_FUNC_END(aesni_cbc_enc)
 
 /*
@@ -2574,7 +2574,7 @@ SYM_FUNC_START(aesni_cbc_dec)
 	popl IVP
 #endif
 	FRAME_END
-	ret
+	RET
 SYM_FUNC_END(aesni_cbc_dec)
 
 #ifdef __x86_64__
@@ -2602,7 +2602,7 @@ SYM_FUNC_START_LOCAL(_aesni_inc_init)
 	mov $1, TCTR_LOW
 	movq TCTR_LOW, INC
 	movq CTR, TCTR_LOW
-	ret
+	RET
 SYM_FUNC_END(_aesni_inc_init)
 
 /*
@@ -2630,7 +2630,7 @@ SYM_FUNC_START_LOCAL(_aesni_inc)
 .Linc_low:
 	movaps CTR, IV
 	pshufb BSWAP_MASK, IV
-	ret
+	RET
 SYM_FUNC_END(_aesni_inc)
 
 /*
@@ -2693,7 +2693,7 @@ SYM_FUNC_START(aesni_ctr_enc)
 	movups IV, (IVP)
 .Lctr_enc_just_ret:
 	FRAME_END
-	ret
+	RET
 SYM_FUNC_END(aesni_ctr_enc)
 
 /*
@@ -2778,7 +2778,7 @@ SYM_FUNC_START(aesni_xts_encrypt)
 	movups IV, (IVP)
 
 	FRAME_END
-	ret
+	RET
 SYM_FUNC_END(aesni_xts_encrypt)
 
 /*
@@ -2846,7 +2846,7 @@ SYM_FUNC_START(aesni_xts_decrypt)
 	movups IV, (IVP)
 
 	FRAME_END
-	ret
+	RET
 SYM_FUNC_END(aesni_xts_decrypt)
 
 #endif
@@ -1777,7 +1777,7 @@ SYM_FUNC_START(aesni_gcm_init_avx_gen2)
 	FUNC_SAVE
 	INIT GHASH_MUL_AVX, PRECOMPUTE_AVX
 	FUNC_RESTORE
-	ret
+	RET
 SYM_FUNC_END(aesni_gcm_init_avx_gen2)
 
 ###############################################################################
@@ -1798,15 +1798,15 @@ SYM_FUNC_START(aesni_gcm_enc_update_avx_gen2)
 	# must be 192
 	GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, ENC, 11
 	FUNC_RESTORE
-	ret
+	RET
 key_128_enc_update:
 	GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, ENC, 9
 	FUNC_RESTORE
-	ret
+	RET
 key_256_enc_update:
 	GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, ENC, 13
 	FUNC_RESTORE
-	ret
+	RET
 SYM_FUNC_END(aesni_gcm_enc_update_avx_gen2)
 
 ###############################################################################
@@ -1827,15 +1827,15 @@ SYM_FUNC_START(aesni_gcm_dec_update_avx_gen2)
 	# must be 192
 	GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, DEC, 11
 	FUNC_RESTORE
-	ret
+	RET
 key_128_dec_update:
 	GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, DEC, 9
 	FUNC_RESTORE
-	ret
+	RET
 key_256_dec_update:
 	GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, DEC, 13
 	FUNC_RESTORE
-	ret
+	RET
 SYM_FUNC_END(aesni_gcm_dec_update_avx_gen2)
 
 ###############################################################################
@@ -1856,15 +1856,15 @@ SYM_FUNC_START(aesni_gcm_finalize_avx_gen2)
 	# must be 192
 	GCM_COMPLETE GHASH_MUL_AVX, 11, arg3, arg4
 	FUNC_RESTORE
-	ret
+	RET
 key_128_finalize:
 	GCM_COMPLETE GHASH_MUL_AVX, 9, arg3, arg4
 	FUNC_RESTORE
-	ret
+	RET
 key_256_finalize:
 	GCM_COMPLETE GHASH_MUL_AVX, 13, arg3, arg4
 	FUNC_RESTORE
-	ret
+	RET
 SYM_FUNC_END(aesni_gcm_finalize_avx_gen2)
 
 ###############################################################################
@@ -2745,7 +2745,7 @@ SYM_FUNC_START(aesni_gcm_init_avx_gen4)
 	FUNC_SAVE
 	INIT GHASH_MUL_AVX2, PRECOMPUTE_AVX2
 	FUNC_RESTORE
-	ret
+	RET
 SYM_FUNC_END(aesni_gcm_init_avx_gen4)
 
 ###############################################################################
@@ -2766,15 +2766,15 @@ SYM_FUNC_START(aesni_gcm_enc_update_avx_gen4)
 	# must be 192
 	GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, ENC, 11
 	FUNC_RESTORE
-	ret
+	RET
 key_128_enc_update4:
 	GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, ENC, 9
 	FUNC_RESTORE
-	ret
+	RET
 key_256_enc_update4:
 	GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, ENC, 13
 	FUNC_RESTORE
-	ret
+	RET
 SYM_FUNC_END(aesni_gcm_enc_update_avx_gen4)
 
 ###############################################################################
@@ -2795,15 +2795,15 @@ SYM_FUNC_START(aesni_gcm_dec_update_avx_gen4)
 	# must be 192
 	GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, DEC, 11
 	FUNC_RESTORE
-	ret
+	RET
 key_128_dec_update4:
 	GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, DEC, 9
 	FUNC_RESTORE
-	ret
+	RET
 key_256_dec_update4:
 	GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, DEC, 13
 	FUNC_RESTORE
-	ret
+	RET
 SYM_FUNC_END(aesni_gcm_dec_update_avx_gen4)
 
 ###############################################################################
@@ -2824,13 +2824,13 @@ SYM_FUNC_START(aesni_gcm_finalize_avx_gen4)
 	# must be 192
 	GCM_COMPLETE GHASH_MUL_AVX2, 11, arg3, arg4
 	FUNC_RESTORE
-	ret
+	RET
 key_128_finalize4:
 	GCM_COMPLETE GHASH_MUL_AVX2, 9, arg3, arg4
 	FUNC_RESTORE
-	ret
+	RET
 key_256_finalize4:
 	GCM_COMPLETE GHASH_MUL_AVX2, 13, arg3, arg4
 	FUNC_RESTORE
-	ret
+	RET
 SYM_FUNC_END(aesni_gcm_finalize_avx_gen4)
@@ -171,7 +171,7 @@ SYM_FUNC_START(blake2s_compress_ssse3)
 	movdqu %xmm1,0x10(%rdi)
 	movdqu %xmm14,0x20(%rdi)
 .Lendofloop:
-	ret
+	RET
 SYM_FUNC_END(blake2s_compress_ssse3)
 
 #ifdef CONFIG_AS_AVX512
@@ -251,6 +251,6 @@ SYM_FUNC_START(blake2s_compress_avx512)
 	vmovdqu %xmm1,0x10(%rdi)
 	vmovdqu %xmm4,0x20(%rdi)
 	vzeroupper
-	retq
+	RET
 SYM_FUNC_END(blake2s_compress_avx512)
 #endif /* CONFIG_AS_AVX512 */
@@ -135,10 +135,10 @@ SYM_FUNC_START(__blowfish_enc_blk)
 	jnz .L__enc_xor;
 
 	write_block();
-	ret;
+	RET;
 .L__enc_xor:
 	xor_block();
-	ret;
+	RET;
 SYM_FUNC_END(__blowfish_enc_blk)
 
 SYM_FUNC_START(blowfish_dec_blk)
@@ -170,7 +170,7 @@ SYM_FUNC_START(blowfish_dec_blk)
 
 	movq %r11, %r12;
 
-	ret;
+	RET;
 SYM_FUNC_END(blowfish_dec_blk)
 
 /**********************************************************************
@@ -322,14 +322,14 @@ SYM_FUNC_START(__blowfish_enc_blk_4way)
 
 	popq %rbx;
 	popq %r12;
-	ret;
+	RET;
 
 .L__enc_xor4:
 	xor_block4();
 
 	popq %rbx;
 	popq %r12;
-	ret;
+	RET;
 SYM_FUNC_END(__blowfish_enc_blk_4way)
 
 SYM_FUNC_START(blowfish_dec_blk_4way)
@@ -364,5 +364,5 @@ SYM_FUNC_START(blowfish_dec_blk_4way)
 	popq %rbx;
 	popq %r12;
 
-	ret;
+	RET;
 SYM_FUNC_END(blowfish_dec_blk_4way)
@@ -193,7 +193,7 @@ SYM_FUNC_START_LOCAL(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
 	roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
 		  %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
 		  %rcx, (%r9));
-	ret;
+	RET;
 SYM_FUNC_END(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
 
 .align 8
@@ -201,7 +201,7 @@ SYM_FUNC_START_LOCAL(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
 	roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
 		  %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
 		  %rax, (%r9));
-	ret;
+	RET;
 SYM_FUNC_END(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
 
 /*
@@ -787,7 +787,7 @@ SYM_FUNC_START_LOCAL(__camellia_enc_blk16)
 		      %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
 
 	FRAME_END
-	ret;
+	RET;
 
 .align 8
 .Lenc_max32:
@@ -874,7 +874,7 @@ SYM_FUNC_START_LOCAL(__camellia_dec_blk16)
 		      %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
 
 	FRAME_END
-	ret;
+	RET;
 
 .align 8
 .Ldec_max32:
@@ -915,7 +915,7 @@ SYM_FUNC_START(camellia_ecb_enc_16way)
 		     %xmm8, %rsi);
 
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(camellia_ecb_enc_16way)
 
 SYM_FUNC_START(camellia_ecb_dec_16way)
@@ -945,7 +945,7 @@ SYM_FUNC_START(camellia_ecb_dec_16way)
 		     %xmm8, %rsi);
 
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(camellia_ecb_dec_16way)
 
 SYM_FUNC_START(camellia_cbc_dec_16way)
@@ -996,7 +996,7 @@ SYM_FUNC_START(camellia_cbc_dec_16way)
 		     %xmm8, %rsi);
 
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(camellia_cbc_dec_16way)
 
 #define inc_le128(x, minus_one, tmp) \
@@ -1109,7 +1109,7 @@ SYM_FUNC_START(camellia_ctr_16way)
 		     %xmm8, %rsi);
 
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(camellia_ctr_16way)
 
 #define gf128mul_x_ble(iv, mask, tmp) \
@@ -1253,7 +1253,7 @@ SYM_FUNC_START_LOCAL(camellia_xts_crypt_16way)
 		     %xmm8, %rsi);
 
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(camellia_xts_crypt_16way)
 
 SYM_FUNC_START(camellia_xts_enc_16way)
@@ -227,7 +227,7 @@ SYM_FUNC_START_LOCAL(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
 	roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
 		  %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
 		  %rcx, (%r9));
-	ret;
+	RET;
 SYM_FUNC_END(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
 
 .align 8
@@ -235,7 +235,7 @@ SYM_FUNC_START_LOCAL(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
 	roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
 		  %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
 		  %rax, (%r9));
-	ret;
+	RET;
 SYM_FUNC_END(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
 
 /*
@@ -825,7 +825,7 @@ SYM_FUNC_START_LOCAL(__camellia_enc_blk32)
 		      %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
 
 	FRAME_END
-	ret;
+	RET;
 
 .align 8
 .Lenc_max32:
@@ -912,7 +912,7 @@ SYM_FUNC_START_LOCAL(__camellia_dec_blk32)
 		      %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
 
 	FRAME_END
-	ret;
+	RET;
 
 .align 8
 .Ldec_max32:
@@ -957,7 +957,7 @@ SYM_FUNC_START(camellia_ecb_enc_32way)
 	vzeroupper;
 
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(camellia_ecb_enc_32way)
 
 SYM_FUNC_START(camellia_ecb_dec_32way)
@@ -991,7 +991,7 @@ SYM_FUNC_START(camellia_ecb_dec_32way)
 	vzeroupper;
 
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(camellia_ecb_dec_32way)
 
 SYM_FUNC_START(camellia_cbc_dec_32way)
@@ -1059,7 +1059,7 @@ SYM_FUNC_START(camellia_cbc_dec_32way)
 	vzeroupper;
 
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(camellia_cbc_dec_32way)
 
 #define inc_le128(x, minus_one, tmp) \
@@ -1199,7 +1199,7 @@ SYM_FUNC_START(camellia_ctr_32way)
 	vzeroupper;
 
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(camellia_ctr_32way)
 
 #define gf128mul_x_ble(iv, mask, tmp) \
@@ -1366,7 +1366,7 @@ SYM_FUNC_START_LOCAL(camellia_xts_crypt_32way)
 	vzeroupper;
 
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(camellia_xts_crypt_32way)
 
 SYM_FUNC_START(camellia_xts_enc_32way)
@@ -213,13 +213,13 @@ SYM_FUNC_START(__camellia_enc_blk)
 	enc_outunpack(mov, RT1);
 
 	movq RR12, %r12;
-	ret;
+	RET;
 
 .L__enc_xor:
 	enc_outunpack(xor, RT1);
 
 	movq RR12, %r12;
-	ret;
+	RET;
 SYM_FUNC_END(__camellia_enc_blk)
 
 SYM_FUNC_START(camellia_dec_blk)
@@ -257,7 +257,7 @@ SYM_FUNC_START(camellia_dec_blk)
 	dec_outunpack();
 
 	movq RR12, %r12;
-	ret;
+	RET;
 SYM_FUNC_END(camellia_dec_blk)
 
 /**********************************************************************
@@ -448,14 +448,14 @@ SYM_FUNC_START(__camellia_enc_blk_2way)
 
 	movq RR12, %r12;
 	popq %rbx;
-	ret;
+	RET;
 
 .L__enc2_xor:
 	enc_outunpack2(xor, RT2);
 
 	movq RR12, %r12;
 	popq %rbx;
-	ret;
+	RET;
 SYM_FUNC_END(__camellia_enc_blk_2way)
 
 SYM_FUNC_START(camellia_dec_blk_2way)
@@ -495,5 +495,5 @@ SYM_FUNC_START(camellia_dec_blk_2way)
 
 	movq RR12, %r12;
 	movq RXOR, %rbx;
-	ret;
+	RET;
 SYM_FUNC_END(camellia_dec_blk_2way)
@@ -279,7 +279,7 @@ SYM_FUNC_START_LOCAL(__cast5_enc_blk16)
 	outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
 	outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
 
-	ret;
+	RET;
 SYM_FUNC_END(__cast5_enc_blk16)
 
 .align 16
@@ -352,7 +352,7 @@ SYM_FUNC_START_LOCAL(__cast5_dec_blk16)
 	outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
 	outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
 
-	ret;
+	RET;
 
 .L__skip_dec:
 	vpsrldq $4, RKR, RKR;
@@ -393,7 +393,7 @@ SYM_FUNC_START(cast5_ecb_enc_16way)
 
 	popq %r15;
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(cast5_ecb_enc_16way)
 
 SYM_FUNC_START(cast5_ecb_dec_16way)
@@ -431,7 +431,7 @@ SYM_FUNC_START(cast5_ecb_dec_16way)
 
 	popq %r15;
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(cast5_ecb_dec_16way)
 
 SYM_FUNC_START(cast5_cbc_dec_16way)
@@ -483,7 +483,7 @@ SYM_FUNC_START(cast5_cbc_dec_16way)
 	popq %r15;
 	popq %r12;
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(cast5_cbc_dec_16way)
 
 SYM_FUNC_START(cast5_ctr_16way)
@@ -559,5 +559,5 @@ SYM_FUNC_START(cast5_ctr_16way)
 	popq %r15;
 	popq %r12;
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(cast5_ctr_16way)
@@ -291,7 +291,7 @@ SYM_FUNC_START_LOCAL(__cast6_enc_blk8)
 	outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
 	outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
 
-	ret;
+	RET;
 SYM_FUNC_END(__cast6_enc_blk8)
 
 	.align 8
@@ -338,7 +338,7 @@ SYM_FUNC_START_LOCAL(__cast6_dec_blk8)
 	outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
 	outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
 
-	ret;
+	RET;
 SYM_FUNC_END(__cast6_dec_blk8)
 
 SYM_FUNC_START(cast6_ecb_enc_8way)
@@ -361,7 +361,7 @@ SYM_FUNC_START(cast6_ecb_enc_8way)
 
 	popq %r15;
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(cast6_ecb_enc_8way)
 
 SYM_FUNC_START(cast6_ecb_dec_8way)
@@ -384,7 +384,7 @@ SYM_FUNC_START(cast6_ecb_dec_8way)
 
 	popq %r15;
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(cast6_ecb_dec_8way)
 
 SYM_FUNC_START(cast6_cbc_dec_8way)
@@ -410,7 +410,7 @@ SYM_FUNC_START(cast6_cbc_dec_8way)
 	popq %r15;
 	popq %r12;
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(cast6_cbc_dec_8way)
 
 SYM_FUNC_START(cast6_ctr_8way)
@@ -438,7 +438,7 @@ SYM_FUNC_START(cast6_ctr_8way)
 	popq %r15;
 	popq %r12;
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(cast6_ctr_8way)
 
 SYM_FUNC_START(cast6_xts_enc_8way)
@@ -465,7 +465,7 @@ SYM_FUNC_START(cast6_xts_enc_8way)
 
 	popq %r15;
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(cast6_xts_enc_8way)
 
 SYM_FUNC_START(cast6_xts_dec_8way)
@@ -492,5 +492,5 @@ SYM_FUNC_START(cast6_xts_dec_8way)
 
 	popq %r15;
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(cast6_xts_dec_8way)
@@ -193,7 +193,7 @@ SYM_FUNC_START(chacha_2block_xor_avx2)
 
 .Ldone2:
 	vzeroupper
-	ret
+	RET
 
 .Lxorpart2:
 	# xor remaining bytes from partial register into output
@@ -498,7 +498,7 @@ SYM_FUNC_START(chacha_4block_xor_avx2)
 
 .Ldone4:
 	vzeroupper
-	ret
+	RET
 
 .Lxorpart4:
 	# xor remaining bytes from partial register into output
@@ -992,7 +992,7 @@ SYM_FUNC_START(chacha_8block_xor_avx2)
 .Ldone8:
 	vzeroupper
 	lea	-8(%r10),%rsp
-	ret
+	RET
 
 .Lxorpart8:
 	# xor remaining bytes from partial register into output
@@ -166,7 +166,7 @@ SYM_FUNC_START(chacha_2block_xor_avx512vl)
 
 .Ldone2:
 	vzeroupper
-	ret
+	RET
 
 .Lxorpart2:
 	# xor remaining bytes from partial register into output
@@ -432,7 +432,7 @@ SYM_FUNC_START(chacha_4block_xor_avx512vl)
 
 .Ldone4:
 	vzeroupper
-	ret
+	RET
 
 .Lxorpart4:
 	# xor remaining bytes from partial register into output
@@ -812,7 +812,7 @@ SYM_FUNC_START(chacha_8block_xor_avx512vl)
 
 .Ldone8:
 	vzeroupper
-	ret
+	RET
 
 .Lxorpart8:
 	# xor remaining bytes from partial register into output
@@ -108,7 +108,7 @@ SYM_FUNC_START_LOCAL(chacha_permute)
 	sub	$2,%r8d
 	jnz	.Ldoubleround
 
-	ret
+	RET
 SYM_FUNC_END(chacha_permute)
 
 SYM_FUNC_START(chacha_block_xor_ssse3)
@@ -166,7 +166,7 @@ SYM_FUNC_START(chacha_block_xor_ssse3)
 
 .Ldone:
 	FRAME_END
-	ret
+	RET
 
 .Lxorpart:
 	# xor remaining bytes from partial register into output
@@ -217,7 +217,7 @@ SYM_FUNC_START(hchacha_block_ssse3)
 	movdqu	%xmm3,0x10(%rsi)
 
 	FRAME_END
-	ret
+	RET
 SYM_FUNC_END(hchacha_block_ssse3)
 
 SYM_FUNC_START(chacha_4block_xor_ssse3)
@@ -762,7 +762,7 @@ SYM_FUNC_START(chacha_4block_xor_ssse3)
 
 .Ldone4:
 	lea	-8(%r10),%rsp
-	ret
+	RET
 
 .Lxorpart4:
 	# xor remaining bytes from partial register into output
@@ -236,5 +236,5 @@ fold_64:
 	pxor	%xmm2, %xmm1
 	pextrd	$0x01, %xmm1, %eax
 
-	ret
+	RET
 SYM_FUNC_END(crc32_pclmul_le_16)
@@ -309,7 +309,7 @@ do_return:
 	popq	%rsi
 	popq	%rdi
 	popq	%rbx
-	ret
+	RET
 SYM_FUNC_END(crc_pcl)
 
 .section	.rodata, "a", @progbits
@@ -257,7 +257,7 @@ SYM_FUNC_START(crc_t10dif_pcl)
 	# Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of xmm0.
 
 	pextrw	$0, %xmm0, %eax
-	ret
+	RET
 
 .align 16
 .Lless_than_256_bytes:
@@ -243,7 +243,7 @@ SYM_FUNC_START(des3_ede_x86_64_crypt_blk)
 	popq %r12;
 	popq %rbx;
 
-	ret;
+	RET;
 SYM_FUNC_END(des3_ede_x86_64_crypt_blk)
 
 /***********************************************************************
@@ -528,7 +528,7 @@ SYM_FUNC_START(des3_ede_x86_64_crypt_blk_3way)
 	popq %r12;
 	popq %rbx;
 
-	ret;
+	RET;
 SYM_FUNC_END(des3_ede_x86_64_crypt_blk_3way)
 
 .section	.rodata, "a", @progbits
@@ -85,7 +85,7 @@ SYM_FUNC_START_LOCAL(__clmul_gf128mul_ble)
 	psrlq $1, T2
 	pxor T2, T1
 	pxor T1, DATA
-	ret
+	RET
 SYM_FUNC_END(__clmul_gf128mul_ble)
 
 /* void clmul_ghash_mul(char *dst, const u128 *shash) */
@@ -99,7 +99,7 @@ SYM_FUNC_START(clmul_ghash_mul)
 	pshufb BSWAP, DATA
 	movups DATA, (%rdi)
 	FRAME_END
-	ret
+	RET
 SYM_FUNC_END(clmul_ghash_mul)
 
 /*
@@ -128,5 +128,5 @@ SYM_FUNC_START(clmul_ghash_update)
 	movups DATA, (%rdi)
 .Lupdate_just_ret:
 	FRAME_END
-	ret
+	RET
 SYM_FUNC_END(clmul_ghash_update)
@@ -153,5 +153,5 @@ SYM_FUNC_START(nh_avx2)
 	vpaddq		T1, T0, T0
 	vpaddq		T4, T0, T0
 	vmovdqu		T0, (HASH)
-	ret
+	RET
 SYM_FUNC_END(nh_avx2)
@@ -119,5 +119,5 @@ SYM_FUNC_START(nh_sse2)
 	paddq		PASS2_SUMS, T1
 	movdqu		T0, 0x00(HASH)
 	movdqu		T1, 0x10(HASH)
-	ret
+	RET
 SYM_FUNC_END(nh_sse2)
@@ -297,7 +297,7 @@ ___
 $code.=<<___;
 	mov	\$1,%eax
 .Lno_key:
-	ret
+	RET
 ___
 &end_function("poly1305_init_x86_64");
 
@@ -373,7 +373,7 @@ $code.=<<___;
 .cfi_adjust_cfa_offset	-48
 .Lno_data:
 .Lblocks_epilogue:
-	ret
+	RET
 .cfi_endproc
 ___
 &end_function("poly1305_blocks_x86_64");
@@ -399,7 +399,7 @@ $code.=<<___;
 	mov	%rax,0($mac)	# write result
 	mov	%rcx,8($mac)
 
-	ret
+	RET
 ___
 &end_function("poly1305_emit_x86_64");
 if ($avx) {
@@ -429,7 +429,7 @@ ___
 &poly1305_iteration();
 $code.=<<___;
 	pop $ctx
-	ret
+	RET
 .size	__poly1305_block,.-__poly1305_block
 
 .type	__poly1305_init_avx,\@abi-omnipotent
@@ -594,7 +594,7 @@ __poly1305_init_avx:
 
 	lea	-48-64($ctx),$ctx	# size [de-]optimization
 	pop %rbp
-	ret
+	RET
 .size	__poly1305_init_avx,.-__poly1305_init_avx
 ___
 
@@ -747,7 +747,7 @@ $code.=<<___;
 .cfi_restore	%rbp
 .Lno_data_avx:
 .Lblocks_avx_epilogue:
-	ret
+	RET
 .cfi_endproc
 
 .align	32
@@ -1452,7 +1452,7 @@ $code.=<<___ if (!$win64);
 ___
 $code.=<<___;
 	vzeroupper
-	ret
+	RET
 .cfi_endproc
 ___
 &end_function("poly1305_blocks_avx");
@@ -1508,7 +1508,7 @@ $code.=<<___;
 	mov	%rax,0($mac)	# write result
 	mov	%rcx,8($mac)
 
-	ret
+	RET
 ___
 &end_function("poly1305_emit_avx");
 
@@ -1675,7 +1675,7 @@ $code.=<<___;
 .cfi_restore	%rbp
 .Lno_data_avx2$suffix:
 .Lblocks_avx2_epilogue$suffix:
-	ret
+	RET
 .cfi_endproc
 
 .align	32
@@ -2201,7 +2201,7 @@ $code.=<<___ if (!$win64);
 ___
 $code.=<<___;
 	vzeroupper
-	ret
+	RET
 .cfi_endproc
 ___
 if($avx > 2 && $avx512) {
@@ -2792,7 +2792,7 @@ $code.=<<___ if (!$win64);
 .cfi_def_cfa_register	%rsp
 ___
 $code.=<<___;
-	ret
+	RET
 .cfi_endproc
 ___
 
@@ -2893,7 +2893,7 @@ $code.=<<___ if ($flavour =~ /elf32/);
 ___
 $code.=<<___;
 	mov	\$1,%eax
-	ret
+	RET
 .size	poly1305_init_base2_44,.-poly1305_init_base2_44
 ___
 {
@@ -3010,7 +3010,7 @@ poly1305_blocks_vpmadd52:
 	jnz	.Lblocks_vpmadd52_4x
 
 .Lno_data_vpmadd52:
-	ret
+	RET
 .size	poly1305_blocks_vpmadd52,.-poly1305_blocks_vpmadd52
 ___
 }
@@ -3451,7 +3451,7 @@ poly1305_blocks_vpmadd52_4x:
 	vzeroall
 
 .Lno_data_vpmadd52_4x:
-	ret
+	RET
 .size	poly1305_blocks_vpmadd52_4x,.-poly1305_blocks_vpmadd52_4x
 ___
 }
@@ -3824,7 +3824,7 @@ $code.=<<___;
 	vzeroall
 
 .Lno_data_vpmadd52_8x:
-	ret
+	RET
 .size	poly1305_blocks_vpmadd52_8x,.-poly1305_blocks_vpmadd52_8x
 ___
 }
@@ -3861,7 +3861,7 @@ poly1305_emit_base2_44:
 	mov	%rax,0($mac)	# write result
 	mov	%rcx,8($mac)
 
-	ret
+	RET
 .size	poly1305_emit_base2_44,.-poly1305_emit_base2_44
 ___
 } } }
@@ -3916,7 +3916,7 @@ xor128_encrypt_n_pad:
 
 .Ldone_enc:
 	mov	$otp,%rax
-	ret
+	RET
 .size	xor128_encrypt_n_pad,.-xor128_encrypt_n_pad
 
 .globl	xor128_decrypt_n_pad
@@ -3967,7 +3967,7 @@ xor128_decrypt_n_pad:
 
 .Ldone_dec:
 	mov	$otp,%rax
-	ret
+	RET
 .size	xor128_decrypt_n_pad,.-xor128_decrypt_n_pad
 ___
 }
@@ -4109,7 +4109,7 @@ avx_handler:
 	pop	%rbx
 	pop	%rdi
 	pop	%rsi
-	ret
+	RET
 .size	avx_handler,.-avx_handler
 
 .section	.pdata
@@ -605,7 +605,7 @@ SYM_FUNC_START_LOCAL(__serpent_enc_blk8_avx)
 	write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
 	write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
 
-	ret;
+	RET;
 SYM_FUNC_END(__serpent_enc_blk8_avx)
 
 	.align 8
@@ -659,7 +659,7 @@ SYM_FUNC_START_LOCAL(__serpent_dec_blk8_avx)
 	write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
 	write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
 
-	ret;
+	RET;
 SYM_FUNC_END(__serpent_dec_blk8_avx)
 
 SYM_FUNC_START(serpent_ecb_enc_8way_avx)
@@ -677,7 +677,7 @@ SYM_FUNC_START(serpent_ecb_enc_8way_avx)
 	store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(serpent_ecb_enc_8way_avx)
 
 SYM_FUNC_START(serpent_ecb_dec_8way_avx)
@@ -695,7 +695,7 @@ SYM_FUNC_START(serpent_ecb_dec_8way_avx)
 	store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
 
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(serpent_ecb_dec_8way_avx)
 
 SYM_FUNC_START(serpent_cbc_dec_8way_avx)
@@ -713,7 +713,7 @@ SYM_FUNC_START(serpent_cbc_dec_8way_avx)
 	store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
 
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(serpent_cbc_dec_8way_avx)
 
 SYM_FUNC_START(serpent_ctr_8way_avx)
@@ -733,7 +733,7 @@ SYM_FUNC_START(serpent_ctr_8way_avx)
 	store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(serpent_ctr_8way_avx)
 
 SYM_FUNC_START(serpent_xts_enc_8way_avx)
@@ -755,7 +755,7 @@ SYM_FUNC_START(serpent_xts_enc_8way_avx)
 	store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(serpent_xts_enc_8way_avx)
 
 SYM_FUNC_START(serpent_xts_dec_8way_avx)
@@ -777,5 +777,5 @@ SYM_FUNC_START(serpent_xts_dec_8way_avx)
 	store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
 
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(serpent_xts_dec_8way_avx)
@@ -611,7 +611,7 @@ SYM_FUNC_START_LOCAL(__serpent_enc_blk16)
 	write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
 	write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
 
-	ret;
+	RET;
 SYM_FUNC_END(__serpent_enc_blk16)
 
 	.align 8
@@ -665,7 +665,7 @@ SYM_FUNC_START_LOCAL(__serpent_dec_blk16)
 	write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
 	write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
 
-	ret;
+	RET;
 SYM_FUNC_END(__serpent_dec_blk16)
 
 SYM_FUNC_START(serpent_ecb_enc_16way)
@@ -687,7 +687,7 @@ SYM_FUNC_START(serpent_ecb_enc_16way)
 	vzeroupper;
 
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(serpent_ecb_enc_16way)
 
 SYM_FUNC_START(serpent_ecb_dec_16way)
@@ -709,7 +709,7 @@ SYM_FUNC_START(serpent_ecb_dec_16way)
 	vzeroupper;
 
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(serpent_ecb_dec_16way)
 
 SYM_FUNC_START(serpent_cbc_dec_16way)
@@ -732,7 +732,7 @@ SYM_FUNC_START(serpent_cbc_dec_16way)
 	vzeroupper;
 
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(serpent_cbc_dec_16way)
 
 SYM_FUNC_START(serpent_ctr_16way)
@@ -757,7 +757,7 @@ SYM_FUNC_START(serpent_ctr_16way)
 	vzeroupper;
 
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(serpent_ctr_16way)
 
 SYM_FUNC_START(serpent_xts_enc_16way)
@@ -783,7 +783,7 @@ SYM_FUNC_START(serpent_xts_enc_16way)
 	vzeroupper;
 
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(serpent_xts_enc_16way)
 
 SYM_FUNC_START(serpent_xts_dec_16way)
@@ -809,5 +809,5 @@ SYM_FUNC_START(serpent_xts_dec_16way)
 	vzeroupper;
 
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(serpent_xts_dec_16way)
@@ -553,12 +553,12 @@ SYM_FUNC_START(__serpent_enc_blk_4way)
 
 	write_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE);
 
-	ret;
+	RET;
 
 .L__enc_xor4:
 	xor_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE);
 
-	ret;
+	RET;
 SYM_FUNC_END(__serpent_enc_blk_4way)
 
 SYM_FUNC_START(serpent_dec_blk_4way)
@@ -612,5 +612,5 @@ SYM_FUNC_START(serpent_dec_blk_4way)
 	movl arg_dst(%esp), %eax;
 	write_blocks(%eax, RC, RD, RB, RE, RT0, RT1, RA);
 
-	ret;
+	RET;
 SYM_FUNC_END(serpent_dec_blk_4way)
@@ -675,13 +675,13 @@ SYM_FUNC_START(__serpent_enc_blk_8way)
 	write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
 	write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
 
-	ret;
+	RET;
 
 .L__enc_xor8:
 	xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
 	xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
 
-	ret;
+	RET;
 SYM_FUNC_END(__serpent_enc_blk_8way)
 
 SYM_FUNC_START(serpent_dec_blk_8way)
@@ -735,5 +735,5 @@ SYM_FUNC_START(serpent_dec_blk_8way)
 	write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
 	write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
 
-	ret;
+	RET;
 SYM_FUNC_END(serpent_dec_blk_8way)
@@ -674,7 +674,7 @@ _loop3:
 	pop	%r12
 	pop	%rbx
 
-	ret
+	RET
 
 	SYM_FUNC_END(\name)
 .endm
@@ -290,7 +290,7 @@ SYM_FUNC_START(sha1_ni_transform)
 .Ldone_hash:
 	mov		RSPSAVE, %rsp
 
-	ret
+	RET
 SYM_FUNC_END(sha1_ni_transform)
 
 .section	.rodata.cst16.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 16
@@ -99,7 +99,7 @@
 	pop	%rbp
 	pop	%r12
 	pop	%rbx
-	ret
+	RET
 
 	SYM_FUNC_END(\name)
 .endm
@@ -458,7 +458,7 @@ done_hash:
 	popq	%r13
 	popq	%r12
 	popq	%rbx
-	ret
+	RET
 SYM_FUNC_END(sha256_transform_avx)
 
 .section	.rodata.cst256.K256, "aM", @progbits, 256
@@ -711,7 +711,7 @@ done_hash:
 	popq	%r13
 	popq	%r12
 	popq	%rbx
-	ret
+	RET
 SYM_FUNC_END(sha256_transform_rorx)
 
 .section	.rodata.cst512.K256, "aM", @progbits, 512
@@ -472,7 +472,7 @@ done_hash:
 	popq	%r12
 	popq	%rbx
 
-	ret
+	RET
 SYM_FUNC_END(sha256_transform_ssse3)
 
 .section	.rodata.cst256.K256, "aM", @progbits, 256
@@ -326,7 +326,7 @@ SYM_FUNC_START(sha256_ni_transform)
 
 .Ldone_hash:
 
-	ret
+	RET
 SYM_FUNC_END(sha256_ni_transform)
 
 .section	.rodata.cst256.K256, "aM", @progbits, 256
@@ -364,7 +364,7 @@ updateblock:
 	mov	frame_RSPSAVE(%rsp), %rsp
 
 nowork:
-	ret
+	RET
 SYM_FUNC_END(sha512_transform_avx)
 
 ########################################################################
@@ -681,7 +681,7 @@ done_hash:
 
 	# Restore Stack Pointer
 	mov	frame_RSPSAVE(%rsp), %rsp
-	ret
+	RET
 SYM_FUNC_END(sha512_transform_rorx)
 
 ########################################################################
@@ -366,7 +366,7 @@ updateblock:
 	mov	frame_RSPSAVE(%rsp), %rsp
 
 nowork:
-	ret
+	RET
 SYM_FUNC_END(sha512_transform_ssse3)
 
 ########################################################################
@@ -272,7 +272,7 @@ SYM_FUNC_START_LOCAL(__twofish_enc_blk8)
 	outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
 	outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
 
-	ret;
+	RET;
 SYM_FUNC_END(__twofish_enc_blk8)
 
 	.align 8
@@ -312,7 +312,7 @@ SYM_FUNC_START_LOCAL(__twofish_dec_blk8)
 	outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
 	outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
 
-	ret;
+	RET;
 SYM_FUNC_END(__twofish_dec_blk8)
 
 SYM_FUNC_START(twofish_ecb_enc_8way)
@@ -332,7 +332,7 @@ SYM_FUNC_START(twofish_ecb_enc_8way)
 	store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
 
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(twofish_ecb_enc_8way)
 
 SYM_FUNC_START(twofish_ecb_dec_8way)
@@ -352,7 +352,7 @@ SYM_FUNC_START(twofish_ecb_dec_8way)
 	store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(twofish_ecb_dec_8way)
 
 SYM_FUNC_START(twofish_cbc_dec_8way)
@@ -377,7 +377,7 @@ SYM_FUNC_START(twofish_cbc_dec_8way)
 	popq %r12;
 
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(twofish_cbc_dec_8way)
 
 SYM_FUNC_START(twofish_ctr_8way)
@@ -404,7 +404,7 @@ SYM_FUNC_START(twofish_ctr_8way)
 	popq %r12;
 
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(twofish_ctr_8way)
 
 SYM_FUNC_START(twofish_xts_enc_8way)
@@ -428,7 +428,7 @@ SYM_FUNC_START(twofish_xts_enc_8way)
 	store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
 
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(twofish_xts_enc_8way)
 
 SYM_FUNC_START(twofish_xts_dec_8way)
@@ -452,5 +452,5 @@ SYM_FUNC_START(twofish_xts_dec_8way)
 	store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
 	FRAME_END
-	ret;
+	RET;
 SYM_FUNC_END(twofish_xts_dec_8way)
@@ -260,7 +260,7 @@ SYM_FUNC_START(twofish_enc_blk)
 	pop	%ebx
 	pop	%ebp
 	mov	$1,	%eax
-	ret
+	RET
 SYM_FUNC_END(twofish_enc_blk)
 
 SYM_FUNC_START(twofish_dec_blk)
@@ -317,5 +317,5 @@ SYM_FUNC_START(twofish_dec_blk)
 	pop	%ebx
 	pop	%ebp
 	mov	$1,	%eax
-	ret
+	RET
 SYM_FUNC_END(twofish_dec_blk)
@@ -258,7 +258,7 @@ SYM_FUNC_START(__twofish_enc_blk_3way)
 	popq %rbx;
 	popq %r12;
 	popq %r13;
-	ret;
+	RET;
 
 .L__enc_xor3:
 	outunpack_enc3(xor);
@@ -266,7 +266,7 @@ SYM_FUNC_START(__twofish_enc_blk_3way)
 	popq %rbx;
 	popq %r12;
 	popq %r13;
-	ret;
+	RET;
 SYM_FUNC_END(__twofish_enc_blk_3way)
 
 SYM_FUNC_START(twofish_dec_blk_3way)
@@ -301,5 +301,5 @@ SYM_FUNC_START(twofish_dec_blk_3way)
 	popq %rbx;
 	popq %r12;
 	popq %r13;
-	ret;
+	RET;
 SYM_FUNC_END(twofish_dec_blk_3way)
@@ -252,7 +252,7 @@ SYM_FUNC_START(twofish_enc_blk)
 
 	popq	R1
 	movl	$1,%eax
-	ret
+	RET
 SYM_FUNC_END(twofish_enc_blk)
 
 SYM_FUNC_START(twofish_dec_blk)
@@ -304,5 +304,5 @@ SYM_FUNC_START(twofish_dec_blk)
 
 	popq	R1
 	movl	$1,%eax
-	ret
+	RET
 SYM_FUNC_END(twofish_dec_blk)
@@ -21,7 +21,7 @@ CFLAGS_syscall_64.o += $(call cc-option,-Wno-override-init,)
 CFLAGS_syscall_32.o += $(call cc-option,-Wno-override-init,)
 CFLAGS_syscall_x32.o += $(call cc-option,-Wno-override-init,)
 
-obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
+obj-y := entry.o entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
 obj-y += common.o
 
 obj-y += vdso/
@@ -6,6 +6,8 @@
 #include <asm/percpu.h>
 #include <asm/asm-offsets.h>
 #include <asm/processor-flags.h>
+#include <asm/msr.h>
+#include <asm/nospec-branch.h>
 
 /*
 
@@ -146,27 +148,19 @@ For 32-bit we have the following conventions - kernel is built with
 
 .endm
 
-.macro POP_REGS pop_rdi=1 skip_r11rcx=0
+.macro POP_REGS pop_rdi=1
 	popq %r15
 	popq %r14
 	popq %r13
 	popq %r12
 	popq %rbp
 	popq %rbx
-	.if \skip_r11rcx
-	popq %rsi
-	.else
 	popq %r11
-	.endif
 	popq %r10
 	popq %r9
 	popq %r8
 	popq %rax
-	.if \skip_r11rcx
-	popq %rsi
-	.else
 	popq %rcx
-	.endif
 	popq %rdx
 	popq %rsi
 	.if \pop_rdi
@@ -316,6 +310,66 @@ For 32-bit we have the following conventions - kernel is built with
 
 #endif
 
+/*
+ * IBRS kernel mitigation for Spectre_v2.
+ *
+ * Assumes full context is established (PUSH_REGS, CR3 and GS) and it clobbers
+ * the regs it uses (AX, CX, DX). Must be called before the first RET
+ * instruction (NOTE! UNTRAIN_RET includes a RET instruction)
+ *
+ * The optional argument is used to save/restore the current value,
+ * which is used on the paranoid paths.
+ *
+ * Assumes x86_spec_ctrl_{base,current} to have SPEC_CTRL_IBRS set.
+ */
+.macro IBRS_ENTER save_reg
+#ifdef CONFIG_CPU_IBRS_ENTRY
+	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
+	movl	$MSR_IA32_SPEC_CTRL, %ecx
+
+.ifnb \save_reg
+	rdmsr
+	shl	$32, %rdx
+	or	%rdx, %rax
+	mov	%rax, \save_reg
+	test	$SPEC_CTRL_IBRS, %eax
+	jz	.Ldo_wrmsr_\@
+	lfence
+	jmp	.Lend_\@
+.Ldo_wrmsr_\@:
+.endif
+
+	movq	PER_CPU_VAR(x86_spec_ctrl_current), %rdx
+	movl	%edx, %eax
+	shr	$32, %rdx
+	wrmsr
+.Lend_\@:
+#endif
+.endm
+
+/*
+ * Similar to IBRS_ENTER, requires KERNEL GS,CR3 and clobbers (AX, CX, DX)
+ * regs. Must be called after the last RET.
+ */
+.macro IBRS_EXIT save_reg
+#ifdef CONFIG_CPU_IBRS_ENTRY
+	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
+	movl	$MSR_IA32_SPEC_CTRL, %ecx
+
+.ifnb \save_reg
+	mov	\save_reg, %rdx
+.else
+	movq	PER_CPU_VAR(x86_spec_ctrl_current), %rdx
+	andl	$(~SPEC_CTRL_IBRS), %edx
+.endif
+
+	movl	%edx, %eax
+	shr	$32, %rdx
+	wrmsr
+.Lend_\@:
+#endif
+.endm
+
 /*
  * Mitigate Spectre v1 for conditional swapgs code paths.
  *
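The POP_REGS skip_r11rcx special case above goes away because IBRS_EXIT now runs on the sysret path and clobbers %rax/%rcx/%rdx, so %r11 and %rcx have to be popped from pt_regs like every other register anyway. The two macros are meant to bracket each kernel entry/exit; a minimal usage sketch for the paranoid paths, where the old MSR value must survive the C handler in a callee-saved register (the surrounding code is hypothetical):

	/* entry side: stash old SPEC_CTRL in %r15, then switch IBRS on */
	IBRS_ENTER save_reg=%r15
	...				/* handler runs with IBRS set */
	/* exit side: write the stashed value back before the register pops */
	IBRS_EXIT save_reg=%r15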
arch/x86/entry/entry.S (new file, 22 lines)
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common place for both 32- and 64-bit entry routines.
+ */
+
+#include <linux/linkage.h>
+#include <asm/export.h>
+#include <asm/msr-index.h>
+
+.pushsection .noinstr.text, "ax"
+
+SYM_FUNC_START(entry_ibpb)
+	movl	$MSR_IA32_PRED_CMD, %ecx
+	movl	$PRED_CMD_IBPB, %eax
+	xorl	%edx, %edx
+	wrmsr
+	RET
+SYM_FUNC_END(entry_ibpb)
+/* For KVM */
+EXPORT_SYMBOL_GPL(entry_ibpb);
+
+.popsection
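entry_ibpb is not called directly from C; it is wired in as an alternative inside the UNTRAIN_RET macro, so CPUs that need a full branch-prediction barrier on kernel entry get an IBPB while others get the RSB-untraining call. A sketch of that hookup from <asm/nospec-branch.h> in this series (the exact spelling is quoted from memory and should be treated as an assumption):

	.macro UNTRAIN_RET
	#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY)
		ANNOTATE_UNRET_END
		ALTERNATIVE_2 "",					\
			"call zen_untrain_ret", X86_FEATURE_UNRET,	\
			"call entry_ibpb", X86_FEATURE_ENTRY_IBPB
	#endif
	.endm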
|
@@ -40,7 +40,7 @@
|
|||||||
#include <asm/processor-flags.h>
|
#include <asm/processor-flags.h>
|
||||||
#include <asm/irq_vectors.h>
|
#include <asm/irq_vectors.h>
|
||||||
#include <asm/cpufeatures.h>
|
#include <asm/cpufeatures.h>
|
||||||
#include <asm/alternative-asm.h>
|
#include <asm/alternative.h>
|
||||||
#include <asm/asm.h>
|
#include <asm/asm.h>
|
||||||
#include <asm/smap.h>
|
#include <asm/smap.h>
|
||||||
#include <asm/frame.h>
|
#include <asm/frame.h>
|
||||||
@@ -782,7 +782,6 @@ SYM_CODE_START(__switch_to_asm)
|
|||||||
movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
|
movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#ifdef CONFIG_RETPOLINE
|
|
||||||
/*
|
/*
|
||||||
* When switching from a shallower to a deeper call stack
|
* When switching from a shallower to a deeper call stack
|
||||||
* the RSB may either underflow or use entries populated
|
* the RSB may either underflow or use entries populated
|
||||||
@@ -791,7 +790,6 @@ SYM_CODE_START(__switch_to_asm)
|
|||||||
* speculative execution to prevent attack.
|
* speculative execution to prevent attack.
|
||||||
*/
|
*/
|
||||||
FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
|
FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
|
||||||
#endif
|
|
||||||
|
|
||||||
/* Restore flags or the incoming task to restore AC state. */
|
/* Restore flags or the incoming task to restore AC state. */
|
||||||
popfl
|
popfl
|
||||||
@@ -821,7 +819,7 @@ SYM_FUNC_START(schedule_tail_wrapper)
|
|||||||
popl %eax
|
popl %eax
|
||||||
|
|
||||||
FRAME_END
|
FRAME_END
|
||||||
ret
|
RET
|
||||||
SYM_FUNC_END(schedule_tail_wrapper)
|
SYM_FUNC_END(schedule_tail_wrapper)
|
||||||
.popsection
|
.popsection
|
||||||
|
|
||||||
|
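The dropped #ifdef CONFIG_RETPOLINE guards here (and in the 64-bit __switch_to_asm below) make the context-switch RSB stuffing unconditional: with retbleed, return-stack-buffer poisoning matters even on kernels built without retpolines, and the sequence is already a runtime no-op unless the feature bit is enabled via alternatives. Schematically:

	/* always assembled in; patched to the real RSB-stuffing loop
	 * only when X86_FEATURE_RSB_CTXSW is set at boot */
	FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW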
@@ -93,7 +93,7 @@ SYM_CODE_END(native_usergs_sysret64)
  */
 
 SYM_CODE_START(entry_SYSCALL_64)
-	UNWIND_HINT_EMPTY
+	UNWIND_HINT_ENTRY
 
 	swapgs
 	/* tss.sp2 is scratch space. */
@@ -117,6 +117,11 @@ SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
 	/* IRQs are off. */
 	movq	%rax, %rdi
 	movq	%rsp, %rsi
+
+	/* clobbers %rax, make sure it is after saving the syscall nr */
+	IBRS_ENTER
+	UNTRAIN_RET
+
 	call	do_syscall_64		/* returns with IRQs disabled */
 
 	/*
@@ -191,8 +196,8 @@ SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
 	 * perf profiles. Nothing jumps here.
 	 */
 syscall_return_via_sysret:
-	/* rcx and r11 are already restored (see code above) */
-	POP_REGS pop_rdi=0 skip_r11rcx=1
+	IBRS_EXIT
+	POP_REGS pop_rdi=0
 
 	/*
 	 * Now all regs are restored except RSP and RDI.
@@ -244,7 +249,6 @@ SYM_FUNC_START(__switch_to_asm)
 	movq	%rbx, PER_CPU_VAR(fixed_percpu_data) + stack_canary_offset
 #endif
 
-#ifdef CONFIG_RETPOLINE
 	/*
 	 * When switching from a shallower to a deeper call stack
 	 * the RSB may either underflow or use entries populated
@@ -253,7 +257,6 @@ SYM_FUNC_START(__switch_to_asm)
 	 * speculative execution to prevent attack.
 	 */
 	FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
-#endif
 
 	/* restore callee-saved registers */
 	popq	%r15
@@ -569,6 +572,7 @@ __irqentry_text_end:
 
 SYM_CODE_START_LOCAL(common_interrupt_return)
 SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
+	IBRS_EXIT
 #ifdef CONFIG_DEBUG_ENTRY
 	/* Assert that pt_regs indicates user mode. */
 	testb	$3, CS(%rsp)
@@ -676,6 +680,7 @@ native_irq_return_ldt:
 	pushq	%rdi				/* Stash user RDI */
 	swapgs					/* to kernel GS */
 	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi	/* to kernel CR3 */
+	UNTRAIN_RET
 
 	movq	PER_CPU_VAR(espfix_waddr), %rdi
 	movq	%rax, (0*8)(%rdi)		/* user RAX */
@@ -740,7 +745,7 @@ SYM_FUNC_START(asm_load_gs_index)
 2:	ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
 	swapgs
 	FRAME_END
-	ret
+	RET
 SYM_FUNC_END(asm_load_gs_index)
 EXPORT_SYMBOL(asm_load_gs_index)
 
@@ -799,7 +804,7 @@ SYM_INNER_LABEL(asm_call_irq_on_stack, SYM_L_GLOBAL)
 
 	/* Restore the previous stack pointer from RBP. */
 	leaveq
-	ret
+	RET
 SYM_FUNC_END(asm_call_on_stack)
 
 #ifdef CONFIG_XEN_PV
@@ -888,6 +893,9 @@ SYM_CODE_END(xen_failsafe_callback)
  *	1 -> no SWAPGS on exit
  *
  *	Y	GSBASE value at entry, must be restored in paranoid_exit
+ *
+ * R14 - old CR3
+ * R15 - old SPEC_CTRL
  */
 SYM_CODE_START_LOCAL(paranoid_entry)
 	UNWIND_HINT_FUNC
@@ -932,7 +940,7 @@ SYM_CODE_START_LOCAL(paranoid_entry)
 	 * is needed here.
 	 */
 	SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx
-	ret
+	jmp .Lparanoid_gsbase_done
 
 .Lparanoid_entry_checkgs:
 	/* EBX = 1 -> kernel GSBASE active, no restore required */
@@ -951,9 +959,17 @@ SYM_CODE_START_LOCAL(paranoid_entry)
 	xorl	%ebx, %ebx
 	swapgs
 .Lparanoid_kernel_gsbase:
 
 	FENCE_SWAPGS_KERNEL_ENTRY
-	ret
+.Lparanoid_gsbase_done:
+
+	/*
+	 * Once we have CR3 and %GS setup save and set SPEC_CTRL. Just like
+	 * CR3 above, keep the old value in a callee saved register.
+	 */
+	IBRS_ENTER save_reg=%r15
+	UNTRAIN_RET
+
+	RET
 SYM_CODE_END(paranoid_entry)
 
 /*
@@ -974,9 +990,19 @@ SYM_CODE_END(paranoid_entry)
  *	1 -> no SWAPGS on exit
  *
  *	Y	User space GSBASE, must be restored unconditionally
+ *
+ * R14 - old CR3
+ * R15 - old SPEC_CTRL
  */
 SYM_CODE_START_LOCAL(paranoid_exit)
 	UNWIND_HINT_REGS
+
+	/*
+	 * Must restore IBRS state before both CR3 and %GS since we need access
+	 * to the per-CPU x86_spec_ctrl_shadow variable.
+	 */
+	IBRS_EXIT save_reg=%r15
+
 	/*
 	 * The order of operations is important. RESTORE_CR3 requires
 	 * kernel GSBASE.
@@ -1023,8 +1049,11 @@ SYM_CODE_START_LOCAL(error_entry)
 	FENCE_SWAPGS_USER_ENTRY
 	/* We have user CR3. Change to kernel CR3. */
 	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
+	IBRS_ENTER
+	UNTRAIN_RET
+
 .Lerror_entry_from_usermode_after_swapgs:
 
 	/* Put us onto the real thread stack. */
 	popq	%r12				/* save return addr in %12 */
 	movq	%rsp, %rdi			/* arg0 = pt_regs pointer */
@@ -1032,7 +1061,7 @@ SYM_CODE_START_LOCAL(error_entry)
 	movq	%rax, %rsp			/* switch stack */
 	ENCODE_FRAME_POINTER
 	pushq	%r12
-	ret
+	RET
 
 	/*
 	 * There are two places in the kernel that can potentially fault with
@@ -1063,7 +1092,8 @@ SYM_CODE_START_LOCAL(error_entry)
 	 */
 .Lerror_entry_done_lfence:
 	FENCE_SWAPGS_KERNEL_ENTRY
-	ret
+	ANNOTATE_UNRET_END
+	RET
 
 .Lbstep_iret:
 	/* Fix truncated RIP */
@@ -1078,6 +1108,8 @@ SYM_CODE_START_LOCAL(error_entry)
 	SWAPGS
 	FENCE_SWAPGS_USER_ENTRY
 	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
+	IBRS_ENTER
+	UNTRAIN_RET
 
 	/*
 	 * Pretend that the exception came from user mode: set up pt_regs
@@ -1182,6 +1214,9 @@ SYM_CODE_START(asm_exc_nmi)
 	PUSH_AND_CLEAR_REGS rdx=(%rdx)
 	ENCODE_FRAME_POINTER
 
+	IBRS_ENTER
+	UNTRAIN_RET
+
 	/*
 	 * At this point we no longer need to worry about stack damage
 	 * due to nesting -- we're on the normal thread stack and we're
@@ -1404,6 +1439,9 @@ end_repeat_nmi:
 	movq	$-1, %rsi
 	call	exc_nmi
 
+	/* Always restore stashed SPEC_CTRL value (see paranoid_entry) */
+	IBRS_EXIT save_reg=%r15
+
 	/* Always restore stashed CR3 value (see paranoid_entry) */
 	RESTORE_CR3 scratch_reg=%r15 save_reg=%r14
 
@@ -4,7 +4,6 @@
  *
  * Copyright 2000-2002 Andi Kleen, SuSE Labs.
  */
-#include "calling.h"
 #include <asm/asm-offsets.h>
 #include <asm/current.h>
 #include <asm/errno.h>
@@ -14,9 +13,12 @@
 #include <asm/irqflags.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
+#include <asm/nospec-branch.h>
 #include <linux/linkage.h>
 #include <linux/err.h>
 
+#include "calling.h"
+
 	.section .entry.text, "ax"
 
 /*
@@ -47,7 +49,7 @@
  * 0(%ebp) arg6
  */
 SYM_CODE_START(entry_SYSENTER_compat)
-	UNWIND_HINT_EMPTY
+	UNWIND_HINT_ENTRY
 	/* Interrupts are off on entry. */
 	SWAPGS
 
@@ -112,6 +114,9 @@ SYM_INNER_LABEL(entry_SYSENTER_compat_after_hwframe, SYM_L_GLOBAL)
 
 	cld
 
+	IBRS_ENTER
+	UNTRAIN_RET
+
 	/*
 	 * SYSENTER doesn't filter flags, so we need to clear NT and AC
 	 * ourselves. To save a few cycles, we can check whether
@@ -197,7 +202,7 @@ SYM_CODE_END(entry_SYSENTER_compat)
  * 0(%esp) arg6
 */
 SYM_CODE_START(entry_SYSCALL_compat)
-	UNWIND_HINT_EMPTY
+	UNWIND_HINT_ENTRY
 	/* Interrupts are off on entry. */
 	swapgs
 
@@ -252,6 +257,9 @@ SYM_INNER_LABEL(entry_SYSCALL_compat_after_hwframe, SYM_L_GLOBAL)
 
 	UNWIND_HINT_REGS
 
+	IBRS_ENTER
+	UNTRAIN_RET
+
 	movq	%rsp, %rdi
 	call	do_fast_syscall_32
 	/* XEN PV guests always use IRET path */
@@ -266,6 +274,8 @@ sysret32_from_system_call:
 	 */
 	STACKLEAK_ERASE
 
+	IBRS_EXIT
+
 	movq	RBX(%rsp), %rbx		/* pt_regs->rbx */
 	movq	RBP(%rsp), %rbp		/* pt_regs->rbp */
 	movq	EFLAGS(%rsp), %r11	/* pt_regs->flags (in r11) */
@@ -339,7 +349,7 @@ SYM_CODE_END(entry_SYSCALL_compat)
  * ebp  arg6
 */
 SYM_CODE_START(entry_INT80_compat)
-	UNWIND_HINT_EMPTY
+	UNWIND_HINT_ENTRY
 	/*
 	 * Interrupts are off on entry.
 	 */
@@ -409,6 +419,9 @@ SYM_CODE_START(entry_INT80_compat)
 
 	cld
 
+	IBRS_ENTER
+	UNTRAIN_RET
+
 	movq	%rsp, %rdi
 	call	do_int80_syscall_32
 	jmp	swapgs_restore_regs_and_return_to_usermode
@@ -24,7 +24,7 @@ SYM_CODE_START_NOALIGN(\name)
 	popl %edx
 	popl %ecx
 	popl %eax
-	ret
+	RET
 	_ASM_NOKPROBE(\name)
 SYM_CODE_END(\name)
 	.endm
@@ -55,7 +55,7 @@ SYM_CODE_START_LOCAL_NOALIGN(__thunk_restore)
 	popq %rsi
 	popq %rdi
 	popq %rbp
-	ret
+	RET
 	_ASM_NOKPROBE(__thunk_restore)
 SYM_CODE_END(__thunk_restore)
 #endif
@@ -91,6 +91,7 @@ endif
 endif
 
 $(vobjs): KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO) $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
+$(vobjs): KBUILD_AFLAGS += -DBUILD_VDSO
 
 #
 # vDSO code runs in userspace and -pg doesn't help with profiling anyway.
@@ -6,7 +6,7 @@
 #include <linux/linkage.h>
 #include <asm/dwarf2.h>
 #include <asm/cpufeatures.h>
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
 
 	.text
 	.globl __kernel_vsyscall
@@ -78,7 +78,7 @@ SYM_INNER_LABEL(int80_landing_pad, SYM_L_GLOBAL)
 	popl	%ecx
 	CFI_RESTORE	ecx
 	CFI_ADJUST_CFA_OFFSET	-4
-	ret
+	RET
 	CFI_ENDPROC
 
 	.size __kernel_vsyscall,.-__kernel_vsyscall
@@ -20,16 +20,19 @@ __vsyscall_page:
 	mov $__NR_gettimeofday, %rax
 	syscall
 	ret
+	int3
 
 	.balign 1024, 0xcc
 	mov $__NR_time, %rax
 	syscall
 	ret
+	int3
 
 	.balign 1024, 0xcc
 	mov $__NR_getcpu, %rax
 	syscall
 	ret
+	int3
 
 	.balign 4096, 0xcc
 
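The vsyscall page is user-mapped ABI at a fixed address, so its returns cannot be rewritten into the kernel's return thunk; each `ret` keeps its plain form and instead gains a trailing `int3` as a manual straight-line-speculation stop:

	syscall
	ret	/* architectural return to the caller */
	int3	/* a CPU speculating straight past the ret hits a trap */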
@@ -1,11 +1,16 @@
|
|||||||
|
/* SPDX-License-Identifier: GPL-2.0 */
|
||||||
|
/*
|
||||||
|
* These are in machine order; things rely on that.
|
||||||
|
*/
|
||||||
#ifdef CONFIG_64BIT
|
#ifdef CONFIG_64BIT
|
||||||
GEN(rax)
|
GEN(rax)
|
||||||
GEN(rbx)
|
|
||||||
GEN(rcx)
|
GEN(rcx)
|
||||||
GEN(rdx)
|
GEN(rdx)
|
||||||
|
GEN(rbx)
|
||||||
|
GEN(rsp)
|
||||||
|
GEN(rbp)
|
||||||
GEN(rsi)
|
GEN(rsi)
|
||||||
GEN(rdi)
|
GEN(rdi)
|
||||||
GEN(rbp)
|
|
||||||
GEN(r8)
|
GEN(r8)
|
||||||
GEN(r9)
|
GEN(r9)
|
||||||
GEN(r10)
|
GEN(r10)
|
||||||
@@ -16,10 +21,11 @@ GEN(r14)
|
|||||||
GEN(r15)
|
GEN(r15)
|
||||||
#else
|
#else
|
||||||
GEN(eax)
|
GEN(eax)
|
||||||
GEN(ebx)
|
|
||||||
GEN(ecx)
|
GEN(ecx)
|
||||||
GEN(edx)
|
GEN(edx)
|
||||||
|
GEN(ebx)
|
||||||
|
GEN(esp)
|
||||||
|
GEN(ebp)
|
||||||
GEN(esi)
|
GEN(esi)
|
||||||
GEN(edi)
|
GEN(edi)
|
||||||
GEN(ebp)
|
|
||||||
#endif
|
#endif
|
||||||
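The "machine order" the new header comment insists on is the x86 hardware register encoding: rax=0 through rdi=7, then r8 to r15. Later code in this series (patch_retpoline(), further down) relies on exactly that ordering to turn an offset into the thunk array back into a ModRM register number. A minimal C sketch of the mapping, illustrative only:

	/* Sketch: the "machine order" above is the hardware register
	 * encoding, which is what lets patch_retpoline() recover a ModRM
	 * register number from an index into the thunk array. */
	enum x86_reg_encoding {
		REG_RAX = 0, REG_RCX = 1, REG_RDX = 2, REG_RBX = 3,
		REG_RSP = 4, REG_RBP = 5, REG_RSI = 6, REG_RDI = 7,
		REG_R8  = 8, /* ... through REG_R15 = 15 */
	};
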
@@ -1,114 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_ALTERNATIVE_ASM_H
-#define _ASM_X86_ALTERNATIVE_ASM_H
-
-#ifdef __ASSEMBLY__
-
-#include <asm/asm.h>
-
-#ifdef CONFIG_SMP
-.macro LOCK_PREFIX
-672:	lock
-	.pushsection .smp_locks,"a"
-	.balign 4
-	.long 672b - .
-	.popsection
-.endm
-#else
-.macro LOCK_PREFIX
-.endm
-#endif
-
-/*
- * objtool annotation to ignore the alternatives and only consider the original
- * instruction(s).
- */
-.macro ANNOTATE_IGNORE_ALTERNATIVE
-.Lannotate_\@:
-	.pushsection .discard.ignore_alts
-	.long .Lannotate_\@ - .
-	.popsection
-.endm
-
-/*
- * Issue one struct alt_instr descriptor entry (need to put it into
- * the section .altinstructions, see below). This entry contains
- * enough information for the alternatives patching code to patch an
- * instruction. See apply_alternatives().
- */
-.macro altinstruction_entry orig alt feature orig_len alt_len pad_len
-	.long \orig - .
-	.long \alt - .
-	.word \feature
-	.byte \orig_len
-	.byte \alt_len
-	.byte \pad_len
-.endm
-
-/*
- * Define an alternative between two instructions. If @feature is
- * present, early code in apply_alternatives() replaces @oldinstr with
- * @newinstr. ".skip" directive takes care of proper instruction padding
- * in case @newinstr is longer than @oldinstr.
- */
-.macro ALTERNATIVE oldinstr, newinstr, feature
-140:
-	\oldinstr
-141:
-	.skip -(((144f-143f)-(141b-140b)) > 0) * ((144f-143f)-(141b-140b)),0x90
-142:
-
-	.pushsection .altinstructions,"a"
-	altinstruction_entry 140b,143f,\feature,142b-140b,144f-143f,142b-141b
-	.popsection
-
-	.pushsection .altinstr_replacement,"ax"
-143:
-	\newinstr
-144:
-	.popsection
-.endm
-
-#define old_len			141b-140b
-#define new_len1		144f-143f
-#define new_len2		145f-144f
-
-/*
- * gas compatible max based on the idea from:
- * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
- *
- * The additional "-" is needed because gas uses a "true" value of -1.
- */
-#define alt_max_short(a, b)	((a) ^ (((a) ^ (b)) & -(-((a) < (b)))))
-
-
-/*
- * Same as ALTERNATIVE macro above but for two alternatives. If CPU
- * has @feature1, it replaces @oldinstr with @newinstr1. If CPU has
- * @feature2, it replaces @oldinstr with @feature2.
- */
-.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
-140:
-	\oldinstr
-141:
-	.skip -((alt_max_short(new_len1, new_len2) - (old_len)) > 0) * \
-		(alt_max_short(new_len1, new_len2) - (old_len)),0x90
-142:
-
-	.pushsection .altinstructions,"a"
-	altinstruction_entry 140b,143f,\feature1,142b-140b,144f-143f,142b-141b
-	altinstruction_entry 140b,144f,\feature2,142b-140b,145f-144f,142b-141b
-	.popsection
-
-	.pushsection .altinstr_replacement,"ax"
-143:
-	\newinstr1
-144:
-	\newinstr2
-145:
-	.popsection
-.endm
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_X86_ALTERNATIVE_ASM_H */

@@ -2,13 +2,17 @@
 #ifndef _ASM_X86_ALTERNATIVE_H
 #define _ASM_X86_ALTERNATIVE_H
 
-#ifndef __ASSEMBLY__
-
 #include <linux/types.h>
-#include <linux/stddef.h>
 #include <linux/stringify.h>
 #include <asm/asm.h>
 
+#define ALTINSTR_FLAG_INV	(1 << 15)
+#define ALT_NOT(feat)		((feat) | ALTINSTR_FLAG_INV)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/stddef.h>
+
 /*
  * Alternative inline assembly for SMP.
  *

@@ -61,7 +65,6 @@ struct alt_instr {
 	u16 cpuid;		/* cpuid bit set for replacement */
 	u8  instrlen;		/* length of original instruction */
 	u8  replacementlen;	/* length of new instruction */
-	u8  padlen;		/* length of build-time padding */
 } __packed;
 
 /*

@@ -72,6 +75,8 @@ extern int alternatives_patched;
 
 extern void alternative_instructions(void);
 extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
+extern void apply_retpolines(s32 *start, s32 *end);
+extern void apply_returns(s32 *start, s32 *end);
 
 struct module;
 

@@ -100,7 +105,6 @@ static inline int alternatives_text_reserved(void *start, void *end)
 
 #define alt_end_marker		"663"
 #define alt_slen		"662b-661b"
-#define alt_pad_len		alt_end_marker"b-662b"
 #define alt_total_slen		alt_end_marker"b-661b"
 #define alt_rlen(num)		e_replacement(num)"f-"b_replacement(num)"f"
 

@@ -147,8 +151,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
 	" .long " b_replacement(num)"f - .\n"		/* new instruction */	\
 	" .word " __stringify(feature) "\n"		/* feature bit */	\
 	" .byte " alt_total_slen "\n"			/* source len */	\
-	" .byte " alt_rlen(num) "\n"			/* replacement len */	\
-	" .byte " alt_pad_len "\n"			/* pad len */
+	" .byte " alt_rlen(num) "\n"			/* replacement len */
 
 #define ALTINSTR_REPLACEMENT(newinstr, feature, num)	/* replacement */	\
 	"# ALT: replacement " #num "\n"						\

@@ -175,6 +178,11 @@ static inline int alternatives_text_reserved(void *start, void *end)
 	ALTINSTR_REPLACEMENT(newinstr2, feature2, 2)			\
 	".popsection\n"
 
+/* If @feature is set, patch in @newinstr_yes, otherwise @newinstr_no. */
+#define ALTERNATIVE_TERNARY(oldinstr, feature, newinstr_yes, newinstr_no) \
+	ALTERNATIVE_2(oldinstr, newinstr_no, X86_FEATURE_ALWAYS,	\
+		      newinstr_yes, feature)
+
 #define ALTERNATIVE_3(oldinsn, newinsn1, feat1, newinsn2, feat2, newinsn3, feat3) \
 	OLDINSTR_3(oldinsn, 1, 2, 3)					\
 	".pushsection .altinstructions,\"a\"\n"				\

@@ -206,15 +214,15 @@ static inline int alternatives_text_reserved(void *start, void *end)
 #define alternative_2(oldinstr, newinstr1, feature1, newinstr2, feature2) \
 	asm_inline volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2) ::: "memory")
 
+#define alternative_ternary(oldinstr, feature, newinstr_yes, newinstr_no) \
+	asm_inline volatile(ALTERNATIVE_TERNARY(oldinstr, feature, newinstr_yes, newinstr_no) ::: "memory")
+
 /*
  * Alternative inline assembly with input.
  *
  * Peculiarities:
  * No memory clobber here.
  * Argument numbers start with 1.
- * Best is to use constraints that are fixed size (like (%1) ... "r")
- * If you use variable sized constraints like "m" or "g" in the
- * replacement make sure to pad to the worst case length.
 * Leaving an unused argument 0 to keep API compatibility.
  */
 #define alternative_input(oldinstr, newinstr, feature, input...)	\

@@ -271,6 +279,115 @@ static inline int alternatives_text_reserved(void *start, void *end)
  */
 #define ASM_NO_INPUT_CLOBBER(clbr...) "i" (0) : clbr
 
+#else /* __ASSEMBLY__ */
+
+#ifdef CONFIG_SMP
+.macro LOCK_PREFIX
+672:	lock
+	.pushsection .smp_locks,"a"
+	.balign 4
+	.long 672b - .
+	.popsection
+.endm
+#else
+.macro LOCK_PREFIX
+.endm
+#endif
+
+/*
+ * objtool annotation to ignore the alternatives and only consider the original
+ * instruction(s).
+ */
+.macro ANNOTATE_IGNORE_ALTERNATIVE
+.Lannotate_\@:
+	.pushsection .discard.ignore_alts
+	.long .Lannotate_\@ - .
+	.popsection
+.endm
+
+/*
+ * Issue one struct alt_instr descriptor entry (need to put it into
+ * the section .altinstructions, see below). This entry contains
+ * enough information for the alternatives patching code to patch an
+ * instruction. See apply_alternatives().
+ */
+.macro altinstruction_entry orig alt feature orig_len alt_len
+	.long \orig - .
+	.long \alt - .
+	.word \feature
+	.byte \orig_len
+	.byte \alt_len
+.endm
+
+/*
+ * Define an alternative between two instructions. If @feature is
+ * present, early code in apply_alternatives() replaces @oldinstr with
+ * @newinstr. ".skip" directive takes care of proper instruction padding
+ * in case @newinstr is longer than @oldinstr.
+ */
+.macro ALTERNATIVE oldinstr, newinstr, feature
+140:
+	\oldinstr
+141:
+	.skip -(((144f-143f)-(141b-140b)) > 0) * ((144f-143f)-(141b-140b)),0x90
+142:
+
+	.pushsection .altinstructions,"a"
+	altinstruction_entry 140b,143f,\feature,142b-140b,144f-143f
+	.popsection
+
+	.pushsection .altinstr_replacement,"ax"
+143:
+	\newinstr
+144:
+	.popsection
+.endm
+
+#define old_len			141b-140b
+#define new_len1		144f-143f
+#define new_len2		145f-144f
+
+/*
+ * gas compatible max based on the idea from:
+ * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
+ *
+ * The additional "-" is needed because gas uses a "true" value of -1.
+ */
+#define alt_max_short(a, b)	((a) ^ (((a) ^ (b)) & -(-((a) < (b)))))
+
+
+/*
+ * Same as ALTERNATIVE macro above but for two alternatives. If CPU
+ * has @feature1, it replaces @oldinstr with @newinstr1. If CPU has
+ * @feature2, it replaces @oldinstr with @feature2.
+ */
+.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
+140:
+	\oldinstr
+141:
+	.skip -((alt_max_short(new_len1, new_len2) - (old_len)) > 0) * \
+		(alt_max_short(new_len1, new_len2) - (old_len)),0x90
+142:
+
+	.pushsection .altinstructions,"a"
+	altinstruction_entry 140b,143f,\feature1,142b-140b,144f-143f
+	altinstruction_entry 140b,144f,\feature2,142b-140b,145f-144f
+	.popsection
+
+	.pushsection .altinstr_replacement,"ax"
+143:
+	\newinstr1
+144:
+	\newinstr2
+145:
+	.popsection
+.endm
+
+/* If @feature is set, patch in @newinstr_yes, otherwise @newinstr_no. */
+#define ALTERNATIVE_TERNARY(oldinstr, feature, newinstr_yes, newinstr_no) \
+	ALTERNATIVE_2 oldinstr, newinstr_no, X86_FEATURE_ALWAYS,	\
+	newinstr_yes, feature
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_X86_ALTERNATIVE_H */
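The alt_max_short() macro carried over above computes max(a, b) without a branch, using the XOR/AND-mask bithack its comment links to. A standalone C sketch of the same trick; in C the comparison yields 0 or 1, so a single negation builds the all-ones mask, while gas yields -1 for true, which is why the macro carries the extra minus sign:

	#include <assert.h>

	/* Branchless max, as in alt_max_short(): when a < b the mask is all
	 * ones and a ^ (a ^ b) == b; otherwise the mask is zero and the
	 * expression collapses to a. */
	static long max_branchless(long a, long b)
	{
		long mask = -(long)(a < b);	/* 0 or -1 (all ones) */
		return a ^ ((a ^ b) & mask);
	}

	int main(void)
	{
		assert(max_branchless(3, 7) == 7);
		assert(max_branchless(7, 3) == 7);
		assert(max_branchless(-2, 5) == 5);
		return 0;
	}
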
@@ -17,20 +17,3 @@
 extern void cmpxchg8b_emu(void);
 #endif
 
-#ifdef CONFIG_RETPOLINE
-
-#define DECL_INDIRECT_THUNK(reg) \
-	extern asmlinkage void __x86_indirect_thunk_ ## reg (void);
-
-#define DECL_RETPOLINE(reg) \
-	extern asmlinkage void __x86_retpoline_ ## reg (void);
-
-#undef GEN
-#define GEN(reg) DECL_INDIRECT_THUNK(reg)
-#include <asm/GEN-for-each-reg.h>
-
-#undef GEN
-#define GEN(reg) DECL_RETPOLINE(reg)
-#include <asm/GEN-for-each-reg.h>
-
-#endif /* CONFIG_RETPOLINE */

@@ -8,6 +8,7 @@
 
 #include <asm/asm.h>
 #include <linux/bitops.h>
+#include <asm/alternative.h>
 
 enum cpuid_leafs
 {

@@ -172,39 +173,15 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
  */
 static __always_inline bool _static_cpu_has(u16 bit)
 {
-	asm_volatile_goto("1: jmp 6f\n"
-		 "2:\n"
-		 ".skip -(((5f-4f) - (2b-1b)) > 0) * "
-			 "((5f-4f) - (2b-1b)),0x90\n"
-		 "3:\n"
-		 ".section .altinstructions,\"a\"\n"
-		 " .long 1b - .\n"		/* src offset */
-		 " .long 4f - .\n"		/* repl offset */
-		 " .word %P[always]\n"		/* always replace */
-		 " .byte 3b - 1b\n"		/* src len */
-		 " .byte 5f - 4f\n"		/* repl len */
-		 " .byte 3b - 2b\n"		/* pad len */
-		 ".previous\n"
-		 ".section .altinstr_replacement,\"ax\"\n"
-		 "4: jmp %l[t_no]\n"
-		 "5:\n"
-		 ".previous\n"
-		 ".section .altinstructions,\"a\"\n"
-		 " .long 1b - .\n"		/* src offset */
-		 " .long 0\n"			/* no replacement */
-		 " .word %P[feature]\n"		/* feature bit */
-		 " .byte 3b - 1b\n"		/* src len */
-		 " .byte 0\n"			/* repl len */
-		 " .byte 0\n"			/* pad len */
-		 ".previous\n"
-		 ".section .altinstr_aux,\"ax\"\n"
-		 "6:\n"
-		 " testb %[bitnum],%[cap_byte]\n"
-		 " jnz %l[t_yes]\n"
-		 " jmp %l[t_no]\n"
-		 ".previous\n"
+	asm_volatile_goto(
+		ALTERNATIVE_TERNARY("jmp 6f", %P[feature], "", "jmp %l[t_no]")
+		".section .altinstr_aux,\"ax\"\n"
+		"6:\n"
+		" testb %[bitnum],%[cap_byte]\n"
+		" jnz %l[t_yes]\n"
+		" jmp %l[t_no]\n"
+		".previous\n"
 		 : : [feature]  "i" (bit),
-		     [always]   "i" (X86_FEATURE_ALWAYS),
 		     [bitnum]   "i" (1 << (bit & 7)),
 		     [cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3])
 		 : : t_yes, t_no);

@@ -203,8 +203,8 @@
 #define X86_FEATURE_PROC_FEEDBACK	( 7*32+ 9) /* AMD ProcFeedbackInterface */
 #define X86_FEATURE_SME			( 7*32+10) /* AMD Secure Memory Encryption */
 #define X86_FEATURE_PTI			( 7*32+11) /* Kernel Page Table Isolation enabled */
-#define X86_FEATURE_RETPOLINE		( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
-#define X86_FEATURE_RETPOLINE_LFENCE	( 7*32+13) /* "" Use LFENCE for Spectre variant 2 */
+#define X86_FEATURE_KERNEL_IBRS		( 7*32+12) /* "" Set/clear IBRS on kernel entry/exit */
+#define X86_FEATURE_RSB_VMEXIT		( 7*32+13) /* "" Fill RSB on VM-Exit */
 #define X86_FEATURE_INTEL_PPIN		( 7*32+14) /* Intel Processor Inventory Number */
 #define X86_FEATURE_CDP_L2		( 7*32+15) /* Code and Data Prioritization L2 */
 #define X86_FEATURE_MSR_SPEC_CTRL	( 7*32+16) /* "" MSR SPEC_CTRL is implemented */

@@ -290,6 +290,14 @@
 #define X86_FEATURE_FENCE_SWAPGS_KERNEL	(11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */
 #define X86_FEATURE_SPLIT_LOCK_DETECT	(11*32+ 6) /* #AC for split lock */
 #define X86_FEATURE_PER_THREAD_MBA	(11*32+ 7) /* "" Per-thread Memory Bandwidth Allocation */
+/* FREE!				(11*32+ 8) */
+/* FREE!				(11*32+ 9) */
+#define X86_FEATURE_ENTRY_IBPB		(11*32+10) /* "" Issue an IBPB on kernel entry */
+#define X86_FEATURE_RRSBA_CTRL		(11*32+11) /* "" RET prediction control */
+#define X86_FEATURE_RETPOLINE		(11*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE_LFENCE	(11*32+13) /* "" Use LFENCE for Spectre variant 2 */
+#define X86_FEATURE_RETHUNK		(11*32+14) /* "" Use REturn THUNK */
+#define X86_FEATURE_UNRET		(11*32+15) /* "" AMD BTB untrain return */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
 #define X86_FEATURE_AVX512_BF16		(12*32+ 5) /* AVX512 BFLOAT16 instructions */

@@ -308,6 +316,7 @@
 #define X86_FEATURE_AMD_SSBD		(13*32+24) /* "" Speculative Store Bypass Disable */
 #define X86_FEATURE_VIRT_SSBD		(13*32+25) /* Virtualized Speculative Store Bypass Disable */
 #define X86_FEATURE_AMD_SSB_NO		(13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
+#define X86_FEATURE_BTC_NO		(13*32+29) /* "" Not vulnerable to Branch Type Confusion */
 
 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
 #define X86_FEATURE_DTHERM		(14*32+ 0) /* Digital Thermal Sensor */

@@ -418,5 +427,6 @@
 #define X86_BUG_ITLB_MULTIHIT		X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
 #define X86_BUG_SRBDS			X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
 #define X86_BUG_MMIO_STALE_DATA		X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
+#define X86_BUG_RETBLEED		X86_BUG(26) /* CPU is affected by RETBleed */
 
 #endif /* _ASM_X86_CPUFEATURES_H */

@@ -56,6 +56,25 @@
 # define DISABLE_PTI		(1 << (X86_FEATURE_PTI & 31))
 #endif
 
+#ifdef CONFIG_RETPOLINE
+# define DISABLE_RETPOLINE	0
+#else
+# define DISABLE_RETPOLINE	((1 << (X86_FEATURE_RETPOLINE & 31)) | \
+				 (1 << (X86_FEATURE_RETPOLINE_LFENCE & 31)))
+#endif
+
+#ifdef CONFIG_RETHUNK
+# define DISABLE_RETHUNK	0
+#else
+# define DISABLE_RETHUNK	(1 << (X86_FEATURE_RETHUNK & 31))
+#endif
+
+#ifdef CONFIG_CPU_UNRET_ENTRY
+# define DISABLE_UNRET		0
+#else
+# define DISABLE_UNRET		(1 << (X86_FEATURE_UNRET & 31))
+#endif
+
 /* Force disable because it's broken beyond repair */
 #define DISABLE_ENQCMD		(1 << (X86_FEATURE_ENQCMD & 31))
 

@@ -73,7 +92,7 @@
 #define DISABLED_MASK8	0
 #define DISABLED_MASK9	(DISABLE_SMAP)
 #define DISABLED_MASK10	0
-#define DISABLED_MASK11	0
+#define DISABLED_MASK11	(DISABLE_RETPOLINE|DISABLE_RETHUNK|DISABLE_UNRET)
 #define DISABLED_MASK12	0
 #define DISABLED_MASK13	0
 #define DISABLED_MASK14	0
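Every X86_FEATURE_* and DISABLE_* definition above leans on the same packing: a feature number is word*32+bit, so shifting right by 5 recovers the word and masking with 31 recovers the bit inside that word. A small self-contained check of that arithmetic for the newly added RETHUNK bit:

	#include <stdio.h>

	#define X86_FEATURE_RETHUNK	(11*32 + 14)	/* word 11, bit 14, as added above */

	int main(void)
	{
		unsigned int feature = X86_FEATURE_RETHUNK;

		/* word = feature >> 5, bit = feature & 31 */
		printf("word %u, bit %u, mask 0x%08x\n",
		       feature >> 5, feature & 31, 1u << (feature & 31));
		/* prints: word 11, bit 14, mask 0x00004000 */
		return 0;
	}
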
@@ -6,7 +6,7 @@
  *
  * Written by Masami Hiramatsu <mhiramat@redhat.com>
  */
-#include <asm/inat_types.h>
+#include <asm/inat_types.h> /* __ignore_sync_check__ */
 
 /*
  * Internal bits. Don't use bitmasks directly, because these bits are

@@ -26,7 +26,7 @@ int insn_fetch_from_user(struct pt_regs *regs,
 			 unsigned char buf[MAX_INSN_SIZE]);
 int insn_fetch_from_user_inatomic(struct pt_regs *regs,
 				  unsigned char buf[MAX_INSN_SIZE]);
-bool insn_decode(struct insn *insn, struct pt_regs *regs,
-		 unsigned char buf[MAX_INSN_SIZE], int buf_size);
+bool insn_decode_from_regs(struct insn *insn, struct pt_regs *regs,
+			   unsigned char buf[MAX_INSN_SIZE], int buf_size);
 
 #endif /* _ASM_X86_INSN_EVAL_H */

@@ -8,7 +8,7 @@
  */
 
 /* insn_attr_t is defined in inat.h */
-#include <asm/inat.h>
+#include <asm/inat.h> /* __ignore_sync_check__ */
 
 struct insn_field {
 	union {

@@ -87,13 +87,25 @@ struct insn {
 #define X86_VEX_M_MAX	0x1f /* VEX3.M Maximum value */
 
 extern void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64);
-extern void insn_get_prefixes(struct insn *insn);
-extern void insn_get_opcode(struct insn *insn);
-extern void insn_get_modrm(struct insn *insn);
-extern void insn_get_sib(struct insn *insn);
-extern void insn_get_displacement(struct insn *insn);
-extern void insn_get_immediate(struct insn *insn);
-extern void insn_get_length(struct insn *insn);
+extern int insn_get_prefixes(struct insn *insn);
+extern int insn_get_opcode(struct insn *insn);
+extern int insn_get_modrm(struct insn *insn);
+extern int insn_get_sib(struct insn *insn);
+extern int insn_get_displacement(struct insn *insn);
+extern int insn_get_immediate(struct insn *insn);
+extern int insn_get_length(struct insn *insn);
 
+enum insn_mode {
+	INSN_MODE_32,
+	INSN_MODE_64,
+	/* Mode is determined by the current kernel build. */
+	INSN_MODE_KERN,
+	INSN_NUM_MODES,
+};
+
+extern int insn_decode(struct insn *insn, const void *kaddr, int buf_len, enum insn_mode m);
+
+#define insn_decode_kernel(_insn, _ptr) insn_decode((_insn), (_ptr), MAX_INSN_SIZE, INSN_MODE_KERN)
+
 /* Attribute will be determined after getting ModRM (for opcode groups) */
 static inline void insn_get_attribute(struct insn *insn)
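The new insn_decode() entry point returns a status instead of leaving callers to probe decoder state afterwards, and insn_decode_kernel() is the wrapper for decoding kernel-resident text at the running kernel's bitness. A sketch of the call pattern, mirroring how apply_retpolines() below consumes it (kernel-internal API, a sketch rather than standalone code):

	/* sketch; assumes <asm/insn.h> as patched above */
	static int decode_one(void *addr)
	{
		struct insn insn;
		int ret;

		/* INSN_MODE_KERN: decode for the running kernel's bitness */
		ret = insn_decode_kernel(&insn, addr);
		if (ret < 0)
			return ret;	/* decode failed; insn fields are not valid */

		return insn.length;	/* e.g. 5 for a CALL rel32 */
	}
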
@@ -18,6 +18,28 @@
 #define __ALIGN_STR	__stringify(__ALIGN)
 #endif
 
+#if defined(CONFIG_RETHUNK) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
+#define RET	jmp __x86_return_thunk
+#else /* CONFIG_RETPOLINE */
+#ifdef CONFIG_SLS
+#define RET	ret; int3
+#else
+#define RET	ret
+#endif
+#endif /* CONFIG_RETPOLINE */
+
+#else /* __ASSEMBLY__ */
+
+#if defined(CONFIG_RETHUNK) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
+#define ASM_RET	"jmp __x86_return_thunk\n\t"
+#else /* CONFIG_RETPOLINE */
+#ifdef CONFIG_SLS
+#define ASM_RET	"ret; int3\n\t"
+#else
+#define ASM_RET	"ret\n\t"
+#endif
+#endif /* CONFIG_RETPOLINE */
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_X86_LINKAGE_H */
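With CONFIG_SLS every return site becomes `ret; int3`, so straight-line speculation past the `ret` lands on a trapping instruction, and with CONFIG_RETHUNK it becomes a jump to __x86_return_thunk. C files that hand-roll functions in toplevel asm pick the right form up through ASM_RET, as the paravirt and qspinlock hunks further down do. A hypothetical stub, for illustration only:

	/* hypothetical stub showing ASM_RET in toplevel inline asm;
	 * ASM_RET expands to "ret\n\t", "ret; int3\n\t" or
	 * "jmp __x86_return_thunk\n\t" depending on configuration */
	asm(".pushsection .text\n"
	    ".global demo_return_zero\n"
	    "demo_return_zero:\n"
	    "	xor %eax, %eax\n"
	    ASM_RET
	    ".popsection");

	extern int demo_return_zero(void);
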
@@ -51,6 +51,8 @@
 #define SPEC_CTRL_STIBP			BIT(SPEC_CTRL_STIBP_SHIFT)	/* STIBP mask */
 #define SPEC_CTRL_SSBD_SHIFT		2	   /* Speculative Store Bypass Disable bit */
 #define SPEC_CTRL_SSBD			BIT(SPEC_CTRL_SSBD_SHIFT)	/* Speculative Store Bypass Disable */
+#define SPEC_CTRL_RRSBA_DIS_S_SHIFT	6	   /* Disable RRSBA behavior */
+#define SPEC_CTRL_RRSBA_DIS_S		BIT(SPEC_CTRL_RRSBA_DIS_S_SHIFT)
 
 #define MSR_IA32_PRED_CMD		0x00000049 /* Prediction Command */
 #define PRED_CMD_IBPB			BIT(0)	   /* Indirect Branch Prediction Barrier */

@@ -91,6 +93,7 @@
 #define MSR_IA32_ARCH_CAPABILITIES	0x0000010a
 #define ARCH_CAP_RDCL_NO		BIT(0)	/* Not susceptible to Meltdown */
 #define ARCH_CAP_IBRS_ALL		BIT(1)	/* Enhanced IBRS support */
+#define ARCH_CAP_RSBA			BIT(2)	/* RET may use alternative branch predictors */
 #define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH	BIT(3)	/* Skip L1D flush on vmentry */
 #define ARCH_CAP_SSB_NO			BIT(4)	/*
 						 * Not susceptible to Speculative Store Bypass

@@ -138,6 +141,13 @@
 						 * bit available to control VERW
 						 * behavior.
 						 */
+#define ARCH_CAP_RRSBA			BIT(19)	/*
+						 * Indicates RET may use predictors
+						 * other than the RSB. With eIBRS
+						 * enabled predictions in kernel mode
+						 * are restricted to targets in
+						 * kernel.
+						 */
+
 #define MSR_IA32_FLUSH_CMD		0x0000010b
 #define L1D_FLUSH			BIT(0)	/*

@@ -507,6 +517,9 @@
 /* Fam 17h MSRs */
 #define MSR_F17H_IRPERF			0xc00000e9
 
+#define MSR_ZEN2_SPECTRAL_CHICKEN	0xc00110e3
+#define MSR_ZEN2_SPECTRAL_CHICKEN_BIT	BIT_ULL(1)
+
 /* Fam 16h MSRs */
 #define MSR_F16H_L2I_PERF_CTL		0xc0010230
 #define MSR_F16H_L2I_PERF_CTR		0xc0010231

@@ -5,12 +5,15 @@
 
 #include <linux/static_key.h>
 #include <linux/objtool.h>
+#include <linux/linkage.h>
 
 #include <asm/alternative.h>
-#include <asm/alternative-asm.h>
 #include <asm/cpufeatures.h>
 #include <asm/msr-index.h>
 #include <asm/unwind_hints.h>
+#include <asm/percpu.h>
+
+#define RETPOLINE_THUNK_SIZE	32
 
 /*
  * Fill the CPU return stack buffer.

@@ -73,6 +76,23 @@
 	.popsection
 .endm
 
+/*
+ * (ab)use RETPOLINE_SAFE on RET to annotate away 'bare' RET instructions
+ * vs RETBleed validation.
+ */
+#define ANNOTATE_UNRET_SAFE ANNOTATE_RETPOLINE_SAFE
+
+/*
+ * Abuse ANNOTATE_RETPOLINE_SAFE on a NOP to indicate UNRET_END, should
+ * eventually turn into it's own annotation.
+ */
+.macro ANNOTATE_UNRET_END
+#ifdef CONFIG_DEBUG_ENTRY
+	ANNOTATE_RETPOLINE_SAFE
+	nop
+#endif
+.endm
+
 /*
  * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
  * indirect jmp/call which may be susceptible to the Spectre variant 2

@@ -81,7 +101,7 @@
 .macro JMP_NOSPEC reg:req
 #ifdef CONFIG_RETPOLINE
 	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \
-		      __stringify(jmp __x86_retpoline_\reg), X86_FEATURE_RETPOLINE, \
+		      __stringify(jmp __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \
 		      __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), X86_FEATURE_RETPOLINE_LFENCE
 #else
 	jmp	*%\reg

@@ -91,7 +111,7 @@
 .macro CALL_NOSPEC reg:req
 #ifdef CONFIG_RETPOLINE
 	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *%\reg), \
-		      __stringify(call __x86_retpoline_\reg), X86_FEATURE_RETPOLINE, \
+		      __stringify(call __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \
 		      __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *%\reg), X86_FEATURE_RETPOLINE_LFENCE
 #else
 	call	*%\reg

@@ -103,10 +123,34 @@
 * monstrosity above, manually.
  */
 .macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
-#ifdef CONFIG_RETPOLINE
 	ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
 	__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)
 .Lskip_rsb_\@:
+.endm
+
+#ifdef CONFIG_CPU_UNRET_ENTRY
+#define CALL_ZEN_UNTRAIN_RET	"call zen_untrain_ret"
+#else
+#define CALL_ZEN_UNTRAIN_RET	""
+#endif
+
+/*
+ * Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the
+ * return thunk isn't mapped into the userspace tables (then again, AMD
+ * typically has NO_MELTDOWN).
+ *
+ * While zen_untrain_ret() doesn't clobber anything but requires stack,
+ * entry_ibpb() will clobber AX, CX, DX.
+ *
+ * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
+ * where we have a stack but before any RET instruction.
+ */
+.macro UNTRAIN_RET
+#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY)
+	ANNOTATE_UNRET_END
+	ALTERNATIVE_2 "",						\
+		      CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET,		\
+		      "call entry_ibpb", X86_FEATURE_ENTRY_IBPB
 #endif
 .endm
 

@@ -118,7 +162,21 @@
 	_ASM_PTR " 999b\n\t"					\
 	".popsection\n\t"
 
+extern void __x86_return_thunk(void);
+extern void zen_untrain_ret(void);
+extern void entry_ibpb(void);
+
 #ifdef CONFIG_RETPOLINE
+
+typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];
+
+#define GEN(reg) \
+	extern retpoline_thunk_t __x86_indirect_thunk_ ## reg;
+#include <asm/GEN-for-each-reg.h>
+#undef GEN
+
+extern retpoline_thunk_t __x86_indirect_thunk_array[];
+
 #ifdef CONFIG_X86_64
 
 /*

@@ -129,7 +187,7 @@
 	ALTERNATIVE_2(						\
 	ANNOTATE_RETPOLINE_SAFE					\
 	"call *%[thunk_target]\n",				\
-	"call __x86_retpoline_%V[thunk_target]\n",		\
+	"call __x86_indirect_thunk_%V[thunk_target]\n",		\
 	X86_FEATURE_RETPOLINE,					\
 	"lfence;\n"						\
 	ANNOTATE_RETPOLINE_SAFE					\

@@ -181,6 +239,7 @@ enum spectre_v2_mitigation {
 	SPECTRE_V2_EIBRS,
 	SPECTRE_V2_EIBRS_RETPOLINE,
 	SPECTRE_V2_EIBRS_LFENCE,
+	SPECTRE_V2_IBRS,
 };
 
 /* The indirect branch speculation control variants */

@@ -223,6 +282,9 @@ static inline void indirect_branch_prediction_barrier(void)
 
 /* The Intel SPEC CTRL MSR base value cache */
 extern u64 x86_spec_ctrl_base;
+DECLARE_PER_CPU(u64, x86_spec_ctrl_current);
+extern void write_spec_ctrl_current(u64 val, bool force);
+extern u64 spec_ctrl_current(void);
 
 /*
  * With retpoline, we must use IBRS to restrict branch prediction

@@ -232,18 +294,16 @@ extern u64 x86_spec_ctrl_base;
  */
 #define firmware_restrict_branch_speculation_start()			\
 do {									\
-	u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS;			\
-									\
 	preempt_disable();						\
-	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
+	alternative_msr_write(MSR_IA32_SPEC_CTRL,			\
+			      spec_ctrl_current() | SPEC_CTRL_IBRS,	\
 			      X86_FEATURE_USE_IBRS_FW);			\
 } while (0)
 
 #define firmware_restrict_branch_speculation_end()			\
 do {									\
-	u64 val = x86_spec_ctrl_base;					\
-									\
-	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
+	alternative_msr_write(MSR_IA32_SPEC_CTRL,			\
+			      spec_ctrl_current(),			\
 			      X86_FEATURE_USE_IBRS_FW);			\
 	preempt_enable();						\
 } while (0)

@@ -306,63 +366,4 @@ static inline void mds_idle_clear_cpu_buffers(void)
 
 #endif /* __ASSEMBLY__ */
 
-/*
- * Below is used in the eBPF JIT compiler and emits the byte sequence
- * for the following assembly:
- *
- * With retpolines configured:
- *
- *    callq do_rop
- *  spec_trap:
- *    pause
- *    lfence
- *    jmp spec_trap
- *  do_rop:
- *    mov %rcx,(%rsp) for x86_64
- *    mov %edx,(%esp) for x86_32
- *    retq
- *
- * Without retpolines configured:
- *
- *    jmp *%rcx for x86_64
- *    jmp *%edx for x86_32
- */
-#ifdef CONFIG_RETPOLINE
-# ifdef CONFIG_X86_64
-#  define RETPOLINE_RCX_BPF_JIT_SIZE	17
-#  define RETPOLINE_RCX_BPF_JIT()				\
-do {								\
-	EMIT1_off32(0xE8, 7);	 /* callq do_rop */		\
-	/* spec_trap: */					\
-	EMIT2(0xF3, 0x90);       /* pause */			\
-	EMIT3(0x0F, 0xAE, 0xE8); /* lfence */			\
-	EMIT2(0xEB, 0xF9);       /* jmp spec_trap */		\
-	/* do_rop: */						\
-	EMIT4(0x48, 0x89, 0x0C, 0x24); /* mov %rcx,(%rsp) */	\
-	EMIT1(0xC3);             /* retq */			\
-} while (0)
-# else /* !CONFIG_X86_64 */
-#  define RETPOLINE_EDX_BPF_JIT()				\
-do {								\
-	EMIT1_off32(0xE8, 7);	 /* call do_rop */		\
-	/* spec_trap: */					\
-	EMIT2(0xF3, 0x90);       /* pause */			\
-	EMIT3(0x0F, 0xAE, 0xE8); /* lfence */			\
-	EMIT2(0xEB, 0xF9);       /* jmp spec_trap */		\
-	/* do_rop: */						\
-	EMIT3(0x89, 0x14, 0x24); /* mov %edx,(%esp) */		\
-	EMIT1(0xC3);             /* ret */			\
-} while (0)
-# endif
-#else /* !CONFIG_RETPOLINE */
-# ifdef CONFIG_X86_64
-#  define RETPOLINE_RCX_BPF_JIT_SIZE	2
-#  define RETPOLINE_RCX_BPF_JIT()				\
-	EMIT2(0xFF, 0xE1);       /* jmp *%rcx */
-# else /* !CONFIG_X86_64 */
-#  define RETPOLINE_EDX_BPF_JIT()				\
-	EMIT2(0xFF, 0xE2)        /* jmp *%edx */
-# endif
-#endif
-
 #endif /* _ASM_X86_NOSPEC_BRANCH_H_ */

@@ -630,7 +630,7 @@ bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
 	    "call " #func ";"						\
 	    PV_RESTORE_ALL_CALLER_REGS					\
 	    FRAME_END							\
-	    "ret;"							\
+	    ASM_RET							\
 	    ".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";" \
 	    ".popsection")
 

@@ -48,7 +48,7 @@ asm    (".pushsection .text;"
 "jne   .slowpath;"
 "pop   %rdx;"
 FRAME_END
-"ret;"
+ASM_RET
 ".slowpath: "
 "push   %rsi;"
 "movzbl %al,%esi;"

@@ -56,7 +56,7 @@ asm    (".pushsection .text;"
 "pop    %rsi;"
 "pop    %rdx;"
 FRAME_END
-"ret;"
+ASM_RET
 ".size " PV_UNLOCK ", .-" PV_UNLOCK ";"
 ".popsection");
 

@@ -11,6 +11,7 @@
 
 #include <asm/nops.h>
 #include <asm/cpufeatures.h>
+#include <asm/alternative.h>
 
 /* "Raw" instruction opcodes */
 #define __ASM_CLAC	".byte 0x0f,0x01,0xca"

@@ -18,8 +19,6 @@
 
 #ifdef __ASSEMBLY__
 
-#include <asm/alternative-asm.h>
-
 #ifdef CONFIG_X86_SMAP
 
 #define ASM_CLAC \

@@ -37,8 +36,6 @@
 
 #else /* __ASSEMBLY__ */
 
-#include <asm/alternative.h>
-
 #ifdef CONFIG_X86_SMAP
 
 static __always_inline void clac(void)

@@ -21,6 +21,16 @@
 * relative displacement across sections.
  */
 
+/*
+ * The trampoline is 8 bytes and of the general form:
+ *
+ *   jmp.d32 \func
+ *   ud1 %esp, %ecx
+ *
+ * That trailing #UD provides both a speculation stop and serves as a unique
+ * 3 byte signature identifying static call trampolines. Also see tramp_ud[]
+ * and __static_call_fixup().
+ */
 #define __ARCH_DEFINE_STATIC_CALL_TRAMP(name, insns)			\
 	asm(".pushsection .static_call.text, \"ax\"		\n"	\
 	    ".align 4						\n"	\

@@ -34,8 +44,13 @@
 #define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func)			\
 	__ARCH_DEFINE_STATIC_CALL_TRAMP(name, ".byte 0xe9; .long " #func " - (. + 4)")
 
+#ifdef CONFIG_RETHUNK
 #define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)			\
-	__ARCH_DEFINE_STATIC_CALL_TRAMP(name, "ret; nop; nop; nop; nop")
+	__ARCH_DEFINE_STATIC_CALL_TRAMP(name, "jmp __x86_return_thunk")
+#else
+#define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)			\
+	__ARCH_DEFINE_STATIC_CALL_TRAMP(name, "ret; int3; nop; nop; nop")
+#endif
+
 
 #define ARCH_ADD_TRAMP_KEY(name)					\

@@ -44,4 +59,6 @@
 	    ".long " STATIC_CALL_KEY_STR(name) " - .		\n"	\
 	    ".popsection					\n")
 
+extern bool __static_call_fixup(void *tramp, u8 op, void *dest);
+
 #endif /* _ASM_STATIC_CALL_H */
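The new trampoline comment pins the layout at eight bytes: a five-byte jmp.d32 (0xe9 plus a little-endian rel32) followed by the three-byte `ud1 %esp, %ecx` signature that __static_call_fixup() can match. A hedged user-space sketch of how those bytes line up; the 0x0f 0xb9 0xcc encoding follows the comment's ud1 operands and the helper name is illustrative:

	#include <stdint.h>
	#include <string.h>

	/* 0x0f 0xb9 0xcc == ud1 %esp, %ecx: the 3-byte trampoline signature */
	static const uint8_t tramp_ud[] = { 0x0f, 0xb9, 0xcc };

	/* Fill an 8-byte trampoline: jmp.d32 to func, then the #UD signature. */
	static void emit_tramp(uint8_t tramp[8], const uint8_t *func)
	{
		int32_t rel = (int32_t)(func - (tramp + 5)); /* rel32 from next insn */

		tramp[0] = 0xe9;			/* jmp.d32 opcode */
		memcpy(&tramp[1], &rel, sizeof(rel));	/* little-endian rel32 */
		memcpy(&tramp[5], tramp_ud, sizeof(tramp_ud));
	}
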
@@ -8,7 +8,11 @@
 #ifdef __ASSEMBLY__
 
 .macro UNWIND_HINT_EMPTY
-	UNWIND_HINT sp_reg=ORC_REG_UNDEFINED type=UNWIND_HINT_TYPE_CALL end=1
+	UNWIND_HINT type=UNWIND_HINT_TYPE_CALL end=1
+.endm
+
+.macro UNWIND_HINT_ENTRY
+	UNWIND_HINT type=UNWIND_HINT_TYPE_ENTRY end=1
 .endm
 
 .macro UNWIND_HINT_REGS base=%rsp offset=0 indirect=0 extra=1 partial=0

@@ -48,17 +52,16 @@
 	UNWIND_HINT_REGS base=\base offset=\offset partial=1
 .endm
 
-.macro UNWIND_HINT_FUNC sp_offset=8
-	UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=\sp_offset type=UNWIND_HINT_TYPE_CALL
+.macro UNWIND_HINT_FUNC
+	UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=8 type=UNWIND_HINT_TYPE_FUNC
 .endm
 
-/*
- * RET_OFFSET: Used on instructions that terminate a function; mostly RETURN
- * and sibling calls. On these, sp_offset denotes the expected offset from
- * initial_func_cfi.
- */
-.macro UNWIND_HINT_RET_OFFSET sp_offset=8
-	UNWIND_HINT sp_reg=ORC_REG_SP type=UNWIND_HINT_TYPE_RET_OFFSET sp_offset=\sp_offset
+.macro UNWIND_HINT_SAVE
+	UNWIND_HINT type=UNWIND_HINT_TYPE_SAVE
+.endm
+
+.macro UNWIND_HINT_RESTORE
+	UNWIND_HINT type=UNWIND_HINT_TYPE_RESTORE
 .endm
 
 #endif /* __ASSEMBLY__ */

@@ -60,7 +60,7 @@ save_registers:
 	popl	saved_context_eflags
 
 	movl	$ret_point, saved_eip
-	ret
+	RET
 
 
 restore_registers:

@@ -70,7 +70,7 @@ restore_registers:
 	movl	saved_context_edi, %edi
 	pushl	saved_context_eflags
 	popfl
-	ret
+	RET
 
 SYM_CODE_START(do_suspend_lowlevel)
 	call	save_processor_state

@@ -86,7 +86,7 @@ SYM_CODE_START(do_suspend_lowlevel)
 ret_point:
 	call	restore_registers
 	call	restore_processor_state
-	ret
+	RET
 SYM_CODE_END(do_suspend_lowlevel)
 
 .data
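The apply_alternatives() rework in the hunks below keys patching off a possibly inverted feature test: an entry tagged ALT_NOT(feature) is patched when the feature is absent. The condition reads backwards at first glance, so here is its decision rule as a runnable C sketch (should_patch() is a hypothetical name for illustration):

	#include <stdbool.h>
	#include <stdio.h>

	#define ALTINSTR_FLAG_INV (1 << 15)

	/* apply_alternatives() skips when !have_feature == !inverted, i.e.
	 * it patches exactly when the feature state matches what the
	 * (possibly ALT_NOT-inverted) entry asks for. */
	static bool should_patch(bool have_feature, unsigned short cpuid)
	{
		bool inverted = cpuid & ALTINSTR_FLAG_INV;

		return have_feature != inverted;
	}

	int main(void)
	{
		printf("feature present, plain entry   -> %d\n", should_patch(true, 0));
		printf("feature absent,  plain entry   -> %d\n", should_patch(false, 0));
		printf("feature absent,  ALT_NOT entry -> %d\n", should_patch(false, ALTINSTR_FLAG_INV));
		return 0;
	}
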
@@ -28,6 +28,7 @@
|
|||||||
#include <asm/insn.h>
|
#include <asm/insn.h>
|
||||||
#include <asm/io.h>
|
#include <asm/io.h>
|
||||||
#include <asm/fixmap.h>
|
#include <asm/fixmap.h>
|
||||||
|
#include <asm/asm-prototypes.h>
|
||||||
|
|
||||||
int __read_mostly alternatives_patched;
|
int __read_mostly alternatives_patched;
|
||||||
|
|
||||||
@@ -268,6 +269,8 @@ static void __init_or_module add_nops(void *insns, unsigned int len)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
extern s32 __retpoline_sites[], __retpoline_sites_end[];
|
||||||
|
extern s32 __return_sites[], __return_sites_end[];
|
||||||
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
|
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
|
||||||
extern s32 __smp_locks[], __smp_locks_end[];
|
extern s32 __smp_locks[], __smp_locks_end[];
|
||||||
void text_poke_early(void *addr, const void *opcode, size_t len);
|
void text_poke_early(void *addr, const void *opcode, size_t len);
|
||||||
@@ -337,26 +340,70 @@ done:
|
|||||||
n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
|
n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* optimize_nops_range() - Optimize a sequence of single byte NOPs (0x90)
|
||||||
|
*
|
||||||
|
* @instr: instruction byte stream
|
||||||
|
* @instrlen: length of the above
|
||||||
|
* @off: offset within @instr where the first NOP has been detected
|
||||||
|
*
|
||||||
|
* Return: number of NOPs found (and replaced).
|
||||||
|
*/
|
||||||
|
static __always_inline int optimize_nops_range(u8 *instr, u8 instrlen, int off)
|
||||||
|
{
|
||||||
|
unsigned long flags;
|
||||||
|
int i = off, nnops;
|
||||||
|
|
||||||
|
while (i < instrlen) {
|
||||||
|
if (instr[i] != 0x90)
|
||||||
|
break;
|
||||||
|
|
||||||
|
i++;
|
||||||
|
}
|
||||||
|
|
||||||
|
nnops = i - off;
|
||||||
|
|
||||||
|
if (nnops <= 1)
|
||||||
|
return nnops;
|
||||||
|
|
||||||
|
local_irq_save(flags);
|
||||||
|
add_nops(instr + off, nnops);
|
||||||
|
local_irq_restore(flags);
|
||||||
|
|
||||||
|
DUMP_BYTES(instr, instrlen, "%px: [%d:%d) optimized NOPs: ", instr, off, i);
|
||||||
|
|
||||||
|
return nnops;
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* "noinline" to cause control flow change and thus invalidate I$ and
|
* "noinline" to cause control flow change and thus invalidate I$ and
|
||||||
* cause refetch after modification.
|
* cause refetch after modification.
|
||||||
*/
|
*/
|
||||||
static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
|
static void __init_or_module noinline optimize_nops(u8 *instr, size_t len)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
struct insn insn;
|
||||||
int i;
|
int i = 0;
|
||||||
|
|
||||||
for (i = 0; i < a->padlen; i++) {
|
/*
|
||||||
if (instr[i] != 0x90)
|
* Jump over the non-NOP insns and optimize single-byte NOPs into bigger
|
||||||
|
* ones.
|
||||||
|
*/
|
||||||
|
for (;;) {
|
||||||
|
if (insn_decode_kernel(&insn, &instr[i]))
|
||||||
|
return;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* See if this and any potentially following NOPs can be
|
||||||
|
* optimized.
|
||||||
|
*/
|
||||||
|
if (insn.length == 1 && insn.opcode.bytes[0] == 0x90)
|
||||||
|
i += optimize_nops_range(instr, len, i);
|
||||||
|
else
|
||||||
|
i += insn.length;
|
||||||
|
|
||||||
|
if (i >= len)
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
local_irq_save(flags);
|
|
||||||
add_nops(instr + (a->instrlen - a->padlen), a->padlen);
|
|
||||||
local_irq_restore(flags);
|
|
||||||
|
|
||||||
DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ",
|
|
||||||
instr, a->instrlen - a->padlen, a->padlen);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@@ -388,23 +435,29 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
|
|||||||
*/
|
*/
|
||||||
for (a = start; a < end; a++) {
|
for (a = start; a < end; a++) {
|
||||||
int insn_buff_sz = 0;
|
int insn_buff_sz = 0;
|
||||||
|
/* Mask away "NOT" flag bit for feature to test. */
|
||||||
|
u16 feature = a->cpuid & ~ALTINSTR_FLAG_INV;
|
||||||
|
|
||||||
instr = (u8 *)&a->instr_offset + a->instr_offset;
|
instr = (u8 *)&a->instr_offset + a->instr_offset;
|
||||||
replacement = (u8 *)&a->repl_offset + a->repl_offset;
|
replacement = (u8 *)&a->repl_offset + a->repl_offset;
|
||||||
BUG_ON(a->instrlen > sizeof(insn_buff));
|
BUG_ON(a->instrlen > sizeof(insn_buff));
|
||||||
BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
|
BUG_ON(feature >= (NCAPINTS + NBUGINTS) * 32);
|
||||||
if (!boot_cpu_has(a->cpuid)) {
|
|
||||||
if (a->padlen > 1)
|
|
||||||
optimize_nops(a, instr);
|
|
||||||
|
|
||||||
continue;
|
/*
|
||||||
}
|
* Patch if either:
|
||||||
|
* - feature is present
|
||||||
|
* - feature not present but ALTINSTR_FLAG_INV is set to mean,
|
||||||
|
* patch if feature is *NOT* present.
|
||||||
|
*/
|
||||||
|
if (!boot_cpu_has(feature) == !(a->cpuid & ALTINSTR_FLAG_INV))
|
||||||
|
goto next;
|
||||||
|
|
||||||
DPRINTK("feat: %d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d), pad: %d",
|
DPRINTK("feat: %s%d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d)",
|
||||||
a->cpuid >> 5,
|
(a->cpuid & ALTINSTR_FLAG_INV) ? "!" : "",
|
||||||
a->cpuid & 0x1f,
|
feature >> 5,
|
||||||
|
feature & 0x1f,
|
||||||
instr, instr, a->instrlen,
|
instr, instr, a->instrlen,
|
||||||
replacement, a->replacementlen, a->padlen);
|
replacement, a->replacementlen);
|
||||||
|
|
||||||
DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
|
DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
|
||||||
DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
|
DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
|
||||||
@@ -428,17 +481,260 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
 		if (a->replacementlen && is_jmp(replacement[0]))
 			recompute_jump(a, instr, replacement, insn_buff);
 
-		if (a->instrlen > a->replacementlen) {
-			add_nops(insn_buff + a->replacementlen,
-				 a->instrlen - a->replacementlen);
-			insn_buff_sz += a->instrlen - a->replacementlen;
-		}
+		for (; insn_buff_sz < a->instrlen; insn_buff_sz++)
+			insn_buff[insn_buff_sz] = 0x90;
 
 		DUMP_BYTES(insn_buff, insn_buff_sz, "%px: final_insn: ", instr);
 
 		text_poke_early(instr, insn_buff, insn_buff_sz);
+
+next:
+		optimize_nops(instr, a->instrlen);
 	}
 }
 
+#if defined(CONFIG_RETPOLINE) && defined(CONFIG_STACK_VALIDATION)
+
+/*
+ * CALL/JMP *%\reg
+ */
+static int emit_indirect(int op, int reg, u8 *bytes)
+{
+	int i = 0;
+	u8 modrm;
+
+	switch (op) {
+	case CALL_INSN_OPCODE:
+		modrm = 0x10; /* Reg = 2; CALL r/m */
+		break;
+
+	case JMP32_INSN_OPCODE:
+		modrm = 0x20; /* Reg = 4; JMP r/m */
+		break;
+
+	default:
+		WARN_ON_ONCE(1);
+		return -1;
+	}
+
+	if (reg >= 8) {
+		bytes[i++] = 0x41; /* REX.B prefix */
+		reg -= 8;
+	}
+
+	modrm |= 0xc0; /* Mod = 3 */
+	modrm += reg;
+
+	bytes[i++] = 0xff; /* opcode */
+	bytes[i++] = modrm;
+
+	return i;
+}
+
+/*
+ * Rewrite the compiler generated retpoline thunk calls.
+ *
+ * For spectre_v2=off (!X86_FEATURE_RETPOLINE), rewrite them into immediate
+ * indirect instructions, avoiding the extra indirection.
+ *
+ * For example, convert:
+ *
+ *   CALL __x86_indirect_thunk_\reg
+ *
+ * into:
+ *
+ *   CALL *%\reg
+ *
+ * It also tries to inline spectre_v2=retpoline,amd when size permits.
+ */
+static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes)
+{
+	retpoline_thunk_t *target;
+	int reg, ret, i = 0;
+	u8 op, cc;
+
+	target = addr + insn->length + insn->immediate.value;
+	reg = target - __x86_indirect_thunk_array;
+
+	if (WARN_ON_ONCE(reg & ~0xf))
+		return -1;
+
+	/* If anyone ever does: CALL/JMP *%rsp, we're in deep trouble. */
+	BUG_ON(reg == 4);
+
+	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE) &&
+	    !cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE))
+		return -1;
+
+	op = insn->opcode.bytes[0];
+
+	/*
+	 * Convert:
+	 *
+	 *   Jcc.d32 __x86_indirect_thunk_\reg
+	 *
+	 * into:
+	 *
+	 *   Jncc.d8 1f
+	 *   [ LFENCE ]
+	 *   JMP *%\reg
+	 *   [ NOP ]
+	 * 1:
+	 */
+	/* Jcc.d32 second opcode byte is in the range: 0x80-0x8f */
+	if (op == 0x0f && (insn->opcode.bytes[1] & 0xf0) == 0x80) {
+		cc = insn->opcode.bytes[1] & 0xf;
+		cc ^= 1; /* invert condition */
+
+		bytes[i++] = 0x70 + cc;        /* Jcc.d8 */
+		bytes[i++] = insn->length - 2; /* sizeof(Jcc.d8) == 2 */
+
+		/* Continue as if: JMP.d32 __x86_indirect_thunk_\reg */
+		op = JMP32_INSN_OPCODE;
+	}
+
+	/*
+	 * For RETPOLINE_AMD: prepend the indirect CALL/JMP with an LFENCE.
+	 */
+	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
+		bytes[i++] = 0x0f;
+		bytes[i++] = 0xae;
+		bytes[i++] = 0xe8; /* LFENCE */
+	}
+
+	ret = emit_indirect(op, reg, bytes + i);
+	if (ret < 0)
+		return ret;
+	i += ret;
+
+	for (; i < insn->length;)
+		bytes[i++] = 0x90;
+
+	return i;
+}
+
+/*
+ * Generated by 'objtool --retpoline'.
+ */
+void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
+{
+	s32 *s;
+
+	for (s = start; s < end; s++) {
+		void *addr = (void *)s + *s;
+		struct insn insn;
+		int len, ret;
+		u8 bytes[16];
+		u8 op1, op2;
+
+		ret = insn_decode_kernel(&insn, addr);
+		if (WARN_ON_ONCE(ret < 0))
+			continue;
+
+		op1 = insn.opcode.bytes[0];
+		op2 = insn.opcode.bytes[1];
+
+		switch (op1) {
+		case CALL_INSN_OPCODE:
+		case JMP32_INSN_OPCODE:
+			break;
+
+		case 0x0f: /* escape */
+			if (op2 >= 0x80 && op2 <= 0x8f)
+				break;
+			fallthrough;
+		default:
+			WARN_ON_ONCE(1);
+			continue;
+		}
+
+		DPRINTK("retpoline at: %pS (%px) len: %d to: %pS",
+			addr, addr, insn.length,
+			addr + insn.length + insn.immediate.value);
+
+		len = patch_retpoline(addr, &insn, bytes);
+		if (len == insn.length) {
+			optimize_nops(bytes, len);
+			DUMP_BYTES(((u8*)addr), len, "%px: orig: ", addr);
+			DUMP_BYTES(((u8*)bytes), len, "%px: repl: ", addr);
+			text_poke_early(addr, bytes, len);
+		}
+	}
+}
+
+#ifdef CONFIG_RETHUNK
+/*
+ * Rewrite the compiler generated return thunk tail-calls.
+ *
+ * For example, convert:
+ *
+ *   JMP __x86_return_thunk
+ *
+ * into:
+ *
+ *   RET
+ */
+static int patch_return(void *addr, struct insn *insn, u8 *bytes)
+{
+	int i = 0;
+
+	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+		return -1;
+
+	bytes[i++] = RET_INSN_OPCODE;
+
+	for (; i < insn->length;)
+		bytes[i++] = INT3_INSN_OPCODE;
+
+	return i;
+}
+
+void __init_or_module noinline apply_returns(s32 *start, s32 *end)
+{
+	s32 *s;
+
+	for (s = start; s < end; s++) {
+		void *dest = NULL, *addr = (void *)s + *s;
+		struct insn insn;
+		int len, ret;
+		u8 bytes[16];
+		u8 op;
+
+		ret = insn_decode_kernel(&insn, addr);
+		if (WARN_ON_ONCE(ret < 0))
+			continue;
+
+		op = insn.opcode.bytes[0];
+		if (op == JMP32_INSN_OPCODE)
+			dest = addr + insn.length + insn.immediate.value;
+
+		if (__static_call_fixup(addr, op, dest) ||
+		    WARN_ON_ONCE(dest != &__x86_return_thunk))
+			continue;
+
+		DPRINTK("return thunk at: %pS (%px) len: %d to: %pS",
+			addr, addr, insn.length,
+			addr + insn.length + insn.immediate.value);
+
+		len = patch_return(addr, &insn, bytes);
+		if (len == insn.length) {
+			DUMP_BYTES(((u8*)addr), len, "%px: orig: ", addr);
+			DUMP_BYTES(((u8*)bytes), len, "%px: repl: ", addr);
+			text_poke_early(addr, bytes, len);
+		}
+	}
+}
+#else
+void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
+#endif /* CONFIG_RETHUNK */
+
+#else /* !RETPOLINES || !CONFIG_STACK_VALIDATION */
+
+void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) { }
+void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
+
+#endif /* CONFIG_RETPOLINE && CONFIG_STACK_VALIDATION */
+
 #ifdef CONFIG_SMP
 static void alternatives_smp_lock(const s32 *start, const s32 *end,
 				  u8 *text, u8 *text_end)
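For reference, a stand-alone sketch (user-space C, not part of the patch) of the encodings emit_indirect() produces; the ModRM arithmetic and the REX.B prefix for r8-r15 are the whole trick:

#include <stdio.h>

static int emit_indirect(int call, int reg, unsigned char *bytes)
{
	int i = 0;
	unsigned char modrm = call ? 0x10 : 0x20;	/* /2 = CALL, /4 = JMP */

	if (reg >= 8) {
		bytes[i++] = 0x41;	/* REX.B: extend r/m to r8-r15 */
		reg -= 8;
	}
	bytes[i++] = 0xff;			/* opcode for CALL/JMP r/m */
	bytes[i++] = 0xc0 | modrm | reg;	/* Mod = 3, register-direct */
	return i;
}

int main(void)
{
	unsigned char buf[3];
	int n = emit_indirect(0, 11, buf);	/* JMP *%r11 */

	for (int i = 0; i < n; i++)
		printf("%02x ", buf[i]);
	printf("\n");	/* prints: 41 ff e3 */
	return 0;
}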
@@ -710,6 +1006,13 @@ void __init alternative_instructions(void)
 	 * patching.
 	 */
 
+	/*
+	 * Rewrite the retpolines, must be done before alternatives since
+	 * those can rewrite the retpoline thunks.
+	 */
+	apply_retpolines(__retpoline_sites, __retpoline_sites_end);
+	apply_returns(__return_sites, __return_sites_end);
+
 	apply_alternatives(__alt_instructions, __alt_instructions_end);
 
 #ifdef CONFIG_SMP
@@ -996,10 +1299,13 @@ void text_poke_sync(void)
 }
 
 struct text_poke_loc {
-	s32 rel_addr; /* addr := _stext + rel_addr */
-	s32 rel32;
+	/* addr := _stext + rel_addr */
+	s32 rel_addr;
+	s32 disp;
+	u8 len;
 	u8 opcode;
 	const u8 text[POKE_MAX_OPCODE_SIZE];
+	/* see text_poke_bp_batch() */
 	u8 old;
 };
 
@@ -1014,7 +1320,8 @@ static struct bp_patching_desc *bp_desc;
 static __always_inline
 struct bp_patching_desc *try_get_desc(struct bp_patching_desc **descp)
 {
-	struct bp_patching_desc *desc = __READ_ONCE(*descp); /* rcu_dereference */
+	/* rcu_dereference */
+	struct bp_patching_desc *desc = __READ_ONCE(*descp);
 
 	if (!desc || !arch_atomic_inc_not_zero(&desc->refs))
 		return NULL;
@@ -1048,7 +1355,7 @@ noinstr int poke_int3_handler(struct pt_regs *regs)
 {
 	struct bp_patching_desc *desc;
 	struct text_poke_loc *tp;
-	int len, ret = 0;
+	int ret = 0;
 	void *ip;
 
 	if (user_mode(regs))
@@ -1088,8 +1395,7 @@ noinstr int poke_int3_handler(struct pt_regs *regs)
 		goto out_put;
 	}
 
-	len = text_opcode_size(tp->opcode);
-	ip += len;
+	ip += tp->len;
 
 	switch (tp->opcode) {
 	case INT3_INSN_OPCODE:
@@ -1104,12 +1410,12 @@ noinstr int poke_int3_handler(struct pt_regs *regs)
 		break;
 
 	case CALL_INSN_OPCODE:
-		int3_emulate_call(regs, (long)ip + tp->rel32);
+		int3_emulate_call(regs, (long)ip + tp->disp);
 		break;
 
 	case JMP32_INSN_OPCODE:
 	case JMP8_INSN_OPCODE:
-		int3_emulate_jmp(regs, (long)ip + tp->rel32);
+		int3_emulate_jmp(regs, (long)ip + tp->disp);
 		break;
 
 	default:
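A small sketch (hypothetical numbers, not kernel code) of the displacement arithmetic used by the emulation above: tp->disp is relative to the end of the patched instruction, which is why the handler first steps ip past it with "ip += tp->len":

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr = 0xffffffff81000000ull;	/* hypothetical patch site */
	uint8_t  len  = 5;			/* e.g. a JMP32: e9 + rel32 */
	int32_t  disp = 0x1000;			/* hypothetical rel32 */

	uint64_t target = addr + len + disp;	/* what int3_emulate_jmp() jumps to */

	printf("target = %#llx\n", (unsigned long long)target);
	return 0;
}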
@@ -1184,7 +1490,7 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
 	 */
 	for (do_sync = 0, i = 0; i < nr_entries; i++) {
 		u8 old[POKE_MAX_OPCODE_SIZE] = { tp[i].old, };
-		int len = text_opcode_size(tp[i].opcode);
+		int len = tp[i].len;
 
 		if (len - INT3_INSN_SIZE > 0) {
 			memcpy(old + INT3_INSN_SIZE,
@@ -1261,20 +1567,36 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
 			       const void *opcode, size_t len, const void *emulate)
 {
 	struct insn insn;
+	int ret, i;
 
 	memcpy((void *)tp->text, opcode, len);
 	if (!emulate)
 		emulate = opcode;
 
-	kernel_insn_init(&insn, emulate, MAX_INSN_SIZE);
-	insn_get_length(&insn);
-
-	BUG_ON(!insn_complete(&insn));
-	BUG_ON(len != insn.length);
+	ret = insn_decode_kernel(&insn, emulate);
+	BUG_ON(ret < 0);
 
 	tp->rel_addr = addr - (void *)_stext;
+	tp->len = len;
 	tp->opcode = insn.opcode.bytes[0];
 
+	switch (tp->opcode) {
+	case RET_INSN_OPCODE:
+	case JMP32_INSN_OPCODE:
+	case JMP8_INSN_OPCODE:
+		/*
+		 * Control flow instructions without implied execution of the
+		 * next instruction can be padded with INT3.
+		 */
+		for (i = insn.length; i < len; i++)
+			BUG_ON(tp->text[i] != INT3_INSN_OPCODE);
+		break;
+
+	default:
+		BUG_ON(len != insn.length);
+	};
+
 	switch (tp->opcode) {
 	case INT3_INSN_OPCODE:
 	case RET_INSN_OPCODE:
@@ -1283,7 +1605,7 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
 	case CALL_INSN_OPCODE:
 	case JMP32_INSN_OPCODE:
 	case JMP8_INSN_OPCODE:
-		tp->rel32 = insn.immediate.value;
+		tp->disp = insn.immediate.value;
 		break;
 
 	default: /* assume NOP */
@@ -1291,13 +1613,13 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
 	case 2: /* NOP2 -- emulate as JMP8+0 */
 		BUG_ON(memcmp(emulate, ideal_nops[len], len));
 		tp->opcode = JMP8_INSN_OPCODE;
-		tp->rel32 = 0;
+		tp->disp = 0;
 		break;
 
 	case 5: /* NOP5 -- emulate as JMP32+0 */
 		BUG_ON(memcmp(emulate, ideal_nops[NOP_ATOMIC5], len));
 		tp->opcode = JMP32_INSN_OPCODE;
-		tp->rel32 = 0;
+		tp->disp = 0;
 		break;
 
 	default: /* unknown instruction */
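A hypothetical caller sketch (not from this patch) showing how the text_poke_bp() machinery these changes feed is typically used; the names example_enable_call/site/func are made up, and the NULL emulate argument defaults to the new opcode:

static void example_enable_call(void *site, void *func)
{
	u8 insn[5];

	/* e8 <rel32>: CALL rel32, relative to the end of the 5-byte instruction */
	insn[0] = CALL_INSN_OPCODE;
	*(s32 *)(insn + 1) = (long)func - ((long)site + 5);

	text_poke_bp(site, insn, 5, NULL);
}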
@@ -914,6 +914,28 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
 	clear_rdrand_cpuid_bit(c);
 }
 
+void init_spectral_chicken(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_CPU_UNRET_ENTRY
+	u64 value;
+
+	/*
+	 * On Zen2 we offer this chicken (bit) on the altar of Speculation.
+	 *
+	 * This suppresses speculation from the middle of a basic block, i.e. it
+	 * suppresses non-branch predictions.
+	 *
+	 * We use STIBP as a heuristic to filter out Zen2 from the rest of F17H
+	 */
+	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_AMD_STIBP)) {
+		if (!rdmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) {
+			value |= MSR_ZEN2_SPECTRAL_CHICKEN_BIT;
+			wrmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value);
+		}
+	}
+#endif
+}
+
 static void init_amd_zn(struct cpuinfo_x86 *c)
 {
 	set_cpu_cap(c, X86_FEATURE_ZEN);
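For context, the MSR constants used above are added elsewhere in this series (in msr-index.h); to the best of my recollection, and worth double-checking against the tree, they are:

/* Assumed definitions from the "x86/cpu/amd: Add Spectral Chicken" change: */
#define MSR_ZEN2_SPECTRAL_CHICKEN	0xc00110e3
#define MSR_ZEN2_SPECTRAL_CHICKEN_BIT	BIT_ULL(1)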
@@ -922,12 +944,21 @@ static void init_amd_zn(struct cpuinfo_x86 *c)
 	node_reclaim_distance = 32;
 #endif
 
-	/*
-	 * Fix erratum 1076: CPB feature bit not being set in CPUID.
-	 * Always set it, except when running under a hypervisor.
-	 */
-	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB))
-		set_cpu_cap(c, X86_FEATURE_CPB);
+	/* Fix up CPUID bits, but only if not virtualised. */
+	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
+
+		/* Erratum 1076: CPB feature bit not being set in CPUID. */
+		if (!cpu_has(c, X86_FEATURE_CPB))
+			set_cpu_cap(c, X86_FEATURE_CPB);
+
+		/*
+		 * Zen3 (Fam19 model < 0x10) parts are not susceptible to
+		 * Branch Type Confusion, but predate the allocation of the
+		 * BTC_NO bit.
+		 */
+		if (c->x86 == 0x19 && !cpu_has(c, X86_FEATURE_BTC_NO))
+			set_cpu_cap(c, X86_FEATURE_BTC_NO);
+	}
 }
 
 static void init_amd(struct cpuinfo_x86 *c)
@@ -959,7 +990,8 @@ static void init_amd(struct cpuinfo_x86 *c)
 	case 0x12: init_amd_ln(c); break;
 	case 0x15: init_amd_bd(c); break;
 	case 0x16: init_amd_jg(c); break;
-	case 0x17: fallthrough;
+	case 0x17: init_spectral_chicken(c);
+		   fallthrough;
 	case 0x19: init_amd_zn(c); break;
 	}
 
@@ -38,6 +38,8 @@
 
 static void __init spectre_v1_select_mitigation(void);
 static void __init spectre_v2_select_mitigation(void);
+static void __init retbleed_select_mitigation(void);
+static void __init spectre_v2_user_select_mitigation(void);
 static void __init ssb_select_mitigation(void);
 static void __init l1tf_select_mitigation(void);
 static void __init mds_select_mitigation(void);
@@ -47,16 +49,40 @@ static void __init taa_select_mitigation(void);
 static void __init mmio_select_mitigation(void);
 static void __init srbds_select_mitigation(void);
 
-/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
+/* The base value of the SPEC_CTRL MSR without task-specific bits set */
 u64 x86_spec_ctrl_base;
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
+
+/* The current value of the SPEC_CTRL MSR with task-specific bits set */
+DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);
+
 static DEFINE_MUTEX(spec_ctrl_mutex);
 
 /*
- * The vendor and possibly platform specific bits which can be modified in
- * x86_spec_ctrl_base.
+ * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
+ * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
  */
-static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
+void write_spec_ctrl_current(u64 val, bool force)
+{
+	if (this_cpu_read(x86_spec_ctrl_current) == val)
+		return;
+
+	this_cpu_write(x86_spec_ctrl_current, val);
+
+	/*
+	 * When KERNEL_IBRS this MSR is written on return-to-user, unless
+	 * forced the update can be delayed until that time.
+	 */
+	if (force || !cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
+		wrmsrl(MSR_IA32_SPEC_CTRL, val);
+}
+
+u64 spec_ctrl_current(void)
+{
+	return this_cpu_read(x86_spec_ctrl_current);
+}
+EXPORT_SYMBOL_GPL(spec_ctrl_current);
 
 /*
  * AMD specific MSR info for Speculative Store Bypass control.
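A hypothetical caller sketch (the real consumer is the task-switch path, __speculation_ctrl_update()) of the lazy-write contract above; per-task STIBP/SSBD bits are OR'ed on top of the base value, and the MSR write is skipped when nothing changed or deferred to return-to-user under KERNEL_IBRS:

static void example_speculation_update(unsigned long tif)
{
	u64 msr = x86_spec_ctrl_base;

	msr |= ssbd_tif_to_spec_ctrl(tif);
	msr |= stibp_tif_to_spec_ctrl(tif);

	write_spec_ctrl_current(msr, false);	/* lazy: may defer the wrmsrl */
}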
@@ -106,13 +132,21 @@ void __init check_bugs(void)
 	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
 		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
 
-	/* Allow STIBP in MSR_SPEC_CTRL if supported */
-	if (boot_cpu_has(X86_FEATURE_STIBP))
-		x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
-
 	/* Select the proper CPU mitigations before patching alternatives: */
 	spectre_v1_select_mitigation();
 	spectre_v2_select_mitigation();
+	/*
+	 * retbleed_select_mitigation() relies on the state set by
+	 * spectre_v2_select_mitigation(); specifically it wants to know about
+	 * spectre_v2=ibrs.
+	 */
+	retbleed_select_mitigation();
+	/*
+	 * spectre_v2_user_select_mitigation() relies on the state set by
+	 * retbleed_select_mitigation(); specifically the STIBP selection is
+	 * forced for UNRET.
+	 */
+	spectre_v2_user_select_mitigation();
 	ssb_select_mitigation();
 	l1tf_select_mitigation();
 	md_clear_select_mitigation();
@@ -152,31 +186,17 @@
 #endif
 }
 
+/*
+ * NOTE: For VMX, this function is not called in the vmexit path.
+ * It uses vmx_spec_ctrl_restore_host() instead.
+ */
 void
 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
 {
-	u64 msrval, guestval, hostval = x86_spec_ctrl_base;
+	u64 msrval, guestval = guest_spec_ctrl, hostval = spec_ctrl_current();
 	struct thread_info *ti = current_thread_info();
 
-	/* Is MSR_SPEC_CTRL implemented ? */
 	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
-		/*
-		 * Restrict guest_spec_ctrl to supported values. Clear the
-		 * modifiable bits in the host base value and or the
-		 * modifiable bits from the guest value.
-		 */
-		guestval = hostval & ~x86_spec_ctrl_mask;
-		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
-
-		/* SSBD controlled in MSR_SPEC_CTRL */
-		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
-		    static_cpu_has(X86_FEATURE_AMD_SSBD))
-			hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
-
-		/* Conditional STIBP enabled? */
-		if (static_branch_unlikely(&switch_to_cond_stibp))
-			hostval |= stibp_tif_to_spec_ctrl(ti->flags);
-
 		if (hostval != guestval) {
 			msrval = setguest ? guestval : hostval;
 			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
@@ -708,12 +728,180 @@ static int __init nospectre_v1_cmdline(char *str)
 }
 early_param("nospectre_v1", nospectre_v1_cmdline);
 
-#undef pr_fmt
-#define pr_fmt(fmt)     "Spectre V2 : " fmt
-
 static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
 	SPECTRE_V2_NONE;
 
+#undef pr_fmt
+#define pr_fmt(fmt)     "RETBleed: " fmt
+
+enum retbleed_mitigation {
+	RETBLEED_MITIGATION_NONE,
+	RETBLEED_MITIGATION_UNRET,
+	RETBLEED_MITIGATION_IBPB,
+	RETBLEED_MITIGATION_IBRS,
+	RETBLEED_MITIGATION_EIBRS,
+};
+
+enum retbleed_mitigation_cmd {
+	RETBLEED_CMD_OFF,
+	RETBLEED_CMD_AUTO,
+	RETBLEED_CMD_UNRET,
+	RETBLEED_CMD_IBPB,
+};
+
+const char * const retbleed_strings[] = {
+	[RETBLEED_MITIGATION_NONE]	= "Vulnerable",
+	[RETBLEED_MITIGATION_UNRET]	= "Mitigation: untrained return thunk",
+	[RETBLEED_MITIGATION_IBPB]	= "Mitigation: IBPB",
+	[RETBLEED_MITIGATION_IBRS]	= "Mitigation: IBRS",
+	[RETBLEED_MITIGATION_EIBRS]	= "Mitigation: Enhanced IBRS",
+};
+
+static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
+	RETBLEED_MITIGATION_NONE;
+static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init =
+	RETBLEED_CMD_AUTO;
+
+static int __ro_after_init retbleed_nosmt = false;
+
+static int __init retbleed_parse_cmdline(char *str)
+{
+	if (!str)
+		return -EINVAL;
+
+	while (str) {
+		char *next = strchr(str, ',');
+		if (next) {
+			*next = 0;
+			next++;
+		}
+
+		if (!strcmp(str, "off")) {
+			retbleed_cmd = RETBLEED_CMD_OFF;
+		} else if (!strcmp(str, "auto")) {
+			retbleed_cmd = RETBLEED_CMD_AUTO;
+		} else if (!strcmp(str, "unret")) {
+			retbleed_cmd = RETBLEED_CMD_UNRET;
+		} else if (!strcmp(str, "ibpb")) {
+			retbleed_cmd = RETBLEED_CMD_IBPB;
+		} else if (!strcmp(str, "nosmt")) {
+			retbleed_nosmt = true;
+		} else {
+			pr_err("Ignoring unknown retbleed option (%s).", str);
+		}
+
+		str = next;
+	}
+
+	return 0;
+}
+early_param("retbleed", retbleed_parse_cmdline);
+
+#define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
+#define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"
+
+static void __init retbleed_select_mitigation(void)
+{
+	bool mitigate_smt = false;
+
+	if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
+		return;
+
+	switch (retbleed_cmd) {
+	case RETBLEED_CMD_OFF:
+		return;
+
+	case RETBLEED_CMD_UNRET:
+		if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) {
+			retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
+		} else {
+			pr_err("WARNING: kernel not compiled with CPU_UNRET_ENTRY.\n");
+			goto do_cmd_auto;
+		}
+		break;
+
+	case RETBLEED_CMD_IBPB:
+		if (!boot_cpu_has(X86_FEATURE_IBPB)) {
+			pr_err("WARNING: CPU does not support IBPB.\n");
+			goto do_cmd_auto;
+		} else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) {
+			retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
+		} else {
+			pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n");
+			goto do_cmd_auto;
+		}
+		break;
+
+do_cmd_auto:
+	case RETBLEED_CMD_AUTO:
+	default:
+		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+		    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+			if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY))
+				retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
+			else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY) && boot_cpu_has(X86_FEATURE_IBPB))
+				retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
+		}
+
+		/*
+		 * The Intel mitigation (IBRS or eIBRS) was already selected in
+		 * spectre_v2_select_mitigation().  'retbleed_mitigation' will
+		 * be set accordingly below.
+		 */
+
+		break;
+	}
+
+	switch (retbleed_mitigation) {
+	case RETBLEED_MITIGATION_UNRET:
+		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+		setup_force_cpu_cap(X86_FEATURE_UNRET);
+
+		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+			pr_err(RETBLEED_UNTRAIN_MSG);
+
+		mitigate_smt = true;
+		break;
+
+	case RETBLEED_MITIGATION_IBPB:
+		setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
+		mitigate_smt = true;
+		break;
+
+	default:
+		break;
+	}
+
+	if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
+	    (retbleed_nosmt || cpu_mitigations_auto_nosmt()))
+		cpu_smt_disable(false);
+
+	/*
+	 * Let IBRS trump all on Intel without affecting the effects of the
+	 * retbleed= cmdline option.
+	 */
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
+		switch (spectre_v2_enabled) {
+		case SPECTRE_V2_IBRS:
+			retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
+			break;
+		case SPECTRE_V2_EIBRS:
+		case SPECTRE_V2_EIBRS_RETPOLINE:
+		case SPECTRE_V2_EIBRS_LFENCE:
+			retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
+			break;
+		default:
+			pr_err(RETBLEED_INTEL_MSG);
+		}
+	}
+
+	pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
+}
+
+#undef pr_fmt
+#define pr_fmt(fmt)     "Spectre V2 : " fmt
+
 static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
 	SPECTRE_V2_USER_NONE;
 static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
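A stand-alone sketch (user-space C, not kernel code) of the comma-separated tokenization retbleed_parse_cmdline() performs, e.g. when booting with retbleed=ibpb,nosmt:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char arg[] = "ibpb,nosmt";	/* hypothetical retbleed= value */
	char *str = arg;

	while (str) {
		char *next = strchr(str, ',');
		if (next)
			*next++ = 0;	/* terminate this token, advance */

		printf("option: %s\n", str);	/* prints "ibpb", then "nosmt" */
		str = next;
	}
	return 0;
}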
@@ -784,6 +972,7 @@ enum spectre_v2_mitigation_cmd {
 	SPECTRE_V2_CMD_EIBRS,
 	SPECTRE_V2_CMD_EIBRS_RETPOLINE,
 	SPECTRE_V2_CMD_EIBRS_LFENCE,
+	SPECTRE_V2_CMD_IBRS,
 };
 
 enum spectre_v2_user_cmd {
@@ -824,13 +1013,15 @@ static void __init spec_v2_user_print_cond(const char *reason, bool secure)
 		pr_info("spectre_v2_user=%s forced on command line.\n", reason);
 }
 
+static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd;
+
 static enum spectre_v2_user_cmd __init
-spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
+spectre_v2_parse_user_cmdline(void)
 {
 	char arg[20];
 	int ret, i;
 
-	switch (v2_cmd) {
+	switch (spectre_v2_cmd) {
 	case SPECTRE_V2_CMD_NONE:
 		return SPECTRE_V2_USER_CMD_NONE;
 	case SPECTRE_V2_CMD_FORCE:
@@ -856,15 +1047,16 @@ spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
 	return SPECTRE_V2_USER_CMD_AUTO;
 }
 
-static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
+static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
 {
-	return (mode == SPECTRE_V2_EIBRS ||
-		mode == SPECTRE_V2_EIBRS_RETPOLINE ||
-		mode == SPECTRE_V2_EIBRS_LFENCE);
+	return mode == SPECTRE_V2_IBRS ||
+	       mode == SPECTRE_V2_EIBRS ||
+	       mode == SPECTRE_V2_EIBRS_RETPOLINE ||
+	       mode == SPECTRE_V2_EIBRS_LFENCE;
 }
 
 static void __init
-spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
+spectre_v2_user_select_mitigation(void)
 {
 	enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
 	bool smt_possible = IS_ENABLED(CONFIG_SMP);
@@ -877,7 +1069,7 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
 	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
 		smt_possible = false;
 
-	cmd = spectre_v2_parse_user_cmdline(v2_cmd);
+	cmd = spectre_v2_parse_user_cmdline();
 	switch (cmd) {
 	case SPECTRE_V2_USER_CMD_NONE:
 		goto set_mode;
@@ -925,12 +1117,12 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
 	}
 
 	/*
-	 * If no STIBP, enhanced IBRS is enabled or SMT impossible, STIBP is not
-	 * required.
+	 * If no STIBP, IBRS or enhanced IBRS is enabled, or SMT impossible,
+	 * STIBP is not required.
 	 */
 	if (!boot_cpu_has(X86_FEATURE_STIBP) ||
 	    !smt_possible ||
-	    spectre_v2_in_eibrs_mode(spectre_v2_enabled))
+	    spectre_v2_in_ibrs_mode(spectre_v2_enabled))
 		return;
 
 	/*
@@ -942,6 +1134,13 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
 	    boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
 		mode = SPECTRE_V2_USER_STRICT_PREFERRED;
 
+	if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET) {
+		if (mode != SPECTRE_V2_USER_STRICT &&
+		    mode != SPECTRE_V2_USER_STRICT_PREFERRED)
+			pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n");
+		mode = SPECTRE_V2_USER_STRICT_PREFERRED;
+	}
+
 	spectre_v2_user_stibp = mode;
 
 set_mode:
@@ -955,6 +1154,7 @@ static const char * const spectre_v2_strings[] = {
 	[SPECTRE_V2_EIBRS]			= "Mitigation: Enhanced IBRS",
 	[SPECTRE_V2_EIBRS_LFENCE]		= "Mitigation: Enhanced IBRS + LFENCE",
 	[SPECTRE_V2_EIBRS_RETPOLINE]		= "Mitigation: Enhanced IBRS + Retpolines",
+	[SPECTRE_V2_IBRS]			= "Mitigation: IBRS",
 };
 
 static const struct {
@@ -972,6 +1172,7 @@ static const struct {
 	{ "eibrs,lfence",	SPECTRE_V2_CMD_EIBRS_LFENCE,	  false },
 	{ "eibrs,retpoline",	SPECTRE_V2_CMD_EIBRS_RETPOLINE,	  false },
 	{ "auto",		SPECTRE_V2_CMD_AUTO,		  false },
+	{ "ibrs",		SPECTRE_V2_CMD_IBRS,		  false },
 };
 
 static void __init spec_v2_print_cond(const char *reason, bool secure)
@@ -1034,6 +1235,30 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
 		return SPECTRE_V2_CMD_AUTO;
 	}
 
+	if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_CPU_IBRS_ENTRY)) {
+		pr_err("%s selected but not compiled in. Switching to AUTO select\n",
+		       mitigation_options[i].option);
+		return SPECTRE_V2_CMD_AUTO;
+	}
+
+	if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
+		pr_err("%s selected but not Intel CPU. Switching to AUTO select\n",
+		       mitigation_options[i].option);
+		return SPECTRE_V2_CMD_AUTO;
+	}
+
+	if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) {
+		pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n",
+		       mitigation_options[i].option);
+		return SPECTRE_V2_CMD_AUTO;
+	}
+
+	if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_has(X86_FEATURE_XENPV)) {
+		pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n",
+		       mitigation_options[i].option);
+		return SPECTRE_V2_CMD_AUTO;
+	}
+
 	spec_v2_print_cond(mitigation_options[i].option,
 			   mitigation_options[i].secure);
 	return cmd;
@@ -1049,6 +1274,22 @@ static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
 	return SPECTRE_V2_RETPOLINE;
 }
 
+/* Disable in-kernel use of non-RSB RET predictors */
+static void __init spec_ctrl_disable_kernel_rrsba(void)
+{
+	u64 ia32_cap;
+
+	if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
+		return;
+
+	ia32_cap = x86_read_arch_cap_msr();
+
+	if (ia32_cap & ARCH_CAP_RRSBA) {
+		x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
+		write_spec_ctrl_current(x86_spec_ctrl_base, true);
+	}
+}
+
 static void __init spectre_v2_select_mitigation(void)
 {
 	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -1073,6 +1314,15 @@ static void __init spectre_v2_select_mitigation(void)
 			break;
 		}
 
+		if (IS_ENABLED(CONFIG_CPU_IBRS_ENTRY) &&
+		    boot_cpu_has_bug(X86_BUG_RETBLEED) &&
+		    retbleed_cmd != RETBLEED_CMD_OFF &&
+		    boot_cpu_has(X86_FEATURE_IBRS) &&
+		    boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
+			mode = SPECTRE_V2_IBRS;
+			break;
+		}
+
 		mode = spectre_v2_select_retpoline();
 		break;
 
@@ -1089,6 +1339,10 @@ static void __init spectre_v2_select_mitigation(void)
 		mode = spectre_v2_select_retpoline();
 		break;
 
+	case SPECTRE_V2_CMD_IBRS:
+		mode = SPECTRE_V2_IBRS;
+		break;
+
 	case SPECTRE_V2_CMD_EIBRS:
 		mode = SPECTRE_V2_EIBRS;
 		break;
@@ -1105,10 +1359,9 @@ static void __init spectre_v2_select_mitigation(void)
 	if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
 		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
 
-	if (spectre_v2_in_eibrs_mode(mode)) {
-		/* Force it so VMEXIT will restore correctly */
+	if (spectre_v2_in_ibrs_mode(mode)) {
 		x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
-		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+		write_spec_ctrl_current(x86_spec_ctrl_base, true);
 	}
 
 	switch (mode) {
@@ -1116,6 +1369,10 @@ static void __init spectre_v2_select_mitigation(void)
 	case SPECTRE_V2_EIBRS:
 		break;
 
+	case SPECTRE_V2_IBRS:
+		setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
+		break;
+
 	case SPECTRE_V2_LFENCE:
 	case SPECTRE_V2_EIBRS_LFENCE:
 		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
@@ -1127,43 +1384,107 @@
 		break;
 	}
 
+	/*
+	 * Disable alternate RSB predictions in kernel when indirect CALLs and
+	 * JMPs gets protection against BHI and Intramode-BTI, but RET
+	 * prediction from a non-RSB predictor is still a risk.
+	 */
+	if (mode == SPECTRE_V2_EIBRS_LFENCE ||
+	    mode == SPECTRE_V2_EIBRS_RETPOLINE ||
+	    mode == SPECTRE_V2_RETPOLINE)
+		spec_ctrl_disable_kernel_rrsba();
+
 	spectre_v2_enabled = mode;
 	pr_info("%s\n", spectre_v2_strings[mode]);
 
 	/*
-	 * If spectre v2 protection has been enabled, unconditionally fill
-	 * RSB during a context switch; this protects against two independent
-	 * issues:
+	 * If Spectre v2 protection has been enabled, fill the RSB during a
+	 * context switch.  In general there are two types of RSB attacks
+	 * across context switches, for which the CALLs/RETs may be unbalanced.
 	 *
-	 *	- RSB underflow (and switch to BTB) on Skylake+
-	 *	- SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
+	 * 1) RSB underflow
+	 *
+	 *    Some Intel parts have "bottomless RSB".  When the RSB is empty,
+	 *    speculated return targets may come from the branch predictor,
+	 *    which could have a user-poisoned BTB or BHB entry.
+	 *
+	 *    AMD has it even worse: *all* returns are speculated from the BTB,
+	 *    regardless of the state of the RSB.
+	 *
+	 *    When IBRS or eIBRS is enabled, the "user -> kernel" attack
+	 *    scenario is mitigated by the IBRS branch prediction isolation
+	 *    properties, so the RSB buffer filling wouldn't be necessary to
+	 *    protect against this type of attack.
+	 *
+	 *    The "user -> user" attack scenario is mitigated by RSB filling.
+	 *
+	 * 2) Poisoned RSB entry
+	 *
+	 *    If the 'next' in-kernel return stack is shorter than 'prev',
+	 *    'next' could be tricked into speculating with a user-poisoned RSB
+	 *    entry.
+	 *
+	 *    The "user -> kernel" attack scenario is mitigated by SMEP and
+	 *    eIBRS.
+	 *
+	 *    The "user -> user" scenario, also known as SpectreBHB, requires
+	 *    RSB clearing.
+	 *
+	 * So to mitigate all cases, unconditionally fill RSB on context
+	 * switches.
+	 *
+	 * FIXME: Is this pointless for retbleed-affected AMD?
 	 */
 	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
 	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
 
 	/*
-	 * Retpoline means the kernel is safe because it has no indirect
-	 * branches. Enhanced IBRS protects firmware too, so, enable restricted
-	 * speculation around firmware calls only when Enhanced IBRS isn't
-	 * supported.
+	 * Similar to context switches, there are two types of RSB attacks
+	 * after vmexit:
+	 *
+	 * 1) RSB underflow
+	 *
+	 * 2) Poisoned RSB entry
+	 *
+	 * When retpoline is enabled, both are mitigated by filling/clearing
+	 * the RSB.
+	 *
+	 * When IBRS is enabled, while #1 would be mitigated by the IBRS branch
+	 * prediction isolation protections, RSB still needs to be cleared
+	 * because of #2.  Note that SMEP provides no protection here, unlike
+	 * user-space-poisoned RSB entries.
+	 *
+	 * eIBRS, on the other hand, has RSB-poisoning protections, so it
+	 * doesn't need RSB clearing after vmexit.
+	 */
+	if (boot_cpu_has(X86_FEATURE_RETPOLINE) ||
+	    boot_cpu_has(X86_FEATURE_KERNEL_IBRS))
+		setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
+
+	/*
+	 * Retpoline protects the kernel, but doesn't protect firmware.  IBRS
+	 * and Enhanced IBRS protect firmware too, so enable IBRS around
+	 * firmware calls only when IBRS / Enhanced IBRS aren't otherwise
+	 * enabled.
 	 *
 	 * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
 	 * the user might select retpoline on the kernel command line and if
 	 * the CPU supports Enhanced IBRS, kernel might un-intentionally not
 	 * enable IBRS around firmware calls.
 	 */
-	if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_eibrs_mode(mode)) {
+	if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
 		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
 		pr_info("Enabling Restricted Speculation for firmware calls\n");
 	}
 
 	/* Set up IBPB and STIBP depending on the general spectre V2 command */
-	spectre_v2_user_select_mitigation(cmd);
+	spectre_v2_cmd = cmd;
 }
 
 static void update_stibp_msr(void * __unused)
 {
-	wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+	u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
+
+	write_spec_ctrl_current(val, true);
 }
 
 /* Update x86_spec_ctrl_base in case SMT state changed. */
@@ -1379,16 +1700,6 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
 		break;
 	}
 
-	/*
-	 * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
-	 * bit in the mask to allow guests to use the mitigation even in the
-	 * case where the host does not enable it.
-	 */
-	if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
-	    static_cpu_has(X86_FEATURE_AMD_SSBD)) {
-		x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
-	}
-
 	/*
 	 * We have three CPU feature flags that are in play here:
 	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
@@ -1406,7 +1717,7 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
 		x86_amd_ssb_disable();
 	} else {
 		x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
-		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+		write_spec_ctrl_current(x86_spec_ctrl_base, true);
 	}
 }
 
@@ -1624,7 +1935,7 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
 void x86_spec_ctrl_setup_ap(void)
 {
 	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
-		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+		write_spec_ctrl_current(x86_spec_ctrl_base, true);
 
 	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
 		x86_amd_ssb_disable();
@@ -1861,7 +2172,7 @@ static ssize_t mmio_stale_data_show_state(char *buf)
 
 static char *stibp_state(void)
 {
-	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
+	if (spectre_v2_in_ibrs_mode(spectre_v2_enabled))
 		return "";
 
 	switch (spectre_v2_user_stibp) {
@@ -1917,6 +2228,24 @@ static ssize_t srbds_show_state(char *buf)
 	return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
 }
 
+static ssize_t retbleed_show_state(char *buf)
+{
+	if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET) {
+	    if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+		boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+		    return sprintf(buf, "Vulnerable: untrained return thunk on non-Zen uarch\n");
+
+	    return sprintf(buf, "%s; SMT %s\n",
+			   retbleed_strings[retbleed_mitigation],
+			   !sched_smt_active() ? "disabled" :
+			   spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+			   spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ?
+			   "enabled with STIBP protection" : "vulnerable");
+	}
+
+	return sprintf(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
+}
+
 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
 			       char *buf, unsigned int bug)
 {
@@ -1962,6 +2291,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
 	case X86_BUG_MMIO_STALE_DATA:
 		return mmio_stale_data_show_state(buf);
 
+	case X86_BUG_RETBLEED:
+		return retbleed_show_state(buf);
+
 	default:
 		break;
 	}
@@ -2018,4 +2350,9 @@ ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *at
 {
 	return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
 }
+
+ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED);
+}
 #endif
@@ -1092,48 +1092,60 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
 	{}
 };
 
+#define VULNBL(vendor, family, model, blacklist)	\
+	X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, blacklist)
+
 #define VULNBL_INTEL_STEPPINGS(model, steppings, issues)		   \
 	X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6,		   \
 					    INTEL_FAM6_##model, steppings, \
 					    X86_FEATURE_ANY, issues)
 
+#define VULNBL_AMD(family, blacklist)		\
+	VULNBL(AMD, family, X86_MODEL_ANY, blacklist)
+
+#define VULNBL_HYGON(family, blacklist)		\
+	VULNBL(HYGON, family, X86_MODEL_ANY, blacklist)
+
 #define SRBDS		BIT(0)
 /* CPU is affected by X86_BUG_MMIO_STALE_DATA */
 #define MMIO		BIT(1)
 /* CPU is affected by Shared Buffers Data Sampling (SBDS), a variant of X86_BUG_MMIO_STALE_DATA */
 #define MMIO_SBDS	BIT(2)
+/* CPU is affected by RETbleed, speculating where you would not expect it */
+#define RETBLEED	BIT(3)
 
 static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
 	VULNBL_INTEL_STEPPINGS(IVYBRIDGE,	X86_STEPPING_ANY,		SRBDS),
 	VULNBL_INTEL_STEPPINGS(HASWELL,		X86_STEPPING_ANY,		SRBDS),
 	VULNBL_INTEL_STEPPINGS(HASWELL_L,	X86_STEPPING_ANY,		SRBDS),
 	VULNBL_INTEL_STEPPINGS(HASWELL_G,	X86_STEPPING_ANY,		SRBDS),
-	VULNBL_INTEL_STEPPINGS(HASWELL_X,	BIT(2) | BIT(4),		MMIO),
-	VULNBL_INTEL_STEPPINGS(BROADWELL_D,	X86_STEPPINGS(0x3, 0x5),	MMIO),
+	VULNBL_INTEL_STEPPINGS(HASWELL_X,	X86_STEPPING_ANY,		MMIO),
+	VULNBL_INTEL_STEPPINGS(BROADWELL_D,	X86_STEPPING_ANY,		MMIO),
 	VULNBL_INTEL_STEPPINGS(BROADWELL_G,	X86_STEPPING_ANY,		SRBDS),
 	VULNBL_INTEL_STEPPINGS(BROADWELL_X,	X86_STEPPING_ANY,		MMIO),
 	VULNBL_INTEL_STEPPINGS(BROADWELL,	X86_STEPPING_ANY,		SRBDS),
-	VULNBL_INTEL_STEPPINGS(SKYLAKE_L,	X86_STEPPINGS(0x3, 0x3),	SRBDS | MMIO),
-	VULNBL_INTEL_STEPPINGS(SKYLAKE_L,	X86_STEPPING_ANY,		SRBDS),
-	VULNBL_INTEL_STEPPINGS(SKYLAKE_X,	BIT(3) | BIT(4) | BIT(6) |
-						BIT(7) | BIT(0xB),		MMIO),
-	VULNBL_INTEL_STEPPINGS(SKYLAKE,		X86_STEPPINGS(0x3, 0x3),	SRBDS | MMIO),
-	VULNBL_INTEL_STEPPINGS(SKYLAKE,		X86_STEPPING_ANY,		SRBDS),
-	VULNBL_INTEL_STEPPINGS(KABYLAKE_L,	X86_STEPPINGS(0x9, 0xC),	SRBDS | MMIO),
-	VULNBL_INTEL_STEPPINGS(KABYLAKE_L,	X86_STEPPINGS(0x0, 0x8),	SRBDS),
-	VULNBL_INTEL_STEPPINGS(KABYLAKE,	X86_STEPPINGS(0x9, 0xD),	SRBDS | MMIO),
-	VULNBL_INTEL_STEPPINGS(KABYLAKE,	X86_STEPPINGS(0x0, 0x8),	SRBDS),
-	VULNBL_INTEL_STEPPINGS(ICELAKE_L,	X86_STEPPINGS(0x5, 0x5),	MMIO | MMIO_SBDS),
-	VULNBL_INTEL_STEPPINGS(ICELAKE_D,	X86_STEPPINGS(0x1, 0x1),	MMIO),
-	VULNBL_INTEL_STEPPINGS(ICELAKE_X,	X86_STEPPINGS(0x4, 0x6),	MMIO),
-	VULNBL_INTEL_STEPPINGS(COMETLAKE,	BIT(2) | BIT(3) | BIT(5),	MMIO | MMIO_SBDS),
-	VULNBL_INTEL_STEPPINGS(COMETLAKE_L,	X86_STEPPINGS(0x1, 0x1),	MMIO | MMIO_SBDS),
-	VULNBL_INTEL_STEPPINGS(COMETLAKE_L,	X86_STEPPINGS(0x0, 0x0),	MMIO),
-	VULNBL_INTEL_STEPPINGS(LAKEFIELD,	X86_STEPPINGS(0x1, 0x1),	MMIO | MMIO_SBDS),
-	VULNBL_INTEL_STEPPINGS(ROCKETLAKE,	X86_STEPPINGS(0x1, 0x1),	MMIO),
-	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT,	X86_STEPPINGS(0x1, 0x1),	MMIO | MMIO_SBDS),
+	VULNBL_INTEL_STEPPINGS(SKYLAKE_L,	X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED),
+	VULNBL_INTEL_STEPPINGS(SKYLAKE_X,	X86_STEPPING_ANY,		MMIO | RETBLEED),
+	VULNBL_INTEL_STEPPINGS(SKYLAKE,		X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED),
+	VULNBL_INTEL_STEPPINGS(KABYLAKE_L,	X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED),
+	VULNBL_INTEL_STEPPINGS(KABYLAKE,	X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED),
+	VULNBL_INTEL_STEPPINGS(CANNONLAKE_L,	X86_STEPPING_ANY,		RETBLEED),
+	VULNBL_INTEL_STEPPINGS(ICELAKE_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED),
+	VULNBL_INTEL_STEPPINGS(ICELAKE_D,	X86_STEPPING_ANY,		MMIO),
+	VULNBL_INTEL_STEPPINGS(ICELAKE_X,	X86_STEPPING_ANY,		MMIO),
+	VULNBL_INTEL_STEPPINGS(COMETLAKE,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED),
+	VULNBL_INTEL_STEPPINGS(COMETLAKE_L,	X86_STEPPINGS(0x0, 0x0),	MMIO | RETBLEED),
+	VULNBL_INTEL_STEPPINGS(COMETLAKE_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED),
+	VULNBL_INTEL_STEPPINGS(LAKEFIELD,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED),
+	VULNBL_INTEL_STEPPINGS(ROCKETLAKE,	X86_STEPPING_ANY,		MMIO | RETBLEED),
+	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS),
 	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D,	X86_STEPPING_ANY,		MMIO),
-	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L,	X86_STEPPINGS(0x0, 0x0),	MMIO | MMIO_SBDS),
+	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS),
+
+	VULNBL_AMD(0x15, RETBLEED),
+	VULNBL_AMD(0x16, RETBLEED),
+	VULNBL_AMD(0x17, RETBLEED),
+	VULNBL_HYGON(0x18, RETBLEED),
 	{}
 };
 
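A rough expansion sketch (assuming the usual shape of the X86_MATCH_VENDOR_FAM_MODEL helper; the entry name is made up) of what one VULNBL_AMD() line contributes; cpu_matches() then tests the issue bit against driver_data for the running CPU:

/* Approximately what VULNBL_AMD(0x17, RETBLEED) expands to: */
static const struct x86_cpu_id example_entry = {
	.vendor		= X86_VENDOR_AMD,
	.family		= 0x17,
	.model		= X86_MODEL_ANY,
	.steppings	= X86_STEPPING_ANY,	/* match all steppings */
	.feature	= X86_FEATURE_ANY,
	.driver_data	= RETBLEED,		/* BIT(3) above */
};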
@@ -1235,6 +1247,11 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	    !arch_cap_mmio_immune(ia32_cap))
 		setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
 
+	if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
+		if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
+			setup_force_cpu_bug(X86_BUG_RETBLEED);
+	}
+
 	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
 		return;
 
@@ -60,6 +60,8 @@ extern void tsx_disable(void);
 static inline void tsx_init(void) { }
 #endif /* CONFIG_CPU_SUP_INTEL */
 
+extern void init_spectral_chicken(struct cpuinfo_x86 *c);
+
 extern void get_cpu_cap(struct cpuinfo_x86 *c);
 extern void get_cpu_address_sizes(struct cpuinfo_x86 *c);
 extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
@@ -318,6 +318,12 @@ static void init_hygon(struct cpuinfo_x86 *c)
 	/* get apicid instead of initial apic id from cpuid */
 	c->apicid = hard_smp_processor_id();
 
+	/*
+	 * XXX someone from Hygon needs to confirm this DTRT
+	 *
+	init_spectral_chicken(c);
+	 */
+
 	set_cpu_cap(c, X86_FEATURE_ZEN);
 	set_cpu_cap(c, X86_FEATURE_CPB);
 
@@ -26,6 +26,7 @@ struct cpuid_bit {
 static const struct cpuid_bit cpuid_bits[] = {
 	{ X86_FEATURE_APERFMPERF,	CPUID_ECX, 0, 0x00000006, 0 },
 	{ X86_FEATURE_EPB,		CPUID_ECX, 3, 0x00000006, 0 },
+	{ X86_FEATURE_RRSBA_CTRL,	CPUID_EDX, 2, 0x00000007, 2 },
 	{ X86_FEATURE_CQM_LLC,		CPUID_EDX, 1, 0x0000000f, 0 },
 	{ X86_FEATURE_CQM_OCCUP_LLC,	CPUID_EDX, 0, 0x0000000f, 1 },
 	{ X86_FEATURE_CQM_MBM_TOTAL,	CPUID_EDX, 1, 0x0000000f, 1 },
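The scattered-bits table maps CPUID bits that have no dedicated feature word onto synthetic feature flags; the new row publishes RRSBA_CTRL from CPUID.(EAX=7, ECX=2):EDX bit 2. Roughly how such an entry is consumed; the struct and helper here are illustrative, not the kernel's exact code:

	#include <stdint.h>

	struct cpuid_bit_sketch {
		uint16_t feature;   /* synthetic X86_FEATURE_* number    */
		uint8_t  reg;       /* which CPUID output register (EDX) */
		uint8_t  bit;       /* bit within that register (2)      */
		uint32_t leaf;      /* CPUID leaf (0x00000007)           */
		uint32_t sub_leaf;  /* CPUID subleaf (2)                 */
	};

	/* After executing CPUID(leaf, sub_leaf), test the named bit. */
	static int scattered_bit_set(const struct cpuid_bit_sketch *e,
				     uint32_t reg_value)
	{
		return (reg_value >> e->bit) & 1;
	}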
@@ -308,7 +308,7 @@ union ftrace_op_code_union {
 	} __attribute__((packed));
 };
 
-#define RET_SIZE		1
+#define RET_SIZE		(IS_ENABLED(CONFIG_RETPOLINE) ? 5 : 1 + IS_ENABLED(CONFIG_SLS))
 
 static unsigned long
 create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
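The new RET_SIZE is pure instruction-length arithmetic: under CONFIG_RETPOLINE the trampoline's return becomes a 5-byte near jump to the return thunk (E9 plus a 32-bit displacement); otherwise it is the 1-byte ret (C3), plus one more byte for the int3 (CC) that CONFIG_SLS appends against straight-line speculation. The same computation spelled out, as a sketch only:

	/* Sketch of the RET_SIZE arithmetic (not kernel code). */
	enum {
		JMP32_LEN = 5,  /* E9 <rel32>: jmp __x86_return_thunk */
		RET_LEN   = 1,  /* C3: ret                            */
		INT3_LEN  = 1,  /* CC: SLS trap after the ret         */
	};

	#define RET_SIZE_SKETCH(retpoline, sls) \
		((retpoline) ? JMP32_LEN : RET_LEN + ((sls) ? INT3_LEN : 0))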
@@ -367,7 +367,10 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
 
 	/* The trampoline ends with ret(q) */
 	retq = (unsigned long)ftrace_stub;
-	ret = copy_from_kernel_nofault(ip, (void *)retq, RET_SIZE);
+	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+		memcpy(ip, text_gen_insn(JMP32_INSN_OPCODE, ip, &__x86_return_thunk), JMP32_INSN_SIZE);
+	else
+		ret = copy_from_kernel_nofault(ip, (void *)retq, RET_SIZE);
 	if (WARN_ON(ret < 0))
 		goto fail;
 
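In the RETHUNK path above, text_gen_insn(JMP32_INSN_OPCODE, ...) materializes that 5-byte jump; the 32-bit displacement is measured from the end of the jump instruction. A hand-rolled equivalent, purely for illustration (gen_jmp32 is not a kernel function):

	#include <stdint.h>
	#include <string.h>

	/* Emit "jmp dest" at ip as E9 + rel32; rel32 is relative to the
	 * address of the next instruction, i.e. ip + 5. */
	static void gen_jmp32(uint8_t *ip, const void *dest)
	{
		int32_t rel = (int32_t)((const uint8_t *)dest - (ip + 5));

		ip[0] = 0xe9;
		memcpy(ip + 1, &rel, sizeof(rel));
	}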
@@ -19,7 +19,7 @@
 #endif
 
 SYM_FUNC_START(__fentry__)
-	ret
+	RET
 SYM_FUNC_END(__fentry__)
 EXPORT_SYMBOL(__fentry__)
 
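This and the following hunks mechanically convert bare ret/retq into the RET assembler macro, so that a single spelling can be retargeted per configuration. From memory of this series (treat the exact cpp as a sketch), the 5.10 definition ends up approximately:

	/* Approximate shape of RET after this merge (sketch). */
	#ifdef CONFIG_RETPOLINE
	#define RET	jmp __x86_return_thunk
	#else
	#ifdef CONFIG_SLS
	#define RET	ret; int3
	#else
	#define RET	ret
	#endif
	#endif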
@@ -84,7 +84,7 @@ ftrace_graph_call:
 
 /* This is weak to keep gas from relaxing the jumps */
 SYM_INNER_LABEL_ALIGN(ftrace_stub, SYM_L_WEAK)
-	ret
+	RET
 SYM_CODE_END(ftrace_caller)
 
 SYM_CODE_START(ftrace_regs_caller)
@@ -177,7 +177,7 @@ SYM_CODE_START(ftrace_graph_caller)
 	popl	%edx
 	popl	%ecx
 	popl	%eax
-	ret
+	RET
 SYM_CODE_END(ftrace_graph_caller)
 
 .globl return_to_handler
@@ -132,7 +132,7 @@
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 SYM_FUNC_START(__fentry__)
-	retq
+	RET
 SYM_FUNC_END(__fentry__)
 EXPORT_SYMBOL(__fentry__)
 
@@ -170,10 +170,11 @@ SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL)
 
 /*
  * This is weak to keep gas from relaxing the jumps.
- * It is also used to copy the retq for trampolines.
+ * It is also used to copy the RET for trampolines.
  */
 SYM_INNER_LABEL_ALIGN(ftrace_stub, SYM_L_WEAK)
-	retq
+	UNWIND_HINT_FUNC
+	RET
 SYM_FUNC_END(ftrace_epilogue)
 
 SYM_FUNC_START(ftrace_regs_caller)
@@ -265,7 +266,7 @@ SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL)
 	restore_mcount_regs 8
 	/* Restore flags */
 	popfq
-	UNWIND_HINT_RET_OFFSET
+	UNWIND_HINT_FUNC
 	jmp	ftrace_epilogue
 
 SYM_FUNC_END(ftrace_regs_caller)
@@ -287,7 +288,7 @@ fgraph_trace:
 #endif
 
 SYM_INNER_LABEL(ftrace_stub, SYM_L_GLOBAL)
-	retq
+	RET
 
 trace:
 	/* save_mcount_regs fills in first two parameters */
@@ -319,7 +320,7 @@ SYM_FUNC_START(ftrace_graph_caller)
 
 	restore_mcount_regs
 
-	retq
+	RET
 SYM_FUNC_END(ftrace_graph_caller)
 
 SYM_CODE_START(return_to_handler)
@@ -23,6 +23,7 @@
 #include <asm/cpufeatures.h>
 #include <asm/percpu.h>
 #include <asm/nops.h>
+#include <asm/nospec-branch.h>
 #include <asm/bootparam.h>
 #include <asm/export.h>
 #include <asm/pgtable_32.h>
@@ -354,7 +355,7 @@ setup_once:
 #endif
 
 	andl $0,setup_once_ref	/* Once is enough, thanks */
-	ret
+	RET
 
 SYM_FUNC_START(early_idt_handler_array)
 	# 36(%esp) %eflags
@@ -321,6 +321,8 @@ SYM_CODE_END(start_cpu0)
 SYM_CODE_START_NOALIGN(vc_boot_ghcb)
 	UNWIND_HINT_IRET_REGS offset=8
 
+	ANNOTATE_UNRET_END
+
 	/* Build pt_regs */
 	PUSH_AND_CLEAR_REGS
 
@@ -378,6 +380,7 @@ SYM_CODE_START(early_idt_handler_array)
 SYM_CODE_END(early_idt_handler_array)
 
 SYM_CODE_START_LOCAL(early_idt_handler_common)
+	ANNOTATE_UNRET_END
 	/*
 	 * The stack is the hardware frame, an error code or zero, and the
 	 * vector number.
@@ -424,6 +427,8 @@ SYM_CODE_END(early_idt_handler_common)
 SYM_CODE_START_NOALIGN(vc_no_ghcb)
 	UNWIND_HINT_IRET_REGS offset=8
 
+	ANNOTATE_UNRET_END
+
 	/* Build pt_regs */
 	PUSH_AND_CLEAR_REGS
 
@@ -10,7 +10,7 @@
 SYM_FUNC_START(native_save_fl)
 	pushf
 	pop %_ASM_AX
-	ret
+	RET
 SYM_FUNC_END(native_save_fl)
 EXPORT_SYMBOL(native_save_fl)
 
@@ -21,6 +21,6 @@ EXPORT_SYMBOL(native_save_fl)
 SYM_FUNC_START(native_restore_fl)
 	push %_ASM_ARG1
 	popf
-	ret
+	RET
 SYM_FUNC_END(native_restore_fl)
 EXPORT_SYMBOL(native_restore_fl)
@@ -768,7 +768,7 @@ asm(
 	RESTORE_REGS_STRING
 	"	popfl\n"
 #endif
-	"	ret\n"
+	ASM_RET
 	".size kretprobe_trampoline, .-kretprobe_trampoline\n"
 );
 NOKPROBE_SYMBOL(kretprobe_trampoline);
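Returns that live inside C-level asm strings get the same treatment through ASM_RET, the string-literal counterpart of the RET assembler macro; approximately (again a sketch from memory, not guaranteed verbatim):

	/* String form for asm("...") bodies (sketch). */
	#ifdef CONFIG_SLS
	#define ASM_RET	"ret; int3\n\t"
	#else
	#define ASM_RET	"ret\n\t"
	#endif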
@@ -953,7 +953,7 @@ asm(
 "movq	__per_cpu_offset(,%rdi,8), %rax;"
 "cmpb	$0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
 "setne	%al;"
-"ret;"
+ASM_RET
 ".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
 ".popsection");
 
@@ -255,7 +255,8 @@ int module_finalize(const Elf_Ehdr *hdr,
 		    struct module *me)
 {
 	const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
-		*para = NULL, *orc = NULL, *orc_ip = NULL;
+		*para = NULL, *orc = NULL, *orc_ip = NULL,
+		*retpolines = NULL, *returns = NULL;
 	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
 
 	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
@@ -271,8 +272,20 @@ int module_finalize(const Elf_Ehdr *hdr,
 			orc = s;
 		if (!strcmp(".orc_unwind_ip", secstrings + s->sh_name))
 			orc_ip = s;
+		if (!strcmp(".retpoline_sites", secstrings + s->sh_name))
+			retpolines = s;
+		if (!strcmp(".return_sites", secstrings + s->sh_name))
+			returns = s;
 	}
 
+	if (retpolines) {
+		void *rseg = (void *)retpolines->sh_addr;
+		apply_retpolines(rseg, rseg + retpolines->sh_size);
+	}
+	if (returns) {
+		void *rseg = (void *)returns->sh_addr;
+		apply_returns(rseg, rseg + returns->sh_size);
+	}
 	if (alt) {
 		/* patch .altinstructions */
 		void *aseg = (void *)alt->sh_addr;
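The two new tables are emitted by objtool: each of .retpoline_sites and .return_sites is an array of self-relative 32-bit offsets pointing at patchable call/return sites, and module loading now runs them through the same apply_retpolines()/apply_returns() passes used for vmlinux. A sketch of the traversal, with the actual patching elided and illustrative names:

	#include <stdint.h>

	/* Walk a .retpoline_sites/.return_sites style table. */
	static void walk_sites(int32_t *start, int32_t *end,
			       void (*patch_one)(uint8_t *site))
	{
		int32_t *entry;

		for (entry = start; entry < end; entry++) {
			/* Each entry is an s32 offset from its own address. */
			uint8_t *site = (uint8_t *)entry + *entry;

			patch_one(site);
		}
	}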
@@ -40,7 +40,7 @@ extern void _paravirt_nop(void);
 asm (".pushsection .entry.text, \"ax\"\n"
      ".global _paravirt_nop\n"
      "_paravirt_nop:\n\t"
-     "ret\n\t"
+     ASM_RET
      ".size _paravirt_nop, . - _paravirt_nop\n\t"
      ".type _paravirt_nop, @function\n\t"
      ".popsection");
@@ -556,7 +556,7 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
 	}
 
 	if (updmsr)
-		wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+		write_spec_ctrl_current(msr, false);
 }
 
 static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
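write_spec_ctrl_current() wraps the raw wrmsrl() so the kernel keeps a per-CPU shadow of MSR_IA32_SPEC_CTRL; the retbleed work needs to consult and restore that value (for example around VM entry/exit) and to skip redundant MSR writes. A rough sketch of the idea, not the exact 5.10 implementation:

	#include <stdbool.h>
	#include <stdint.h>

	static uint64_t spec_ctrl_shadow;  /* per-CPU in the real kernel */

	static void write_spec_ctrl_sketch(uint64_t val, bool force)
	{
		/* WRMSR is expensive; skip it when nothing changed. */
		if (!force && spec_ctrl_shadow == val)
			return;

		spec_ctrl_shadow = val;
		/* wrmsrl(MSR_IA32_SPEC_CTRL, val); */
	}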
@@ -7,10 +7,12 @@
 #include <linux/linkage.h>
 #include <asm/page_types.h>
 #include <asm/kexec.h>
+#include <asm/nospec-branch.h>
 #include <asm/processor-flags.h>
 
 /*
- * Must be relocatable PIC code callable as a C function
+ * Must be relocatable PIC code callable as a C function, in particular
+ * there must be a plain RET and not jump to return thunk.
  */
 
 #define PTR(x) (x << 2)
@@ -91,7 +93,9 @@ SYM_CODE_START_NOALIGN(relocate_kernel)
 	movl	%edi, %eax
 	addl	$(identity_mapped - relocate_kernel), %eax
 	pushl	%eax
+	ANNOTATE_UNRET_SAFE
 	ret
+	int3
 SYM_CODE_END(relocate_kernel)
 
 SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
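The kexec relocation stubs run from an identity-mapped copy of themselves, where a jump to __x86_return_thunk could land in unmapped memory, so they must keep a bare ret. ANNOTATE_UNRET_SAFE records the site so objtool's unret validation accepts it, and the int3 after the ret supplies the straight-line-speculation stop a thunk would otherwise provide. The annotation pattern is roughly the usual "record this address in a discard section" idiom; the section name below is invented for the sketch:

	/* Illustrative shape only; the real macro lives in
	 * <asm/nospec-branch.h> and objtool consumes the section. */
	#define ANNOTATE_UNRET_SAFE_SKETCH			\
		"999:\n\t"					\
		".pushsection .discard.unret_safe_sketch\n\t"	\
		".long 999b - .\n\t"				\
		".popsection\n\t"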
@@ -159,12 +163,15 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
 	xorl	%edx, %edx
 	xorl	%esi, %esi
 	xorl	%ebp, %ebp
+	ANNOTATE_UNRET_SAFE
 	ret
+	int3
 1:
 	popl	%edx
 	movl	CP_PA_SWAP_PAGE(%edi), %esp
 	addl	$PAGE_SIZE, %esp
 2:
+	ANNOTATE_RETPOLINE_SAFE
 	call	*%edx
 
 	/* get the re-entry point of the peer system */
@@ -190,7 +197,9 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
 	movl	%edi, %eax
 	addl	$(virtual_mapped - relocate_kernel), %eax
 	pushl	%eax
+	ANNOTATE_UNRET_SAFE
 	ret
+	int3
 SYM_CODE_END(identity_mapped)
 
 SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
@@ -208,7 +217,9 @@ SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
 	popl	%edi
 	popl	%esi
 	popl	%ebx
+	ANNOTATE_UNRET_SAFE
 	ret
+	int3
 SYM_CODE_END(virtual_mapped)
 
 	/* Do the copies */
@@ -271,7 +282,9 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
 	popl	%edi
 	popl	%ebx
 	popl	%ebp
+	ANNOTATE_UNRET_SAFE
 	ret
+	int3
 SYM_CODE_END(swap_pages)
 
 	.globl kexec_control_code_size
@@ -13,7 +13,8 @@
 #include <asm/unwind_hints.h>
 
 /*
- * Must be relocatable PIC code callable as a C function
+ * Must be relocatable PIC code callable as a C function, in particular
+ * there must be a plain RET and not jump to return thunk.
  */
 
 #define PTR(x) (x << 3)
@@ -104,7 +105,9 @@ SYM_CODE_START_NOALIGN(relocate_kernel)
 	/* jump to identity mapped page */
 	addq	$(identity_mapped - relocate_kernel), %r8
 	pushq	%r8
+	ANNOTATE_UNRET_SAFE
 	ret
+	int3
 SYM_CODE_END(relocate_kernel)
 
 SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
@@ -191,7 +194,9 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
 	xorl	%r14d, %r14d
 	xorl	%r15d, %r15d
 
+	ANNOTATE_UNRET_SAFE
 	ret
+	int3
 
 1:
 	popq	%rdx
@@ -210,7 +215,9 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
 	call	swap_pages
 	movq	$virtual_mapped, %rax
 	pushq	%rax
+	ANNOTATE_UNRET_SAFE
 	ret
+	int3
 SYM_CODE_END(identity_mapped)
 
 SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
@@ -231,7 +238,9 @@ SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
 	popq	%r12
 	popq	%rbp
 	popq	%rbx
+	ANNOTATE_UNRET_SAFE
 	ret
+	int3
 SYM_CODE_END(virtual_mapped)
 
 	/* Do the copies */
@@ -288,7 +297,9 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
 	lea	PAGE_SIZE(%rax), %rsi
 	jmp	0b
 3:
+	ANNOTATE_UNRET_SAFE
 	ret
+	int3
 SYM_CODE_END(swap_pages)
 
 	.globl kexec_control_code_size
@@ -236,7 +236,7 @@ static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
 			return ES_EXCEPTION;
 		}
 
-		if (!insn_decode(&ctxt->insn, ctxt->regs, buffer, res))
+		if (!insn_decode_from_regs(&ctxt->insn, ctxt->regs, buffer, res))
 			return ES_DECODE_FAILED;
 	} else {
 		res = vc_fetch_insn_kernel(ctxt, buffer);
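The rename here is fallout from the decoder rework earlier in this series: the old regs-aware helper becomes insn_decode_from_regs(), freeing the insn_decode() name for the new generic entry point that takes an explicit buffer, length and decode mode and returns a real error code. Hedged usage sketch, with the signature as I recall it:

	#include <asm/insn.h>  /* struct insn, insn_decode(), INSN_MODE_64 */

	static int decode_one_insn(const void *buf, int len)
	{
		struct insn insn;

		/* 0 on success, negative error on decode failure. */
		return insn_decode(&insn, buf, len, INSN_MODE_64);
	}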
@@ -85,5 +85,5 @@ SYM_FUNC_START(sev_verify_cbit)
 #endif
 	/* Return page-table pointer */
 	movq	%rdi, %rax
-	ret
+	RET
 SYM_FUNC_END(sev_verify_cbit)
Some files were not shown because too many files have changed in this diff.