
Changes in 5.10.133
KVM/VMX: Use TEST %REG,%REG instead of CMP $0,%REG in vmenter.S
KVM/nVMX: Use __vmx_vcpu_run in nested_vmx_check_vmentry_hw
objtool: Refactor ORC section generation
objtool: Add 'alt_group' struct
objtool: Support stack layout changes in alternatives
objtool: Support retpoline jump detection for vmlinux.o
objtool: Assume only ELF functions do sibling calls
objtool: Combine UNWIND_HINT_RET_OFFSET and UNWIND_HINT_FUNC
x86/xen: Support objtool validation in xen-asm.S
x86/xen: Support objtool vmlinux.o validation in xen-head.S
x86/alternative: Merge include files
x86/alternative: Support not-feature
x86/alternative: Support ALTERNATIVE_TERNARY
x86/alternative: Use ALTERNATIVE_TERNARY() in _static_cpu_has()
x86/insn: Rename insn_decode() to insn_decode_from_regs()
x86/insn: Add a __ignore_sync_check__ marker
x86/insn: Add an insn_decode() API
x86/insn-eval: Handle return values from the decoder
x86/alternative: Use insn_decode()
x86: Add insn_decode_kernel()
x86/alternatives: Optimize optimize_nops()
x86/retpoline: Simplify retpolines
objtool: Correctly handle retpoline thunk calls
objtool: Handle per arch retpoline naming
objtool: Rework the elf_rebuild_reloc_section() logic
objtool: Add elf_create_reloc() helper
objtool: Create reloc sections implicitly
objtool: Extract elf_strtab_concat()
objtool: Extract elf_symbol_add()
objtool: Add elf_create_undef_symbol()
objtool: Keep track of retpoline call sites
objtool: Cache instruction relocs
objtool: Skip magical retpoline .altinstr_replacement
objtool/x86: Rewrite retpoline thunk calls
objtool: Support asm jump tables
x86/alternative: Optimize single-byte NOPs at an arbitrary position
objtool: Fix .symtab_shndx handling for elf_create_undef_symbol()
objtool: Only rewrite unconditional retpoline thunk calls
objtool/x86: Ignore __x86_indirect_alt_* symbols
objtool: Don't make .altinstructions writable
objtool: Teach get_alt_entry() about more relocation types
objtool: print out the symbol type when complaining about it
objtool: Remove reloc symbol type checks in get_alt_entry()
objtool: Make .altinstructions section entry size consistent
objtool: Introduce CFI hash
objtool: Handle __sanitize_cov*() tail calls
objtool: Classify symbols
objtool: Explicitly avoid self modifying code in .altinstr_replacement
objtool,x86: Replace alternatives with .retpoline_sites
x86/retpoline: Remove unused replacement symbols
x86/asm: Fix register order
x86/asm: Fixup odd GEN-for-each-reg.h usage
x86/retpoline: Move the retpoline thunk declarations to nospec-branch.h
x86/retpoline: Create a retpoline thunk array
x86/alternative: Implement .retpoline_sites support
x86/alternative: Handle Jcc __x86_indirect_thunk_\reg
x86/alternative: Try inline spectre_v2=retpoline,amd
x86/alternative: Add debug prints to apply_retpolines()
bpf,x86: Simplify computing label offsets
bpf,x86: Respect X86_FEATURE_RETPOLINE*
x86/lib/atomic64_386_32: Rename things
x86: Prepare asm files for straight-line-speculation
x86: Prepare inline-asm for straight-line-speculation
x86/alternative: Relax text_poke_bp() constraint
objtool: Add straight-line-speculation validation
x86: Add straight-line-speculation mitigation
tools arch: Update arch/x86/lib/mem{cpy,set}_64.S copies used in 'perf bench mem memcpy'
kvm/emulate: Fix SETcc emulation function offsets with SLS
objtool: Default ignore INT3 for unreachable
crypto: x86/poly1305 - Fixup SLS
objtool: Fix SLS validation for kcov tail-call replacement
objtool: Fix code relocs vs weak symbols
objtool: Fix type of reloc::addend
objtool: Fix symbol creation
x86/entry: Remove skip_r11rcx
objtool: Fix objtool regression on x32 systems
x86/realmode: build with -D__DISABLE_EXPORTS
x86/kvm/vmx: Make noinstr clean
x86/cpufeatures: Move RETPOLINE flags to word 11
x86/retpoline: Cleanup some #ifdefery
x86/retpoline: Swizzle retpoline thunk
Makefile: Set retpoline cflags based on CONFIG_CC_IS_{CLANG,GCC}
x86/retpoline: Use -mfunction-return
x86: Undo return-thunk damage
x86,objtool: Create .return_sites
objtool: skip non-text sections when adding return-thunk sites
x86,static_call: Use alternative RET encoding
x86/ftrace: Use alternative RET encoding
x86/bpf: Use alternative RET encoding
x86/kvm: Fix SETcc emulation for return thunks
x86/vsyscall_emu/64: Don't use RET in vsyscall emulation
x86/sev: Avoid using __x86_return_thunk
x86: Use return-thunk in asm code
objtool: Treat .text.__x86.* as noinstr
x86: Add magic AMD return-thunk
x86/bugs: Report AMD retbleed vulnerability
x86/bugs: Add AMD retbleed= boot parameter
x86/bugs: Enable STIBP for JMP2RET
x86/bugs: Keep a per-CPU IA32_SPEC_CTRL value
x86/entry: Add kernel IBRS implementation
x86/bugs: Optimize SPEC_CTRL MSR writes
x86/speculation: Add spectre_v2=ibrs option to support Kernel IBRS
x86/bugs: Split spectre_v2_select_mitigation() and spectre_v2_user_select_mitigation()
x86/bugs: Report Intel retbleed vulnerability
intel_idle: Disable IBRS during long idle
objtool: Update Retpoline validation
x86/xen: Rename SYS* entry points
x86/bugs: Add retbleed=ibpb
x86/bugs: Do IBPB fallback check only once
objtool: Add entry UNRET validation
x86/cpu/amd: Add Spectral Chicken
x86/speculation: Fix RSB filling with CONFIG_RETPOLINE=n
x86/speculation: Fix firmware entry SPEC_CTRL handling
x86/speculation: Fix SPEC_CTRL write on SMT state change
x86/speculation: Use cached host SPEC_CTRL value for guest entry/exit
x86/speculation: Remove x86_spec_ctrl_mask
objtool: Re-add UNWIND_HINT_{SAVE_RESTORE}
KVM: VMX: Flatten __vmx_vcpu_run()
KVM: VMX: Convert launched argument to flags
KVM: VMX: Prevent guest RSB poisoning attacks with eIBRS
KVM: VMX: Fix IBRS handling after vmexit
x86/speculation: Fill RSB on vmexit for IBRS
x86/common: Stamp out the stepping madness
x86/cpu/amd: Enumerate BTC_NO
x86/retbleed: Add fine grained Kconfig knobs
x86/bugs: Add Cannon lake to RETBleed affected CPU list
x86/bugs: Do not enable IBPB-on-entry when IBPB is not supported
x86/kexec: Disable RET on kexec
x86/speculation: Disable RRSBA behavior
x86/static_call: Serialize __static_call_fixup() properly
tools/insn: Restore the relative include paths for cross building
x86, kvm: use proper ASM macros for kvm_vcpu_is_preempted
x86/xen: Fix initialisation in hypercall_page after rethunk
x86/ftrace: Add UNWIND_HINT_FUNC annotation for ftrace_stub
x86/asm/32: Fix ANNOTATE_UNRET_SAFE use on 32-bit
x86/speculation: Use DECLARE_PER_CPU for x86_spec_ctrl_current
efi/x86: use naked RET on mixed mode call wrapper
x86/kvm: fix FASTOP_SIZE when return thunks are enabled
KVM: emulate: do not adjust size of fastop and setcc subroutines
tools arch x86: Sync the msr-index.h copy with the kernel sources
tools headers cpufeatures: Sync with the kernel sources
x86/bugs: Remove apostrophe typo
um: Add missing apply_returns()
x86: Use -mindirect-branch-cs-prefix for RETPOLINE builds
kvm: fix objtool relocation warning
objtool: Fix elf_create_undef_symbol() endianness
tools arch: Update arch/x86/lib/mem{cpy,set}_64.S copies used in 'perf bench mem memcpy' - again
tools headers: Remove broken definition of __LITTLE_ENDIAN
Linux 5.10.133

Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
Change-Id: I7e23843058c509562ae3f3a68e0710f31249a087
// SPDX-License-Identifier: GPL-2.0-or-later
/* Kernel module help for x86.
   Copyright (C) 2001 Rusty Russell.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/jump_label.h>
#include <linux/random.h>
#include <linux/memory.h>

#include <asm/text-patching.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/unwind.h>

#if 0
#define DEBUGP(fmt, ...)				\
	printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
#define DEBUGP(fmt, ...)				\
do {							\
	if (0)						\
		printk(KERN_DEBUG fmt, ##__VA_ARGS__);	\
} while (0)
#endif

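/*
 * Note: the "if (0)" form above (rather than an empty macro body) keeps the
 * DEBUGP() arguments visible to the compiler, so format strings and argument
 * types stay checked even when the debug output is compiled out.
 */
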
#ifdef CONFIG_RANDOMIZE_BASE
static unsigned long module_load_offset;

/* Mutex protects the module_load_offset. */
static DEFINE_MUTEX(module_kaslr_mutex);

static unsigned long int get_module_load_offset(void)
{
	if (kaslr_enabled()) {
		mutex_lock(&module_kaslr_mutex);
		/*
		 * Calculate the module_load_offset the first time this
		 * code is called. Once calculated it stays the same until
		 * reboot.
		 */
		if (module_load_offset == 0)
			module_load_offset =
				(get_random_int() % 1024 + 1) * PAGE_SIZE;
		mutex_unlock(&module_kaslr_mutex);
	}
	return module_load_offset;
}
#else
static unsigned long int get_module_load_offset(void)
{
	return 0;
}
#endif

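/*
 * With CONFIG_RANDOMIZE_BASE, the module area start is shifted by a random,
 * page-aligned offset in the range [1, 1024] * PAGE_SIZE (4 KiB..4 MiB with
 * 4 KiB pages); the offset is computed on the first allocation and then
 * reused for every subsequent one until reboot.
 */
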
void *module_alloc(unsigned long size)
{
	void *p;

	if (PAGE_ALIGN(size) > MODULES_LEN)
		return NULL;

	p = __vmalloc_node_range(size, MODULE_ALIGN,
				 MODULES_VADDR + get_module_load_offset(),
				 MODULES_END, GFP_KERNEL,
				 PAGE_KERNEL, 0, NUMA_NO_NODE,
				 __builtin_return_address(0));
	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}

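/*
 * Relocation handling below: 32-bit kernels use ELF REL entries, where the
 * addend is the value already stored at the target location, while 64-bit
 * kernels use RELA entries, which carry an explicit addend in the
 * relocation record itself.
 */
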
#ifdef CONFIG_X86_32
int apply_relocate(Elf32_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	unsigned int i;
	Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	uint32_t *location;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(rel[i].r_info);

		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_386_32:
			/* We add the value into the location given */
			*location += sym->st_value;
			break;
		case R_386_PC32:
		case R_386_PLT32:
			/* Add the value, subtract its position */
			*location += sym->st_value - (uint32_t)location;
			break;
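		/*
		 * For the PC-relative cases above the final value is
		 * S + A - P: the symbol value plus the addend already
		 * stored at the location, minus the address of the
		 * location itself.
		 */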
		default:
			pr_err("%s: Unknown relocation: %u\n",
			       me->name, ELF32_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}
#else /*X86_64*/
static int __apply_relocate_add(Elf64_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me,
		   void *(*write)(void *dest, const void *src, size_t len))
{
	unsigned int i;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf64_Sym *sym;
	void *loc;
	u64 val;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
		       (int)ELF64_R_TYPE(rel[i].r_info),
		       sym->st_value, rel[i].r_addend, (u64)loc);

		val = sym->st_value + rel[i].r_addend;

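		/*
		 * RELA entries carry the addend explicitly, so the bytes
		 * at loc are expected to still be zero; a nonzero value
		 * suggests the relocation was already applied (or the
		 * target is corrupt) and is rejected below.
		 */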
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		case R_X86_64_NONE:
			break;
		case R_X86_64_64:
			if (*(u64 *)loc != 0)
				goto invalid_relocation;
			write(loc, &val, 8);
			break;
		case R_X86_64_32:
			if (*(u32 *)loc != 0)
				goto invalid_relocation;
			write(loc, &val, 4);
			if (val != *(u32 *)loc)
				goto overflow;
			break;
		case R_X86_64_32S:
			if (*(s32 *)loc != 0)
				goto invalid_relocation;
			write(loc, &val, 4);
			if ((s64)val != *(s32 *)loc)
				goto overflow;
			break;
		case R_X86_64_PC32:
		case R_X86_64_PLT32:
			if (*(u32 *)loc != 0)
				goto invalid_relocation;
			val -= (u64)loc;
			write(loc, &val, 4);
#if 0
			if ((s64)val != *(s32 *)loc)
				goto overflow;
#endif
			break;
		case R_X86_64_PC64:
			if (*(u64 *)loc != 0)
				goto invalid_relocation;
			val -= (u64)loc;
			write(loc, &val, 8);
			break;
		case R_X86_64_8:
			if (!strncmp(strtab + sym->st_name, "__typeid__", 10))
				break;
			/* fallthrough */
		default:
			pr_err("%s: Unknown rela relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;

invalid_relocation:
	pr_err("x86/modules: Skipping invalid relocation target, existing value is nonzero for type %d, loc %p, val %Lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), loc, val);
	return -ENOEXEC;

overflow:
	pr_err("overflow in relocation type %d val %Lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), val);
	pr_err("`%s' likely not compiled with -mcmodel=kernel\n",
	       me->name);
	return -ENOEXEC;
}

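/*
 * Relocations are written with plain memcpy() while the module is still
 * unformed, i.e. not yet visible to the rest of the kernel.  Late
 * relocations (such as ones applied for livepatching after the module is
 * live) target text that may already be executing, so they go through
 * text_poke() under text_mutex instead.
 */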
int apply_relocate_add(Elf64_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	int ret;
	bool early = me->state == MODULE_STATE_UNFORMED;
	void *(*write)(void *, const void *, size_t) = memcpy;

	if (!early) {
		write = text_poke;
		mutex_lock(&text_mutex);
	}

	ret = __apply_relocate_add(sechdrs, strtab, symindex, relsec, me,
				   write);

	if (!early) {
		text_poke_sync();
		mutex_unlock(&text_mutex);
	}

	return ret;
}

#endif

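/*
 * module_finalize() runs the same boot-time patching steps on a freshly
 * loaded module that the kernel applies to its own image: retpoline and
 * return-thunk sites, alternatives, SMP lock prefixes, paravirt patching,
 * jump labels and ORC unwind table registration.
 */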
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
		*para = NULL, *orc = NULL, *orc_ip = NULL,
		*retpolines = NULL, *returns = NULL;
	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
		if (!strcmp(".text", secstrings + s->sh_name))
			text = s;
		if (!strcmp(".altinstructions", secstrings + s->sh_name))
			alt = s;
		if (!strcmp(".smp_locks", secstrings + s->sh_name))
			locks = s;
		if (!strcmp(".parainstructions", secstrings + s->sh_name))
			para = s;
		if (!strcmp(".orc_unwind", secstrings + s->sh_name))
			orc = s;
		if (!strcmp(".orc_unwind_ip", secstrings + s->sh_name))
			orc_ip = s;
		if (!strcmp(".retpoline_sites", secstrings + s->sh_name))
			retpolines = s;
		if (!strcmp(".return_sites", secstrings + s->sh_name))
			returns = s;
	}

	if (retpolines) {
		void *rseg = (void *)retpolines->sh_addr;
		apply_retpolines(rseg, rseg + retpolines->sh_size);
	}
	if (returns) {
		void *rseg = (void *)returns->sh_addr;
		apply_returns(rseg, rseg + returns->sh_size);
	}
	if (alt) {
		/* patch .altinstructions */
		void *aseg = (void *)alt->sh_addr;
		apply_alternatives(aseg, aseg + alt->sh_size);
	}
	if (locks && text) {
		void *lseg = (void *)locks->sh_addr;
		void *tseg = (void *)text->sh_addr;
		alternatives_smp_module_add(me, me->name,
					    lseg, lseg + locks->sh_size,
					    tseg, tseg + text->sh_size);
	}

	if (para) {
		void *pseg = (void *)para->sh_addr;
		apply_paravirt(pseg, pseg + para->sh_size);
	}

	/* make jump label nops */
	jump_label_apply_nops(me);

	if (orc && orc_ip)
		unwind_module_init(me, (void *)orc_ip->sh_addr, orc_ip->sh_size,
				   (void *)orc->sh_addr, orc->sh_size);

	return 0;
}

void module_arch_cleanup(struct module *mod)
{
	alternatives_smp_module_del(mod);
}