ANDROID: add support for Clang's Control Flow Integrity (CFI)
This change adds the CONFIG_CFI_CLANG option, CFI error handling, and a faster look-up table for cross module CFI checks. Bug: 145210207 Change-Id: I68d620ca548a911e2f49ba801bc0531406e679a3 Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
This commit is contained in:

committed by
Alistair Delva

parent
3d04fb2c95
commit
2c351bb70a
21
Makefile
21
Makefile
@@ -915,6 +915,27 @@ KBUILD_CFLAGS += $(CC_FLAGS_LTO)
|
||||
export CC_FLAGS_LTO
|
||||
endif
|
||||
|
||||
ifdef CONFIG_CFI_CLANG
|
||||
CC_FLAGS_CFI := -fsanitize=cfi \
|
||||
-fsanitize-cfi-cross-dso \
|
||||
-fno-sanitize-cfi-canonical-jump-tables \
|
||||
-fno-sanitize-blacklist
|
||||
|
||||
ifdef CONFIG_CFI_PERMISSIVE
|
||||
CC_FLAGS_CFI += -fsanitize-recover=cfi \
|
||||
-fno-sanitize-trap=cfi
|
||||
else
|
||||
ifndef CONFIG_UBSAN_TRAP
|
||||
CC_FLAGS_CFI += -ftrap-function=__ubsan_handle_cfi_check_fail_abort
|
||||
endif
|
||||
endif
|
||||
|
||||
# If LTO flags are filtered out, we must also filter out CFI.
|
||||
CC_FLAGS_LTO += $(CC_FLAGS_CFI)
|
||||
KBUILD_CFLAGS += $(CC_FLAGS_CFI)
|
||||
export CC_FLAGS_CFI
|
||||
endif
|
||||
|
||||
ifdef CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_32B
|
||||
KBUILD_CFLAGS += -falign-functions=32
|
||||
endif
|
||||
|
23
arch/Kconfig
23
arch/Kconfig
@@ -704,6 +704,29 @@ config LTO_CLANG_THIN
|
||||
If unsure, say Y.
|
||||
endchoice
|
||||
|
||||
config CFI_CLANG
|
||||
bool "Use Clang's Control Flow Integrity (CFI)"
|
||||
depends on LTO_CLANG && KALLSYMS
|
||||
help
|
||||
This option enables Clang's Control Flow Integrity (CFI), which adds
|
||||
runtime checking for indirect function calls.
|
||||
|
||||
config CFI_CLANG_SHADOW
|
||||
bool "Use CFI shadow to speed up cross-module checks"
|
||||
default y
|
||||
depends on CFI_CLANG && MODULES
|
||||
help
|
||||
If you select this option, the kernel builds a fast look-up table of
|
||||
CFI check functions in loaded modules to reduce overhead.
|
||||
|
||||
config CFI_PERMISSIVE
|
||||
bool "Use CFI in permissive mode"
|
||||
depends on CFI_CLANG
|
||||
help
|
||||
When selected, Control Flow Integrity (CFI) violations result in a
|
||||
warning instead of a kernel panic. This option is useful for finding
|
||||
CFI violations during development.
|
||||
|
||||
config HAVE_ARCH_WITHIN_STACK_FRAMES
|
||||
bool
|
||||
help
|
||||
|
@@ -573,6 +573,22 @@
|
||||
. = ALIGN((align)); \
|
||||
__end_rodata = .;
|
||||
|
||||
|
||||
/*
|
||||
* .text..L.cfi.jumptable.* contain Control-Flow Integrity (CFI)
|
||||
* jump table entries.
|
||||
*/
|
||||
#ifdef CONFIG_CFI_CLANG
|
||||
#define TEXT_CFI_JT \
|
||||
. = ALIGN(PMD_SIZE); \
|
||||
__cfi_jt_start = .; \
|
||||
*(.text..L.cfi.jumptable .text..L.cfi.jumptable.*) \
|
||||
. = ALIGN(PMD_SIZE); \
|
||||
__cfi_jt_end = .;
|
||||
#else
|
||||
#define TEXT_CFI_JT
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Non-instrumentable text section
|
||||
*/
|
||||
@@ -599,6 +615,7 @@
|
||||
NOINSTR_TEXT \
|
||||
*(.text..refcount) \
|
||||
*(.ref.text) \
|
||||
TEXT_CFI_JT \
|
||||
MEM_KEEP(init.text*) \
|
||||
MEM_KEEP(exit.text*) \
|
||||
|
||||
|
41
include/linux/cfi.h
Normal file
41
include/linux/cfi.h
Normal file
@@ -0,0 +1,41 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Clang Control Flow Integrity (CFI) support.
 *
 * Copyright (C) 2019 Google LLC
 */
#ifndef _LINUX_CFI_H
#define _LINUX_CFI_H

/*
 * Forward declaration: this header is included from module.h before
 * struct module is defined, and must not include module.h itself.
 * Without this, the prototypes below would declare a parameter-list-scoped
 * struct ("'struct module' declared inside parameter list" warning).
 */
struct module;

#ifdef CONFIG_CFI_CLANG
/*
 * Type of the compiler-generated CFI check function: validates that
 * (id, ptr) is a permitted indirect call target; diag carries diagnostic
 * state for the permissive handlers.
 */
typedef void (*cfi_check_fn)(uint64_t id, void *ptr, void *diag);

/* Compiler-generated function in each module, and the kernel */
extern void __cfi_check(uint64_t id, void *ptr, void *diag);

/*
 * Force the compiler to generate a CFI jump table entry for a function
 * and store the jump table address to __cfi_jt_<function>.
 */
#define __CFI_ADDRESSABLE(fn) \
	const void *__cfi_jt_ ## fn __visible = (void *)&fn;

#ifdef CONFIG_CFI_CLANG_SHADOW

/* Maintain the module shadow look-up table on module load/unload */
extern void cfi_module_add(struct module *mod, unsigned long base_addr);
extern void cfi_module_remove(struct module *mod, unsigned long base_addr);

#else

static inline void cfi_module_add(struct module *mod, unsigned long base_addr) {}
static inline void cfi_module_remove(struct module *mod, unsigned long base_addr) {}

#endif /* CONFIG_CFI_CLANG_SHADOW */

#else /* !CONFIG_CFI_CLANG */

#define __CFI_ADDRESSABLE(fn)

#endif /* CONFIG_CFI_CLANG */

#endif /* _LINUX_CFI_H */
|
@@ -65,3 +65,5 @@
|
||||
#if __has_feature(shadow_call_stack)
|
||||
# define __noscs __attribute__((__no_sanitize__("shadow-call-stack")))
|
||||
#endif
|
||||
|
||||
#define __nocfi __attribute__((__no_sanitize__("cfi")))
|
||||
|
@@ -237,6 +237,10 @@ struct ftrace_likely_data {
|
||||
# define __noscs
|
||||
#endif
|
||||
|
||||
#ifndef __nocfi
|
||||
# define __nocfi
|
||||
#endif
|
||||
|
||||
#ifndef asm_volatile_goto
|
||||
#define asm_volatile_goto(x...) asm goto(x)
|
||||
#endif
|
||||
|
@@ -47,7 +47,7 @@
|
||||
|
||||
/* These are for everybody (although not all archs will actually
|
||||
discard it in modules) */
|
||||
#define __init __section(".init.text") __cold __latent_entropy __noinitretpoline
|
||||
#define __init __section(".init.text") __cold __latent_entropy __noinitretpoline __nocfi
|
||||
#define __initdata __section(".init.data")
|
||||
#define __initconst __section(".init.rodata")
|
||||
#define __exitdata __section(".exit.data")
|
||||
|
@@ -26,6 +26,7 @@
|
||||
#include <linux/tracepoint-defs.h>
|
||||
#include <linux/srcu.h>
|
||||
#include <linux/static_call_types.h>
|
||||
#include <linux/cfi.h>
|
||||
|
||||
#include <linux/percpu.h>
|
||||
#include <asm/module.h>
|
||||
@@ -131,13 +132,17 @@ extern void cleanup_module(void);
|
||||
#define module_init(initfn) \
|
||||
static inline initcall_t __maybe_unused __inittest(void) \
|
||||
{ return initfn; } \
|
||||
int init_module(void) __copy(initfn) __attribute__((alias(#initfn)));
|
||||
int init_module(void) __copy(initfn) \
|
||||
__attribute__((alias(#initfn))); \
|
||||
__CFI_ADDRESSABLE(init_module)
|
||||
|
||||
/* This is only required if you want to be unloadable. */
|
||||
#define module_exit(exitfn) \
|
||||
static inline exitcall_t __maybe_unused __exittest(void) \
|
||||
{ return exitfn; } \
|
||||
void cleanup_module(void) __copy(exitfn) __attribute__((alias(#exitfn)));
|
||||
void cleanup_module(void) __copy(exitfn) \
|
||||
__attribute__((alias(#exitfn))); \
|
||||
__CFI_ADDRESSABLE(cleanup_module)
|
||||
|
||||
#endif
|
||||
|
||||
@@ -379,6 +384,10 @@ struct module {
|
||||
const s32 *crcs;
|
||||
unsigned int num_syms;
|
||||
|
||||
#ifdef CONFIG_CFI_CLANG
|
||||
cfi_check_fn cfi_check;
|
||||
#endif
|
||||
|
||||
/* Kernel parameters. */
|
||||
#ifdef CONFIG_SYSFS
|
||||
struct mutex param_lock;
|
||||
|
@@ -2323,7 +2323,7 @@ endif # MODULES
|
||||
|
||||
config MODULES_TREE_LOOKUP
|
||||
def_bool y
|
||||
depends on PERF_EVENTS || TRACING
|
||||
depends on PERF_EVENTS || TRACING || CFI_CLANG
|
||||
|
||||
config INIT_ALL_POSSIBLE
|
||||
bool
|
||||
|
@@ -38,6 +38,9 @@ KASAN_SANITIZE_kcov.o := n
|
||||
KCSAN_SANITIZE_kcov.o := n
|
||||
CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack) -fno-stack-protector
|
||||
|
||||
# Don't instrument error handlers
|
||||
CFLAGS_REMOVE_cfi.o := $(CC_FLAGS_CFI)
|
||||
|
||||
obj-y += sched/
|
||||
obj-y += locking/
|
||||
obj-y += power/
|
||||
@@ -108,6 +111,7 @@ obj-$(CONFIG_BPF) += bpf/
|
||||
obj-$(CONFIG_KCSAN) += kcsan/
|
||||
obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o
|
||||
obj-$(CONFIG_HAVE_STATIC_CALL_INLINE) += static_call.o
|
||||
obj-$(CONFIG_CFI_CLANG) += cfi.o
|
||||
|
||||
obj-$(CONFIG_PERF_EVENTS) += events/
|
||||
|
||||
|
349
kernel/cfi.c
Normal file
349
kernel/cfi.c
Normal file
@@ -0,0 +1,349 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Clang Control Flow Integrity (CFI) error and slowpath handling.
|
||||
*
|
||||
* Copyright (C) 2019 Google LLC
|
||||
*/
|
||||
|
||||
#include <linux/hardirq.h>
|
||||
#include <linux/kallsyms.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/printk.h>
|
||||
#include <linux/ratelimit.h>
|
||||
#include <linux/rcupdate.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/set_memory.h>
|
||||
|
||||
/*
 * Compiler-defined handler names.
 *
 * Clang's cross-DSO CFI instrumentation calls a fixed set of symbols; which
 * ones depends on whether failures are recoverable. Map our local names to
 * the expected symbol for the configured mode so the definitions below are
 * emitted under the right name.
 */
#ifdef CONFIG_CFI_PERMISSIVE
#define cfi_failure_handler __ubsan_handle_cfi_check_fail
#define cfi_slowpath_handler __cfi_slowpath_diag
#else /* enforcing */
#define cfi_failure_handler __ubsan_handle_cfi_check_fail_abort
#define cfi_slowpath_handler __cfi_slowpath
#endif /* CONFIG_CFI_PERMISSIVE */
|
||||
|
||||
static inline void handle_cfi_failure(void *ptr)
|
||||
{
|
||||
if (IS_ENABLED(CONFIG_CFI_PERMISSIVE))
|
||||
WARN_RATELIMIT(1, "CFI failure (target: %pS):\n", ptr);
|
||||
else
|
||||
panic("CFI failure (target: %pS)\n", ptr);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_MODULES
|
||||
#ifdef CONFIG_CFI_CLANG_SHADOW
/*
 * Index type. A 16-bit index can address at most (2^16)-2 pages (taking
 * into account SHADOW_INVALID), i.e. ~256M with 4k pages.
 */
typedef u16 shadow_t;
#define SHADOW_INVALID ((shadow_t)~0UL)

struct cfi_shadow {
	/* Page index for the beginning of the shadow */
	unsigned long base;
	/* An array of __cfi_check locations (as indices to the shadow) */
	shadow_t shadow[1];
} __packed;

/*
 * The shadow covers ~128M from the beginning of the module region. If
 * the region is larger, we fall back to __module_address for the rest.
 */
#define __SHADOW_RANGE (_UL(SZ_128M) >> PAGE_SHIFT)

/* The in-memory size of struct cfi_shadow, always at least one page */
#define __SHADOW_PAGES ((__SHADOW_RANGE * sizeof(shadow_t)) >> PAGE_SHIFT)
#define SHADOW_PAGES max(1UL, __SHADOW_PAGES)
#define SHADOW_SIZE (SHADOW_PAGES << PAGE_SHIFT)

/* The actual size of the shadow array, minus metadata */
#define SHADOW_ARR_SIZE (SHADOW_SIZE - offsetof(struct cfi_shadow, shadow))
#define SHADOW_ARR_SLOTS (SHADOW_ARR_SIZE / sizeof(shadow_t))

/* Serializes shadow replacement (writers); readers use RCU */
static DEFINE_MUTEX(shadow_update_lock);
static struct cfi_shadow __rcu *cfi_shadow __read_mostly;
|
||||
|
||||
/* Returns the index in the shadow for the given address */
|
||||
static inline int ptr_to_shadow(const struct cfi_shadow *s, unsigned long ptr)
|
||||
{
|
||||
unsigned long index;
|
||||
unsigned long page = ptr >> PAGE_SHIFT;
|
||||
|
||||
if (unlikely(page < s->base))
|
||||
return -1; /* Outside of module area */
|
||||
|
||||
index = page - s->base;
|
||||
|
||||
if (index >= SHADOW_ARR_SLOTS)
|
||||
return -1; /* Cannot be addressed with shadow */
|
||||
|
||||
return (int)index;
|
||||
}
|
||||
|
||||
/* Returns the page address for an index in the shadow */
|
||||
static inline unsigned long shadow_to_ptr(const struct cfi_shadow *s,
|
||||
int index)
|
||||
{
|
||||
if (unlikely(index < 0 || index >= SHADOW_ARR_SLOTS))
|
||||
return 0;
|
||||
|
||||
return (s->base + index) << PAGE_SHIFT;
|
||||
}
|
||||
|
||||
/* Returns the __cfi_check function address for the given shadow location */
|
||||
static inline unsigned long shadow_to_check_fn(const struct cfi_shadow *s,
|
||||
int index)
|
||||
{
|
||||
if (unlikely(index < 0 || index >= SHADOW_ARR_SLOTS))
|
||||
return 0;
|
||||
|
||||
if (unlikely(s->shadow[index] == SHADOW_INVALID))
|
||||
return 0;
|
||||
|
||||
/* __cfi_check is always page aligned */
|
||||
return (s->base + s->shadow[index]) << PAGE_SHIFT;
|
||||
}
|
||||
|
||||
/*
 * Initialize @next from @prev, translating entries to @next's (possibly
 * different) base page. Called with shadow_update_lock held while @next is
 * still writable.
 */
static void prepare_next_shadow(const struct cfi_shadow __rcu *prev,
			struct cfi_shadow *next)
{
	int i, index, check;

	/* Mark everything invalid */
	memset(next->shadow, 0xFF, SHADOW_ARR_SIZE);

	if (!prev)
		return; /* No previous shadow */

	/* If the base address didn't change, an update is not needed */
	if (prev->base == next->base) {
		memcpy(next->shadow, prev->shadow, SHADOW_ARR_SIZE);
		return;
	}

	/* Convert the previous shadow to the new address range */
	for (i = 0; i < SHADOW_ARR_SLOTS; ++i) {
		if (prev->shadow[i] == SHADOW_INVALID)
			continue;

		/* Slot i's page, re-indexed against the new base */
		index = ptr_to_shadow(next, shadow_to_ptr(prev, i));
		if (index < 0)
			continue; /* Fell outside the new shadow range */

		/*
		 * Re-index the __cfi_check address too. Note this reads the
		 * old shadow at the check function's own slot, which holds
		 * its own index (stored by add_module_to_shadow()).
		 */
		check = ptr_to_shadow(next,
				shadow_to_check_fn(prev, prev->shadow[i]));
		if (check < 0)
			continue;

		next->shadow[index] = (shadow_t)check;
	}
}
|
||||
|
||||
/*
 * Record @mod's __cfi_check location for each of its pages in
 * [min_addr, max_addr] (both page-aligned by the caller).
 */
static void add_module_to_shadow(struct cfi_shadow *s, struct module *mod,
				unsigned long min_addr, unsigned long max_addr)
{
	int check_index;
	unsigned long check = (unsigned long)mod->cfi_check;
	unsigned long ptr;

	/* shadow_to_check_fn() can only reconstruct page-aligned addresses */
	if (unlikely(!PAGE_ALIGNED(check))) {
		pr_warn("cfi: not using shadow for module %s\n", mod->name);
		return;
	}

	check_index = ptr_to_shadow(s, check);
	if (check_index < 0)
		return; /* Module not addressable with shadow */

	/* For each page, store the check function index in the shadow */
	for (ptr = min_addr; ptr <= max_addr; ptr += PAGE_SIZE) {
		int index = ptr_to_shadow(s, ptr);

		if (index >= 0) {
			/* Each page must only contain one module */
			WARN_ON_ONCE(s->shadow[index] != SHADOW_INVALID);
			s->shadow[index] = (shadow_t)check_index;
		}
	}
}
|
||||
|
||||
static void remove_module_from_shadow(struct cfi_shadow *s, struct module *mod,
|
||||
unsigned long min_addr, unsigned long max_addr)
|
||||
{
|
||||
unsigned long ptr;
|
||||
|
||||
for (ptr = min_addr; ptr <= max_addr; ptr += PAGE_SIZE) {
|
||||
int index = ptr_to_shadow(s, ptr);
|
||||
|
||||
if (index >= 0)
|
||||
s->shadow[index] = SHADOW_INVALID;
|
||||
}
|
||||
}
|
||||
|
||||
typedef void (*update_shadow_fn)(struct cfi_shadow *, struct module *,
|
||||
unsigned long min_addr, unsigned long max_addr);
|
||||
|
||||
/*
 * Build a new shadow (translated from the current one), apply @fn for
 * @mod's core text pages, and publish it via RCU, freeing the old shadow
 * after a grace period. On allocation failure the shadow is dropped
 * (published as NULL) and look-ups fall back to __module_address().
 */
static void update_shadow(struct module *mod, unsigned long base_addr,
		update_shadow_fn fn)
{
	struct cfi_shadow *prev;
	struct cfi_shadow *next;
	unsigned long min_addr, max_addr;

	next = (struct cfi_shadow *)vmalloc(SHADOW_SIZE);
	WARN_ON(!next);

	mutex_lock(&shadow_update_lock);
	prev = rcu_dereference_protected(cfi_shadow,
			mutex_is_locked(&shadow_update_lock));

	if (next) {
		next->base = base_addr >> PAGE_SHIFT;
		prepare_next_shadow(prev, next);

		/* Only the module's executable mapping is tracked */
		min_addr = (unsigned long)mod->core_layout.base;
		max_addr = min_addr + mod->core_layout.text_size;
		fn(next, mod, min_addr & PAGE_MASK, max_addr & PAGE_MASK);

		/* Write-protect the table before making it visible */
		set_memory_ro((unsigned long)next, SHADOW_PAGES);
	}

	rcu_assign_pointer(cfi_shadow, next);
	mutex_unlock(&shadow_update_lock);
	/* Wait out readers of the old shadow before freeing it */
	synchronize_rcu();

	if (prev) {
		/* Must be writable again before vfree() */
		set_memory_rw((unsigned long)prev, SHADOW_PAGES);
		vfree(prev);
	}
}
|
||||
|
||||
/* Register @mod's pages in the CFI shadow; called from module load */
void cfi_module_add(struct module *mod, unsigned long base_addr)
{
	update_shadow(mod, base_addr, add_module_to_shadow);
}
|
||||
|
||||
/* Drop @mod's pages from the CFI shadow; called from module unload */
void cfi_module_remove(struct module *mod, unsigned long base_addr)
{
	update_shadow(mod, base_addr, remove_module_from_shadow);
}
|
||||
|
||||
/* Look up the __cfi_check function for @ptr in @s, or NULL */
static inline cfi_check_fn ptr_to_check_fn(const struct cfi_shadow __rcu *s,
				unsigned long ptr)
{
	int slot;

	if (unlikely(!s))
		return NULL; /* No shadow available */

	slot = ptr_to_shadow(s, ptr);

	/* shadow_to_check_fn() yields 0 (NULL) for empty slots */
	return slot < 0 ? NULL : (cfi_check_fn)shadow_to_check_fn(s, slot);
}
|
||||
|
||||
/* RCU-protected shadow look-up of the check function for @ptr */
static inline cfi_check_fn __find_shadow_check_fn(unsigned long ptr)
{
	cfi_check_fn fn;

	/* sched flavor: pairs with synchronize_rcu() in update_shadow() */
	rcu_read_lock_sched();
	fn = ptr_to_check_fn(rcu_dereference_sched(cfi_shadow), ptr);
	rcu_read_unlock_sched();

	return fn;
}
|
||||
|
||||
#else /* !CONFIG_CFI_CLANG_SHADOW */
|
||||
|
||||
/* No shadow configured: always fall back to __module_address() look-up */
static inline cfi_check_fn __find_shadow_check_fn(unsigned long ptr)
{
	return NULL;
}
|
||||
|
||||
#endif /* CONFIG_CFI_CLANG_SHADOW */
|
||||
|
||||
static inline cfi_check_fn __find_module_check_fn(unsigned long ptr)
|
||||
{
|
||||
cfi_check_fn fn = NULL;
|
||||
struct module *mod;
|
||||
|
||||
rcu_read_lock_sched();
|
||||
mod = __module_address(ptr);
|
||||
if (mod)
|
||||
fn = mod->cfi_check;
|
||||
rcu_read_unlock_sched();
|
||||
|
||||
return fn;
|
||||
}
|
||||
|
||||
/* Resolve the __cfi_check function responsible for @ptr, or NULL */
static inline cfi_check_fn find_check_fn(unsigned long ptr)
{
	bool rcu;
	cfi_check_fn fn = NULL;

	/*
	 * Indirect call checks can happen when RCU is not watching. Both
	 * the shadow and __module_address use RCU, so we need to wake it
	 * up before proceeding. Use rcu_nmi_enter/exit() as these calls
	 * can happen anywhere.
	 */
	rcu = rcu_is_watching();
	if (!rcu)
		rcu_nmi_enter();

	/* Fast path: the shadow look-up table */
	if (IS_ENABLED(CONFIG_CFI_CLANG_SHADOW)) {
		fn = __find_shadow_check_fn(ptr);
		if (fn)
			goto out;
	}

	/* Core kernel targets use the kernel's own __cfi_check */
	if (is_kernel_text(ptr)) {
		fn = __cfi_check;
		goto out;
	}

	/* Slow path: walk the module tree */
	fn = __find_module_check_fn(ptr);

out:
	if (!rcu)
		rcu_nmi_exit();

	return fn;
}
|
||||
|
||||
/*
 * Cross-DSO CFI slow path; the exported symbol name this builds to depends
 * on CONFIG_CFI_PERMISSIVE (see the handler-name macros above in this file).
 */
void cfi_slowpath_handler(uint64_t id, void *ptr, void *diag)
{
	cfi_check_fn fn = find_check_fn((unsigned long)ptr);

	/* Don't allow unchecked modules */
	if (unlikely(!fn)) {
		handle_cfi_failure(ptr);
		return;
	}

	fn(id, ptr, diag);
}
|
||||
|
||||
#else /* !CONFIG_MODULES */
|
||||
|
||||
/*
 * With CONFIG_MODULES=n there is no module look-up to try, so any slow-path
 * call is treated as an unchecked target.
 */
void cfi_slowpath_handler(uint64_t id, void *ptr, void *diag)
{
	handle_cfi_failure(ptr); /* No modules */
}
|
||||
|
||||
#endif /* CONFIG_MODULES */
|
||||
|
||||
EXPORT_SYMBOL(cfi_slowpath_handler);
|
||||
|
||||
/*
 * CFI failure entry point; the actual symbol name is selected by the
 * cfi_failure_handler macro at the top of this file.
 */
void cfi_failure_handler(void *data, void *ptr, void *vtable)
{
	handle_cfi_failure(ptr);
}
EXPORT_SYMBOL(cfi_failure_handler);
|
||||
|
||||
/* Failure entry reached via __cfi_check; data/context is unused here */
void __cfi_check_fail(void *data, void *ptr)
{
	handle_cfi_failure(ptr);
}
|
@@ -2209,6 +2209,8 @@ void __weak module_arch_freeing_init(struct module *mod)
|
||||
{
|
||||
}
|
||||
|
||||
static void cfi_cleanup(struct module *mod);
|
||||
|
||||
/* Free a module, remove from lists, etc. */
|
||||
static void free_module(struct module *mod)
|
||||
{
|
||||
@@ -2248,6 +2250,9 @@ static void free_module(struct module *mod)
|
||||
synchronize_rcu();
|
||||
mutex_unlock(&module_mutex);
|
||||
|
||||
/* Clean up CFI for the module. */
|
||||
cfi_cleanup(mod);
|
||||
|
||||
/* This may be empty, but that's OK */
|
||||
module_arch_freeing_init(mod);
|
||||
module_memfree(mod->init_layout.base);
|
||||
@@ -3828,6 +3833,8 @@ static int unknown_module_param_cb(char *param, char *val, const char *modname,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void cfi_init(struct module *mod);
|
||||
|
||||
/* Allocate and load the module: note that size of section 0 is always
|
||||
zero, and we rely on this for optional sections. */
|
||||
static int load_module(struct load_info *info, const char __user *uargs,
|
||||
@@ -3931,6 +3938,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
|
||||
|
||||
flush_module_icache(mod);
|
||||
|
||||
/* Setup CFI for the module. */
|
||||
cfi_init(mod);
|
||||
|
||||
/* Now copy in args */
|
||||
mod->args = strndup_user(uargs, ~0UL >> 1);
|
||||
if (IS_ERR(mod->args)) {
|
||||
@@ -4004,6 +4014,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
|
||||
synchronize_rcu();
|
||||
kfree(mod->args);
|
||||
free_arch_cleanup:
|
||||
cfi_cleanup(mod);
|
||||
module_arch_cleanup(mod);
|
||||
free_modinfo:
|
||||
free_modinfo(mod);
|
||||
@@ -4341,6 +4352,36 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
|
||||
}
|
||||
#endif /* CONFIG_KALLSYMS */
|
||||
|
||||
/*
 * Set up CFI for a freshly loaded module: cache its __cfi_check function
 * and redirect init/exit to the CFI jump table entries. Called from
 * load_module() before the module runs.
 */
static void cfi_init(struct module *mod)
{
#ifdef CONFIG_CFI_CLANG
	initcall_t *init;
	exitcall_t *exit;

	rcu_read_lock_sched();
	/* The module's own __cfi_check(), used by the CFI slow path */
	mod->cfi_check = (cfi_check_fn)
		find_kallsyms_symbol_value(mod, "__cfi_check");
	/* __CFI_ADDRESSABLE() stored the jump table addresses here */
	init = (initcall_t *)
		find_kallsyms_symbol_value(mod, "__cfi_jt_init_module");
	exit = (exitcall_t *)
		find_kallsyms_symbol_value(mod, "__cfi_jt_cleanup_module");
	rcu_read_unlock_sched();

	/* Fix init/exit functions to point to the CFI jump table */
	if (init)
		mod->init = *init;
	if (exit)
		mod->exit = *exit;

	cfi_module_add(mod, module_addr_min);
#endif
}
|
||||
|
||||
/* Tear down CFI state for @mod: drop its pages from the shadow */
static void cfi_cleanup(struct module *mod)
{
#ifdef CONFIG_CFI_CLANG
	cfi_module_remove(mod, module_addr_min);
#endif
}
|
||||
|
||||
/* Maximum number of characters written by module_flags() */
|
||||
#define MODULE_FLAGS_BUF_SIZE (TAINT_FLAGS_COUNT + 4)
|
||||
|
||||
|
@@ -23,7 +23,9 @@ modname = $(notdir $(@:.mod.o=))
|
||||
part-of-module = y
|
||||
|
||||
quiet_cmd_cc_o_c = CC [M] $@
|
||||
cmd_cc_o_c = $(CC) $(c_flags) -c -o $@ $<
|
||||
cmd_cc_o_c = \
|
||||
$(CC) $(filter-out $(CC_FLAGS_CFI) $(CC_FLAGS_FTRACE), \
|
||||
$(c_flags)) -c -o $@ $<
|
||||
|
||||
%.mod.o: %.mod.c FORCE
|
||||
$(call if_changed_dep,cc_o_c)
|
||||
|
Reference in New Issue
Block a user