Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:
====================
pull-request: bpf-next 2017-12-18
The following pull-request contains BPF updates for your *net-next* tree.
The main changes are:
1) Allow arbitrary function calls from one BPF function to another BPF function.
Up to now, when writing BPF programs, __always_inline had to be used in
the BPF C programs for all functions, unnecessarily causing LLVM to inflate
code size. Handle this more naturally with support for BPF to BPF calls
such that this __always_inline restriction can be overcome. As a result,
it allows for better optimized code and finally makes it possible to
introduce core BPF libraries in the future that can be reused across
different projects.
x86 and arm64 JIT support was added as well, from Alexei.
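As a quick illustration of what this enables on the BPF C side (a minimal
sketch, not taken from this series; SEC() and bpf_helpers.h are assumed to
be the usual selftests helpers), a subfunction can now stay a plain static
function and LLVM emits a real BPF-to-BPF call for it:

  #include <linux/bpf.h>
  #include <linux/if_ether.h>
  #include "bpf_helpers.h"   /* assumed: selftests' SEC() macro */

  /* No __always_inline needed anymore; this becomes a real subprogram. */
  static int parse_eth(struct xdp_md *ctx)
  {
          void *data_end = (void *)(long)ctx->data_end;
          void *data = (void *)(long)ctx->data;
          struct ethhdr *eth = data;

          if (data + sizeof(*eth) > data_end)
                  return XDP_DROP;
          return eth->h_proto ? XDP_PASS : XDP_DROP;
  }

  SEC("xdp")
  int xdp_main(struct xdp_md *ctx)
  {
          return parse_eth(ctx);   /* compiled as a bpf-to-bpf call */
  }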
2) Add infrastructure for tagging functions as error injectable and allow for
BPF to return arbitrary error values when BPF is attached via kprobes on
those. This way of injecting errors generically eases testing and debugging
without having to recompile or restart the kernel. Tags for opting in to
this facility are added with BPF_ALLOW_ERROR_INJECTION(), from Josef.
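For illustration, the BPF side of such an error injection could look roughly
like the following (a sketch only; the probed function name is made up, and
the SEC()/bpf_override_return() stubs are assumed to come from the selftests'
bpf_helpers.h):

  #include <linux/ptrace.h>
  #include <uapi/linux/bpf.h>
  #include "bpf_helpers.h"   /* assumed: SEC() and bpf_override_return() stub */

  /* "my_subsys_setup" is a hypothetical name; the real function must carry
   * BPF_ALLOW_ERROR_INJECTION(my_subsys_setup) in its kernel source file.
   */
  SEC("kprobe/my_subsys_setup")
  int force_enomem(struct pt_regs *ctx)
  {
          unsigned long err = -12;   /* -ENOMEM */

          bpf_override_return(ctx, err);
          return 0;
  }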
3) For BPF offload via nfp JIT, add support for bpf_xdp_adjust_head() helper
call for XDP programs. The first part of this work adds handling of BPF
capabilities included in the firmware, and the later patches add support
to the nfp verifier part and JIT as well as some small optimizations,
from Jakub.
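For context, the helper itself is used from XDP programs roughly like this
(a generic sketch, not nfp specific; the only series-specific part is that
nfp can now offload the call):

  #include <linux/bpf.h>
  #include "bpf_helpers.h"   /* assumed: SEC() and helper stubs */

  SEC("xdp")
  int xdp_reserve_headroom(struct xdp_md *ctx)
  {
          /* a negative delta moves the packet start into the headroom,
           * e.g. to make room for pushing an encapsulation header
           */
          if (bpf_xdp_adjust_head(ctx, -8))
                  return XDP_DROP;
          return XDP_PASS;
  }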
4) The bpftool now also gets support for basic cgroup BPF operations such
as attaching, detaching and listing current BPF programs. As a requirement
for the attach part, bpftool can now also load object files through
'bpftool prog load'. This reuses libbpf which we have in the kernel tree
as well. bpftool-cgroup man page is added along with it, from Roman.
5) Back then, commit e87c6bc385 ("bpf: permit multiple bpf attachments for
a single perf event") added support for attaching multiple BPF programs
to a single perf event. Given they are configured through perf's ioctl()
interface, the interface has been extended with a PERF_EVENT_IOC_QUERY_BPF
command in this work in order to return an array of one or multiple BPF
prog ids that are currently attached, from Yonghong.
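From userspace the query looks roughly like this (a sketch with error
handling trimmed; 'pfd' is assumed to be a perf event fd for the
kprobe/tracepoint the programs are attached to):

  #include <linux/perf_event.h>
  #include <sys/ioctl.h>
  #include <stdio.h>
  #include <stdlib.h>

  static void dump_attached_progs(int pfd)
  {
          struct perf_event_query_bpf *query;
          __u32 i, max_ids = 16;

          query = calloc(1, sizeof(*query) + max_ids * sizeof(__u32));
          if (!query)
                  return;
          query->ids_len = max_ids;

          if (!ioctl(pfd, PERF_EVENT_IOC_QUERY_BPF, query))
                  for (i = 0; i < query->prog_cnt && i < max_ids; i++)
                          printf("attached BPF prog id: %u\n", query->ids[i]);

          free(query);
  }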
6) Various minor fixes and cleanups to bpftool's Makefile as well
as new 'uninstall' and 'doc-uninstall' targets for removing bpftool
itself or previously installed documentation related to it, from Quentin.
7) Add CONFIG_CGROUP_BPF=y to the BPF kernel selftest config file which is
required for the test_dev_cgroup test case to run, from Naresh.
8) Fix reporting of XDP prog_flags for nfp driver, from Jakub.
9) Fix libbpf's exit code from the Makefile when libelf was not found in
the system, also from Jakub.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -94,6 +94,7 @@ struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
        fp->pages = size / PAGE_SIZE;
        fp->aux = aux;
        fp->aux->prog = fp;
        fp->jit_requested = ebpf_jit_enabled();

        INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);

@@ -217,30 +218,40 @@ int bpf_prog_calc_tag(struct bpf_prog *fp)
        return 0;
}

static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
{
        return BPF_CLASS(insn->code) == BPF_JMP &&
               /* Call and Exit are both special jumps with no
                * target inside the BPF instruction image.
                */
               BPF_OP(insn->code) != BPF_CALL &&
               BPF_OP(insn->code) != BPF_EXIT;
}

static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta)
{
        struct bpf_insn *insn = prog->insnsi;
        u32 i, insn_cnt = prog->len;
        bool pseudo_call;
        u8 code;
        int off;

        for (i = 0; i < insn_cnt; i++, insn++) {
                if (!bpf_is_jmp_and_has_target(insn))
                code = insn->code;
                if (BPF_CLASS(code) != BPF_JMP)
                        continue;
                if (BPF_OP(code) == BPF_EXIT)
                        continue;
                if (BPF_OP(code) == BPF_CALL) {
                        if (insn->src_reg == BPF_PSEUDO_CALL)
                                pseudo_call = true;
                        else
                                continue;
                } else {
                        pseudo_call = false;
                }
                off = pseudo_call ? insn->imm : insn->off;

                /* Adjust offset of jmps if we cross boundaries. */
                if (i < pos && i + insn->off + 1 > pos)
                        insn->off += delta;
                else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
                        insn->off -= delta;
                if (i < pos && i + off + 1 > pos)
                        off += delta;
                else if (i > pos + delta && i + off + 1 <= pos + delta)
                        off -= delta;

                if (pseudo_call)
                        insn->imm = off;
                else
                        insn->off = off;
        }
}

@@ -711,7 +722,7 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
        struct bpf_insn *insn;
        int i, rewritten;

        if (!bpf_jit_blinding_enabled())
        if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
                return prog;

        clone = bpf_prog_clone_create(prog, GFP_USER);
@@ -753,6 +764,7 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
                i += insn_delta;
        }

        clone->blinded = 1;
        return clone;
}
#endif /* CONFIG_BPF_JIT */
@@ -774,8 +786,7 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
 *
 * Decode and execute eBPF instructions.
 */
static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
                                    u64 *stack)
static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
{
        u64 tmp;
        static const void *jumptable[256] = {
@@ -835,6 +846,7 @@ static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
                [BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
                /* Call instruction */
                [BPF_JMP | BPF_CALL] = &&JMP_CALL,
                [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
                [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
                /* Jumps */
                [BPF_JMP | BPF_JA] = &&JMP_JA,
@@ -1025,6 +1037,13 @@ select_insn:
                                                       BPF_R4, BPF_R5);
                CONT;

        JMP_CALL_ARGS:
                BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
                                                            BPF_R3, BPF_R4,
                                                            BPF_R5,
                                                            insn + insn->off + 1);
                CONT;

        JMP_TAIL_CALL: {
                struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
                struct bpf_array *array = container_of(map, struct bpf_array, map);
@@ -1297,6 +1316,23 @@ static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn
        return ___bpf_prog_run(regs, insn, stack); \
}

#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
                                      const struct bpf_insn *insn) \
{ \
        u64 stack[stack_size / sizeof(u64)]; \
        u64 regs[MAX_BPF_REG]; \
\
        FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
        BPF_R1 = r1; \
        BPF_R2 = r2; \
        BPF_R3 = r3; \
        BPF_R4 = r4; \
        BPF_R5 = r5; \
        return ___bpf_prog_run(regs, insn, stack); \
}

#define EVAL1(FN, X) FN(X)
#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
@@ -1308,6 +1344,10 @@ EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);

EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);

#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),

static unsigned int (*interpreters[])(const void *ctx,
@@ -1316,10 +1356,31 @@ EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST
#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
                                  const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST

void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
{
        stack_depth = max_t(u32, stack_depth, 1);
        insn->off = (s16) insn->imm;
        insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
                    __bpf_call_base_args;
        insn->code = BPF_JMP | BPF_CALL_ARGS;
}

bool bpf_prog_array_compatible(struct bpf_array *array,
                               const struct bpf_prog *fp)
{
        if (fp->kprobe_override)
                return false;

        if (!array->owner_prog_type) {
                /* There's no owner yet where we could check for
                 * compatibility.
@@ -1462,6 +1523,8 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
        rcu_read_lock();
        prog = rcu_dereference(progs)->progs;
        for (; *prog; prog++) {
                if (*prog == &dummy_bpf_prog.prog)
                        continue;
                id = (*prog)->aux->id;
                if (copy_to_user(prog_ids + i, &id, sizeof(id))) {
                        rcu_read_unlock();
@@ -1545,14 +1608,41 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
        return 0;
}

int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
                             __u32 __user *prog_ids, u32 request_cnt,
                             __u32 __user *prog_cnt)
{
        u32 cnt = 0;

        if (array)
                cnt = bpf_prog_array_length(array);

        if (copy_to_user(prog_cnt, &cnt, sizeof(cnt)))
                return -EFAULT;

        /* return early if user requested only program count or nothing to copy */
        if (!request_cnt || !cnt)
                return 0;

        return bpf_prog_array_copy_to_user(array, prog_ids, request_cnt);
}

static void bpf_prog_free_deferred(struct work_struct *work)
{
        struct bpf_prog_aux *aux;
        int i;

        aux = container_of(work, struct bpf_prog_aux, work);
        if (bpf_prog_is_dev_bound(aux))
                bpf_prog_offload_destroy(aux->prog);
        bpf_jit_free(aux->prog);
        for (i = 0; i < aux->func_cnt; i++)
                bpf_jit_free(aux->func[i]);
        if (aux->func_cnt) {
                kfree(aux->func);
                bpf_prog_unlock_free(aux->prog);
        } else {
                bpf_jit_free(aux->prog);
        }
}

/* Free internal BPF program */
@@ -189,8 +189,12 @@ void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
        u8 opcode = BPF_OP(insn->code);

        if (opcode == BPF_CALL) {
                verbose(env, "(%02x) call %s#%d\n", insn->code,
                        func_id_name(insn->imm), insn->imm);
                if (insn->src_reg == BPF_PSEUDO_CALL)
                        verbose(env, "(%02x) call pc%+d\n", insn->code,
                                insn->imm);
                else
                        verbose(env, "(%02x) call %s#%d\n", insn->code,
                                func_id_name(insn->imm), insn->imm);
        } else if (insn->code == (BPF_JMP | BPF_JA)) {
                verbose(env, "(%02x) goto pc%+d\n",
                        insn->code, insn->off);
@@ -1194,7 +1194,8 @@ static int bpf_prog_load(union bpf_attr *attr)
                goto free_used_maps;

        /* eBPF program is ready to be JITed */
        prog = bpf_prog_select_runtime(prog, &err);
        if (!prog->bpf_func)
                prog = bpf_prog_select_runtime(prog, &err);
        if (err < 0)
                goto free_used_maps;
File diff suppressed because it is too large
@@ -4723,6 +4723,9 @@ static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned lon
                rcu_read_unlock();
                return 0;
        }

        case PERF_EVENT_IOC_QUERY_BPF:
                return perf_event_query_prog_array(event, (void __user *)arg);
        default:
                return -ENOTTY;
        }
@@ -8080,6 +8083,13 @@ static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
                return -EINVAL;
        }

        /* Kprobe override only works for kprobes, not uprobes. */
        if (prog->kprobe_override &&
            !(event->tp_event->flags & TRACE_EVENT_FL_KPROBE)) {
                bpf_prog_put(prog);
                return -EINVAL;
        }

        if (is_tracepoint || is_syscall_tp) {
                int off = trace_event_get_offsets(event->tp_event);
kernel/kprobes.c
@@ -83,6 +83,16 @@ static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
        return &(kretprobe_table_locks[hash].lock);
}

/* List of symbols that can be overriden for error injection. */
static LIST_HEAD(kprobe_error_injection_list);
static DEFINE_MUTEX(kprobe_ei_mutex);
struct kprobe_ei_entry {
        struct list_head list;
        unsigned long start_addr;
        unsigned long end_addr;
        void *priv;
};

/* Blacklist -- list of struct kprobe_blacklist_entry */
static LIST_HEAD(kprobe_blacklist);

@@ -1394,6 +1404,17 @@ bool within_kprobe_blacklist(unsigned long addr)
        return false;
}

bool within_kprobe_error_injection_list(unsigned long addr)
{
        struct kprobe_ei_entry *ent;

        list_for_each_entry(ent, &kprobe_error_injection_list, list) {
                if (addr >= ent->start_addr && addr < ent->end_addr)
                        return true;
        }
        return false;
}

/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
@@ -2168,6 +2189,86 @@ static int __init populate_kprobe_blacklist(unsigned long *start,
        return 0;
}

#ifdef CONFIG_BPF_KPROBE_OVERRIDE
/* Markers of the _kprobe_error_inject_list section */
extern unsigned long __start_kprobe_error_inject_list[];
extern unsigned long __stop_kprobe_error_inject_list[];

/*
 * Lookup and populate the kprobe_error_injection_list.
 *
 * For safety reasons we only allow certain functions to be overriden with
 * bpf_error_injection, so we need to populate the list of the symbols that have
 * been marked as safe for overriding.
 */
static void populate_kprobe_error_injection_list(unsigned long *start,
                                                 unsigned long *end,
                                                 void *priv)
{
        unsigned long *iter;
        struct kprobe_ei_entry *ent;
        unsigned long entry, offset = 0, size = 0;

        mutex_lock(&kprobe_ei_mutex);
        for (iter = start; iter < end; iter++) {
                entry = arch_deref_entry_point((void *)*iter);

                if (!kernel_text_address(entry) ||
                    !kallsyms_lookup_size_offset(entry, &size, &offset)) {
                        pr_err("Failed to find error inject entry at %p\n",
                               (void *)entry);
                        continue;
                }

                ent = kmalloc(sizeof(*ent), GFP_KERNEL);
                if (!ent)
                        break;
                ent->start_addr = entry;
                ent->end_addr = entry + size;
                ent->priv = priv;
                INIT_LIST_HEAD(&ent->list);
                list_add_tail(&ent->list, &kprobe_error_injection_list);
        }
        mutex_unlock(&kprobe_ei_mutex);
}

static void __init populate_kernel_kprobe_ei_list(void)
{
        populate_kprobe_error_injection_list(__start_kprobe_error_inject_list,
                                             __stop_kprobe_error_inject_list,
                                             NULL);
}

static void module_load_kprobe_ei_list(struct module *mod)
{
        if (!mod->num_kprobe_ei_funcs)
                return;
        populate_kprobe_error_injection_list(mod->kprobe_ei_funcs,
                                             mod->kprobe_ei_funcs +
                                             mod->num_kprobe_ei_funcs, mod);
}

static void module_unload_kprobe_ei_list(struct module *mod)
{
        struct kprobe_ei_entry *ent, *n;
        if (!mod->num_kprobe_ei_funcs)
                return;

        mutex_lock(&kprobe_ei_mutex);
        list_for_each_entry_safe(ent, n, &kprobe_error_injection_list, list) {
                if (ent->priv == mod) {
                        list_del_init(&ent->list);
                        kfree(ent);
                }
        }
        mutex_unlock(&kprobe_ei_mutex);
}
#else
static inline void __init populate_kernel_kprobe_ei_list(void) {}
static inline void module_load_kprobe_ei_list(struct module *m) {}
static inline void module_unload_kprobe_ei_list(struct module *m) {}
#endif

/* Module notifier call back, checking kprobes on the module */
static int kprobes_module_callback(struct notifier_block *nb,
                                   unsigned long val, void *data)
@@ -2178,6 +2279,11 @@ static int kprobes_module_callback(struct notifier_block *nb,
        unsigned int i;
        int checkcore = (val == MODULE_STATE_GOING);

        if (val == MODULE_STATE_COMING)
                module_load_kprobe_ei_list(mod);
        else if (val == MODULE_STATE_GOING)
                module_unload_kprobe_ei_list(mod);

        if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
                return NOTIFY_DONE;

@@ -2240,6 +2346,8 @@ static int __init init_kprobes(void)
                pr_err("Please take care of using kprobes.\n");
        }

        populate_kernel_kprobe_ei_list();

        if (kretprobe_blacklist_size) {
                /* lookup the function address from its name */
                for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
@@ -2407,6 +2515,56 @@ static const struct file_operations debugfs_kprobe_blacklist_ops = {
        .release = seq_release,
};

/*
 * kprobes/error_injection_list -- shows which functions can be overriden for
 * error injection.
 * */
static void *kprobe_ei_seq_start(struct seq_file *m, loff_t *pos)
{
        mutex_lock(&kprobe_ei_mutex);
        return seq_list_start(&kprobe_error_injection_list, *pos);
}

static void kprobe_ei_seq_stop(struct seq_file *m, void *v)
{
        mutex_unlock(&kprobe_ei_mutex);
}

static void *kprobe_ei_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
        return seq_list_next(v, &kprobe_error_injection_list, pos);
}

static int kprobe_ei_seq_show(struct seq_file *m, void *v)
{
        char buffer[KSYM_SYMBOL_LEN];
        struct kprobe_ei_entry *ent =
                list_entry(v, struct kprobe_ei_entry, list);

        sprint_symbol(buffer, ent->start_addr);
        seq_printf(m, "%s\n", buffer);
        return 0;
}

static const struct seq_operations kprobe_ei_seq_ops = {
        .start = kprobe_ei_seq_start,
        .next = kprobe_ei_seq_next,
        .stop = kprobe_ei_seq_stop,
        .show = kprobe_ei_seq_show,
};

static int kprobe_ei_open(struct inode *inode, struct file *filp)
{
        return seq_open(filp, &kprobe_ei_seq_ops);
}

static const struct file_operations debugfs_kprobe_ei_ops = {
        .open = kprobe_ei_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

static void arm_all_kprobes(void)
{
        struct hlist_head *head;
@@ -2548,6 +2706,11 @@ static int __init debugfs_kprobe_init(void)
        if (!file)
                goto error;

        file = debugfs_create_file("error_injection_list", 0444, dir, NULL,
                                   &debugfs_kprobe_ei_ops);
        if (!file)
                goto error;

        return 0;

error:
@@ -3118,7 +3118,11 @@ static int find_module_sections(struct module *mod, struct load_info *info)
                                             sizeof(*mod->ftrace_callsites),
                                             &mod->num_ftrace_callsites);
#endif

#ifdef CONFIG_BPF_KPROBE_OVERRIDE
        mod->kprobe_ei_funcs = section_objs(info, "_kprobe_error_inject_list",
                                            sizeof(*mod->kprobe_ei_funcs),
                                            &mod->num_kprobe_ei_funcs);
#endif
        mod->extable = section_objs(info, "__ex_table",
                                    sizeof(*mod->extable), &mod->num_exentries);

@@ -530,6 +530,17 @@ config FUNCTION_PROFILER

          If in doubt, say N.

config BPF_KPROBE_OVERRIDE
        bool "Enable BPF programs to override a kprobed function"
        depends on BPF_EVENTS
        depends on KPROBES_ON_FTRACE
        depends on HAVE_KPROBE_OVERRIDE
        depends on DYNAMIC_FTRACE_WITH_REGS
        default n
        help
          Allows BPF to override the execution of a probed function and
          set a different return value. This is used for error injection.

config FTRACE_MCOUNT_RECORD
        def_bool y
        depends on DYNAMIC_FTRACE
@@ -13,6 +13,10 @@
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <asm/kprobes.h>

#include "trace_probe.h"
#include "trace.h"

u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
@@ -76,6 +80,24 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
}
EXPORT_SYMBOL_GPL(trace_call_bpf);

#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
        __this_cpu_write(bpf_kprobe_override, 1);
        regs_set_return_value(regs, rc);
        arch_ftrace_kprobe_override_function(regs);
        return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
        .func = bpf_override_return,
        .gpl_only = true,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
        .arg2_type = ARG_ANYTHING,
};
#endif

BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
{
        int ret;
@@ -556,6 +578,10 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func
                return &bpf_get_stackid_proto;
        case BPF_FUNC_perf_event_read_value:
                return &bpf_perf_event_read_value_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
        case BPF_FUNC_override_return:
                return &bpf_override_return_proto;
#endif
        default:
                return tracing_func_proto(func_id);
        }
@@ -773,6 +799,15 @@ int perf_event_attach_bpf_prog(struct perf_event *event,
        struct bpf_prog_array *new_array;
        int ret = -EEXIST;

        /*
         * Kprobe override only works for ftrace based kprobes, and only if they
         * are on the opt-in list.
         */
        if (prog->kprobe_override &&
            (!trace_kprobe_ftrace(event->tp_event) ||
             !trace_kprobe_error_injectable(event->tp_event)))
                return -EINVAL;

        mutex_lock(&bpf_event_mutex);

        if (event->prog)
@@ -825,3 +860,26 @@ void perf_event_detach_bpf_prog(struct perf_event *event)
unlock:
        mutex_unlock(&bpf_event_mutex);
}

int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
        struct perf_event_query_bpf __user *uquery = info;
        struct perf_event_query_bpf query = {};
        int ret;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        if (event->attr.type != PERF_TYPE_TRACEPOINT)
                return -EINVAL;
        if (copy_from_user(&query, uquery, sizeof(query)))
                return -EFAULT;

        mutex_lock(&bpf_event_mutex);
        ret = bpf_prog_array_copy_info(event->tp_event->prog_array,
                                       uquery->ids,
                                       query.ids_len,
                                       &uquery->prog_cnt);
        mutex_unlock(&bpf_event_mutex);

        return ret;
}
@@ -42,6 +42,7 @@ struct trace_kprobe {
        (offsetof(struct trace_kprobe, tp.args) + \
        (sizeof(struct probe_arg) * (n)))

DEFINE_PER_CPU(int, bpf_kprobe_override);

static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
{
@@ -87,6 +88,27 @@ static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
        return nhit;
}

int trace_kprobe_ftrace(struct trace_event_call *call)
{
        struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
        return kprobe_ftrace(&tk->rp.kp);
}

int trace_kprobe_error_injectable(struct trace_event_call *call)
{
        struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
        unsigned long addr;

        if (tk->symbol) {
                addr = (unsigned long)
                        kallsyms_lookup_name(trace_kprobe_symbol(tk));
                addr += tk->rp.kp.offset;
        } else {
                addr = (unsigned long)tk->rp.kp.addr;
        }
        return within_kprobe_error_injection_list(addr);
}

static int register_kprobe_event(struct trace_kprobe *tk);
static int unregister_kprobe_event(struct trace_kprobe *tk);

@@ -1170,7 +1192,7 @@ static int kretprobe_event_define_fields(struct trace_event_call *event_call)
#ifdef CONFIG_PERF_EVENTS

/* Kprobe profile handler */
static void
static int
kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
        struct trace_event_call *call = &tk->tp.call;
@@ -1179,12 +1201,29 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
        int size, __size, dsize;
        int rctx;

        if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
                return;
        if (bpf_prog_array_valid(call)) {
                int ret;

                ret = trace_call_bpf(call, regs);

                /*
                 * We need to check and see if we modified the pc of the
                 * pt_regs, and if so clear the kprobe and return 1 so that we
                 * don't do the instruction skipping. Also reset our state so
                 * we are clean the next pass through.
                 */
                if (__this_cpu_read(bpf_kprobe_override)) {
                        __this_cpu_write(bpf_kprobe_override, 0);
                        reset_current_kprobe();
                        return 1;
                }
                if (!ret)
                        return 0;
        }

        head = this_cpu_ptr(call->perf_events);
        if (hlist_empty(head))
                return;
                return 0;

        dsize = __get_data_size(&tk->tp, regs);
        __size = sizeof(*entry) + tk->tp.size + dsize;
@@ -1193,13 +1232,14 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)

        entry = perf_trace_buf_alloc(size, NULL, &rctx);
        if (!entry)
                return;
                return 0;

        entry->ip = (unsigned long)tk->rp.kp.addr;
        memset(&entry[1], 0, dsize);
        store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
        perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
                              head, NULL);
        return 0;
}
NOKPROBE_SYMBOL(kprobe_perf_func);

@@ -1275,16 +1315,24 @@ static int kprobe_register(struct trace_event_call *event,
static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
        struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
        int ret = 0;

        raw_cpu_inc(*tk->nhit);

        if (tk->tp.flags & TP_FLAG_TRACE)
                kprobe_trace_func(tk, regs);
#ifdef CONFIG_PERF_EVENTS
        if (tk->tp.flags & TP_FLAG_PROFILE)
                kprobe_perf_func(tk, regs);
        if (tk->tp.flags & TP_FLAG_PROFILE) {
                ret = kprobe_perf_func(tk, regs);
                /*
                 * The ftrace kprobe handler leaves it up to us to re-enable
                 * preemption here before returning if we've modified the ip.
                 */
                if (ret)
                        preempt_enable_no_resched();
        }
#endif
        return 0; /* We don't tweek kernel, so just return 0 */
        return ret;
}
NOKPROBE_SYMBOL(kprobe_dispatcher);
@@ -252,6 +252,8 @@ struct symbol_cache;
unsigned long update_symbol_cache(struct symbol_cache *sc);
void free_symbol_cache(struct symbol_cache *sc);
struct symbol_cache *alloc_symbol_cache(const char *sym, long offset);
int trace_kprobe_ftrace(struct trace_event_call *call);
int trace_kprobe_error_injectable(struct trace_event_call *call);
#else
/* uprobes do not support symbol fetch methods */
#define fetch_symbol_u8 NULL
@@ -277,6 +279,16 @@ alloc_symbol_cache(const char *sym, long offset)
{
        return NULL;
}

static inline int trace_kprobe_ftrace(struct trace_event_call *call)
{
        return 0;
}

static inline int trace_kprobe_error_injectable(struct trace_event_call *call)
{
        return 0;
}
#endif /* CONFIG_KPROBE_EVENTS */

struct probe_arg {