Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Alexei Starovoitov says:

====================
pull-request: bpf-next 2019-05-31

The following pull-request contains BPF updates for your *net-next* tree.

Lots of exciting new features in the first PR of this development cycle!
The main changes are:

1) misc verifier improvements, from Alexei.

2) bpftool can now convert btf to valid C, from Andrii.

3) verifier can insert explicit ZEXT insn when requested by 32-bit JITs.
   This feature greatly improves BPF speed on 32-bit architectures. From Jiong.

4) cgroups will now auto-detach bpf programs. This fixes the issue of
   thousands of bpf programs getting stuck in dying cgroups. From Roman.

5) new bpf_send_signal() helper, from Yonghong.

6) cgroup inet skb programs can signal CN to the stack, from Lawrence.

7) miscellaneous cleanups, from many developers.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
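For item 5 above, here is a minimal sketch of how a tracing program might call the new helper, assuming a libbpf-style toolchain that declares bpf_send_signal() in bpf_helpers.h; the attach point and signal number are illustrative, not part of this merge:

	// SPDX-License-Identifier: GPL-2.0
	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	/* Hypothetical kprobe program: deliver SIGUSR1 (10 on x86) to the
	 * task that enters the probed function. bpf_send_signal() always
	 * targets the *current* task, i.e. whoever triggered this program.
	 */
	SEC("kprobe/do_nanosleep")
	int send_sig_on_sleep(struct pt_regs *ctx)
	{
		bpf_send_signal(10);	/* SIGUSR1; -EPERM etc. ignored here */
		return 0;
	}

	char _license[] SEC("license") = "GPL";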
@@ -19,6 +19,9 @@
 #include "trace_probe.h"
 #include "trace.h"
 
+#define bpf_event_rcu_dereference(p)					\
+	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
+
 #ifdef CONFIG_MODULES
 struct bpf_trace_module {
 	struct module *module;
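The new bpf_event_rcu_dereference() macro captures the locking rule for tp_event->prog_array in this file: every reader and writer below holds bpf_event_mutex, so the RCU-managed pointer can be dereferenced without rcu_read_lock(), and lockdep_is_held() lets lockdep verify that the mutex really is held. Condensed from perf_event_attach_bpf_prog() further down in this diff:

	mutex_lock(&bpf_event_mutex);
	/* safe: bpf_event_mutex excludes all writers of prog_array */
	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	/* ... inspect or copy the array ... */
	mutex_unlock(&bpf_event_mutex);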
@@ -567,6 +570,69 @@ static const struct bpf_func_proto bpf_probe_read_str_proto = {
 	.arg3_type	= ARG_ANYTHING,
 };
 
+struct send_signal_irq_work {
+	struct irq_work irq_work;
+	struct task_struct *task;
+	u32 sig;
+};
+
+static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
+
+static void do_bpf_send_signal(struct irq_work *entry)
+{
+	struct send_signal_irq_work *work;
+
+	work = container_of(entry, struct send_signal_irq_work, irq_work);
+	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, PIDTYPE_TGID);
+}
+
+BPF_CALL_1(bpf_send_signal, u32, sig)
+{
+	struct send_signal_irq_work *work = NULL;
+
+	/* Similar to bpf_probe_write_user, task needs to be
+	 * in a sound condition and kernel memory access be
+	 * permitted in order to send signal to the current
+	 * task.
+	 */
+	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
+		return -EPERM;
+	if (unlikely(uaccess_kernel()))
+		return -EPERM;
+	if (unlikely(!nmi_uaccess_okay()))
+		return -EPERM;
+
+	if (in_nmi()) {
+		/* Do an early check on signal validity. Otherwise,
+		 * the error is lost in deferred irq_work.
+		 */
+		if (unlikely(!valid_signal(sig)))
+			return -EINVAL;
+
+		work = this_cpu_ptr(&send_signal_work);
+		if (work->irq_work.flags & IRQ_WORK_BUSY)
+			return -EBUSY;
+
+		/* Add the current task, which is the target of sending signal,
+		 * to the irq_work. The current task may change when queued
+		 * irq works get executed.
+		 */
+		work->task = current;
+		work->sig = sig;
+		irq_work_queue(&work->irq_work);
+		return 0;
+	}
+
+	return group_send_sig_info(sig, SEND_SIG_PRIV, current, PIDTYPE_TGID);
+}
+
+static const struct bpf_func_proto bpf_send_signal_proto = {
+	.func		= bpf_send_signal,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_ANYTHING,
+};
+
 static const struct bpf_func_proto *
 tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 {
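group_send_sig_info() takes locks, so it cannot run in NMI context; the helper therefore defers NMI-path requests to a per-CPU irq_work, stashing the task pointer because current may have changed by the time the work executes, and returning -EBUSY if that per-CPU slot is still queued. A rough userspace harness for exercising the non-NMI path might look like this (attaching the BPF program is elided; all names here are illustrative):

	#include <signal.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	static volatile sig_atomic_t got_signal;

	static void handler(int sig)
	{
		got_signal = 1;
	}

	int main(void)
	{
		struct sigaction sa;

		memset(&sa, 0, sizeof(sa));
		sa.sa_handler = handler;
		sigemptyset(&sa.sa_mask);
		if (sigaction(SIGUSR1, &sa, NULL))
			return 1;

		/* ... attach a BPF program that calls bpf_send_signal(SIGUSR1)
		 * to a kprobe or perf event this process will trigger ...
		 */

		while (!got_signal)
			pause();	/* woken by the BPF-sent signal */

		printf("got signal from BPF program\n");
		return 0;
	}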
@@ -617,6 +683,8 @@ tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 	case BPF_FUNC_get_current_cgroup_id:
 		return &bpf_get_current_cgroup_id_proto;
 #endif
+	case BPF_FUNC_send_signal:
+		return &bpf_send_signal_proto;
 	default:
 		return NULL;
 	}
@@ -1034,7 +1102,7 @@ static DEFINE_MUTEX(bpf_event_mutex);
 int perf_event_attach_bpf_prog(struct perf_event *event,
 			       struct bpf_prog *prog)
 {
-	struct bpf_prog_array __rcu *old_array;
+	struct bpf_prog_array *old_array;
 	struct bpf_prog_array *new_array;
 	int ret = -EEXIST;
 
@@ -1052,7 +1120,7 @@ int perf_event_attach_bpf_prog(struct perf_event *event,
 	if (event->prog)
 		goto unlock;
 
-	old_array = event->tp_event->prog_array;
+	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
 	if (old_array &&
 	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
 		ret = -E2BIG;
@@ -1075,7 +1143,7 @@ unlock:
 
 void perf_event_detach_bpf_prog(struct perf_event *event)
 {
-	struct bpf_prog_array __rcu *old_array;
+	struct bpf_prog_array *old_array;
 	struct bpf_prog_array *new_array;
 	int ret;
 
@@ -1084,7 +1152,7 @@ void perf_event_detach_bpf_prog(struct perf_event *event)
 	if (!event->prog)
 		goto unlock;
 
-	old_array = event->tp_event->prog_array;
+	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
 	ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
 	if (ret == -ENOENT)
 		goto unlock;
@@ -1106,6 +1174,7 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
 {
 	struct perf_event_query_bpf __user *uquery = info;
 	struct perf_event_query_bpf query = {};
+	struct bpf_prog_array *progs;
 	u32 *ids, prog_cnt, ids_len;
 	int ret;
 
@@ -1130,10 +1199,8 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
 	 */
 
 	mutex_lock(&bpf_event_mutex);
-	ret = bpf_prog_array_copy_info(event->tp_event->prog_array,
-				       ids,
-				       ids_len,
-				       &prog_cnt);
+	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
+	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
 	mutex_unlock(&bpf_event_mutex);
 
 	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
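For reference, the function this hunk touches serves the PERF_EVENT_IOC_QUERY_BPF ioctl, which reports the ids of BPF programs attached to a perf event's tracepoint. A hedged userspace sketch of the caller side (error handling condensed; perf_fd is assumed to be an already-open perf event file descriptor):

	#include <linux/perf_event.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>

	static void print_attached_prog_ids(int perf_fd)
	{
		struct perf_event_query_bpf *query;
		__u32 i;

		/* room for up to 16 program ids */
		query = calloc(1, sizeof(*query) + 16 * sizeof(__u32));
		if (!query)
			return;
		query->ids_len = 16;

		if (ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, query) == 0)
			for (i = 0; i < query->prog_cnt; i++)
				printf("attached bpf prog id: %u\n", query->ids[i]);

		free(query);
	}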
@@ -1343,5 +1410,18 @@ static int __init bpf_event_init(void)
 	return 0;
 }
 
+static int __init send_signal_irq_work_init(void)
+{
+	int cpu;
+	struct send_signal_irq_work *work;
+
+	for_each_possible_cpu(cpu) {
+		work = per_cpu_ptr(&send_signal_work, cpu);
+		init_irq_work(&work->irq_work, do_bpf_send_signal);
+	}
+	return 0;
+}
+
 fs_initcall(bpf_event_init);
+subsys_initcall(send_signal_irq_work_init);
 #endif /* CONFIG_MODULES */