bpf: permit multiple bpf attachments for a single perf event
This patch enables multiple bpf attachments for a single kprobe/uprobe/tracepoint trace event. Each trace_event keeps a list of attached perf events. When an event fires, all attached bpf programs are executed in the order of attachment.

A global bpf_event_mutex lock is introduced to protect prog_array attaching and detaching. An alternative would be to introduce a mutex lock in every trace_event_call structure, but that would take a lot of extra memory, so the global bpf_event_mutex lock is a good compromise.

Detaching a bpf prog involves a memory allocation. If the allocation fails, a dummy do-nothing program replaces the to-be-detached program in place.

Signed-off-by: Yonghong Song <yhs@fb.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
committed by David S. Miller
parent 0b4c6841fe
commit e87c6bc385
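The dispatch model described in the commit message can be pictured as a NULL-terminated program array walked under RCU. The sketch below is illustrative only, assuming a bare array and a hypothetical bpf_prog_run_all() helper; the patch itself stores a struct bpf_prog_array in trace_event_call and runs it through trace_call_bpf(), as declared in the hunks that follow.

#include <linux/filter.h>       /* struct bpf_prog, BPF_PROG_RUN() */
#include <linux/rcupdate.h>

/* Hypothetical helper, not the patch's code: run every program attached
 * to a trace event in attachment order.  The array is NULL-terminated;
 * the ANDed return value decides whether the event is ultimately passed
 * on to the perf layer (0 means "drop").
 */
static unsigned int bpf_prog_run_all(struct bpf_prog **progs, void *ctx)
{
        unsigned int ret = 1;

        rcu_read_lock();
        for (; *progs; progs++)
                ret &= BPF_PROG_RUN(*progs, ctx);
        rcu_read_unlock();

        return ret;
}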
include/linux/trace_events.h

@@ -271,14 +271,37 @@ struct trace_event_call {
 #ifdef CONFIG_PERF_EVENTS
 	int				perf_refcount;
 	struct hlist_head __percpu	*perf_events;
-	struct bpf_prog			*prog;
-	struct perf_event		*bpf_prog_owner;
+	struct bpf_prog_array __rcu	*prog_array;
 
 	int	(*perf_perm)(struct trace_event_call *,
 			     struct perf_event *);
 #endif
 };
 
+#ifdef CONFIG_PERF_EVENTS
+static inline bool bpf_prog_array_valid(struct trace_event_call *call)
+{
+	/*
+	 * This inline function checks whether call->prog_array
+	 * is valid or not. The function is called in various places,
+	 * outside rcu_read_lock/unlock, as a heuristic to speed up execution.
+	 *
+	 * If this function returns true, and later call->prog_array
+	 * becomes false inside rcu_read_lock/unlock region,
+	 * we bail out then. If this function return false,
+	 * there is a risk that we might miss a few events if the checking
+	 * were delayed until inside rcu_read_lock/unlock region and
+	 * call->prog_array happened to become non-NULL then.
+	 *
+	 * Here, READ_ONCE() is used instead of rcu_access_pointer().
+	 * rcu_access_pointer() requires the actual definition of
+	 * "struct bpf_prog_array" while READ_ONCE() only needs
+	 * a declaration of the same type.
+	 */
+	return !!READ_ONCE(call->prog_array);
+}
+#endif
+
 static inline const char *
 trace_event_name(struct trace_event_call *call)
 {
@@ -435,12 +458,23 @@ trace_trigger_soft_disabled(struct trace_event_file *file)
 }
 
 #ifdef CONFIG_BPF_EVENTS
-unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx);
+unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx);
+int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog);
+void perf_event_detach_bpf_prog(struct perf_event *event);
 #else
-static inline unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
+static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
 {
 	return 1;
 }
+
+static inline int
+perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline void perf_event_detach_bpf_prog(struct perf_event *event) { }
+
 #endif
 
 enum {
@@ -511,6 +545,7 @@ perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
 {
 	perf_tp_event(type, count, raw_data, size, regs, head, rctx, task, event);
 }
+
 #endif
 
 #endif /* _LINUX_TRACE_EVENT_H */
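As a hedged usage illustration (not part of this commit's diff): with this change in place, two perf events opened on the same kprobe/tracepoint can each carry their own BPF program, whereas previously a second attachment to the shared trace event was rejected. The helper name attach_two_progs() and its fd parameters below are hypothetical.

#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* Hypothetical usage sketch.  fd1/fd2 are perf event fds opened on the
 * same kprobe/tracepoint trace event (e.g. via perf_event_open() with
 * the event id from tracefs); prog_fd1/prog_fd2 are loaded BPF program
 * fds from bpf(BPF_PROG_LOAD).  Both programs end up in the trace
 * event's prog_array and run in attachment order.
 */
static int attach_two_progs(int fd1, int fd2, int prog_fd1, int prog_fd2)
{
        if (ioctl(fd1, PERF_EVENT_IOC_SET_BPF, prog_fd1) < 0)
                return -1;
        /* a second attachment to the same trace event now succeeds */
        if (ioctl(fd2, PERF_EVENT_IOC_SET_BPF, prog_fd2) < 0)
                return -1;
        if (ioctl(fd1, PERF_EVENT_IOC_ENABLE, 0) < 0 ||
            ioctl(fd2, PERF_EVENT_IOC_ENABLE, 0) < 0)
                return -1;
        return 0;
}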