perf, bpf: Introduce PERF_RECORD_BPF_EVENT
For better performance analysis of BPF programs, this patch introduces PERF_RECORD_BPF_EVENT, a new perf_event_type that exposes BPF program load/unload information to user space. Each BPF program may contain up to BPF_MAX_SUBPROGS (256) sub programs. The following example shows kernel symbols for a BPF program with 7 sub programs: ffffffffa0257cf9 t bpf_prog_b07ccb89267cf242_F ffffffffa02592e1 t bpf_prog_2dcecc18072623fc_F ffffffffa025b0e9 t bpf_prog_bb7a405ebaec5d5c_F ffffffffa025dd2c t bpf_prog_a7540d4a39ec1fc7_F ffffffffa025fcca t bpf_prog_05762d4ade0e3737_F ffffffffa026108f t bpf_prog_db4bd11e35df90d4_F ffffffffa0263f00 t bpf_prog_89d64e4abf0f0126_F ffffffffa0257cf9 t bpf_prog_ae31629322c4b018__dummy_tracepoi When a bpf program is loaded, PERF_RECORD_KSYMBOL is generated for each of these sub programs. Therefore, PERF_RECORD_BPF_EVENT is not needed for simple profiling. For annotation, user space needs to listen to PERF_RECORD_BPF_EVENT and gather more information about these (sub) programs via sys_bpf. Signed-off-by: Song Liu <songliubraving@fb.com> Reviewed-by: Arnaldo Carvalho de Melo <acme@redhat.com> Acked-by: Alexei Starovoitov <ast@kernel.org> Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org> Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com> Cc: Daniel Borkmann <daniel@iogearbox.net> Cc: Peter Zijlstra <peterz@infradead.org> Cc: kernel-team@fb.com Cc: netdev@vger.kernel.org Link: http://lkml.kernel.org/r/20190117161521.1341602-4-songliubraving@fb.com Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
This commit is contained in:

committed by
Arnaldo Carvalho de Melo

parent
d764ac6464
commit
6ee52e2a3f
@@ -386,6 +386,7 @@ static atomic_t nr_task_events __read_mostly;
|
||||
static atomic_t nr_freq_events __read_mostly;
|
||||
static atomic_t nr_switch_events __read_mostly;
|
||||
static atomic_t nr_ksymbol_events __read_mostly;
|
||||
static atomic_t nr_bpf_events __read_mostly;
|
||||
|
||||
static LIST_HEAD(pmus);
|
||||
static DEFINE_MUTEX(pmus_lock);
|
||||
@@ -4308,6 +4309,8 @@ static void unaccount_event(struct perf_event *event)
|
||||
dec = true;
|
||||
if (event->attr.ksymbol)
|
||||
atomic_dec(&nr_ksymbol_events);
|
||||
if (event->attr.bpf_event)
|
||||
atomic_dec(&nr_bpf_events);
|
||||
|
||||
if (dec) {
|
||||
if (!atomic_add_unless(&perf_sched_count, -1, 1))
|
||||
@@ -7747,6 +7750,116 @@ err:
|
||||
WARN_ONCE(1, "%s: Invalid KSYMBOL type 0x%x\n", __func__, ksym_type);
|
||||
}
|
||||
|
||||
/*
 * bpf program load/unload tracking
 */

struct perf_bpf_event {
	struct bpf_prog *prog;	/* program being (un)loaded; kernel-side only, not emitted */
	struct {
		struct perf_event_header header;
		u16 type;		/* enum perf_bpf_event_type (PROG_LOAD/PROG_UNLOAD) */
		u16 flags;
		u32 id;			/* copied from prog->aux->id */
		u8 tag[BPF_TAG_SIZE];	/* copied from prog->tag */
	} event_id;	/* on-the-wire PERF_RECORD_BPF_EVENT payload */
};
||||
|
||||
/*
 * A PERF_RECORD_BPF_EVENT is delivered only to events that opted in
 * via perf_event_attr.bpf_event.
 */
static int perf_event_bpf_match(struct perf_event *event)
{
	return event->attr.bpf_event;
}
||||
|
||||
static void perf_event_bpf_output(struct perf_event *event, void *data)
|
||||
{
|
||||
struct perf_bpf_event *bpf_event = data;
|
||||
struct perf_output_handle handle;
|
||||
struct perf_sample_data sample;
|
||||
int ret;
|
||||
|
||||
if (!perf_event_bpf_match(event))
|
||||
return;
|
||||
|
||||
perf_event_header__init_id(&bpf_event->event_id.header,
|
||||
&sample, event);
|
||||
ret = perf_output_begin(&handle, event,
|
||||
bpf_event->event_id.header.size);
|
||||
if (ret)
|
||||
return;
|
||||
|
||||
perf_output_put(&handle, bpf_event->event_id);
|
||||
perf_event__output_id_sample(event, &handle, &sample);
|
||||
|
||||
perf_output_end(&handle);
|
||||
}
|
||||
|
||||
static void perf_event_bpf_emit_ksymbols(struct bpf_prog *prog,
|
||||
enum perf_bpf_event_type type)
|
||||
{
|
||||
bool unregister = type == PERF_BPF_EVENT_PROG_UNLOAD;
|
||||
char sym[KSYM_NAME_LEN];
|
||||
int i;
|
||||
|
||||
if (prog->aux->func_cnt == 0) {
|
||||
bpf_get_prog_name(prog, sym);
|
||||
perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF,
|
||||
(u64)(unsigned long)prog->bpf_func,
|
||||
prog->jited_len, unregister, sym);
|
||||
} else {
|
||||
for (i = 0; i < prog->aux->func_cnt; i++) {
|
||||
struct bpf_prog *subprog = prog->aux->func[i];
|
||||
|
||||
bpf_get_prog_name(subprog, sym);
|
||||
perf_event_ksymbol(
|
||||
PERF_RECORD_KSYMBOL_TYPE_BPF,
|
||||
(u64)(unsigned long)subprog->bpf_func,
|
||||
subprog->jited_len, unregister, sym);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void perf_event_bpf_event(struct bpf_prog *prog,
|
||||
enum perf_bpf_event_type type,
|
||||
u16 flags)
|
||||
{
|
||||
struct perf_bpf_event bpf_event;
|
||||
|
||||
if (type <= PERF_BPF_EVENT_UNKNOWN ||
|
||||
type >= PERF_BPF_EVENT_MAX)
|
||||
return;
|
||||
|
||||
switch (type) {
|
||||
case PERF_BPF_EVENT_PROG_LOAD:
|
||||
case PERF_BPF_EVENT_PROG_UNLOAD:
|
||||
if (atomic_read(&nr_ksymbol_events))
|
||||
perf_event_bpf_emit_ksymbols(prog, type);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
if (!atomic_read(&nr_bpf_events))
|
||||
return;
|
||||
|
||||
bpf_event = (struct perf_bpf_event){
|
||||
.prog = prog,
|
||||
.event_id = {
|
||||
.header = {
|
||||
.type = PERF_RECORD_BPF_EVENT,
|
||||
.size = sizeof(bpf_event.event_id),
|
||||
},
|
||||
.type = type,
|
||||
.flags = flags,
|
||||
.id = prog->aux->id,
|
||||
},
|
||||
};
|
||||
|
||||
BUILD_BUG_ON(BPF_TAG_SIZE % sizeof(u64));
|
||||
|
||||
memcpy(bpf_event.event_id.tag, prog->tag, BPF_TAG_SIZE);
|
||||
perf_iterate_sb(perf_event_bpf_output, &bpf_event, NULL);
|
||||
}
|
||||
|
||||
void perf_event_itrace_started(struct perf_event *event)
|
||||
{
|
||||
event->attach_state |= PERF_ATTACH_ITRACE;
|
||||
@@ -10008,6 +10121,8 @@ static void account_event(struct perf_event *event)
|
||||
inc = true;
|
||||
if (event->attr.ksymbol)
|
||||
atomic_inc(&nr_ksymbol_events);
|
||||
if (event->attr.bpf_event)
|
||||
atomic_inc(&nr_bpf_events);
|
||||
|
||||
if (inc) {
|
||||
/*
|
||||
|
Reference in New Issue
Block a user