Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next

Alexei Starovoitov says:

====================
pull-request: bpf-next 2020-07-13

The following pull-request contains BPF updates for your *net-next* tree.

We've added 36 non-merge commits during the last 7 day(s) which contain
a total of 62 files changed, 2242 insertions(+), 468 deletions(-).

The main changes are:

1) Avoid trace_printk warning banner by switching bpf_trace_printk to use
   its own tracing event, from Alan.

2) Better libbpf support on older kernels, from Andrii.

3) Additional AF_XDP stats, from Ciara.

4) Build-time resolution of BTF IDs, from Jiri.

5) BPF_CGROUP_INET_SOCK_RELEASE hook, from Stanislav.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
David S. Miller
2020-07-13 18:04:05 -07:00
62 changed files with 2235 additions and 461 deletions

View File

@@ -0,0 +1,33 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#define _GNU_SOURCE
#include <test_progs.h>
#include "test_core_retro.skel.h"
/* Load and attach the test_core_retro skeleton, trigger its probe via
 * usleep(), then verify the BPF program recorded our PID in the
 * "results" map at key 0.
 */
void test_core_retro(void)
{
	int key = 0, pid_val = 0, err, duration = 0;
	struct test_core_retro *skel;

	/* open and load the skeleton */
	skel = test_core_retro__open_and_load();
	if (CHECK(!skel, "skel_load", "skeleton open/load failed\n"))
		goto out_close;

	/* attach the probe program */
	err = test_core_retro__attach(skel);
	if (CHECK(err, "attach_kprobe", "err %d\n", err))
		goto out_close;

	/* trigger the probe */
	usleep(1);

	err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.results), &key, &pid_val);
	if (CHECK(err, "map_lookup", "failed to lookup result: %d\n", errno))
		goto out_close;

	CHECK(pid_val != getpid(), "pid_check", "got %d != exp %d\n", pid_val, getpid());

out_close:
	/* destroy handles a NULL/partially-initialized skeleton */
	test_core_retro__destroy(skel);
}

View File

@@ -4,6 +4,7 @@
#include <sched.h>
#include <sys/socket.h>
#include <test_progs.h>
#include "test_perf_buffer.skel.h"
#include "bpf/libbpf_internal.h"
/* AddressSanitizer sometimes crashes due to data dereference below, due to
@@ -25,16 +26,11 @@ static void on_sample(void *ctx, int cpu, void *data, __u32 size)
void test_perf_buffer(void)
{
int err, prog_fd, on_len, nr_on_cpus = 0, nr_cpus, i, duration = 0;
const char *prog_name = "kprobe/sys_nanosleep";
const char *file = "./test_perf_buffer.o";
int err, on_len, nr_on_cpus = 0, nr_cpus, i, duration = 0;
struct perf_buffer_opts pb_opts = {};
struct bpf_map *perf_buf_map;
struct test_perf_buffer *skel;
cpu_set_t cpu_set, cpu_seen;
struct bpf_program *prog;
struct bpf_object *obj;
struct perf_buffer *pb;
struct bpf_link *link;
bool *online;
nr_cpus = libbpf_num_possible_cpus();
@@ -51,33 +47,21 @@ void test_perf_buffer(void)
nr_on_cpus++;
/* load program */
err = bpf_prog_load(file, BPF_PROG_TYPE_KPROBE, &obj, &prog_fd);
if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno)) {
obj = NULL;
goto out_close;
}
prog = bpf_object__find_program_by_title(obj, prog_name);
if (CHECK(!prog, "find_probe", "prog '%s' not found\n", prog_name))
skel = test_perf_buffer__open_and_load();
if (CHECK(!skel, "skel_load", "skeleton open/load failed\n"))
goto out_close;
/* load map */
perf_buf_map = bpf_object__find_map_by_name(obj, "perf_buf_map");
if (CHECK(!perf_buf_map, "find_perf_buf_map", "not found\n"))
goto out_close;
/* attach kprobe */
link = bpf_program__attach_kprobe(prog, false /* retprobe */,
SYS_NANOSLEEP_KPROBE_NAME);
if (CHECK(IS_ERR(link), "attach_kprobe", "err %ld\n", PTR_ERR(link)))
/* attach probe */
err = test_perf_buffer__attach(skel);
if (CHECK(err, "attach_kprobe", "err %d\n", err))
goto out_close;
/* set up perf buffer */
pb_opts.sample_cb = on_sample;
pb_opts.ctx = &cpu_seen;
pb = perf_buffer__new(bpf_map__fd(perf_buf_map), 1, &pb_opts);
pb = perf_buffer__new(bpf_map__fd(skel->maps.perf_buf_map), 1, &pb_opts);
if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb)))
goto out_detach;
goto out_close;
/* trigger kprobe on every CPU */
CPU_ZERO(&cpu_seen);
@@ -94,7 +78,7 @@ void test_perf_buffer(void)
&cpu_set);
if (err && CHECK(err, "set_affinity", "cpu #%d, err %d\n",
i, err))
goto out_detach;
goto out_close;
usleep(1);
}
@@ -110,9 +94,7 @@ void test_perf_buffer(void)
out_free_pb:
perf_buffer__free(pb);
out_detach:
bpf_link__destroy(link);
out_close:
bpf_object__close(obj);
test_perf_buffer__destroy(skel);
free(online);
}

View File

@@ -0,0 +1,111 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <string.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include <linux/btf.h>
#include <linux/kernel.h>
#include <linux/btf_ids.h>
#include "test_progs.h"
/* required by the CHECK() macro in test_progs.h */
static int duration;

/* One expected BTF symbol: its name, BTF kind, and the type ID we
 * resolve for it at runtime (-1 until resolved; 0 for the unused slot).
 */
struct symbol {
const char *name;
int type;
int id;
};

/* Symbols expected in btf_data.o; order must mirror the BTF_ID_LIST
 * below so entries can be compared index-for-index.
 */
struct symbol test_symbols[] = {
{ "unused", BTF_KIND_UNKN, 0 },
{ "S", BTF_KIND_TYPEDEF, -1 },
{ "T", BTF_KIND_TYPEDEF, -1 },
{ "U", BTF_KIND_TYPEDEF, -1 },
{ "S", BTF_KIND_STRUCT, -1 },
{ "U", BTF_KIND_UNION, -1 },
{ "func", BTF_KIND_FUNC, -1 },
};

/* IDs in test_list are filled in at build time by the resolve_btfids
 * tool; the test compares them against the runtime-resolved IDs above.
 */
BTF_ID_LIST(test_list)
BTF_ID_UNUSED
BTF_ID(typedef, S)
BTF_ID(typedef, T)
BTF_ID(typedef, U)
BTF_ID(struct, S)
BTF_ID(union, U)
BTF_ID(func, func)
static int
__resolve_symbol(struct btf *btf, int type_id)
{
const struct btf_type *type;
const char *str;
unsigned int i;
type = btf__type_by_id(btf, type_id);
if (!type) {
PRINT_FAIL("Failed to get type for ID %d\n", type_id);
return -1;
}
for (i = 0; i < ARRAY_SIZE(test_symbols); i++) {
if (test_symbols[i].id != -1)
continue;
if (BTF_INFO_KIND(type->info) != test_symbols[i].type)
continue;
str = btf__name_by_offset(btf, type->name_off);
if (!str) {
PRINT_FAIL("Failed to get name for BTF ID %d\n", type_id);
return -1;
}
if (!strcmp(str, test_symbols[i].name))
test_symbols[i].id = type_id;
}
return 0;
}
/* Parse BTF out of btf_data.o and resolve every entry in test_symbols
 * to its runtime BTF type ID.  Returns 0 on success, -1 on failure.
 */
static int resolve_symbols(void)
{
	struct btf *btf;
	__u32 type_id, nr;
	int err = 0;

	btf = btf__parse_elf("btf_data.o", NULL);
	if (CHECK(libbpf_get_error(btf), "resolve",
		  "Failed to load BTF from btf_data.o\n"))
		return -1;

	nr = btf__get_nr_types(btf);

	/* BTF type ID 0 is reserved for void; valid IDs start at 1 */
	for (type_id = 1; type_id <= nr; type_id++) {
		err = __resolve_symbol(btf, type_id);
		if (err)
			break;
	}

	btf__free(btf);
	/* propagate __resolve_symbol() failure instead of silently
	 * returning success (the original always returned 0)
	 */
	return err;
}
/* Verify that the build-time resolved IDs in BTF_ID_LIST(test_list)
 * agree, entry by entry, with the IDs resolved at runtime from
 * btf_data.o.  Returns 0 on success, non-zero on the first mismatch.
 */
int test_resolve_btfids(void)
{
	int ret = 0;
	unsigned int i;

	if (resolve_symbols())
		return -1;

	/* compare entries index-for-index, stopping on the first failure */
	for (i = 0; i < ARRAY_SIZE(test_symbols); i++) {
		ret = CHECK(test_list[i] != test_symbols[i].id,
			    "id_check",
			    "wrong ID for %s (%d != %d)\n", test_symbols[i].name,
			    test_list[i], test_symbols[i].id);
		if (ret)
			break;
	}

	return ret;
}

View File

@@ -41,7 +41,7 @@ void test_skeleton(void)
CHECK(bss->in4 != 0, "in4", "got %lld != exp %lld\n", bss->in4, 0LL);
CHECK(bss->out4 != 0, "out4", "got %lld != exp %lld\n", bss->out4, 0LL);
CHECK(rodata->in6 != 0, "in6", "got %d != exp %d\n", rodata->in6, 0);
CHECK(rodata->in.in6 != 0, "in6", "got %d != exp %d\n", rodata->in.in6, 0);
CHECK(bss->out6 != 0, "out6", "got %d != exp %d\n", bss->out6, 0);
/* validate we can pre-setup global variables, even in .bss */
@@ -49,7 +49,7 @@ void test_skeleton(void)
data->in2 = 11;
bss->in3 = 12;
bss->in4 = 13;
rodata->in6 = 14;
rodata->in.in6 = 14;
err = test_skeleton__load(skel);
if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err))
@@ -60,7 +60,7 @@ void test_skeleton(void)
CHECK(data->in2 != 11, "in2", "got %lld != exp %lld\n", data->in2, 11LL);
CHECK(bss->in3 != 12, "in3", "got %d != exp %d\n", bss->in3, 12);
CHECK(bss->in4 != 13, "in4", "got %lld != exp %lld\n", bss->in4, 13LL);
CHECK(rodata->in6 != 14, "in6", "got %d != exp %d\n", rodata->in6, 14);
CHECK(rodata->in.in6 != 14, "in6", "got %d != exp %d\n", rodata->in.in6, 14);
/* now set new values and attach to get them into outX variables */
data->in1 = 1;

View File

@@ -0,0 +1,75 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Oracle and/or its affiliates. */
#include <test_progs.h>
#include "trace_printk.skel.h"
#define TRACEBUF "/sys/kernel/debug/tracing/trace_pipe"
#define SEARCHMSG "testing,testing"
/* Verify that bpf_trace_printk() output reaches the trace buffer: load
 * and attach the trace_printk skeleton, trigger it, then scan
 * TRACEBUF (trace_pipe) for SEARCHMSG.
 */
void test_trace_printk(void)
{
	int err, iter = 0, duration = 0, found = 0;
	struct trace_printk__bss *bss;
	struct trace_printk *skel;
	char *buf = NULL;
	FILE *fp = NULL;
	size_t buflen = 0;	/* getline() contract: 0 while buf is NULL */
	ssize_t nread;

	skel = trace_printk__open();
	if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
		return;

	err = trace_printk__load(skel);
	if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err))
		goto cleanup;

	bss = skel->bss;

	err = trace_printk__attach(skel);
	if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
		goto cleanup;

	fp = fopen(TRACEBUF, "r");
	if (CHECK(fp == NULL, "could not open trace buffer",
		  "error %d opening %s", errno, TRACEBUF))
		goto cleanup;

	/* We do not want to wait forever if this test fails... */
	fcntl(fileno(fp), F_SETFL, O_NONBLOCK);

	/* wait for tracepoint to trigger */
	usleep(1);
	trace_printk__detach(skel);

	if (CHECK(bss->trace_printk_ran == 0,
		  "bpf_trace_printk never ran",
		  "ran == %d", bss->trace_printk_ran))
		goto cleanup;

	if (CHECK(bss->trace_printk_ret <= 0,
		  "bpf_trace_printk returned <= 0 value",
		  "got %d", bss->trace_printk_ret))
		goto cleanup;

	/* verify our search string is in the trace buffer */
	while ((nread = getline(&buf, &buflen, fp)) >= 0 || errno == EAGAIN) {
		/* only scan buf on a successful read: on EAGAIN buf may
		 * still be NULL (first iteration) or hold a stale,
		 * already-counted line
		 */
		if (nread >= 0 && strstr(buf, SEARCHMSG) != NULL)
			found++;
		if (found == bss->trace_printk_ran)
			break;
		if (++iter > 1000)
			break;
	}

	if (CHECK(!found, "message from bpf_trace_printk not found",
		  "no instance of %s in %s", SEARCHMSG, TRACEBUF))
		goto cleanup;

cleanup:
	trace_printk__destroy(skel);
	free(buf);
	if (fp)
		fclose(fp);
}

View File

@@ -0,0 +1,75 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "udp_limit.skel.h"
#include <sys/types.h>
#include <sys/socket.h>
/* required by the CHECK() macro in test_progs.h */
static int duration;

/* Verify the BPF_CGROUP_INET_SOCK_CREATE/RELEASE hooks: the attached
 * programs enforce at most one UDP socket per cgroup, and count how
 * many times they were invoked.
 */
void test_udp_limit(void)
{
struct udp_limit *skel;
int fd1 = -1, fd2 = -1;
int cgroup_fd;
cgroup_fd = test__join_cgroup("/udp_limit");
if (CHECK(cgroup_fd < 0, "cg-join", "errno %d", errno))
return;
skel = udp_limit__open_and_load();
if (CHECK(!skel, "skel-load", "errno %d", errno))
goto close_cgroup_fd;
/* store links in the skeleton so udp_limit__destroy() detaches them */
skel->links.sock = bpf_program__attach_cgroup(skel->progs.sock, cgroup_fd);
skel->links.sock_release = bpf_program__attach_cgroup(skel->progs.sock_release, cgroup_fd);
if (CHECK(IS_ERR(skel->links.sock) || IS_ERR(skel->links.sock_release),
"cg-attach", "sock %ld sock_release %ld",
PTR_ERR(skel->links.sock),
PTR_ERR(skel->links.sock_release)))
goto close_skeleton;
/* BPF program enforces a single UDP socket per cgroup,
 * verify that.
 */
fd1 = socket(AF_INET, SOCK_DGRAM, 0);
if (CHECK(fd1 < 0, "fd1", "errno %d", errno))
goto close_skeleton;
/* second UDP socket in the same cgroup must be rejected */
fd2 = socket(AF_INET, SOCK_DGRAM, 0);
if (CHECK(fd2 >= 0, "fd2", "errno %d", errno))
goto close_skeleton;
/* We can reopen again after close. */
close(fd1);
fd1 = -1;
fd1 = socket(AF_INET, SOCK_DGRAM, 0);
if (CHECK(fd1 < 0, "fd1-again", "errno %d", errno))
goto close_skeleton;
/* Make sure the program was invoked the expected
 * number of times:
 * - open fd1 - BPF_CGROUP_INET_SOCK_CREATE
 * - attempt to open fd2 - BPF_CGROUP_INET_SOCK_CREATE
 * - close fd1 - BPF_CGROUP_INET_SOCK_RELEASE
 * - open fd1 again - BPF_CGROUP_INET_SOCK_CREATE
 */
if (CHECK(skel->bss->invocations != 4, "bss-invocations",
"invocations=%d", skel->bss->invocations))
goto close_skeleton;
/* We should still have a single socket in use */
if (CHECK(skel->bss->in_use != 1, "bss-in_use",
"in_use=%d", skel->bss->in_use))
goto close_skeleton;
close_skeleton:
if (fd1 >= 0)
close(fd1);
if (fd2 >= 0)
close(fd2);
udp_limit__destroy(skel);
close_cgroup_fd:
close(cgroup_fd);
}