Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
David S. Miller
2020-03-30 19:52:37 -07:00
Current commit ed52f2c608
107 changed files with 6098 additions and 1740 deletions


@@ -35,3 +35,5 @@ CONFIG_MPLS_ROUTING=m
CONFIG_MPLS_IPTUNNEL=m
CONFIG_IPV6_SIT=m
CONFIG_BPF_JIT=y
CONFIG_BPF_LSM=y
CONFIG_SECURITY=y


@@ -11,6 +11,7 @@
static const unsigned int total_bytes = 10 * 1024 * 1024;
static const struct timeval timeo_sec = { .tv_sec = 10 };
static const size_t timeo_optlen = sizeof(timeo_sec);
static int expected_stg = 0xeB9F;
static int stop, duration;
static int settimeo(int fd)
@@ -88,7 +89,7 @@ done:
return NULL;
}
static void do_test(const char *tcp_ca)
static void do_test(const char *tcp_ca, const struct bpf_map *sk_stg_map)
{
struct sockaddr_in6 sa6 = {};
ssize_t nr_recv = 0, bytes = 0;
@@ -126,14 +127,34 @@ static void do_test(const char *tcp_ca)
err = listen(lfd, 1);
if (CHECK(err == -1, "listen", "errno:%d\n", errno))
goto done;
err = pthread_create(&srv_thread, NULL, server, (void *)(long)lfd);
if (CHECK(err != 0, "pthread_create", "err:%d\n", err))
goto done;
if (sk_stg_map) {
err = bpf_map_update_elem(bpf_map__fd(sk_stg_map), &fd,
&expected_stg, BPF_NOEXIST);
if (CHECK(err, "bpf_map_update_elem(sk_stg_map)",
"err:%d errno:%d\n", err, errno))
goto done;
}
/* connect to server */
err = connect(fd, (struct sockaddr *)&sa6, addrlen);
if (CHECK(err == -1, "connect", "errno:%d\n", errno))
goto wait_thread;
goto done;
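/* dctcp_init on the BPF side (see bpf_dctcp.c below) copies the value
 * seeded above into stg_result and deletes the storage, so the lookup
 * that follows is expected to fail with ENOENT. */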
if (sk_stg_map) {
int tmp_stg;
err = bpf_map_lookup_elem(bpf_map__fd(sk_stg_map), &fd,
&tmp_stg);
if (CHECK(!err || errno != ENOENT,
"bpf_map_lookup_elem(sk_stg_map)",
"err:%d errno:%d\n", err, errno))
goto done;
}
err = pthread_create(&srv_thread, NULL, server, (void *)(long)lfd);
if (CHECK(err != 0, "pthread_create", "err:%d errno:%d\n", err, errno))
goto done;
/* recv total_bytes */
while (bytes < total_bytes && !READ_ONCE(stop)) {
@@ -149,7 +170,6 @@ static void do_test(const char *tcp_ca)
CHECK(bytes != total_bytes, "recv", "%zd != %u nr_recv:%zd errno:%d\n",
bytes, total_bytes, nr_recv, errno);
wait_thread:
WRITE_ONCE(stop, 1);
pthread_join(srv_thread, &thread_ret);
CHECK(IS_ERR(thread_ret), "pthread_join", "thread_ret:%ld",
@@ -175,7 +195,7 @@ static void test_cubic(void)
return;
}
do_test("bpf_cubic");
do_test("bpf_cubic", NULL);
bpf_link__destroy(link);
bpf_cubic__destroy(cubic_skel);
@@ -197,7 +217,10 @@ static void test_dctcp(void)
return;
}
do_test("bpf_dctcp");
do_test("bpf_dctcp", dctcp_skel->maps.sk_stg_map);
CHECK(dctcp_skel->bss->stg_result != expected_stg,
"Unexpected stg_result", "stg_result (%x) != expected_stg (%x)\n",
dctcp_skel->bss->stg_result, expected_stg);
bpf_link__destroy(link);
bpf_dctcp__destroy(dctcp_skel);


@@ -125,6 +125,6 @@ void test_btf_dump() {
if (!test__start_subtest(t->name))
continue;
test_btf_dump_case(i, &btf_dump_test_cases[i]);
test_btf_dump_case(i, &btf_dump_test_cases[i]);
}
}


@@ -0,0 +1,244 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "cgroup_helpers.h"
#include "test_cgroup_link.skel.h"
static __u32 duration = 0;
#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null"
static struct test_cgroup_link *skel = NULL;
int ping_and_check(int exp_calls, int exp_alt_calls)
{
skel->bss->calls = 0;
skel->bss->alt_calls = 0;
CHECK_FAIL(system(PING_CMD));
if (CHECK(skel->bss->calls != exp_calls, "call_cnt",
"exp %d, got %d\n", exp_calls, skel->bss->calls))
return -EINVAL;
if (CHECK(skel->bss->alt_calls != exp_alt_calls, "alt_call_cnt",
"exp %d, got %d\n", exp_alt_calls, skel->bss->alt_calls))
return -EINVAL;
return 0;
}
void test_cgroup_link(void)
{
struct {
const char *path;
int fd;
} cgs[] = {
{ "/cg1" },
{ "/cg1/cg2" },
{ "/cg1/cg2/cg3" },
{ "/cg1/cg2/cg3/cg4" },
};
int last_cg = ARRAY_SIZE(cgs) - 1, cg_nr = ARRAY_SIZE(cgs);
DECLARE_LIBBPF_OPTS(bpf_link_update_opts, link_upd_opts);
struct bpf_link *links[ARRAY_SIZE(cgs)] = {}, *tmp_link;
__u32 prog_ids[ARRAY_SIZE(cgs)], prog_cnt = 0, attach_flags;
int i = 0, err, prog_fd;
bool detach_legacy = false;
skel = test_cgroup_link__open_and_load();
if (CHECK(!skel, "skel_open_load", "failed to open/load skeleton\n"))
return;
prog_fd = bpf_program__fd(skel->progs.egress);
err = setup_cgroup_environment();
if (CHECK(err, "cg_init", "failed: %d\n", err))
goto cleanup;
for (i = 0; i < cg_nr; i++) {
cgs[i].fd = create_and_get_cgroup(cgs[i].path);
if (CHECK(cgs[i].fd < 0, "cg_create", "fail: %d\n", cgs[i].fd))
goto cleanup;
}
err = join_cgroup(cgs[last_cg].path);
if (CHECK(err, "cg_join", "fail: %d\n", err))
goto cleanup;
for (i = 0; i < cg_nr; i++) {
links[i] = bpf_program__attach_cgroup(skel->progs.egress,
cgs[i].fd);
if (CHECK(IS_ERR(links[i]), "cg_attach", "i: %d, err: %ld\n",
i, PTR_ERR(links[i])))
goto cleanup;
}
ping_and_check(cg_nr, 0);
/* query the number of effective progs and attach flags in root cg */
err = bpf_prog_query(cgs[0].fd, BPF_CGROUP_INET_EGRESS,
BPF_F_QUERY_EFFECTIVE, &attach_flags, NULL,
&prog_cnt);
CHECK_FAIL(err);
CHECK_FAIL(attach_flags != BPF_F_ALLOW_MULTI);
if (CHECK(prog_cnt != 1, "effect_cnt", "exp %d, got %d\n", 1, prog_cnt))
goto cleanup;
/* query the number of effective progs in last cg */
err = bpf_prog_query(cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS,
BPF_F_QUERY_EFFECTIVE, NULL, NULL,
&prog_cnt);
CHECK_FAIL(err);
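/* attach_flags was not requested in this query (NULL was passed); this
 * re-checks the value fetched by the root cgroup query above. */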
CHECK_FAIL(attach_flags != BPF_F_ALLOW_MULTI);
if (CHECK(prog_cnt != cg_nr, "effect_cnt", "exp %d, got %d\n",
cg_nr, prog_cnt))
goto cleanup;
/* query the effective prog IDs in last cg */
err = bpf_prog_query(cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS,
BPF_F_QUERY_EFFECTIVE, &attach_flags,
prog_ids, &prog_cnt);
CHECK_FAIL(err);
CHECK_FAIL(attach_flags != BPF_F_ALLOW_MULTI);
if (CHECK(prog_cnt != cg_nr, "effect_cnt", "exp %d, got %d\n",
cg_nr, prog_cnt))
goto cleanup;
for (i = 1; i < prog_cnt; i++) {
CHECK(prog_ids[i - 1] != prog_ids[i], "prog_id_check",
"idx %d, prev id %d, cur id %d\n",
i, prog_ids[i - 1], prog_ids[i]);
}
/* detach bottom program and ping again */
bpf_link__destroy(links[last_cg]);
links[last_cg] = NULL;
ping_and_check(cg_nr - 1, 0);
/* mix in with non link-based multi-attachments */
err = bpf_prog_attach(prog_fd, cgs[last_cg].fd,
BPF_CGROUP_INET_EGRESS, BPF_F_ALLOW_MULTI);
if (CHECK(err, "cg_attach_legacy", "errno=%d\n", errno))
goto cleanup;
detach_legacy = true;
links[last_cg] = bpf_program__attach_cgroup(skel->progs.egress,
cgs[last_cg].fd);
if (CHECK(IS_ERR(links[last_cg]), "cg_attach", "err: %ld\n",
PTR_ERR(links[last_cg])))
goto cleanup;
ping_and_check(cg_nr + 1, 0);
/* detach link */
bpf_link__destroy(links[last_cg]);
links[last_cg] = NULL;
/* detach legacy */
err = bpf_prog_detach2(prog_fd, cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS);
if (CHECK(err, "cg_detach_legacy", "errno=%d\n", errno))
goto cleanup;
detach_legacy = false;
/* attach legacy exclusive prog attachment */
err = bpf_prog_attach(prog_fd, cgs[last_cg].fd,
BPF_CGROUP_INET_EGRESS, 0);
if (CHECK(err, "cg_attach_exclusive", "errno=%d\n", errno))
goto cleanup;
detach_legacy = true;
/* attempt to mix in with multi-attach bpf_link */
tmp_link = bpf_program__attach_cgroup(skel->progs.egress,
cgs[last_cg].fd);
if (CHECK(!IS_ERR(tmp_link), "cg_attach_fail", "unexpected success!\n")) {
bpf_link__destroy(tmp_link);
goto cleanup;
}
ping_and_check(cg_nr, 0);
/* detach */
err = bpf_prog_detach2(prog_fd, cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS);
if (CHECK(err, "cg_detach_legacy", "errno=%d\n", errno))
goto cleanup;
detach_legacy = false;
ping_and_check(cg_nr - 1, 0);
/* attach back link-based one */
links[last_cg] = bpf_program__attach_cgroup(skel->progs.egress,
cgs[last_cg].fd);
if (CHECK(IS_ERR(links[last_cg]), "cg_attach", "err: %ld\n",
PTR_ERR(links[last_cg])))
goto cleanup;
ping_and_check(cg_nr, 0);
/* check legacy exclusive prog can't be attached */
err = bpf_prog_attach(prog_fd, cgs[last_cg].fd,
BPF_CGROUP_INET_EGRESS, 0);
if (CHECK(!err, "cg_attach_exclusive", "unexpected success")) {
bpf_prog_detach2(prog_fd, cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS);
goto cleanup;
}
/* replace BPF programs inside their links for all but first link */
for (i = 1; i < cg_nr; i++) {
err = bpf_link__update_program(links[i], skel->progs.egress_alt);
if (CHECK(err, "prog_upd", "link #%d\n", i))
goto cleanup;
}
ping_and_check(1, cg_nr - 1);
/* Attempt program update with wrong expected BPF program */
link_upd_opts.old_prog_fd = bpf_program__fd(skel->progs.egress_alt);
link_upd_opts.flags = BPF_F_REPLACE;
err = bpf_link_update(bpf_link__fd(links[0]),
bpf_program__fd(skel->progs.egress_alt),
&link_upd_opts);
if (CHECK(err == 0 || errno != EPERM, "prog_cmpxchg1",
"unexpectedly succeeded, err %d, errno %d\n", err, -errno))
goto cleanup;
/* Compare-exchange single link program from egress to egress_alt */
link_upd_opts.old_prog_fd = bpf_program__fd(skel->progs.egress);
link_upd_opts.flags = BPF_F_REPLACE;
err = bpf_link_update(bpf_link__fd(links[0]),
bpf_program__fd(skel->progs.egress_alt),
&link_upd_opts);
if (CHECK(err, "prog_cmpxchg2", "errno %d\n", -errno))
goto cleanup;
/* ping */
ping_and_check(0, cg_nr);
/* close cgroup FDs before detaching links */
for (i = 0; i < cg_nr; i++) {
if (cgs[i].fd > 0) {
close(cgs[i].fd);
cgs[i].fd = -1;
}
}
/* BPF programs should still get called */
ping_and_check(0, cg_nr);
/* leave cgroup and remove them, don't detach programs */
cleanup_cgroup_environment();
/* BPF programs should have been auto-detached */
ping_and_check(0, 0);
cleanup:
if (detach_legacy)
bpf_prog_detach2(prog_fd, cgs[last_cg].fd,
BPF_CGROUP_INET_EGRESS);
for (i = 0; i < cg_nr; i++) {
if (!IS_ERR(links[i]))
bpf_link__destroy(links[i]);
}
test_cgroup_link__destroy(skel);
for (i = 0; i < cg_nr; i++) {
if (cgs[i].fd > 0)
close(cgs[i].fd);
}
cleanup_cgroup_environment();
}


@@ -82,6 +82,7 @@ static void get_stack_print_output(void *ctx, int cpu, void *data, __u32 size)
void test_get_stack_raw_tp(void)
{
const char *file = "./test_get_stack_rawtp.o";
const char *file_err = "./test_get_stack_rawtp_err.o";
const char *prog_name = "raw_tracepoint/sys_enter";
int i, err, prog_fd, exp_cnt = MAX_CNT_RAWTP;
struct perf_buffer_opts pb_opts = {};
@@ -93,6 +94,10 @@ void test_get_stack_raw_tp(void)
struct bpf_map *map;
cpu_set_t cpu_set;
err = bpf_prog_load(file_err, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
if (CHECK(err >= 0, "prog_load raw tp", "err %d errno %d\n", err, errno))
return;
err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
return;


@@ -0,0 +1,61 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
void test_global_data_init(void)
{
const char *file = "./test_global_data.o";
int err = -ENOMEM, map_fd, zero = 0;
__u8 *buff = NULL, *newval = NULL;
struct bpf_object *obj;
struct bpf_map *map;
__u32 duration = 0;
size_t sz;
obj = bpf_object__open_file(file, NULL);
if (CHECK_FAIL(!obj))
return;
map = bpf_object__find_map_by_name(obj, "test_glo.rodata");
if (CHECK_FAIL(!map || !bpf_map__is_internal(map)))
goto out;
sz = bpf_map__def(map)->value_size;
newval = malloc(sz);
if (CHECK_FAIL(!newval))
goto out;
memset(newval, 0, sz);
/* wrong size, should fail */
err = bpf_map__set_initial_value(map, newval, sz - 1);
if (CHECK(!err, "reject set initial value wrong size", "err %d\n", err))
goto out;
err = bpf_map__set_initial_value(map, newval, sz);
if (CHECK(err, "set initial value", "err %d\n", err))
goto out;
err = bpf_object__load(obj);
if (CHECK_FAIL(err))
goto out;
map_fd = bpf_map__fd(map);
if (CHECK_FAIL(map_fd < 0))
goto out;
buff = malloc(sz);
if (buff)
err = bpf_map_lookup_elem(map_fd, &zero, buff);
if (CHECK(!buff || err || memcmp(buff, newval, sz),
"compare .rodata map data override",
"err %d errno %d\n", err, errno))
goto out;
memset(newval, 1, sz);
/* object loaded - should fail */
err = bpf_map__set_initial_value(map, newval, sz);
CHECK(!err, "reject set initial value after load", "err %d\n", err);
out:
free(buff);
free(newval);
bpf_object__close(obj);
}


@@ -0,0 +1,309 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook
// Copyright (c) 2019 Cloudflare
// Copyright (c) 2020 Isovalent, Inc.
/*
* Test that the socket assign program is able to redirect traffic towards a
* socket, regardless of whether the destination port or address of the
* traffic matches the address and port that the socket is bound to.
*/
#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>
#include "test_progs.h"
#define BIND_PORT 1234
#define CONNECT_PORT 4321
#define TEST_DADDR (0xC0A80203)
#define NS_SELF "/proc/self/ns/net"
static const struct timeval timeo_sec = { .tv_sec = 3 };
static const size_t timeo_optlen = sizeof(timeo_sec);
static int stop, duration;
static bool
configure_stack(void)
{
char tc_cmd[BUFSIZ];
/* Move to a new networking namespace */
if (CHECK_FAIL(unshare(CLONE_NEWNET)))
return false;
/* Configure necessary links, routes */
if (CHECK_FAIL(system("ip link set dev lo up")))
return false;
if (CHECK_FAIL(system("ip route add local default dev lo")))
return false;
if (CHECK_FAIL(system("ip -6 route add local default dev lo")))
return false;
/* Load qdisc, BPF program */
if (CHECK_FAIL(system("tc qdisc add dev lo clsact")))
return false;
sprintf(tc_cmd, "%s %s %s %s", "tc filter add dev lo ingress bpf",
"direct-action object-file ./test_sk_assign.o",
"section classifier/sk_assign_test",
(env.verbosity < VERBOSE_VERY) ? " 2>/dev/null" : "");
if (CHECK(system(tc_cmd), "BPF load failed;",
"run with -vv for more info\n"))
return false;
return true;
}
static int
start_server(const struct sockaddr *addr, socklen_t len, int type)
{
int fd;
fd = socket(addr->sa_family, type, 0);
if (CHECK_FAIL(fd == -1))
goto out;
if (CHECK_FAIL(setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &timeo_sec,
timeo_optlen)))
goto close_out;
if (CHECK_FAIL(bind(fd, addr, len) == -1))
goto close_out;
if (type == SOCK_STREAM && CHECK_FAIL(listen(fd, 128) == -1))
goto close_out;
goto out;
close_out:
close(fd);
fd = -1;
out:
return fd;
}
static int
connect_to_server(const struct sockaddr *addr, socklen_t len, int type)
{
int fd = -1;
fd = socket(addr->sa_family, type, 0);
if (CHECK_FAIL(fd == -1))
goto out;
if (CHECK_FAIL(setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &timeo_sec,
timeo_optlen)))
goto close_out;
if (CHECK_FAIL(connect(fd, addr, len)))
goto close_out;
goto out;
close_out:
close(fd);
fd = -1;
out:
return fd;
}
static in_port_t
get_port(int fd)
{
struct sockaddr_storage ss;
socklen_t slen = sizeof(ss);
in_port_t port = 0;
if (CHECK_FAIL(getsockname(fd, (struct sockaddr *)&ss, &slen)))
return port;
switch (ss.ss_family) {
case AF_INET:
port = ((struct sockaddr_in *)&ss)->sin_port;
break;
case AF_INET6:
port = ((struct sockaddr_in6 *)&ss)->sin6_port;
break;
default:
CHECK(1, "Invalid address family", "%d\n", ss.ss_family);
}
return port;
}
static ssize_t
rcv_msg(int srv_client, int type)
{
struct sockaddr_storage ss;
char buf[BUFSIZ];
socklen_t slen = sizeof(ss); /* recvfrom() reads this as the addr buffer size */
if (type == SOCK_STREAM)
return read(srv_client, &buf, sizeof(buf));
else
return recvfrom(srv_client, &buf, sizeof(buf), 0,
(struct sockaddr *)&ss, &slen);
}
static int
run_test(int server_fd, const struct sockaddr *addr, socklen_t len, int type)
{
int client = -1, srv_client = -1;
char buf[] = "testing";
in_port_t port;
int ret = 1;
client = connect_to_server(addr, len, type);
if (client == -1) {
perror("Cannot connect to server");
goto out;
}
if (type == SOCK_STREAM) {
srv_client = accept(server_fd, NULL, NULL);
if (CHECK_FAIL(srv_client == -1)) {
perror("Can't accept connection");
goto out;
}
} else {
srv_client = server_fd;
}
if (CHECK_FAIL(write(client, buf, sizeof(buf)) != sizeof(buf))) {
perror("Can't write on client");
goto out;
}
if (CHECK_FAIL(rcv_msg(srv_client, type) != sizeof(buf))) {
perror("Can't read on server");
goto out;
}
port = get_port(srv_client);
if (CHECK_FAIL(!port))
goto out;
/* SOCK_STREAM is connected via accept(), so the server's local address
* will be the CONNECT_PORT rather than the BIND port that corresponds
* to the listen socket. SOCK_DGRAM on the other hand is connectionless
* so we can't really do the same check there; the server doesn't ever
* create a socket with CONNECT_PORT.
*/
if (type == SOCK_STREAM &&
CHECK(port != htons(CONNECT_PORT), "Expected", "port %u but got %u",
CONNECT_PORT, ntohs(port)))
goto out;
else if (type == SOCK_DGRAM &&
CHECK(port != htons(BIND_PORT), "Expected",
"port %u but got %u", BIND_PORT, ntohs(port)))
goto out;
ret = 0;
out:
close(client);
if (srv_client != server_fd)
close(srv_client);
if (ret)
WRITE_ONCE(stop, 1);
return ret;
}
static void
prepare_addr(struct sockaddr *addr, int family, __u16 port, bool rewrite_addr)
{
struct sockaddr_in *addr4;
struct sockaddr_in6 *addr6;
switch (family) {
case AF_INET:
addr4 = (struct sockaddr_in *)addr;
memset(addr4, 0, sizeof(*addr4));
addr4->sin_family = family;
addr4->sin_port = htons(port);
if (rewrite_addr)
addr4->sin_addr.s_addr = htonl(TEST_DADDR);
else
addr4->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
break;
case AF_INET6:
addr6 = (struct sockaddr_in6 *)addr;
memset(addr6, 0, sizeof(*addr6));
addr6->sin6_family = family;
addr6->sin6_port = htons(port);
addr6->sin6_addr = in6addr_loopback;
if (rewrite_addr)
addr6->sin6_addr.s6_addr32[3] = htonl(TEST_DADDR);
break;
default:
fprintf(stderr, "Invalid family %d", family);
}
}
struct test_sk_cfg {
const char *name;
int family;
struct sockaddr *addr;
socklen_t len;
int type;
bool rewrite_addr;
};
#define TEST(NAME, FAMILY, TYPE, REWRITE) \
{ \
.name = NAME, \
.family = FAMILY, \
.addr = (FAMILY == AF_INET) ? (struct sockaddr *)&addr4 \
: (struct sockaddr *)&addr6, \
.len = (FAMILY == AF_INET) ? sizeof(addr4) : sizeof(addr6), \
.type = TYPE, \
.rewrite_addr = REWRITE, \
}
void test_sk_assign(void)
{
struct sockaddr_in addr4;
struct sockaddr_in6 addr6;
struct test_sk_cfg tests[] = {
TEST("ipv4 tcp port redir", AF_INET, SOCK_STREAM, false),
TEST("ipv4 tcp addr redir", AF_INET, SOCK_STREAM, true),
TEST("ipv6 tcp port redir", AF_INET6, SOCK_STREAM, false),
TEST("ipv6 tcp addr redir", AF_INET6, SOCK_STREAM, true),
TEST("ipv4 udp port redir", AF_INET, SOCK_DGRAM, false),
TEST("ipv4 udp addr redir", AF_INET, SOCK_DGRAM, true),
TEST("ipv6 udp port redir", AF_INET6, SOCK_DGRAM, false),
TEST("ipv6 udp addr redir", AF_INET6, SOCK_DGRAM, true),
};
int server = -1;
int self_net;
self_net = open(NS_SELF, O_RDONLY);
if (CHECK_FAIL(self_net < 0)) {
perror("Unable to open "NS_SELF);
return;
}
if (!configure_stack()) {
perror("configure_stack");
goto cleanup;
}
for (int i = 0; i < ARRAY_SIZE(tests) && !READ_ONCE(stop); i++) {
struct test_sk_cfg *test = &tests[i];
const struct sockaddr *addr;
if (!test__start_subtest(test->name))
continue;
prepare_addr(test->addr, test->family, BIND_PORT, false);
addr = (const struct sockaddr *)test->addr;
server = start_server(addr, test->len, test->type);
if (server == -1)
goto cleanup;
/* connect to unbound ports */
prepare_addr(test->addr, test->family, CONNECT_PORT,
test->rewrite_addr);
if (run_test(server, addr, test->len, test->type))
goto close;
close(server);
server = -1;
}
close:
close(server);
cleanup:
if (CHECK_FAIL(setns(self_net, CLONE_NEWNET)))
perror("Failed to setns("NS_SELF")");
close(self_net);
}


@@ -226,7 +226,7 @@ static void *server_thread(void *arg)
return ERR_PTR(err);
}
while (!server_done) {
while (true) {
client_fd = accept(fd, (struct sockaddr *)&addr, &len);
if (client_fd == -1 && errno == EAGAIN) {
usleep(50);
@@ -272,7 +272,7 @@ void test_tcp_rtt(void)
CHECK_FAIL(run_test(cgroup_fd, server_fd));
server_done = true;
pthread_join(tid, &server_res);
CHECK_FAIL(pthread_join(tid, &server_res));
CHECK_FAIL(IS_ERR(server_res));
close_server_fd:


@@ -0,0 +1,86 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020 Google LLC.
*/
#include <test_progs.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>
#include <malloc.h>
#include <stdlib.h>
#include "lsm.skel.h"
char *CMD_ARGS[] = {"true", NULL};
int heap_mprotect(void)
{
void *buf;
long sz;
int ret;
sz = sysconf(_SC_PAGESIZE);
if (sz < 0)
return sz;
buf = memalign(sz, 2 * sz);
if (buf == NULL)
return -ENOMEM;
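/* The lsm/file_mprotect BPF program (test_int_hook in progs/lsm.c, shown
 * below) rejects mprotect() on heap pages of the monitored pid with
 * -EPERM; the caller checks for exactly that errno. */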
ret = mprotect(buf, sz, PROT_READ | PROT_WRITE | PROT_EXEC);
free(buf);
return ret;
}
int exec_cmd(int *monitored_pid)
{
int child_pid, child_status;
child_pid = fork();
if (child_pid == 0) {
*monitored_pid = getpid();
execvp(CMD_ARGS[0], CMD_ARGS);
return -EINVAL;
} else if (child_pid > 0) {
waitpid(child_pid, &child_status, 0);
return child_status;
}
return -EINVAL;
}
void test_test_lsm(void)
{
struct lsm *skel = NULL;
int err, duration = 0;
skel = lsm__open_and_load();
if (CHECK(!skel, "skel_load", "lsm skeleton failed\n"))
goto close_prog;
err = lsm__attach(skel);
if (CHECK(err, "attach", "lsm attach failed: %d\n", err))
goto close_prog;
err = exec_cmd(&skel->bss->monitored_pid);
if (CHECK(err < 0, "exec_cmd", "err %d errno %d\n", err, errno))
goto close_prog;
CHECK(skel->bss->bprm_count != 1, "bprm_count", "bprm_count = %d\n",
skel->bss->bprm_count);
skel->bss->monitored_pid = getpid();
err = heap_mprotect();
if (CHECK(errno != EPERM, "heap_mprotect", "want errno=EPERM, got %d\n",
errno))
goto close_prog;
CHECK(skel->bss->mprotect_count != 1, "mprotect_count",
"mprotect_count = %d\n", skel->bss->mprotect_count);
close_prog:
lsm__destroy(skel);
}


@@ -11,7 +11,7 @@ static void nsleep()
{
struct timespec ts = { .tv_nsec = MY_TV_NSEC };
(void)nanosleep(&ts, NULL);
(void)syscall(__NR_nanosleep, &ts, NULL);
}
void test_vmlinux(void)


@@ -0,0 +1,62 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#define IFINDEX_LO 1
#define XDP_FLAGS_REPLACE (1U << 4)
void test_xdp_attach(void)
{
struct bpf_object *obj1, *obj2, *obj3;
const char *file = "./test_xdp.o";
int err, fd1, fd2, fd3;
__u32 duration = 0;
DECLARE_LIBBPF_OPTS(bpf_xdp_set_link_opts, opts,
.old_fd = -1);
err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj1, &fd1);
if (CHECK_FAIL(err))
return;
err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj2, &fd2);
if (CHECK_FAIL(err))
goto out_1;
err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj3, &fd3);
if (CHECK_FAIL(err))
goto out_2;
err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, fd1, XDP_FLAGS_REPLACE,
&opts);
if (CHECK(err, "load_ok", "initial load failed"))
goto out_close;
err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, fd2, XDP_FLAGS_REPLACE,
&opts);
if (CHECK(!err, "load_fail", "load with expected id didn't fail"))
goto out;
opts.old_fd = fd1;
err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, fd2, 0, &opts);
if (CHECK(err, "replace_ok", "replace valid old_fd failed"))
goto out;
err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, fd3, 0, &opts);
if (CHECK(!err, "replace_fail", "replace invalid old_fd didn't fail"))
goto out;
err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, -1, 0, &opts);
if (CHECK(!err, "remove_fail", "remove invalid old_fd didn't fail"))
goto out;
opts.old_fd = fd2;
err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, -1, 0, &opts);
if (CHECK(err, "remove_ok", "remove valid old_fd failed"))
goto out;
out:
bpf_set_link_xdp_fd(IFINDEX_LO, -1, 0);
out_close:
bpf_object__close(obj3);
out_2:
bpf_object__close(obj2);
out_1:
bpf_object__close(obj1);
}


@@ -6,6 +6,7 @@
* the kernel BPF logic.
*/
#include <stddef.h>
#include <linux/bpf.h>
#include <linux/types.h>
#include <bpf/bpf_helpers.h>
@@ -14,6 +15,15 @@
char _license[] SEC("license") = "GPL";
int stg_result = 0;
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, int);
} sk_stg_map SEC(".maps");
#define DCTCP_MAX_ALPHA 1024U
struct dctcp {
@@ -43,12 +53,18 @@ void BPF_PROG(dctcp_init, struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct dctcp *ca = inet_csk_ca(sk);
int *stg;
ca->prior_rcv_nxt = tp->rcv_nxt;
ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);
ca->loss_cwnd = 0;
ca->ce_state = 0;
stg = bpf_sk_storage_get(&sk_stg_map, (void *)tp, NULL, 0);
if (stg) {
stg_result = *stg;
bpf_sk_storage_delete(&sk_stg_map, (void *)tp);
}
dctcp_reset(tp, ca);
}


@@ -0,0 +1,48 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2020 Google LLC.
*/
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <errno.h>
char _license[] SEC("license") = "GPL";
int monitored_pid = 0;
int mprotect_count = 0;
int bprm_count = 0;
SEC("lsm/file_mprotect")
int BPF_PROG(test_int_hook, struct vm_area_struct *vma,
unsigned long reqprot, unsigned long prot, int ret)
{
if (ret != 0)
return ret;
__u32 pid = bpf_get_current_pid_tgid() >> 32;
int is_heap = 0;
is_heap = (vma->vm_start >= vma->vm_mm->start_brk &&
vma->vm_end <= vma->vm_mm->brk);
if (is_heap && monitored_pid == pid) {
mprotect_count++;
ret = -EPERM;
}
return ret;
}
SEC("lsm/bprm_committed_creds")
int BPF_PROG(test_void_hook, struct linux_binprm *bprm)
{
__u32 pid = bpf_get_current_pid_tgid() >> 32;
if (monitored_pid == pid)
bprm_count++;
return 0;
}


@@ -12,7 +12,6 @@ int bpf_prog1(struct __sk_buff *skb)
__u32 lport = skb->local_port;
__u32 rport = skb->remote_port;
__u8 *d = data;
__u32 len = (__u32) data_end - (__u32) data;
int err;
if (data + 10 > data_end) {


@@ -0,0 +1,24 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
int calls = 0;
int alt_calls = 0;
SEC("cgroup_skb/egress1")
int egress(struct __sk_buff *skb)
{
__sync_fetch_and_add(&calls, 1);
return 1;
}
SEC("cgroup_skb/egress2")
int egress_alt(struct __sk_buff *skb)
{
__sync_fetch_and_add(&alt_calls, 1);
return 1;
}
char _license[] SEC("license") = "GPL";


@@ -0,0 +1,26 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#define MAX_STACK_RAWTP 10
SEC("raw_tracepoint/sys_enter")
int bpf_prog2(void *ctx)
{
__u64 stack[MAX_STACK_RAWTP];
int error;
/* set all the flags which should return -EINVAL */
error = bpf_get_stack(ctx, stack, 0, -1);
if (error < 0)
goto loop;
return error;
loop:
while (1) {
error++;
}
}
char _license[] SEC("license") = "GPL";


@@ -68,7 +68,7 @@ static struct foo struct3 = {
bpf_map_update_elem(&result_##map, &key, var, 0); \
} while (0)
SEC("static_data_load")
SEC("classifier/static_data_load")
int load_static_data(struct __sk_buff *skb)
{
static const __u64 bar = ~0;


@@ -0,0 +1,204 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Cloudflare Ltd.
// Copyright (c) 2020 Isovalent, Inc.
#include <stddef.h>
#include <stdbool.h>
#include <string.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/pkt_cls.h>
#include <linux/tcp.h>
#include <sys/socket.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
int _version SEC("version") = 1;
char _license[] SEC("license") = "GPL";
/* Fill 'tuple' with L3 info, and attempt to find L4. On fail, return NULL. */
static inline struct bpf_sock_tuple *
get_tuple(struct __sk_buff *skb, bool *ipv4, bool *tcp)
{
void *data_end = (void *)(long)skb->data_end;
void *data = (void *)(long)skb->data;
struct bpf_sock_tuple *result;
struct ethhdr *eth;
__u64 tuple_len;
__u8 proto = 0;
__u64 ihl_len;
eth = (struct ethhdr *)(data);
if (eth + 1 > data_end)
return NULL;
if (eth->h_proto == bpf_htons(ETH_P_IP)) {
struct iphdr *iph = (struct iphdr *)(data + sizeof(*eth));
if (iph + 1 > data_end)
return NULL;
if (iph->ihl != 5)
/* Options are not supported */
return NULL;
ihl_len = iph->ihl * 4;
proto = iph->protocol;
*ipv4 = true;
result = (struct bpf_sock_tuple *)&iph->saddr;
} else if (eth->h_proto == bpf_htons(ETH_P_IPV6)) {
struct ipv6hdr *ip6h = (struct ipv6hdr *)(data + sizeof(*eth));
if (ip6h + 1 > data_end)
return NULL;
ihl_len = sizeof(*ip6h);
proto = ip6h->nexthdr;
*ipv4 = false;
result = (struct bpf_sock_tuple *)&ip6h->saddr;
} else {
return (struct bpf_sock_tuple *)data;
}
if (proto != IPPROTO_TCP && proto != IPPROTO_UDP)
return NULL;
*tcp = (proto == IPPROTO_TCP);
return result;
}
static inline int
handle_udp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, bool ipv4)
{
struct bpf_sock_tuple ln = {0};
struct bpf_sock *sk;
size_t tuple_len;
int ret;
tuple_len = ipv4 ? sizeof(tuple->ipv4) : sizeof(tuple->ipv6);
if ((void *)tuple + tuple_len > (void *)(long)skb->data_end)
return TC_ACT_SHOT;
sk = bpf_sk_lookup_udp(skb, tuple, tuple_len, BPF_F_CURRENT_NETNS, 0);
if (sk)
goto assign;
if (ipv4) {
if (tuple->ipv4.dport != bpf_htons(4321))
return TC_ACT_OK;
ln.ipv4.daddr = bpf_htonl(0x7f000001);
ln.ipv4.dport = bpf_htons(1234);
sk = bpf_sk_lookup_udp(skb, &ln, sizeof(ln.ipv4),
BPF_F_CURRENT_NETNS, 0);
} else {
if (tuple->ipv6.dport != bpf_htons(4321))
return TC_ACT_OK;
/* Upper parts of daddr are already zero. */
ln.ipv6.daddr[3] = bpf_htonl(0x1);
ln.ipv6.dport = bpf_htons(1234);
sk = bpf_sk_lookup_udp(skb, &ln, sizeof(ln.ipv6),
BPF_F_CURRENT_NETNS, 0);
}
/* workaround: We can't do a single socket lookup here, because then
* the compiler will likely spill tuple_len to the stack. This makes it
* lose all bounds information in the verifier, which then rejects the
* call as unsafe.
*/
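/* Illustration only (hypothetical shape, not part of this test): the
 * single variable-length lookup the comment above rules out would be
 *
 *   sk = bpf_sk_lookup_udp(skb, &ln,
 *                          ipv4 ? sizeof(ln.ipv4) : sizeof(ln.ipv6),
 *                          BPF_F_CURRENT_NETNS, 0);
 *
 * where the computed length would likely be spilled to the stack and
 * lose its bounds, so the test keeps two constant-size calls instead.
 */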
if (!sk)
return TC_ACT_SHOT;
assign:
ret = bpf_sk_assign(skb, sk, 0);
bpf_sk_release(sk);
return ret;
}
static inline int
handle_tcp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, bool ipv4)
{
struct bpf_sock_tuple ln = {0};
struct bpf_sock *sk;
size_t tuple_len;
int ret;
tuple_len = ipv4 ? sizeof(tuple->ipv4) : sizeof(tuple->ipv6);
if ((void *)tuple + tuple_len > (void *)(long)skb->data_end)
return TC_ACT_SHOT;
sk = bpf_skc_lookup_tcp(skb, tuple, tuple_len, BPF_F_CURRENT_NETNS, 0);
if (sk) {
if (sk->state != BPF_TCP_LISTEN)
goto assign;
bpf_sk_release(sk);
}
if (ipv4) {
if (tuple->ipv4.dport != bpf_htons(4321))
return TC_ACT_OK;
ln.ipv4.daddr = bpf_htonl(0x7f000001);
ln.ipv4.dport = bpf_htons(1234);
sk = bpf_skc_lookup_tcp(skb, &ln, sizeof(ln.ipv4),
BPF_F_CURRENT_NETNS, 0);
} else {
if (tuple->ipv6.dport != bpf_htons(4321))
return TC_ACT_OK;
/* Upper parts of daddr are already zero. */
ln.ipv6.daddr[3] = bpf_htonl(0x1);
ln.ipv6.dport = bpf_htons(1234);
sk = bpf_skc_lookup_tcp(skb, &ln, sizeof(ln.ipv6),
BPF_F_CURRENT_NETNS, 0);
}
/* workaround: We can't do a single socket lookup here, because then
* the compiler will likely spill tuple_len to the stack. This makes it
* lose all bounds information in the verifier, which then rejects the
* call as unsafe.
*/
if (!sk)
return TC_ACT_SHOT;
if (sk->state != BPF_TCP_LISTEN) {
bpf_sk_release(sk);
return TC_ACT_SHOT;
}
assign:
ret = bpf_sk_assign(skb, sk, 0);
bpf_sk_release(sk);
return ret;
}
SEC("classifier/sk_assign_test")
int bpf_sk_assign_test(struct __sk_buff *skb)
{
struct bpf_sock_tuple *tuple, ln = {0};
bool ipv4 = false;
bool tcp = false;
int tuple_len;
int ret = 0;
tuple = get_tuple(skb, &ipv4, &tcp);
if (!tuple)
return TC_ACT_SHOT;
/* Note that the verifier socket return type for bpf_skc_lookup_tcp()
* differs from bpf_sk_lookup_udp(), so even though the C-level type is
* the same here, if we try to share the implementations they will
* fail to verify because we're crossing pointer types.
*/
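/* Illustration only (hypothetical, would not verify): a shared helper like
 *
 *   static inline int handle(struct __sk_buff *skb,
 *                            struct bpf_sock_tuple *tuple,
 *                            bool ipv4, bool tcp)
 *
 * would mix the verifier's PTR_TO_SOCK_COMMON (from bpf_skc_lookup_tcp)
 * and PTR_TO_SOCKET (from bpf_sk_lookup_udp) return types at one call
 * site, which is what forces the split above.
 */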
if (tcp)
ret = handle_tcp(skb, tuple, ipv4);
else
ret = handle_udp(skb, tuple, ipv4);
return ret == 0 ? TC_ACT_OK : TC_ACT_SHOT;
}


@@ -1,12 +1,15 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
*/
#define _GNU_SOURCE
#include "test_progs.h"
#include "cgroup_helpers.h"
#include "bpf_rlimit.h"
#include <argp.h>
#include <string.h>
#include <pthread.h>
#include <sched.h>
#include <signal.h>
#include <string.h>
#include <execinfo.h> /* backtrace */
/* defined in test_progs.h */
@@ -35,16 +38,12 @@ struct prog_test_def {
*/
int usleep(useconds_t usec)
{
struct timespec ts;
struct timespec ts = {
.tv_sec = usec / 1000000,
.tv_nsec = (usec % 1000000) * 1000,
};
if (usec > 999999) {
ts.tv_sec = usec / 1000000;
ts.tv_nsec = usec % 1000000;
} else {
ts.tv_sec = 0;
ts.tv_nsec = usec;
}
return nanosleep(&ts, NULL);
return syscall(__NR_nanosleep, &ts, NULL);
}
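/* Aside (not part of the diff): going through the raw __NR_nanosleep
 * syscall here, as in prog_tests/vmlinux.c above, keeps tests that probe
 * the nanosleep syscall deterministic -- a libc wrapper may be implemented
 * via a different syscall (e.g. clock_nanosleep) and would then bypass
 * those probes. */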
static bool should_run(struct test_selector *sel, int num, const char *name)
@@ -94,6 +93,34 @@ static void skip_account(void)
}
}
static void stdio_restore(void);
/* A bunch of tests set custom affinity per-thread and/or per-process. Reset
* it after each test/sub-test.
*/
static void reset_affinity() {
cpu_set_t cpuset;
int i, err;
CPU_ZERO(&cpuset);
for (i = 0; i < env.nr_cpus; i++)
CPU_SET(i, &cpuset);
err = sched_setaffinity(0, sizeof(cpuset), &cpuset);
if (err < 0) {
stdio_restore();
fprintf(stderr, "Failed to reset process affinity: %d!\n", err);
exit(-1);
}
err = pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
if (err < 0) {
stdio_restore();
fprintf(stderr, "Failed to reset thread affinity: %d!\n", err);
exit(-1);
}
}
void test__end_subtest()
{
struct prog_test_def *test = env.test;
@@ -111,6 +138,8 @@ void test__end_subtest()
test->test_num, test->subtest_num,
test->subtest_name, sub_error_cnt ? "FAIL" : "OK");
reset_affinity();
free(test->subtest_name);
test->subtest_name = NULL;
}
@@ -428,7 +457,7 @@ err:
int parse_num_list(const char *s, struct test_selector *sel)
{
int i, set_len = 0, num, start = 0, end = -1;
int i, set_len = 0, new_len, num, start = 0, end = -1;
bool *set = NULL, *tmp, parsing_end = false;
char *next;
@@ -463,18 +492,19 @@ int parse_num_list(const char *s, struct test_selector *sel)
return -EINVAL;
if (end + 1 > set_len) {
set_len = end + 1;
tmp = realloc(set, set_len);
new_len = end + 1;
tmp = realloc(set, new_len);
if (!tmp) {
free(set);
return -ENOMEM;
}
for (i = set_len; i < start; i++)
tmp[i] = false;
set = tmp;
set_len = new_len;
}
for (i = start; i <= end; i++) {
for (i = start; i <= end; i++)
set[i] = true;
}
}
if (!set)
@@ -682,6 +712,12 @@ int main(int argc, char **argv)
srand(time(NULL));
env.jit_enabled = is_jit_enabled();
env.nr_cpus = libbpf_num_possible_cpus();
if (env.nr_cpus < 0) {
fprintf(stderr, "Failed to get number of CPUs: %d!\n",
env.nr_cpus);
return -1;
}
stdio_hijack();
for (i = 0; i < prog_test_cnt; i++) {
@@ -712,6 +748,7 @@ int main(int argc, char **argv)
test->test_num, test->test_name,
test->error_cnt ? "FAIL" : "OK");
reset_affinity();
if (test->need_cgroup_cleanup)
cleanup_cgroup_environment();
}


@@ -71,6 +71,7 @@ struct test_env {
FILE *stderr;
char *log_buf;
size_t log_cnt;
int nr_cpus;
int succ_cnt; /* successful tests */
int sub_succ_cnt; /* successful sub-tests */


@@ -4,12 +4,15 @@
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>
#include <linux/perf_event.h>
#include <sys/mman.h>
#include "trace_helpers.h"
#define DEBUGFS "/sys/kernel/debug/tracing/"
#define MAX_SYMS 300000
static struct ksym syms[MAX_SYMS];
static int sym_cnt;
@@ -86,3 +89,23 @@ long ksym_get_addr(const char *name)
return 0;
}
void read_trace_pipe(void)
{
int trace_fd;
trace_fd = open(DEBUGFS "trace_pipe", O_RDONLY, 0);
if (trace_fd < 0)
return;
while (1) {
static char buf[4096];
ssize_t sz;
sz = read(trace_fd, buf, sizeof(buf) - 1);
if (sz > 0) {
buf[sz] = 0;
puts(buf);
}
}
}


@@ -12,5 +12,6 @@ struct ksym {
int load_kallsyms(void);
struct ksym *ksym_search(long key);
long ksym_get_addr(const char *name);
void read_trace_pipe(void);
#endif


@@ -257,17 +257,15 @@
* [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
*/
BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
/* no-op or OOB pointer computation */
/* error on OOB pointer computation */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
/* potentially OOB access */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
/* exit */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_hash_8b = { 3 },
/* not actually fully unbounded, but the bound is very high */
.errstr = "R0 unbounded memory access",
.errstr = "value 72057594021150720 makes map_value pointer be out of bounds",
.result = REJECT
},
{
@@ -299,17 +297,15 @@
* [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
*/
BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
/* no-op or OOB pointer computation */
/* error on OOB pointer computation */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
/* potentially OOB access */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
/* exit */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_hash_8b = { 3 },
/* not actually fully unbounded, but the bound is very high */
.errstr = "R0 unbounded memory access",
.errstr = "value 72057594021150720 makes map_value pointer be out of bounds",
.result = REJECT
},
{
@@ -411,16 +407,14 @@
BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 31),
/* r1 = 0xffff'fffe (NOT 0!) */
BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 2),
/* computes OOB pointer */
/* error on computing OOB pointer */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
/* OOB access */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
/* exit */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_hash_8b = { 3 },
.errstr = "R0 invalid mem access",
.errstr = "math between map_value pointer and 4294967294 is not allowed",
.result = REJECT,
},
{
@@ -506,3 +500,42 @@
.errstr = "map_value pointer and 1000000000000",
.result = REJECT
},
{
"bounds check mixed 32bit and 64bit arithmatic. test1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_1, -1),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
/* r1 = 0xffffFFFF00000001 */
BPF_JMP32_IMM(BPF_JGT, BPF_REG_1, 1, 3),
/* check ALU64 op keeps 32bit bounds */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
BPF_JMP32_IMM(BPF_JGT, BPF_REG_1, 2, 1),
BPF_JMP_A(1),
/* invalid ldx if bounds are lost above */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1),
BPF_EXIT_INSN(),
},
.result = ACCEPT
},
{
"bounds check mixed 32bit and 64bit arithmatic. test2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_1, -1),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
/* r1 = 0xffffFFFF00000001 */
BPF_MOV64_IMM(BPF_REG_2, 3),
/* r1 = 0x2 */
BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
/* check ALU32 op zero extends 64bit bounds */
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 1),
BPF_JMP_A(1),
/* invalid ldx if bounds are lost above */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1),
BPF_EXIT_INSN(),
},
.result = ACCEPT
},


@@ -9,17 +9,17 @@
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)),
BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)/2),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)),
BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)/2),
BPF_MOV64_IMM(BPF_REG_4, 256),
BPF_EMIT_CALL(BPF_FUNC_get_stack),
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32),
BPF_JMP_REG(BPF_JSLT, BPF_REG_1, BPF_REG_8, 16),
BPF_JMP_REG(BPF_JSGT, BPF_REG_1, BPF_REG_8, 16),
BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8),
@@ -29,7 +29,7 @@
BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)),
BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)/2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5),
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),


@@ -91,3 +91,108 @@
.result = REJECT,
.errstr = "variable ctx access var_off=(0x0; 0x4)",
},
{
"pass ctx or null check, 1: ctx",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_netns_cookie),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
.expected_attach_type = BPF_CGROUP_UDP6_SENDMSG,
.result = ACCEPT,
},
{
"pass ctx or null check, 2: null",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_netns_cookie),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
.expected_attach_type = BPF_CGROUP_UDP6_SENDMSG,
.result = ACCEPT,
},
{
"pass ctx or null check, 3: 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_netns_cookie),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
.expected_attach_type = BPF_CGROUP_UDP6_SENDMSG,
.result = REJECT,
.errstr = "R1 type=inv expected=ctx",
},
{
"pass ctx or null check, 4: ctx - const",
.insns = {
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_netns_cookie),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
.expected_attach_type = BPF_CGROUP_UDP6_SENDMSG,
.result = REJECT,
.errstr = "dereference of modified ctx ptr",
},
{
"pass ctx or null check, 5: null (connect)",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_netns_cookie),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
.expected_attach_type = BPF_CGROUP_INET4_CONNECT,
.result = ACCEPT,
},
{
"pass ctx or null check, 6: null (bind)",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_netns_cookie),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
.expected_attach_type = BPF_CGROUP_INET4_POST_BIND,
.result = ACCEPT,
},
{
"pass ctx or null check, 7: ctx (bind)",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_socket_cookie),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
.expected_attach_type = BPF_CGROUP_INET4_POST_BIND,
.result = ACCEPT,
},
{
"pass ctx or null check, 8: null (bind)",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_socket_cookie),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
.expected_attach_type = BPF_CGROUP_INET4_POST_BIND,
.result = REJECT,
.errstr = "R1 type=inv expected=ctx",
},