Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next

Alexei Starovoitov says:

====================
pull-request: bpf-next 2020-05-01 (v2)

The following pull-request contains BPF updates for your *net-next* tree.

We've added 61 non-merge commits during the last 6 day(s) which contain
a total of 153 files changed, 6739 insertions(+), 3367 deletions(-).

The main changes are:

1) Pulled work.sysctl from the vfs tree with the sysctl bpf changes.

2) bpf_link observability, from Andrii.

3) BTF-defined map in map, from Andrii.

4) ASAN fixes for selftests, from Andrii.

5) Allow bpf_map_lookup_elem for SOCKMAP and SOCKHASH, from Jakub.

6) Production Cloudflare classifier as a selftest, from Lorenz.

7) bpf_ktime_get_*_ns() helper improvements, from Maciej.

8) Unprivileged bpftool feature probe, from Quentin.

9) BPF_ENABLE_STATS command, from Song.

10) Enable bpf_[gs]etsockopt() helpers for sock_ops progs, from Stanislav.

11) Enable a bunch of common helpers for cg-device, sysctl, and sockopt progs,
    from Stanislav.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
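
As a quick illustration of item 9 above: the new BPF_ENABLE_STATS command is
exposed through libbpf's bpf_enable_stats(), and run-time statistics stay
enabled for as long as the returned fd is held open (the test_enable_stats
selftest in the diff below relies on exactly this). A minimal user-space
sketch:

#include <unistd.h>
#include <bpf/bpf.h>

int enable_runtime_stats(void)
{
	/* Ask the kernel to start accounting run_time_ns/run_cnt */
	int stats_fd = bpf_enable_stats(BPF_STATS_RUN_TIME);

	if (stats_fd < 0)
		return stats_fd; /* kernel without BPF_ENABLE_STATS */

	/* ... run the workload; read the counters back with
	 * bpf_obj_get_info_by_fd() on a program fd ...
	 */

	close(stats_fd); /* dropping the last fd lets stats be switched off */
	return 0;
}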

@@ -30,8 +30,6 @@ test_tcpnotify_user
test_libbpf
test_tcp_check_syncookie_user
test_sysctl
test_hashmap
test_btf_dump
test_current_pid_tgid_new_ns
xdping
test_cpp
@@ -39,4 +37,4 @@ test_cpp
/no_alu32
/bpf_gcc
/tools
/runqslower


@@ -20,9 +20,10 @@ CLANG ?= clang
LLC ?= llc
LLVM_OBJCOPY ?= llvm-objcopy
BPF_GCC ?= $(shell command -v bpf-gcc;)
CFLAGS += -g -rdynamic -Wall -O2 $(GENFLAGS) -I$(CURDIR) \
-I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) -I$(TOOLSINCDIR) \
-I$(APIDIR) \
SAN_CFLAGS ?=
CFLAGS += -g -rdynamic -Wall -O2 $(GENFLAGS) $(SAN_CFLAGS) \
-I$(CURDIR) -I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) \
-I$(TOOLSINCDIR) -I$(APIDIR) \
-Dbpf_prog_load=bpf_prog_test_load \
-Dbpf_load_program=bpf_test_load_program
LDLIBS += -lcap -lelf -lz -lrt -lpthread
@@ -32,7 +33,7 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test
test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \
test_sock test_btf test_sockmap get_cgroup_id_user test_socket_cookie \
test_cgroup_storage \
test_netcnt test_tcpnotify_user test_sock_fields test_sysctl test_hashmap \
test_netcnt test_tcpnotify_user test_sock_fields test_sysctl \
test_progs-no_alu32 \
test_current_pid_tgid_new_ns
@@ -141,7 +142,8 @@ VMLINUX_BTF := $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS))))
$(OUTPUT)/runqslower: $(BPFOBJ)
$(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/runqslower \
OUTPUT=$(SCRATCH_DIR)/ VMLINUX_BTF=$(VMLINUX_BTF) \
BPFOBJ=$(BPFOBJ) BPF_INCLUDE=$(INCLUDE_DIR)
BPFOBJ=$(BPFOBJ) BPF_INCLUDE=$(INCLUDE_DIR) && \
cp $(SCRATCH_DIR)/runqslower $@
$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): $(OUTPUT)/test_stub.o $(BPFOBJ)
@@ -241,7 +243,7 @@ define GCC_BPF_BUILD_RULE
$(BPF_GCC) $3 $4 -O2 -c $1 -o $2
endef
SKEL_BLACKLIST := btf__% test_pinning_invalid.c
SKEL_BLACKLIST := btf__% test_pinning_invalid.c test_sk_assign.c
# Set up extra TRUNNER_XXX "temporary" variables in the environment (relies on
# $eval()) and pass control to DEFINE_TEST_RUNNER_RULES.
@@ -323,7 +325,7 @@ $(TRUNNER_TEST_OBJS): $(TRUNNER_OUTPUT)/%.test.o: \
$(TRUNNER_BPF_SKELS) \
$$(BPFOBJ) | $(TRUNNER_OUTPUT)
$$(call msg,TEST-OBJ,$(TRUNNER_BINARY),$$@)
cd $$(@D) && $$(CC) $$(CFLAGS) -c $(CURDIR)/$$< $$(LDLIBS) -o $$(@F)
cd $$(@D) && $$(CC) -I. $$(CFLAGS) -c $(CURDIR)/$$< $$(LDLIBS) -o $$(@F)
$(TRUNNER_EXTRA_OBJS): $(TRUNNER_OUTPUT)/%.o: \
%.c \


@@ -1,26 +1,30 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#define nr_iters 2
void test_bpf_obj_id(void)
{
const __u64 array_magic_value = 0xfaceb00c;
const __u32 array_key = 0;
const int nr_iters = 2;
const char *file = "./test_obj_id.o";
const char *expected_prog_name = "test_obj_id";
const char *expected_map_name = "test_map_id";
const __u64 nsec_per_sec = 1000000000;
struct bpf_object *objs[nr_iters];
struct bpf_object *objs[nr_iters] = {};
struct bpf_link *links[nr_iters] = {};
struct bpf_program *prog;
int prog_fds[nr_iters], map_fds[nr_iters];
/* +1 to test for the info_len returned by kernel */
struct bpf_prog_info prog_infos[nr_iters + 1];
struct bpf_map_info map_infos[nr_iters + 1];
struct bpf_link_info link_infos[nr_iters + 1];
/* Each prog only uses one map. +1 to test nr_map_ids
* returned by kernel.
*/
__u32 map_ids[nr_iters + 1];
char jited_insns[128], xlated_insns[128], zeros[128];
char jited_insns[128], xlated_insns[128], zeros[128], tp_name[128];
__u32 i, next_id, info_len, nr_id_found, duration = 0;
struct timespec real_time_ts, boot_time_ts;
int err = 0;
@@ -36,14 +40,15 @@ void test_bpf_obj_id(void)
CHECK(err >= 0 || errno != ENOENT,
"get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno);
for (i = 0; i < nr_iters; i++)
objs[i] = NULL;
err = bpf_link_get_fd_by_id(0);
CHECK(err >= 0 || errno != ENOENT,
"get-fd-by-notexist-link-id", "err %d errno %d\n", err, errno);
/* Check bpf_obj_get_info_by_fd() */
bzero(zeros, sizeof(zeros));
for (i = 0; i < nr_iters; i++) {
now = time(NULL);
err = bpf_prog_load(file, BPF_PROG_TYPE_SOCKET_FILTER,
err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT,
&objs[i], &prog_fds[i]);
/* test_obj_id.o is a dumb prog. It should never fail
* to load.
@@ -60,6 +65,17 @@ void test_bpf_obj_id(void)
if (CHECK_FAIL(err))
goto done;
prog = bpf_object__find_program_by_title(objs[i],
"raw_tp/sys_enter");
if (CHECK_FAIL(!prog))
goto done;
links[i] = bpf_program__attach(prog);
err = libbpf_get_error(links[i]);
if (CHECK(err, "prog_attach", "prog #%d, err %d\n", i, err)) {
links[i] = NULL;
goto done;
}
/* Check getting map info */
info_len = sizeof(struct bpf_map_info) * 2;
bzero(&map_infos[i], info_len);
@@ -107,7 +123,7 @@ void test_bpf_obj_id(void)
load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec)
+ (prog_infos[i].load_time / nsec_per_sec);
if (CHECK(err ||
prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER ||
prog_infos[i].type != BPF_PROG_TYPE_RAW_TRACEPOINT ||
info_len != sizeof(struct bpf_prog_info) ||
(env.jit_enabled && !prog_infos[i].jited_prog_len) ||
(env.jit_enabled &&
@@ -120,7 +136,11 @@ void test_bpf_obj_id(void)
*(int *)(long)prog_infos[i].map_ids != map_infos[i].id ||
strcmp((char *)prog_infos[i].name, expected_prog_name),
"get-prog-info(fd)",
"err %d errno %d i %d type %d(%d) info_len %u(%zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
"err %d errno %d i %d type %d(%d) info_len %u(%zu) "
"jit_enabled %d jited_prog_len %u xlated_prog_len %u "
"jited_prog %d xlated_prog %d load_time %lu(%lu) "
"uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) "
"name %s(%s)\n",
err, errno, i,
prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
info_len, sizeof(struct bpf_prog_info),
@@ -135,6 +155,33 @@ void test_bpf_obj_id(void)
*(int *)(long)prog_infos[i].map_ids, map_infos[i].id,
prog_infos[i].name, expected_prog_name))
goto done;
/* Check getting link info */
info_len = sizeof(struct bpf_link_info) * 2;
bzero(&link_infos[i], info_len);
link_infos[i].raw_tracepoint.tp_name = (__u64)&tp_name;
link_infos[i].raw_tracepoint.tp_name_len = sizeof(tp_name);
err = bpf_obj_get_info_by_fd(bpf_link__fd(links[i]),
&link_infos[i], &info_len);
if (CHECK(err ||
link_infos[i].type != BPF_LINK_TYPE_RAW_TRACEPOINT ||
link_infos[i].prog_id != prog_infos[i].id ||
link_infos[i].raw_tracepoint.tp_name != (__u64)&tp_name ||
strcmp((char *)link_infos[i].raw_tracepoint.tp_name,
"sys_enter") ||
info_len != sizeof(struct bpf_link_info),
"get-link-info(fd)",
"err %d errno %d info_len %u(%zu) type %d(%d) id %d "
"prog_id %d (%d) tp_name %s(%s)\n",
err, errno,
info_len, sizeof(struct bpf_link_info),
link_infos[i].type, BPF_LINK_TYPE_RAW_TRACEPOINT,
link_infos[i].id,
link_infos[i].prog_id, prog_infos[i].id,
(char *)link_infos[i].raw_tracepoint.tp_name,
"sys_enter"))
goto done;
}
/* Check bpf_prog_get_next_id() */
@@ -247,7 +294,52 @@ void test_bpf_obj_id(void)
"nr_id_found %u(%u)\n",
nr_id_found, nr_iters);
/* Check bpf_link_get_next_id() */
nr_id_found = 0;
next_id = 0;
while (!bpf_link_get_next_id(next_id, &next_id)) {
struct bpf_link_info link_info;
int link_fd, cmp_res;
info_len = sizeof(link_info);
memset(&link_info, 0, info_len);
link_fd = bpf_link_get_fd_by_id(next_id);
if (link_fd < 0 && errno == ENOENT)
/* The bpf_link is in the dead row */
continue;
if (CHECK(link_fd < 0, "get-link-fd(next_id)",
"link_fd %d next_id %u errno %d\n",
link_fd, next_id, errno))
break;
for (i = 0; i < nr_iters; i++)
if (link_infos[i].id == next_id)
break;
if (i == nr_iters)
continue;
nr_id_found++;
err = bpf_obj_get_info_by_fd(link_fd, &link_info, &info_len);
cmp_res = memcmp(&link_info, &link_infos[i],
offsetof(struct bpf_link_info, raw_tracepoint));
CHECK(err || info_len != sizeof(link_info) || cmp_res,
"check get-link-info(next_id->fd)",
"err %d errno %d info_len %u(%zu) memcmp %d\n",
err, errno, info_len, sizeof(struct bpf_link_info),
cmp_res);
close(link_fd);
}
CHECK(nr_id_found != nr_iters,
"check total link id found by get_next_id",
"nr_id_found %u(%u)\n", nr_id_found, nr_iters);
done:
for (i = 0; i < nr_iters; i++)
for (i = 0; i < nr_iters; i++) {
bpf_link__destroy(links[i]);
bpf_object__close(objs[i]);
}
}


@@ -0,0 +1,49 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include "test_btf_map_in_map.skel.h"
void test_btf_map_in_map(void)
{
int duration = 0, err, key = 0, val;
struct test_btf_map_in_map* skel;
skel = test_btf_map_in_map__open_and_load();
if (CHECK(!skel, "skel_open", "failed to open&load skeleton\n"))
return;
err = test_btf_map_in_map__attach(skel);
if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
goto cleanup;
/* inner1 = input, inner2 = input + 1 */
val = bpf_map__fd(skel->maps.inner_map1);
bpf_map_update_elem(bpf_map__fd(skel->maps.outer_arr), &key, &val, 0);
val = bpf_map__fd(skel->maps.inner_map2);
bpf_map_update_elem(bpf_map__fd(skel->maps.outer_hash), &key, &val, 0);
skel->bss->input = 1;
usleep(1);
bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map1), &key, &val);
CHECK(val != 1, "inner1", "got %d != exp %d\n", val, 1);
bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map2), &key, &val);
CHECK(val != 2, "inner2", "got %d != exp %d\n", val, 2);
/* inner1 = input + 1, inner2 = input */
val = bpf_map__fd(skel->maps.inner_map2);
bpf_map_update_elem(bpf_map__fd(skel->maps.outer_arr), &key, &val, 0);
val = bpf_map__fd(skel->maps.inner_map1);
bpf_map_update_elem(bpf_map__fd(skel->maps.outer_hash), &key, &val, 0);
skel->bss->input = 3;
usleep(1);
bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map1), &key, &val);
CHECK(val != 4, "inner1", "got %d != exp %d\n", val, 4);
bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map2), &key, &val);
CHECK(val != 3, "inner2", "got %d != exp %d\n", val, 3);
cleanup:
test_btf_map_in_map__destroy(skel);
}
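
A user-space detail the skeleton hides in the test above: a plain
bpf_map_lookup_elem() on an outer map-in-map returns the inner map's id, not
an fd. A tool working without the skeleton would resolve it roughly as
follows (sketch; the helper name is made up):

#include <bpf/bpf.h>

/* Hypothetical helper: user-space lookups on *_OF_MAPS maps yield the
 * inner map id; convert it to an fd, which the caller must close().
 */
static int get_inner_map_fd(int outer_map_fd, int key)
{
	__u32 inner_id = 0;

	if (bpf_map_lookup_elem(outer_map_fd, &key, &inner_id))
		return -1;
	return bpf_map_get_fd_by_id(inner_id);
}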


@@ -0,0 +1,456 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
// Copyright (c) 2020 Cloudflare
#define _GNU_SOURCE
#include <arpa/inet.h>
#include <string.h>
#include <linux/pkt_cls.h>
#include <test_progs.h>
#include "progs/test_cls_redirect.h"
#include "test_cls_redirect.skel.h"
#define ENCAP_IP INADDR_LOOPBACK
#define ENCAP_PORT (1234)
struct addr_port {
in_port_t port;
union {
struct in_addr in_addr;
struct in6_addr in6_addr;
};
};
struct tuple {
int family;
struct addr_port src;
struct addr_port dst;
};
static int start_server(const struct sockaddr *addr, socklen_t len, int type)
{
int fd = socket(addr->sa_family, type, 0);
if (CHECK_FAIL(fd == -1))
return -1;
if (CHECK_FAIL(bind(fd, addr, len) == -1))
goto err;
if (type == SOCK_STREAM && CHECK_FAIL(listen(fd, 128) == -1))
goto err;
return fd;
err:
close(fd);
return -1;
}
static int connect_to_server(const struct sockaddr *addr, socklen_t len,
int type)
{
int fd = socket(addr->sa_family, type, 0);
if (CHECK_FAIL(fd == -1))
return -1;
if (CHECK_FAIL(connect(fd, addr, len)))
goto err;
return fd;
err:
close(fd);
return -1;
}
static bool fill_addr_port(const struct sockaddr *sa, struct addr_port *ap)
{
const struct sockaddr_in6 *in6;
const struct sockaddr_in *in;
switch (sa->sa_family) {
case AF_INET:
in = (const struct sockaddr_in *)sa;
ap->in_addr = in->sin_addr;
ap->port = in->sin_port;
return true;
case AF_INET6:
in6 = (const struct sockaddr_in6 *)sa;
ap->in6_addr = in6->sin6_addr;
ap->port = in6->sin6_port;
return true;
default:
return false;
}
}
static bool set_up_conn(const struct sockaddr *addr, socklen_t len, int type,
int *server, int *conn, struct tuple *tuple)
{
struct sockaddr_storage ss;
socklen_t slen = sizeof(ss);
struct sockaddr *sa = (struct sockaddr *)&ss;
*server = start_server(addr, len, type);
if (*server < 0)
return false;
if (CHECK_FAIL(getsockname(*server, sa, &slen)))
goto close_server;
*conn = connect_to_server(sa, slen, type);
if (*conn < 0)
goto close_server;
/* We want to simulate packets arriving at conn, so we have to
* swap src and dst.
*/
slen = sizeof(ss);
if (CHECK_FAIL(getsockname(*conn, sa, &slen)))
goto close_conn;
if (CHECK_FAIL(!fill_addr_port(sa, &tuple->dst)))
goto close_conn;
slen = sizeof(ss);
if (CHECK_FAIL(getpeername(*conn, sa, &slen)))
goto close_conn;
if (CHECK_FAIL(!fill_addr_port(sa, &tuple->src)))
goto close_conn;
tuple->family = ss.ss_family;
return true;
close_conn:
close(*conn);
*conn = -1;
close_server:
close(*server);
*server = -1;
return false;
}
static socklen_t prepare_addr(struct sockaddr_storage *addr, int family)
{
struct sockaddr_in *addr4;
struct sockaddr_in6 *addr6;
switch (family) {
case AF_INET:
addr4 = (struct sockaddr_in *)addr;
memset(addr4, 0, sizeof(*addr4));
addr4->sin_family = family;
addr4->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
return sizeof(*addr4);
case AF_INET6:
addr6 = (struct sockaddr_in6 *)addr;
memset(addr6, 0, sizeof(*addr6));
addr6->sin6_family = family;
addr6->sin6_addr = in6addr_loopback;
return sizeof(*addr6);
default:
fprintf(stderr, "Invalid family %d", family);
return 0;
}
}
static bool was_decapsulated(struct bpf_prog_test_run_attr *tattr)
{
return tattr->data_size_out < tattr->data_size_in;
}
enum type {
UDP,
TCP,
__NR_KIND,
};
enum hops {
NO_HOPS,
ONE_HOP,
};
enum flags {
NONE,
SYN,
ACK,
};
enum conn {
KNOWN_CONN,
UNKNOWN_CONN,
};
enum result {
ACCEPT,
FORWARD,
};
struct test_cfg {
enum type type;
enum result result;
enum conn conn;
enum hops hops;
enum flags flags;
};
static int test_str(void *buf, size_t len, const struct test_cfg *test,
int family)
{
const char *family_str, *type, *conn, *hops, *result, *flags;
family_str = "IPv4";
if (family == AF_INET6)
family_str = "IPv6";
type = "TCP";
if (test->type == UDP)
type = "UDP";
conn = "known";
if (test->conn == UNKNOWN_CONN)
conn = "unknown";
hops = "no hops";
if (test->hops == ONE_HOP)
hops = "one hop";
result = "accept";
if (test->result == FORWARD)
result = "forward";
flags = "none";
if (test->flags == SYN)
flags = "SYN";
else if (test->flags == ACK)
flags = "ACK";
return snprintf(buf, len, "%s %s %s %s (%s, flags: %s)", family_str,
type, result, conn, hops, flags);
}
static struct test_cfg tests[] = {
{ TCP, ACCEPT, UNKNOWN_CONN, NO_HOPS, SYN },
{ TCP, ACCEPT, UNKNOWN_CONN, NO_HOPS, ACK },
{ TCP, FORWARD, UNKNOWN_CONN, ONE_HOP, ACK },
{ TCP, ACCEPT, KNOWN_CONN, ONE_HOP, ACK },
{ UDP, ACCEPT, UNKNOWN_CONN, NO_HOPS, NONE },
{ UDP, FORWARD, UNKNOWN_CONN, ONE_HOP, NONE },
{ UDP, ACCEPT, KNOWN_CONN, ONE_HOP, NONE },
};
static void encap_init(encap_headers_t *encap, uint8_t hop_count, uint8_t proto)
{
const uint8_t hlen =
(sizeof(struct guehdr) / sizeof(uint32_t)) + hop_count;
*encap = (encap_headers_t){
.eth = { .h_proto = htons(ETH_P_IP) },
.ip = {
.ihl = 5,
.version = 4,
.ttl = IPDEFTTL,
.protocol = IPPROTO_UDP,
.daddr = htonl(ENCAP_IP)
},
.udp = {
.dest = htons(ENCAP_PORT),
},
.gue = {
.hlen = hlen,
.proto_ctype = proto
},
.unigue = {
.hop_count = hop_count
},
};
}
static size_t build_input(const struct test_cfg *test, void *const buf,
const struct tuple *tuple)
{
in_port_t sport = tuple->src.port;
encap_headers_t encap;
struct iphdr ip;
struct ipv6hdr ipv6;
struct tcphdr tcp;
struct udphdr udp;
struct in_addr next_hop;
uint8_t *p = buf;
int proto;
proto = IPPROTO_IPIP;
if (tuple->family == AF_INET6)
proto = IPPROTO_IPV6;
encap_init(&encap, test->hops == ONE_HOP ? 1 : 0, proto);
p = mempcpy(p, &encap, sizeof(encap));
if (test->hops == ONE_HOP) {
next_hop = (struct in_addr){ .s_addr = htonl(0x7f000002) };
p = mempcpy(p, &next_hop, sizeof(next_hop));
}
proto = IPPROTO_TCP;
if (test->type == UDP)
proto = IPPROTO_UDP;
switch (tuple->family) {
case AF_INET:
ip = (struct iphdr){
.ihl = 5,
.version = 4,
.ttl = IPDEFTTL,
.protocol = proto,
.saddr = tuple->src.in_addr.s_addr,
.daddr = tuple->dst.in_addr.s_addr,
};
p = mempcpy(p, &ip, sizeof(ip));
break;
case AF_INET6:
ipv6 = (struct ipv6hdr){
.version = 6,
.hop_limit = IPDEFTTL,
.nexthdr = proto,
.saddr = tuple->src.in6_addr,
.daddr = tuple->dst.in6_addr,
};
p = mempcpy(p, &ipv6, sizeof(ipv6));
break;
default:
return 0;
}
if (test->conn == UNKNOWN_CONN)
sport--;
switch (test->type) {
case TCP:
tcp = (struct tcphdr){
.source = sport,
.dest = tuple->dst.port,
};
if (test->flags == SYN)
tcp.syn = true;
if (test->flags == ACK)
tcp.ack = true;
p = mempcpy(p, &tcp, sizeof(tcp));
break;
case UDP:
udp = (struct udphdr){
.source = sport,
.dest = tuple->dst.port,
};
p = mempcpy(p, &udp, sizeof(udp));
break;
default:
return 0;
}
return (void *)p - buf;
}
static void close_fds(int *fds, int n)
{
int i;
for (i = 0; i < n; i++)
if (fds[i] > 0)
close(fds[i]);
}
void test_cls_redirect(void)
{
struct test_cls_redirect *skel = NULL;
struct bpf_prog_test_run_attr tattr = {};
int families[] = { AF_INET, AF_INET6 };
struct sockaddr_storage ss;
struct sockaddr *addr;
socklen_t slen;
int i, j, err;
int servers[__NR_KIND][ARRAY_SIZE(families)] = {};
int conns[__NR_KIND][ARRAY_SIZE(families)] = {};
struct tuple tuples[__NR_KIND][ARRAY_SIZE(families)];
skel = test_cls_redirect__open();
if (CHECK_FAIL(!skel))
return;
skel->rodata->ENCAPSULATION_IP = htonl(ENCAP_IP);
skel->rodata->ENCAPSULATION_PORT = htons(ENCAP_PORT);
if (CHECK_FAIL(test_cls_redirect__load(skel)))
goto cleanup;
addr = (struct sockaddr *)&ss;
for (i = 0; i < ARRAY_SIZE(families); i++) {
slen = prepare_addr(&ss, families[i]);
if (CHECK_FAIL(!slen))
goto cleanup;
if (CHECK_FAIL(!set_up_conn(addr, slen, SOCK_DGRAM,
&servers[UDP][i], &conns[UDP][i],
&tuples[UDP][i])))
goto cleanup;
if (CHECK_FAIL(!set_up_conn(addr, slen, SOCK_STREAM,
&servers[TCP][i], &conns[TCP][i],
&tuples[TCP][i])))
goto cleanup;
}
tattr.prog_fd = bpf_program__fd(skel->progs.cls_redirect);
for (i = 0; i < ARRAY_SIZE(tests); i++) {
struct test_cfg *test = &tests[i];
for (j = 0; j < ARRAY_SIZE(families); j++) {
struct tuple *tuple = &tuples[test->type][j];
char input[256];
char tmp[256];
test_str(tmp, sizeof(tmp), test, tuple->family);
if (!test__start_subtest(tmp))
continue;
tattr.data_out = tmp;
tattr.data_size_out = sizeof(tmp);
tattr.data_in = input;
tattr.data_size_in = build_input(test, input, tuple);
if (CHECK_FAIL(!tattr.data_size_in))
continue;
err = bpf_prog_test_run_xattr(&tattr);
if (CHECK_FAIL(err))
continue;
if (tattr.retval != TC_ACT_REDIRECT) {
PRINT_FAIL("expected TC_ACT_REDIRECT, got %d\n",
tattr.retval);
continue;
}
switch (test->result) {
case ACCEPT:
if (CHECK_FAIL(!was_decapsulated(&tattr)))
continue;
break;
case FORWARD:
if (CHECK_FAIL(was_decapsulated(&tattr)))
continue;
break;
default:
PRINT_FAIL("unknown result %d\n", test->result);
continue;
}
}
}
cleanup:
test_cls_redirect__destroy(skel);
close_fds((int *)servers, sizeof(servers) / sizeof(servers[0][0]));
close_fds((int *)conns, sizeof(conns) / sizeof(conns[0][0]));
}


@@ -392,7 +392,7 @@ static struct core_reloc_test_case test_cases[] = {
.input = STRUCT_TO_CHAR_PTR(core_reloc_existence___minimal) {
.a = 42,
},
.input_len = sizeof(struct core_reloc_existence),
.input_len = sizeof(struct core_reloc_existence___minimal),
.output = STRUCT_TO_CHAR_PTR(core_reloc_existence_output) {
.a_exists = 1,
.b_exists = 0,


@@ -0,0 +1,45 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "test_enable_stats.skel.h"
void test_enable_stats(void)
{
struct test_enable_stats *skel;
int stats_fd, err, prog_fd;
struct bpf_prog_info info;
__u32 info_len = sizeof(info);
int duration = 0;
skel = test_enable_stats__open_and_load();
if (CHECK(!skel, "skel_open_and_load", "skeleton open/load failed\n"))
return;
stats_fd = bpf_enable_stats(BPF_STATS_RUN_TIME);
if (CHECK(stats_fd < 0, "get_stats_fd", "failed %d\n", errno)) {
test_enable_stats__destroy(skel);
return;
}
err = test_enable_stats__attach(skel);
if (CHECK(err, "attach_raw_tp", "err %d\n", err))
goto cleanup;
test_enable_stats__detach(skel);
prog_fd = bpf_program__fd(skel->progs.test_enable_stats);
memset(&info, 0, info_len);
err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
if (CHECK(err, "get_prog_info",
"failed to get bpf_prog_info for fd %d\n", prog_fd))
goto cleanup;
if (CHECK(info.run_time_ns == 0, "check_stats_enabled",
"failed to enable run_time_ns stats\n"))
goto cleanup;
CHECK(info.run_cnt != skel->bss->count, "check_run_cnt_valid",
"invalid run_cnt stats\n");
cleanup:
test_enable_stats__destroy(skel);
close(stats_fd);
}


@@ -5,26 +5,17 @@
*
* Copyright (c) 2019 Facebook
*/
#include <stdio.h>
#include <errno.h>
#include <linux/err.h>
#include "test_progs.h"
#include "bpf/hashmap.h"
#define CHECK(condition, format...) ({ \
int __ret = !!(condition); \
if (__ret) { \
fprintf(stderr, "%s:%d:FAIL ", __func__, __LINE__); \
fprintf(stderr, format); \
} \
__ret; \
})
static int duration = 0;
size_t hash_fn(const void *k, void *ctx)
static size_t hash_fn(const void *k, void *ctx)
{
return (long)k;
}
bool equal_fn(const void *a, const void *b, void *ctx)
static bool equal_fn(const void *a, const void *b, void *ctx)
{
return (long)a == (long)b;
}
@@ -49,53 +40,55 @@ static inline size_t exp_cap(size_t sz)
#define ELEM_CNT 62
int test_hashmap_generic(void)
static void test_hashmap_generic(void)
{
struct hashmap_entry *entry, *tmp;
int err, bkt, found_cnt, i;
long long found_msk;
struct hashmap *map;
fprintf(stderr, "%s: ", __func__);
map = hashmap__new(hash_fn, equal_fn, NULL);
if (CHECK(IS_ERR(map), "failed to create map: %ld\n", PTR_ERR(map)))
return 1;
if (CHECK(IS_ERR(map), "hashmap__new",
"failed to create map: %ld\n", PTR_ERR(map)))
return;
for (i = 0; i < ELEM_CNT; i++) {
const void *oldk, *k = (const void *)(long)i;
void *oldv, *v = (void *)(long)(1024 + i);
err = hashmap__update(map, k, v, &oldk, &oldv);
if (CHECK(err != -ENOENT, "unexpected result: %d\n", err))
return 1;
if (CHECK(err != -ENOENT, "hashmap__update",
"unexpected result: %d\n", err))
goto cleanup;
if (i % 2) {
err = hashmap__add(map, k, v);
} else {
err = hashmap__set(map, k, v, &oldk, &oldv);
if (CHECK(oldk != NULL || oldv != NULL,
if (CHECK(oldk != NULL || oldv != NULL, "check_kv",
"unexpected k/v: %p=%p\n", oldk, oldv))
return 1;
goto cleanup;
}
if (CHECK(err, "failed to add k/v %ld = %ld: %d\n",
if (CHECK(err, "elem_add", "failed to add k/v %ld = %ld: %d\n",
(long)k, (long)v, err))
return 1;
goto cleanup;
if (CHECK(!hashmap__find(map, k, &oldv),
if (CHECK(!hashmap__find(map, k, &oldv), "elem_find",
"failed to find key %ld\n", (long)k))
return 1;
if (CHECK(oldv != v, "found value is wrong: %ld\n", (long)oldv))
return 1;
goto cleanup;
if (CHECK(oldv != v, "elem_val",
"found value is wrong: %ld\n", (long)oldv))
goto cleanup;
}
if (CHECK(hashmap__size(map) != ELEM_CNT,
if (CHECK(hashmap__size(map) != ELEM_CNT, "hashmap__size",
"invalid map size: %zu\n", hashmap__size(map)))
return 1;
goto cleanup;
if (CHECK(hashmap__capacity(map) != exp_cap(hashmap__size(map)),
"hashmap_cap",
"unexpected map capacity: %zu\n", hashmap__capacity(map)))
return 1;
goto cleanup;
found_msk = 0;
hashmap__for_each_entry(map, entry, bkt) {
@@ -103,42 +96,47 @@ int test_hashmap_generic(void)
long v = (long)entry->value;
found_msk |= 1ULL << k;
if (CHECK(v - k != 1024, "invalid k/v pair: %ld = %ld\n", k, v))
return 1;
if (CHECK(v - k != 1024, "check_kv",
"invalid k/v pair: %ld = %ld\n", k, v))
goto cleanup;
}
if (CHECK(found_msk != (1ULL << ELEM_CNT) - 1,
if (CHECK(found_msk != (1ULL << ELEM_CNT) - 1, "elem_cnt",
"not all keys iterated: %llx\n", found_msk))
return 1;
goto cleanup;
for (i = 0; i < ELEM_CNT; i++) {
const void *oldk, *k = (const void *)(long)i;
void *oldv, *v = (void *)(long)(256 + i);
err = hashmap__add(map, k, v);
if (CHECK(err != -EEXIST, "unexpected add result: %d\n", err))
return 1;
if (CHECK(err != -EEXIST, "hashmap__add",
"unexpected add result: %d\n", err))
goto cleanup;
if (i % 2)
err = hashmap__update(map, k, v, &oldk, &oldv);
else
err = hashmap__set(map, k, v, &oldk, &oldv);
if (CHECK(err, "failed to update k/v %ld = %ld: %d\n",
(long)k, (long)v, err))
return 1;
if (CHECK(!hashmap__find(map, k, &oldv),
if (CHECK(err, "elem_upd",
"failed to update k/v %ld = %ld: %d\n",
(long)k, (long)v, err))
goto cleanup;
if (CHECK(!hashmap__find(map, k, &oldv), "elem_find",
"failed to find key %ld\n", (long)k))
return 1;
if (CHECK(oldv != v, "found value is wrong: %ld\n", (long)oldv))
return 1;
goto cleanup;
if (CHECK(oldv != v, "elem_val",
"found value is wrong: %ld\n", (long)oldv))
goto cleanup;
}
if (CHECK(hashmap__size(map) != ELEM_CNT,
if (CHECK(hashmap__size(map) != ELEM_CNT, "hashmap__size",
"invalid updated map size: %zu\n", hashmap__size(map)))
return 1;
goto cleanup;
if (CHECK(hashmap__capacity(map) != exp_cap(hashmap__size(map)),
"hashmap__capacity",
"unexpected map capacity: %zu\n", hashmap__capacity(map)))
return 1;
goto cleanup;
found_msk = 0;
hashmap__for_each_entry_safe(map, entry, tmp, bkt) {
@@ -146,20 +144,21 @@ int test_hashmap_generic(void)
long v = (long)entry->value;
found_msk |= 1ULL << k;
if (CHECK(v - k != 256,
if (CHECK(v - k != 256, "elem_check",
"invalid updated k/v pair: %ld = %ld\n", k, v))
return 1;
goto cleanup;
}
if (CHECK(found_msk != (1ULL << ELEM_CNT) - 1,
if (CHECK(found_msk != (1ULL << ELEM_CNT) - 1, "elem_cnt",
"not all keys iterated after update: %llx\n", found_msk))
return 1;
goto cleanup;
found_cnt = 0;
hashmap__for_each_key_entry(map, entry, (void *)0) {
found_cnt++;
}
if (CHECK(!found_cnt, "didn't find any entries for key 0\n"))
return 1;
if (CHECK(!found_cnt, "found_cnt",
"didn't find any entries for key 0\n"))
goto cleanup;
found_msk = 0;
found_cnt = 0;
@@ -173,30 +172,31 @@ int test_hashmap_generic(void)
found_cnt++;
found_msk |= 1ULL << (long)k;
if (CHECK(!hashmap__delete(map, k, &oldk, &oldv),
if (CHECK(!hashmap__delete(map, k, &oldk, &oldv), "elem_del",
"failed to delete k/v %ld = %ld\n",
(long)k, (long)v))
return 1;
if (CHECK(oldk != k || oldv != v,
goto cleanup;
if (CHECK(oldk != k || oldv != v, "check_old",
"invalid deleted k/v: expected %ld = %ld, got %ld = %ld\n",
(long)k, (long)v, (long)oldk, (long)oldv))
return 1;
if (CHECK(hashmap__delete(map, k, &oldk, &oldv),
goto cleanup;
if (CHECK(hashmap__delete(map, k, &oldk, &oldv), "elem_del",
"unexpectedly deleted k/v %ld = %ld\n",
(long)oldk, (long)oldv))
return 1;
goto cleanup;
}
if (CHECK(!found_cnt || !found_msk,
if (CHECK(!found_cnt || !found_msk, "found_entries",
"didn't delete any key entries\n"))
return 1;
if (CHECK(hashmap__size(map) != ELEM_CNT - found_cnt,
goto cleanup;
if (CHECK(hashmap__size(map) != ELEM_CNT - found_cnt, "elem_cnt",
"invalid updated map size (already deleted: %d): %zu\n",
found_cnt, hashmap__size(map)))
return 1;
goto cleanup;
if (CHECK(hashmap__capacity(map) != exp_cap(hashmap__size(map)),
"hashmap__capacity",
"unexpected map capacity: %zu\n", hashmap__capacity(map)))
return 1;
goto cleanup;
hashmap__for_each_entry_safe(map, entry, tmp, bkt) {
const void *oldk, *k;
@@ -208,53 +208,56 @@ int test_hashmap_generic(void)
found_cnt++;
found_msk |= 1ULL << (long)k;
if (CHECK(!hashmap__delete(map, k, &oldk, &oldv),
if (CHECK(!hashmap__delete(map, k, &oldk, &oldv), "elem_del",
"failed to delete k/v %ld = %ld\n",
(long)k, (long)v))
return 1;
if (CHECK(oldk != k || oldv != v,
goto cleanup;
if (CHECK(oldk != k || oldv != v, "elem_check",
"invalid old k/v: expect %ld = %ld, got %ld = %ld\n",
(long)k, (long)v, (long)oldk, (long)oldv))
return 1;
if (CHECK(hashmap__delete(map, k, &oldk, &oldv),
goto cleanup;
if (CHECK(hashmap__delete(map, k, &oldk, &oldv), "elem_del",
"unexpectedly deleted k/v %ld = %ld\n",
(long)k, (long)v))
return 1;
goto cleanup;
}
if (CHECK(found_cnt != ELEM_CNT || found_msk != (1ULL << ELEM_CNT) - 1,
"found_cnt",
"not all keys were deleted: found_cnt:%d, found_msk:%llx\n",
found_cnt, found_msk))
return 1;
if (CHECK(hashmap__size(map) != 0,
goto cleanup;
if (CHECK(hashmap__size(map) != 0, "hashmap__size",
"invalid updated map size (already deleted: %d): %zu\n",
found_cnt, hashmap__size(map)))
return 1;
goto cleanup;
found_cnt = 0;
hashmap__for_each_entry(map, entry, bkt) {
CHECK(false, "unexpected map entries left: %ld = %ld\n",
(long)entry->key, (long)entry->value);
return 1;
CHECK(false, "elem_exists",
"unexpected map entries left: %ld = %ld\n",
(long)entry->key, (long)entry->value);
goto cleanup;
}
hashmap__free(map);
hashmap__clear(map);
hashmap__for_each_entry(map, entry, bkt) {
CHECK(false, "unexpected map entries left: %ld = %ld\n",
(long)entry->key, (long)entry->value);
return 1;
CHECK(false, "elem_exists",
"unexpected map entries left: %ld = %ld\n",
(long)entry->key, (long)entry->value);
goto cleanup;
}
fprintf(stderr, "OK\n");
return 0;
cleanup:
hashmap__free(map);
}
size_t collision_hash_fn(const void *k, void *ctx)
static size_t collision_hash_fn(const void *k, void *ctx)
{
return 0;
}
int test_hashmap_multimap(void)
static void test_hashmap_multimap(void)
{
void *k1 = (void *)0, *k2 = (void *)1;
struct hashmap_entry *entry;
@@ -262,121 +265,116 @@ int test_hashmap_multimap(void)
long found_msk;
int err, bkt;
fprintf(stderr, "%s: ", __func__);
/* force collisions */
map = hashmap__new(collision_hash_fn, equal_fn, NULL);
if (CHECK(IS_ERR(map), "failed to create map: %ld\n", PTR_ERR(map)))
return 1;
if (CHECK(IS_ERR(map), "hashmap__new",
"failed to create map: %ld\n", PTR_ERR(map)))
return;
/* set up multimap:
* [0] -> 1, 2, 4;
* [1] -> 8, 16, 32;
*/
err = hashmap__append(map, k1, (void *)1);
if (CHECK(err, "failed to add k/v: %d\n", err))
return 1;
if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err))
goto cleanup;
err = hashmap__append(map, k1, (void *)2);
if (CHECK(err, "failed to add k/v: %d\n", err))
return 1;
if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err))
goto cleanup;
err = hashmap__append(map, k1, (void *)4);
if (CHECK(err, "failed to add k/v: %d\n", err))
return 1;
if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err))
goto cleanup;
err = hashmap__append(map, k2, (void *)8);
if (CHECK(err, "failed to add k/v: %d\n", err))
return 1;
if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err))
goto cleanup;
err = hashmap__append(map, k2, (void *)16);
if (CHECK(err, "failed to add k/v: %d\n", err))
return 1;
if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err))
goto cleanup;
err = hashmap__append(map, k2, (void *)32);
if (CHECK(err, "failed to add k/v: %d\n", err))
return 1;
if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err))
goto cleanup;
if (CHECK(hashmap__size(map) != 6,
if (CHECK(hashmap__size(map) != 6, "hashmap_size",
"invalid map size: %zu\n", hashmap__size(map)))
return 1;
goto cleanup;
/* verify global iteration still works and sees all values */
found_msk = 0;
hashmap__for_each_entry(map, entry, bkt) {
found_msk |= (long)entry->value;
}
if (CHECK(found_msk != (1 << 6) - 1,
if (CHECK(found_msk != (1 << 6) - 1, "found_msk",
"not all keys iterated: %lx\n", found_msk))
return 1;
goto cleanup;
/* iterate values for key 1 */
found_msk = 0;
hashmap__for_each_key_entry(map, entry, k1) {
found_msk |= (long)entry->value;
}
if (CHECK(found_msk != (1 | 2 | 4),
if (CHECK(found_msk != (1 | 2 | 4), "found_msk",
"invalid k1 values: %lx\n", found_msk))
return 1;
goto cleanup;
/* iterate values for key 2 */
found_msk = 0;
hashmap__for_each_key_entry(map, entry, k2) {
found_msk |= (long)entry->value;
}
if (CHECK(found_msk != (8 | 16 | 32),
if (CHECK(found_msk != (8 | 16 | 32), "found_msk",
"invalid k2 values: %lx\n", found_msk))
return 1;
goto cleanup;
fprintf(stderr, "OK\n");
return 0;
cleanup:
hashmap__free(map);
}
int test_hashmap_empty()
static void test_hashmap_empty()
{
struct hashmap_entry *entry;
int bkt;
struct hashmap *map;
void *k = (void *)0;
fprintf(stderr, "%s: ", __func__);
/* force collisions */
map = hashmap__new(hash_fn, equal_fn, NULL);
if (CHECK(IS_ERR(map), "failed to create map: %ld\n", PTR_ERR(map)))
return 1;
if (CHECK(IS_ERR(map), "hashmap__new",
"failed to create map: %ld\n", PTR_ERR(map)))
goto cleanup;
if (CHECK(hashmap__size(map) != 0,
if (CHECK(hashmap__size(map) != 0, "hashmap__size",
"invalid map size: %zu\n", hashmap__size(map)))
return 1;
if (CHECK(hashmap__capacity(map) != 0,
goto cleanup;
if (CHECK(hashmap__capacity(map) != 0, "hashmap__capacity",
"invalid map capacity: %zu\n", hashmap__capacity(map)))
return 1;
if (CHECK(hashmap__find(map, k, NULL), "unexpected find\n"))
return 1;
if (CHECK(hashmap__delete(map, k, NULL, NULL), "unexpected delete\n"))
return 1;
goto cleanup;
if (CHECK(hashmap__find(map, k, NULL), "elem_find",
"unexpected find\n"))
goto cleanup;
if (CHECK(hashmap__delete(map, k, NULL, NULL), "elem_del",
"unexpected delete\n"))
goto cleanup;
hashmap__for_each_entry(map, entry, bkt) {
CHECK(false, "unexpected iterated entry\n");
return 1;
CHECK(false, "elem_found", "unexpected iterated entry\n");
goto cleanup;
}
hashmap__for_each_key_entry(map, entry, k) {
CHECK(false, "unexpected key entry\n");
return 1;
CHECK(false, "key_found", "unexpected key entry\n");
goto cleanup;
}
fprintf(stderr, "OK\n");
return 0;
cleanup:
hashmap__free(map);
}
int main(int argc, char **argv)
void test_hashmap()
{
bool failed = false;
if (test_hashmap_generic())
failed = true;
if (test_hashmap_multimap())
failed = true;
if (test_hashmap_empty())
failed = true;
return failed;
if (test__start_subtest("generic"))
test_hashmap_generic();
if (test__start_subtest("multimap"))
test_hashmap_multimap();
if (test__start_subtest("empty"))
test_hashmap_empty();
}


@@ -80,9 +80,6 @@ void test_ns_current_pid_tgid(void)
"User pid/tgid %llu BPF pid/tgid %llu\n", id, bss.pid_tgid))
goto cleanup;
cleanup:
if (!link) {
bpf_link__destroy(link);
link = NULL;
}
bpf_link__destroy(link);
bpf_object__close(obj);
}


@@ -6,6 +6,11 @@
#include <test_progs.h>
#include "bpf/libbpf_internal.h"
/* AddressSanitizer sometimes crashes due to data dereference below, due to
* this being mmap()'ed memory. Disable instrumentation with
* no_sanitize_address attribute
*/
__attribute__((no_sanitize_address))
static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
int cpu_data = *(int *)data, duration = 0;


@@ -20,6 +20,7 @@
#define CONNECT_PORT 4321
#define TEST_DADDR (0xC0A80203)
#define NS_SELF "/proc/self/ns/net"
#define SERVER_MAP_PATH "/sys/fs/bpf/tc/globals/server_map"
static const struct timeval timeo_sec = { .tv_sec = 3 };
static const size_t timeo_optlen = sizeof(timeo_sec);
@@ -265,6 +266,7 @@ void test_sk_assign(void)
TEST("ipv6 udp addr redir", AF_INET6, SOCK_DGRAM, true),
};
int server = -1;
int server_map;
int self_net;
self_net = open(NS_SELF, O_RDONLY);
@@ -278,9 +280,17 @@ void test_sk_assign(void)
goto cleanup;
}
server_map = bpf_obj_get(SERVER_MAP_PATH);
if (CHECK_FAIL(server_map < 0)) {
perror("Unable to open " SERVER_MAP_PATH);
goto cleanup;
}
for (int i = 0; i < ARRAY_SIZE(tests) && !READ_ONCE(stop); i++) {
struct test_sk_cfg *test = &tests[i];
const struct sockaddr *addr;
const int zero = 0;
int err;
if (!test__start_subtest(test->name))
continue;
@@ -288,7 +298,13 @@ void test_sk_assign(void)
addr = (const struct sockaddr *)test->addr;
server = start_server(addr, test->len, test->type);
if (server == -1)
goto cleanup;
goto close;
err = bpf_map_update_elem(server_map, &zero, &server, BPF_ANY);
if (CHECK_FAIL(err)) {
perror("Unable to update server_map");
goto close;
}
/* connect to unbound ports */
prepare_addr(test->addr, test->family, CONNECT_PORT,
@@ -302,7 +318,10 @@ void test_sk_assign(void)
close:
close(server);
close(server_map);
cleanup:
if (CHECK_FAIL(unlink(SERVER_MAP_PATH)))
perror("Unable to unlink " SERVER_MAP_PATH);
if (CHECK_FAIL(setns(self_net, CLONE_NEWNET)))
perror("Failed to setns("NS_SELF")");
close(self_net);


@@ -8,6 +8,7 @@
#include <linux/in.h>
#include <linux/in6.h>
#include <sys/socket.h>
#include <netinet/tcp.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
@@ -16,6 +17,10 @@
#define DST_REWRITE_IP4 0x7f000001U
#define DST_REWRITE_PORT4 4444
#ifndef TCP_CA_NAME_MAX
#define TCP_CA_NAME_MAX 16
#endif
int _version SEC("version") = 1;
__attribute__ ((noinline))
@@ -33,6 +38,43 @@ int do_bind(struct bpf_sock_addr *ctx)
return 1;
}
static __inline int verify_cc(struct bpf_sock_addr *ctx,
char expected[TCP_CA_NAME_MAX])
{
char buf[TCP_CA_NAME_MAX];
int i;
if (bpf_getsockopt(ctx, SOL_TCP, TCP_CONGESTION, &buf, sizeof(buf)))
return 1;
for (i = 0; i < TCP_CA_NAME_MAX; i++) {
if (buf[i] != expected[i])
return 1;
if (buf[i] == 0)
break;
}
return 0;
}
static __inline int set_cc(struct bpf_sock_addr *ctx)
{
char reno[TCP_CA_NAME_MAX] = "reno";
char cubic[TCP_CA_NAME_MAX] = "cubic";
if (bpf_setsockopt(ctx, SOL_TCP, TCP_CONGESTION, &reno, sizeof(reno)))
return 1;
if (verify_cc(ctx, reno))
return 1;
if (bpf_setsockopt(ctx, SOL_TCP, TCP_CONGESTION, &cubic, sizeof(cubic)))
return 1;
if (verify_cc(ctx, cubic))
return 1;
return 0;
}
SEC("cgroup/connect4")
int connect_v4_prog(struct bpf_sock_addr *ctx)
{
@@ -66,6 +108,10 @@ int connect_v4_prog(struct bpf_sock_addr *ctx)
bpf_sk_release(sk);
/* Rewrite congestion control. */
if (ctx->type == SOCK_STREAM && set_cc(ctx))
return 0;
/* Rewrite destination. */
ctx->user_ip4 = bpf_htonl(DST_REWRITE_IP4);
ctx->user_port = bpf_htons(DST_REWRITE_PORT4);


@@ -0,0 +1,76 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2020 Facebook */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
struct inner_map {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, int);
} inner_map1 SEC(".maps"),
inner_map2 SEC(".maps");
struct outer_arr {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, 3);
__uint(key_size, sizeof(int));
__uint(value_size, sizeof(int));
/* it's possible to use anonymous struct as inner map definition here */
__array(values, struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
/* changing max_entries to 2 will fail during load
* due to incompatibility with inner_map definition */
__uint(max_entries, 1);
__type(key, int);
__type(value, int);
});
} outer_arr SEC(".maps") = {
/* (void *) cast is necessary because we didn't use `struct inner_map`
* in __inner(values, ...)
* Actually, a conscious effort is required to screw up initialization
* of inner map slots, which is a great thing!
*/
.values = { (void *)&inner_map1, 0, (void *)&inner_map2 },
};
struct outer_hash {
__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
__uint(max_entries, 5);
__uint(key_size, sizeof(int));
/* Here everything works flawlessly due to reuse of struct inner_map
* and compiler will complain at the attempt to use non-inner_map
* references below. This is great experience.
*/
__array(values, struct inner_map);
} outer_hash SEC(".maps") = {
.values = {
[0] = &inner_map2,
[4] = &inner_map1,
},
};
int input = 0;
SEC("raw_tp/sys_enter")
int handle__sys_enter(void *ctx)
{
struct inner_map *inner_map;
int key = 0, val;
inner_map = bpf_map_lookup_elem(&outer_arr, &key);
if (!inner_map)
return 1;
val = input;
bpf_map_update_elem(inner_map, &key, &val, 0);
inner_map = bpf_map_lookup_elem(&outer_hash, &key);
if (!inner_map)
return 1;
val = input + 1;
bpf_map_update_elem(inner_map, &key, &val, 0);
return 0;
}
char _license[] SEC("license") = "GPL";

File diff suppressed because it is too large.


@@ -0,0 +1,54 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright 2019, 2020 Cloudflare */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
struct gre_base_hdr {
uint16_t flags;
uint16_t protocol;
} __attribute__((packed));
struct guehdr {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
uint8_t hlen : 5, control : 1, variant : 2;
#else
uint8_t variant : 2, control : 1, hlen : 5;
#endif
uint8_t proto_ctype;
uint16_t flags;
};
struct unigue {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
uint8_t _r : 2, last_hop_gre : 1, forward_syn : 1, version : 4;
#else
uint8_t version : 4, forward_syn : 1, last_hop_gre : 1, _r : 2;
#endif
uint8_t reserved;
uint8_t next_hop;
uint8_t hop_count;
// Next hops go here
} __attribute__((packed));
typedef struct {
struct ethhdr eth;
struct iphdr ip;
struct gre_base_hdr gre;
} __attribute__((packed)) encap_gre_t;
typedef struct {
struct ethhdr eth;
struct iphdr ip;
struct udphdr udp;
struct guehdr gue;
struct unigue unigue;
} __attribute__((packed)) encap_headers_t;


@@ -0,0 +1,18 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/bpf.h>
#include <stdint.h>
#include <linux/types.h>
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
__u64 count = 0;
SEC("raw_tracepoint/sys_enter")
int test_enable_stats(void *ctx)
{
count += 1;
return 0;
}


@@ -3,16 +3,8 @@
*/
#include <stddef.h>
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
/* It is a dumb bpf program such that it must have no
* issue to be loaded since testing the verifier is
* not the focus here.
*/
int _version SEC("version") = 1;
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
@@ -20,13 +12,13 @@ struct {
__type(value, __u64);
} test_map_id SEC(".maps");
SEC("test_obj_id_dummy")
int test_obj_id(struct __sk_buff *skb)
SEC("raw_tp/sys_enter")
int test_obj_id(void *ctx)
{
__u32 key = 0;
__u64 *value;
value = bpf_map_lookup_elem(&test_map_id, &key);
return TC_ACT_OK;
return 0;
}


@@ -16,6 +16,26 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
/* Pin map under /sys/fs/bpf/tc/globals/<map name> */
#define PIN_GLOBAL_NS 2
/* Must match struct bpf_elf_map layout from iproute2 */
struct {
__u32 type;
__u32 size_key;
__u32 size_value;
__u32 max_elem;
__u32 flags;
__u32 id;
__u32 pinning;
} server_map SEC("maps") = {
.type = BPF_MAP_TYPE_SOCKMAP,
.size_key = sizeof(int),
.size_value = sizeof(__u64),
.max_elem = 1,
.pinning = PIN_GLOBAL_NS,
};
int _version SEC("version") = 1;
char _license[] SEC("license") = "GPL";
@@ -72,7 +92,9 @@ handle_udp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, bool ipv4)
{
struct bpf_sock_tuple ln = {0};
struct bpf_sock *sk;
const int zero = 0;
size_t tuple_len;
__be16 dport;
int ret;
tuple_len = ipv4 ? sizeof(tuple->ipv4) : sizeof(tuple->ipv6);
@@ -83,32 +105,11 @@ handle_udp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, bool ipv4)
if (sk)
goto assign;
if (ipv4) {
if (tuple->ipv4.dport != bpf_htons(4321))
return TC_ACT_OK;
dport = ipv4 ? tuple->ipv4.dport : tuple->ipv6.dport;
if (dport != bpf_htons(4321))
return TC_ACT_OK;
ln.ipv4.daddr = bpf_htonl(0x7f000001);
ln.ipv4.dport = bpf_htons(1234);
sk = bpf_sk_lookup_udp(skb, &ln, sizeof(ln.ipv4),
BPF_F_CURRENT_NETNS, 0);
} else {
if (tuple->ipv6.dport != bpf_htons(4321))
return TC_ACT_OK;
/* Upper parts of daddr are already zero. */
ln.ipv6.daddr[3] = bpf_htonl(0x1);
ln.ipv6.dport = bpf_htons(1234);
sk = bpf_sk_lookup_udp(skb, &ln, sizeof(ln.ipv6),
BPF_F_CURRENT_NETNS, 0);
}
/* workaround: We can't do a single socket lookup here, because then
* the compiler will likely spill tuple_len to the stack. This makes it
* lose all bounds information in the verifier, which then rejects the
* call as unsafe.
*/
sk = bpf_map_lookup_elem(&server_map, &zero);
if (!sk)
return TC_ACT_SHOT;
@@ -123,7 +124,9 @@ handle_tcp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, bool ipv4)
{
struct bpf_sock_tuple ln = {0};
struct bpf_sock *sk;
const int zero = 0;
size_t tuple_len;
__be16 dport;
int ret;
tuple_len = ipv4 ? sizeof(tuple->ipv4) : sizeof(tuple->ipv6);
@@ -137,32 +140,11 @@ handle_tcp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, bool ipv4)
bpf_sk_release(sk);
}
if (ipv4) {
if (tuple->ipv4.dport != bpf_htons(4321))
return TC_ACT_OK;
dport = ipv4 ? tuple->ipv4.dport : tuple->ipv6.dport;
if (dport != bpf_htons(4321))
return TC_ACT_OK;
ln.ipv4.daddr = bpf_htonl(0x7f000001);
ln.ipv4.dport = bpf_htons(1234);
sk = bpf_skc_lookup_tcp(skb, &ln, sizeof(ln.ipv4),
BPF_F_CURRENT_NETNS, 0);
} else {
if (tuple->ipv6.dport != bpf_htons(4321))
return TC_ACT_OK;
/* Upper parts of daddr are already zero. */
ln.ipv6.daddr[3] = bpf_htonl(0x1);
ln.ipv6.dport = bpf_htons(1234);
sk = bpf_skc_lookup_tcp(skb, &ln, sizeof(ln.ipv6),
BPF_F_CURRENT_NETNS, 0);
}
/* workaround: We can't do a single socket lookup here, because then
* the compiler will likely spill tuple_len to the stack. This makes it
* lose all bounds information in the verifier, which then rejects the
* call as unsafe.
*/
sk = bpf_map_lookup_elem(&server_map, &zero);
if (!sk)
return TC_ACT_SHOT;


@@ -45,7 +45,7 @@ int sysctl_tcp_mem(struct bpf_sysctl *ctx)
unsigned long tcp_mem[3] = {0, 0, 0};
char value[MAX_VALUE_STR_LEN];
unsigned char i, off = 0;
int ret;
volatile int ret;
if (ctx->write)
return 0;


@@ -351,6 +351,7 @@ int extract_build_id(char *build_id, size_t size)
len = size;
memcpy(build_id, line, len);
build_id[len] = '\0';
free(line);
return 0;
err:
fclose(fp);
@@ -420,6 +421,18 @@ static int libbpf_print_fn(enum libbpf_print_level level,
return 0;
}
static void free_str_set(const struct str_set *set)
{
int i;
if (!set)
return;
for (i = 0; i < set->cnt; i++)
free((void *)set->strs[i]);
free(set->strs);
}
static int parse_str_list(const char *s, struct str_set *set)
{
char *input, *state = NULL, *next, **tmp, **strs = NULL;
@@ -756,11 +769,11 @@ int main(int argc, char **argv)
fprintf(stdout, "Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n",
env.succ_cnt, env.sub_succ_cnt, env.skip_cnt, env.fail_cnt);
free(env.test_selector.blacklist.strs);
free(env.test_selector.whitelist.strs);
free_str_set(&env.test_selector.blacklist);
free_str_set(&env.test_selector.whitelist);
free(env.test_selector.num_set);
free(env.subtest_selector.blacklist.strs);
free(env.subtest_selector.whitelist.strs);
free_str_set(&env.subtest_selector.blacklist);
free_str_set(&env.subtest_selector.whitelist);
free(env.subtest_selector.num_set);
return env.fail_cnt ? EXIT_FAILURE : EXIT_SUCCESS;


@@ -105,6 +105,13 @@ struct ipv6_packet {
} __packed;
extern struct ipv6_packet pkt_v6;
#define PRINT_FAIL(format...) \
({ \
test__fail(); \
fprintf(stdout, "%s:FAIL:%d ", __func__, __LINE__); \
fprintf(stdout, ##format); \
})
#define _CHECK(condition, tag, duration, format...) ({ \
int __ret = !!(condition); \
int __save_errno = errno; \


@@ -50,7 +50,7 @@
#define MAX_INSNS BPF_MAXINSNS
#define MAX_TEST_INSNS 1000000
#define MAX_FIXUPS 8
#define MAX_NR_MAPS 19
#define MAX_NR_MAPS 20
#define MAX_TEST_RUNS 8
#define POINTER_VALUE 0xcafe4all
#define TEST_DATA_LEN 64
@@ -86,6 +86,7 @@ struct bpf_test {
int fixup_map_array_small[MAX_FIXUPS];
int fixup_sk_storage_map[MAX_FIXUPS];
int fixup_map_event_output[MAX_FIXUPS];
int fixup_map_reuseport_array[MAX_FIXUPS];
const char *errstr;
const char *errstr_unpriv;
uint32_t insn_processed;
@@ -637,6 +638,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
int *fixup_map_array_small = test->fixup_map_array_small;
int *fixup_sk_storage_map = test->fixup_sk_storage_map;
int *fixup_map_event_output = test->fixup_map_event_output;
int *fixup_map_reuseport_array = test->fixup_map_reuseport_array;
if (test->fill_helper) {
test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
@@ -806,6 +808,14 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
fixup_map_event_output++;
} while (*fixup_map_event_output);
}
if (*fixup_map_reuseport_array) {
map_fds[19] = __create_map(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
sizeof(u32), sizeof(u64), 1, 0);
do {
prog[*fixup_map_reuseport_array].imm = map_fds[19];
fixup_map_reuseport_array++;
} while (*fixup_map_reuseport_array);
}
}
static int set_admin(bool admin)
@@ -943,7 +953,12 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
attr.insns = prog;
attr.insns_cnt = prog_len;
attr.license = "GPL";
attr.log_level = verbose || expected_ret == VERBOSE_ACCEPT ? 1 : 4;
if (verbose)
attr.log_level = 1;
else if (expected_ret == VERBOSE_ACCEPT)
attr.log_level = 2;
else
attr.log_level = 4;
attr.prog_flags = pflags;
fd_prog = bpf_load_program_xattr(&attr, bpf_vlog, sizeof(bpf_vlog));


@@ -92,3 +92,27 @@
.result = ACCEPT,
.retval = 1,
},
{
"perfevent for cgroup dev",
.insns = { __PERF_EVENT_INSNS__ },
.prog_type = BPF_PROG_TYPE_CGROUP_DEVICE,
.fixup_map_event_output = { 4 },
.result = ACCEPT,
.retval = 1,
},
{
"perfevent for cgroup sysctl",
.insns = { __PERF_EVENT_INSNS__ },
.prog_type = BPF_PROG_TYPE_CGROUP_SYSCTL,
.fixup_map_event_output = { 4 },
.result = ACCEPT,
.retval = 1,
},
{
"perfevent for cgroup sockopt",
.insns = { __PERF_EVENT_INSNS__ },
.prog_type = BPF_PROG_TYPE_CGROUP_SOCKOPT,
.fixup_map_event_output = { 4 },
.result = ACCEPT,
.retval = 1,
},


@@ -1,33 +1,3 @@
{
"prevent map lookup in sockmap",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_EXIT_INSN(),
},
.fixup_map_sockmap = { 3 },
.result = REJECT,
.errstr = "cannot pass map_type 15 into func bpf_map_lookup_elem",
.prog_type = BPF_PROG_TYPE_SOCK_OPS,
},
{
"prevent map lookup in sockhash",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_EXIT_INSN(),
},
.fixup_map_sockhash = { 3 },
.result = REJECT,
.errstr = "cannot pass map_type 18 into func bpf_map_lookup_elem",
.prog_type = BPF_PROG_TYPE_SOCK_OPS,
},
{
"prevent map lookup in stack trace",
.insns = {


@@ -516,3 +516,118 @@
.prog_type = BPF_PROG_TYPE_XDP,
.result = ACCEPT,
},
{
"bpf_map_lookup_elem(sockmap, &key)",
.insns = {
BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_sockmap = { 3 },
.prog_type = BPF_PROG_TYPE_SK_SKB,
.result = REJECT,
.errstr = "Unreleased reference id=2 alloc_insn=5",
},
{
"bpf_map_lookup_elem(sockhash, &key)",
.insns = {
BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_sockhash = { 3 },
.prog_type = BPF_PROG_TYPE_SK_SKB,
.result = REJECT,
.errstr = "Unreleased reference id=2 alloc_insn=5",
},
{
"bpf_map_lookup_elem(sockmap, &key); sk->type [fullsock field]; bpf_sk_release(sk)",
.insns = {
BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, type)),
BPF_EMIT_CALL(BPF_FUNC_sk_release),
BPF_EXIT_INSN(),
},
.fixup_map_sockmap = { 3 },
.prog_type = BPF_PROG_TYPE_SK_SKB,
.result = ACCEPT,
},
{
"bpf_map_lookup_elem(sockhash, &key); sk->type [fullsock field]; bpf_sk_release(sk)",
.insns = {
BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, type)),
BPF_EMIT_CALL(BPF_FUNC_sk_release),
BPF_EXIT_INSN(),
},
.fixup_map_sockhash = { 3 },
.prog_type = BPF_PROG_TYPE_SK_SKB,
.result = ACCEPT,
},
{
"bpf_sk_select_reuseport(ctx, reuseport_array, &key, flags)",
.insns = {
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -4),
BPF_LD_MAP_FD(BPF_REG_2, 0),
BPF_EMIT_CALL(BPF_FUNC_sk_select_reuseport),
BPF_EXIT_INSN(),
},
.fixup_map_reuseport_array = { 4 },
.prog_type = BPF_PROG_TYPE_SK_REUSEPORT,
.result = ACCEPT,
},
{
"bpf_sk_select_reuseport(ctx, sockmap, &key, flags)",
.insns = {
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -4),
BPF_LD_MAP_FD(BPF_REG_2, 0),
BPF_EMIT_CALL(BPF_FUNC_sk_select_reuseport),
BPF_EXIT_INSN(),
},
.fixup_map_sockmap = { 4 },
.prog_type = BPF_PROG_TYPE_SK_REUSEPORT,
.result = ACCEPT,
},
{
"bpf_sk_select_reuseport(ctx, sockhash, &key, flags)",
.insns = {
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -4),
BPF_LD_MAP_FD(BPF_REG_2, 0),
BPF_EMIT_CALL(BPF_FUNC_sk_select_reuseport),
BPF_EXIT_INSN(),
},
.fixup_map_sockmap = { 4 },
.prog_type = BPF_PROG_TYPE_SK_REUSEPORT,
.result = ACCEPT,
},
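
Taken together, the cases above encode the contract introduced by this
series: a socket returned by bpf_map_lookup_elem() on a SOCKMAP or SOCKHASH
carries an acquired reference and must be released. A minimal sketch of the
pattern on the BPF side (map definition and section name are illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, __u64);
} sock_map SEC(".maps");

SEC("sk_skb")
int lookup_then_release(struct __sk_buff *skb)
{
	int key = 0;
	struct bpf_sock *sk;

	sk = bpf_map_lookup_elem(&sock_map, &key);
	if (!sk)
		return SK_DROP;
	/* fullsock fields such as sk->type are readable here */
	bpf_sk_release(sk); /* mandatory: the lookup took a reference */
	return SK_PASS;
}

char _license[] SEC("license") = "GPL";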