Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next

Alexei Starovoitov says:

====================
pull-request: bpf-next 2018-10-08

The following pull-request contains BPF updates for your *net-next* tree.

The main changes are:

1) sk_lookup_[tcp|udp] and sk_release helpers from Joe Stringer, which allow
BPF programs to perform lookups for sockets in a network namespace. This
allows a program to determine early in processing whether the stack is
expecting to receive the packet, and to perform some action (e.g. drop,
forward somewhere) based on this information; a minimal sketch of the
helper contract follows the diffstat below.

2) per-cpu cgroup local storage from Roman Gushchin.
Per-cpu cgroup local storage is very similar to simple cgroup storage,
except that all the data is per-cpu. The main goal of the per-cpu variant
is to implement super fast counters (e.g. packet counters) that require
neither lookups nor atomic operations in the fast path.
An example of these hybrid counters is in selftests/bpf/netcnt_prog.c,
included below.

3) allow HW offload of programs with BPF-to-BPF function calls from Quentin Monnet

4) support more than 64-byte key/value in HW offloaded BPF maps from Jakub Kicinski

5) rename of libbpf interfaces from Andrey Ignatov.
libbpf is maturing as a library and should follow good practices in
library design and implementation to play well with other libraries.
This patch set brings consistent naming convention to global symbols.

6) relicense libbpf as LGPL-2.1 OR BSD-2-Clause from Alexei Starovoitov
to let Apache2 projects use libbpf

7) various AF_XDP fixes from Björn and Magnus
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
66 changed files with 4142 additions and 665 deletions
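
Before the per-file diffs, a minimal sketch of the contract behind item 1,
modeled on the test_sk_lookup_kern.c programs included below (the section
and function names here are illustrative, not part of the series): a socket
reference returned by bpf_sk_lookup_tcp() must be released with
bpf_sk_release() on every program path, or the verifier's reference
tracking rejects the program.

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include "bpf_helpers.h" /* declares bpf_sk_lookup_tcp/bpf_sk_release, see diff below */

char _license[] SEC("license") = "GPL";

SEC("classifier")
int sk_lookup_sketch(struct __sk_buff *skb)
{
	struct bpf_sock_tuple tuple = {}; /* zeroed tuple, for illustration only */
	struct bpf_sock *sk;

	/* look up a TCP socket in the current netns (netns_id 0), no flags */
	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
	if (!sk)
		return TC_ACT_UNSPEC; /* no socket expects this packet */

	bpf_sk_release(sk); /* mandatory: drop the acquired reference */
	return TC_ACT_OK;
}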

tools/testing/selftests/bpf/Makefile

@@ -23,7 +23,8 @@ $(TEST_CUSTOM_PROGS): $(OUTPUT)/%: %.c
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \
test_sock test_btf test_sockmap test_lirc_mode2_user get_cgroup_id_user \
-test_socket_cookie test_cgroup_storage test_select_reuseport
+test_socket_cookie test_cgroup_storage test_select_reuseport test_section_names \
+test_netcnt
TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \
test_pkt_md_access.o test_xdp_redirect.o test_xdp_meta.o sockmap_parse_prog.o \
@@ -35,7 +36,7 @@ TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test
test_get_stack_rawtp.o test_sockmap_kern.o test_sockhash_kern.o \
test_lwt_seg6local.o sendmsg4_prog.o sendmsg6_prog.o test_lirc_mode2_kern.o \
get_cgroup_id_kern.o socket_cookie_prog.o test_select_reuseport_kern.o \
-test_skb_cgroup_id_kern.o bpf_flow.o
+test_skb_cgroup_id_kern.o bpf_flow.o netcnt_prog.o test_sk_lookup_kern.o
# Order correspond to 'make run_tests' order
TEST_PROGS := test_kmod.sh \
@@ -72,6 +73,7 @@ $(OUTPUT)/test_tcpbpf_user: cgroup_helpers.c
$(OUTPUT)/test_progs: trace_helpers.c
$(OUTPUT)/get_cgroup_id_user: cgroup_helpers.c
$(OUTPUT)/test_cgroup_storage: cgroup_helpers.c
+$(OUTPUT)/test_netcnt: cgroup_helpers.c
.PHONY: force

tools/testing/selftests/bpf/bpf_helpers.h

@@ -143,6 +143,18 @@ static unsigned long long (*bpf_skb_cgroup_id)(void *ctx) =
(void *) BPF_FUNC_skb_cgroup_id;
static unsigned long long (*bpf_skb_ancestor_cgroup_id)(void *ctx, int level) =
(void *) BPF_FUNC_skb_ancestor_cgroup_id;
+static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx,
+struct bpf_sock_tuple *tuple,
+int size, unsigned int netns_id,
+unsigned long long flags) =
+(void *) BPF_FUNC_sk_lookup_tcp;
+static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx,
+struct bpf_sock_tuple *tuple,
+int size, unsigned int netns_id,
+unsigned long long flags) =
+(void *) BPF_FUNC_sk_lookup_udp;
+static int (*bpf_sk_release)(struct bpf_sock *sk) =
+(void *) BPF_FUNC_sk_release;
/* llvm builtin functions that eBPF C program may use to
* emit BPF_LD_ABS and BPF_LD_IND instructions

tools/testing/selftests/bpf/netcnt_common.h

@@ -0,0 +1,24 @@
// SPDX-License-Identifier: GPL-2.0
#ifndef __NETCNT_COMMON_H
#define __NETCNT_COMMON_H
#include <linux/types.h>
#define MAX_PERCPU_PACKETS 32
struct percpu_net_cnt {
__u64 packets;
__u64 bytes;
__u64 prev_ts;
__u64 prev_packets;
__u64 prev_bytes;
};
struct net_cnt {
__u64 packets;
__u64 bytes;
};
#endif

tools/testing/selftests/bpf/netcnt_prog.c

@@ -0,0 +1,71 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <linux/version.h>
#include "bpf_helpers.h"
#include "netcnt_common.h"
#define MAX_BPS (3 * 1024 * 1024)
#define REFRESH_TIME_NS 100000000
#define NS_PER_SEC 1000000000
struct bpf_map_def SEC("maps") percpu_netcnt = {
.type = BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
.key_size = sizeof(struct bpf_cgroup_storage_key),
.value_size = sizeof(struct percpu_net_cnt),
};
struct bpf_map_def SEC("maps") netcnt = {
.type = BPF_MAP_TYPE_CGROUP_STORAGE,
.key_size = sizeof(struct bpf_cgroup_storage_key),
.value_size = sizeof(struct net_cnt),
};
SEC("cgroup/skb")
int bpf_nextcnt(struct __sk_buff *skb)
{
struct percpu_net_cnt *percpu_cnt;
char fmt[] = "%d %llu %llu\n";
struct net_cnt *cnt;
__u64 ts, dt;
int ret;
cnt = bpf_get_local_storage(&netcnt, 0);
percpu_cnt = bpf_get_local_storage(&percpu_netcnt, 0);
percpu_cnt->packets++;
percpu_cnt->bytes += skb->len;
if (percpu_cnt->packets > MAX_PERCPU_PACKETS) {
__sync_fetch_and_add(&cnt->packets,
percpu_cnt->packets);
percpu_cnt->packets = 0;
__sync_fetch_and_add(&cnt->bytes,
percpu_cnt->bytes);
percpu_cnt->bytes = 0;
}
ts = bpf_ktime_get_ns();
dt = ts - percpu_cnt->prev_ts;
dt *= MAX_BPS;
dt /= NS_PER_SEC;
if (cnt->bytes + percpu_cnt->bytes - percpu_cnt->prev_bytes < dt)
ret = 1;
else
ret = 0;
if (dt > REFRESH_TIME_NS) {
percpu_cnt->prev_ts = ts;
percpu_cnt->prev_packets = cnt->packets;
percpu_cnt->prev_bytes = cnt->bytes;
}
return !!ret;
}
char _license[] SEC("license") = "GPL";
__u32 _version SEC("version") = LINUX_VERSION_CODE;
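
A note on the arithmetic in bpf_nextcnt() above: dt * MAX_BPS / NS_PER_SEC
converts the nanoseconds elapsed since prev_ts into the number of bytes the
3 MiB/s budget allows for that window; the program returns 1 (pass) only
while the bytes accumulated since the last refresh stay under that budget.
A standalone sketch of the calculation, with a hypothetical interval:

#include <stdio.h>

#define MAX_BPS (3 * 1024 * 1024)	/* 3 MiB/s, as in netcnt_prog.c */
#define NS_PER_SEC 1000000000ULL

int main(void)
{
	unsigned long long dt = 50000000ULL; /* hypothetical: 50 ms since prev_ts */
	unsigned long long budget = dt * MAX_BPS / NS_PER_SEC;

	/* 0.05 s at 3 MiB/s allows 157286 bytes in this window */
	printf("byte budget for %llu ns: %llu\n", dt, budget);
	return 0;
}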

tools/testing/selftests/bpf/test_cgroup_storage.c

@@ -4,6 +4,7 @@
#include <linux/filter.h>
#include <stdio.h>
#include <stdlib.h>
+#include <sys/sysinfo.h>
#include "bpf_rlimit.h"
#include "cgroup_helpers.h"
@@ -15,6 +16,14 @@ char bpf_log_buf[BPF_LOG_BUF_SIZE];
int main(int argc, char **argv)
{
struct bpf_insn prog[] = {
+BPF_LD_MAP_FD(BPF_REG_1, 0), /* percpu map fd */
+BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */
+BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+BPF_FUNC_get_local_storage),
+BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1),
+BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0), /* map fd */
BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
@@ -28,9 +37,18 @@ int main(int argc, char **argv)
};
size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
int error = EXIT_FAILURE;
-int map_fd, prog_fd, cgroup_fd;
+int map_fd, percpu_map_fd, prog_fd, cgroup_fd;
struct bpf_cgroup_storage_key key;
unsigned long long value;
+unsigned long long *percpu_value;
+int cpu, nproc;
+nproc = get_nprocs_conf();
+percpu_value = malloc(sizeof(*percpu_value) * nproc);
+if (!percpu_value) {
+printf("Not enough memory for per-cpu area (%d cpus)\n", nproc);
+goto err;
+}
map_fd = bpf_create_map(BPF_MAP_TYPE_CGROUP_STORAGE, sizeof(key),
sizeof(value), 0, 0);
@@ -39,7 +57,15 @@ int main(int argc, char **argv)
goto out;
}
-prog[0].imm = map_fd;
+percpu_map_fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
+sizeof(key), sizeof(value), 0, 0);
+if (percpu_map_fd < 0) {
+printf("Failed to create map: %s\n", strerror(errno));
+goto out;
+}
+prog[0].imm = percpu_map_fd;
+prog[7].imm = map_fd;
prog_fd = bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB,
prog, insns_cnt, "GPL", 0,
bpf_log_buf, BPF_LOG_BUF_SIZE);
@@ -77,7 +103,15 @@ int main(int argc, char **argv)
}
if (bpf_map_lookup_elem(map_fd, &key, &value)) {
printf("Failed to lookup cgroup storage\n");
printf("Failed to lookup cgroup storage 0\n");
goto err;
}
+for (cpu = 0; cpu < nproc; cpu++)
+percpu_value[cpu] = 1000;
+if (bpf_map_update_elem(percpu_map_fd, &key, percpu_value, 0)) {
+printf("Failed to update the data in the cgroup storage\n");
+goto err;
+}
@@ -120,11 +154,31 @@ int main(int argc, char **argv)
goto err;
}
+/* Check the final value of the counter in the percpu local storage */
+for (cpu = 0; cpu < nproc; cpu++)
+percpu_value[cpu] = 0;
+if (bpf_map_lookup_elem(percpu_map_fd, &key, percpu_value)) {
+printf("Failed to lookup the per-cpu cgroup storage\n");
+goto err;
+}
+value = 0;
+for (cpu = 0; cpu < nproc; cpu++)
+value += percpu_value[cpu];
+if (value != nproc * 1000 + 6) {
+printf("Unexpected data in the per-cpu cgroup storage\n");
+goto err;
+}
error = 0;
printf("test_cgroup_storage:PASS\n");
err:
cleanup_cgroup_environment();
+free(percpu_value);
out:
return error;
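
One detail worth spelling out in the hunk above: BPF_LD_MAP_FD() expands to
a two-slot BPF_LD_IMM64 pseudo-instruction whose first slot's imm field
carries the map fd. The per-cpu map load occupies instruction slots 0-1,
the call and read-modify-write slots 2-6, and the second map load slots
7-8, which is why the test patches prog[0].imm and prog[7].imm. A
standalone sketch of that encoding (the fd value 42 is hypothetical):

#include <stdio.h>
#include <linux/bpf.h>

int main(void)
{
	/* A map load is a two-slot ld_imm64 with src_reg = BPF_PSEUDO_MAP_FD;
	 * only the first slot's imm carries the fd, the second slot holds
	 * the upper 32 bits of the immediate (zero for an fd). */
	struct bpf_insn ld_map_fd[2] = {
		{ .code = BPF_LD | BPF_DW | BPF_IMM, .dst_reg = BPF_REG_1,
		  .src_reg = BPF_PSEUDO_MAP_FD, .imm = 42 /* hypothetical fd */ },
		{ 0 },
	};

	printf("instruction slots per map load: %zu\n",
	       sizeof(ld_map_fd) / sizeof(ld_map_fd[0]));
	return 0;
}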

tools/testing/selftests/bpf/test_netcnt.c

@@ -0,0 +1,158 @@
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <sys/sysinfo.h>
#include <sys/time.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "cgroup_helpers.h"
#include "bpf_rlimit.h"
#include "netcnt_common.h"
#define BPF_PROG "./netcnt_prog.o"
#define TEST_CGROUP "/test-network-counters/"
static int bpf_find_map(const char *test, struct bpf_object *obj,
const char *name)
{
struct bpf_map *map;
map = bpf_object__find_map_by_name(obj, name);
if (!map) {
printf("%s:FAIL:map '%s' not found\n", test, name);
return -1;
}
return bpf_map__fd(map);
}
int main(int argc, char **argv)
{
struct percpu_net_cnt *percpu_netcnt;
struct bpf_cgroup_storage_key key;
int map_fd, percpu_map_fd;
int error = EXIT_FAILURE;
struct net_cnt netcnt;
struct bpf_object *obj;
int prog_fd, cgroup_fd;
unsigned long packets;
unsigned long bytes;
int cpu, nproc;
__u32 prog_cnt;
nproc = get_nprocs_conf();
percpu_netcnt = malloc(sizeof(*percpu_netcnt) * nproc);
if (!percpu_netcnt) {
printf("Not enough memory for per-cpu area (%d cpus)\n", nproc);
goto err;
}
if (bpf_prog_load(BPF_PROG, BPF_PROG_TYPE_CGROUP_SKB,
&obj, &prog_fd)) {
printf("Failed to load bpf program\n");
goto out;
}
if (setup_cgroup_environment()) {
printf("Failed to load bpf program\n");
goto err;
}
/* Create a cgroup, get fd, and join it */
cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
if (!cgroup_fd) {
printf("Failed to create test cgroup\n");
goto err;
}
if (join_cgroup(TEST_CGROUP)) {
printf("Failed to join cgroup\n");
goto err;
}
/* Attach bpf program */
if (bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0)) {
printf("Failed to attach bpf program");
goto err;
}
assert(system("ping localhost -6 -c 10000 -f -q > /dev/null") == 0);
if (bpf_prog_query(cgroup_fd, BPF_CGROUP_INET_EGRESS, 0, NULL, NULL,
&prog_cnt)) {
printf("Failed to query attached programs");
goto err;
}
map_fd = bpf_find_map(__func__, obj, "netcnt");
if (map_fd < 0) {
printf("Failed to find bpf map with net counters");
goto err;
}
percpu_map_fd = bpf_find_map(__func__, obj, "percpu_netcnt");
if (percpu_map_fd < 0) {
printf("Failed to find bpf map with percpu net counters");
goto err;
}
if (bpf_map_get_next_key(map_fd, NULL, &key)) {
printf("Failed to get key in cgroup storage\n");
goto err;
}
if (bpf_map_lookup_elem(map_fd, &key, &netcnt)) {
printf("Failed to lookup cgroup storage\n");
goto err;
}
if (bpf_map_lookup_elem(percpu_map_fd, &key, &percpu_netcnt[0])) {
printf("Failed to lookup percpu cgroup storage\n");
goto err;
}
/* Some packets can be still in per-cpu cache, but not more than
* MAX_PERCPU_PACKETS.
*/
packets = netcnt.packets;
bytes = netcnt.bytes;
for (cpu = 0; cpu < nproc; cpu++) {
if (percpu_netcnt[cpu].packets > MAX_PERCPU_PACKETS) {
printf("Unexpected percpu value: %llu\n",
percpu_netcnt[cpu].packets);
goto err;
}
packets += percpu_netcnt[cpu].packets;
bytes += percpu_netcnt[cpu].bytes;
}
/* No packets should be lost */
if (packets != 10000) {
printf("Unexpected packet count: %lu\n", packets);
goto err;
}
/* Let's check that bytes counter matches the number of packets
* multiplied by the size of ipv6 ICMP packet.
*/
if (bytes != packets * 104) {
printf("Unexpected bytes count: %lu\n", bytes);
goto err;
}
error = 0;
printf("test_netcnt:PASS\n");
err:
cleanup_cgroup_environment();
free(percpu_netcnt);
out:
return error;
}

tools/testing/selftests/bpf/test_progs.c

@@ -1698,6 +1698,43 @@ static void test_task_fd_query_tp(void)
"sys_enter_read");
}
+static void test_reference_tracking()
+{
+const char *file = "./test_sk_lookup_kern.o";
+struct bpf_object *obj;
+struct bpf_program *prog;
+__u32 duration;
+int err = 0;
+obj = bpf_object__open(file);
+if (IS_ERR(obj)) {
+error_cnt++;
+return;
+}
+bpf_object__for_each_program(prog, obj) {
+const char *title;
+/* Ignore .text sections */
+title = bpf_program__title(prog, false);
+if (strstr(title, ".text") != NULL)
+continue;
+bpf_program__set_type(prog, BPF_PROG_TYPE_SCHED_CLS);
+/* Expect verifier failure if test name has 'fail' */
+if (strstr(title, "fail") != NULL) {
+libbpf_set_print(NULL, NULL, NULL);
+err = !bpf_program__load(prog, "GPL", 0);
+libbpf_set_print(printf, printf, NULL);
+} else {
+err = bpf_program__load(prog, "GPL", 0);
+}
+CHECK(err, title, "\n");
+}
+bpf_object__close(obj);
+}
int main(void)
{
jit_enabled = is_jit_enabled();
@@ -1719,6 +1756,7 @@ int main(void)
test_get_stack_raw_tp();
test_task_fd_query_rawtp();
test_task_fd_query_tp();
+test_reference_tracking();
printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;

tools/testing/selftests/bpf/test_section_names.c

@@ -0,0 +1,208 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook
#include <err.h>
#include <bpf/libbpf.h>
#include "bpf_util.h"
struct sec_name_test {
const char sec_name[32];
struct {
int rc;
enum bpf_prog_type prog_type;
enum bpf_attach_type expected_attach_type;
} expected_load;
struct {
int rc;
enum bpf_attach_type attach_type;
} expected_attach;
};
static struct sec_name_test tests[] = {
{"InvAliD", {-EINVAL, 0, 0}, {-EINVAL, 0} },
{"cgroup", {-EINVAL, 0, 0}, {-EINVAL, 0} },
{"socket", {0, BPF_PROG_TYPE_SOCKET_FILTER, 0}, {-EINVAL, 0} },
{"kprobe/", {0, BPF_PROG_TYPE_KPROBE, 0}, {-EINVAL, 0} },
{"kretprobe/", {0, BPF_PROG_TYPE_KPROBE, 0}, {-EINVAL, 0} },
{"classifier", {0, BPF_PROG_TYPE_SCHED_CLS, 0}, {-EINVAL, 0} },
{"action", {0, BPF_PROG_TYPE_SCHED_ACT, 0}, {-EINVAL, 0} },
{"tracepoint/", {0, BPF_PROG_TYPE_TRACEPOINT, 0}, {-EINVAL, 0} },
{
"raw_tracepoint/",
{0, BPF_PROG_TYPE_RAW_TRACEPOINT, 0},
{-EINVAL, 0},
},
{"xdp", {0, BPF_PROG_TYPE_XDP, 0}, {-EINVAL, 0} },
{"perf_event", {0, BPF_PROG_TYPE_PERF_EVENT, 0}, {-EINVAL, 0} },
{"lwt_in", {0, BPF_PROG_TYPE_LWT_IN, 0}, {-EINVAL, 0} },
{"lwt_out", {0, BPF_PROG_TYPE_LWT_OUT, 0}, {-EINVAL, 0} },
{"lwt_xmit", {0, BPF_PROG_TYPE_LWT_XMIT, 0}, {-EINVAL, 0} },
{"lwt_seg6local", {0, BPF_PROG_TYPE_LWT_SEG6LOCAL, 0}, {-EINVAL, 0} },
{
"cgroup_skb/ingress",
{0, BPF_PROG_TYPE_CGROUP_SKB, 0},
{0, BPF_CGROUP_INET_INGRESS},
},
{
"cgroup_skb/egress",
{0, BPF_PROG_TYPE_CGROUP_SKB, 0},
{0, BPF_CGROUP_INET_EGRESS},
},
{"cgroup/skb", {0, BPF_PROG_TYPE_CGROUP_SKB, 0}, {-EINVAL, 0} },
{
"cgroup/sock",
{0, BPF_PROG_TYPE_CGROUP_SOCK, 0},
{0, BPF_CGROUP_INET_SOCK_CREATE},
},
{
"cgroup/post_bind4",
{0, BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND},
{0, BPF_CGROUP_INET4_POST_BIND},
},
{
"cgroup/post_bind6",
{0, BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND},
{0, BPF_CGROUP_INET6_POST_BIND},
},
{
"cgroup/dev",
{0, BPF_PROG_TYPE_CGROUP_DEVICE, 0},
{0, BPF_CGROUP_DEVICE},
},
{"sockops", {0, BPF_PROG_TYPE_SOCK_OPS, 0}, {0, BPF_CGROUP_SOCK_OPS} },
{
"sk_skb/stream_parser",
{0, BPF_PROG_TYPE_SK_SKB, 0},
{0, BPF_SK_SKB_STREAM_PARSER},
},
{
"sk_skb/stream_verdict",
{0, BPF_PROG_TYPE_SK_SKB, 0},
{0, BPF_SK_SKB_STREAM_VERDICT},
},
{"sk_skb", {0, BPF_PROG_TYPE_SK_SKB, 0}, {-EINVAL, 0} },
{"sk_msg", {0, BPF_PROG_TYPE_SK_MSG, 0}, {0, BPF_SK_MSG_VERDICT} },
{"lirc_mode2", {0, BPF_PROG_TYPE_LIRC_MODE2, 0}, {0, BPF_LIRC_MODE2} },
{
"flow_dissector",
{0, BPF_PROG_TYPE_FLOW_DISSECTOR, 0},
{0, BPF_FLOW_DISSECTOR},
},
{
"cgroup/bind4",
{0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND},
{0, BPF_CGROUP_INET4_BIND},
},
{
"cgroup/bind6",
{0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND},
{0, BPF_CGROUP_INET6_BIND},
},
{
"cgroup/connect4",
{0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT},
{0, BPF_CGROUP_INET4_CONNECT},
},
{
"cgroup/connect6",
{0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT},
{0, BPF_CGROUP_INET6_CONNECT},
},
{
"cgroup/sendmsg4",
{0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG},
{0, BPF_CGROUP_UDP4_SENDMSG},
},
{
"cgroup/sendmsg6",
{0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG},
{0, BPF_CGROUP_UDP6_SENDMSG},
},
};
static int test_prog_type_by_name(const struct sec_name_test *test)
{
enum bpf_attach_type expected_attach_type;
enum bpf_prog_type prog_type;
int rc;
rc = libbpf_prog_type_by_name(test->sec_name, &prog_type,
&expected_attach_type);
if (rc != test->expected_load.rc) {
warnx("prog: unexpected rc=%d for %s", rc, test->sec_name);
return -1;
}
if (rc)
return 0;
if (prog_type != test->expected_load.prog_type) {
warnx("prog: unexpected prog_type=%d for %s", prog_type,
test->sec_name);
return -1;
}
if (expected_attach_type != test->expected_load.expected_attach_type) {
warnx("prog: unexpected expected_attach_type=%d for %s",
expected_attach_type, test->sec_name);
return -1;
}
return 0;
}
static int test_attach_type_by_name(const struct sec_name_test *test)
{
enum bpf_attach_type attach_type;
int rc;
rc = libbpf_attach_type_by_name(test->sec_name, &attach_type);
if (rc != test->expected_attach.rc) {
warnx("attach: unexpected rc=%d for %s", rc, test->sec_name);
return -1;
}
if (rc)
return 0;
if (attach_type != test->expected_attach.attach_type) {
warnx("attach: unexpected attach_type=%d for %s", attach_type,
test->sec_name);
return -1;
}
return 0;
}
static int run_test_case(const struct sec_name_test *test)
{
if (test_prog_type_by_name(test))
return -1;
if (test_attach_type_by_name(test))
return -1;
return 0;
}
static int run_tests(void)
{
int passes = 0;
int fails = 0;
int i;
for (i = 0; i < ARRAY_SIZE(tests); ++i) {
if (run_test_case(&tests[i]))
++fails;
else
++passes;
}
printf("Summary: %d PASSED, %d FAILED\n", passes, fails);
return fails ? -1 : 0;
}
int main(int argc, char **argv)
{
return run_tests();
}

tools/testing/selftests/bpf/test_sk_lookup_kern.c

@@ -0,0 +1,180 @@
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
#include <stddef.h>
#include <stdbool.h>
#include <string.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/pkt_cls.h>
#include <linux/tcp.h>
#include <sys/socket.h>
#include "bpf_helpers.h"
#include "bpf_endian.h"
int _version SEC("version") = 1;
char _license[] SEC("license") = "GPL";
/* Fill 'tuple' with L3 info, and attempt to find L4. On fail, return NULL. */
static struct bpf_sock_tuple *get_tuple(void *data, __u64 nh_off,
void *data_end, __u16 eth_proto,
bool *ipv4)
{
struct bpf_sock_tuple *result;
__u8 proto = 0;
__u64 ihl_len;
if (eth_proto == bpf_htons(ETH_P_IP)) {
struct iphdr *iph = (struct iphdr *)(data + nh_off);
if (iph + 1 > data_end)
return NULL;
ihl_len = iph->ihl * 4;
proto = iph->protocol;
*ipv4 = true;
result = (struct bpf_sock_tuple *)&iph->saddr;
} else if (eth_proto == bpf_htons(ETH_P_IPV6)) {
struct ipv6hdr *ip6h = (struct ipv6hdr *)(data + nh_off);
if (ip6h + 1 > data_end)
return NULL;
ihl_len = sizeof(*ip6h);
proto = ip6h->nexthdr;
*ipv4 = false;
result = (struct bpf_sock_tuple *)&ip6h->saddr;
}
if (data + nh_off + ihl_len > data_end || proto != IPPROTO_TCP)
return NULL;
return result;
}
SEC("sk_lookup_success")
int bpf_sk_lookup_test0(struct __sk_buff *skb)
{
void *data_end = (void *)(long)skb->data_end;
void *data = (void *)(long)skb->data;
struct ethhdr *eth = (struct ethhdr *)(data);
struct bpf_sock_tuple *tuple;
struct bpf_sock *sk;
size_t tuple_len;
bool ipv4;
if (eth + 1 > data_end)
return TC_ACT_SHOT;
tuple = get_tuple(data, sizeof(*eth), data_end, eth->h_proto, &ipv4);
if (!tuple || tuple + sizeof *tuple > data_end)
return TC_ACT_SHOT;
tuple_len = ipv4 ? sizeof(tuple->ipv4) : sizeof(tuple->ipv6);
sk = bpf_sk_lookup_tcp(skb, tuple, tuple_len, 0, 0);
if (sk)
bpf_sk_release(sk);
return sk ? TC_ACT_OK : TC_ACT_UNSPEC;
}
SEC("sk_lookup_success_simple")
int bpf_sk_lookup_test1(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
if (sk)
bpf_sk_release(sk);
return 0;
}
SEC("fail_use_after_free")
int bpf_sk_lookup_uaf(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
__u32 family = 0;
sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
if (sk) {
bpf_sk_release(sk);
family = sk->family;
}
return family;
}
SEC("fail_modify_sk_pointer")
int bpf_sk_lookup_modptr(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
__u32 family;
sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
if (sk) {
sk += 1;
bpf_sk_release(sk);
}
return 0;
}
SEC("fail_modify_sk_or_null_pointer")
int bpf_sk_lookup_modptr_or_null(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
__u32 family;
sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
sk += 1;
if (sk)
bpf_sk_release(sk);
return 0;
}
SEC("fail_no_release")
int bpf_sk_lookup_test2(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
return 0;
}
SEC("fail_release_twice")
int bpf_sk_lookup_test3(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
bpf_sk_release(sk);
bpf_sk_release(sk);
return 0;
}
SEC("fail_release_unchecked")
int bpf_sk_lookup_test4(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
bpf_sk_release(sk);
return 0;
}
void lookup_no_release(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
}
SEC("fail_no_release_subcall")
int bpf_sk_lookup_test5(struct __sk_buff *skb)
{
lookup_no_release(skb);
return 0;
}

tools/testing/selftests/bpf/test_socket_cookie.c

@@ -158,11 +158,7 @@ static int run_test(int cgfd)
bpf_object__for_each_program(prog, pobj) {
prog_name = bpf_program__title(prog, /*needs_copy*/ false);
-if (strcmp(prog_name, "cgroup/connect6") == 0) {
-attach_type = BPF_CGROUP_INET6_CONNECT;
-} else if (strcmp(prog_name, "sockops") == 0) {
-attach_type = BPF_CGROUP_SOCK_OPS;
-} else {
+if (libbpf_attach_type_by_name(prog_name, &attach_type)) {
log_err("Unexpected prog: %s", prog_name);
goto err;
}
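
The hunk above is the payoff of the section-name work: instead of a
hard-coded strcmp() chain per program, callers derive the attach type from
the ELF section name via libbpf_attach_type_by_name(), one of the
interfaces exercised by test_section_names.c earlier in this series. A
minimal userspace sketch (assuming a libbpf that includes this series):

#include <stdio.h>
#include <bpf/libbpf.h>

int main(void)
{
	enum bpf_attach_type attach_type;

	/* Maps a section name to the attach type it implies; returns 0 on
	 * success and a negative error for names carrying no attach type. */
	if (libbpf_attach_type_by_name("cgroup/connect6", &attach_type)) {
		fprintf(stderr, "no attach type for this section name\n");
		return 1;
	}

	/* Expect BPF_CGROUP_INET6_CONNECT, per test_section_names.c above */
	printf("attach_type = %d\n", attach_type);
	return 0;
}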

File diff suppressed because it is too large.