Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next

Pull networking updates from David Miller:

 1) Support 6GHz band in ath11k driver, from Rajkumar Manoharan.

 2) Support UDP segmentation in core TSO code, from Eric Dumazet.

 3) Allow flashing different flash images in cxgb4 driver, from Vishal
    Kulkarni.

 4) Add drop frames counter and flow status to tc flower offloading,
    from Po Liu.

 5) Support n-tuple filters in cxgb4, from Vishal Kulkarni.

 6) Various new indirect call avoidance, from Eric Dumazet and Brian
    Vazquez.

 7) Fix BPF verifier failures on 32-bit pointer arithmetic, from
    Yonghong Song.

 8) Support querying and setting hardware address of a port function via
    devlink, use this in mlx5, from Parav Pandit.

 9) Support hw ipsec offload on bonding slaves, from Jarod Wilson.

10) Switch qca8k driver over to phylink, from Jonathan McDowell.

11) In bpftool, show list of processes holding BPF FD references to
    maps, programs, links, and btf objects. From Andrii Nakryiko.

12) Several conversions over to generic power management, from Vaibhav
    Gupta.

13) Add support for SO_KEEPALIVE et al. to bpf_setsockopt(), from Dmitry
    Yakunin (a short sketch follows this list).

14) Various https url conversions, from Alexander A. Klimov.

15) Timestamping and PHC support for mscc PHY driver, from Antoine
    Tenart.

16) Support bpf iterating over tcp and udp sockets, from Yonghong Song.

17) Support 5GBASE-T i40e NICs, from Aleksandr Loktionov.

18) Add kTLS RX HW offload support to mlx5e, from Tariq Toukan.

19) Fix the ->ndo_start_xmit() return type to be netdev_tx_t in several
    drivers. From Luc Van Oostenryck.

20) XDP support for xen-netfront, from Denis Kirjanov.

21) Support receive buffer autotuning in MPTCP, from Florian Westphal.

22) Support EF100 chip in sfc driver, from Edward Cree.

23) Add XDP support to mvpp2 driver, from Matteo Croce.

24) Support MPTCP in sock_diag, from Paolo Abeni.

25) Commonize UDP tunnel offloading code by creating udp_tunnel_nic
    infrastructure, from Jakub Kicinski.

26) Several pci_ --> dma_ API conversions, from Christophe JAILLET.

27) Add FLOW_ACTION_POLICE support to mlxsw, from Ido Schimmel.

28) Add SK_LOOKUP bpf program type, from Jakub Sitnicki.

29) Refactor a lot of networking socket option handling code in order to
    avoid set_fs() calls, from Christoph Hellwig.

30) Add rfc4884 support to icmp code, from Willem de Bruijn.

31) Support TBF offload in dpaa2-eth driver, from Ioana Ciornei.

32) Support XDP_REDIRECT in qede driver, from Alexander Lobakin.

33) Support PCI relaxed ordering in mlx5 driver, from Aya Levin.

34) Support TCP syncookies in MPTCP, from Florian Westphal.

35) Fix several tricky cases of PMTU handling wrt. bridging, from Stefano
    Brivio.
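
As a flavor of item 13, here is a minimal, hypothetical sketch of a sockops
program using the extended bpf_setsockopt(); the program name, headers and
the chosen callback are illustrative, not taken from this series:

	/* Enable keepalive once an active connection is established.
	 * SO_KEEPALIVE handling in bpf_setsockopt() is the new part.
	 */
	#include <linux/bpf.h>
	#include <sys/socket.h>
	#include <bpf/bpf_helpers.h>

	SEC("sockops")
	int enable_keepalive(struct bpf_sock_ops *skops)
	{
		int one = 1;

		if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
			bpf_setsockopt(skops, SOL_SOCKET, SO_KEEPALIVE,
				       &one, sizeof(one));
		return 1;
	}

	char _license[] SEC("license") = "GPL";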

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (2056 commits)
  net: thunderx: initialize VF's mailbox mutex before first usage
  usb: hso: remove bogus check for EINPROGRESS
  usb: hso: no complaint about kmalloc failure
  hso: fix bailout in error case of probe
  ip_tunnel_core: Fix build for archs without _HAVE_ARCH_IPV6_CSUM
  selftests/net: relax cpu affinity requirement in msg_zerocopy test
  mptcp: be careful on subflow creation
  selftests: rtnetlink: make kci_test_encap() return sub-test result
  selftests: rtnetlink: correct the final return value for the test
  net: dsa: sja1105: use detected device id instead of DT one on mismatch
  tipc: set ub->ifindex for local ipv6 address
  ipv6: add ipv6_dev_find()
  net: openvswitch: silence suspicious RCU usage warning
  Revert "vxlan: fix tos value before xmit"
  ptp: only allow phase values lower than 1 period
  farsync: switch from 'pci_' to 'dma_' API
  wan: wanxl: switch from 'pci_' to 'dma_' API
  hv_netvsc: do not use VF device if link is down
  dpaa2-eth: Fix passing zero to 'PTR_ERR' warning
  net: macb: Properly handle phylink on at91sam9x
  ...
Author: Linus Torvalds
Date:   2020-08-05 20:13:21 -07:00
2088 changed files with 117472 additions and 40981 deletions

View File

@@ -56,6 +56,7 @@ TARGETS += splice
TARGETS += static_keys
TARGETS += sync
TARGETS += sysctl
TARGETS += tc-testing
TARGETS += timens
ifneq (1, $(quicktest))
TARGETS += timers

View File

@@ -111,6 +111,7 @@ SCRATCH_DIR := $(OUTPUT)/tools
BUILD_DIR := $(SCRATCH_DIR)/build
INCLUDE_DIR := $(SCRATCH_DIR)/include
BPFOBJ := $(BUILD_DIR)/libbpf/libbpf.a
RESOLVE_BTFIDS := $(BUILD_DIR)/resolve_btfids/resolve_btfids
# Define simple and short `make test_progs`, `make test_sysctl`, etc targets
# to build individual tests.
@@ -134,12 +135,12 @@ $(OUTPUT)/test_stub.o: test_stub.c $(BPFOBJ)
$(call msg,CC,,$@)
$(CC) -c $(CFLAGS) -o $@ $<
-VMLINUX_BTF_PATHS := $(if $(O),$(O)/vmlinux) \
+VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \
$(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \
../../../../vmlinux \
/sys/kernel/btf/vmlinux \
/boot/vmlinux-$(shell uname -r)
-VMLINUX_BTF := $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS))))
+VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS))))
$(OUTPUT)/runqslower: $(BPFOBJ)
$(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/runqslower \
@@ -177,13 +178,28 @@ $(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
$(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) OUTPUT=$(BUILD_DIR)/libbpf/ \
DESTDIR=$(SCRATCH_DIR) prefix= all install_headers
-$(BUILD_DIR)/libbpf $(BUILD_DIR)/bpftool $(INCLUDE_DIR):
+$(BUILD_DIR)/libbpf $(BUILD_DIR)/bpftool $(BUILD_DIR)/resolve_btfids $(INCLUDE_DIR):
$(call msg,MKDIR,,$@)
mkdir -p $@
$(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) | $(BPFTOOL) $(INCLUDE_DIR)
ifeq ($(VMLINUX_H),)
$(call msg,GEN,,$@)
$(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@
else
$(call msg,CP,,$@)
cp "$(VMLINUX_H)" $@
endif
$(RESOLVE_BTFIDS): $(BPFOBJ) | $(BUILD_DIR)/resolve_btfids \
$(TOOLSDIR)/bpf/resolve_btfids/main.c \
$(TOOLSDIR)/lib/rbtree.c \
$(TOOLSDIR)/lib/zalloc.c \
$(TOOLSDIR)/lib/string.c \
$(TOOLSDIR)/lib/ctype.c \
$(TOOLSDIR)/lib/str_error_r.c
$(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/resolve_btfids \
OUTPUT=$(BUILD_DIR)/resolve_btfids/ BPFOBJ=$(BPFOBJ)
# Get Clang's default includes on this system, as opposed to those seen by
# '-target bpf'. This fixes "missing" files on some architectures/distros,
@@ -347,9 +363,11 @@ endif
$(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \
$(TRUNNER_EXTRA_OBJS) $$(BPFOBJ) \
$(RESOLVE_BTFIDS) \
| $(TRUNNER_BINARY)-extras
$$(call msg,BINARY,,$$@)
$$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) -o $$@
$(RESOLVE_BTFIDS) --no-fail --btf btf_data.o $$@
endef

View File

@@ -2,20 +2,6 @@
#ifndef __BPF_LEGACY__
#define __BPF_LEGACY__
/*
* legacy bpf_map_def with extra fields supported only by bpf_load(), do not
* use outside of samples/bpf
*/
struct bpf_map_def_legacy {
unsigned int type;
unsigned int key_size;
unsigned int value_size;
unsigned int max_entries;
unsigned int map_flags;
unsigned int inner_map_idx;
unsigned int numa_node;
};
#define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val) \
struct ____btf_map_##name { \
type_key key; \

View File

@@ -290,3 +290,26 @@ free_mem:
free(fhp);
return ret;
}
int cgroup_setup_and_join(const char *path) {
int cg_fd;
if (setup_cgroup_environment()) {
fprintf(stderr, "Failed to setup cgroup environment\n");
return -EINVAL;
}
cg_fd = create_and_get_cgroup(path);
if (cg_fd < 0) {
fprintf(stderr, "Failed to create test cgroup\n");
cleanup_cgroup_environment();
return cg_fd;
}
if (join_cgroup(path)) {
fprintf(stderr, "Failed to join cgroup\n");
cleanup_cgroup_environment();
return -EINVAL;
}
return cg_fd;
}
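
The cgroup_setup_and_join() helper above folds the previous
setup/create/join sequence into a single call. A hypothetical caller
(the cgroup path is illustrative) might look like:

	int cg_fd = cgroup_setup_and_join("/test_cgroup");

	if (cg_fd < 0)
		return; /* the helper has already logged the failure */
	/* ... exercise the test against cg_fd ... */
	close(cg_fd);
	cleanup_cgroup_environment();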

View File

@@ -9,6 +9,7 @@
__FILE__, __LINE__, clean_errno(), ##__VA_ARGS__)
int cgroup_setup_and_join(const char *path);
int create_and_get_cgroup(const char *path);
int join_cgroup(const char *path);
int setup_cgroup_environment(void);

View File

@@ -58,20 +58,10 @@ int main(int argc, char **argv)
int exit_code = 1;
char buf[256];
-err = setup_cgroup_environment();
-if (CHECK(err, "setup_cgroup_environment", "err %d errno %d\n", err,
-	  errno))
+cgroup_fd = cgroup_setup_and_join(TEST_CGROUP);
+if (CHECK(cgroup_fd < 0, "cgroup_setup_and_join", "err %d errno %d\n", cgroup_fd, errno))
 	return 1;
-cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
-if (CHECK(cgroup_fd < 0, "create_and_get_cgroup", "err %d errno %d\n",
-	  cgroup_fd, errno))
-	goto cleanup_cgroup_env;
-err = join_cgroup(TEST_CGROUP);
-if (CHECK(err, "join_cgroup", "err %d errno %d\n", err, errno))
-	goto cleanup_cgroup_env;
err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
if (CHECK(err, "bpf_prog_load", "err %d errno %d\n", err, errno))
goto cleanup_cgroup_env;

View File

@@ -7,8 +7,6 @@
#include <arpa/inet.h>
#include <sys/epoll.h>
#include <linux/err.h>
#include <linux/in.h>
#include <linux/in6.h>
@@ -17,8 +15,13 @@
#include "network_helpers.h"
#define clean_errno() (errno == 0 ? "None" : strerror(errno))
-#define log_err(MSG, ...) fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \
-	__FILE__, __LINE__, clean_errno(), ##__VA_ARGS__)
+#define log_err(MSG, ...) ({ \
+	int __save = errno; \
+	fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \
+		__FILE__, __LINE__, clean_errno(), \
+		##__VA_ARGS__); \
+	errno = __save; \
+})
struct ipv4_packet pkt_v4 = {
.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
@@ -37,131 +40,169 @@ struct ipv6_packet pkt_v6 = {
.tcp.doff = 5,
};
int start_server_with_port(int family, int type, __u16 port)
static int settimeo(int fd, int timeout_ms)
{
struct sockaddr_storage addr = {};
socklen_t len;
int fd;
struct timeval timeout = { .tv_sec = 3 };
if (family == AF_INET) {
struct sockaddr_in *sin = (void *)&addr;
sin->sin_family = AF_INET;
sin->sin_port = htons(port);
len = sizeof(*sin);
} else {
struct sockaddr_in6 *sin6 = (void *)&addr;
sin6->sin6_family = AF_INET6;
sin6->sin6_port = htons(port);
len = sizeof(*sin6);
if (timeout_ms > 0) {
timeout.tv_sec = timeout_ms / 1000;
timeout.tv_usec = (timeout_ms % 1000) * 1000;
}
fd = socket(family, type | SOCK_NONBLOCK, 0);
if (fd < 0) {
log_err("Failed to create server socket");
return -1;
}
if (bind(fd, (const struct sockaddr *)&addr, len) < 0) {
log_err("Failed to bind socket");
close(fd);
return -1;
}
if (type == SOCK_STREAM) {
if (listen(fd, 1) < 0) {
log_err("Failed to listed on socket");
close(fd);
return -1;
}
}
return fd;
}
int start_server(int family, int type)
{
return start_server_with_port(family, type, 0);
}
static const struct timeval timeo_sec = { .tv_sec = 3 };
static const size_t timeo_optlen = sizeof(timeo_sec);
int connect_to_fd(int family, int type, int server_fd)
{
int fd, save_errno;
fd = socket(family, type, 0);
if (fd < 0) {
log_err("Failed to create client socket");
return -1;
}
if (connect_fd_to_fd(fd, server_fd) < 0 && errno != EINPROGRESS) {
save_errno = errno;
close(fd);
errno = save_errno;
return -1;
}
return fd;
}
int connect_fd_to_fd(int client_fd, int server_fd)
{
struct sockaddr_storage addr;
socklen_t len = sizeof(addr);
int save_errno;
if (setsockopt(client_fd, SOL_SOCKET, SO_RCVTIMEO, &timeo_sec,
timeo_optlen)) {
if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &timeout,
sizeof(timeout))) {
log_err("Failed to set SO_RCVTIMEO");
return -1;
}
if (getsockname(server_fd, (struct sockaddr *)&addr, &len)) {
log_err("Failed to get server addr");
return -1;
}
if (connect(client_fd, (const struct sockaddr *)&addr, len) < 0) {
if (errno != EINPROGRESS) {
save_errno = errno;
log_err("Failed to connect to server");
errno = save_errno;
}
if (setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &timeout,
sizeof(timeout))) {
log_err("Failed to set SO_SNDTIMEO");
return -1;
}
return 0;
}
int connect_wait(int fd)
#define save_errno_close(fd) ({ int __save = errno; close(fd); errno = __save; })
int start_server(int family, int type, const char *addr_str, __u16 port,
int timeout_ms)
{
struct epoll_event ev = {}, events[2];
int timeout_ms = 1000;
int efd, nfd;
struct sockaddr_storage addr = {};
socklen_t len;
int fd;
efd = epoll_create1(EPOLL_CLOEXEC);
if (efd < 0) {
log_err("Failed to open epoll fd");
if (make_sockaddr(family, addr_str, port, &addr, &len))
return -1;
fd = socket(family, type, 0);
if (fd < 0) {
log_err("Failed to create server socket");
return -1;
}
ev.events = EPOLLRDHUP | EPOLLOUT;
ev.data.fd = fd;
if (settimeo(fd, timeout_ms))
goto error_close;
if (epoll_ctl(efd, EPOLL_CTL_ADD, fd, &ev) < 0) {
log_err("Failed to register fd=%d on epoll fd=%d", fd, efd);
close(efd);
return -1;
if (bind(fd, (const struct sockaddr *)&addr, len) < 0) {
log_err("Failed to bind socket");
goto error_close;
}
nfd = epoll_wait(efd, events, ARRAY_SIZE(events), timeout_ms);
if (nfd < 0)
log_err("Failed to wait for I/O event on epoll fd=%d", efd);
if (type == SOCK_STREAM) {
if (listen(fd, 1) < 0) {
log_err("Failed to listed on socket");
goto error_close;
}
}
close(efd);
return nfd;
return fd;
error_close:
save_errno_close(fd);
return -1;
}
static int connect_fd_to_addr(int fd,
const struct sockaddr_storage *addr,
socklen_t addrlen)
{
if (connect(fd, (const struct sockaddr *)addr, addrlen)) {
log_err("Failed to connect to server");
return -1;
}
return 0;
}
int connect_to_fd(int server_fd, int timeout_ms)
{
struct sockaddr_storage addr;
struct sockaddr_in *addr_in;
socklen_t addrlen, optlen;
int fd, type;
optlen = sizeof(type);
if (getsockopt(server_fd, SOL_SOCKET, SO_TYPE, &type, &optlen)) {
log_err("getsockopt(SOL_TYPE)");
return -1;
}
addrlen = sizeof(addr);
if (getsockname(server_fd, (struct sockaddr *)&addr, &addrlen)) {
log_err("Failed to get server addr");
return -1;
}
addr_in = (struct sockaddr_in *)&addr;
fd = socket(addr_in->sin_family, type, 0);
if (fd < 0) {
log_err("Failed to create client socket");
return -1;
}
if (settimeo(fd, timeout_ms))
goto error_close;
if (connect_fd_to_addr(fd, &addr, addrlen))
goto error_close;
return fd;
error_close:
save_errno_close(fd);
return -1;
}
int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms)
{
struct sockaddr_storage addr;
socklen_t len = sizeof(addr);
if (settimeo(client_fd, timeout_ms))
return -1;
if (getsockname(server_fd, (struct sockaddr *)&addr, &len)) {
log_err("Failed to get server addr");
return -1;
}
if (connect_fd_to_addr(client_fd, &addr, len))
return -1;
return 0;
}
int make_sockaddr(int family, const char *addr_str, __u16 port,
struct sockaddr_storage *addr, socklen_t *len)
{
if (family == AF_INET) {
struct sockaddr_in *sin = (void *)addr;
sin->sin_family = AF_INET;
sin->sin_port = htons(port);
if (addr_str &&
inet_pton(AF_INET, addr_str, &sin->sin_addr) != 1) {
log_err("inet_pton(AF_INET, %s)", addr_str);
return -1;
}
if (len)
*len = sizeof(*sin);
return 0;
} else if (family == AF_INET6) {
struct sockaddr_in6 *sin6 = (void *)addr;
sin6->sin6_family = AF_INET6;
sin6->sin6_port = htons(port);
if (addr_str &&
inet_pton(AF_INET6, addr_str, &sin6->sin6_addr) != 1) {
log_err("inet_pton(AF_INET6, %s)", addr_str);
return -1;
}
if (len)
*len = sizeof(*sin6);
return 0;
}
return -1;
}
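
Taken together, the reworked helpers let callers pick the bind address and
timeouts explicitly, and connect_to_fd() now derives family and type from
the server socket itself. A minimal usage sketch, with all values
illustrative:

	/* Listen on 127.0.0.1 on an ephemeral port; timeout_ms == 0 falls
	 * back to settimeo()'s 3-second default.
	 */
	int client_fd, server_fd = start_server(AF_INET, SOCK_STREAM,
						"127.0.0.1", 0, 0);

	if (server_fd < 0)
		return;
	/* family/type are read back via getsockname() and SO_TYPE */
	client_fd = connect_to_fd(server_fd, 1000 /* ms */);
	if (client_fd >= 0)
		close(client_fd);
	close(server_fd);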

View File

@@ -33,10 +33,11 @@ struct ipv6_packet {
} __packed;
extern struct ipv6_packet pkt_v6;
-int start_server(int family, int type);
-int start_server_with_port(int family, int type, __u16 port);
-int connect_to_fd(int family, int type, int server_fd);
-int connect_fd_to_fd(int client_fd, int server_fd);
-int connect_wait(int client_fd);
+int start_server(int family, int type, const char *addr, __u16 port,
+		 int timeout_ms);
+int connect_to_fd(int server_fd, int timeout_ms);
+int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms);
+int make_sockaddr(int family, const char *addr_str, __u16 port,
+		  struct sockaddr_storage *addr, socklen_t *len);
#endif

View File

@@ -0,0 +1,41 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include <time.h>
#include "test_autoload.skel.h"
void test_autoload(void)
{
int duration = 0, err;
struct test_autoload* skel;
skel = test_autoload__open_and_load();
/* prog3 should be broken */
if (CHECK(skel, "skel_open_and_load", "unexpected success\n"))
goto cleanup;
skel = test_autoload__open();
if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
goto cleanup;
/* don't load prog3 */
bpf_program__set_autoload(skel->progs.prog3, false);
err = test_autoload__load(skel);
if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err))
goto cleanup;
err = test_autoload__attach(skel);
if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
goto cleanup;
usleep(1);
CHECK(!skel->bss->prog1_called, "prog1", "not called\n");
CHECK(!skel->bss->prog2_called, "prog2", "not called\n");
CHECK(skel->bss->prog3_called, "prog3", "called?!\n");
cleanup:
test_autoload__destroy(skel);
}

View File

@@ -5,11 +5,23 @@
#include "bpf_iter_netlink.skel.h"
#include "bpf_iter_bpf_map.skel.h"
#include "bpf_iter_task.skel.h"
#include "bpf_iter_task_stack.skel.h"
#include "bpf_iter_task_file.skel.h"
#include "bpf_iter_tcp4.skel.h"
#include "bpf_iter_tcp6.skel.h"
#include "bpf_iter_udp4.skel.h"
#include "bpf_iter_udp6.skel.h"
#include "bpf_iter_test_kern1.skel.h"
#include "bpf_iter_test_kern2.skel.h"
#include "bpf_iter_test_kern3.skel.h"
#include "bpf_iter_test_kern4.skel.h"
#include "bpf_iter_bpf_hash_map.skel.h"
#include "bpf_iter_bpf_percpu_hash_map.skel.h"
#include "bpf_iter_bpf_array_map.skel.h"
#include "bpf_iter_bpf_percpu_array_map.skel.h"
#include "bpf_iter_bpf_sk_storage_map.skel.h"
#include "bpf_iter_test_kern5.skel.h"
#include "bpf_iter_test_kern6.skel.h"
static int duration;
@@ -106,6 +118,20 @@ static void test_task(void)
bpf_iter_task__destroy(skel);
}
static void test_task_stack(void)
{
struct bpf_iter_task_stack *skel;
skel = bpf_iter_task_stack__open_and_load();
if (CHECK(!skel, "bpf_iter_task_stack__open_and_load",
"skeleton open_and_load failed\n"))
return;
do_dummy_read(skel->progs.dump_task_stack);
bpf_iter_task_stack__destroy(skel);
}
static void test_task_file(void)
{
struct bpf_iter_task_file *skel;
@@ -120,6 +146,62 @@ static void test_task_file(void)
bpf_iter_task_file__destroy(skel);
}
static void test_tcp4(void)
{
struct bpf_iter_tcp4 *skel;
skel = bpf_iter_tcp4__open_and_load();
if (CHECK(!skel, "bpf_iter_tcp4__open_and_load",
"skeleton open_and_load failed\n"))
return;
do_dummy_read(skel->progs.dump_tcp4);
bpf_iter_tcp4__destroy(skel);
}
static void test_tcp6(void)
{
struct bpf_iter_tcp6 *skel;
skel = bpf_iter_tcp6__open_and_load();
if (CHECK(!skel, "bpf_iter_tcp6__open_and_load",
"skeleton open_and_load failed\n"))
return;
do_dummy_read(skel->progs.dump_tcp6);
bpf_iter_tcp6__destroy(skel);
}
static void test_udp4(void)
{
struct bpf_iter_udp4 *skel;
skel = bpf_iter_udp4__open_and_load();
if (CHECK(!skel, "bpf_iter_udp4__open_and_load",
"skeleton open_and_load failed\n"))
return;
do_dummy_read(skel->progs.dump_udp4);
bpf_iter_udp4__destroy(skel);
}
static void test_udp6(void)
{
struct bpf_iter_udp6 *skel;
skel = bpf_iter_udp6__open_and_load();
if (CHECK(!skel, "bpf_iter_udp6__open_and_load",
"skeleton open_and_load failed\n"))
return;
do_dummy_read(skel->progs.dump_udp6);
bpf_iter_udp6__destroy(skel);
}
/* The expected string is less than 16 bytes */
static int do_read_with_fd(int iter_fd, const char *expected,
bool read_one_char)
@@ -380,6 +462,440 @@ out:
bpf_iter_test_kern4__destroy(skel);
}
static void test_bpf_hash_map(void)
{
__u32 expected_key_a = 0, expected_key_b = 0, expected_key_c = 0;
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
struct bpf_iter_bpf_hash_map *skel;
int err, i, len, map_fd, iter_fd;
__u64 val, expected_val = 0;
struct bpf_link *link;
struct key_t {
int a;
int b;
int c;
} key;
char buf[64];
skel = bpf_iter_bpf_hash_map__open();
if (CHECK(!skel, "bpf_iter_bpf_hash_map__open",
"skeleton open failed\n"))
return;
skel->bss->in_test_mode = true;
err = bpf_iter_bpf_hash_map__load(skel);
if (CHECK(!skel, "bpf_iter_bpf_hash_map__load",
"skeleton load failed\n"))
goto out;
/* iterator with hashmap2 and hashmap3 should fail */
opts.map_fd = bpf_map__fd(skel->maps.hashmap2);
link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
if (CHECK(!IS_ERR(link), "attach_iter",
"attach_iter for hashmap2 unexpected succeeded\n"))
goto out;
opts.map_fd = bpf_map__fd(skel->maps.hashmap3);
link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
if (CHECK(!IS_ERR(link), "attach_iter",
"attach_iter for hashmap3 unexpected succeeded\n"))
goto out;
/* hashmap1 should be good, update map values here */
map_fd = bpf_map__fd(skel->maps.hashmap1);
for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
key.a = i + 1;
key.b = i + 2;
key.c = i + 3;
val = i + 4;
expected_key_a += key.a;
expected_key_b += key.b;
expected_key_c += key.c;
expected_val += val;
err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
if (CHECK(err, "map_update", "map_update failed\n"))
goto out;
}
opts.map_fd = map_fd;
link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
goto out;
iter_fd = bpf_iter_create(bpf_link__fd(link));
if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
goto free_link;
/* do some tests */
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
goto close_iter;
/* test results */
if (CHECK(skel->bss->key_sum_a != expected_key_a,
"key_sum_a", "got %u expected %u\n",
skel->bss->key_sum_a, expected_key_a))
goto close_iter;
if (CHECK(skel->bss->key_sum_b != expected_key_b,
"key_sum_b", "got %u expected %u\n",
skel->bss->key_sum_b, expected_key_b))
goto close_iter;
if (CHECK(skel->bss->val_sum != expected_val,
"val_sum", "got %llu expected %llu\n",
skel->bss->val_sum, expected_val))
goto close_iter;
close_iter:
close(iter_fd);
free_link:
bpf_link__destroy(link);
out:
bpf_iter_bpf_hash_map__destroy(skel);
}
static void test_bpf_percpu_hash_map(void)
{
__u32 expected_key_a = 0, expected_key_b = 0, expected_key_c = 0;
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
struct bpf_iter_bpf_percpu_hash_map *skel;
int err, i, j, len, map_fd, iter_fd;
__u32 expected_val = 0;
struct bpf_link *link;
struct key_t {
int a;
int b;
int c;
} key;
char buf[64];
void *val;
val = malloc(8 * bpf_num_possible_cpus());
skel = bpf_iter_bpf_percpu_hash_map__open();
if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__open",
"skeleton open failed\n"))
return;
skel->rodata->num_cpus = bpf_num_possible_cpus();
err = bpf_iter_bpf_percpu_hash_map__load(skel);
if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__load",
"skeleton load failed\n"))
goto out;
/* update map values here */
map_fd = bpf_map__fd(skel->maps.hashmap1);
for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
key.a = i + 1;
key.b = i + 2;
key.c = i + 3;
expected_key_a += key.a;
expected_key_b += key.b;
expected_key_c += key.c;
for (j = 0; j < bpf_num_possible_cpus(); j++) {
*(__u32 *)(val + j * 8) = i + j;
expected_val += i + j;
}
err = bpf_map_update_elem(map_fd, &key, val, BPF_ANY);
if (CHECK(err, "map_update", "map_update failed\n"))
goto out;
}
opts.map_fd = map_fd;
link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_hash_map, &opts);
if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
goto out;
iter_fd = bpf_iter_create(bpf_link__fd(link));
if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
goto free_link;
/* do some tests */
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
goto close_iter;
/* test results */
if (CHECK(skel->bss->key_sum_a != expected_key_a,
"key_sum_a", "got %u expected %u\n",
skel->bss->key_sum_a, expected_key_a))
goto close_iter;
if (CHECK(skel->bss->key_sum_b != expected_key_b,
"key_sum_b", "got %u expected %u\n",
skel->bss->key_sum_b, expected_key_b))
goto close_iter;
if (CHECK(skel->bss->val_sum != expected_val,
"val_sum", "got %u expected %u\n",
skel->bss->val_sum, expected_val))
goto close_iter;
close_iter:
close(iter_fd);
free_link:
bpf_link__destroy(link);
out:
bpf_iter_bpf_percpu_hash_map__destroy(skel);
}
static void test_bpf_array_map(void)
{
__u64 val, expected_val = 0, res_first_val, first_val = 0;
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
__u32 expected_key = 0, res_first_key;
struct bpf_iter_bpf_array_map *skel;
int err, i, map_fd, iter_fd;
struct bpf_link *link;
char buf[64] = {};
int len, start;
skel = bpf_iter_bpf_array_map__open_and_load();
if (CHECK(!skel, "bpf_iter_bpf_array_map__open_and_load",
"skeleton open_and_load failed\n"))
return;
map_fd = bpf_map__fd(skel->maps.arraymap1);
for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
val = i + 4;
expected_key += i;
expected_val += val;
if (i == 0)
first_val = val;
err = bpf_map_update_elem(map_fd, &i, &val, BPF_ANY);
if (CHECK(err, "map_update", "map_update failed\n"))
goto out;
}
opts.map_fd = map_fd;
link = bpf_program__attach_iter(skel->progs.dump_bpf_array_map, &opts);
if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
goto out;
iter_fd = bpf_iter_create(bpf_link__fd(link));
if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
goto free_link;
/* do some tests */
start = 0;
while ((len = read(iter_fd, buf + start, sizeof(buf) - start)) > 0)
start += len;
if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
goto close_iter;
/* test results */
res_first_key = *(__u32 *)buf;
res_first_val = *(__u64 *)(buf + sizeof(__u32));
if (CHECK(res_first_key != 0 || res_first_val != first_val,
"bpf_seq_write",
"seq_write failure: first key %u vs expected 0, "
" first value %llu vs expected %llu\n",
res_first_key, res_first_val, first_val))
goto close_iter;
if (CHECK(skel->bss->key_sum != expected_key,
"key_sum", "got %u expected %u\n",
skel->bss->key_sum, expected_key))
goto close_iter;
if (CHECK(skel->bss->val_sum != expected_val,
"val_sum", "got %llu expected %llu\n",
skel->bss->val_sum, expected_val))
goto close_iter;
for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
err = bpf_map_lookup_elem(map_fd, &i, &val);
if (CHECK(err, "map_lookup", "map_lookup failed\n"))
goto out;
if (CHECK(i != val, "invalid_val",
"got value %llu expected %u\n", val, i))
goto out;
}
close_iter:
close(iter_fd);
free_link:
bpf_link__destroy(link);
out:
bpf_iter_bpf_array_map__destroy(skel);
}
static void test_bpf_percpu_array_map(void)
{
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
struct bpf_iter_bpf_percpu_array_map *skel;
__u32 expected_key = 0, expected_val = 0;
int err, i, j, map_fd, iter_fd;
struct bpf_link *link;
char buf[64];
void *val;
int len;
val = malloc(8 * bpf_num_possible_cpus());
skel = bpf_iter_bpf_percpu_array_map__open();
if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__open",
"skeleton open failed\n"))
return;
skel->rodata->num_cpus = bpf_num_possible_cpus();
err = bpf_iter_bpf_percpu_array_map__load(skel);
if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__load",
"skeleton load failed\n"))
goto out;
/* update map values here */
map_fd = bpf_map__fd(skel->maps.arraymap1);
for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
expected_key += i;
for (j = 0; j < bpf_num_possible_cpus(); j++) {
*(__u32 *)(val + j * 8) = i + j;
expected_val += i + j;
}
err = bpf_map_update_elem(map_fd, &i, val, BPF_ANY);
if (CHECK(err, "map_update", "map_update failed\n"))
goto out;
}
opts.map_fd = map_fd;
link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_array_map, &opts);
if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
goto out;
iter_fd = bpf_iter_create(bpf_link__fd(link));
if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
goto free_link;
/* do some tests */
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
goto close_iter;
/* test results */
if (CHECK(skel->bss->key_sum != expected_key,
"key_sum", "got %u expected %u\n",
skel->bss->key_sum, expected_key))
goto close_iter;
if (CHECK(skel->bss->val_sum != expected_val,
"val_sum", "got %u expected %u\n",
skel->bss->val_sum, expected_val))
goto close_iter;
close_iter:
close(iter_fd);
free_link:
bpf_link__destroy(link);
out:
bpf_iter_bpf_percpu_array_map__destroy(skel);
}
static void test_bpf_sk_storage_map(void)
{
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
int err, i, len, map_fd, iter_fd, num_sockets;
struct bpf_iter_bpf_sk_storage_map *skel;
int sock_fd[3] = {-1, -1, -1};
__u32 val, expected_val = 0;
struct bpf_link *link;
char buf[64];
skel = bpf_iter_bpf_sk_storage_map__open_and_load();
if (CHECK(!skel, "bpf_iter_bpf_sk_storage_map__open_and_load",
"skeleton open_and_load failed\n"))
return;
map_fd = bpf_map__fd(skel->maps.sk_stg_map);
num_sockets = ARRAY_SIZE(sock_fd);
for (i = 0; i < num_sockets; i++) {
sock_fd[i] = socket(AF_INET6, SOCK_STREAM, 0);
if (CHECK(sock_fd[i] < 0, "socket", "errno: %d\n", errno))
goto out;
val = i + 1;
expected_val += val;
err = bpf_map_update_elem(map_fd, &sock_fd[i], &val,
BPF_NOEXIST);
if (CHECK(err, "map_update", "map_update failed\n"))
goto out;
}
opts.map_fd = map_fd;
link = bpf_program__attach_iter(skel->progs.dump_bpf_sk_storage_map, &opts);
if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
goto out;
iter_fd = bpf_iter_create(bpf_link__fd(link));
if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
goto free_link;
/* do some tests */
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
goto close_iter;
/* test results */
if (CHECK(skel->bss->ipv6_sk_count != num_sockets,
"ipv6_sk_count", "got %u expected %u\n",
skel->bss->ipv6_sk_count, num_sockets))
goto close_iter;
if (CHECK(skel->bss->val_sum != expected_val,
"val_sum", "got %u expected %u\n",
skel->bss->val_sum, expected_val))
goto close_iter;
close_iter:
close(iter_fd);
free_link:
bpf_link__destroy(link);
out:
for (i = 0; i < num_sockets; i++) {
if (sock_fd[i] >= 0)
close(sock_fd[i]);
}
bpf_iter_bpf_sk_storage_map__destroy(skel);
}
static void test_rdonly_buf_out_of_bound(void)
{
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
struct bpf_iter_test_kern5 *skel;
struct bpf_link *link;
skel = bpf_iter_test_kern5__open_and_load();
if (CHECK(!skel, "bpf_iter_test_kern5__open_and_load",
"skeleton open_and_load failed\n"))
return;
opts.map_fd = bpf_map__fd(skel->maps.hashmap1);
link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
if (CHECK(!IS_ERR(link), "attach_iter", "unexpected success\n"))
bpf_link__destroy(link);
bpf_iter_test_kern5__destroy(skel);
}
static void test_buf_neg_offset(void)
{
struct bpf_iter_test_kern6 *skel;
skel = bpf_iter_test_kern6__open_and_load();
if (CHECK(skel, "bpf_iter_test_kern6__open_and_load",
"skeleton open_and_load unexpected success\n"))
bpf_iter_test_kern6__destroy(skel);
}
void test_bpf_iter(void)
{
if (test__start_subtest("btf_id_or_null"))
@@ -392,8 +908,18 @@ void test_bpf_iter(void)
test_bpf_map();
if (test__start_subtest("task"))
test_task();
if (test__start_subtest("task_stack"))
test_task_stack();
if (test__start_subtest("task_file"))
test_task_file();
if (test__start_subtest("tcp4"))
test_tcp4();
if (test__start_subtest("tcp6"))
test_tcp6();
if (test__start_subtest("udp4"))
test_udp4();
if (test__start_subtest("udp6"))
test_udp6();
if (test__start_subtest("anon"))
test_anon_iter(false);
if (test__start_subtest("anon-read-one-char"))
@@ -406,4 +932,18 @@ void test_bpf_iter(void)
test_overflow(true, false);
if (test__start_subtest("prog-ret-1"))
test_overflow(false, true);
if (test__start_subtest("bpf_hash_map"))
test_bpf_hash_map();
if (test__start_subtest("bpf_percpu_hash_map"))
test_bpf_percpu_hash_map();
if (test__start_subtest("bpf_array_map"))
test_bpf_array_map();
if (test__start_subtest("bpf_percpu_array_map"))
test_bpf_percpu_array_map();
if (test__start_subtest("bpf_sk_storage_map"))
test_bpf_sk_storage_map();
if (test__start_subtest("rdonly-buf-out-of-bound"))
test_rdonly_buf_out_of_bound();
if (test__start_subtest("buf-neg-offset"))
test_buf_neg_offset();
}
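
The userspace code above only attaches the iterators and drains their
output; the BPF side lives under progs/ in the tree. Schematically, a
dumper looks like the sketch below, modeled loosely on the task iterator;
vmlinux.h and the BPF_SEQ_PRINTF macro are assumed to be available as in
the selftests:

	SEC("iter/task")
	int dump_task(struct bpf_iter__task *ctx)
	{
		struct seq_file *seq = ctx->meta->seq;
		struct task_struct *task = ctx->task;

		/* a NULL task signals the end of the iteration */
		if (task == NULL)
			return 0;
		BPF_SEQ_PRINTF(seq, "%8d %s\n", task->pid, task->comm);
		return 0;
	}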

View File

@@ -0,0 +1,417 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2020 Google LLC.
*/
#include <test_progs.h>
#include <cgroup_helpers.h>
#include <network_helpers.h>
#include "progs/cg_storage_multi.h"
#include "cg_storage_multi_egress_only.skel.h"
#include "cg_storage_multi_isolated.skel.h"
#include "cg_storage_multi_shared.skel.h"
#define PARENT_CGROUP "/cgroup_storage"
#define CHILD_CGROUP "/cgroup_storage/child"
static int duration;
static bool assert_storage(struct bpf_map *map, const void *key,
struct cgroup_value *expected)
{
struct cgroup_value value;
int map_fd;
map_fd = bpf_map__fd(map);
if (CHECK(bpf_map_lookup_elem(map_fd, key, &value) < 0,
"map-lookup", "errno %d", errno))
return true;
if (CHECK(memcmp(&value, expected, sizeof(struct cgroup_value)),
"assert-storage", "storages differ"))
return true;
return false;
}
static bool assert_storage_noexist(struct bpf_map *map, const void *key)
{
struct cgroup_value value;
int map_fd;
map_fd = bpf_map__fd(map);
if (CHECK(bpf_map_lookup_elem(map_fd, key, &value) == 0,
"map-lookup", "succeeded, expected ENOENT"))
return true;
if (CHECK(errno != ENOENT,
"map-lookup", "errno %d, expected ENOENT", errno))
return true;
return false;
}
static bool connect_send(const char *cgroup_path)
{
bool res = true;
int server_fd = -1, client_fd = -1;
if (join_cgroup(cgroup_path))
goto out_clean;
server_fd = start_server(AF_INET, SOCK_DGRAM, NULL, 0, 0);
if (server_fd < 0)
goto out_clean;
client_fd = connect_to_fd(server_fd, 0);
if (client_fd < 0)
goto out_clean;
if (send(client_fd, "message", strlen("message"), 0) < 0)
goto out_clean;
res = false;
out_clean:
close(client_fd);
close(server_fd);
return res;
}
static void test_egress_only(int parent_cgroup_fd, int child_cgroup_fd)
{
struct cg_storage_multi_egress_only *obj;
struct cgroup_value expected_cgroup_value;
struct bpf_cgroup_storage_key key;
struct bpf_link *parent_link = NULL, *child_link = NULL;
bool err;
key.attach_type = BPF_CGROUP_INET_EGRESS;
obj = cg_storage_multi_egress_only__open_and_load();
if (CHECK(!obj, "skel-load", "errno %d", errno))
return;
/* Attach to parent cgroup, trigger packet from child.
* Assert that there is only one run and in that run the storage is
* parent cgroup's storage.
* Also assert that child cgroup's storage does not exist
*/
parent_link = bpf_program__attach_cgroup(obj->progs.egress,
parent_cgroup_fd);
if (CHECK(IS_ERR(parent_link), "parent-cg-attach",
"err %ld", PTR_ERR(parent_link)))
goto close_bpf_object;
err = connect_send(CHILD_CGROUP);
if (CHECK(err, "first-connect-send", "errno %d", errno))
goto close_bpf_object;
if (CHECK(obj->bss->invocations != 1,
"first-invoke", "invocations=%d", obj->bss->invocations))
goto close_bpf_object;
key.cgroup_inode_id = get_cgroup_id(PARENT_CGROUP);
expected_cgroup_value = (struct cgroup_value) { .egress_pkts = 1 };
if (assert_storage(obj->maps.cgroup_storage,
&key, &expected_cgroup_value))
goto close_bpf_object;
key.cgroup_inode_id = get_cgroup_id(CHILD_CGROUP);
if (assert_storage_noexist(obj->maps.cgroup_storage, &key))
goto close_bpf_object;
/* Attach to parent and child cgroup, trigger packet from child.
* Assert that there are two additional runs, one that runs with the
* parent cgroup's storage and one with the child cgroup's storage.
*/
child_link = bpf_program__attach_cgroup(obj->progs.egress,
child_cgroup_fd);
if (CHECK(IS_ERR(child_link), "child-cg-attach",
"err %ld", PTR_ERR(child_link)))
goto close_bpf_object;
err = connect_send(CHILD_CGROUP);
if (CHECK(err, "second-connect-send", "errno %d", errno))
goto close_bpf_object;
if (CHECK(obj->bss->invocations != 3,
"second-invoke", "invocations=%d", obj->bss->invocations))
goto close_bpf_object;
key.cgroup_inode_id = get_cgroup_id(PARENT_CGROUP);
expected_cgroup_value = (struct cgroup_value) { .egress_pkts = 2 };
if (assert_storage(obj->maps.cgroup_storage,
&key, &expected_cgroup_value))
goto close_bpf_object;
key.cgroup_inode_id = get_cgroup_id(CHILD_CGROUP);
expected_cgroup_value = (struct cgroup_value) { .egress_pkts = 1 };
if (assert_storage(obj->maps.cgroup_storage,
&key, &expected_cgroup_value))
goto close_bpf_object;
close_bpf_object:
if (!IS_ERR(parent_link))
bpf_link__destroy(parent_link);
if (!IS_ERR(child_link))
bpf_link__destroy(child_link);
cg_storage_multi_egress_only__destroy(obj);
}
static void test_isolated(int parent_cgroup_fd, int child_cgroup_fd)
{
struct cg_storage_multi_isolated *obj;
struct cgroup_value expected_cgroup_value;
struct bpf_cgroup_storage_key key;
struct bpf_link *parent_egress1_link = NULL, *parent_egress2_link = NULL;
struct bpf_link *child_egress1_link = NULL, *child_egress2_link = NULL;
struct bpf_link *parent_ingress_link = NULL, *child_ingress_link = NULL;
bool err;
obj = cg_storage_multi_isolated__open_and_load();
if (CHECK(!obj, "skel-load", "errno %d", errno))
return;
/* Attach to parent cgroup, trigger packet from child.
* Assert that there are three runs, two with parent cgroup egress and
* one with parent cgroup ingress, stored in separate parent storages.
* Also assert that child cgroup's storages do not exist
*/
parent_egress1_link = bpf_program__attach_cgroup(obj->progs.egress1,
parent_cgroup_fd);
if (CHECK(IS_ERR(parent_egress1_link), "parent-egress1-cg-attach",
"err %ld", PTR_ERR(parent_egress1_link)))
goto close_bpf_object;
parent_egress2_link = bpf_program__attach_cgroup(obj->progs.egress2,
parent_cgroup_fd);
if (CHECK(IS_ERR(parent_egress2_link), "parent-egress2-cg-attach",
"err %ld", PTR_ERR(parent_egress2_link)))
goto close_bpf_object;
parent_ingress_link = bpf_program__attach_cgroup(obj->progs.ingress,
parent_cgroup_fd);
if (CHECK(IS_ERR(parent_ingress_link), "parent-ingress-cg-attach",
"err %ld", PTR_ERR(parent_ingress_link)))
goto close_bpf_object;
err = connect_send(CHILD_CGROUP);
if (CHECK(err, "first-connect-send", "errno %d", errno))
goto close_bpf_object;
if (CHECK(obj->bss->invocations != 3,
"first-invoke", "invocations=%d", obj->bss->invocations))
goto close_bpf_object;
key.cgroup_inode_id = get_cgroup_id(PARENT_CGROUP);
key.attach_type = BPF_CGROUP_INET_EGRESS;
expected_cgroup_value = (struct cgroup_value) { .egress_pkts = 2 };
if (assert_storage(obj->maps.cgroup_storage,
&key, &expected_cgroup_value))
goto close_bpf_object;
key.attach_type = BPF_CGROUP_INET_INGRESS;
expected_cgroup_value = (struct cgroup_value) { .ingress_pkts = 1 };
if (assert_storage(obj->maps.cgroup_storage,
&key, &expected_cgroup_value))
goto close_bpf_object;
key.cgroup_inode_id = get_cgroup_id(CHILD_CGROUP);
key.attach_type = BPF_CGROUP_INET_EGRESS;
if (assert_storage_noexist(obj->maps.cgroup_storage, &key))
goto close_bpf_object;
key.attach_type = BPF_CGROUP_INET_INGRESS;
if (assert_storage_noexist(obj->maps.cgroup_storage, &key))
goto close_bpf_object;
/* Attach to parent and child cgroup, trigger packet from child.
* Assert that there are six additional runs: parent cgroup egresses and
* ingress, child cgroup egresses and ingress.
* Assert that egress and ingress storages are separate.
*/
child_egress1_link = bpf_program__attach_cgroup(obj->progs.egress1,
child_cgroup_fd);
if (CHECK(IS_ERR(child_egress1_link), "child-egress1-cg-attach",
"err %ld", PTR_ERR(child_egress1_link)))
goto close_bpf_object;
child_egress2_link = bpf_program__attach_cgroup(obj->progs.egress2,
child_cgroup_fd);
if (CHECK(IS_ERR(child_egress2_link), "child-egress2-cg-attach",
"err %ld", PTR_ERR(child_egress2_link)))
goto close_bpf_object;
child_ingress_link = bpf_program__attach_cgroup(obj->progs.ingress,
child_cgroup_fd);
if (CHECK(IS_ERR(child_ingress_link), "child-ingress-cg-attach",
"err %ld", PTR_ERR(child_ingress_link)))
goto close_bpf_object;
err = connect_send(CHILD_CGROUP);
if (CHECK(err, "second-connect-send", "errno %d", errno))
goto close_bpf_object;
if (CHECK(obj->bss->invocations != 9,
"second-invoke", "invocations=%d", obj->bss->invocations))
goto close_bpf_object;
key.cgroup_inode_id = get_cgroup_id(PARENT_CGROUP);
key.attach_type = BPF_CGROUP_INET_EGRESS;
expected_cgroup_value = (struct cgroup_value) { .egress_pkts = 4 };
if (assert_storage(obj->maps.cgroup_storage,
&key, &expected_cgroup_value))
goto close_bpf_object;
key.attach_type = BPF_CGROUP_INET_INGRESS;
expected_cgroup_value = (struct cgroup_value) { .ingress_pkts = 2 };
if (assert_storage(obj->maps.cgroup_storage,
&key, &expected_cgroup_value))
goto close_bpf_object;
key.cgroup_inode_id = get_cgroup_id(CHILD_CGROUP);
key.attach_type = BPF_CGROUP_INET_EGRESS;
expected_cgroup_value = (struct cgroup_value) { .egress_pkts = 2 };
if (assert_storage(obj->maps.cgroup_storage,
&key, &expected_cgroup_value))
goto close_bpf_object;
key.attach_type = BPF_CGROUP_INET_INGRESS;
expected_cgroup_value = (struct cgroup_value) { .ingress_pkts = 1 };
if (assert_storage(obj->maps.cgroup_storage,
&key, &expected_cgroup_value))
goto close_bpf_object;
close_bpf_object:
if (!IS_ERR(parent_egress1_link))
bpf_link__destroy(parent_egress1_link);
if (!IS_ERR(parent_egress2_link))
bpf_link__destroy(parent_egress2_link);
if (!IS_ERR(parent_ingress_link))
bpf_link__destroy(parent_ingress_link);
if (!IS_ERR(child_egress1_link))
bpf_link__destroy(child_egress1_link);
if (!IS_ERR(child_egress2_link))
bpf_link__destroy(child_egress2_link);
if (!IS_ERR(child_ingress_link))
bpf_link__destroy(child_ingress_link);
cg_storage_multi_isolated__destroy(obj);
}
static void test_shared(int parent_cgroup_fd, int child_cgroup_fd)
{
struct cg_storage_multi_shared *obj;
struct cgroup_value expected_cgroup_value;
__u64 key;
struct bpf_link *parent_egress1_link = NULL, *parent_egress2_link = NULL;
struct bpf_link *child_egress1_link = NULL, *child_egress2_link = NULL;
struct bpf_link *parent_ingress_link = NULL, *child_ingress_link = NULL;
bool err;
obj = cg_storage_multi_shared__open_and_load();
if (CHECK(!obj, "skel-load", "errno %d", errno))
return;
/* Attach to parent cgroup, trigger packet from child.
* Assert that there are three runs, two with parent cgroup egress and
* one with parent cgroup ingress.
* Also assert that child cgroup's storage does not exist
*/
parent_egress1_link = bpf_program__attach_cgroup(obj->progs.egress1,
parent_cgroup_fd);
if (CHECK(IS_ERR(parent_egress1_link), "parent-egress1-cg-attach",
"err %ld", PTR_ERR(parent_egress1_link)))
goto close_bpf_object;
parent_egress2_link = bpf_program__attach_cgroup(obj->progs.egress2,
parent_cgroup_fd);
if (CHECK(IS_ERR(parent_egress2_link), "parent-egress2-cg-attach",
"err %ld", PTR_ERR(parent_egress2_link)))
goto close_bpf_object;
parent_ingress_link = bpf_program__attach_cgroup(obj->progs.ingress,
parent_cgroup_fd);
if (CHECK(IS_ERR(parent_ingress_link), "parent-ingress-cg-attach",
"err %ld", PTR_ERR(parent_ingress_link)))
goto close_bpf_object;
err = connect_send(CHILD_CGROUP);
if (CHECK(err, "first-connect-send", "errno %d", errno))
goto close_bpf_object;
if (CHECK(obj->bss->invocations != 3,
"first-invoke", "invocations=%d", obj->bss->invocations))
goto close_bpf_object;
key = get_cgroup_id(PARENT_CGROUP);
expected_cgroup_value = (struct cgroup_value) {
.egress_pkts = 2,
.ingress_pkts = 1,
};
if (assert_storage(obj->maps.cgroup_storage,
&key, &expected_cgroup_value))
goto close_bpf_object;
key = get_cgroup_id(CHILD_CGROUP);
if (assert_storage_noexist(obj->maps.cgroup_storage, &key))
goto close_bpf_object;
/* Attach to parent and child cgroup, trigger packet from child.
* Assert that there are six additional runs, parent cgroup egresses and
* ingress, child cgroup egresses and ingress.
*/
child_egress1_link = bpf_program__attach_cgroup(obj->progs.egress1,
child_cgroup_fd);
if (CHECK(IS_ERR(child_egress1_link), "child-egress1-cg-attach",
"err %ld", PTR_ERR(child_egress1_link)))
goto close_bpf_object;
child_egress2_link = bpf_program__attach_cgroup(obj->progs.egress2,
child_cgroup_fd);
if (CHECK(IS_ERR(child_egress2_link), "child-egress2-cg-attach",
"err %ld", PTR_ERR(child_egress2_link)))
goto close_bpf_object;
child_ingress_link = bpf_program__attach_cgroup(obj->progs.ingress,
child_cgroup_fd);
if (CHECK(IS_ERR(child_ingress_link), "child-ingress-cg-attach",
"err %ld", PTR_ERR(child_ingress_link)))
goto close_bpf_object;
err = connect_send(CHILD_CGROUP);
if (CHECK(err, "second-connect-send", "errno %d", errno))
goto close_bpf_object;
if (CHECK(obj->bss->invocations != 9,
"second-invoke", "invocations=%d", obj->bss->invocations))
goto close_bpf_object;
key = get_cgroup_id(PARENT_CGROUP);
expected_cgroup_value = (struct cgroup_value) {
.egress_pkts = 4,
.ingress_pkts = 2,
};
if (assert_storage(obj->maps.cgroup_storage,
&key, &expected_cgroup_value))
goto close_bpf_object;
key = get_cgroup_id(CHILD_CGROUP);
expected_cgroup_value = (struct cgroup_value) {
.egress_pkts = 2,
.ingress_pkts = 1,
};
if (assert_storage(obj->maps.cgroup_storage,
&key, &expected_cgroup_value))
goto close_bpf_object;
close_bpf_object:
if (!IS_ERR(parent_egress1_link))
bpf_link__destroy(parent_egress1_link);
if (!IS_ERR(parent_egress2_link))
bpf_link__destroy(parent_egress2_link);
if (!IS_ERR(parent_ingress_link))
bpf_link__destroy(parent_ingress_link);
if (!IS_ERR(child_egress1_link))
bpf_link__destroy(child_egress1_link);
if (!IS_ERR(child_egress2_link))
bpf_link__destroy(child_egress2_link);
if (!IS_ERR(child_ingress_link))
bpf_link__destroy(child_ingress_link);
cg_storage_multi_shared__destroy(obj);
}
void test_cg_storage_multi(void)
{
int parent_cgroup_fd = -1, child_cgroup_fd = -1;
parent_cgroup_fd = test__join_cgroup(PARENT_CGROUP);
if (CHECK(parent_cgroup_fd < 0, "cg-create-parent", "errno %d", errno))
goto close_cgroup_fd;
child_cgroup_fd = create_and_get_cgroup(CHILD_CGROUP);
if (CHECK(child_cgroup_fd < 0, "cg-create-child", "errno %d", errno))
goto close_cgroup_fd;
if (test__start_subtest("egress_only"))
test_egress_only(parent_cgroup_fd, child_cgroup_fd);
if (test__start_subtest("isolated"))
test_isolated(parent_cgroup_fd, child_cgroup_fd);
if (test__start_subtest("shared"))
test_shared(parent_cgroup_fd, child_cgroup_fd);
close_cgroup_fd:
close(child_cgroup_fd);
close(parent_cgroup_fd);
}
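
The three variants exercised above differ mainly in the storage map's key
type: keying by struct bpf_cgroup_storage_key (cgroup id plus attach type)
keeps egress and ingress storages isolated, while a plain __u64 cgroup id
yields a single entry shared across attach types. A hypothetical pair of
map definitions for comparison (names illustrative):

	struct {
		__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
		__type(key, struct bpf_cgroup_storage_key); /* isolated */
		__type(value, struct cgroup_value);
	} cgroup_storage SEC(".maps");

	struct {
		__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
		__type(key, __u64); /* shared across attach types */
		__type(value, struct cgroup_value);
	} cgroup_storage_shared SEC(".maps");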

View File

@@ -2,6 +2,7 @@
#include <test_progs.h>
#include "cgroup_helpers.h"
#include "testing_helpers.h"
#include "test_cgroup_link.skel.h"
static __u32 duration = 0;
@@ -37,7 +38,8 @@ void test_cgroup_link(void)
int last_cg = ARRAY_SIZE(cgs) - 1, cg_nr = ARRAY_SIZE(cgs);
DECLARE_LIBBPF_OPTS(bpf_link_update_opts, link_upd_opts);
struct bpf_link *links[ARRAY_SIZE(cgs)] = {}, *tmp_link;
-__u32 prog_ids[ARRAY_SIZE(cgs)], prog_cnt = 0, attach_flags;
+__u32 prog_ids[ARRAY_SIZE(cgs)], prog_cnt = 0, attach_flags, prog_id;
struct bpf_link_info info;
int i = 0, err, prog_fd;
bool detach_legacy = false;
@@ -219,6 +221,22 @@ void test_cgroup_link(void)
/* BPF programs should still get called */
ping_and_check(0, cg_nr);
prog_id = link_info_prog_id(links[0], &info);
CHECK(prog_id == 0, "link_info", "failed\n");
CHECK(info.cgroup.cgroup_id == 0, "cgroup_id", "unexpected %llu\n", info.cgroup.cgroup_id);
err = bpf_link__detach(links[0]);
if (CHECK(err, "link_detach", "failed %d\n", err))
goto cleanup;
/* cgroup_id should be zero in link_info */
prog_id = link_info_prog_id(links[0], &info);
CHECK(prog_id == 0, "link_info", "failed\n");
CHECK(info.cgroup.cgroup_id != 0, "cgroup_id", "unexpected %llu\n", info.cgroup.cgroup_id);
/* First BPF program shouldn't be called anymore */
ping_and_check(0, cg_nr - 1);
/* leave cgroup and remove them, don't detach programs */
cleanup_cgroup_environment();

View File

@@ -13,7 +13,7 @@ static void run_lookup_test(__u16 *g_serv_port, int out_sk)
socklen_t addr_len = sizeof(addr);
__u32 duration = 0;
-serv_sk = start_server(AF_INET6, SOCK_STREAM);
+serv_sk = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0);
if (CHECK(serv_sk < 0, "start_server", "failed to start server\n"))
return;
@@ -24,17 +24,13 @@ static void run_lookup_test(__u16 *g_serv_port, int out_sk)
*g_serv_port = addr.sin6_port;
/* Client outside of test cgroup should fail to connect by timeout. */
-err = connect_fd_to_fd(out_sk, serv_sk);
+err = connect_fd_to_fd(out_sk, serv_sk, 1000);
if (CHECK(!err || errno != EINPROGRESS, "connect_fd_to_fd",
"unexpected result err %d errno %d\n", err, errno))
goto cleanup;
err = connect_wait(out_sk);
if (CHECK(err, "connect_wait", "unexpected result %d\n", err))
goto cleanup;
/* Client inside test cgroup should connect just fine. */
-in_sk = connect_to_fd(AF_INET6, SOCK_STREAM, serv_sk);
+in_sk = connect_to_fd(serv_sk, 0);
if (CHECK(in_sk < 0, "connect_to_fd", "errno %d\n", errno))
goto cleanup;
@@ -85,7 +81,7 @@ void test_cgroup_skb_sk_lookup(void)
* differs from that of testing cgroup. Moving selftests process to
* testing cgroup won't change cgroup id of an already created socket.
*/
-out_sk = socket(AF_INET6, SOCK_STREAM | SOCK_NONBLOCK, 0);
+out_sk = socket(AF_INET6, SOCK_STREAM, 0);
if (CHECK_FAIL(out_sk < 0))
return;

View File

@@ -114,7 +114,7 @@ static int run_test(int cgroup_fd, int server_fd, int family, int type)
goto close_bpf_object;
}
-fd = connect_to_fd(family, type, server_fd);
+fd = connect_to_fd(server_fd, 0);
if (fd < 0) {
err = -1;
goto close_bpf_object;
@@ -137,25 +137,25 @@ void test_connect_force_port(void)
if (CHECK_FAIL(cgroup_fd < 0))
return;
-server_fd = start_server_with_port(AF_INET, SOCK_STREAM, 60123);
+server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 60123, 0);
if (CHECK_FAIL(server_fd < 0))
goto close_cgroup_fd;
CHECK_FAIL(run_test(cgroup_fd, server_fd, AF_INET, SOCK_STREAM));
close(server_fd);
-server_fd = start_server_with_port(AF_INET6, SOCK_STREAM, 60124);
+server_fd = start_server(AF_INET6, SOCK_STREAM, NULL, 60124, 0);
if (CHECK_FAIL(server_fd < 0))
goto close_cgroup_fd;
CHECK_FAIL(run_test(cgroup_fd, server_fd, AF_INET6, SOCK_STREAM));
close(server_fd);
-server_fd = start_server_with_port(AF_INET, SOCK_DGRAM, 60123);
+server_fd = start_server(AF_INET, SOCK_DGRAM, NULL, 60123, 0);
if (CHECK_FAIL(server_fd < 0))
goto close_cgroup_fd;
CHECK_FAIL(run_test(cgroup_fd, server_fd, AF_INET, SOCK_DGRAM));
close(server_fd);
-server_fd = start_server_with_port(AF_INET6, SOCK_DGRAM, 60124);
+server_fd = start_server(AF_INET6, SOCK_DGRAM, NULL, 60124, 0);
if (CHECK_FAIL(server_fd < 0))
goto close_cgroup_fd;
CHECK_FAIL(run_test(cgroup_fd, server_fd, AF_INET6, SOCK_DGRAM));

View File

@@ -0,0 +1,37 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#define _GNU_SOURCE
#include <test_progs.h>
#include "test_core_retro.skel.h"
void test_core_retro(void)
{
int err, zero = 0, res, duration = 0, my_pid = getpid();
struct test_core_retro *skel;
/* load program */
skel = test_core_retro__open_and_load();
if (CHECK(!skel, "skel_load", "skeleton open/load failed\n"))
goto out_close;
err = bpf_map_update_elem(bpf_map__fd(skel->maps.exp_tgid_map), &zero, &my_pid, 0);
if (CHECK(err, "map_update", "failed to set expected PID: %d\n", errno))
goto out_close;
/* attach probe */
err = test_core_retro__attach(skel);
if (CHECK(err, "attach_kprobe", "err %d\n", err))
goto out_close;
/* trigger */
usleep(1);
err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.results), &zero, &res);
if (CHECK(err, "map_lookup", "failed to lookup result: %d\n", errno))
goto out_close;
CHECK(res != my_pid, "pid_check", "got %d != exp %d\n", res, my_pid);
out_close:
test_core_retro__destroy(skel);
}

View File

@@ -0,0 +1,53 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include "test_endian.skel.h"
static int duration;
#define IN16 0x1234
#define IN32 0x12345678U
#define IN64 0x123456789abcdef0ULL
#define OUT16 0x3412
#define OUT32 0x78563412U
#define OUT64 0xf0debc9a78563412ULL
void test_endian(void)
{
struct test_endian* skel;
struct test_endian__bss *bss;
int err;
skel = test_endian__open_and_load();
if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
return;
bss = skel->bss;
bss->in16 = IN16;
bss->in32 = IN32;
bss->in64 = IN64;
err = test_endian__attach(skel);
if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
goto cleanup;
usleep(1);
CHECK(bss->out16 != OUT16, "out16", "got 0x%llx != exp 0x%llx\n",
(__u64)bss->out16, (__u64)OUT16);
CHECK(bss->out32 != OUT32, "out32", "got 0x%llx != exp 0x%llx\n",
(__u64)bss->out32, (__u64)OUT32);
CHECK(bss->out64 != OUT64, "out16", "got 0x%llx != exp 0x%llx\n",
(__u64)bss->out64, (__u64)OUT64);
CHECK(bss->const16 != OUT16, "const16", "got 0x%llx != exp 0x%llx\n",
(__u64)bss->const16, (__u64)OUT16);
CHECK(bss->const32 != OUT32, "const32", "got 0x%llx != exp 0x%llx\n",
(__u64)bss->const32, (__u64)OUT32);
CHECK(bss->const64 != OUT64, "const64", "got 0x%llx != exp 0x%llx\n",
(__u64)bss->const64, (__u64)OUT64);
cleanup:
test_endian__destroy(skel);
}

View File

@@ -0,0 +1,91 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <test_progs.h>
#include "test_stacktrace_build_id.skel.h"
void test_get_stackid_cannot_attach(void)
{
struct perf_event_attr attr = {
/* .type = PERF_TYPE_SOFTWARE, */
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
.precise_ip = 1,
.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK,
.branch_sample_type = PERF_SAMPLE_BRANCH_USER |
PERF_SAMPLE_BRANCH_NO_FLAGS |
PERF_SAMPLE_BRANCH_NO_CYCLES |
PERF_SAMPLE_BRANCH_CALL_STACK,
.sample_period = 5000,
.size = sizeof(struct perf_event_attr),
};
struct test_stacktrace_build_id *skel;
__u32 duration = 0;
int pmu_fd, err;
skel = test_stacktrace_build_id__open();
if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
return;
/* override program type */
bpf_program__set_perf_event(skel->progs.oncpu);
err = test_stacktrace_build_id__load(skel);
if (CHECK(err, "skel_load", "skeleton load failed: %d\n", err))
goto cleanup;
pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
0 /* cpu 0 */, -1 /* group id */,
0 /* flags */);
if (pmu_fd < 0 && (errno == ENOENT || errno == EOPNOTSUPP)) {
printf("%s:SKIP:cannot open PERF_COUNT_HW_CPU_CYCLES with precise_ip > 0\n",
__func__);
test__skip();
goto cleanup;
}
if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
pmu_fd, errno))
goto cleanup;
skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
pmu_fd);
CHECK(!IS_ERR(skel->links.oncpu), "attach_perf_event_no_callchain",
"should have failed\n");
close(pmu_fd);
/* add PERF_SAMPLE_CALLCHAIN, attach should succeed */
attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
0 /* cpu 0 */, -1 /* group id */,
0 /* flags */);
if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
pmu_fd, errno))
goto cleanup;
skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
pmu_fd);
CHECK(IS_ERR(skel->links.oncpu), "attach_perf_event_callchain",
"err: %ld\n", PTR_ERR(skel->links.oncpu));
close(pmu_fd);
/* add exclude_callchain_kernel, attach should fail */
attr.exclude_callchain_kernel = 1;
pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
0 /* cpu 0 */, -1 /* group id */,
0 /* flags */);
if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
pmu_fd, errno))
goto cleanup;
skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
pmu_fd);
CHECK(!IS_ERR(skel->links.oncpu), "attach_perf_event_exclude_callchain_kernel",
"should have failed\n");
close(pmu_fd);
cleanup:
test_stacktrace_build_id__destroy(skel);
}

View File

@@ -0,0 +1,71 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
#include "test_ksyms.skel.h"
#include <sys/stat.h>
static int duration;
static __u64 kallsyms_find(const char *sym)
{
char type, name[500];
__u64 addr, res = 0;
FILE *f;
f = fopen("/proc/kallsyms", "r");
if (CHECK(!f, "kallsyms_fopen", "failed to open: %d\n", errno))
return 0;
while (fscanf(f, "%llx %c %499s%*[^\n]\n", &addr, &type, name) > 0) {
if (strcmp(name, sym) == 0) {
res = addr;
goto out;
}
}
CHECK(false, "not_found", "symbol %s not found\n", sym);
out:
fclose(f);
return res;
}
void test_ksyms(void)
{
__u64 link_fops_addr = kallsyms_find("bpf_link_fops");
const char *btf_path = "/sys/kernel/btf/vmlinux";
struct test_ksyms *skel;
struct test_ksyms__data *data;
struct stat st;
__u64 btf_size;
int err;
if (CHECK(stat(btf_path, &st), "stat_btf", "err %d\n", errno))
return;
btf_size = st.st_size;
skel = test_ksyms__open_and_load();
if (CHECK(!skel, "skel_open", "failed to open and load skeleton\n"))
return;
err = test_ksyms__attach(skel);
if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
goto cleanup;
/* trigger tracepoint */
usleep(1);
data = skel->data;
CHECK(data->out__bpf_link_fops != link_fops_addr, "bpf_link_fops",
"got 0x%llx, exp 0x%llx\n",
data->out__bpf_link_fops, link_fops_addr);
CHECK(data->out__bpf_link_fops1 != 0, "bpf_link_fops1",
"got %llu, exp %llu\n", data->out__bpf_link_fops1, (__u64)0);
CHECK(data->out__btf_size != btf_size, "btf_size",
"got %llu, exp %llu\n", data->out__btf_size, btf_size);
CHECK(data->out__per_cpu_start != 0, "__per_cpu_start",
"got %llu, exp %llu\n", data->out__per_cpu_start, (__u64)0);
cleanup:
test_ksyms__destroy(skel);
}

View File

@@ -23,7 +23,7 @@ void test_load_bytes_relative(void)
if (CHECK_FAIL(cgroup_fd < 0))
return;
-server_fd = start_server(AF_INET, SOCK_STREAM);
+server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0);
if (CHECK_FAIL(server_fd < 0))
goto close_cgroup_fd;
@@ -49,7 +49,7 @@ void test_load_bytes_relative(void)
if (CHECK_FAIL(err))
goto close_bpf_object;
client_fd = connect_to_fd(AF_INET, SOCK_STREAM, server_fd);
client_fd = connect_to_fd(server_fd, 0);
if (CHECK_FAIL(client_fd < 0))
goto close_bpf_object;
close(client_fd);

View File

@@ -0,0 +1,32 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <test_progs.h>
#include <network_helpers.h>
#include "map_ptr_kern.skel.h"
void test_map_ptr(void)
{
struct map_ptr_kern *skel;
__u32 duration = 0, retval;
char buf[128];
int err;
skel = map_ptr_kern__open_and_load();
if (CHECK(!skel, "skel_open_load", "open_load failed\n"))
return;
err = bpf_prog_test_run(bpf_program__fd(skel->progs.cg_skb), 1, &pkt_v4,
sizeof(pkt_v4), buf, NULL, &retval, NULL);
if (CHECK(err, "test_run", "err=%d errno=%d\n", err, errno))
goto cleanup;
if (CHECK(!retval, "retval", "retval=%d map_type=%u line=%u\n", retval,
skel->bss->g_map_type, skel->bss->g_line))
goto cleanup;
cleanup:
map_ptr_kern__destroy(skel);
}

View File

@@ -4,6 +4,7 @@
#include <sched.h>
#include <sys/socket.h>
#include <test_progs.h>
#include "test_perf_buffer.skel.h"
#include "bpf/libbpf_internal.h"
/* AddressSanitizer sometimes crashes due to data dereference below, due to
@@ -25,16 +26,11 @@ static void on_sample(void *ctx, int cpu, void *data, __u32 size)
void test_perf_buffer(void)
{
int err, prog_fd, on_len, nr_on_cpus = 0, nr_cpus, i, duration = 0;
const char *prog_name = "kprobe/sys_nanosleep";
const char *file = "./test_perf_buffer.o";
int err, on_len, nr_on_cpus = 0, nr_cpus, i, duration = 0;
struct perf_buffer_opts pb_opts = {};
struct bpf_map *perf_buf_map;
struct test_perf_buffer *skel;
cpu_set_t cpu_set, cpu_seen;
struct bpf_program *prog;
struct bpf_object *obj;
struct perf_buffer *pb;
struct bpf_link *link;
bool *online;
nr_cpus = libbpf_num_possible_cpus();
@@ -51,33 +47,21 @@ void test_perf_buffer(void)
nr_on_cpus++;
/* load program */
err = bpf_prog_load(file, BPF_PROG_TYPE_KPROBE, &obj, &prog_fd);
if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno)) {
obj = NULL;
goto out_close;
}
prog = bpf_object__find_program_by_title(obj, prog_name);
if (CHECK(!prog, "find_probe", "prog '%s' not found\n", prog_name))
skel = test_perf_buffer__open_and_load();
if (CHECK(!skel, "skel_load", "skeleton open/load failed\n"))
goto out_close;
/* load map */
perf_buf_map = bpf_object__find_map_by_name(obj, "perf_buf_map");
if (CHECK(!perf_buf_map, "find_perf_buf_map", "not found\n"))
goto out_close;
/* attach kprobe */
link = bpf_program__attach_kprobe(prog, false /* retprobe */,
SYS_NANOSLEEP_KPROBE_NAME);
if (CHECK(IS_ERR(link), "attach_kprobe", "err %ld\n", PTR_ERR(link)))
/* attach probe */
err = test_perf_buffer__attach(skel);
if (CHECK(err, "attach_kprobe", "err %d\n", err))
goto out_close;
/* set up perf buffer */
pb_opts.sample_cb = on_sample;
pb_opts.ctx = &cpu_seen;
pb = perf_buffer__new(bpf_map__fd(perf_buf_map), 1, &pb_opts);
pb = perf_buffer__new(bpf_map__fd(skel->maps.perf_buf_map), 1, &pb_opts);
if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb)))
goto out_detach;
goto out_close;
/* trigger kprobe on every CPU */
CPU_ZERO(&cpu_seen);
@@ -94,7 +78,7 @@ void test_perf_buffer(void)
&cpu_set);
if (err && CHECK(err, "set_affinity", "cpu #%d, err %d\n",
i, err))
goto out_detach;
goto out_close;
usleep(1);
}
@@ -110,9 +94,7 @@ void test_perf_buffer(void)
out_free_pb:
perf_buffer__free(pb);
out_detach:
bpf_link__destroy(link);
out_close:
bpf_object__close(obj);
test_perf_buffer__destroy(skel);
free(online);
}

View File

@@ -0,0 +1,116 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <test_progs.h>
#include "perf_event_stackmap.skel.h"
#ifndef noinline
#define noinline __attribute__((noinline))
#endif
noinline int func_1(void)
{
static int val = 1;
val += 1;
usleep(100);
return val;
}
noinline int func_2(void)
{
return func_1();
}
noinline int func_3(void)
{
return func_2();
}
noinline int func_4(void)
{
return func_3();
}
noinline int func_5(void)
{
return func_4();
}
noinline int func_6(void)
{
int i, val = 1;
for (i = 0; i < 100; i++)
val += func_5();
return val;
}
void test_perf_event_stackmap(void)
{
struct perf_event_attr attr = {
/* .type = PERF_TYPE_SOFTWARE, */
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
.precise_ip = 2,
.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK |
PERF_SAMPLE_CALLCHAIN,
.branch_sample_type = PERF_SAMPLE_BRANCH_USER |
PERF_SAMPLE_BRANCH_NO_FLAGS |
PERF_SAMPLE_BRANCH_NO_CYCLES |
PERF_SAMPLE_BRANCH_CALL_STACK,
.sample_period = 5000,
.size = sizeof(struct perf_event_attr),
};
struct perf_event_stackmap *skel;
__u32 duration = 0;
cpu_set_t cpu_set;
int pmu_fd, err;
skel = perf_event_stackmap__open();
if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
return;
err = perf_event_stackmap__load(skel);
if (CHECK(err, "skel_load", "skeleton load failed: %d\n", err))
goto cleanup;
CPU_ZERO(&cpu_set);
CPU_SET(0, &cpu_set);
err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
if (CHECK(err, "set_affinity", "err %d, errno %d\n", err, errno))
goto cleanup;
pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
0 /* cpu 0 */, -1 /* group id */,
0 /* flags */);
if (pmu_fd < 0) {
printf("%s:SKIP:cpu doesn't support the event\n", __func__);
test__skip();
goto cleanup;
}
skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
pmu_fd);
if (CHECK(IS_ERR(skel->links.oncpu), "attach_perf_event",
"err %ld\n", PTR_ERR(skel->links.oncpu))) {
close(pmu_fd);
goto cleanup;
}
/* create kernel and user stack traces for testing */
func_6();
CHECK(skel->data->stackid_kernel != 2, "get_stackid_kernel", "failed\n");
CHECK(skel->data->stackid_user != 2, "get_stackid_user", "failed\n");
CHECK(skel->data->stack_kernel != 2, "get_stack_kernel", "failed\n");
CHECK(skel->data->stack_user != 2, "get_stack_user", "failed\n");
cleanup:
perf_event_stackmap__destroy(skel);
}

View File

@@ -0,0 +1,129 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <string.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include <linux/btf.h>
#include <linux/kernel.h>
#define CONFIG_DEBUG_INFO_BTF
#include <linux/btf_ids.h>
#include "test_progs.h"
static int duration;
struct symbol {
const char *name;
int type;
int id;
};
struct symbol test_symbols[] = {
{ "unused", BTF_KIND_UNKN, 0 },
{ "S", BTF_KIND_TYPEDEF, -1 },
{ "T", BTF_KIND_TYPEDEF, -1 },
{ "U", BTF_KIND_TYPEDEF, -1 },
{ "S", BTF_KIND_STRUCT, -1 },
{ "U", BTF_KIND_UNION, -1 },
{ "func", BTF_KIND_FUNC, -1 },
};
BTF_ID_LIST(test_list_local)
BTF_ID_UNUSED
BTF_ID(typedef, S)
BTF_ID(typedef, T)
BTF_ID(typedef, U)
BTF_ID(struct, S)
BTF_ID(union, U)
BTF_ID(func, func)
extern __u32 test_list_global[];
BTF_ID_LIST_GLOBAL(test_list_global)
BTF_ID_UNUSED
BTF_ID(typedef, S)
BTF_ID(typedef, T)
BTF_ID(typedef, U)
BTF_ID(struct, S)
BTF_ID(union, U)
BTF_ID(func, func)
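/* Note: defining CONFIG_DEBUG_INFO_BTF above forces btf_ids.h to emit its
 * real implementation of these macros. BTF_ID_LIST()/BTF_ID() only reserve
 * zero-filled u32 slots in a dedicated .BTF_ids ELF section; roughly (a
 * sketch, not the exact macro expansion):
 *
 *   asm(".pushsection .BTF_ids,\"a\";"
 *       "__BTF_ID__typedef__S: .zero 4;"
 *       ".popsection");
 *
 * The resolve_btfids build step then patches each slot with the matching
 * BTF type ID, which is what this test verifies against test_symbols[].
 */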
static int
__resolve_symbol(struct btf *btf, int type_id)
{
const struct btf_type *type;
const char *str;
unsigned int i;
type = btf__type_by_id(btf, type_id);
if (!type) {
PRINT_FAIL("Failed to get type for ID %d\n", type_id);
return -1;
}
for (i = 0; i < ARRAY_SIZE(test_symbols); i++) {
if (test_symbols[i].id != -1)
continue;
if (BTF_INFO_KIND(type->info) != test_symbols[i].type)
continue;
str = btf__name_by_offset(btf, type->name_off);
if (!str) {
PRINT_FAIL("Failed to get name for BTF ID %d\n", type_id);
return -1;
}
if (!strcmp(str, test_symbols[i].name))
test_symbols[i].id = type_id;
}
return 0;
}
static int resolve_symbols(void)
{
struct btf *btf;
int type_id;
__u32 nr;
btf = btf__parse_elf("btf_data.o", NULL);
if (CHECK(libbpf_get_error(btf), "resolve",
"Failed to load BTF from btf_data.o\n"))
return -1;
nr = btf__get_nr_types(btf);
for (type_id = 1; type_id <= nr; type_id++) {
if (__resolve_symbol(btf, type_id))
break;
}
btf__free(btf);
return 0;
}
int test_resolve_btfids(void)
{
__u32 *test_list, *test_lists[] = { test_list_local, test_list_global };
unsigned int i, j;
int ret = 0;
if (resolve_symbols())
return -1;
/* Check BTF_ID_LIST(test_list_local) and
* BTF_ID_LIST_GLOBAL(test_list_global) IDs
*/
for (j = 0; j < ARRAY_SIZE(test_lists); j++) {
test_list = test_lists[j];
for (i = 0; i < ARRAY_SIZE(test_symbols) && !ret; i++) {
ret = CHECK(test_list[i] != test_symbols[i].id,
"id_check",
"wrong ID for %s (%d != %d)\n",
test_symbols[i].name,
test_list[i], test_symbols[i].id);
}
}
return ret;
}

View File

@@ -35,7 +35,7 @@ static struct sec_name_test tests[] = {
{-EINVAL, 0},
},
{"raw_tp/", {0, BPF_PROG_TYPE_RAW_TRACEPOINT, 0}, {-EINVAL, 0} },
{"xdp", {0, BPF_PROG_TYPE_XDP, 0}, {-EINVAL, 0} },
{"xdp", {0, BPF_PROG_TYPE_XDP, BPF_XDP}, {0, BPF_XDP} },
{"perf_event", {0, BPF_PROG_TYPE_PERF_EVENT, 0}, {-EINVAL, 0} },
{"lwt_in", {0, BPF_PROG_TYPE_LWT_IN, 0}, {-EINVAL, 0} },
{"lwt_out", {0, BPF_PROG_TYPE_LWT_OUT, 0}, {-EINVAL, 0} },

File diff suppressed because it is too large

View File

@@ -11,6 +11,7 @@ void test_skb_ctx(void)
.cb[3] = 4,
.cb[4] = 5,
.priority = 6,
.ifindex = 1,
.tstamp = 7,
.wire_len = 100,
.gso_segs = 8,
@@ -92,6 +93,10 @@ void test_skb_ctx(void)
"ctx_out_priority",
"skb->priority == %d, expected %d\n",
skb.priority, 7);
CHECK_ATTR(skb.ifindex != 1,
"ctx_out_ifindex",
"skb->ifindex == %d, expected %d\n",
skb.ifindex, 1);
CHECK_ATTR(skb.tstamp != 8,
"ctx_out_tstamp",
"skb->tstamp == %lld, expected %d\n",

View File

@@ -41,7 +41,7 @@ void test_skeleton(void)
CHECK(bss->in4 != 0, "in4", "got %lld != exp %lld\n", bss->in4, 0LL);
CHECK(bss->out4 != 0, "out4", "got %lld != exp %lld\n", bss->out4, 0LL);
CHECK(rodata->in6 != 0, "in6", "got %d != exp %d\n", rodata->in6, 0);
CHECK(rodata->in.in6 != 0, "in6", "got %d != exp %d\n", rodata->in.in6, 0);
CHECK(bss->out6 != 0, "out6", "got %d != exp %d\n", bss->out6, 0);
/* validate we can pre-setup global variables, even in .bss */
@@ -49,7 +49,7 @@ void test_skeleton(void)
data->in2 = 11;
bss->in3 = 12;
bss->in4 = 13;
rodata->in6 = 14;
rodata->in.in6 = 14;
err = test_skeleton__load(skel);
if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err))
@@ -60,7 +60,7 @@ void test_skeleton(void)
CHECK(data->in2 != 11, "in2", "got %lld != exp %lld\n", data->in2, 11LL);
CHECK(bss->in3 != 12, "in3", "got %d != exp %d\n", bss->in3, 12);
CHECK(bss->in4 != 13, "in4", "got %lld != exp %lld\n", bss->in4, 13LL);
CHECK(rodata->in6 != 14, "in6", "got %d != exp %d\n", rodata->in6, 14);
CHECK(rodata->in.in6 != 14, "in6", "got %d != exp %d\n", rodata->in.in6, 14);
/* now set new values and attach to get them into outX variables */
data->in1 = 1;

View File

@@ -193,11 +193,10 @@ static void run_test(int cgroup_fd)
if (CHECK_FAIL(server_fd < 0))
goto close_bpf_object;
pthread_mutex_lock(&server_started_mtx);
if (CHECK_FAIL(pthread_create(&tid, NULL, server_thread,
(void *)&server_fd)))
goto close_server_fd;
pthread_mutex_lock(&server_started_mtx);
pthread_cond_wait(&server_started, &server_started_mtx);
pthread_mutex_unlock(&server_started_mtx);

View File

@@ -118,7 +118,7 @@ static int run_test(int cgroup_fd, int server_fd)
goto close_bpf_object;
}
client_fd = connect_to_fd(AF_INET, SOCK_STREAM, server_fd);
client_fd = connect_to_fd(server_fd, 0);
if (client_fd < 0) {
err = -1;
goto close_bpf_object;
@@ -161,7 +161,7 @@ void test_tcp_rtt(void)
if (CHECK_FAIL(cgroup_fd < 0))
return;
server_fd = start_server(AF_INET, SOCK_STREAM);
server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0);
if (CHECK_FAIL(server_fd < 0))
goto close_cgroup_fd;

View File

@@ -0,0 +1,75 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Oracle and/or its affiliates. */
#include <test_progs.h>
#include "trace_printk.skel.h"
#define TRACEBUF "/sys/kernel/debug/tracing/trace_pipe"
#define SEARCHMSG "testing,testing"
void test_trace_printk(void)
{
int err, iter = 0, duration = 0, found = 0;
struct trace_printk__bss *bss;
struct trace_printk *skel;
char *buf = NULL;
FILE *fp = NULL;
size_t buflen;
skel = trace_printk__open();
if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
return;
err = trace_printk__load(skel);
if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err))
goto cleanup;
bss = skel->bss;
err = trace_printk__attach(skel);
if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
goto cleanup;
fp = fopen(TRACEBUF, "r");
if (CHECK(fp == NULL, "could not open trace buffer",
"error %d opening %s", errno, TRACEBUF))
goto cleanup;
/* We do not want to wait forever if this test fails... */
fcntl(fileno(fp), F_SETFL, O_NONBLOCK);
/* wait for tracepoint to trigger */
usleep(1);
trace_printk__detach(skel);
if (CHECK(bss->trace_printk_ran == 0,
"bpf_trace_printk never ran",
"ran == %d", bss->trace_printk_ran))
goto cleanup;
if (CHECK(bss->trace_printk_ret <= 0,
"bpf_trace_printk returned <= 0 value",
"got %d", bss->trace_printk_ret))
goto cleanup;
/* verify our search string is in the trace buffer */
while (getline(&buf, &buflen, fp) >= 0 || errno == EAGAIN) {
if (strstr(buf, SEARCHMSG) != NULL)
found++;
if (found == bss->trace_printk_ran)
break;
if (++iter > 1000)
break;
}
if (CHECK(!found, "message from bpf_trace_printk not found",
"no instance of %s in %s", SEARCHMSG, TRACEBUF))
goto cleanup;
cleanup:
trace_printk__destroy(skel);
free(buf);
if (fp)
fclose(fp);
}

View File

@@ -0,0 +1,75 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "udp_limit.skel.h"
#include <sys/types.h>
#include <sys/socket.h>
static int duration;
void test_udp_limit(void)
{
struct udp_limit *skel;
int fd1 = -1, fd2 = -1;
int cgroup_fd;
cgroup_fd = test__join_cgroup("/udp_limit");
if (CHECK(cgroup_fd < 0, "cg-join", "errno %d", errno))
return;
skel = udp_limit__open_and_load();
if (CHECK(!skel, "skel-load", "errno %d", errno))
goto close_cgroup_fd;
skel->links.sock = bpf_program__attach_cgroup(skel->progs.sock, cgroup_fd);
skel->links.sock_release = bpf_program__attach_cgroup(skel->progs.sock_release, cgroup_fd);
if (CHECK(IS_ERR(skel->links.sock) || IS_ERR(skel->links.sock_release),
"cg-attach", "sock %ld sock_release %ld",
PTR_ERR(skel->links.sock),
PTR_ERR(skel->links.sock_release)))
goto close_skeleton;
/* BPF program enforces a single UDP socket per cgroup,
* verify that.
*/
fd1 = socket(AF_INET, SOCK_DGRAM, 0);
if (CHECK(fd1 < 0, "fd1", "errno %d", errno))
goto close_skeleton;
fd2 = socket(AF_INET, SOCK_DGRAM, 0);
if (CHECK(fd2 >= 0, "fd2", "errno %d", errno))
goto close_skeleton;
/* We can reopen again after close. */
close(fd1);
fd1 = -1;
fd1 = socket(AF_INET, SOCK_DGRAM, 0);
if (CHECK(fd1 < 0, "fd1-again", "errno %d", errno))
goto close_skeleton;
/* Make sure the program was invoked the expected
* number of times:
* - open fd1 - BPF_CGROUP_INET_SOCK_CREATE
* - attempt to open fd2 - BPF_CGROUP_INET_SOCK_CREATE
* - close fd1 - BPF_CGROUP_INET_SOCK_RELEASE
* - open fd1 again - BPF_CGROUP_INET_SOCK_CREATE
*/
if (CHECK(skel->bss->invocations != 4, "bss-invocations",
"invocations=%d", skel->bss->invocations))
goto close_skeleton;
/* We should still have a single socket in use */
if (CHECK(skel->bss->in_use != 1, "bss-in_use",
"in_use=%d", skel->bss->in_use))
goto close_skeleton;
close_skeleton:
if (fd1 >= 0)
close(fd1);
if (fd2 >= 0)
close(fd2);
udp_limit__destroy(skel);
close_cgroup_fd:
close(cgroup_fd);
}
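For context, a sketch of what the BPF side enforcing the one-UDP-socket-per-cgroup policy could look like (a sketch only: the real progs/udp_limit.c may differ, e.g. in how it filters socket types; the prog names sock/sock_release and the globals invocations/in_use are taken from the skeleton references above, while the section names are assumptions):

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

int invocations = 0, in_use = 0;

SEC("cgroup/sock_create")
int sock(struct bpf_sock *ctx)
{
	__sync_fetch_and_add(&invocations, 1);

	if (in_use > 0)
		return 0;	/* reject: only one UDP socket per cgroup */

	__sync_fetch_and_add(&in_use, 1);
	return 1;		/* allow */
}

SEC("cgroup/sock_release")
int sock_release(struct bpf_sock *ctx)
{
	__sync_fetch_and_add(&invocations, 1);
	__sync_fetch_and_add(&in_use, -1);
	return 1;
}

char _license[] SEC("license") = "GPL";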

View File

@@ -0,0 +1,68 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include <time.h>
#include "test_varlen.skel.h"
#define CHECK_VAL(got, exp) \
CHECK((got) != (exp), "check", "got %ld != exp %ld\n", \
(long)(got), (long)(exp))
void test_varlen(void)
{
int duration = 0, err;
struct test_varlen* skel;
struct test_varlen__bss *bss;
struct test_varlen__data *data;
const char str1[] = "Hello, ";
const char str2[] = "World!";
const char exp_str[] = "Hello, \0World!\0";
const int size1 = sizeof(str1);
const int size2 = sizeof(str2);
skel = test_varlen__open_and_load();
if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
return;
bss = skel->bss;
data = skel->data;
err = test_varlen__attach(skel);
if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
goto cleanup;
bss->test_pid = getpid();
/* trigger everything */
memcpy(bss->buf_in1, str1, size1);
memcpy(bss->buf_in2, str2, size2);
bss->capture = true;
usleep(1);
bss->capture = false;
CHECK_VAL(bss->payload1_len1, size1);
CHECK_VAL(bss->payload1_len2, size2);
CHECK_VAL(bss->total1, size1 + size2);
CHECK(memcmp(bss->payload1, exp_str, size1 + size2), "content_check",
"doesn't match!");
CHECK_VAL(data->payload2_len1, size1);
CHECK_VAL(data->payload2_len2, size2);
CHECK_VAL(data->total2, size1 + size2);
CHECK(memcmp(data->payload2, exp_str, size1 + size2), "content_check",
"doesn't match!");
CHECK_VAL(data->payload3_len1, size1);
CHECK_VAL(data->payload3_len2, size2);
CHECK_VAL(data->total3, size1 + size2);
CHECK(memcmp(data->payload3, exp_str, size1 + size2), "content_check",
"doesn't match!");
CHECK_VAL(data->payload4_len1, size1);
CHECK_VAL(data->payload4_len2, size2);
CHECK_VAL(data->total4, size1 + size2);
CHECK(memcmp(data->payload4, exp_str, size1 + size2), "content_check",
"doesn't match!");
cleanup:
test_varlen__destroy(skel);
}

View File

@@ -0,0 +1,70 @@
// SPDX-License-Identifier: GPL-2.0
#include <uapi/linux/bpf.h>
#include <linux/if_link.h>
#include <test_progs.h>
#include "test_xdp_with_cpumap_helpers.skel.h"
#define IFINDEX_LO 1
void test_xdp_with_cpumap_helpers(void)
{
struct test_xdp_with_cpumap_helpers *skel;
struct bpf_prog_info info = {};
struct bpf_cpumap_val val = {
.qsize = 192,
};
__u32 duration = 0, idx = 0;
__u32 len = sizeof(info);
int err, prog_fd, map_fd;
skel = test_xdp_with_cpumap_helpers__open_and_load();
if (CHECK_FAIL(!skel)) {
perror("test_xdp_with_cpumap_helpers__open_and_load");
return;
}
/* a program using a CPUMAP whose entries can hold programs
 * cannot be attached in generic (SKB-mode) XDP
 */
prog_fd = bpf_program__fd(skel->progs.xdp_redir_prog);
err = bpf_set_link_xdp_fd(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE);
CHECK(err == 0, "Generic attach of program with 8-byte CPUMAP",
"should have failed\n");
prog_fd = bpf_program__fd(skel->progs.xdp_dummy_cm);
map_fd = bpf_map__fd(skel->maps.cpu_map);
err = bpf_obj_get_info_by_fd(prog_fd, &info, &len);
if (CHECK_FAIL(err))
goto out_close;
val.bpf_prog.fd = prog_fd;
err = bpf_map_update_elem(map_fd, &idx, &val, 0);
CHECK(err, "Add program to cpumap entry", "err %d errno %d\n",
err, errno);
err = bpf_map_lookup_elem(map_fd, &idx, &val);
CHECK(err, "Read cpumap entry", "err %d errno %d\n", err, errno);
CHECK(info.id != val.bpf_prog.id, "Expected program id in cpumap entry",
"expected %u read %u\n", info.id, val.bpf_prog.id);
/* cannot attach a BPF_XDP_CPUMAP program to a device */
err = bpf_set_link_xdp_fd(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE);
CHECK(err == 0, "Attach of BPF_XDP_CPUMAP program",
"should have failed\n");
val.qsize = 192;
val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_prog);
err = bpf_map_update_elem(map_fd, &idx, &val, 0);
CHECK(err == 0, "Add non-BPF_XDP_CPUMAP program to cpumap entry",
"should have failed\n");
out_close:
test_xdp_with_cpumap_helpers__destroy(skel);
}
void test_xdp_cpumap_attach(void)
{
if (test__start_subtest("cpumap_with_progs"))
test_xdp_with_cpumap_helpers();
}

View File

@@ -0,0 +1,151 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <uapi/linux/if_link.h>
#include <test_progs.h>
#include "test_xdp_link.skel.h"
#define IFINDEX_LO 1
void test_xdp_link(void)
{
__u32 duration = 0, id1, id2, id0 = 0, prog_fd1, prog_fd2, err;
DECLARE_LIBBPF_OPTS(bpf_xdp_set_link_opts, opts, .old_fd = -1);
struct test_xdp_link *skel1 = NULL, *skel2 = NULL;
struct bpf_link_info link_info;
struct bpf_prog_info prog_info;
struct bpf_link *link;
__u32 link_info_len = sizeof(link_info);
__u32 prog_info_len = sizeof(prog_info);
skel1 = test_xdp_link__open_and_load();
if (CHECK(!skel1, "skel_load", "skeleton open and load failed\n"))
goto cleanup;
prog_fd1 = bpf_program__fd(skel1->progs.xdp_handler);
skel2 = test_xdp_link__open_and_load();
if (CHECK(!skel2, "skel_load", "skeleton open and load failed\n"))
goto cleanup;
prog_fd2 = bpf_program__fd(skel2->progs.xdp_handler);
memset(&prog_info, 0, sizeof(prog_info));
err = bpf_obj_get_info_by_fd(prog_fd1, &prog_info, &prog_info_len);
if (CHECK(err, "fd_info1", "failed %d\n", -errno))
goto cleanup;
id1 = prog_info.id;
memset(&prog_info, 0, sizeof(prog_info));
err = bpf_obj_get_info_by_fd(prog_fd2, &prog_info, &prog_info_len);
if (CHECK(err, "fd_info2", "failed %d\n", -errno))
goto cleanup;
id2 = prog_info.id;
/* set initial prog attachment */
err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, prog_fd1, XDP_FLAGS_REPLACE, &opts);
if (CHECK(err, "fd_attach", "initial prog attach failed: %d\n", err))
goto cleanup;
/* validate prog ID */
err = bpf_get_link_xdp_id(IFINDEX_LO, &id0, 0);
CHECK(err || id0 != id1, "id1_check",
"loaded prog id %u != id1 %u, err %d", id0, id1, err);
/* BPF link is not allowed to replace prog attachment */
link = bpf_program__attach_xdp(skel1->progs.xdp_handler, IFINDEX_LO);
if (CHECK(!IS_ERR(link), "link_attach_fail", "unexpected success\n")) {
bpf_link__destroy(link);
/* best-effort detach prog */
opts.old_fd = prog_fd1;
bpf_set_link_xdp_fd_opts(IFINDEX_LO, -1, XDP_FLAGS_REPLACE, &opts);
goto cleanup;
}
/* detach BPF program */
opts.old_fd = prog_fd1;
err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, -1, XDP_FLAGS_REPLACE, &opts);
if (CHECK(err, "prog_detach", "failed %d\n", err))
goto cleanup;
/* now BPF link should attach successfully */
link = bpf_program__attach_xdp(skel1->progs.xdp_handler, IFINDEX_LO);
if (CHECK(IS_ERR(link), "link_attach", "failed: %ld\n", PTR_ERR(link)))
goto cleanup;
skel1->links.xdp_handler = link;
/* validate prog ID */
err = bpf_get_link_xdp_id(IFINDEX_LO, &id0, 0);
if (CHECK(err || id0 != id1, "id1_check",
"loaded prog id %u != id1 %u, err %d", id0, id1, err))
goto cleanup;
/* BPF prog attach is not allowed to replace BPF link */
opts.old_fd = prog_fd1;
err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, prog_fd2, XDP_FLAGS_REPLACE, &opts);
if (CHECK(!err, "prog_attach_fail", "unexpected success\n"))
goto cleanup;
/* Can't force-update when BPF link is active */
err = bpf_set_link_xdp_fd(IFINDEX_LO, prog_fd2, 0);
if (CHECK(!err, "prog_update_fail", "unexpected success\n"))
goto cleanup;
/* Can't force-detach when BPF link is active */
err = bpf_set_link_xdp_fd(IFINDEX_LO, -1, 0);
if (CHECK(!err, "prog_detach_fail", "unexpected success\n"))
goto cleanup;
/* BPF link is not allowed to replace another BPF link */
link = bpf_program__attach_xdp(skel2->progs.xdp_handler, IFINDEX_LO);
if (CHECK(!IS_ERR(link), "link_attach_fail", "unexpected success\n")) {
bpf_link__destroy(link);
goto cleanup;
}
bpf_link__destroy(skel1->links.xdp_handler);
skel1->links.xdp_handler = NULL;
/* new link attach should succeed */
link = bpf_program__attach_xdp(skel2->progs.xdp_handler, IFINDEX_LO);
if (CHECK(IS_ERR(link), "link_attach", "failed: %ld\n", PTR_ERR(link)))
goto cleanup;
skel2->links.xdp_handler = link;
err = bpf_get_link_xdp_id(IFINDEX_LO, &id0, 0);
if (CHECK(err || id0 != id2, "id2_check",
"loaded prog id %u != id2 %u, err %d", id0, id1, err))
goto cleanup;
/* updating program under active BPF link works as expected */
err = bpf_link__update_program(link, skel1->progs.xdp_handler);
if (CHECK(err, "link_upd", "failed: %d\n", err))
goto cleanup;
memset(&link_info, 0, sizeof(link_info));
err = bpf_obj_get_info_by_fd(bpf_link__fd(link), &link_info, &link_info_len);
if (CHECK(err, "link_info", "failed: %d\n", err))
goto cleanup;
CHECK(link_info.type != BPF_LINK_TYPE_XDP, "link_type",
"got %u != exp %u\n", link_info.type, BPF_LINK_TYPE_XDP);
CHECK(link_info.prog_id != id1, "link_prog_id",
"got %u != exp %u\n", link_info.prog_id, id1);
CHECK(link_info.xdp.ifindex != IFINDEX_LO, "link_ifindex",
"got %u != exp %u\n", link_info.xdp.ifindex, IFINDEX_LO);
err = bpf_link__detach(link);
if (CHECK(err, "link_detach", "failed %d\n", err))
goto cleanup;
memset(&link_info, 0, sizeof(link_info));
err = bpf_obj_get_info_by_fd(bpf_link__fd(link), &link_info, &link_info_len);
if (CHECK(err, "link_info", "failed: %d\n", err))
goto cleanup;
CHECK(link_info.prog_id != id1, "link_prog_id",
"got %u != exp %u\n", link_info.prog_id, id1);
/* ifindex should be zeroed out */
CHECK(link_info.xdp.ifindex != 0, "link_ifindex",
"got %u != exp %u\n", link_info.xdp.ifindex, 0);
cleanup:
test_xdp_link__destroy(skel1);
test_xdp_link__destroy(skel2);
}

View File

@@ -0,0 +1,98 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2020 Facebook */
/* "undefine" structs in vmlinux.h, because we "override" them below */
#define bpf_iter_meta bpf_iter_meta___not_used
#define bpf_iter__bpf_map bpf_iter__bpf_map___not_used
#define bpf_iter__ipv6_route bpf_iter__ipv6_route___not_used
#define bpf_iter__netlink bpf_iter__netlink___not_used
#define bpf_iter__task bpf_iter__task___not_used
#define bpf_iter__task_file bpf_iter__task_file___not_used
#define bpf_iter__tcp bpf_iter__tcp___not_used
#define tcp6_sock tcp6_sock___not_used
#define bpf_iter__udp bpf_iter__udp___not_used
#define udp6_sock udp6_sock___not_used
#define bpf_iter__bpf_map_elem bpf_iter__bpf_map_elem___not_used
#define bpf_iter__bpf_sk_storage_map bpf_iter__bpf_sk_storage_map___not_used
#include "vmlinux.h"
#undef bpf_iter_meta
#undef bpf_iter__bpf_map
#undef bpf_iter__ipv6_route
#undef bpf_iter__netlink
#undef bpf_iter__task
#undef bpf_iter__task_file
#undef bpf_iter__tcp
#undef tcp6_sock
#undef bpf_iter__udp
#undef udp6_sock
#undef bpf_iter__bpf_map_elem
#undef bpf_iter__bpf_sk_storage_map
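/* The minimal definitions below shadow the vmlinux.h types "undefined"
 * above. __attribute__((preserve_access_index)) makes every field access
 * CO-RE-relocatable, so libbpf fixes up the offsets against the running
 * kernel's BTF at program load time.
 */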
struct bpf_iter_meta {
struct seq_file *seq;
__u64 session_id;
__u64 seq_num;
} __attribute__((preserve_access_index));
struct bpf_iter__ipv6_route {
struct bpf_iter_meta *meta;
struct fib6_info *rt;
} __attribute__((preserve_access_index));
struct bpf_iter__netlink {
struct bpf_iter_meta *meta;
struct netlink_sock *sk;
} __attribute__((preserve_access_index));
struct bpf_iter__task {
struct bpf_iter_meta *meta;
struct task_struct *task;
} __attribute__((preserve_access_index));
struct bpf_iter__task_file {
struct bpf_iter_meta *meta;
struct task_struct *task;
__u32 fd;
struct file *file;
} __attribute__((preserve_access_index));
struct bpf_iter__bpf_map {
struct bpf_iter_meta *meta;
struct bpf_map *map;
} __attribute__((preserve_access_index));
struct bpf_iter__tcp {
struct bpf_iter_meta *meta;
struct sock_common *sk_common;
uid_t uid;
} __attribute__((preserve_access_index));
struct tcp6_sock {
struct tcp_sock tcp;
struct ipv6_pinfo inet6;
} __attribute__((preserve_access_index));
struct bpf_iter__udp {
struct bpf_iter_meta *meta;
struct udp_sock *udp_sk;
uid_t uid __attribute__((aligned(8)));
int bucket __attribute__((aligned(8)));
} __attribute__((preserve_access_index));
struct udp6_sock {
struct udp_sock udp;
struct ipv6_pinfo inet6;
} __attribute__((preserve_access_index));
struct bpf_iter__bpf_map_elem {
struct bpf_iter_meta *meta;
struct bpf_map *map;
void *key;
void *value;
};
struct bpf_iter__bpf_sk_storage_map {
struct bpf_iter_meta *meta;
struct bpf_map *map;
struct sock *sk;
void *value;
};

View File

@@ -0,0 +1,40 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
struct key_t {
int a;
int b;
int c;
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 3);
__type(key, __u32);
__type(value, __u64);
} arraymap1 SEC(".maps");
__u32 key_sum = 0;
__u64 val_sum = 0;
SEC("iter/bpf_map_elem")
int dump_bpf_array_map(struct bpf_iter__bpf_map_elem *ctx)
{
__u32 *key = ctx->key;
__u64 *val = ctx->value;
if (key == (void *)0 || val == (void *)0)
return 0;
bpf_seq_write(ctx->meta->seq, key, sizeof(__u32));
bpf_seq_write(ctx->meta->seq, val, sizeof(__u64));
key_sum += *key;
val_sum += *val;
*val = *key;
return 0;
}

View File

@@ -0,0 +1,100 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
struct key_t {
int a;
int b;
int c;
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 3);
__type(key, struct key_t);
__type(value, __u64);
} hashmap1 SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 3);
__type(key, __u64);
__type(value, __u64);
} hashmap2 SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 3);
__type(key, struct key_t);
__type(value, __u32);
} hashmap3 SEC(".maps");
/* will be set before the prog runs */
bool in_test_mode = 0;
/* will collect results during prog run */
__u32 key_sum_a = 0, key_sum_b = 0, key_sum_c = 0;
__u64 val_sum = 0;
SEC("iter/bpf_map_elem")
int dump_bpf_hash_map(struct bpf_iter__bpf_map_elem *ctx)
{
struct seq_file *seq = ctx->meta->seq;
__u32 seq_num = ctx->meta->seq_num;
struct bpf_map *map = ctx->map;
struct key_t *key = ctx->key;
__u64 *val = ctx->value;
if (in_test_mode) {
/* test mode is used by the selftests to
 * exercise the functionality of the bpf_hash_map iter.
 *
 * hashmap1 above has the correct key/value sizes
 * and will be accepted; hashmap2 and hashmap3
 * should be rejected because of their smaller
 * key/value sizes.
 */
if (key == (void *)0 || val == (void *)0)
return 0;
key_sum_a += key->a;
key_sum_b += key->b;
key_sum_c += key->c;
val_sum += *val;
return 0;
}
/* non-test mode, the map is prepared with the
* below bpftool command sequence:
* bpftool map create /sys/fs/bpf/m1 type hash \
* key 12 value 8 entries 3 name map1
* bpftool map update id 77 key 0 0 0 1 0 0 0 0 0 0 0 1 \
* value 0 0 0 1 0 0 0 1
* bpftool map update id 77 key 0 0 0 1 0 0 0 0 0 0 0 2 \
* value 0 0 0 1 0 0 0 2
* The bpftool iter command line:
* bpftool iter pin ./bpf_iter_bpf_hash_map.o /sys/fs/bpf/p1 \
* map id 77
* The below output will be:
* map dump starts
* 77: (1000000 0 2000000) (200000001000000)
* 77: (1000000 0 1000000) (100000001000000)
* map dump ends
*/
if (seq_num == 0)
BPF_SEQ_PRINTF(seq, "map dump starts\n");
if (key == (void *)0 || val == (void *)0) {
BPF_SEQ_PRINTF(seq, "map dump ends\n");
return 0;
}
BPF_SEQ_PRINTF(seq, "%d: (%x %d %x) (%llx)\n", map->id,
key->a, key->b, key->c, *val);
return 0;
}

View File

@@ -1,27 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
/* "undefine" structs in vmlinux.h, because we "override" them below */
#define bpf_iter_meta bpf_iter_meta___not_used
#define bpf_iter__bpf_map bpf_iter__bpf_map___not_used
#include "vmlinux.h"
#undef bpf_iter_meta
#undef bpf_iter__bpf_map
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
struct bpf_iter_meta {
struct seq_file *seq;
__u64 session_id;
__u64 seq_num;
} __attribute__((preserve_access_index));
struct bpf_iter__bpf_map {
struct bpf_iter_meta *meta;
struct bpf_map *map;
} __attribute__((preserve_access_index));
SEC("iter/bpf_map")
int dump_bpf_map(struct bpf_iter__bpf_map *ctx)
{

View File

@@ -0,0 +1,46 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
struct key_t {
int a;
int b;
int c;
};
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, 3);
__type(key, __u32);
__type(value, __u32);
} arraymap1 SEC(".maps");
/* will be set before the prog runs */
volatile const __u32 num_cpus = 0;
__u32 key_sum = 0, val_sum = 0;
SEC("iter/bpf_map_elem")
int dump_bpf_percpu_array_map(struct bpf_iter__bpf_map_elem *ctx)
{
__u32 *key = ctx->key;
void *pptr = ctx->value;
__u32 step;
int i;
if (key == (void *)0 || pptr == (void *)0)
return 0;
key_sum += *key;
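	/* per-CPU values are laid out in 8-byte aligned slots, so step
	 * by 8 even though each value is only a 4-byte __u32
	 */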
step = 8;
for (i = 0; i < num_cpus; i++) {
val_sum += *(__u32 *)pptr;
pptr += step;
}
return 0;
}

View File

@@ -0,0 +1,50 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
struct key_t {
int a;
int b;
int c;
};
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
__uint(max_entries, 3);
__type(key, struct key_t);
__type(value, __u32);
} hashmap1 SEC(".maps");
/* will be set before the prog runs */
volatile const __u32 num_cpus = 0;
/* will collect results during prog run */
__u32 key_sum_a = 0, key_sum_b = 0, key_sum_c = 0;
__u32 val_sum = 0;
SEC("iter/bpf_map_elem")
int dump_bpf_percpu_hash_map(struct bpf_iter__bpf_map_elem *ctx)
{
struct key_t *key = ctx->key;
void *pptr = ctx->value;
__u32 step;
int i;
if (key == (void *)0 || pptr == (void *)0)
return 0;
key_sum_a += key->a;
key_sum_b += key->b;
key_sum_c += key->c;
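	/* per-CPU value slots are 8-byte aligned, hence the stride of 8 */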
step = 8;
for (i = 0; i < num_cpus; i++) {
val_sum += *(__u32 *)pptr;
pptr += step;
}
return 0;
}

View File

@@ -0,0 +1,34 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, int);
} sk_stg_map SEC(".maps");
__u32 val_sum = 0;
__u32 ipv6_sk_count = 0;
SEC("iter/bpf_sk_storage_map")
int dump_bpf_sk_storage_map(struct bpf_iter__bpf_sk_storage_map *ctx)
{
struct sock *sk = ctx->sk;
__u32 *val = ctx->value;
if (sk == (void *)0 || val == (void *)0)
return 0;
if (sk->sk_family == AF_INET6)
ipv6_sk_count++;
val_sum += *val;
return 0;
}

View File

@@ -1,35 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
/* "undefine" structs in vmlinux.h, because we "override" them below */
#define bpf_iter_meta bpf_iter_meta___not_used
#define bpf_iter__ipv6_route bpf_iter__ipv6_route___not_used
#include "vmlinux.h"
#undef bpf_iter_meta
#undef bpf_iter__ipv6_route
#include "bpf_iter.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
struct bpf_iter_meta {
struct seq_file *seq;
__u64 session_id;
__u64 seq_num;
} __attribute__((preserve_access_index));
struct bpf_iter__ipv6_route {
struct bpf_iter_meta *meta;
struct fib6_info *rt;
} __attribute__((preserve_access_index));
char _license[] SEC("license") = "GPL";
extern bool CONFIG_IPV6_SUBTREES __kconfig __weak;
#define RTF_GATEWAY 0x0002
#define IFNAMSIZ 16
#define fib_nh_gw_family nh_common.nhc_gw_family
#define fib_nh_gw6 nh_common.nhc_gw.ipv6
#define fib_nh_dev nh_common.nhc_dev
SEC("iter/ipv6_route")
int dump_ipv6_route(struct bpf_iter__ipv6_route *ctx)
{

View File

@@ -1,30 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
/* "undefine" structs in vmlinux.h, because we "override" them below */
#define bpf_iter_meta bpf_iter_meta___not_used
#define bpf_iter__netlink bpf_iter__netlink___not_used
#include "vmlinux.h"
#undef bpf_iter_meta
#undef bpf_iter__netlink
#include "bpf_iter.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
#define sk_rmem_alloc sk_backlog.rmem_alloc
#define sk_refcnt __sk_common.skc_refcnt
struct bpf_iter_meta {
struct seq_file *seq;
__u64 session_id;
__u64 seq_num;
} __attribute__((preserve_access_index));
struct bpf_iter__netlink {
struct bpf_iter_meta *meta;
struct netlink_sock *sk;
} __attribute__((preserve_access_index));
static __attribute__((noinline)) struct inode *SOCK_INODE(struct socket *socket)
{
return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
@@ -54,10 +36,10 @@ int dump_netlink(struct bpf_iter__netlink *ctx)
if (!nlk->groups) {
group = 0;
} else {
/* FIXME: temporary use bpf_probe_read here, needs
/* FIXME: temporary use bpf_probe_read_kernel here, needs
* verifier support to do direct access.
*/
bpf_probe_read(&group, sizeof(group), &nlk->groups[0]);
bpf_probe_read_kernel(&group, sizeof(group), &nlk->groups[0]);
}
BPF_SEQ_PRINTF(seq, "%-10u %08x %-8d %-8d %-5d %-8d ",
nlk->portid, (u32)group,
@@ -74,7 +56,7 @@ int dump_netlink(struct bpf_iter__netlink *ctx)
* with current verifier.
*/
inode = SOCK_INODE(sk);
bpf_probe_read(&ino, sizeof(ino), &inode->i_ino);
bpf_probe_read_kernel(&ino, sizeof(ino), &inode->i_ino);
}
BPF_SEQ_PRINTF(seq, "%-8u %-8lu\n", s->sk_drops.counter, ino);

View File

@@ -1,27 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
/* "undefine" structs in vmlinux.h, because we "override" them below */
#define bpf_iter_meta bpf_iter_meta___not_used
#define bpf_iter__task bpf_iter__task___not_used
#include "vmlinux.h"
#undef bpf_iter_meta
#undef bpf_iter__task
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
struct bpf_iter_meta {
struct seq_file *seq;
__u64 session_id;
__u64 seq_num;
} __attribute__((preserve_access_index));
struct bpf_iter__task {
struct bpf_iter_meta *meta;
struct task_struct *task;
} __attribute__((preserve_access_index));
SEC("iter/task")
int dump_task(struct bpf_iter__task *ctx)
{

View File

@@ -1,29 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
/* "undefine" structs in vmlinux.h, because we "override" them below */
#define bpf_iter_meta bpf_iter_meta___not_used
#define bpf_iter__task_file bpf_iter__task_file___not_used
#include "vmlinux.h"
#undef bpf_iter_meta
#undef bpf_iter__task_file
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
struct bpf_iter_meta {
struct seq_file *seq;
__u64 session_id;
__u64 seq_num;
} __attribute__((preserve_access_index));
struct bpf_iter__task_file {
struct bpf_iter_meta *meta;
struct task_struct *task;
__u32 fd;
struct file *file;
} __attribute__((preserve_access_index));
SEC("iter/task_file")
int dump_task_file(struct bpf_iter__task_file *ctx)
{

View File

@@ -0,0 +1,37 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
#define MAX_STACK_TRACE_DEPTH 64
unsigned long entries[MAX_STACK_TRACE_DEPTH] = {};
#define SIZE_OF_ULONG (sizeof(unsigned long))
SEC("iter/task")
int dump_task_stack(struct bpf_iter__task *ctx)
{
struct seq_file *seq = ctx->meta->seq;
struct task_struct *task = ctx->task;
long i, retlen;
if (task == (void *)0)
return 0;
retlen = bpf_get_task_stack(task, entries,
MAX_STACK_TRACE_DEPTH * SIZE_OF_ULONG, 0);
if (retlen < 0)
return 0;
BPF_SEQ_PRINTF(seq, "pid: %8u num_entries: %8u\n", task->pid,
retlen / SIZE_OF_ULONG);
for (i = 0; i < MAX_STACK_TRACE_DEPTH; i++) {
if (retlen > i * SIZE_OF_ULONG)
BPF_SEQ_PRINTF(seq, "[<0>] %pB\n", (void *)entries[i]);
}
BPF_SEQ_PRINTF(seq, "\n");
return 0;
}
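For reference, a minimal user-space consumer for this iterator might look like the sketch below (bpf_program__attach_iter() and bpf_iter_create() are the libbpf calls the bpf_iter selftests use; the skeleton name is assumed to follow the usual *_skel.h convention for this file):

	struct bpf_iter_task_stack *skel;	/* assumed skeleton name */
	struct bpf_link *link;
	char buf[4096];
	int iter_fd, len;

	skel = bpf_iter_task_stack__open_and_load();
	if (!skel)
		return;
	link = bpf_program__attach_iter(skel->progs.dump_task_stack, NULL);
	if (!IS_ERR(link)) {
		iter_fd = bpf_iter_create(bpf_link__fd(link));
		if (iter_fd >= 0) {
			/* each read() returns a chunk of the seq_file text */
			while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
				fwrite(buf, 1, len, stdout);
			close(iter_fd);
		}
		bpf_link__destroy(link);
	}
	bpf_iter_task_stack__destroy(skel);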

View File

@@ -0,0 +1,234 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_endian.h>
char _license[] SEC("license") = "GPL";
static int hlist_unhashed_lockless(const struct hlist_node *h)
{
return !(h->pprev);
}
static int timer_pending(const struct timer_list *timer)
{
return !hlist_unhashed_lockless(&timer->entry);
}
extern unsigned CONFIG_HZ __kconfig;
#define USER_HZ 100
#define NSEC_PER_SEC 1000000000ULL
static clock_t jiffies_to_clock_t(unsigned long x)
{
/* The implementation here is tailored to a particular
 * setting of USER_HZ.
 */
u64 tick_nsec = (NSEC_PER_SEC + CONFIG_HZ/2) / CONFIG_HZ;
u64 user_hz_nsec = NSEC_PER_SEC / USER_HZ;
if ((tick_nsec % user_hz_nsec) == 0) {
if (CONFIG_HZ < USER_HZ)
return x * (USER_HZ / CONFIG_HZ);
else
return x / (CONFIG_HZ / USER_HZ);
}
return x * tick_nsec/user_hz_nsec;
}
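/* Worked example, assuming USER_HZ == 100 (user_hz_nsec == 10000000):
 * with CONFIG_HZ == 1000, tick_nsec == 1000000, the modulo test fails
 * and we return x * 1000000 / 10000000 == x / 10; with CONFIG_HZ == 100,
 * tick_nsec == 10000000 divides evenly and the fast path returns x
 * unchanged.
 */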
static clock_t jiffies_delta_to_clock_t(long delta)
{
if (delta <= 0)
return 0;
return jiffies_to_clock_t(delta);
}
static long sock_i_ino(const struct sock *sk)
{
const struct socket *sk_socket = sk->sk_socket;
const struct inode *inode;
unsigned long ino;
if (!sk_socket)
return 0;
inode = &container_of(sk_socket, struct socket_alloc, socket)->vfs_inode;
bpf_probe_read_kernel(&ino, sizeof(ino), &inode->i_ino);
return ino;
}
static bool
inet_csk_in_pingpong_mode(const struct inet_connection_sock *icsk)
{
return icsk->icsk_ack.pingpong >= TCP_PINGPONG_THRESH;
}
static bool tcp_in_initial_slowstart(const struct tcp_sock *tcp)
{
return tcp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}
static int dump_tcp_sock(struct seq_file *seq, struct tcp_sock *tp,
uid_t uid, __u32 seq_num)
{
const struct inet_connection_sock *icsk;
const struct fastopen_queue *fastopenq;
const struct inet_sock *inet;
unsigned long timer_expires;
const struct sock *sp;
__u16 destp, srcp;
__be32 dest, src;
int timer_active;
int rx_queue;
int state;
icsk = &tp->inet_conn;
inet = &icsk->icsk_inet;
sp = &inet->sk;
fastopenq = &icsk->icsk_accept_queue.fastopenq;
dest = inet->inet_daddr;
src = inet->inet_rcv_saddr;
destp = bpf_ntohs(inet->inet_dport);
srcp = bpf_ntohs(inet->inet_sport);
if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
timer_active = 1;
timer_expires = icsk->icsk_timeout;
} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
timer_active = 4;
timer_expires = icsk->icsk_timeout;
} else if (timer_pending(&sp->sk_timer)) {
timer_active = 2;
timer_expires = sp->sk_timer.expires;
} else {
timer_active = 0;
timer_expires = bpf_jiffies64();
}
state = sp->sk_state;
if (state == TCP_LISTEN) {
rx_queue = sp->sk_ack_backlog;
} else {
rx_queue = tp->rcv_nxt - tp->copied_seq;
if (rx_queue < 0)
rx_queue = 0;
}
BPF_SEQ_PRINTF(seq, "%4d: %08X:%04X %08X:%04X ",
seq_num, src, srcp, dest, destp);
BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d ",
state,
tp->write_seq - tp->snd_una, rx_queue,
timer_active,
jiffies_delta_to_clock_t(timer_expires - bpf_jiffies64()),
icsk->icsk_retransmits, uid,
icsk->icsk_probes_out,
sock_i_ino(sp),
sp->sk_refcnt.refs.counter);
BPF_SEQ_PRINTF(seq, "%pK %lu %lu %u %u %d\n",
tp,
jiffies_to_clock_t(icsk->icsk_rto),
jiffies_to_clock_t(icsk->icsk_ack.ato),
(icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(icsk),
tp->snd_cwnd,
state == TCP_LISTEN ? fastopenq->max_qlen
: (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
);
return 0;
}
static int dump_tw_sock(struct seq_file *seq, struct tcp_timewait_sock *ttw,
uid_t uid, __u32 seq_num)
{
struct inet_timewait_sock *tw = &ttw->tw_sk;
__u16 destp, srcp;
__be32 dest, src;
long delta;
delta = tw->tw_timer.expires - bpf_jiffies64();
dest = tw->tw_daddr;
src = tw->tw_rcv_saddr;
destp = bpf_ntohs(tw->tw_dport);
srcp = bpf_ntohs(tw->tw_sport);
BPF_SEQ_PRINTF(seq, "%4d: %08X:%04X %08X:%04X ",
seq_num, src, srcp, dest, destp);
BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
tw->tw_substate, 0, 0,
3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
tw->tw_refcnt.refs.counter, tw);
return 0;
}
static int dump_req_sock(struct seq_file *seq, struct tcp_request_sock *treq,
uid_t uid, __u32 seq_num)
{
struct inet_request_sock *irsk = &treq->req;
struct request_sock *req = &irsk->req;
long ttd;
ttd = req->rsk_timer.expires - bpf_jiffies64();
if (ttd < 0)
ttd = 0;
BPF_SEQ_PRINTF(seq, "%4d: %08X:%04X %08X:%04X ",
seq_num, irsk->ir_loc_addr,
irsk->ir_num, irsk->ir_rmt_addr,
bpf_ntohs(irsk->ir_rmt_port));
BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
TCP_SYN_RECV, 0, 0, 1, jiffies_to_clock_t(ttd),
req->num_timeout, uid, 0, 0, 0, req);
return 0;
}
SEC("iter/tcp")
int dump_tcp4(struct bpf_iter__tcp *ctx)
{
struct sock_common *sk_common = ctx->sk_common;
struct seq_file *seq = ctx->meta->seq;
struct tcp_timewait_sock *tw;
struct tcp_request_sock *req;
struct tcp_sock *tp;
uid_t uid = ctx->uid;
__u32 seq_num;
if (sk_common == (void *)0)
return 0;
seq_num = ctx->meta->seq_num;
if (seq_num == 0)
BPF_SEQ_PRINTF(seq, " sl "
"local_address "
"rem_address "
"st tx_queue rx_queue tr tm->when retrnsmt"
" uid timeout inode\n");
if (sk_common->skc_family != AF_INET)
return 0;
tp = bpf_skc_to_tcp_sock(sk_common);
if (tp)
return dump_tcp_sock(seq, tp, uid, seq_num);
tw = bpf_skc_to_tcp_timewait_sock(sk_common);
if (tw)
return dump_tw_sock(seq, tw, uid, seq_num);
req = bpf_skc_to_tcp_request_sock(sk_common);
if (req)
return dump_req_sock(seq, req, uid, seq_num);
return 0;
}

View File

@@ -0,0 +1,250 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_endian.h>
char _license[] SEC("license") = "GPL";
static int hlist_unhashed_lockless(const struct hlist_node *h)
{
return !(h->pprev);
}
static int timer_pending(const struct timer_list *timer)
{
return !hlist_unhashed_lockless(&timer->entry);
}
extern unsigned CONFIG_HZ __kconfig;
#define USER_HZ 100
#define NSEC_PER_SEC 1000000000ULL
static clock_t jiffies_to_clock_t(unsigned long x)
{
/* The implementation here is tailored to a particular
 * setting of USER_HZ.
 */
u64 tick_nsec = (NSEC_PER_SEC + CONFIG_HZ/2) / CONFIG_HZ;
u64 user_hz_nsec = NSEC_PER_SEC / USER_HZ;
if ((tick_nsec % user_hz_nsec) == 0) {
if (CONFIG_HZ < USER_HZ)
return x * (USER_HZ / CONFIG_HZ);
else
return x / (CONFIG_HZ / USER_HZ);
}
return x * tick_nsec/user_hz_nsec;
}
static clock_t jiffies_delta_to_clock_t(long delta)
{
if (delta <= 0)
return 0;
return jiffies_to_clock_t(delta);
}
static long sock_i_ino(const struct sock *sk)
{
const struct socket *sk_socket = sk->sk_socket;
const struct inode *inode;
unsigned long ino;
if (!sk_socket)
return 0;
inode = &container_of(sk_socket, struct socket_alloc, socket)->vfs_inode;
bpf_probe_read_kernel(&ino, sizeof(ino), &inode->i_ino);
return ino;
}
static bool
inet_csk_in_pingpong_mode(const struct inet_connection_sock *icsk)
{
return icsk->icsk_ack.pingpong >= TCP_PINGPONG_THRESH;
}
static bool tcp_in_initial_slowstart(const struct tcp_sock *tcp)
{
return tcp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}
static int dump_tcp6_sock(struct seq_file *seq, struct tcp6_sock *tp,
uid_t uid, __u32 seq_num)
{
const struct inet_connection_sock *icsk;
const struct fastopen_queue *fastopenq;
const struct in6_addr *dest, *src;
const struct inet_sock *inet;
unsigned long timer_expires;
const struct sock *sp;
__u16 destp, srcp;
int timer_active;
int rx_queue;
int state;
icsk = &tp->tcp.inet_conn;
inet = &icsk->icsk_inet;
sp = &inet->sk;
fastopenq = &icsk->icsk_accept_queue.fastopenq;
dest = &sp->sk_v6_daddr;
src = &sp->sk_v6_rcv_saddr;
destp = bpf_ntohs(inet->inet_dport);
srcp = bpf_ntohs(inet->inet_sport);
if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
timer_active = 1;
timer_expires = icsk->icsk_timeout;
} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
timer_active = 4;
timer_expires = icsk->icsk_timeout;
} else if (timer_pending(&sp->sk_timer)) {
timer_active = 2;
timer_expires = sp->sk_timer.expires;
} else {
timer_active = 0;
timer_expires = bpf_jiffies64();
}
state = sp->sk_state;
if (state == TCP_LISTEN) {
rx_queue = sp->sk_ack_backlog;
} else {
rx_queue = tp->tcp.rcv_nxt - tp->tcp.copied_seq;
if (rx_queue < 0)
rx_queue = 0;
}
BPF_SEQ_PRINTF(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X ",
seq_num,
src->s6_addr32[0], src->s6_addr32[1],
src->s6_addr32[2], src->s6_addr32[3], srcp,
dest->s6_addr32[0], dest->s6_addr32[1],
dest->s6_addr32[2], dest->s6_addr32[3], destp);
BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d ",
state,
tp->tcp.write_seq - tp->tcp.snd_una, rx_queue,
timer_active,
jiffies_delta_to_clock_t(timer_expires - bpf_jiffies64()),
icsk->icsk_retransmits, uid,
icsk->icsk_probes_out,
sock_i_ino(sp),
sp->sk_refcnt.refs.counter);
BPF_SEQ_PRINTF(seq, "%pK %lu %lu %u %u %d\n",
tp,
jiffies_to_clock_t(icsk->icsk_rto),
jiffies_to_clock_t(icsk->icsk_ack.ato),
(icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(icsk),
tp->tcp.snd_cwnd,
state == TCP_LISTEN ? fastopenq->max_qlen
: (tcp_in_initial_slowstart(&tp->tcp) ? -1
: tp->tcp.snd_ssthresh)
);
return 0;
}
static int dump_tw_sock(struct seq_file *seq, struct tcp_timewait_sock *ttw,
uid_t uid, __u32 seq_num)
{
struct inet_timewait_sock *tw = &ttw->tw_sk;
const struct in6_addr *dest, *src;
__u16 destp, srcp;
long delta;
delta = tw->tw_timer.expires - bpf_jiffies64();
dest = &tw->tw_v6_daddr;
src = &tw->tw_v6_rcv_saddr;
destp = bpf_ntohs(tw->tw_dport);
srcp = bpf_ntohs(tw->tw_sport);
BPF_SEQ_PRINTF(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X ",
seq_num,
src->s6_addr32[0], src->s6_addr32[1],
src->s6_addr32[2], src->s6_addr32[3], srcp,
dest->s6_addr32[0], dest->s6_addr32[1],
dest->s6_addr32[2], dest->s6_addr32[3], destp);
BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
tw->tw_substate, 0, 0,
3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
tw->tw_refcnt.refs.counter, tw);
return 0;
}
static int dump_req_sock(struct seq_file *seq, struct tcp_request_sock *treq,
uid_t uid, __u32 seq_num)
{
struct inet_request_sock *irsk = &treq->req;
struct request_sock *req = &irsk->req;
struct in6_addr *src, *dest;
long ttd;
ttd = req->rsk_timer.expires - bpf_jiffies64();
src = &irsk->ir_v6_loc_addr;
dest = &irsk->ir_v6_rmt_addr;
if (ttd < 0)
ttd = 0;
BPF_SEQ_PRINTF(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X ",
seq_num,
src->s6_addr32[0], src->s6_addr32[1],
src->s6_addr32[2], src->s6_addr32[3],
irsk->ir_num,
dest->s6_addr32[0], dest->s6_addr32[1],
dest->s6_addr32[2], dest->s6_addr32[3],
bpf_ntohs(irsk->ir_rmt_port));
BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
TCP_SYN_RECV, 0, 0, 1, jiffies_to_clock_t(ttd),
req->num_timeout, uid, 0, 0, 0, req);
return 0;
}
SEC("iter/tcp")
int dump_tcp6(struct bpf_iter__tcp *ctx)
{
struct sock_common *sk_common = ctx->sk_common;
struct seq_file *seq = ctx->meta->seq;
struct tcp_timewait_sock *tw;
struct tcp_request_sock *req;
struct tcp6_sock *tp;
uid_t uid = ctx->uid;
__u32 seq_num;
if (sk_common == (void *)0)
return 0;
seq_num = ctx->meta->seq_num;
if (seq_num == 0)
BPF_SEQ_PRINTF(seq, " sl "
"local_address "
"remote_address "
"st tx_queue rx_queue tr tm->when retrnsmt"
" uid timeout inode\n");
if (sk_common->skc_family != AF_INET6)
return 0;
tp = bpf_skc_to_tcp6_sock(sk_common);
if (tp)
return dump_tcp6_sock(seq, tp, uid, seq_num);
tw = bpf_skc_to_tcp_timewait_sock(sk_common);
if (tw)
return dump_tw_sock(seq, tw, uid, seq_num);
req = bpf_skc_to_tcp_request_sock(sk_common);
if (req)
return dump_req_sock(seq, req, uid, seq_num);
return 0;
}

View File

@@ -1,25 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#define bpf_iter_meta bpf_iter_meta___not_used
#define bpf_iter__task bpf_iter__task___not_used
#include "vmlinux.h"
#undef bpf_iter_meta
#undef bpf_iter__task
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
struct bpf_iter_meta {
struct seq_file *seq;
__u64 session_id;
__u64 seq_num;
} __attribute__((preserve_access_index));
struct bpf_iter__task {
struct bpf_iter_meta *meta;
struct task_struct *task;
} __attribute__((preserve_access_index));
SEC("iter/task")
int dump_task(struct bpf_iter__task *ctx)
{

View File

@@ -1,25 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#define bpf_iter_meta bpf_iter_meta___not_used
#define bpf_iter__bpf_map bpf_iter__bpf_map___not_used
#include "vmlinux.h"
#undef bpf_iter_meta
#undef bpf_iter__bpf_map
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
struct bpf_iter_meta {
struct seq_file *seq;
__u64 session_id;
__u64 seq_num;
} __attribute__((preserve_access_index));
struct bpf_iter__bpf_map {
struct bpf_iter_meta *meta;
struct bpf_map *map;
} __attribute__((preserve_access_index));
__u32 map1_id = 0, map2_id = 0;
__u32 map1_accessed = 0, map2_accessed = 0;
__u64 map1_seqnum = 0, map2_seqnum1 = 0, map2_seqnum2 = 0;

View File

@@ -0,0 +1,35 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
struct key_t {
int a;
int b;
int c;
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 3);
__type(key, struct key_t);
__type(value, __u64);
} hashmap1 SEC(".maps");
__u32 key_sum = 0;
SEC("iter/bpf_map_elem")
int dump_bpf_hash_map(struct bpf_iter__bpf_map_elem *ctx)
{
void *key = ctx->key;
if (key == (void *)0)
return 0;
/* out-of-bounds access w.r.t. hashmap1 */
key_sum += *(__u32 *)(key + sizeof(struct key_t));
return 0;
}

View File

@@ -0,0 +1,21 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
__u32 value_sum = 0;
SEC("iter/bpf_map_elem")
int dump_bpf_hash_map(struct bpf_iter__bpf_map_elem *ctx)
{
void *value = ctx->value;
if (value == (void *)0)
return 0;
/* negative offset, verifier failure. */
value_sum += *(__u32 *)(value - 4);
return 0;
}

View File

@@ -1,27 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2020 Facebook */
/* "undefine" structs in vmlinux.h, because we "override" them below */
#define bpf_iter_meta bpf_iter_meta___not_used
#define bpf_iter__task bpf_iter__task___not_used
#include "vmlinux.h"
#undef bpf_iter_meta
#undef bpf_iter__task
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
int count = 0;
struct bpf_iter_meta {
struct seq_file *seq;
__u64 session_id;
__u64 seq_num;
} __attribute__((preserve_access_index));
struct bpf_iter__task {
struct bpf_iter_meta *meta;
struct task_struct *task;
} __attribute__((preserve_access_index));
SEC("iter/task")
int dump_task(struct bpf_iter__task *ctx)
{

View File

@@ -0,0 +1,71 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_endian.h>
char _license[] SEC("license") = "GPL";
static long sock_i_ino(const struct sock *sk)
{
const struct socket *sk_socket = sk->sk_socket;
const struct inode *inode;
unsigned long ino;
if (!sk_socket)
return 0;
inode = &container_of(sk_socket, struct socket_alloc, socket)->vfs_inode;
bpf_probe_read_kernel(&ino, sizeof(ino), &inode->i_ino);
return ino;
}
SEC("iter/udp")
int dump_udp4(struct bpf_iter__udp *ctx)
{
struct seq_file *seq = ctx->meta->seq;
struct udp_sock *udp_sk = ctx->udp_sk;
struct inet_sock *inet;
__u16 srcp, destp;
__be32 dest, src;
__u32 seq_num;
int rqueue;
if (udp_sk == (void *)0)
return 0;
seq_num = ctx->meta->seq_num;
if (seq_num == 0)
BPF_SEQ_PRINTF(seq,
" sl local_address rem_address st tx_queue "
"rx_queue tr tm->when retrnsmt uid timeout "
"inode ref pointer drops\n");
/* filter out udp6 sockets */
inet = &udp_sk->inet;
if (inet->sk.sk_family == AF_INET6)
return 0;
dest = inet->inet_daddr;
src = inet->inet_rcv_saddr;
srcp = bpf_ntohs(inet->inet_sport);
destp = bpf_ntohs(inet->inet_dport);
rqueue = inet->sk.sk_rmem_alloc.counter - udp_sk->forward_deficit;
BPF_SEQ_PRINTF(seq, "%5d: %08X:%04X %08X:%04X ",
ctx->bucket, src, srcp, dest, destp);
BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u\n",
inet->sk.sk_state,
inet->sk.sk_wmem_alloc.refs.counter - 1,
rqueue,
0, 0L, 0, ctx->uid, 0,
sock_i_ino(&inet->sk),
inet->sk.sk_refcnt.refs.counter, udp_sk,
inet->sk.sk_drops.counter);
return 0;
}

View File

@@ -0,0 +1,79 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_endian.h>
char _license[] SEC("license") = "GPL";
#define IPV6_SEQ_DGRAM_HEADER \
" sl " \
"local_address " \
"remote_address " \
"st tx_queue rx_queue tr tm->when retrnsmt" \
" uid timeout inode ref pointer drops\n"
static long sock_i_ino(const struct sock *sk)
{
const struct socket *sk_socket = sk->sk_socket;
const struct inode *inode;
unsigned long ino;
if (!sk_socket)
return 0;
inode = &container_of(sk_socket, struct socket_alloc, socket)->vfs_inode;
bpf_probe_read_kernel(&ino, sizeof(ino), &inode->i_ino);
return ino;
}
SEC("iter/udp")
int dump_udp6(struct bpf_iter__udp *ctx)
{
struct seq_file *seq = ctx->meta->seq;
struct udp_sock *udp_sk = ctx->udp_sk;
const struct in6_addr *dest, *src;
struct udp6_sock *udp6_sk;
struct inet_sock *inet;
__u16 srcp, destp;
__u32 seq_num;
int rqueue;
if (udp_sk == (void *)0)
return 0;
seq_num = ctx->meta->seq_num;
if (seq_num == 0)
BPF_SEQ_PRINTF(seq, IPV6_SEQ_DGRAM_HEADER);
udp6_sk = bpf_skc_to_udp6_sock(udp_sk);
if (udp6_sk == (void *)0)
return 0;
inet = &udp_sk->inet;
srcp = bpf_ntohs(inet->inet_sport);
destp = bpf_ntohs(inet->inet_dport);
rqueue = inet->sk.sk_rmem_alloc.counter - udp_sk->forward_deficit;
dest = &inet->sk.sk_v6_daddr;
src = &inet->sk.sk_v6_rcv_saddr;
BPF_SEQ_PRINTF(seq, "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X ",
ctx->bucket,
src->s6_addr32[0], src->s6_addr32[1],
src->s6_addr32[2], src->s6_addr32[3], srcp,
dest->s6_addr32[0], dest->s6_addr32[1],
dest->s6_addr32[2], dest->s6_addr32[3], destp);
BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u\n",
inet->sk.sk_state,
inet->sk.sk_wmem_alloc.refs.counter - 1,
rqueue,
0, 0L, 0, ctx->uid, 0,
sock_i_ino(&inet->sk),
inet->sk.sk_refcnt.refs.counter, udp_sk,
inet->sk.sk_drops.counter);
return 0;
}

@@ -0,0 +1,51 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __BPF_TRACING_NET_H__
#define __BPF_TRACING_NET_H__
#define AF_INET 2
#define AF_INET6 10
#define ICSK_TIME_RETRANS 1
#define ICSK_TIME_PROBE0 3
#define ICSK_TIME_LOSS_PROBE 5
#define ICSK_TIME_REO_TIMEOUT 6
#define IFNAMSIZ 16
#define RTF_GATEWAY 0x0002
#define TCP_INFINITE_SSTHRESH 0x7fffffff
#define TCP_PINGPONG_THRESH 3
#define fib_nh_dev nh_common.nhc_dev
#define fib_nh_gw_family nh_common.nhc_gw_family
#define fib_nh_gw6 nh_common.nhc_gw.ipv6
#define inet_daddr sk.__sk_common.skc_daddr
#define inet_rcv_saddr sk.__sk_common.skc_rcv_saddr
#define inet_dport sk.__sk_common.skc_dport
#define ir_loc_addr req.__req_common.skc_rcv_saddr
#define ir_num req.__req_common.skc_num
#define ir_rmt_addr req.__req_common.skc_daddr
#define ir_rmt_port req.__req_common.skc_dport
#define ir_v6_rmt_addr req.__req_common.skc_v6_daddr
#define ir_v6_loc_addr req.__req_common.skc_v6_rcv_saddr
#define sk_family __sk_common.skc_family
#define sk_rmem_alloc sk_backlog.rmem_alloc
#define sk_refcnt __sk_common.skc_refcnt
#define sk_state __sk_common.skc_state
#define sk_v6_daddr __sk_common.skc_v6_daddr
#define sk_v6_rcv_saddr __sk_common.skc_v6_rcv_saddr
#define s6_addr32 in6_u.u6_addr32
#define tw_daddr __tw_common.skc_daddr
#define tw_rcv_saddr __tw_common.skc_rcv_saddr
#define tw_dport __tw_common.skc_dport
#define tw_refcnt __tw_common.skc_refcnt
#define tw_v6_daddr __tw_common.skc_v6_daddr
#define tw_v6_rcv_saddr __tw_common.skc_v6_rcv_saddr
#endif
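
These defines exist because the selftests compile against vmlinux.h, where fields like sk_family live inside __sk_common; restating the kernel's accessor macros here lets BPF programs keep the familiar spellings while CO-RE relocates the accesses. A minimal sketch of the idea, assuming the bpf_iter.h test header; the dump_family program is hypothetical:

/* Hypothetical sketch: with bpf_tracing_net.h, sk_family below expands
 * to __sk_common.skc_family and is relocated against the running kernel. */
#include "bpf_iter.h"
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

SEC("iter/udp")
int dump_family(struct bpf_iter__udp *ctx)
{
	struct udp_sock *udp_sk = ctx->udp_sk;

	/* report only IPv4 UDP sockets */
	if (udp_sk && udp_sk->inet.sk.sk_family == AF_INET)
		bpf_printk("ipv4 udp socket\n");
	return 0;
}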

@@ -0,0 +1,50 @@
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
struct S {
int a;
int b;
int c;
};
union U {
int a;
int b;
int c;
};
struct S1 {
int a;
int b;
int c;
};
union U1 {
int a;
int b;
int c;
};
typedef int T;
typedef int S;
typedef int U;
typedef int T1;
typedef int S1;
typedef int U1;
struct root_struct {
S m_1;
T m_2;
U m_3;
S1 m_4;
T1 m_5;
U1 m_6;
struct S m_7;
struct S1 m_8;
union U m_9;
union U1 m_10;
};
int func(struct root_struct *root)
{
return 0;
}

@@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __PROGS_CG_STORAGE_MULTI_H
#define __PROGS_CG_STORAGE_MULTI_H
#include <asm/types.h>
struct cgroup_value {
__u32 egress_pkts;
__u32 ingress_pkts;
};
#endif

@@ -0,0 +1,33 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2020 Google LLC.
*/
#include <errno.h>
#include <linux/bpf.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <bpf/bpf_helpers.h>
#include "progs/cg_storage_multi.h"
struct {
__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
__type(key, struct bpf_cgroup_storage_key);
__type(value, struct cgroup_value);
} cgroup_storage SEC(".maps");
__u32 invocations = 0;
SEC("cgroup_skb/egress")
int egress(struct __sk_buff *skb)
{
struct cgroup_value *ptr_cg_storage =
bpf_get_local_storage(&cgroup_storage, 0);
__sync_fetch_and_add(&ptr_cg_storage->egress_pkts, 1);
__sync_fetch_and_add(&invocations, 1);
return 1;
}

@@ -0,0 +1,57 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2020 Google LLC.
*/
#include <errno.h>
#include <linux/bpf.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <bpf/bpf_helpers.h>
#include "progs/cg_storage_multi.h"
struct {
__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
__type(key, struct bpf_cgroup_storage_key);
__type(value, struct cgroup_value);
} cgroup_storage SEC(".maps");
__u32 invocations = 0;
SEC("cgroup_skb/egress/1")
int egress1(struct __sk_buff *skb)
{
struct cgroup_value *ptr_cg_storage =
bpf_get_local_storage(&cgroup_storage, 0);
__sync_fetch_and_add(&ptr_cg_storage->egress_pkts, 1);
__sync_fetch_and_add(&invocations, 1);
return 1;
}
SEC("cgroup_skb/egress/2")
int egress2(struct __sk_buff *skb)
{
struct cgroup_value *ptr_cg_storage =
bpf_get_local_storage(&cgroup_storage, 0);
__sync_fetch_and_add(&ptr_cg_storage->egress_pkts, 1);
__sync_fetch_and_add(&invocations, 1);
return 1;
}
SEC("cgroup_skb/ingress")
int ingress(struct __sk_buff *skb)
{
struct cgroup_value *ptr_cg_storage =
bpf_get_local_storage(&cgroup_storage, 0);
__sync_fetch_and_add(&ptr_cg_storage->ingress_pkts, 1);
__sync_fetch_and_add(&invocations, 1);
return 1;
}

@@ -0,0 +1,57 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2020 Google LLC.
*/
#include <errno.h>
#include <linux/bpf.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <bpf/bpf_helpers.h>
#include "progs/cg_storage_multi.h"
struct {
__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
__type(key, __u64);
__type(value, struct cgroup_value);
} cgroup_storage SEC(".maps");
__u32 invocations = 0;
SEC("cgroup_skb/egress/1")
int egress1(struct __sk_buff *skb)
{
struct cgroup_value *ptr_cg_storage =
bpf_get_local_storage(&cgroup_storage, 0);
__sync_fetch_and_add(&ptr_cg_storage->egress_pkts, 1);
__sync_fetch_and_add(&invocations, 1);
return 1;
}
SEC("cgroup_skb/egress/2")
int egress2(struct __sk_buff *skb)
{
struct cgroup_value *ptr_cg_storage =
bpf_get_local_storage(&cgroup_storage, 0);
__sync_fetch_and_add(&ptr_cg_storage->egress_pkts, 1);
__sync_fetch_and_add(&invocations, 1);
return 1;
}
SEC("cgroup_skb/ingress")
int ingress(struct __sk_buff *skb)
{
struct cgroup_value *ptr_cg_storage =
bpf_get_local_storage(&cgroup_storage, 0);
__sync_fetch_and_add(&ptr_cg_storage->ingress_pkts, 1);
__sync_fetch_and_add(&invocations, 1);
return 1;
}

@@ -104,6 +104,30 @@ static __inline int bind_to_device(struct bpf_sock_addr *ctx)
return 0;
}
static __inline int set_keepalive(struct bpf_sock_addr *ctx)
{
int zero = 0, one = 1;
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_KEEPALIVE, &one, sizeof(one)))
return 1;
if (ctx->type == SOCK_STREAM) {
if (bpf_setsockopt(ctx, SOL_TCP, TCP_KEEPIDLE, &one, sizeof(one)))
return 1;
if (bpf_setsockopt(ctx, SOL_TCP, TCP_KEEPINTVL, &one, sizeof(one)))
return 1;
if (bpf_setsockopt(ctx, SOL_TCP, TCP_KEEPCNT, &one, sizeof(one)))
return 1;
if (bpf_setsockopt(ctx, SOL_TCP, TCP_SYNCNT, &one, sizeof(one)))
return 1;
if (bpf_setsockopt(ctx, SOL_TCP, TCP_USER_TIMEOUT, &one, sizeof(one)))
return 1;
}
if (bpf_setsockopt(ctx, SOL_SOCKET, SO_KEEPALIVE, &zero, sizeof(zero)))
return 1;
return 0;
}
SEC("cgroup/connect4")
int connect_v4_prog(struct bpf_sock_addr *ctx)
{
@@ -121,6 +145,9 @@ int connect_v4_prog(struct bpf_sock_addr *ctx)
if (bind_to_device(ctx))
return 0;
if (set_keepalive(ctx))
return 0;
if (ctx->type != SOCK_STREAM && ctx->type != SOCK_DGRAM)
return 0;
else if (ctx->type == SOCK_STREAM)

@@ -0,0 +1,686 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#define LOOP_BOUND 0xf
#define MAX_ENTRIES 8
#define HALF_ENTRIES (MAX_ENTRIES >> 1)
_Static_assert(MAX_ENTRIES < LOOP_BOUND, "MAX_ENTRIES must be < LOOP_BOUND");
enum bpf_map_type g_map_type = BPF_MAP_TYPE_UNSPEC;
__u32 g_line = 0;
#define VERIFY_TYPE(type, func) ({ \
g_map_type = type; \
if (!func()) \
return 0; \
})
#define VERIFY(expr) ({ \
g_line = __LINE__; \
if (!(expr)) \
return 0; \
})
struct bpf_map_memory {
__u32 pages;
} __attribute__((preserve_access_index));
struct bpf_map {
enum bpf_map_type map_type;
__u32 key_size;
__u32 value_size;
__u32 max_entries;
__u32 id;
struct bpf_map_memory memory;
} __attribute__((preserve_access_index));
static inline int check_bpf_map_fields(struct bpf_map *map, __u32 key_size,
__u32 value_size, __u32 max_entries)
{
VERIFY(map->map_type == g_map_type);
VERIFY(map->key_size == key_size);
VERIFY(map->value_size == value_size);
VERIFY(map->max_entries == max_entries);
VERIFY(map->id > 0);
VERIFY(map->memory.pages > 0);
return 1;
}
static inline int check_bpf_map_ptr(struct bpf_map *indirect,
struct bpf_map *direct)
{
VERIFY(indirect->map_type == direct->map_type);
VERIFY(indirect->key_size == direct->key_size);
VERIFY(indirect->value_size == direct->value_size);
VERIFY(indirect->max_entries == direct->max_entries);
VERIFY(indirect->id == direct->id);
VERIFY(indirect->memory.pages == direct->memory.pages);
return 1;
}
static inline int check(struct bpf_map *indirect, struct bpf_map *direct,
__u32 key_size, __u32 value_size, __u32 max_entries)
{
VERIFY(check_bpf_map_ptr(indirect, direct));
VERIFY(check_bpf_map_fields(indirect, key_size, value_size,
max_entries));
return 1;
}
static inline int check_default(struct bpf_map *indirect,
struct bpf_map *direct)
{
VERIFY(check(indirect, direct, sizeof(__u32), sizeof(__u32),
MAX_ENTRIES));
return 1;
}
typedef struct {
int counter;
} atomic_t;
struct bpf_htab {
struct bpf_map map;
atomic_t count;
__u32 n_buckets;
__u32 elem_size;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(map_flags, BPF_F_NO_PREALLOC); /* to test bpf_htab.count */
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_hash SEC(".maps");
static inline int check_hash(void)
{
struct bpf_htab *hash = (struct bpf_htab *)&m_hash;
struct bpf_map *map = (struct bpf_map *)&m_hash;
int i;
VERIFY(check_default(&hash->map, map));
VERIFY(hash->n_buckets == MAX_ENTRIES);
VERIFY(hash->elem_size == 64);
VERIFY(hash->count.counter == 0);
for (i = 0; i < HALF_ENTRIES; ++i) {
const __u32 key = i;
const __u32 val = 1;
if (bpf_map_update_elem(hash, &key, &val, 0))
return 0;
}
VERIFY(hash->count.counter == HALF_ENTRIES);
return 1;
}
struct bpf_array {
struct bpf_map map;
__u32 elem_size;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_array SEC(".maps");
static inline int check_array(void)
{
struct bpf_array *array = (struct bpf_array *)&m_array;
struct bpf_map *map = (struct bpf_map *)&m_array;
int i, n_lookups = 0, n_keys = 0;
VERIFY(check_default(&array->map, map));
VERIFY(array->elem_size == 8);
for (i = 0; i < array->map.max_entries && i < LOOP_BOUND; ++i) {
const __u32 key = i;
__u32 *val = bpf_map_lookup_elem(array, &key);
++n_lookups;
if (val)
++n_keys;
}
VERIFY(n_lookups == MAX_ENTRIES);
VERIFY(n_keys == MAX_ENTRIES);
return 1;
}
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_prog_array SEC(".maps");
static inline int check_prog_array(void)
{
struct bpf_array *prog_array = (struct bpf_array *)&m_prog_array;
struct bpf_map *map = (struct bpf_map *)&m_prog_array;
VERIFY(check_default(&prog_array->map, map));
return 1;
}
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_perf_event_array SEC(".maps");
static inline int check_perf_event_array(void)
{
struct bpf_array *perf_event_array = (struct bpf_array *)&m_perf_event_array;
struct bpf_map *map = (struct bpf_map *)&m_perf_event_array;
VERIFY(check_default(&perf_event_array->map, map));
return 1;
}
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_percpu_hash SEC(".maps");
static inline int check_percpu_hash(void)
{
struct bpf_htab *percpu_hash = (struct bpf_htab *)&m_percpu_hash;
struct bpf_map *map = (struct bpf_map *)&m_percpu_hash;
VERIFY(check_default(&percpu_hash->map, map));
return 1;
}
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_percpu_array SEC(".maps");
static inline int check_percpu_array(void)
{
struct bpf_array *percpu_array = (struct bpf_array *)&m_percpu_array;
struct bpf_map *map = (struct bpf_map *)&m_percpu_array;
VERIFY(check_default(&percpu_array->map, map));
return 1;
}
struct bpf_stack_map {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_STACK_TRACE);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u64);
} m_stack_trace SEC(".maps");
static inline int check_stack_trace(void)
{
struct bpf_stack_map *stack_trace =
(struct bpf_stack_map *)&m_stack_trace;
struct bpf_map *map = (struct bpf_map *)&m_stack_trace;
VERIFY(check(&stack_trace->map, map, sizeof(__u32), sizeof(__u64),
MAX_ENTRIES));
return 1;
}
struct {
__uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_cgroup_array SEC(".maps");
static inline int check_cgroup_array(void)
{
struct bpf_array *cgroup_array = (struct bpf_array *)&m_cgroup_array;
struct bpf_map *map = (struct bpf_map *)&m_cgroup_array;
VERIFY(check_default(&cgroup_array->map, map));
return 1;
}
struct {
__uint(type, BPF_MAP_TYPE_LRU_HASH);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_lru_hash SEC(".maps");
static inline int check_lru_hash(void)
{
struct bpf_htab *lru_hash = (struct bpf_htab *)&m_lru_hash;
struct bpf_map *map = (struct bpf_map *)&m_lru_hash;
VERIFY(check_default(&lru_hash->map, map));
return 1;
}
struct {
__uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_lru_percpu_hash SEC(".maps");
static inline int check_lru_percpu_hash(void)
{
struct bpf_htab *lru_percpu_hash = (struct bpf_htab *)&m_lru_percpu_hash;
struct bpf_map *map = (struct bpf_map *)&m_lru_percpu_hash;
VERIFY(check_default(&lru_percpu_hash->map, map));
return 1;
}
struct lpm_trie {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct lpm_key {
struct bpf_lpm_trie_key trie_key;
__u32 data;
};
struct {
__uint(type, BPF_MAP_TYPE_LPM_TRIE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__uint(max_entries, MAX_ENTRIES);
__type(key, struct lpm_key);
__type(value, __u32);
} m_lpm_trie SEC(".maps");
static inline int check_lpm_trie(void)
{
struct lpm_trie *lpm_trie = (struct lpm_trie *)&m_lpm_trie;
struct bpf_map *map = (struct bpf_map *)&m_lpm_trie;
VERIFY(check(&lpm_trie->map, map, sizeof(struct lpm_key), sizeof(__u32),
MAX_ENTRIES));
return 1;
}
struct inner_map {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u32);
} inner_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
__array(values, struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u32);
});
} m_array_of_maps SEC(".maps") = {
.values = { (void *)&inner_map, 0, 0, 0, 0, 0, 0, 0, 0 },
};
static inline int check_array_of_maps(void)
{
struct bpf_array *array_of_maps = (struct bpf_array *)&m_array_of_maps;
struct bpf_map *map = (struct bpf_map *)&m_array_of_maps;
VERIFY(check_default(&array_of_maps->map, map));
return 1;
}
struct {
__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
__array(values, struct inner_map);
} m_hash_of_maps SEC(".maps") = {
.values = {
[2] = &inner_map,
},
};
static inline int check_hash_of_maps(void)
{
struct bpf_htab *hash_of_maps = (struct bpf_htab *)&m_hash_of_maps;
struct bpf_map *map = (struct bpf_map *)&m_hash_of_maps;
VERIFY(check_default(&hash_of_maps->map, map));
return 1;
}
struct bpf_dtab {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_DEVMAP);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_devmap SEC(".maps");
static inline int check_devmap(void)
{
struct bpf_dtab *devmap = (struct bpf_dtab *)&m_devmap;
struct bpf_map *map = (struct bpf_map *)&m_devmap;
VERIFY(check_default(&devmap->map, map));
return 1;
}
struct bpf_stab {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_sockmap SEC(".maps");
static inline int check_sockmap(void)
{
struct bpf_stab *sockmap = (struct bpf_stab *)&m_sockmap;
struct bpf_map *map = (struct bpf_map *)&m_sockmap;
VERIFY(check_default(&sockmap->map, map));
return 1;
}
struct bpf_cpu_map {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_CPUMAP);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_cpumap SEC(".maps");
static inline int check_cpumap(void)
{
struct bpf_cpu_map *cpumap = (struct bpf_cpu_map *)&m_cpumap;
struct bpf_map *map = (struct bpf_map *)&m_cpumap;
VERIFY(check_default(&cpumap->map, map));
return 1;
}
struct xsk_map {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_XSKMAP);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_xskmap SEC(".maps");
static inline int check_xskmap(void)
{
struct xsk_map *xskmap = (struct xsk_map *)&m_xskmap;
struct bpf_map *map = (struct bpf_map *)&m_xskmap;
VERIFY(check_default(&xskmap->map, map));
return 1;
}
struct bpf_shtab {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_SOCKHASH);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_sockhash SEC(".maps");
static inline int check_sockhash(void)
{
struct bpf_shtab *sockhash = (struct bpf_shtab *)&m_sockhash;
struct bpf_map *map = (struct bpf_map *)&m_sockhash;
VERIFY(check_default(&sockhash->map, map));
return 1;
}
struct bpf_cgroup_storage_map {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
__type(key, struct bpf_cgroup_storage_key);
__type(value, __u32);
} m_cgroup_storage SEC(".maps");
static inline int check_cgroup_storage(void)
{
struct bpf_cgroup_storage_map *cgroup_storage =
(struct bpf_cgroup_storage_map *)&m_cgroup_storage;
struct bpf_map *map = (struct bpf_map *)&m_cgroup_storage;
VERIFY(check(&cgroup_storage->map, map,
sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0));
return 1;
}
struct reuseport_array {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_reuseport_sockarray SEC(".maps");
static inline int check_reuseport_sockarray(void)
{
struct reuseport_array *reuseport_sockarray =
(struct reuseport_array *)&m_reuseport_sockarray;
struct bpf_map *map = (struct bpf_map *)&m_reuseport_sockarray;
VERIFY(check_default(&reuseport_sockarray->map, map));
return 1;
}
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
__type(key, struct bpf_cgroup_storage_key);
__type(value, __u32);
} m_percpu_cgroup_storage SEC(".maps");
static inline int check_percpu_cgroup_storage(void)
{
struct bpf_cgroup_storage_map *percpu_cgroup_storage =
(struct bpf_cgroup_storage_map *)&m_percpu_cgroup_storage;
struct bpf_map *map = (struct bpf_map *)&m_percpu_cgroup_storage;
VERIFY(check(&percpu_cgroup_storage->map, map,
sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0));
return 1;
}
struct bpf_queue_stack {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_QUEUE);
__uint(max_entries, MAX_ENTRIES);
__type(value, __u32);
} m_queue SEC(".maps");
static inline int check_queue(void)
{
struct bpf_queue_stack *queue = (struct bpf_queue_stack *)&m_queue;
struct bpf_map *map = (struct bpf_map *)&m_queue;
VERIFY(check(&queue->map, map, 0, sizeof(__u32), MAX_ENTRIES));
return 1;
}
struct {
__uint(type, BPF_MAP_TYPE_STACK);
__uint(max_entries, MAX_ENTRIES);
__type(value, __u32);
} m_stack SEC(".maps");
static inline int check_stack(void)
{
struct bpf_queue_stack *stack = (struct bpf_queue_stack *)&m_stack;
struct bpf_map *map = (struct bpf_map *)&m_stack;
VERIFY(check(&stack->map, map, 0, sizeof(__u32), MAX_ENTRIES));
return 1;
}
struct bpf_sk_storage_map {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, __u32);
__type(value, __u32);
} m_sk_storage SEC(".maps");
static inline int check_sk_storage(void)
{
struct bpf_sk_storage_map *sk_storage =
(struct bpf_sk_storage_map *)&m_sk_storage;
struct bpf_map *map = (struct bpf_map *)&m_sk_storage;
VERIFY(check(&sk_storage->map, map, sizeof(__u32), sizeof(__u32), 0));
return 1;
}
struct {
__uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
__uint(max_entries, MAX_ENTRIES);
__type(key, __u32);
__type(value, __u32);
} m_devmap_hash SEC(".maps");
static inline int check_devmap_hash(void)
{
struct bpf_dtab *devmap_hash = (struct bpf_dtab *)&m_devmap_hash;
struct bpf_map *map = (struct bpf_map *)&m_devmap_hash;
VERIFY(check_default(&devmap_hash->map, map));
return 1;
}
struct bpf_ringbuf_map {
struct bpf_map map;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_RINGBUF);
__uint(max_entries, 1 << 12);
} m_ringbuf SEC(".maps");
static inline int check_ringbuf(void)
{
struct bpf_ringbuf_map *ringbuf = (struct bpf_ringbuf_map *)&m_ringbuf;
struct bpf_map *map = (struct bpf_map *)&m_ringbuf;
VERIFY(check(&ringbuf->map, map, 0, 0, 1 << 12));
return 1;
}
SEC("cgroup_skb/egress")
int cg_skb(void *ctx)
{
VERIFY_TYPE(BPF_MAP_TYPE_HASH, check_hash);
VERIFY_TYPE(BPF_MAP_TYPE_ARRAY, check_array);
VERIFY_TYPE(BPF_MAP_TYPE_PROG_ARRAY, check_prog_array);
VERIFY_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, check_perf_event_array);
VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_HASH, check_percpu_hash);
VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, check_percpu_array);
VERIFY_TYPE(BPF_MAP_TYPE_STACK_TRACE, check_stack_trace);
VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, check_cgroup_array);
VERIFY_TYPE(BPF_MAP_TYPE_LRU_HASH, check_lru_hash);
VERIFY_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, check_lru_percpu_hash);
VERIFY_TYPE(BPF_MAP_TYPE_LPM_TRIE, check_lpm_trie);
VERIFY_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, check_array_of_maps);
VERIFY_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, check_hash_of_maps);
VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP, check_devmap);
VERIFY_TYPE(BPF_MAP_TYPE_SOCKMAP, check_sockmap);
VERIFY_TYPE(BPF_MAP_TYPE_CPUMAP, check_cpumap);
VERIFY_TYPE(BPF_MAP_TYPE_XSKMAP, check_xskmap);
VERIFY_TYPE(BPF_MAP_TYPE_SOCKHASH, check_sockhash);
VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, check_cgroup_storage);
VERIFY_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
check_reuseport_sockarray);
VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
check_percpu_cgroup_storage);
VERIFY_TYPE(BPF_MAP_TYPE_QUEUE, check_queue);
VERIFY_TYPE(BPF_MAP_TYPE_STACK, check_stack);
VERIFY_TYPE(BPF_MAP_TYPE_SK_STORAGE, check_sk_storage);
VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, check_devmap_hash);
VERIFY_TYPE(BPF_MAP_TYPE_RINGBUF, check_ringbuf);
return 1;
}
__u32 _version SEC("version") = 1;
char _license[] SEC("license") = "GPL";

@@ -0,0 +1,59 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#ifndef PERF_MAX_STACK_DEPTH
#define PERF_MAX_STACK_DEPTH 127
#endif
typedef __u64 stack_trace_t[PERF_MAX_STACK_DEPTH];
struct {
__uint(type, BPF_MAP_TYPE_STACK_TRACE);
__uint(max_entries, 16384);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(stack_trace_t));
} stackmap SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, stack_trace_t);
} stackdata_map SEC(".maps");
long stackid_kernel = 1;
long stackid_user = 1;
long stack_kernel = 1;
long stack_user = 1;
SEC("perf_event")
int oncpu(void *ctx)
{
stack_trace_t *trace;
__u32 key = 0;
long val;
val = bpf_get_stackid(ctx, &stackmap, 0);
if (val > 0)
stackid_kernel = 2;
val = bpf_get_stackid(ctx, &stackmap, BPF_F_USER_STACK);
if (val > 0)
stackid_user = 2;
trace = bpf_map_lookup_elem(&stackdata_map, &key);
if (!trace)
return 0;
val = bpf_get_stack(ctx, trace, sizeof(stack_trace_t), 0);
if (val > 0)
stack_kernel = 2;
val = bpf_get_stack(ctx, trace, sizeof(stack_trace_t), BPF_F_USER_STACK);
if (val > 0)
stack_user = 2;
return 0;
}
char LICENSE[] SEC("license") = "GPL";
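
A perf_event program like oncpu() only runs once user space opens a perf event and attaches the program to it. A hedged sketch of that setup, assuming libbpf; attach_oncpu() and the sampling parameters are illustrative, and a real harness opens one event per CPU:

/* Hypothetical sketch: open a software CPU-clock sampling event and
 * attach the oncpu program to it via libbpf. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <bpf/libbpf.h>

static struct bpf_link *attach_oncpu(struct bpf_program *prog)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.size = sizeof(struct perf_event_attr),
		.config = PERF_COUNT_SW_CPU_CLOCK,
		.freq = 1,
		.sample_freq = 99, /* 99 Hz sampling, illustrative */
	};
	int pfd;

	/* any process, CPU 0 only; real tests iterate over all CPUs */
	pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
		      0 /* cpu */, -1 /* group */, 0);
	if (pfd < 0)
		return NULL;
	return bpf_program__attach_perf_event(prog, pfd);
}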

@@ -0,0 +1,40 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
bool prog1_called = false;
bool prog2_called = false;
bool prog3_called = false;
SEC("raw_tp/sys_enter")
int prog1(const void *ctx)
{
prog1_called = true;
return 0;
}
SEC("raw_tp/sys_exit")
int prog2(const void *ctx)
{
prog2_called = true;
return 0;
}
struct fake_kernel_struct {
int whatever;
} __attribute__((preserve_access_index));
SEC("fentry/unexisting-kprobe-will-fail-if-loaded")
int prog3(const void *ctx)
{
struct fake_kernel_struct *fake = (void *)ctx;
fake->whatever = 123;
prog3_called = true;
return 0;
}
char _license[] SEC("license") = "GPL";

@@ -0,0 +1,43 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
struct task_struct {
int tgid;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, int);
} exp_tgid_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, int);
} results SEC(".maps");
SEC("tp/raw_syscalls/sys_enter")
int handle_sys_enter(void *ctx)
{
struct task_struct *task = (void *)bpf_get_current_task();
int tgid = BPF_CORE_READ(task, tgid);
int zero = 0;
int real_tgid = bpf_get_current_pid_tgid() >> 32;
int *exp_tgid = bpf_map_lookup_elem(&exp_tgid_map, &zero);
/* only pass through sys_enters from test process */
if (!exp_tgid || *exp_tgid != real_tgid)
return 0;
bpf_map_update_elem(&results, &zero, &tgid, 0);
return 0;
}
char _license[] SEC("license") = "GPL";

@@ -0,0 +1,37 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#define IN16 0x1234
#define IN32 0x12345678U
#define IN64 0x123456789abcdef0ULL
__u16 in16 = 0;
__u32 in32 = 0;
__u64 in64 = 0;
__u16 out16 = 0;
__u32 out32 = 0;
__u64 out64 = 0;
__u16 const16 = 0;
__u32 const32 = 0;
__u64 const64 = 0;
SEC("raw_tp/sys_enter")
int sys_enter(const void *ctx)
{
out16 = __builtin_bswap16(in16);
out32 = __builtin_bswap32(in32);
out64 = __builtin_bswap64(in64);
const16 = ___bpf_swab16(IN16);
const32 = ___bpf_swab32(IN32);
const64 = ___bpf_swab64(IN64);
return 0;
}
char _license[] SEC("license") = "GPL";

@@ -57,8 +57,9 @@ struct {
SEC("raw_tracepoint/sys_enter")
int bpf_prog1(void *ctx)
{
- int max_len, max_buildid_len, usize, ksize, total_size;
+ int max_len, max_buildid_len, total_size;
struct stack_trace_t *data;
+ long usize, ksize;
void *raw_data;
__u32 key = 0;

@@ -0,0 +1,32 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <stdbool.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
__u64 out__bpf_link_fops = -1;
__u64 out__bpf_link_fops1 = -1;
__u64 out__btf_size = -1;
__u64 out__per_cpu_start = -1;
extern const void bpf_link_fops __ksym;
extern const void __start_BTF __ksym;
extern const void __stop_BTF __ksym;
extern const void __per_cpu_start __ksym;
/* non-existing symbol, weak, default to zero */
extern const void bpf_link_fops1 __ksym __weak;
SEC("raw_tp/sys_enter")
int handler(const void *ctx)
{
out__bpf_link_fops = (__u64)&bpf_link_fops;
out__btf_size = (__u64)(&__stop_BTF - &__start_BTF);
out__per_cpu_start = (__u64)&__per_cpu_start;
out__bpf_link_fops1 = (__u64)&bpf_link_fops1;
return 0;
}
char _license[] SEC("license") = "GPL";

@@ -12,8 +12,8 @@ struct {
__uint(value_size, sizeof(int));
} perf_buf_map SEC(".maps");
SEC("kprobe/sys_nanosleep")
int BPF_KPROBE(handle_sys_nanosleep_entry)
SEC("tp/raw_syscalls/sys_enter")
int handle_sys_enter(void *ctx)
{
int cpu = bpf_get_smp_processor_id();

@@ -0,0 +1,641 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
// Copyright (c) 2020 Cloudflare
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <linux/bpf.h>
#include <linux/in.h>
#include <sys/socket.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>
#define IP4(a, b, c, d) \
bpf_htonl((((__u32)(a) & 0xffU) << 24) | \
(((__u32)(b) & 0xffU) << 16) | \
(((__u32)(c) & 0xffU) << 8) | \
(((__u32)(d) & 0xffU) << 0))
#define IP6(aaaa, bbbb, cccc, dddd) \
{ bpf_htonl(aaaa), bpf_htonl(bbbb), bpf_htonl(cccc), bpf_htonl(dddd) }
#define MAX_SOCKS 32
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, MAX_SOCKS);
__type(key, __u32);
__type(value, __u64);
} redir_map SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 2);
__type(key, int);
__type(value, int);
} run_map SEC(".maps");
enum {
PROG1 = 0,
PROG2,
};
enum {
SERVER_A = 0,
SERVER_B,
};
/* Addressable key/value constants for convenience */
static const int KEY_PROG1 = PROG1;
static const int KEY_PROG2 = PROG2;
static const int PROG_DONE = 1;
static const __u32 KEY_SERVER_A = SERVER_A;
static const __u32 KEY_SERVER_B = SERVER_B;
static const __u16 DST_PORT = 7007; /* Host byte order */
static const __u32 DST_IP4 = IP4(127, 0, 0, 1);
static const __u32 DST_IP6[] = IP6(0xfd000000, 0x0, 0x0, 0x00000001);
SEC("sk_lookup/lookup_pass")
int lookup_pass(struct bpf_sk_lookup *ctx)
{
return SK_PASS;
}
SEC("sk_lookup/lookup_drop")
int lookup_drop(struct bpf_sk_lookup *ctx)
{
return SK_DROP;
}
SEC("sk_reuseport/reuse_pass")
int reuseport_pass(struct sk_reuseport_md *ctx)
{
return SK_PASS;
}
SEC("sk_reuseport/reuse_drop")
int reuseport_drop(struct sk_reuseport_md *ctx)
{
return SK_DROP;
}
/* Redirect packets destined for port DST_PORT to socket at redir_map[0]. */
SEC("sk_lookup/redir_port")
int redir_port(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
int err;
if (ctx->local_port != DST_PORT)
return SK_PASS;
sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
if (!sk)
return SK_PASS;
err = bpf_sk_assign(ctx, sk, 0);
bpf_sk_release(sk);
return err ? SK_DROP : SK_PASS;
}
/* Redirect packets destined for DST_IP4 address to socket at redir_map[0]. */
SEC("sk_lookup/redir_ip4")
int redir_ip4(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
int err;
if (ctx->family != AF_INET)
return SK_PASS;
if (ctx->local_port != DST_PORT)
return SK_PASS;
if (ctx->local_ip4 != DST_IP4)
return SK_PASS;
sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
if (!sk)
return SK_PASS;
err = bpf_sk_assign(ctx, sk, 0);
bpf_sk_release(sk);
return err ? SK_DROP : SK_PASS;
}
/* Redirect packets destined for DST_IP6 address to socket at redir_map[0]. */
SEC("sk_lookup/redir_ip6")
int redir_ip6(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
int err;
if (ctx->family != AF_INET6)
return SK_PASS;
if (ctx->local_port != DST_PORT)
return SK_PASS;
if (ctx->local_ip6[0] != DST_IP6[0] ||
ctx->local_ip6[1] != DST_IP6[1] ||
ctx->local_ip6[2] != DST_IP6[2] ||
ctx->local_ip6[3] != DST_IP6[3])
return SK_PASS;
sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
if (!sk)
return SK_PASS;
err = bpf_sk_assign(ctx, sk, 0);
bpf_sk_release(sk);
return err ? SK_DROP : SK_PASS;
}
SEC("sk_lookup/select_sock_a")
int select_sock_a(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
int err;
sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
if (!sk)
return SK_PASS;
err = bpf_sk_assign(ctx, sk, 0);
bpf_sk_release(sk);
return err ? SK_DROP : SK_PASS;
}
SEC("sk_lookup/select_sock_a_no_reuseport")
int select_sock_a_no_reuseport(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
int err;
sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
if (!sk)
return SK_DROP;
err = bpf_sk_assign(ctx, sk, BPF_SK_LOOKUP_F_NO_REUSEPORT);
bpf_sk_release(sk);
return err ? SK_DROP : SK_PASS;
}
SEC("sk_reuseport/select_sock_b")
int select_sock_b(struct sk_reuseport_md *ctx)
{
__u32 key = KEY_SERVER_B;
int err;
err = bpf_sk_select_reuseport(ctx, &redir_map, &key, 0);
return err ? SK_DROP : SK_PASS;
}
/* Check that bpf_sk_assign() returns -EEXIST if socket already selected. */
SEC("sk_lookup/sk_assign_eexist")
int sk_assign_eexist(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
int err, ret;
ret = SK_DROP;
sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B);
if (!sk)
goto out;
err = bpf_sk_assign(ctx, sk, 0);
if (err)
goto out;
bpf_sk_release(sk);
sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
if (!sk)
goto out;
err = bpf_sk_assign(ctx, sk, 0);
if (err != -EEXIST) {
bpf_printk("sk_assign returned %d, expected %d\n",
err, -EEXIST);
goto out;
}
ret = SK_PASS; /* Success, redirect to KEY_SERVER_B */
out:
if (sk)
bpf_sk_release(sk);
return ret;
}
/* Check that bpf_sk_assign(BPF_SK_LOOKUP_F_REPLACE) can override selection. */
SEC("sk_lookup/sk_assign_replace_flag")
int sk_assign_replace_flag(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
int err, ret;
ret = SK_DROP;
sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
if (!sk)
goto out;
err = bpf_sk_assign(ctx, sk, 0);
if (err)
goto out;
bpf_sk_release(sk);
sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B);
if (!sk)
goto out;
err = bpf_sk_assign(ctx, sk, BPF_SK_LOOKUP_F_REPLACE);
if (err) {
bpf_printk("sk_assign returned %d, expected 0\n", err);
goto out;
}
ret = SK_PASS; /* Success, redirect to KEY_SERVER_B */
out:
if (sk)
bpf_sk_release(sk);
return ret;
}
/* Check that bpf_sk_assign(sk=NULL) is accepted. */
SEC("sk_lookup/sk_assign_null")
int sk_assign_null(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk = NULL;
int err, ret;
ret = SK_DROP;
err = bpf_sk_assign(ctx, NULL, 0);
if (err) {
bpf_printk("sk_assign returned %d, expected 0\n", err);
goto out;
}
sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B);
if (!sk)
goto out;
err = bpf_sk_assign(ctx, sk, BPF_SK_LOOKUP_F_REPLACE);
if (err) {
bpf_printk("sk_assign returned %d, expected 0\n", err);
goto out;
}
if (ctx->sk != sk)
goto out;
err = bpf_sk_assign(ctx, NULL, 0);
if (err != -EEXIST)
goto out;
err = bpf_sk_assign(ctx, NULL, BPF_SK_LOOKUP_F_REPLACE);
if (err)
goto out;
err = bpf_sk_assign(ctx, sk, BPF_SK_LOOKUP_F_REPLACE);
if (err)
goto out;
ret = SK_PASS; /* Success, redirect to KEY_SERVER_B */
out:
if (sk)
bpf_sk_release(sk);
return ret;
}
/* Check that selected sk is accessible through context. */
SEC("sk_lookup/access_ctx_sk")
int access_ctx_sk(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk1 = NULL, *sk2 = NULL;
int err, ret;
ret = SK_DROP;
/* Try accessing unassigned (NULL) ctx->sk field */
if (ctx->sk && ctx->sk->family != AF_INET)
goto out;
/* Assign a value to ctx->sk */
sk1 = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
if (!sk1)
goto out;
err = bpf_sk_assign(ctx, sk1, 0);
if (err)
goto out;
if (ctx->sk != sk1)
goto out;
/* Access ctx->sk fields */
if (ctx->sk->family != AF_INET ||
ctx->sk->type != SOCK_STREAM ||
ctx->sk->state != BPF_TCP_LISTEN)
goto out;
/* Reset selection */
err = bpf_sk_assign(ctx, NULL, BPF_SK_LOOKUP_F_REPLACE);
if (err)
goto out;
if (ctx->sk)
goto out;
/* Assign another socket */
sk2 = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B);
if (!sk2)
goto out;
err = bpf_sk_assign(ctx, sk2, BPF_SK_LOOKUP_F_REPLACE);
if (err)
goto out;
if (ctx->sk != sk2)
goto out;
/* Access reassigned ctx->sk fields */
if (ctx->sk->family != AF_INET ||
ctx->sk->type != SOCK_STREAM ||
ctx->sk->state != BPF_TCP_LISTEN)
goto out;
ret = SK_PASS; /* Success, redirect to KEY_SERVER_B */
out:
if (sk1)
bpf_sk_release(sk1);
if (sk2)
bpf_sk_release(sk2);
return ret;
}
/* Check narrow loads from ctx fields that support them.
*
* Narrow loads of size >= target field size from a non-zero offset
* are not covered because they give bogus results; that is, the
* verifier ignores the offset.
*/
SEC("sk_lookup/ctx_narrow_access")
int ctx_narrow_access(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
int err, family;
__u16 *half;
__u8 *byte;
bool v4;
v4 = (ctx->family == AF_INET);
/* Narrow loads from family field */
byte = (__u8 *)&ctx->family;
half = (__u16 *)&ctx->family;
if (byte[0] != (v4 ? AF_INET : AF_INET6) ||
byte[1] != 0 || byte[2] != 0 || byte[3] != 0)
return SK_DROP;
if (half[0] != (v4 ? AF_INET : AF_INET6))
return SK_DROP;
byte = (__u8 *)&ctx->protocol;
if (byte[0] != IPPROTO_TCP ||
byte[1] != 0 || byte[2] != 0 || byte[3] != 0)
return SK_DROP;
half = (__u16 *)&ctx->protocol;
if (half[0] != IPPROTO_TCP)
return SK_DROP;
/* Narrow loads from remote_port field. Expect non-0 value. */
byte = (__u8 *)&ctx->remote_port;
if (byte[0] == 0 && byte[1] == 0 && byte[2] == 0 && byte[3] == 0)
return SK_DROP;
half = (__u16 *)&ctx->remote_port;
if (half[0] == 0)
return SK_DROP;
/* Narrow loads from local_port field. Expect DST_PORT. */
byte = (__u8 *)&ctx->local_port;
if (byte[0] != ((DST_PORT >> 0) & 0xff) ||
byte[1] != ((DST_PORT >> 8) & 0xff) ||
byte[2] != 0 || byte[3] != 0)
return SK_DROP;
half = (__u16 *)&ctx->local_port;
if (half[0] != DST_PORT)
return SK_DROP;
/* Narrow loads from IPv4 fields */
if (v4) {
/* Expect non-0.0.0.0 in remote_ip4 */
byte = (__u8 *)&ctx->remote_ip4;
if (byte[0] == 0 && byte[1] == 0 &&
byte[2] == 0 && byte[3] == 0)
return SK_DROP;
half = (__u16 *)&ctx->remote_ip4;
if (half[0] == 0 && half[1] == 0)
return SK_DROP;
/* Expect DST_IP4 in local_ip4 */
byte = (__u8 *)&ctx->local_ip4;
if (byte[0] != ((DST_IP4 >> 0) & 0xff) ||
byte[1] != ((DST_IP4 >> 8) & 0xff) ||
byte[2] != ((DST_IP4 >> 16) & 0xff) ||
byte[3] != ((DST_IP4 >> 24) & 0xff))
return SK_DROP;
half = (__u16 *)&ctx->local_ip4;
if (half[0] != ((DST_IP4 >> 0) & 0xffff) ||
half[1] != ((DST_IP4 >> 16) & 0xffff))
return SK_DROP;
} else {
/* Expect 0.0.0.0 IPs when family != AF_INET */
byte = (__u8 *)&ctx->remote_ip4;
if (byte[0] != 0 || byte[1] != 0 ||
byte[2] != 0 || byte[3] != 0)
return SK_DROP;
half = (__u16 *)&ctx->remote_ip4;
if (half[0] != 0 || half[1] != 0)
return SK_DROP;
byte = (__u8 *)&ctx->local_ip4;
if (byte[0] != 0 || byte[1] != 0 ||
byte[2] != 0 || byte[3] != 0)
return SK_DROP;
half = (__u16 *)&ctx->local_ip4;
if (half[0] != 0 || half[1] != 0)
return SK_DROP;
}
/* Narrow loads from IPv6 fields */
if (!v4) {
/* Expect non-:: IP in remote_ip6 */
byte = (__u8 *)&ctx->remote_ip6;
if (byte[0] == 0 && byte[1] == 0 &&
byte[2] == 0 && byte[3] == 0 &&
byte[4] == 0 && byte[5] == 0 &&
byte[6] == 0 && byte[7] == 0 &&
byte[8] == 0 && byte[9] == 0 &&
byte[10] == 0 && byte[11] == 0 &&
byte[12] == 0 && byte[13] == 0 &&
byte[14] == 0 && byte[15] == 0)
return SK_DROP;
half = (__u16 *)&ctx->remote_ip6;
if (half[0] == 0 && half[1] == 0 &&
half[2] == 0 && half[3] == 0 &&
half[4] == 0 && half[5] == 0 &&
half[6] == 0 && half[7] == 0)
return SK_DROP;
/* Expect DST_IP6 in local_ip6 */
byte = (__u8 *)&ctx->local_ip6;
if (byte[0] != ((DST_IP6[0] >> 0) & 0xff) ||
byte[1] != ((DST_IP6[0] >> 8) & 0xff) ||
byte[2] != ((DST_IP6[0] >> 16) & 0xff) ||
byte[3] != ((DST_IP6[0] >> 24) & 0xff) ||
byte[4] != ((DST_IP6[1] >> 0) & 0xff) ||
byte[5] != ((DST_IP6[1] >> 8) & 0xff) ||
byte[6] != ((DST_IP6[1] >> 16) & 0xff) ||
byte[7] != ((DST_IP6[1] >> 24) & 0xff) ||
byte[8] != ((DST_IP6[2] >> 0) & 0xff) ||
byte[9] != ((DST_IP6[2] >> 8) & 0xff) ||
byte[10] != ((DST_IP6[2] >> 16) & 0xff) ||
byte[11] != ((DST_IP6[2] >> 24) & 0xff) ||
byte[12] != ((DST_IP6[3] >> 0) & 0xff) ||
byte[13] != ((DST_IP6[3] >> 8) & 0xff) ||
byte[14] != ((DST_IP6[3] >> 16) & 0xff) ||
byte[15] != ((DST_IP6[3] >> 24) & 0xff))
return SK_DROP;
half = (__u16 *)&ctx->local_ip6;
if (half[0] != ((DST_IP6[0] >> 0) & 0xffff) ||
half[1] != ((DST_IP6[0] >> 16) & 0xffff) ||
half[2] != ((DST_IP6[1] >> 0) & 0xffff) ||
half[3] != ((DST_IP6[1] >> 16) & 0xffff) ||
half[4] != ((DST_IP6[2] >> 0) & 0xffff) ||
half[5] != ((DST_IP6[2] >> 16) & 0xffff) ||
half[6] != ((DST_IP6[3] >> 0) & 0xffff) ||
half[7] != ((DST_IP6[3] >> 16) & 0xffff))
return SK_DROP;
} else {
/* Expect :: IPs when family != AF_INET6 */
byte = (__u8 *)&ctx->remote_ip6;
if (byte[0] != 0 || byte[1] != 0 ||
byte[2] != 0 || byte[3] != 0 ||
byte[4] != 0 || byte[5] != 0 ||
byte[6] != 0 || byte[7] != 0 ||
byte[8] != 0 || byte[9] != 0 ||
byte[10] != 0 || byte[11] != 0 ||
byte[12] != 0 || byte[13] != 0 ||
byte[14] != 0 || byte[15] != 0)
return SK_DROP;
half = (__u16 *)&ctx->remote_ip6;
if (half[0] != 0 || half[1] != 0 ||
half[2] != 0 || half[3] != 0 ||
half[4] != 0 || half[5] != 0 ||
half[6] != 0 || half[7] != 0)
return SK_DROP;
byte = (__u8 *)&ctx->local_ip6;
if (byte[0] != 0 || byte[1] != 0 ||
byte[2] != 0 || byte[3] != 0 ||
byte[4] != 0 || byte[5] != 0 ||
byte[6] != 0 || byte[7] != 0 ||
byte[8] != 0 || byte[9] != 0 ||
byte[10] != 0 || byte[11] != 0 ||
byte[12] != 0 || byte[13] != 0 ||
byte[14] != 0 || byte[15] != 0)
return SK_DROP;
half = (__u16 *)&ctx->local_ip6;
if (half[0] != 0 || half[1] != 0 ||
half[2] != 0 || half[3] != 0 ||
half[4] != 0 || half[5] != 0 ||
half[6] != 0 || half[7] != 0)
return SK_DROP;
}
/* Success, redirect to KEY_SERVER_B */
sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B);
if (sk) {
bpf_sk_assign(ctx, sk, 0);
bpf_sk_release(sk);
}
return SK_PASS;
}
/* Check that sk_assign rejects SERVER_A socket with -ESOCKTNOSUPPORT */
SEC("sk_lookup/sk_assign_esocknosupport")
int sk_assign_esocknosupport(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
int err, ret;
ret = SK_DROP;
sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
if (!sk)
goto out;
err = bpf_sk_assign(ctx, sk, 0);
if (err != -ESOCKTNOSUPPORT) {
bpf_printk("sk_assign returned %d, expected %d\n",
err, -ESOCKTNOSUPPORT);
goto out;
}
ret = SK_PASS; /* Success, pass to regular lookup */
out:
if (sk)
bpf_sk_release(sk);
return ret;
}
SEC("sk_lookup/multi_prog_pass1")
int multi_prog_pass1(struct bpf_sk_lookup *ctx)
{
bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY);
return SK_PASS;
}
SEC("sk_lookup/multi_prog_pass2")
int multi_prog_pass2(struct bpf_sk_lookup *ctx)
{
bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY);
return SK_PASS;
}
SEC("sk_lookup/multi_prog_drop1")
int multi_prog_drop1(struct bpf_sk_lookup *ctx)
{
bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY);
return SK_DROP;
}
SEC("sk_lookup/multi_prog_drop2")
int multi_prog_drop2(struct bpf_sk_lookup *ctx)
{
bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY);
return SK_DROP;
}
static __always_inline int select_server_a(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
int err;
sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
if (!sk)
return SK_DROP;
err = bpf_sk_assign(ctx, sk, 0);
bpf_sk_release(sk);
if (err)
return SK_DROP;
return SK_PASS;
}
SEC("sk_lookup/multi_prog_redir1")
int multi_prog_redir1(struct bpf_sk_lookup *ctx)
{
int ret;
ret = select_server_a(ctx);
bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY);
return SK_PASS;
}
SEC("sk_lookup/multi_prog_redir2")
int multi_prog_redir2(struct bpf_sk_lookup *ctx)
{
int ret;
ret = select_server_a(ctx);
bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY);
return SK_PASS;
}
char _license[] SEC("license") = "Dual BSD/GPL";
__u32 _version SEC("version") = 1;
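
Unlike most program types above, sk_lookup attaches to a network namespace rather than a device or cgroup. A hedged user-space sketch, assuming libbpf's bpf_link_create(); the attach_lookup() helper is hypothetical:

/* Hypothetical sketch: attach an sk_lookup program to the current netns.
 * BPF_SK_LOOKUP links are introduced by this series. */
#include <fcntl.h>
#include <unistd.h>
#include <bpf/bpf.h>

static int attach_lookup(int prog_fd)
{
	int netns_fd, link_fd;

	netns_fd = open("/proc/self/ns/net", O_RDONLY);
	if (netns_fd < 0)
		return -1;

	/* the link keeps the program attached until its fd is closed */
	link_fd = bpf_link_create(prog_fd, netns_fd, BPF_SK_LOOKUP, NULL);
	close(netns_fd);
	return link_fd;
}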

@@ -20,7 +20,9 @@ long long in4 __attribute__((aligned(64))) = 0;
struct s in5 = {};
/* .rodata section */
- const volatile int in6 = 0;
+ const volatile struct {
+ const int in6;
+ } in = {};
/* .data section */
int out1 = -1;
@@ -46,7 +48,7 @@ int handler(const void *ctx)
out3 = in3;
out4 = in4;
out5 = in5;
- out6 = in6;
+ out6 = in.in6;
bpf_syscall = CONFIG_BPF_SYSCALL;
kern_ver = LINUX_KERNEL_VERSION;

@@ -0,0 +1,158 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#define MAX_LEN 256
char buf_in1[MAX_LEN] = {};
char buf_in2[MAX_LEN] = {};
int test_pid = 0;
bool capture = false;
/* .bss */
long payload1_len1 = 0;
long payload1_len2 = 0;
long total1 = 0;
char payload1[MAX_LEN + MAX_LEN] = {};
/* .data */
int payload2_len1 = -1;
int payload2_len2 = -1;
int total2 = -1;
char payload2[MAX_LEN + MAX_LEN] = { 1 };
int payload3_len1 = -1;
int payload3_len2 = -1;
int total3 = -1;
char payload3[MAX_LEN + MAX_LEN] = { 1 };
int payload4_len1 = -1;
int payload4_len2 = -1;
int total4 = -1;
char payload4[MAX_LEN + MAX_LEN] = { 1 };
SEC("raw_tp/sys_enter")
int handler64_unsigned(void *regs)
{
int pid = bpf_get_current_pid_tgid() >> 32;
void *payload = payload1;
u64 len;
/* ignore irrelevant invocations */
if (test_pid != pid || !capture)
return 0;
len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in1[0]);
if (len <= MAX_LEN) {
payload += len;
payload1_len1 = len;
}
len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in2[0]);
if (len <= MAX_LEN) {
payload += len;
payload1_len2 = len;
}
total1 = payload - (void *)payload1;
return 0;
}
SEC("raw_tp/sys_exit")
int handler64_signed(void *regs)
{
int pid = bpf_get_current_pid_tgid() >> 32;
void *payload = payload3;
long len;
/* ignore irrelevant invocations */
if (test_pid != pid || !capture)
return 0;
len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in1[0]);
if (len >= 0) {
payload += len;
payload3_len1 = len;
}
len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in2[0]);
if (len >= 0) {
payload += len;
payload3_len2 = len;
}
total3 = payload - (void *)payload3;
return 0;
}
SEC("tp/raw_syscalls/sys_enter")
int handler32_unsigned(void *regs)
{
int pid = bpf_get_current_pid_tgid() >> 32;
void *payload = payload2;
u32 len;
/* ignore irrelevant invocations */
if (test_pid != pid || !capture)
return 0;
len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in1[0]);
if (len <= MAX_LEN) {
payload += len;
payload2_len1 = len;
}
len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in2[0]);
if (len <= MAX_LEN) {
payload += len;
payload2_len2 = len;
}
total2 = payload - (void *)payload2;
return 0;
}
SEC("tp/raw_syscalls/sys_exit")
int handler32_signed(void *regs)
{
int pid = bpf_get_current_pid_tgid() >> 32;
void *payload = payload4;
int len;
/* ignore irrelevant invocations */
if (test_pid != pid || !capture)
return 0;
len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in1[0]);
if (len >= 0) {
payload += len;
payload4_len1 = len;
}
len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in2[0]);
if (len >= 0) {
payload += len;
payload4_len2 = len;
}
total4 = payload - (void *)payload4;
return 0;
}
SEC("tp/syscalls/sys_exit_getpid")
int handler_exit(void *regs)
{
long bla;
if (bpf_probe_read_kernel(&bla, sizeof(bla), 0))
return 1;
else
return 0;
}
char LICENSE[] SEC("license") = "GPL";

@@ -63,20 +63,20 @@ int BPF_PROG(handle__tp_btf, struct pt_regs *regs, long id)
return 0;
}
SEC("kprobe/hrtimer_nanosleep")
int BPF_KPROBE(handle__kprobe,
ktime_t rqtp, enum hrtimer_mode mode, clockid_t clockid)
SEC("kprobe/hrtimer_start_range_ns")
int BPF_KPROBE(handle__kprobe, struct hrtimer *timer, ktime_t tim, u64 delta_ns,
const enum hrtimer_mode mode)
{
if (rqtp == MY_TV_NSEC)
if (tim == MY_TV_NSEC)
kprobe_called = true;
return 0;
}
SEC("fentry/hrtimer_nanosleep")
int BPF_PROG(handle__fentry,
ktime_t rqtp, enum hrtimer_mode mode, clockid_t clockid)
SEC("fentry/hrtimer_start_range_ns")
int BPF_PROG(handle__fentry, struct hrtimer *timer, ktime_t tim, u64 delta_ns,
const enum hrtimer_mode mode)
{
if (rqtp == MY_TV_NSEC)
if (tim == MY_TV_NSEC)
fentry_called = true;
return 0;
}

@@ -0,0 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
char LICENSE[] SEC("license") = "GPL";
SEC("xdp/handler")
int xdp_handler(struct xdp_md *xdp)
{
return 0;
}

@@ -0,0 +1,36 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#define IFINDEX_LO 1
struct {
__uint(type, BPF_MAP_TYPE_CPUMAP);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(struct bpf_cpumap_val));
__uint(max_entries, 4);
} cpu_map SEC(".maps");
SEC("xdp_redir")
int xdp_redir_prog(struct xdp_md *ctx)
{
return bpf_redirect_map(&cpu_map, 1, 0);
}
SEC("xdp_dummy")
int xdp_dummy_prog(struct xdp_md *ctx)
{
return XDP_PASS;
}
SEC("xdp_cpumap/dummy_cm")
int xdp_dummy_cm(struct xdp_md *ctx)
{
if (ctx->ingress_ifindex == IFINDEX_LO)
return XDP_DROP;
return XDP_PASS;
}
char _license[] SEC("license") = "GPL";

@@ -0,0 +1,21 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020, Oracle and/or its affiliates.
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
int trace_printk_ret = 0;
int trace_printk_ran = 0;
SEC("tp/raw_syscalls/sys_enter")
int sys_enter(void *ctx)
{
static const char fmt[] = "testing,testing %d\n";
trace_printk_ret = bpf_trace_printk(fmt, sizeof(fmt),
++trace_printk_ran);
return 0;
}

@@ -0,0 +1,61 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <sys/socket.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
int invocations = 0, in_use = 0;
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, int);
} sk_map SEC(".maps");
SEC("cgroup/sock_create")
int sock(struct bpf_sock *ctx)
{
int *sk_storage;
__u32 key;
if (ctx->type != SOCK_DGRAM)
return 1;
sk_storage = bpf_sk_storage_get(&sk_map, ctx, 0,
BPF_SK_STORAGE_GET_F_CREATE);
if (!sk_storage)
return 0;
*sk_storage = 0xdeadbeef;
__sync_fetch_and_add(&invocations, 1);
if (in_use > 0) {
/* BPF_CGROUP_INET_SOCK_RELEASE is _not_ called
* when we return an error from the BPF
* program!
*/
return 0;
}
__sync_fetch_and_add(&in_use, 1);
return 1;
}
SEC("cgroup/sock_release")
int sock_release(struct bpf_sock *ctx)
{
int *sk_storage;
__u32 key;
if (ctx->type != SOCK_DGRAM)
return 1;
sk_storage = bpf_sk_storage_get(&sk_map, ctx, 0, 0);
if (!sk_storage || *sk_storage != 0xdeadbeef)
return 0;
__sync_fetch_and_add(&invocations, 1);
__sync_fetch_and_add(&in_use, -1);
return 1;
}

@@ -34,7 +34,7 @@ serverPort = int(sys.argv[1])
# create active socket
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
try:
- sock.connect(('localhost', serverPort))
+ sock.connect(('::1', serverPort))
except socket.error as e:
sys.exit(1)

@@ -38,7 +38,7 @@ serverSocket = None
# create passive socket
serverSocket = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
- try: serverSocket.bind(('localhost', 0))
+ try: serverSocket.bind(('::1', 0))
except socket.error as msg:
print('bind fails: ' + str(msg))

@@ -74,22 +74,7 @@ int main(int argc, char **argv)
goto out;
}
- if (setup_cgroup_environment()) {
- printf("Failed to setup cgroup environment\n");
- goto err;
- }
- /* Create a cgroup, get fd, and join it */
- cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
- if (cgroup_fd < 0) {
- printf("Failed to create test cgroup\n");
- goto err;
- }
- if (join_cgroup(TEST_CGROUP)) {
- printf("Failed to join cgroup\n");
- goto err;
- }
+ cgroup_fd = cgroup_setup_and_join(TEST_CGROUP);
/* Attach the bpf program */
if (bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0)) {

@@ -33,21 +33,10 @@ int main(int argc, char **argv)
goto out;
}
- if (setup_cgroup_environment()) {
- printf("Failed to load DEV_CGROUP program\n");
- goto err;
- }
- /* Create a cgroup, get fd, and join it */
- cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
+ cgroup_fd = cgroup_setup_and_join(TEST_CGROUP);
if (cgroup_fd < 0) {
printf("Failed to create test cgroup\n");
- goto err;
- }
- if (join_cgroup(TEST_CGROUP)) {
- printf("Failed to join cgroup\n");
- goto err;
+ goto out;
}
/* Attach bpf program */

@@ -10,7 +10,13 @@ if [ "$(id -u)" != "0" ]; then
exit $ksft_skip
fi
- SRC_TREE=../../../../
+ if [ "$building_out_of_srctree" ]; then
+ # We are in linux-build/kselftest/bpf
+ OUTPUT=../../
+ else
+ # We are in linux/tools/testing/selftests/bpf
+ OUTPUT=../../../../
+ fi
test_run()
{
@@ -19,8 +25,8 @@ test_run()
echo "[ JIT enabled:$1 hardened:$2 ]"
dmesg -C
- if [ -f ${SRC_TREE}/lib/test_bpf.ko ]; then
- insmod ${SRC_TREE}/lib/test_bpf.ko 2> /dev/null
+ if [ -f ${OUTPUT}/lib/test_bpf.ko ]; then
+ insmod ${OUTPUT}/lib/test_bpf.ko 2> /dev/null
if [ $? -ne 0 ]; then
rc=1
fi

@@ -140,7 +140,7 @@ ip netns exec ns6 sysctl net.ipv6.conf.veth10.seg6_enabled=1 > /dev/null
ip netns exec ns6 nc -l -6 -u -d 7330 > $TMP_FILE &
ip netns exec ns1 bash -c "echo 'foobar' | nc -w0 -6 -u -p 2121 -s fb00::1 fb00::6 7330"
sleep 5 # wait enough time to ensure the UDP datagram arrived to the last segment
- kill -INT $!
+ kill -TERM $!
if [[ $(< $TMP_FILE) != "foobar" ]]; then
exit 1

@@ -58,22 +58,9 @@ int main(int argc, char **argv)
goto out;
}
- if (setup_cgroup_environment()) {
- printf("Failed to load bpf program\n");
+ cgroup_fd = cgroup_setup_and_join(TEST_CGROUP);
+ if (cgroup_fd < 0)
goto err;
- }
- /* Create a cgroup, get fd, and join it */
- cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
- if (cgroup_fd < 0) {
- printf("Failed to create test cgroup\n");
- goto err;
- }
- if (join_cgroup(TEST_CGROUP)) {
- printf("Failed to join cgroup\n");
- goto err;
- }
/* Attach bpf program */
if (bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0)) {
@@ -82,9 +69,9 @@ int main(int argc, char **argv)
}
if (system("which ping6 &>/dev/null") == 0)
assert(!system("ping6 localhost -c 10000 -f -q > /dev/null"));
assert(!system("ping6 ::1 -c 10000 -f -q > /dev/null"));
else
assert(!system("ping -6 localhost -c 10000 -f -q > /dev/null"));
assert(!system("ping -6 ::1 -c 10000 -f -q > /dev/null"));
if (bpf_prog_query(cgroup_fd, BPF_CGROUP_INET_EGRESS, 0, NULL, NULL,
&prog_cnt)) {

@@ -12,6 +12,9 @@
#include <string.h>
#include <execinfo.h> /* backtrace */
#define EXIT_NO_TEST 2
#define EXIT_ERR_SETUP_INFRA 3
/* defined in test_progs.h */
struct test_env env = {};
@@ -111,13 +114,31 @@ static void reset_affinity() {
if (err < 0) {
stdio_restore();
fprintf(stderr, "Failed to reset process affinity: %d!\n", err);
- exit(-1);
+ exit(EXIT_ERR_SETUP_INFRA);
}
err = pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
if (err < 0) {
stdio_restore();
fprintf(stderr, "Failed to reset thread affinity: %d!\n", err);
- exit(-1);
+ exit(EXIT_ERR_SETUP_INFRA);
}
}
static void save_netns(void)
{
env.saved_netns_fd = open("/proc/self/ns/net", O_RDONLY);
if (env.saved_netns_fd == -1) {
perror("open(/proc/self/ns/net)");
exit(EXIT_ERR_SETUP_INFRA);
}
}
static void restore_netns(void)
{
if (setns(env.saved_netns_fd, CLONE_NEWNET) == -1) {
stdio_restore();
perror("setns(CLONE_NEWNS)");
exit(EXIT_ERR_SETUP_INFRA);
}
}
@@ -138,8 +159,6 @@ void test__end_subtest()
test->test_num, test->subtest_num,
test->subtest_name, sub_error_cnt ? "FAIL" : "OK");
reset_affinity();
free(test->subtest_name);
test->subtest_name = NULL;
}
@@ -366,6 +385,8 @@ enum ARG_KEYS {
ARG_TEST_NAME_BLACKLIST = 'b',
ARG_VERIFIER_STATS = 's',
ARG_VERBOSE = 'v',
ARG_GET_TEST_CNT = 'c',
ARG_LIST_TEST_NAMES = 'l',
};
static const struct argp_option opts[] = {
@@ -379,6 +400,10 @@ static const struct argp_option opts[] = {
"Output verifier statistics", },
{ "verbose", ARG_VERBOSE, "LEVEL", OPTION_ARG_OPTIONAL,
"Verbose output (use -vv or -vvv for progressively verbose output)" },
{ "count", ARG_GET_TEST_CNT, NULL, 0,
"Get number of selected top-level tests " },
{ "list", ARG_LIST_TEST_NAMES, NULL, 0,
"List test names that would run (without running them) " },
{},
};
@@ -511,6 +536,12 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
}
}
break;
case ARG_GET_TEST_CNT:
env->get_test_cnt = true;
break;
case ARG_LIST_TEST_NAMES:
env->list_test_names = true;
break;
case ARGP_KEY_ARG:
argp_usage(state);
break;
@@ -643,6 +674,7 @@ int main(int argc, char **argv)
return -1;
}
save_netns();
stdio_hijack();
for (i = 0; i < prog_test_cnt; i++) {
struct prog_test_def *test = &prog_test_defs[i];
@@ -654,6 +686,17 @@ int main(int argc, char **argv)
test->test_num, test->test_name))
continue;
if (env.get_test_cnt) {
env.succ_cnt++;
continue;
}
if (env.list_test_names) {
fprintf(env.stdout, "%s\n", test->test_name);
env.succ_cnt++;
continue;
}
test->run_test();
/* ensure last sub-test is finalized properly */
if (test->subtest_name)
@@ -673,19 +716,34 @@ int main(int argc, char **argv)
test->error_cnt ? "FAIL" : "OK");
reset_affinity();
restore_netns();
if (test->need_cgroup_cleanup)
cleanup_cgroup_environment();
}
stdio_restore();
if (env.get_test_cnt) {
printf("%d\n", env.succ_cnt);
goto out;
}
if (env.list_test_names)
goto out;
fprintf(stdout, "Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n",
env.succ_cnt, env.sub_succ_cnt, env.skip_cnt, env.fail_cnt);
out:
free_str_set(&env.test_selector.blacklist);
free_str_set(&env.test_selector.whitelist);
free(env.test_selector.num_set);
free_str_set(&env.subtest_selector.blacklist);
free_str_set(&env.subtest_selector.whitelist);
free(env.subtest_selector.num_set);
close(env.saved_netns_fd);
if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0)
return EXIT_NO_TEST;
return env.fail_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
}
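
Taken together, this change gives the test_progs harness per-test network-namespace hygiene (save_netns() at startup, restore_netns() after every test), distinct exit codes (EXIT_NO_TEST when nothing ran, EXIT_ERR_SETUP_INFRA for infrastructure failures), and two new selector flags: -c prints how many top-level tests match the selection, -l lists their names without running them. A hedged sketch of what the netns restore enables — the test body here is hypothetical, not from the diff:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

/* A test can now unshare into a private, empty netns and mutate it
 * freely; the harness's restore_netns() switches the process back to
 * the namespace saved at startup, so no manual cleanup is needed. */
static void test_netns_isolation(void)
{
	if (unshare(CLONE_NEWNET)) {
		perror("unshare(CLONE_NEWNET)");
		return;
	}
	/* ... bring up links, attach programs, assert ... */
}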

@@ -66,6 +66,8 @@ struct test_env {
enum verbosity verbosity;
bool jit_enabled;
bool get_test_cnt;
bool list_test_names;
struct prog_test_def *test;
FILE *stdout;
@@ -78,6 +80,8 @@ struct test_env {
int sub_succ_cnt; /* successful sub-tests */
int fail_cnt; /* total failed tests + sub-tests */
int skip_cnt; /* skipped tests */
int saved_netns_fd;
};
extern struct test_env env;

@@ -160,16 +160,10 @@ int main(int argc, char **argv)
exit(EXIT_FAILURE);
}
if (setup_cgroup_environment())
goto err;
cgfd = create_and_get_cgroup(CGROUP_PATH);
cgfd = cgroup_setup_and_join(CGROUP_PATH);
if (cgfd < 0)
goto err;
if (join_cgroup(CGROUP_PATH))
goto err;
if (send_packet(argv[1]))
goto err;
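
This hunk — and the near-identical ones that follow — replaces the setup_cgroup_environment() / create_and_get_cgroup() / join_cgroup() triple with a single cgroup_setup_and_join() call. A hedged sketch of what the consolidated helper plausibly does, inferred from the three calls it replaces (the real implementation lives in the selftests' cgroup_helpers.c):

#include <errno.h>
#include <stdio.h>
#include "cgroup_helpers.h"

int cgroup_setup_and_join(const char *path)
{
	int cg_fd;

	if (setup_cgroup_environment()) {
		fprintf(stderr, "Failed to setup cgroup environment\n");
		return -EINVAL;
	}

	cg_fd = create_and_get_cgroup(path);
	if (cg_fd < 0) {
		fprintf(stderr, "Failed to create test cgroup\n");
		cleanup_cgroup_environment();
		return cg_fd;
	}

	if (join_cgroup(path)) {
		fprintf(stderr, "Failed to join cgroup %s\n", path);
		cleanup_cgroup_environment();
		return -EINVAL;
	}
	return cg_fd;
}

Because failure is reported as any negative value rather than -1 specifically, callers check cg_fd < 0 — which is why the CHECK() condition in a later hunk changes from cgroup_fd == -1 to cgroup_fd < 0.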

@@ -464,16 +464,10 @@ int main(int argc, char **argv)
int cgfd = -1;
int err = 0;
if (setup_cgroup_environment())
goto err;
cgfd = create_and_get_cgroup(CG_PATH);
cgfd = cgroup_setup_and_join(CG_PATH);
if (cgfd < 0)
goto err;
if (join_cgroup(CG_PATH))
goto err;
if (run_tests(cgfd))
goto err;

@@ -1638,16 +1638,10 @@ int main(int argc, char **argv)
exit(err);
}
if (setup_cgroup_environment())
goto err;
cgfd = create_and_get_cgroup(CG_PATH);
cgfd = cgroup_setup_and_join(CG_PATH);
if (cgfd < 0)
goto err;
if (join_cgroup(CG_PATH))
goto err;
if (run_tests(cgfd))
goto err;

@@ -421,19 +421,11 @@ int main(int argc, char **argv)
struct bpf_object *obj;
struct bpf_map *map;
err = setup_cgroup_environment();
CHECK(err, "setup_cgroup_environment()", "err:%d errno:%d",
err, errno);
atexit(cleanup_cgroup_environment);
/* Create a cgroup, get fd, and join it */
cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
CHECK(cgroup_fd == -1, "create_and_get_cgroup()",
cgroup_fd = cgroup_setup_and_join(TEST_CGROUP);
CHECK(cgroup_fd < 0, "cgroup_setup_and_join()",
"cgroup_fd:%d errno:%d", cgroup_fd, errno);
err = join_cgroup(TEST_CGROUP);
CHECK(err, "join_cgroup", "err:%d errno:%d", err, errno);
atexit(cleanup_cgroup_environment);
err = bpf_prog_load_xattr(&attr, &obj, &egress_fd);
CHECK(err, "bpf_prog_load_xattr()", "err:%d", err);

@@ -191,16 +191,10 @@ int main(int argc, char **argv)
int cgfd = -1;
int err = 0;
if (setup_cgroup_environment())
goto err;
cgfd = create_and_get_cgroup(CG_PATH);
cgfd = cgroup_setup_and_join(CG_PATH);
if (cgfd < 0)
goto err;
if (join_cgroup(CG_PATH))
goto err;
if (run_test(cgfd))
goto err;

@@ -1963,23 +1963,9 @@ int main(int argc, char **argv)
}
if (!cg_fd) {
if (setup_cgroup_environment()) {
fprintf(stderr, "ERROR: cgroup env failed\n");
return -EINVAL;
}
cg_fd = create_and_get_cgroup(CG_PATH);
if (cg_fd < 0) {
fprintf(stderr,
"ERROR: (%i) open cg path failed: %s\n",
cg_fd, strerror(errno));
cg_fd = cgroup_setup_and_join(CG_PATH);
if (cg_fd < 0)
return cg_fd;
}
if (join_cgroup(CG_PATH)) {
fprintf(stderr, "ERROR: failed to join cgroup\n");
return -EINVAL;
}
cg_created = 1;
}

@@ -1619,16 +1619,10 @@ int main(int argc, char **argv)
int cgfd = -1;
int err = 0;
if (setup_cgroup_environment())
goto err;
cgfd = create_and_get_cgroup(CG_PATH);
cgfd = cgroup_setup_and_join(CG_PATH);
if (cgfd < 0)
goto err;
if (join_cgroup(CG_PATH))
goto err;
if (run_tests(cgfd))
goto err;

@@ -102,16 +102,10 @@ int main(int argc, char **argv)
__u32 key = 0;
int rv;
if (setup_cgroup_environment())
goto err;
cg_fd = create_and_get_cgroup(cg_path);
cg_fd = cgroup_setup_and_join(cg_path);
if (cg_fd < 0)
goto err;
if (join_cgroup(cg_path))
goto err;
if (bpf_prog_load(file, BPF_PROG_TYPE_SOCK_OPS, &obj, &prog_fd)) {
printf("FAILED: load_bpf_file failed for: %s\n", file);
goto err;

@@ -86,16 +86,10 @@ int main(int argc, char **argv)
CPU_SET(0, &cpuset);
pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
if (setup_cgroup_environment())
goto err;
cg_fd = create_and_get_cgroup(cg_path);
cg_fd = cgroup_setup_and_join(cg_path);
if (cg_fd < 0)
goto err;
if (join_cgroup(cg_path))
goto err;
if (bpf_prog_load(file, BPF_PROG_TYPE_SOCK_OPS, &obj, &prog_fd)) {
printf("FAILED: load_bpf_file failed for: %s\n", file);
goto err;

@@ -10,52 +10,72 @@
# | xdp forwarding |
# ------------------
ret=0
setup()
{
local xdpmode=$1
ip netns add ns1
ip netns add ns2
ip link add veth1 index 111 type veth peer name veth11 netns ns1
ip link add veth2 index 222 type veth peer name veth22 netns ns2
ip link set veth1 up
ip link set veth2 up
ip -n ns1 link set dev veth11 up
ip -n ns2 link set dev veth22 up
ip -n ns1 addr add 10.1.1.11/24 dev veth11
ip -n ns2 addr add 10.1.1.22/24 dev veth22
}
cleanup()
{
if [ "$?" = "0" ]; then
echo "selftests: test_xdp_redirect [PASS]";
else
echo "selftests: test_xdp_redirect [FAILED]";
fi
set +e
ip link del veth1 2> /dev/null
ip link del veth2 2> /dev/null
ip netns del ns1 2> /dev/null
ip netns del ns2 2> /dev/null
}
ip link set dev lo xdpgeneric off 2>/dev/null > /dev/null
if [ $? -ne 0 ];then
echo "selftests: [SKIP] Could not run test without the ip xdpgeneric support"
exit 0
fi
test_xdp_redirect()
{
local xdpmode=$1
setup
ip link set dev veth1 $xdpmode off &> /dev/null
if [ $? -ne 0 ];then
echo "selftests: test_xdp_redirect $xdpmode [SKIP]"
return 0
fi
ip -n ns1 link set veth11 $xdpmode obj xdp_dummy.o sec xdp_dummy &> /dev/null
ip -n ns2 link set veth22 $xdpmode obj xdp_dummy.o sec xdp_dummy &> /dev/null
ip link set dev veth1 $xdpmode obj test_xdp_redirect.o sec redirect_to_222 &> /dev/null
ip link set dev veth2 $xdpmode obj test_xdp_redirect.o sec redirect_to_111 &> /dev/null
ip netns exec ns1 ping -c 1 10.1.1.22 &> /dev/null
local ret1=$?
ip netns exec ns2 ping -c 1 10.1.1.11 &> /dev/null
local ret2=$?
if [ $ret1 -eq 0 -a $ret2 -eq 0 ]; then
echo "selftests: test_xdp_redirect $xdpmode [PASS]";
else
ret=1
echo "selftests: test_xdp_redirect $xdpmode [FAILED]";
fi
cleanup
}
set -e
trap cleanup 2 3 6 9
ip netns add ns1
ip netns add ns2
test_xdp_redirect xdpgeneric
test_xdp_redirect xdpdrv
trap cleanup 0 2 3 6 9
ip link add veth1 index 111 type veth peer name veth11
ip link add veth2 index 222 type veth peer name veth22
ip link set veth11 netns ns1
ip link set veth22 netns ns2
ip link set veth1 up
ip link set veth2 up
ip netns exec ns1 ip addr add 10.1.1.11/24 dev veth11
ip netns exec ns2 ip addr add 10.1.1.22/24 dev veth22
ip netns exec ns1 ip link set dev veth11 up
ip netns exec ns2 ip link set dev veth22 up
ip link set dev veth1 xdpgeneric obj test_xdp_redirect.o sec redirect_to_222
ip link set dev veth2 xdpgeneric obj test_xdp_redirect.o sec redirect_to_111
ip netns exec ns1 ping -c 1 10.1.1.22
ip netns exec ns2 ping -c 1 10.1.1.11
exit 0
exit $ret

@@ -64,3 +64,17 @@ int parse_num_list(const char *s, bool **num_set, int *num_set_len)
return 0;
}
__u32 link_info_prog_id(const struct bpf_link *link, struct bpf_link_info *info)
{
__u32 info_len = sizeof(*info);
int err;
memset(info, 0, sizeof(*info));
err = bpf_obj_get_info_by_fd(bpf_link__fd(link), info, &info_len);
if (err) {
printf("failed to get link info: %d\n", -errno);
return 0;
}
return info->prog_id;
}
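
A hedged usage sketch for the new helper — the wrapper name and surrounding test state are hypothetical, and the includes come from the companion header in the next hunk:

static bool link_points_at(const struct bpf_link *link, __u32 want_prog_id)
{
	struct bpf_link_info info;

	/* link_info_prog_id() zeroes *info, queries the link's fd via
	 * bpf_obj_get_info_by_fd() and returns info->prog_id (0 on error) */
	return link_info_prog_id(link, &info) == want_prog_id;
}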

@@ -1,5 +1,8 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
/* Copyright (C) 2020 Facebook, Inc. */
#include <stdbool.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
int parse_num_list(const char *s, bool **set, int *set_len);
__u32 link_info_prog_id(const struct bpf_link *link, struct bpf_link_info *info);

@@ -0,0 +1,492 @@
{
"valid 1,2,4,8-byte reads from bpf_sk_lookup",
.insns = {
/* 1-byte read from family field */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, family)),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, family) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, family) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, family) + 3),
/* 2-byte read from family field */
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, family)),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, family) + 2),
/* 4-byte read from family field */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, family)),
/* 1-byte read from protocol field */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, protocol)),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, protocol) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, protocol) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, protocol) + 3),
/* 2-byte read from protocol field */
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, protocol)),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, protocol) + 2),
/* 4-byte read from protocol field */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, protocol)),
/* 1-byte read from remote_ip4 field */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip4)),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip4) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip4) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip4) + 3),
/* 2-byte read from remote_ip4 field */
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip4)),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip4) + 2),
/* 4-byte read from remote_ip4 field */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip4)),
/* 1-byte read from remote_ip6 field */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6)),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 3),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 4),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 5),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 6),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 7),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 8),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 9),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 10),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 11),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 12),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 13),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 14),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 15),
/* 2-byte read from remote_ip6 field */
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6)),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 2),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 4),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 6),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 8),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 10),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 12),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 14),
/* 4-byte read from remote_ip6 field */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6)),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 4),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6) + 12),
/* 1-byte read from remote_port field */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_port)),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_port) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_port) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_port) + 3),
/* 2-byte read from remote_port field */
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_port)),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_port) + 2),
/* 4-byte read from remote_port field */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_port)),
/* 1-byte read from local_ip4 field */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip4)),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip4) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip4) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip4) + 3),
/* 2-byte read from local_ip4 field */
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip4)),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip4) + 2),
/* 4-byte read from local_ip4 field */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip4)),
/* 1-byte read from local_ip6 field */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6)),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 3),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 4),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 5),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 6),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 7),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 8),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 9),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 10),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 11),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 12),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 13),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 14),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 15),
/* 2-byte read from local_ip6 field */
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6)),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 2),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 4),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 6),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 8),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 10),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 12),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 14),
/* 4-byte read from local_ip6 field */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6)),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 4),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6) + 12),
/* 1-byte read from local_port field */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_port)),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_port) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_port) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_port) + 3),
/* 2-byte read from local_port field */
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_port)),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_port) + 2),
/* 4-byte read from local_port field */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_port)),
/* 8-byte read from sk field */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, sk)),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
},
/* invalid 8-byte reads from 4-byte fields in bpf_sk_lookup */
{
"invalid 8-byte read from bpf_sk_lookup family field",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, family)),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
},
{
"invalid 8-byte read from bpf_sk_lookup protocol field",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, protocol)),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
},
{
"invalid 8-byte read from bpf_sk_lookup remote_ip4 field",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip4)),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
},
{
"invalid 8-byte read from bpf_sk_lookup remote_ip6 field",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_ip6)),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
},
{
"invalid 8-byte read from bpf_sk_lookup remote_port field",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, remote_port)),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
},
{
"invalid 8-byte read from bpf_sk_lookup local_ip4 field",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip4)),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
},
{
"invalid 8-byte read from bpf_sk_lookup local_ip6 field",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_ip6)),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
},
{
"invalid 8-byte read from bpf_sk_lookup local_port field",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_port)),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
},
/* invalid 1,2,4-byte reads from 8-byte fields in bpf_sk_lookup */
{
"invalid 4-byte read from bpf_sk_lookup sk field",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, sk)),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
},
{
"invalid 2-byte read from bpf_sk_lookup sk field",
.insns = {
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, sk)),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
},
{
"invalid 1-byte read from bpf_sk_lookup sk field",
.insns = {
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, sk)),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
},
/* out of bounds and unaligned reads from bpf_sk_lookup */
{
"invalid 4-byte read past end of bpf_sk_lookup",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
sizeof(struct bpf_sk_lookup)),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
},
{
"invalid 4-byte unaligned read from bpf_sk_lookup at odd offset",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 1),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
},
{
"invalid 4-byte unaligned read from bpf_sk_lookup at even offset",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 2),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
},
/* in-bounds and out-of-bounds writes to bpf_sk_lookup */
{
"invalid 8-byte write to bpf_sk_lookup",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0xcafe4a11U),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
},
{
"invalid 4-byte write to bpf_sk_lookup",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0xcafe4a11U),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
},
{
"invalid 2-byte write to bpf_sk_lookup",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0xcafe4a11U),
BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, 0),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
},
{
"invalid 1-byte write to bpf_sk_lookup",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0xcafe4a11U),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
},
{
"invalid 4-byte write past end of bpf_sk_lookup",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0xcafe4a11U),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
sizeof(struct bpf_sk_lookup)),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
},
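
Together these cases pin down the bpf_sk_lookup context-access contract: 1-, 2- and 4-byte reads of the 4-byte fields are all accepted, sk may only be loaded as a full 8-byte pointer, and every write — like every out-of-bounds or misaligned read — is rejected as "invalid bpf_context access". A minimal sketch of a program that stays inside that contract (not part of this diff; the section name follows libbpf convention and the port constant is illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

SEC("sk_lookup")
int ctx_access_demo(struct bpf_sk_lookup *ctx)
{
	/* full-width reads of 4-byte fields are fine */
	if (ctx->family != 2 /* AF_INET */ ||
	    ctx->protocol != 17 /* IPPROTO_UDP */)
		return SK_PASS;

	/* sk is readable only as a whole 8-byte pointer */
	if (ctx->local_port == 53 && !ctx->sk)
		bpf_printk("no socket selected for port 53");

	return SK_PASS;	/* any store to *ctx would be rejected */
}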
