Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:

====================
pull-request: bpf-next 2019-07-09

The following pull-request contains BPF updates for your *net-next* tree.

The main changes are:

1) Lots of libbpf improvements: i) addition of new APIs to attach BPF
   programs to tracing entities such as {k,u}probes or tracepoints,
   ii) improved specification of BTF-defined maps by eliminating the
   need for data initialization for some of the members, iii) addition
   of a high-level API for setting up and polling perf buffers for
   BPF event output helpers, all from Andrii.

2) Add "prog run" subcommand to bpftool in order to test-run programs
   through the kernel testing infrastructure of BPF, from Quentin.

3) Improve verifier for BPF sockaddr programs to support 8-byte stores
   for user_ip6 and msg_src_ip6 members given clang tends to generate
   such stores, from Stanislav.

4) Enable the new BPF JIT zero-extension optimization for further
   riscv64 ALU ops, from Luke.

5) Fix a bpftool json JIT dump crash on powerpc, from Jiri.

6) Fix an AF_XDP race in generic XDP's receive path, from Ilya.

7) Various smaller fixes from Ilya, Yue and Arnd.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
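The new libbpf APIs from item 1 appear end to end in the selftest diffs below; as a compact orientation, here is a hedged userspace sketch of the flow (error checks elided; "./my_prog.o", on_sample and perf_buf_map are placeholder names for this illustration, not part of the series):

	/* C sketch: attach a kprobe program, then stream its output */
	struct perf_buffer_opts pb_opts = {};
	struct bpf_object *obj;
	struct bpf_program *prog;
	struct bpf_link *link;
	struct perf_buffer *pb;
	int prog_fd;

	bpf_prog_load("./my_prog.o", BPF_PROG_TYPE_KPROBE, &obj, &prog_fd);
	prog = bpf_object__find_program_by_title(obj, "kprobe/sys_nanosleep");
	link = bpf_program__attach_kprobe(prog, false /* retprobe */,
					  "sys_nanosleep");

	/* sample_cb: void (*)(void *ctx, int cpu, void *data, __u32 size) */
	pb_opts.sample_cb = on_sample;
	pb = perf_buffer__new(bpf_map__fd(bpf_object__find_map_by_name(obj,
			      "perf_buf_map")), 1, &pb_opts);
	perf_buffer__poll(pb, 100 /* timeout, ms */);

	perf_buffer__free(pb);
	bpf_link__destroy(link);
	bpf_object__close(obj);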
 tools/testing/selftests/bpf/.gitignore | 1 +
@@ -42,3 +42,4 @@ xdping
 test_sockopt
 test_sockopt_sk
 test_sockopt_multi
+test_tcp_rtt
@@ -8,6 +8,9 @@
  */
 #define SEC(NAME) __attribute__((section(NAME), used))
 
+#define __uint(name, val) int (*name)[val]
+#define __type(name, val) val *name
+
 /* helper macro to print out debug messages */
 #define bpf_printk(fmt, ...)				\
 ({							\
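These two additions are what let the BTF-defined map declarations in the rest of this series drop their initializers: each macro encodes the attribute in the member's *type*, where it is captured as BTF and read back by libbpf at load time. A hedged illustration of the expansion (the map name is made up):

	struct {
		__uint(type, BPF_MAP_TYPE_ARRAY); /* int (*type)[BPF_MAP_TYPE_ARRAY]; */
		__uint(max_entries, 8);           /* int (*max_entries)[8]; */
		__type(key, __u32);               /* __u32 *key; */
		__type(value, __u64);             /* __u64 *value; */
	} example_map SEC(".maps");               /* note: no "= { ... }" needed */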
 tools/testing/selftests/bpf/prog_tests/attach_probe.c | 166 (new file)
@@ -0,0 +1,166 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>

ssize_t get_base_addr() {
	size_t start;
	char buf[256];
	FILE *f;

	f = fopen("/proc/self/maps", "r");
	if (!f)
		return -errno;

	while (fscanf(f, "%zx-%*x %s %*s\n", &start, buf) == 2) {
		if (strcmp(buf, "r-xp") == 0) {
			fclose(f);
			return start;
		}
	}

	fclose(f);
	return -EINVAL;
}

#ifdef __x86_64__
#define SYS_KPROBE_NAME "__x64_sys_nanosleep"
#else
#define SYS_KPROBE_NAME "sys_nanosleep"
#endif

void test_attach_probe(void)
{
	const char *kprobe_name = "kprobe/sys_nanosleep";
	const char *kretprobe_name = "kretprobe/sys_nanosleep";
	const char *uprobe_name = "uprobe/trigger_func";
	const char *uretprobe_name = "uretprobe/trigger_func";
	const int kprobe_idx = 0, kretprobe_idx = 1;
	const int uprobe_idx = 2, uretprobe_idx = 3;
	const char *file = "./test_attach_probe.o";
	struct bpf_program *kprobe_prog, *kretprobe_prog;
	struct bpf_program *uprobe_prog, *uretprobe_prog;
	struct bpf_object *obj;
	int err, prog_fd, duration = 0, res;
	struct bpf_link *kprobe_link = NULL;
	struct bpf_link *kretprobe_link = NULL;
	struct bpf_link *uprobe_link = NULL;
	struct bpf_link *uretprobe_link = NULL;
	int results_map_fd;
	size_t uprobe_offset;
	ssize_t base_addr;

	base_addr = get_base_addr();
	if (CHECK(base_addr < 0, "get_base_addr",
		  "failed to find base addr: %zd", base_addr))
		return;
	uprobe_offset = (size_t)&get_base_addr - base_addr;

	/* load programs */
	err = bpf_prog_load(file, BPF_PROG_TYPE_KPROBE, &obj, &prog_fd);
	if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno))
		return;

	kprobe_prog = bpf_object__find_program_by_title(obj, kprobe_name);
	if (CHECK(!kprobe_prog, "find_probe",
		  "prog '%s' not found\n", kprobe_name))
		goto cleanup;
	kretprobe_prog = bpf_object__find_program_by_title(obj, kretprobe_name);
	if (CHECK(!kretprobe_prog, "find_probe",
		  "prog '%s' not found\n", kretprobe_name))
		goto cleanup;
	uprobe_prog = bpf_object__find_program_by_title(obj, uprobe_name);
	if (CHECK(!uprobe_prog, "find_probe",
		  "prog '%s' not found\n", uprobe_name))
		goto cleanup;
	uretprobe_prog = bpf_object__find_program_by_title(obj, uretprobe_name);
	if (CHECK(!uretprobe_prog, "find_probe",
		  "prog '%s' not found\n", uretprobe_name))
		goto cleanup;

	/* load maps */
	results_map_fd = bpf_find_map(__func__, obj, "results_map");
	if (CHECK(results_map_fd < 0, "find_results_map",
		  "err %d\n", results_map_fd))
		goto cleanup;

	kprobe_link = bpf_program__attach_kprobe(kprobe_prog,
						 false /* retprobe */,
						 SYS_KPROBE_NAME);
	if (CHECK(IS_ERR(kprobe_link), "attach_kprobe",
		  "err %ld\n", PTR_ERR(kprobe_link))) {
		kprobe_link = NULL;
		goto cleanup;
	}
	kretprobe_link = bpf_program__attach_kprobe(kretprobe_prog,
						    true /* retprobe */,
						    SYS_KPROBE_NAME);
	if (CHECK(IS_ERR(kretprobe_link), "attach_kretprobe",
		  "err %ld\n", PTR_ERR(kretprobe_link))) {
		kretprobe_link = NULL;
		goto cleanup;
	}
	uprobe_link = bpf_program__attach_uprobe(uprobe_prog,
						 false /* retprobe */,
						 0 /* self pid */,
						 "/proc/self/exe",
						 uprobe_offset);
	if (CHECK(IS_ERR(uprobe_link), "attach_uprobe",
		  "err %ld\n", PTR_ERR(uprobe_link))) {
		uprobe_link = NULL;
		goto cleanup;
	}
	uretprobe_link = bpf_program__attach_uprobe(uretprobe_prog,
						    true /* retprobe */,
						    -1 /* any pid */,
						    "/proc/self/exe",
						    uprobe_offset);
	if (CHECK(IS_ERR(uretprobe_link), "attach_uretprobe",
		  "err %ld\n", PTR_ERR(uretprobe_link))) {
		uretprobe_link = NULL;
		goto cleanup;
	}

	/* trigger & validate kprobe && kretprobe */
	usleep(1);

	err = bpf_map_lookup_elem(results_map_fd, &kprobe_idx, &res);
	if (CHECK(err, "get_kprobe_res",
		  "failed to get kprobe res: %d\n", err))
		goto cleanup;
	if (CHECK(res != kprobe_idx + 1, "check_kprobe_res",
		  "wrong kprobe res: %d\n", res))
		goto cleanup;

	err = bpf_map_lookup_elem(results_map_fd, &kretprobe_idx, &res);
	if (CHECK(err, "get_kretprobe_res",
		  "failed to get kretprobe res: %d\n", err))
		goto cleanup;
	if (CHECK(res != kretprobe_idx + 1, "check_kretprobe_res",
		  "wrong kretprobe res: %d\n", res))
		goto cleanup;

	/* trigger & validate uprobe & uretprobe */
	get_base_addr();

	err = bpf_map_lookup_elem(results_map_fd, &uprobe_idx, &res);
	if (CHECK(err, "get_uprobe_res",
		  "failed to get uprobe res: %d\n", err))
		goto cleanup;
	if (CHECK(res != uprobe_idx + 1, "check_uprobe_res",
		  "wrong uprobe res: %d\n", res))
		goto cleanup;

	err = bpf_map_lookup_elem(results_map_fd, &uretprobe_idx, &res);
	if (CHECK(err, "get_uretprobe_res",
		  "failed to get uretprobe res: %d\n", err))
		goto cleanup;
	if (CHECK(res != uretprobe_idx + 1, "check_uretprobe_res",
		  "wrong uretprobe res: %d\n", res))
		goto cleanup;

cleanup:
	bpf_link__destroy(kprobe_link);
	bpf_link__destroy(kretprobe_link);
	bpf_link__destroy(uprobe_link);
	bpf_link__destroy(uretprobe_link);
	bpf_object__close(obj);
}
 tools/testing/selftests/bpf/prog_tests/perf_buffer.c | 100 (new file)
@@ -0,0 +1,100 @@
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <sys/socket.h>
#include <test_progs.h>

#ifdef __x86_64__
#define SYS_KPROBE_NAME "__x64_sys_nanosleep"
#else
#define SYS_KPROBE_NAME "sys_nanosleep"
#endif

static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	int cpu_data = *(int *)data, duration = 0;
	cpu_set_t *cpu_seen = ctx;

	if (cpu_data != cpu)
		CHECK(cpu_data != cpu, "check_cpu_data",
		      "cpu_data %d != cpu %d\n", cpu_data, cpu);

	CPU_SET(cpu, cpu_seen);
}

void test_perf_buffer(void)
{
	int err, prog_fd, nr_cpus, i, duration = 0;
	const char *prog_name = "kprobe/sys_nanosleep";
	const char *file = "./test_perf_buffer.o";
	struct perf_buffer_opts pb_opts = {};
	struct bpf_map *perf_buf_map;
	cpu_set_t cpu_set, cpu_seen;
	struct bpf_program *prog;
	struct bpf_object *obj;
	struct perf_buffer *pb;
	struct bpf_link *link;

	nr_cpus = libbpf_num_possible_cpus();
	if (CHECK(nr_cpus < 0, "nr_cpus", "err %d\n", nr_cpus))
		return;

	/* load program */
	err = bpf_prog_load(file, BPF_PROG_TYPE_KPROBE, &obj, &prog_fd);
	if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno))
		return;

	prog = bpf_object__find_program_by_title(obj, prog_name);
	if (CHECK(!prog, "find_probe", "prog '%s' not found\n", prog_name))
		goto out_close;

	/* load map */
	perf_buf_map = bpf_object__find_map_by_name(obj, "perf_buf_map");
	if (CHECK(!perf_buf_map, "find_perf_buf_map", "not found\n"))
		goto out_close;

	/* attach kprobe */
	link = bpf_program__attach_kprobe(prog, false /* retprobe */,
					  SYS_KPROBE_NAME);
	if (CHECK(IS_ERR(link), "attach_kprobe", "err %ld\n", PTR_ERR(link)))
		goto out_close;

	/* set up perf buffer */
	pb_opts.sample_cb = on_sample;
	pb_opts.ctx = &cpu_seen;
	pb = perf_buffer__new(bpf_map__fd(perf_buf_map), 1, &pb_opts);
	if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb)))
		goto out_detach;

	/* trigger kprobe on every CPU */
	CPU_ZERO(&cpu_seen);
	for (i = 0; i < nr_cpus; i++) {
		CPU_ZERO(&cpu_set);
		CPU_SET(i, &cpu_set);

		err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set),
					     &cpu_set);
		if (err && CHECK(err, "set_affinity", "cpu #%d, err %d\n",
				 i, err))
			goto out_detach;

		usleep(1);
	}

	/* read perf buffer */
	err = perf_buffer__poll(pb, 100);
	if (CHECK(err < 0, "perf_buffer__poll", "err %d\n", err))
		goto out_free_pb;

	if (CHECK(CPU_COUNT(&cpu_seen) != nr_cpus, "seen_cpu_cnt",
		  "expect %d, seen %d\n", nr_cpus, CPU_COUNT(&cpu_seen)))
		goto out_free_pb;

out_free_pb:
	perf_buffer__free(pb);
out_detach:
	bpf_link__destroy(link);
out_close:
	bpf_object__close(obj);
}
@@ -4,11 +4,13 @@
 void test_stacktrace_build_id(void)
 {
 	int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
+	const char *prog_name = "tracepoint/random/urandom_read";
 	const char *file = "./test_stacktrace_build_id.o";
-	int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
-	struct perf_event_attr attr = {};
+	int err, prog_fd, stack_trace_len;
 	__u32 key, previous_key, val, duration = 0;
+	struct bpf_program *prog;
 	struct bpf_object *obj;
+	struct bpf_link *link = NULL;
 	char buf[256];
 	int i, j;
 	struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
@@ -18,44 +20,16 @@ void test_stacktrace_build_id(void)
 retry:
 	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
 	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
-		goto out;
+		return;
 
-	/* Get the ID for the sched/sched_switch tracepoint */
-	snprintf(buf, sizeof(buf),
-		 "/sys/kernel/debug/tracing/events/random/urandom_read/id");
-	efd = open(buf, O_RDONLY, 0);
-	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
+	prog = bpf_object__find_program_by_title(obj, prog_name);
+	if (CHECK(!prog, "find_prog", "prog '%s' not found\n", prog_name))
 		goto close_prog;
 
-	bytes = read(efd, buf, sizeof(buf));
-	close(efd);
-	if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
-		  "read", "bytes %d errno %d\n", bytes, errno))
+	link = bpf_program__attach_tracepoint(prog, "random", "urandom_read");
+	if (CHECK(IS_ERR(link), "attach_tp", "err %ld\n", PTR_ERR(link)))
 		goto close_prog;
 
-	/* Open the perf event and attach bpf progrram */
-	attr.config = strtol(buf, NULL, 0);
-	attr.type = PERF_TYPE_TRACEPOINT;
-	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
-	attr.sample_period = 1;
-	attr.wakeup_events = 1;
-	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
-			 0 /* cpu 0 */, -1 /* group id */,
-			 0 /* flags */);
-	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
-		  pmu_fd, errno))
-		goto close_prog;
-
-	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
-	if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
-		  err, errno))
-		goto close_pmu;
-
-	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
-	if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
-		  err, errno))
-		goto disable_pmu;
-
 	/* find map fds */
 	control_map_fd = bpf_find_map(__func__, obj, "control_map");
 	if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
@@ -133,8 +107,7 @@ retry:
 	 * try it one more time.
 	 */
 	if (build_id_matches < 1 && retry--) {
-		ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
-		close(pmu_fd);
+		bpf_link__destroy(link);
 		bpf_object__close(obj);
 		printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
 		       __func__);
@@ -152,14 +125,8 @@ retry:
 	      "err %d errno %d\n", err, errno);
 
-disable_pmu:
-	ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
-
-close_pmu:
-	close(pmu_fd);
+	bpf_link__destroy(link);
 
 close_prog:
 	bpf_object__close(obj);
-
-out:
-	return;
 }
@@ -17,6 +17,7 @@ static __u64 read_perf_max_sample_freq(void)
 void test_stacktrace_build_id_nmi(void)
 {
 	int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
+	const char *prog_name = "tracepoint/random/urandom_read";
 	const char *file = "./test_stacktrace_build_id.o";
 	int err, pmu_fd, prog_fd;
 	struct perf_event_attr attr = {
@@ -25,7 +26,9 @@ void test_stacktrace_build_id_nmi(void)
 		.config = PERF_COUNT_HW_CPU_CYCLES,
 	};
 	__u32 key, previous_key, val, duration = 0;
+	struct bpf_program *prog;
 	struct bpf_object *obj;
+	struct bpf_link *link;
 	char buf[256];
 	int i, j;
 	struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
@@ -39,6 +42,10 @@ retry:
 	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
 		return;
 
+	prog = bpf_object__find_program_by_title(obj, prog_name);
+	if (CHECK(!prog, "find_prog", "prog '%s' not found\n", prog_name))
+		goto close_prog;
+
 	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
 			 0 /* cpu 0 */, -1 /* group id */,
 			 0 /* flags */);
@@ -47,15 +54,12 @@ retry:
 		  pmu_fd, errno))
 		goto close_prog;
 
-	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
-	if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
-		  err, errno))
-		goto close_pmu;
-
-	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
-	if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
-		  err, errno))
-		goto disable_pmu;
+	link = bpf_program__attach_perf_event(prog, pmu_fd);
+	if (CHECK(IS_ERR(link), "attach_perf_event",
+		  "err %ld\n", PTR_ERR(link))) {
+		close(pmu_fd);
+		goto close_prog;
+	}
 
 	/* find map fds */
 	control_map_fd = bpf_find_map(__func__, obj, "control_map");
@@ -134,8 +138,7 @@ retry:
 	 * try it one more time.
 	 */
 	if (build_id_matches < 1 && retry--) {
-		ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
-		close(pmu_fd);
+		bpf_link__destroy(link);
 		bpf_object__close(obj);
 		printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
 		       __func__);
@@ -154,11 +157,7 @@ retry:
 	 */
 
-disable_pmu:
-	ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
-
-close_pmu:
-	close(pmu_fd);
-
+	bpf_link__destroy(link);
 close_prog:
 	bpf_object__close(obj);
 }
@@ -4,50 +4,26 @@
 void test_stacktrace_map(void)
 {
 	int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
+	const char *prog_name = "tracepoint/sched/sched_switch";
+	int err, prog_fd, stack_trace_len;
 	const char *file = "./test_stacktrace_map.o";
-	int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
-	struct perf_event_attr attr = {};
 	__u32 key, val, duration = 0;
+	struct bpf_program *prog;
 	struct bpf_object *obj;
-	char buf[256];
+	struct bpf_link *link;
 
 	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
 	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
 		return;
 
-	/* Get the ID for the sched/sched_switch tracepoint */
-	snprintf(buf, sizeof(buf),
-		 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
-	efd = open(buf, O_RDONLY, 0);
-	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
+	prog = bpf_object__find_program_by_title(obj, prog_name);
+	if (CHECK(!prog, "find_prog", "prog '%s' not found\n", prog_name))
 		goto close_prog;
 
-	bytes = read(efd, buf, sizeof(buf));
-	close(efd);
-	if (bytes <= 0 || bytes >= sizeof(buf))
+	link = bpf_program__attach_tracepoint(prog, "sched", "sched_switch");
+	if (CHECK(IS_ERR(link), "attach_tp", "err %ld\n", PTR_ERR(link)))
 		goto close_prog;
 
-	/* Open the perf event and attach bpf progrram */
-	attr.config = strtol(buf, NULL, 0);
-	attr.type = PERF_TYPE_TRACEPOINT;
-	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
-	attr.sample_period = 1;
-	attr.wakeup_events = 1;
-	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
-			 0 /* cpu 0 */, -1 /* group id */,
-			 0 /* flags */);
-	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
-		  pmu_fd, errno))
-		goto close_prog;
-
-	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
-	if (err)
-		goto disable_pmu;
-
-	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
-	if (err)
-		goto disable_pmu;
-
 	/* find map fds */
 	control_map_fd = bpf_find_map(__func__, obj, "control_map");
 	if (control_map_fd < 0)
@@ -96,8 +72,7 @@ void test_stacktrace_map(void)
 disable_pmu:
 	error_cnt++;
 disable_pmu_noerr:
-	ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
-	close(pmu_fd);
+	bpf_link__destroy(link);
 close_prog:
 	bpf_object__close(obj);
 }
@@ -3,18 +3,25 @@
 
 void test_stacktrace_map_raw_tp(void)
 {
+	const char *prog_name = "tracepoint/sched/sched_switch";
 	int control_map_fd, stackid_hmap_fd, stackmap_fd;
 	const char *file = "./test_stacktrace_map.o";
-	int efd, err, prog_fd;
 	__u32 key, val, duration = 0;
+	int err, prog_fd;
+	struct bpf_program *prog;
 	struct bpf_object *obj;
+	struct bpf_link *link = NULL;
 
 	err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
 	if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
 		return;
 
-	efd = bpf_raw_tracepoint_open("sched_switch", prog_fd);
-	if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
+	prog = bpf_object__find_program_by_title(obj, prog_name);
+	if (CHECK(!prog, "find_prog", "prog '%s' not found\n", prog_name))
 		goto close_prog;
 
+	link = bpf_program__attach_raw_tracepoint(prog, "sched_switch");
+	if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n", PTR_ERR(link)))
+		goto close_prog;
+
 	/* find map fds */
@@ -55,5 +62,7 @@ void test_stacktrace_map_raw_tp(void)
 close_prog:
 	error_cnt++;
 close_prog_noerr:
+	if (!IS_ERR_OR_NULL(link))
+		bpf_link__destroy(link);
 	bpf_object__close(obj);
 }
@@ -58,26 +58,18 @@ struct frag_hdr {
 };
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 key_size;
-	__u32 value_size;
-} jmp_table SEC(".maps") = {
-	.type = BPF_MAP_TYPE_PROG_ARRAY,
-	.max_entries = 8,
-	.key_size = sizeof(__u32),
-	.value_size = sizeof(__u32),
-};
+	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+	__uint(max_entries, 8);
+	__uint(key_size, sizeof(__u32));
+	__uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
-	struct bpf_flow_keys *value;
-} last_dissection SEC(".maps") = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.max_entries = 1,
-};
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, __u32);
+	__type(value, struct bpf_flow_keys);
+} last_dissection SEC(".maps");
 
 static __always_inline int export_flow_keys(struct bpf_flow_keys *keys,
 					     int ret)
@@ -4,19 +4,19 @@
 #include <linux/bpf.h>
 #include "bpf_helpers.h"
 
-struct bpf_map_def SEC("maps") cg_ids = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.key_size = sizeof(__u32),
-	.value_size = sizeof(__u64),
-	.max_entries = 1,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, __u32);
+	__type(value, __u64);
+} cg_ids SEC(".maps");
 
-struct bpf_map_def SEC("maps") pidmap = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.key_size = sizeof(__u32),
-	.value_size = sizeof(__u32),
-	.max_entries = 1,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, __u32);
+	__type(value, __u32);
+} pidmap SEC(".maps");
 
 SEC("tracepoint/syscalls/sys_enter_nanosleep")
 int trace(void *ctx)
@@ -11,20 +11,16 @@
 #define NS_PER_SEC 1000000000
 
 struct {
-	__u32 type;
-	struct bpf_cgroup_storage_key *key;
-	struct percpu_net_cnt *value;
-} percpu_netcnt SEC(".maps") = {
-	.type = BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
-};
+	__uint(type, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
+	__type(key, struct bpf_cgroup_storage_key);
+	__type(value, struct percpu_net_cnt);
+} percpu_netcnt SEC(".maps");
 
 struct {
-	__u32 type;
-	struct bpf_cgroup_storage_key *key;
-	struct net_cnt *value;
-} netcnt SEC(".maps") = {
-	.type = BPF_MAP_TYPE_CGROUP_STORAGE,
-};
+	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
+	__type(key, struct bpf_cgroup_storage_key);
+	__type(value, struct net_cnt);
+} netcnt SEC(".maps");
 
 SEC("cgroup/skb")
 int bpf_nextcnt(struct __sk_buff *skb)
@@ -58,14 +58,6 @@ typedef struct {
 } Event;
 
-
-struct bpf_elf_map {
-	__u32 type;
-	__u32 size_key;
-	__u32 size_value;
-	__u32 max_elem;
-	__u32 flags;
-};
 
 typedef int pid_t;
 
 typedef struct {
@@ -118,47 +110,47 @@ static __always_inline bool get_frame_data(void *frame_ptr, PidData *pidData,
 	return true;
 }
 
-struct bpf_elf_map SEC("maps") pidmap = {
-	.type = BPF_MAP_TYPE_HASH,
-	.size_key = sizeof(int),
-	.size_value = sizeof(PidData),
-	.max_elem = 1,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, 1);
+	__type(key, int);
+	__type(value, PidData);
+} pidmap SEC(".maps");
 
-struct bpf_elf_map SEC("maps") eventmap = {
-	.type = BPF_MAP_TYPE_HASH,
-	.size_key = sizeof(int),
-	.size_value = sizeof(Event),
-	.max_elem = 1,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, 1);
+	__type(key, int);
+	__type(value, Event);
+} eventmap SEC(".maps");
 
-struct bpf_elf_map SEC("maps") symbolmap = {
-	.type = BPF_MAP_TYPE_HASH,
-	.size_key = sizeof(Symbol),
-	.size_value = sizeof(int),
-	.max_elem = 1,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, 1);
+	__type(key, Symbol);
+	__type(value, int);
+} symbolmap SEC(".maps");
 
-struct bpf_elf_map SEC("maps") statsmap = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.size_key = sizeof(Stats),
-	.size_value = sizeof(int),
-	.max_elem = 1,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, int);
+	__type(value, Stats);
+} statsmap SEC(".maps");
 
-struct bpf_elf_map SEC("maps") perfmap = {
-	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
-	.size_key = sizeof(int),
-	.size_value = sizeof(int),
-	.max_elem = 32,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+	__uint(max_entries, 32);
+	__uint(key_size, sizeof(int));
+	__uint(value_size, sizeof(int));
+} perfmap SEC(".maps");
 
-struct bpf_elf_map SEC("maps") stackmap = {
-	.type = BPF_MAP_TYPE_STACK_TRACE,
-	.size_key = sizeof(int),
-	.size_value = sizeof(long long) * 127,
-	.max_elem = 1000,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
+	__uint(max_entries, 1000);
+	__uint(key_size, sizeof(int));
+	__uint(value_size, sizeof(long long) * 127);
+} stackmap SEC(".maps");
 
 static __always_inline int __on_event(struct pt_regs *ctx)
 {
@@ -13,14 +13,11 @@ struct socket_cookie {
 };
 
 struct {
-	__u32 type;
-	__u32 map_flags;
-	int *key;
-	struct socket_cookie *value;
-} socket_cookies SEC(".maps") = {
-	.type = BPF_MAP_TYPE_SK_STORAGE,
-	.map_flags = BPF_F_NO_PREALLOC,
-};
+	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
+	__uint(map_flags, BPF_F_NO_PREALLOC);
+	__type(key, int);
+	__type(value, struct socket_cookie);
+} socket_cookies SEC(".maps");
 
 SEC("cgroup/connect6")
 int set_cookie(struct bpf_sock_addr *ctx)
@@ -4,33 +4,33 @@
 
 int _version SEC("version") = 1;
 
-struct bpf_map_def SEC("maps") sock_map_rx = {
-	.type = BPF_MAP_TYPE_SOCKMAP,
-	.key_size = sizeof(int),
-	.value_size = sizeof(int),
-	.max_entries = 20,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_SOCKMAP);
+	__uint(max_entries, 20);
+	__uint(key_size, sizeof(int));
+	__uint(value_size, sizeof(int));
+} sock_map_rx SEC(".maps");
 
-struct bpf_map_def SEC("maps") sock_map_tx = {
-	.type = BPF_MAP_TYPE_SOCKMAP,
-	.key_size = sizeof(int),
-	.value_size = sizeof(int),
-	.max_entries = 20,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_SOCKMAP);
+	__uint(max_entries, 20);
+	__uint(key_size, sizeof(int));
+	__uint(value_size, sizeof(int));
+} sock_map_tx SEC(".maps");
 
-struct bpf_map_def SEC("maps") sock_map_msg = {
-	.type = BPF_MAP_TYPE_SOCKMAP,
-	.key_size = sizeof(int),
-	.value_size = sizeof(int),
-	.max_entries = 20,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_SOCKMAP);
+	__uint(max_entries, 20);
+	__uint(key_size, sizeof(int));
+	__uint(value_size, sizeof(int));
+} sock_map_msg SEC(".maps");
 
-struct bpf_map_def SEC("maps") sock_map_break = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.key_size = sizeof(int),
-	.value_size = sizeof(int),
-	.max_entries = 20,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 20);
+	__type(key, int);
+	__type(value, int);
+} sock_map_break SEC(".maps");
 
 SEC("sk_skb2")
 int bpf_prog2(struct __sk_buff *skb)
@@ -204,40 +204,40 @@ struct strobelight_bpf_sample {
 	char dummy_safeguard;
 };
 
-struct bpf_map_def SEC("maps") samples = {
-	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
-	.key_size = sizeof(int),
-	.value_size = sizeof(int),
-	.max_entries = 32,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+	__uint(max_entries, 32);
+	__uint(key_size, sizeof(int));
+	__uint(value_size, sizeof(int));
+} samples SEC(".maps");
 
-struct bpf_map_def SEC("maps") stacks_0 = {
-	.type = BPF_MAP_TYPE_STACK_TRACE,
-	.key_size = sizeof(uint32_t),
-	.value_size = sizeof(uint64_t) * PERF_MAX_STACK_DEPTH,
-	.max_entries = 16,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
+	__uint(max_entries, 16);
+	__uint(key_size, sizeof(uint32_t));
+	__uint(value_size, sizeof(uint64_t) * PERF_MAX_STACK_DEPTH);
+} stacks_0 SEC(".maps");
 
-struct bpf_map_def SEC("maps") stacks_1 = {
-	.type = BPF_MAP_TYPE_STACK_TRACE,
-	.key_size = sizeof(uint32_t),
-	.value_size = sizeof(uint64_t) * PERF_MAX_STACK_DEPTH,
-	.max_entries = 16,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
+	__uint(max_entries, 16);
+	__uint(key_size, sizeof(uint32_t));
+	__uint(value_size, sizeof(uint64_t) * PERF_MAX_STACK_DEPTH);
+} stacks_1 SEC(".maps");
 
-struct bpf_map_def SEC("maps") sample_heap = {
-	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
-	.key_size = sizeof(uint32_t),
-	.value_size = sizeof(struct strobelight_bpf_sample),
-	.max_entries = 1,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, uint32_t);
+	__type(value, struct strobelight_bpf_sample);
+} sample_heap SEC(".maps");
 
-struct bpf_map_def SEC("maps") strobemeta_cfgs = {
-	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
-	.key_size = sizeof(pid_t),
-	.value_size = sizeof(struct strobemeta_cfg),
-	.max_entries = STROBE_MAX_CFGS,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+	__uint(max_entries, STROBE_MAX_CFGS);
+	__type(key, pid_t);
+	__type(value, struct strobemeta_cfg);
+} strobemeta_cfgs SEC(".maps");
 
 /* Type for the dtv. */
 /* https://github.com/lattera/glibc/blob/master/nptl/sysdeps/x86_64/tls.h#L34 */
 tools/testing/selftests/bpf/progs/test_attach_probe.c | 52 (new file)
@@ -0,0 +1,52 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Facebook

#include <linux/ptrace.h>
#include <linux/bpf.h>
#include "bpf_helpers.h"

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 4);
	__type(key, int);
	__type(value, int);
} results_map SEC(".maps");

SEC("kprobe/sys_nanosleep")
int handle_sys_nanosleep_entry(struct pt_regs *ctx)
{
	const int key = 0, value = 1;

	bpf_map_update_elem(&results_map, &key, &value, 0);
	return 0;
}

SEC("kretprobe/sys_nanosleep")
int handle_sys_getpid_return(struct pt_regs *ctx)
{
	const int key = 1, value = 2;

	bpf_map_update_elem(&results_map, &key, &value, 0);
	return 0;
}

SEC("uprobe/trigger_func")
int handle_uprobe_entry(struct pt_regs *ctx)
{
	const int key = 2, value = 3;

	bpf_map_update_elem(&results_map, &key, &value, 0);
	return 0;
}

SEC("uretprobe/trigger_func")
int handle_uprobe_return(struct pt_regs *ctx)
{
	const int key = 3, value = 4;

	bpf_map_update_elem(&results_map, &key, &value, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";
__u32 _version SEC("version") = 1;
@@ -21,14 +21,11 @@ struct bpf_map_def SEC("maps") btf_map_legacy = {
 BPF_ANNOTATE_KV_PAIR(btf_map_legacy, int, struct ipv_counts);
 
 struct {
-	int *key;
-	struct ipv_counts *value;
-	unsigned int type;
-	unsigned int max_entries;
-} btf_map SEC(".maps") = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.max_entries = 4,
-};
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 4);
+	__type(key, int);
+	__type(value, struct ipv_counts);
+} btf_map SEC(".maps");
 
 struct dummy_tracepoint_args {
 	unsigned long long pad;
@@ -16,26 +16,18 @@ struct stack_trace_t {
 };
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 key_size;
-	__u32 value_size;
-} perfmap SEC(".maps") = {
-	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
-	.max_entries = 2,
-	.key_size = sizeof(int),
-	.value_size = sizeof(__u32),
-};
+	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+	__uint(max_entries, 2);
+	__uint(key_size, sizeof(int));
+	__uint(value_size, sizeof(__u32));
+} perfmap SEC(".maps");
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
-	struct stack_trace_t *value;
-} stackdata_map SEC(".maps") = {
-	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
-	.max_entries = 1,
-};
+	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, __u32);
+	__type(value, struct stack_trace_t);
+} stackdata_map SEC(".maps");
 
 /* Allocate per-cpu space twice the needed. For the code below
  *   usize = bpf_get_stack(ctx, raw_data, max_len, BPF_F_USER_STACK);
@@ -56,14 +48,11 @@ struct {
  * This is an acceptable workaround since there is one entry here.
  */
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
+	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, __u32);
 	__u64 (*value)[2 * MAX_STACK_RAWTP];
-} rawdata_map SEC(".maps") = {
-	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
-	.max_entries = 1,
-};
+} rawdata_map SEC(".maps");
 
 SEC("tracepoint/raw_syscalls/sys_enter")
 int bpf_prog1(void *ctx)
@@ -8,24 +8,18 @@
 #include "bpf_helpers.h"
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
-	__u64 *value;
-} result_number SEC(".maps") = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.max_entries = 11,
-};
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 11);
+	__type(key, __u32);
+	__type(value, __u64);
+} result_number SEC(".maps");
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 5);
+	__type(key, __u32);
 	const char (*value)[32];
-} result_string SEC(".maps") = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.max_entries = 5,
-};
+} result_string SEC(".maps");
 
 struct foo {
 	__u8 a;
@@ -34,14 +28,11 @@ struct foo {
 };
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
-	struct foo *value;
-} result_struct SEC(".maps") = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.max_entries = 5,
-};
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 5);
+	__type(key, __u32);
+	__type(value, struct foo);
+} result_struct SEC(".maps");
 
 /* Relocation tests for __u64s. */
 static __u64 num0;
@@ -170,54 +170,39 @@ struct eth_hdr {
 };
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	struct vip *key;
-	struct vip_meta *value;
-} vip_map SEC(".maps") = {
-	.type = BPF_MAP_TYPE_HASH,
-	.max_entries = MAX_VIPS,
-};
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, MAX_VIPS);
+	__type(key, struct vip);
+	__type(value, struct vip_meta);
+} vip_map SEC(".maps");
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
-	__u32 *value;
-} ch_rings SEC(".maps") = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.max_entries = CH_RINGS_SIZE,
-};
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, CH_RINGS_SIZE);
+	__type(key, __u32);
+	__type(value, __u32);
+} ch_rings SEC(".maps");
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
-	struct real_definition *value;
-} reals SEC(".maps") = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.max_entries = MAX_REALS,
-};
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, MAX_REALS);
+	__type(key, __u32);
+	__type(value, struct real_definition);
+} reals SEC(".maps");
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
-	struct vip_stats *value;
-} stats SEC(".maps") = {
-	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
-	.max_entries = MAX_VIPS,
-};
+	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+	__uint(max_entries, MAX_VIPS);
+	__type(key, __u32);
+	__type(value, struct vip_stats);
+} stats SEC(".maps");
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
-	struct ctl_value *value;
-} ctl_array SEC(".maps") = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.max_entries = CTL_MAP_SIZE,
-};
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, CTL_MAP_SIZE);
+	__type(key, __u32);
+	__type(value, struct ctl_value);
+} ctl_array SEC(".maps");
 
 static __always_inline __u32 get_packet_hash(struct packet_description *pckt,
 					     bool ipv6)
@@ -166,54 +166,39 @@ struct eth_hdr {
 };
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	struct vip *key;
-	struct vip_meta *value;
-} vip_map SEC(".maps") = {
-	.type = BPF_MAP_TYPE_HASH,
-	.max_entries = MAX_VIPS,
-};
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, MAX_VIPS);
+	__type(key, struct vip);
+	__type(value, struct vip_meta);
+} vip_map SEC(".maps");
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
-	__u32 *value;
-} ch_rings SEC(".maps") = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.max_entries = CH_RINGS_SIZE,
-};
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, CH_RINGS_SIZE);
+	__type(key, __u32);
+	__type(value, __u32);
+} ch_rings SEC(".maps");
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
-	struct real_definition *value;
-} reals SEC(".maps") = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.max_entries = MAX_REALS,
-};
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, MAX_REALS);
+	__type(key, __u32);
+	__type(value, struct real_definition);
+} reals SEC(".maps");
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
-	struct vip_stats *value;
-} stats SEC(".maps") = {
-	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
-	.max_entries = MAX_VIPS,
-};
+	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+	__uint(max_entries, MAX_VIPS);
+	__type(key, __u32);
+	__type(value, struct vip_stats);
+} stats SEC(".maps");
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
-	struct ctl_value *value;
-} ctl_array SEC(".maps") = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.max_entries = CTL_MAP_SIZE,
-};
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, CTL_MAP_SIZE);
+	__type(key, __u32);
+	__type(value, struct ctl_value);
+} ctl_array SEC(".maps");
 
 static __u32 get_packet_hash(struct packet_description *pckt,
 			     bool ipv6)
@@ -5,23 +5,23 @@
 #include <linux/types.h>
 #include "bpf_helpers.h"
 
-struct bpf_map_def SEC("maps") mim_array = {
-	.type = BPF_MAP_TYPE_ARRAY_OF_MAPS,
-	.key_size = sizeof(int),
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+	__uint(max_entries, 1);
+	__uint(map_flags, 0);
+	__uint(key_size, sizeof(__u32));
 	/* must be sizeof(__u32) for map in map */
-	.value_size = sizeof(__u32),
-	.max_entries = 1,
-	.map_flags = 0,
-};
+	__uint(value_size, sizeof(__u32));
+} mim_array SEC(".maps");
 
-struct bpf_map_def SEC("maps") mim_hash = {
-	.type = BPF_MAP_TYPE_HASH_OF_MAPS,
-	.key_size = sizeof(int),
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+	__uint(max_entries, 1);
+	__uint(map_flags, 0);
+	__uint(key_size, sizeof(int));
 	/* must be sizeof(__u32) for map in map */
-	.value_size = sizeof(__u32),
-	.max_entries = 1,
-	.map_flags = 0,
-};
+	__uint(value_size, sizeof(__u32));
+} mim_hash SEC(".maps");
 
 SEC("xdp_mimtest")
 int xdp_mimtest0(struct xdp_md *ctx)
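For the two map-in-map declarations above, the slot stored in the outer map is an inner map referenced by fd from userspace, which is why the comments insist on a sizeof(__u32) value size. A hedged userspace sketch of wiring an inner map into the outer array at runtime (outer_fd and inner_fd are assumed to be valid map fds):

	int key = 0;

	/* store the inner map's fd; the kernel resolves it to the map */
	if (bpf_map_update_elem(outer_fd, &key, &inner_fd, 0))
		/* handle error */;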
@@ -12,14 +12,11 @@ struct hmap_elem {
 };
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
-	struct hmap_elem *value;
-} hash_map SEC(".maps") = {
-	.type = BPF_MAP_TYPE_HASH,
-	.max_entries = 1,
-};
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, 1);
+	__type(key, __u32);
+	__type(value, struct hmap_elem);
+} hash_map SEC(".maps");
 
 struct array_elem {
 	struct bpf_spin_lock lock;
@@ -27,14 +24,11 @@ struct array_elem {
 };
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	int *key;
-	struct array_elem *value;
-} array_map SEC(".maps") = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.max_entries = 1,
-};
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, int);
+	__type(value, struct array_elem);
+} array_map SEC(".maps");
 
 SEC("map_lock_demo")
 int bpf_map_lock_test(struct __sk_buff *skb)
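Both converted maps above carry a struct bpf_spin_lock inside their element types, which is what this test exercises. The BPF-side pattern, sketched here with an assumed `cnt` field, brackets the element update with the lock helpers:

	struct array_elem *val = bpf_map_lookup_elem(&array_map, &key);

	if (!val)
		return 0;
	bpf_spin_lock(&val->lock);
	val->cnt++;		/* `cnt` is an assumed field name */
	bpf_spin_unlock(&val->lock);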
@@ -13,12 +13,12 @@
 
 int _version SEC("version") = 1;
 
-struct bpf_map_def SEC("maps") test_map_id = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.key_size = sizeof(__u32),
-	.value_size = sizeof(__u64),
-	.max_entries = 1,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, __u32);
+	__type(value, __u64);
+} test_map_id SEC(".maps");
 
 SEC("test_obj_id_dummy")
 int test_obj_id(struct __sk_buff *skb)
 tools/testing/selftests/bpf/progs/test_perf_buffer.c | 25 (new file)
@@ -0,0 +1,25 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook

#include <linux/ptrace.h>
#include <linux/bpf.h>
#include "bpf_helpers.h"

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
} perf_buf_map SEC(".maps");

SEC("kprobe/sys_nanosleep")
int handle_sys_nanosleep_entry(struct pt_regs *ctx)
{
	int cpu = bpf_get_smp_processor_id();

	bpf_perf_event_output(ctx, &perf_buf_map, BPF_F_CURRENT_CPU,
			      &cpu, sizeof(cpu));
	return 0;
}

char _license[] SEC("license") = "GPL";
__u32 _version SEC("version") = 1;
@@ -22,56 +22,39 @@ int _version SEC("version") = 1;
 #endif
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 key_size;
-	__u32 value_size;
-} outer_map SEC(".maps") = {
-	.type = BPF_MAP_TYPE_ARRAY_OF_MAPS,
-	.max_entries = 1,
-	.key_size = sizeof(__u32),
-	.value_size = sizeof(__u32),
-};
+	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+	__uint(max_entries, 1);
+	__uint(key_size, sizeof(__u32));
+	__uint(value_size, sizeof(__u32));
+} outer_map SEC(".maps");
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
-	__u32 *value;
-} result_map SEC(".maps") = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.max_entries = NR_RESULTS,
-};
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, NR_RESULTS);
+	__type(key, __u32);
+	__type(value, __u32);
+} result_map SEC(".maps");
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
-	int *value;
-} tmp_index_ovr_map SEC(".maps") = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.max_entries = 1,
-};
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, __u32);
+	__type(value, int);
+} tmp_index_ovr_map SEC(".maps");
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
-	__u32 *value;
-} linum_map SEC(".maps") = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.max_entries = 1,
-};
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, __u32);
+	__type(value, __u32);
+} linum_map SEC(".maps");
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
-	struct data_check *value;
-} data_check_map SEC(".maps") = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.max_entries = 1,
-};
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, __u32);
+	__type(value, struct data_check);
+} data_check_map SEC(".maps");
 
 #define GOTO_DONE(_result) ({			\
 	result = (_result);			\
@@ -5,24 +5,18 @@
 #include "bpf_helpers.h"
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
-	__u64 *value;
-} info_map SEC(".maps") = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.max_entries = 1,
-};
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, __u32);
+	__type(value, __u64);
+} info_map SEC(".maps");
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
-	__u64 *value;
-} status_map SEC(".maps") = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.max_entries = 1,
-};
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, __u32);
+	__type(value, __u64);
+} status_map SEC(".maps");
 
 SEC("send_signal_demo")
 int bpf_send_signal_test(void *ctx)
@@ -28,44 +28,32 @@ enum bpf_linum_array_idx {
 };
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
-	struct sockaddr_in6 *value;
-} addr_map SEC(".maps") = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.max_entries = __NR_BPF_ADDR_ARRAY_IDX,
-};
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, __NR_BPF_ADDR_ARRAY_IDX);
+	__type(key, __u32);
+	__type(value, struct sockaddr_in6);
+} addr_map SEC(".maps");
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
-	struct bpf_sock *value;
-} sock_result_map SEC(".maps") = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.max_entries = __NR_BPF_RESULT_ARRAY_IDX,
-};
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, __NR_BPF_RESULT_ARRAY_IDX);
+	__type(key, __u32);
+	__type(value, struct bpf_sock);
+} sock_result_map SEC(".maps");
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
-	struct bpf_tcp_sock *value;
-} tcp_sock_result_map SEC(".maps") = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.max_entries = __NR_BPF_RESULT_ARRAY_IDX,
-};
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, __NR_BPF_RESULT_ARRAY_IDX);
+	__type(key, __u32);
+	__type(value, struct bpf_tcp_sock);
+} tcp_sock_result_map SEC(".maps");
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
-	__u32 *value;
-} linum_map SEC(".maps") = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.max_entries = __NR_BPF_LINUM_ARRAY_IDX,
-};
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, __NR_BPF_LINUM_ARRAY_IDX);
+	__type(key, __u32);
+	__type(value, __u32);
+} linum_map SEC(".maps");
 
 struct bpf_spinlock_cnt {
 	struct bpf_spin_lock lock;
@@ -73,24 +61,18 @@ struct bpf_spinlock_cnt {
 };
 
 struct {
-	__u32 type;
-	__u32 map_flags;
-	int *key;
-	struct bpf_spinlock_cnt *value;
-} sk_pkt_out_cnt SEC(".maps") = {
-	.type = BPF_MAP_TYPE_SK_STORAGE,
-	.map_flags = BPF_F_NO_PREALLOC,
-};
+	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
+	__uint(map_flags, BPF_F_NO_PREALLOC);
+	__type(key, int);
+	__type(value, struct bpf_spinlock_cnt);
+} sk_pkt_out_cnt SEC(".maps");
 
 struct {
-	__u32 type;
-	__u32 map_flags;
-	int *key;
-	struct bpf_spinlock_cnt *value;
-} sk_pkt_out_cnt10 SEC(".maps") = {
-	.type = BPF_MAP_TYPE_SK_STORAGE,
-	.map_flags = BPF_F_NO_PREALLOC,
-};
+	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
+	__uint(map_flags, BPF_F_NO_PREALLOC);
+	__type(key, int);
+	__type(value, struct bpf_spinlock_cnt);
+} sk_pkt_out_cnt10 SEC(".maps");
 
 static bool is_loopback6(__u32 *a6)
 {
@@ -11,14 +11,11 @@ struct hmap_elem {
 };
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	int *key;
-	struct hmap_elem *value;
-} hmap SEC(".maps") = {
-	.type = BPF_MAP_TYPE_HASH,
-	.max_entries = 1,
-};
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, 1);
+	__type(key, int);
+	__type(value, struct hmap_elem);
+} hmap SEC(".maps");
 
 struct cls_elem {
 	struct bpf_spin_lock lock;
@@ -26,12 +23,10 @@ struct cls_elem {
 };
 
 struct {
-	__u32 type;
-	struct bpf_cgroup_storage_key *key;
-	struct cls_elem *value;
-} cls_map SEC(".maps") = {
-	.type = BPF_MAP_TYPE_CGROUP_STORAGE,
-};
+	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
+	__type(key, struct bpf_cgroup_storage_key);
+	__type(value, struct cls_elem);
+} cls_map SEC(".maps");
 
 struct bpf_vqueue {
 	struct bpf_spin_lock lock;
@@ -42,14 +37,11 @@ struct bpf_vqueue {
 };
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	int *key;
-	struct bpf_vqueue *value;
-} vqueue SEC(".maps") = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.max_entries = 1,
-};
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, int);
+	__type(value, struct bpf_vqueue);
+} vqueue SEC(".maps");
 
 #define CREDIT_PER_NS(delta, rate) (((delta) * rate) >> 20)
 
@@ -9,51 +9,36 @@
 #endif
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
-	__u32 *value;
-} control_map SEC(".maps") = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.max_entries = 1,
-};
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, __u32);
+	__type(value, __u32);
+} control_map SEC(".maps");
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
-	__u32 *value;
-} stackid_hmap SEC(".maps") = {
-	.type = BPF_MAP_TYPE_HASH,
-	.max_entries = 16384,
-};
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, 16384);
+	__type(key, __u32);
+	__type(value, __u32);
+} stackid_hmap SEC(".maps");
 
 typedef struct bpf_stack_build_id stack_trace_t[PERF_MAX_STACK_DEPTH];
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 map_flags;
-	__u32 key_size;
-	__u32 value_size;
-} stackmap SEC(".maps") = {
-	.type = BPF_MAP_TYPE_STACK_TRACE,
-	.max_entries = 128,
-	.map_flags = BPF_F_STACK_BUILD_ID,
-	.key_size = sizeof(__u32),
-	.value_size = sizeof(stack_trace_t),
-};
+	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
+	__uint(max_entries, 128);
+	__uint(map_flags, BPF_F_STACK_BUILD_ID);
+	__uint(key_size, sizeof(__u32));
+	__uint(value_size, sizeof(stack_trace_t));
+} stackmap SEC(".maps");
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 128);
+	__type(key, __u32);
 	/* there seems to be a bug in kernel not handling typedef properly */
 	struct bpf_stack_build_id (*value)[PERF_MAX_STACK_DEPTH];
-} stack_amap SEC(".maps") = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.max_entries = 128,
-};
+} stack_amap SEC(".maps");
 
 /* taken from /sys/kernel/debug/tracing/events/random/urandom_read/format */
 struct random_urandom_args {
@@ -9,48 +9,34 @@
 #endif
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
-	__u32 *value;
-} control_map SEC(".maps") = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.max_entries = 1,
-};
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, __u32);
+	__type(value, __u32);
+} control_map SEC(".maps");
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
-	__u32 *value;
-} stackid_hmap SEC(".maps") = {
-	.type = BPF_MAP_TYPE_HASH,
-	.max_entries = 16384,
-};
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, 16384);
+	__type(key, __u32);
+	__type(value, __u32);
+} stackid_hmap SEC(".maps");
 
 typedef __u64 stack_trace_t[PERF_MAX_STACK_DEPTH];
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 key_size;
-	__u32 value_size;
-} stackmap SEC(".maps") = {
-	.type = BPF_MAP_TYPE_STACK_TRACE,
-	.max_entries = 16384,
-	.key_size = sizeof(__u32),
-	.value_size = sizeof(stack_trace_t),
-};
+	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
+	__uint(max_entries, 16384);
+	__uint(key_size, sizeof(__u32));
+	__uint(value_size, sizeof(stack_trace_t));
+} stackmap SEC(".maps");
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 16384);
+	__type(key, __u32);
 	__u64 (*value)[PERF_MAX_STACK_DEPTH];
-} stack_amap SEC(".maps") = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.max_entries = 16384,
-};
+} stack_amap SEC(".maps");
 
 /* taken from /sys/kernel/debug/tracing/events/sched/sched_switch/format */
 struct sched_switch_args {
@@ -149,14 +149,11 @@ struct tcp_estats_basic_event {
|
||||
};
|
||||
|
||||
struct {
|
||||
__u32 type;
|
||||
__u32 max_entries;
|
||||
__u32 *key;
|
||||
struct tcp_estats_basic_event *value;
|
||||
} ev_record_map SEC(".maps") = {
|
||||
.type = BPF_MAP_TYPE_HASH,
|
||||
.max_entries = 1024,
|
||||
};
|
||||
__uint(type, BPF_MAP_TYPE_HASH);
|
||||
__uint(max_entries, 1024);
|
||||
__type(key, __u32);
|
||||
__type(value, struct tcp_estats_basic_event);
|
||||
} ev_record_map SEC(".maps");
|
||||
|
||||
struct dummy_tracepoint_args {
|
||||
unsigned long long pad;

@@ -15,24 +15,18 @@
 #include "test_tcpbpf.h"
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
-	struct tcpbpf_globals *value;
-} global_map SEC(".maps") = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.max_entries = 4,
-};
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 4);
+	__type(key, __u32);
+	__type(value, struct tcpbpf_globals);
+} global_map SEC(".maps");
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
-	int *value;
-} sockopt_results SEC(".maps") = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.max_entries = 2,
-};
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 2);
+	__type(key, __u32);
+	__type(value, int);
+} sockopt_results SEC(".maps");
 
 static inline void update_event_map(int event)
 {

@@ -15,26 +15,18 @@
 #include "test_tcpnotify.h"
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
-	struct tcpnotify_globals *value;
-} global_map SEC(".maps") = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.max_entries = 4,
-};
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 4);
+	__type(key, __u32);
+	__type(value, struct tcpnotify_globals);
+} global_map SEC(".maps");
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 key_size;
-	__u32 value_size;
-} perf_event_map SEC(".maps") = {
-	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
-	.max_entries = 2,
-	.key_size = sizeof(int),
-	.value_size = sizeof(__u32),
-};
+	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+	__uint(max_entries, 2);
+	__uint(key_size, sizeof(int));
+	__uint(value_size, sizeof(__u32));
+} perf_event_map SEC(".maps");
 
 int _version SEC("version") = 1;

@@ -23,24 +23,18 @@
 int _version SEC("version") = 1;
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
-	__u64 *value;
-} rxcnt SEC(".maps") = {
-	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
-	.max_entries = 256,
-};
+	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+	__uint(max_entries, 256);
+	__type(key, __u32);
+	__type(value, __u64);
+} rxcnt SEC(".maps");
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	struct vip *key;
-	struct iptnl_info *value;
-} vip2tnl SEC(".maps") = {
-	.type = BPF_MAP_TYPE_HASH,
-	.max_entries = MAX_IPTNL_ENTRIES,
-};
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, MAX_IPTNL_ENTRIES);
+	__type(key, struct vip);
+	__type(value, struct iptnl_info);
+} vip2tnl SEC(".maps");
 
 static __always_inline void count_tx(__u32 protocol)
 {

@@ -18,19 +18,19 @@
 
 int _version SEC("version") = 1;
 
-struct bpf_map_def SEC("maps") rxcnt = {
-	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
-	.key_size = sizeof(__u32),
-	.value_size = sizeof(__u64),
-	.max_entries = 256,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+	__uint(max_entries, 256);
+	__type(key, __u32);
+	__type(value, __u64);
+} rxcnt SEC(".maps");
 
-struct bpf_map_def SEC("maps") vip2tnl = {
-	.type = BPF_MAP_TYPE_HASH,
-	.key_size = sizeof(struct vip),
-	.value_size = sizeof(struct iptnl_info),
-	.max_entries = MAX_IPTNL_ENTRIES,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, MAX_IPTNL_ENTRIES);
+	__type(key, struct vip);
+	__type(value, struct iptnl_info);
+} vip2tnl SEC(".maps");
 
 static __always_inline void count_tx(__u32 protocol)
 {

@@ -164,66 +164,47 @@ struct lb_stats {
 };
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	struct vip_definition *key;
-	struct vip_meta *value;
-} vip_map SEC(".maps") = {
-	.type = BPF_MAP_TYPE_HASH,
-	.max_entries = 512,
-};
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, 512);
+	__type(key, struct vip_definition);
+	__type(value, struct vip_meta);
+} vip_map SEC(".maps");
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 map_flags;
-	struct flow_key *key;
-	struct real_pos_lru *value;
-} lru_cache SEC(".maps") = {
-	.type = BPF_MAP_TYPE_LRU_HASH,
-	.max_entries = 300,
-	.map_flags = 1U << 1,
-};
+	__uint(type, BPF_MAP_TYPE_LRU_HASH);
+	__uint(max_entries, 300);
+	__uint(map_flags, 1U << 1);
+	__type(key, struct flow_key);
+	__type(value, struct real_pos_lru);
+} lru_cache SEC(".maps");
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
-	__u32 *value;
-} ch_rings SEC(".maps") = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.max_entries = 12 * 655,
-};
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 12 * 655);
+	__type(key, __u32);
+	__type(value, __u32);
+} ch_rings SEC(".maps");
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
-	struct real_definition *value;
-} reals SEC(".maps") = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.max_entries = 40,
-};
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 40);
+	__type(key, __u32);
+	__type(value, struct real_definition);
+} reals SEC(".maps");
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
-	struct lb_stats *value;
-} stats SEC(".maps") = {
-	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
-	.max_entries = 515,
-};
+	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+	__uint(max_entries, 515);
+	__type(key, __u32);
+	__type(value, struct lb_stats);
+} stats SEC(".maps");
 
 struct {
-	__u32 type;
-	__u32 max_entries;
-	__u32 *key;
-	struct ctl_value *value;
-} ctl_array SEC(".maps") = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.max_entries = 16,
-};
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 16);
+	__type(key, __u32);
+	__type(value, struct ctl_value);
+} ctl_array SEC(".maps");
 
 struct eth_hdr {
 	unsigned char eth_dest[6];
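A small readability aside on the lru_cache map above: the literal map_flags value 1U << 1 is numerically BPF_F_NO_COMMON_LRU, which gives each CPU a private LRU list instead of one shared list. If the named constant were used, the declaration would read:

struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__uint(max_entries, 300);
	__uint(map_flags, BPF_F_NO_COMMON_LRU);	/* same value as 1U << 1 */
	__type(key, struct flow_key);
	__type(value, struct real_pos_lru);
} lru_cache SEC(".maps");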

@@ -3,12 +3,12 @@
 #include <linux/bpf.h>
 #include "bpf_helpers.h"
 
-struct bpf_map_def SEC("maps") tx_port = {
-	.type = BPF_MAP_TYPE_DEVMAP,
-	.key_size = sizeof(int),
-	.value_size = sizeof(int),
-	.max_entries = 8,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_DEVMAP);
+	__uint(max_entries, 8);
+	__uint(key_size, sizeof(int));
+	__uint(value_size, sizeof(int));
+} tx_port SEC(".maps");
 
 SEC("redirect_map_0")
 int xdp_redirect_map_0(struct xdp_md *xdp)

@@ -17,12 +17,12 @@
 
 #include "xdping.h"
 
-struct bpf_map_def SEC("maps") ping_map = {
-	.type = BPF_MAP_TYPE_HASH,
-	.key_size = sizeof(__u32),
-	.value_size = sizeof(struct pinginfo),
-	.max_entries = 256,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, 256);
+	__type(key, __u32);
+	__type(value, struct pinginfo);
+} ping_map SEC(".maps");
 
 static __always_inline void swap_src_dst_mac(void *data)
 {

@@ -180,7 +180,7 @@ static struct bpf_align_test tests[] = {
 	},
 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 	.matches = {
-		{7, "R0=pkt(id=0,off=8,r=8,imm=0)"},
+		{7, "R0_w=pkt(id=0,off=8,r=8,imm=0)"},
 		{7, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
 		{8, "R3_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
 		{9, "R3_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
@@ -315,7 +315,7 @@ static struct bpf_align_test tests[] = {
 		/* Calculated offset in R6 has unknown value, but known
 		 * alignment of 4.
 		 */
-		{8, "R2=pkt(id=0,off=0,r=8,imm=0)"},
+		{8, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
 		{8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
 		/* Offset is added to packet pointer R5, resulting in
 		 * known fixed offset, and variable offset from R6.
@@ -405,7 +405,7 @@ static struct bpf_align_test tests[] = {
 		/* Calculated offset in R6 has unknown value, but known
 		 * alignment of 4.
 		 */
-		{8, "R2=pkt(id=0,off=0,r=8,imm=0)"},
+		{8, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
 		{8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
 		/* Adding 14 makes R6 be (4n+2) */
 		{9, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
@@ -473,12 +473,12 @@ static struct bpf_align_test tests[] = {
 		/* (4n) + 14 == (4n+2). We blow our bounds, because
 		 * the add could overflow.
 		 */
-		{7, "R5=inv(id=0,var_off=(0x2; 0xfffffffffffffffc))"},
+		{7, "R5_w=inv(id=0,var_off=(0x2; 0xfffffffffffffffc))"},
 		/* Checked s>=0 */
 		{9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
 		/* packet pointer + nonnegative (4n+2) */
 		{11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
-		{13, "R4=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
+		{13, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
 		/* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
 		 * We checked the bounds, but it might have been able
 		 * to overflow if the packet pointer started in the
@@ -486,7 +486,7 @@ static struct bpf_align_test tests[] = {
 		 * So we did not get a 'range' on R6, and the access
 		 * attempt will fail.
 		 */
-		{15, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
+		{15, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
 		}
 	},
 	{
@@ -521,7 +521,7 @@ static struct bpf_align_test tests[] = {
 		/* Calculated offset in R6 has unknown value, but known
 		 * alignment of 4.
 		 */
-		{7, "R2=pkt(id=0,off=0,r=8,imm=0)"},
+		{7, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
 		{9, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
 		/* Adding 14 makes R6 be (4n+2) */
 		{10, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
@@ -574,7 +574,7 @@ static struct bpf_align_test tests[] = {
 		/* Calculated offset in R6 has unknown value, but known
 		 * alignment of 4.
 		 */
-		{7, "R2=pkt(id=0,off=0,r=8,imm=0)"},
+		{7, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
 		{10, "R6_w=inv(id=0,umax_value=60,var_off=(0x0; 0x3c))"},
 		/* Adding 14 makes R6 be (4n+2) */
 		{11, "R6_w=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"},

@@ -1418,7 +1418,7 @@ static void test_map_wronly(void)
 	assert(bpf_map_get_next_key(fd, &key, &value) == -1 && errno == EPERM);
 }
 
-static void prepare_reuseport_grp(int type, int map_fd,
+static void prepare_reuseport_grp(int type, int map_fd, size_t map_elem_size,
 				  __s64 *fds64, __u64 *sk_cookies,
 				  unsigned int n)
 {
@@ -1428,6 +1428,8 @@ static void prepare_reuseport_grp(int type, int map_fd,
 	const int optval = 1;
 	unsigned int i;
 	u64 sk_cookie;
+	void *value;
+	__s32 fd32;
 	__s64 fd64;
 	int err;
 
@@ -1449,8 +1451,14 @@ static void prepare_reuseport_grp(int type, int map_fd,
 		      "err:%d errno:%d\n", err, errno);
 
 		/* reuseport_array does not allow unbound sk */
-		err = bpf_map_update_elem(map_fd, &index0, &fd64,
-					  BPF_ANY);
+		if (map_elem_size == sizeof(__u64))
+			value = &fd64;
+		else {
+			assert(map_elem_size == sizeof(__u32));
+			fd32 = (__s32)fd64;
+			value = &fd32;
+		}
+		err = bpf_map_update_elem(map_fd, &index0, value, BPF_ANY);
 		CHECK(err != -1 || errno != EINVAL,
 		      "reuseport array update unbound sk",
 		      "sock_type:%d err:%d errno:%d\n",
@@ -1478,7 +1486,7 @@ static void prepare_reuseport_grp(int type, int map_fd,
 			 * reuseport_array does not allow
 			 * non-listening tcp sk.
 			 */
-			err = bpf_map_update_elem(map_fd, &index0, &fd64,
+			err = bpf_map_update_elem(map_fd, &index0, value,
 						  BPF_ANY);
 			CHECK(err != -1 || errno != EINVAL,
 			      "reuseport array update non-listening sk",
@@ -1541,7 +1549,7 @@ static void test_reuseport_array(void)
 	for (t = 0; t < ARRAY_SIZE(types); t++) {
 		type = types[t];
 
-		prepare_reuseport_grp(type, map_fd, grpa_fds64,
+		prepare_reuseport_grp(type, map_fd, sizeof(__u64), grpa_fds64,
 				      grpa_cookies, ARRAY_SIZE(grpa_fds64));
 
 		/* Test BPF_* update flags */
@@ -1649,7 +1657,8 @@ static void test_reuseport_array(void)
 			     sizeof(__u32), sizeof(__u32), array_size, 0);
 	CHECK(map_fd == -1, "reuseport array create",
 	      "map_fd:%d, errno:%d\n", map_fd, errno);
-	prepare_reuseport_grp(SOCK_STREAM, map_fd, &fd64, &sk_cookie, 1);
+	prepare_reuseport_grp(SOCK_STREAM, map_fd, sizeof(__u32), &fd64,
+			      &sk_cookie, 1);
 	fd = fd64;
 	err = bpf_map_update_elem(map_fd, &index3, &fd, BPF_NOEXIST);
 	CHECK(err == -1, "reuseport array update 32 bit fd",
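The map_elem_size plumbing above looks roundabout, but it matters on big-endian targets: when the reuseport array is created with value_size == sizeof(__u32), passing &fd64 would hand the kernel the first four bytes of the 64-bit variable, which on big-endian machines is the high, all-zero half rather than the fd. A small self-contained illustration of the pitfall (plain user-space C, not from the patch):

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	int64_t fd64 = 42;
	int32_t first_half;

	/* what a 4-byte map update starting at &fd64 would read */
	memcpy(&first_half, &fd64, sizeof(first_half));

	/* little-endian: 42; big-endian: 0. The explicit (__s32)
	 * cast in the test always yields the low 32 bits instead.
	 */
	assert(first_half == 42 || first_half == 0);
	return 0;
}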

@@ -10,21 +10,21 @@
 
 int _version SEC("version") = 1;
 
-struct bpf_map_def __attribute__ ((section("maps"), used)) map_in = {
-	.type = MAP_TYPE,
-	.key_size = 0,
-	.value_size = sizeof(__u32),
-	.max_entries = 32,
-	.map_flags = 0,
-};
+struct {
+	__uint(type, MAP_TYPE);
+	__uint(max_entries, 32);
+	__uint(map_flags, 0);
+	__uint(key_size, 0);
+	__uint(value_size, sizeof(__u32));
+} map_in SEC(".maps");
 
-struct bpf_map_def __attribute__ ((section("maps"), used)) map_out = {
-	.type = MAP_TYPE,
-	.key_size = 0,
-	.value_size = sizeof(__u32),
-	.max_entries = 32,
-	.map_flags = 0,
-};
+struct {
+	__uint(type, MAP_TYPE);
+	__uint(max_entries, 32);
+	__uint(map_flags, 0);
+	__uint(key_size, 0);
+	__uint(value_size, sizeof(__u32));
+} map_out SEC(".maps");
 
 SEC("test")
 int _test(struct __sk_buff *skb)

@@ -28,61 +28,61 @@
  * are established and verdicts are decided.
  */
 
-struct bpf_map_def SEC("maps") sock_map = {
-	.type = TEST_MAP_TYPE,
-	.key_size = sizeof(int),
-	.value_size = sizeof(int),
-	.max_entries = 20,
-};
+struct {
+	__uint(type, TEST_MAP_TYPE);
+	__uint(max_entries, 20);
+	__uint(key_size, sizeof(int));
+	__uint(value_size, sizeof(int));
+} sock_map SEC(".maps");
 
-struct bpf_map_def SEC("maps") sock_map_txmsg = {
-	.type = TEST_MAP_TYPE,
-	.key_size = sizeof(int),
-	.value_size = sizeof(int),
-	.max_entries = 20,
-};
+struct {
+	__uint(type, TEST_MAP_TYPE);
+	__uint(max_entries, 20);
+	__uint(key_size, sizeof(int));
+	__uint(value_size, sizeof(int));
+} sock_map_txmsg SEC(".maps");
 
-struct bpf_map_def SEC("maps") sock_map_redir = {
-	.type = TEST_MAP_TYPE,
-	.key_size = sizeof(int),
-	.value_size = sizeof(int),
-	.max_entries = 20,
-};
+struct {
+	__uint(type, TEST_MAP_TYPE);
+	__uint(max_entries, 20);
+	__uint(key_size, sizeof(int));
+	__uint(value_size, sizeof(int));
+} sock_map_redir SEC(".maps");
 
-struct bpf_map_def SEC("maps") sock_apply_bytes = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.key_size = sizeof(int),
-	.value_size = sizeof(int),
-	.max_entries = 1
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, int);
+	__type(value, int);
+} sock_apply_bytes SEC(".maps");
 
-struct bpf_map_def SEC("maps") sock_cork_bytes = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.key_size = sizeof(int),
-	.value_size = sizeof(int),
-	.max_entries = 1
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, int);
+	__type(value, int);
+} sock_cork_bytes SEC(".maps");
 
-struct bpf_map_def SEC("maps") sock_bytes = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.key_size = sizeof(int),
-	.value_size = sizeof(int),
-	.max_entries = 6
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 6);
+	__type(key, int);
+	__type(value, int);
+} sock_bytes SEC(".maps");
 
-struct bpf_map_def SEC("maps") sock_redir_flags = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.key_size = sizeof(int),
-	.value_size = sizeof(int),
-	.max_entries = 1
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, int);
+	__type(value, int);
+} sock_redir_flags SEC(".maps");
 
-struct bpf_map_def SEC("maps") sock_skb_opts = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.key_size = sizeof(int),
-	.value_size = sizeof(int),
-	.max_entries = 1
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, int);
+	__type(value, int);
+} sock_skb_opts SEC(".maps");
 
 SEC("sk_skb1")
 int bpf_prog1(struct __sk_buff *skb)
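The same split as in earlier hunks shows up here: the sockmap-typed maps (TEST_MAP_TYPE is defined to a sockmap or sockhash variant elsewhere in the tests) stay with raw __uint() sizes, since their values are socket references installed from user space, while the plain ARRAY maps gain BTF-typed int keys and values. A hedged user-space sketch of populating such a map (the helper name and key choice are hypothetical):

#include <stdio.h>
#include <bpf/bpf.h>

/* Hypothetical helper: add a connected TCP socket to a sockmap. */
static int sockmap_add(int map_fd, int sock_fd)
{
	int key = 0;

	/* the 4-byte value is the socket fd; the kernel resolves it
	 * to the underlying socket at update time
	 */
	if (bpf_map_update_elem(map_fd, &key, &sock_fd, BPF_ANY)) {
		perror("bpf_map_update_elem");
		return -1;
	}
	return 0;
}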

@@ -105,6 +105,7 @@ struct bpf_test {
 			__u64 data64[TEST_DATA_LEN / 8];
 		};
 	} retvals[MAX_TEST_RUNS];
+	enum bpf_attach_type expected_attach_type;
 };
 
 /* Note we want this to be 64 bit aligned so that the end of our array is
@@ -850,6 +851,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
 	int fd_prog, expected_ret, alignment_prevented_execution;
 	int prog_len, prog_type = test->prog_type;
 	struct bpf_insn *prog = test->insns;
+	struct bpf_load_program_attr attr;
 	int run_errs, run_successes;
 	int map_fds[MAX_NR_MAPS];
 	const char *expected_err;
@@ -881,8 +883,17 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
 		pflags |= BPF_F_STRICT_ALIGNMENT;
 	if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
 		pflags |= BPF_F_ANY_ALIGNMENT;
-	fd_prog = bpf_verify_program(prog_type, prog, prog_len, pflags,
-				     "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 4);
+
+	memset(&attr, 0, sizeof(attr));
+	attr.prog_type = prog_type;
+	attr.expected_attach_type = test->expected_attach_type;
+	attr.insns = prog;
+	attr.insns_cnt = prog_len;
+	attr.license = "GPL";
+	attr.log_level = 4;
+	attr.prog_flags = pflags;
+
+	fd_prog = bpf_load_program_xattr(&attr, bpf_vlog, sizeof(bpf_vlog));
 	if (fd_prog < 0 && !bpf_probe_prog_type(prog_type, 0)) {
 		printf("SKIP (unsupported program type %d)\n", prog_type);
 		skips++;
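The switch from bpf_verify_program() to bpf_load_program_xattr() is what lets the harness forward an expected_attach_type to the kernel: both libbpf wrappers drive the same BPF_PROG_LOAD command, but only the xattr variant exposes that attribute, and the wide-store sock_addr tests added below appear to be the first cases here that need it.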

@@ -912,7 +923,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
 			printf("FAIL\nUnexpected success to load!\n");
 			goto fail_log;
 		}
-		if (!strstr(bpf_vlog, expected_err)) {
+		if (!expected_err || !strstr(bpf_vlog, expected_err)) {
 			printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
 			       expected_err, bpf_vlog);
 			goto fail_log;

tools/testing/selftests/bpf/verifier/wide_store.c (new file, 36 lines)
@@ -0,0 +1,36 @@
+#define BPF_SOCK_ADDR(field, off, res, err) \
+{ \
+	"wide store to bpf_sock_addr." #field "[" #off "]", \
+	.insns = { \
+	BPF_MOV64_IMM(BPF_REG_0, 1), \
+	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, \
+		    offsetof(struct bpf_sock_addr, field[off])), \
+	BPF_EXIT_INSN(), \
+	}, \
+	.result = res, \
+	.prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR, \
+	.expected_attach_type = BPF_CGROUP_UDP6_SENDMSG, \
+	.errstr = err, \
+}
+
+/* user_ip6[0] is u64 aligned */
+BPF_SOCK_ADDR(user_ip6, 0, ACCEPT,
+	      NULL),
+BPF_SOCK_ADDR(user_ip6, 1, REJECT,
+	      "invalid bpf_context access off=12 size=8"),
+BPF_SOCK_ADDR(user_ip6, 2, ACCEPT,
+	      NULL),
+BPF_SOCK_ADDR(user_ip6, 3, REJECT,
+	      "invalid bpf_context access off=20 size=8"),
+
+/* msg_src_ip6[0] is _not_ u64 aligned */
+BPF_SOCK_ADDR(msg_src_ip6, 0, REJECT,
+	      "invalid bpf_context access off=44 size=8"),
+BPF_SOCK_ADDR(msg_src_ip6, 1, ACCEPT,
+	      NULL),
+BPF_SOCK_ADDR(msg_src_ip6, 2, REJECT,
+	      "invalid bpf_context access off=52 size=8"),
+BPF_SOCK_ADDR(msg_src_ip6, 3, REJECT,
+	      "invalid bpf_context access off=56 size=8"),
+
+#undef BPF_SOCK_ADDR
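To see why these particular indices pass or fail: in struct bpf_sock_addr, user_ip6[] starts at byte offset 8, so elements 0 and 2 sit on 8-byte boundaries (offsets 8 and 16) and accept a BPF_DW store, while msg_src_ip6[] starts at offset 44, leaving only element 1 (offset 48) properly aligned. The off= values in the error strings above are exactly these element offsets. A quick user-space sanity check of that layout (illustrative, relies only on the uapi header):

#include <stddef.h>
#include <assert.h>
#include <linux/bpf.h>

int main(void)
{
	/* offsets referenced by the accept/reject cases above */
	assert(offsetof(struct bpf_sock_addr, user_ip6[0]) == 8);
	assert(offsetof(struct bpf_sock_addr, user_ip6[1]) == 12);
	assert(offsetof(struct bpf_sock_addr, msg_src_ip6[0]) == 44);
	assert(offsetof(struct bpf_sock_addr, msg_src_ip6[1]) == 48);
	return 0;
}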