Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Minor conflict: a CHECK was placed into an if() statement in net-next,
whilst a newline was added to that CHECK call in 'net'. Thanks to Daniel
for the merge resolution.

Signed-off-by: David S. Miller <davem@davemloft.net>
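For reference, the conflict had roughly this shape (a sketch of the two sides, not the literal conflicting hunk): 'net' only re-wrapped an existing CHECK() call onto a new line, while net-next made the result of that CHECK() drive an if () so the test can bail out early; the resolution keeps the net-next structure, as seen in the test_stacktrace_build_id hunk of test_progs.c below.

	/* 'net' side (sketch): same CHECK() call, merely re-wrapped */
	CHECK(build_id_matches < 1, "build id match",
	      "Didn't find expected build ID from the map\n");

	/* net-next side, kept by the merge: CHECK() feeds an if () */
	if (CHECK(build_id_matches < 1, "build id match",
		  "Didn't find expected build ID from the map\n"))
		goto disable_pmu;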

tools/testing/selftests/bpf/Makefile

@@ -32,7 +32,8 @@ TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test
	test_l4lb_noinline.o test_xdp_noinline.o test_stacktrace_map.o \
	sample_map_ret0.o test_tcpbpf_kern.o test_stacktrace_build_id.o \
	sockmap_tcp_msg_prog.o connect4_prog.o connect6_prog.o test_adjust_tail.o \
	test_btf_haskv.o test_btf_nokv.o test_sockmap_kern.o test_tunnel_kern.o
	test_btf_haskv.o test_btf_nokv.o test_sockmap_kern.o test_tunnel_kern.o \
	test_get_stack_rawtp.o

# Order correspond to 'make run_tests' order
TEST_PROGS := test_kmod.sh \

@@ -58,6 +59,7 @@ $(OUTPUT)/test_dev_cgroup: cgroup_helpers.c
$(OUTPUT)/test_sock: cgroup_helpers.c
$(OUTPUT)/test_sock_addr: cgroup_helpers.c
$(OUTPUT)/test_sockmap: cgroup_helpers.c
$(OUTPUT)/test_progs: trace_helpers.c

.PHONY: force

tools/testing/selftests/bpf/bpf_helpers.h

@@ -101,6 +101,8 @@ static int (*bpf_xdp_adjust_tail)(void *ctx, int offset) =
static int (*bpf_skb_get_xfrm_state)(void *ctx, int index, void *state,
				     int size, int flags) =
	(void *) BPF_FUNC_skb_get_xfrm_state;
static int (*bpf_get_stack)(void *ctx, void *buf, int size, int flags) =
	(void *) BPF_FUNC_get_stack;

/* llvm builtin functions that eBPF C program may use to
 * emit BPF_LD_ABS and BPF_LD_IND instructions

tools/testing/selftests/bpf/test_get_stack_rawtp.c (new file, 102 lines)
@@ -0,0 +1,102 @@
// SPDX-License-Identifier: GPL-2.0

#include <linux/bpf.h>
#include "bpf_helpers.h"

/* Permit pretty deep stack traces */
#define MAX_STACK_RAWTP 100
struct stack_trace_t {
	int pid;
	int kern_stack_size;
	int user_stack_size;
	int user_stack_buildid_size;
	__u64 kern_stack[MAX_STACK_RAWTP];
	__u64 user_stack[MAX_STACK_RAWTP];
	struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
};

struct bpf_map_def SEC("maps") perfmap = {
	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size = sizeof(int),
	.value_size = sizeof(__u32),
	.max_entries = 2,
};

struct bpf_map_def SEC("maps") stackdata_map = {
	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size = sizeof(__u32),
	.value_size = sizeof(struct stack_trace_t),
	.max_entries = 1,
};

/* Allocate per-cpu space twice the needed. For the code below
 *   usize = bpf_get_stack(ctx, raw_data, max_len, BPF_F_USER_STACK);
 *   if (usize < 0)
 *           return 0;
 *   ksize = bpf_get_stack(ctx, raw_data + usize, max_len - usize, 0);
 *
 * If we have value_size = MAX_STACK_RAWTP * sizeof(__u64),
 * verifier will complain that access "raw_data + usize"
 * with size "max_len - usize" may be out of bound.
 * The maximum "raw_data + usize" is "raw_data + max_len"
 * and the maximum "max_len - usize" is "max_len", verifier
 * concludes that the maximum buffer access range is
 * "raw_data[0...max_len * 2 - 1]" and hence reject the program.
 *
 * Doubling the to-be-used max buffer size can fix this verifier
 * issue and avoid complicated C programming massaging.
 * This is an acceptable workaround since there is one entry here.
 */
struct bpf_map_def SEC("maps") rawdata_map = {
	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size = sizeof(__u32),
	.value_size = MAX_STACK_RAWTP * sizeof(__u64) * 2,
	.max_entries = 1,
};

SEC("tracepoint/raw_syscalls/sys_enter")
int bpf_prog1(void *ctx)
{
	int max_len, max_buildid_len, usize, ksize, total_size;
	struct stack_trace_t *data;
	void *raw_data;
	__u32 key = 0;

	data = bpf_map_lookup_elem(&stackdata_map, &key);
	if (!data)
		return 0;

	max_len = MAX_STACK_RAWTP * sizeof(__u64);
	max_buildid_len = MAX_STACK_RAWTP * sizeof(struct bpf_stack_build_id);
	data->pid = bpf_get_current_pid_tgid();
	data->kern_stack_size = bpf_get_stack(ctx, data->kern_stack,
					      max_len, 0);
	data->user_stack_size = bpf_get_stack(ctx, data->user_stack, max_len,
					      BPF_F_USER_STACK);
	data->user_stack_buildid_size = bpf_get_stack(
		ctx, data->user_stack_buildid, max_buildid_len,
		BPF_F_USER_STACK | BPF_F_USER_BUILD_ID);
	bpf_perf_event_output(ctx, &perfmap, 0, data, sizeof(*data));

	/* write both kernel and user stacks to the same buffer */
	raw_data = bpf_map_lookup_elem(&rawdata_map, &key);
	if (!raw_data)
		return 0;

	usize = bpf_get_stack(ctx, raw_data, max_len, BPF_F_USER_STACK);
	if (usize < 0)
		return 0;

	ksize = bpf_get_stack(ctx, raw_data + usize, max_len - usize, 0);
	if (ksize < 0)
		return 0;

	total_size = usize + ksize;
	if (total_size > 0 && total_size <= max_len)
		bpf_perf_event_output(ctx, &perfmap, 0, raw_data, total_size);

	return 0;
}

char _license[] SEC("license") = "GPL";
__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */

tools/testing/selftests/bpf/test_progs.c

@@ -38,8 +38,10 @@ typedef __u16 __sum16;
#include "bpf_util.h"
#include "bpf_endian.h"
#include "bpf_rlimit.h"
#include "trace_helpers.h"

static int error_cnt, pass_cnt;
static bool jit_enabled;

#define MAGIC_BYTES 123

@@ -391,13 +393,30 @@ static inline __u64 ptr_to_u64(const void *ptr)
	return (__u64) (unsigned long) ptr;
}

static bool is_jit_enabled(void)
{
	const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
	bool enabled = false;
	int sysctl_fd;

	sysctl_fd = open(jit_sysctl, 0, O_RDONLY);
	if (sysctl_fd != -1) {
		char tmpc;

		if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
			enabled = (tmpc != '0');
		close(sysctl_fd);
	}

	return enabled;
}

static void test_bpf_obj_id(void)
{
	const __u64 array_magic_value = 0xfaceb00c;
	const __u32 array_key = 0;
	const int nr_iters = 2;
	const char *file = "./test_obj_id.o";
	const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
	const char *expected_prog_name = "test_obj_id";
	const char *expected_map_name = "test_map_id";
	const __u64 nsec_per_sec = 1000000000;

@@ -414,20 +433,11 @@ static void test_bpf_obj_id(void)
	char jited_insns[128], xlated_insns[128], zeros[128];
	__u32 i, next_id, info_len, nr_id_found, duration = 0;
	struct timespec real_time_ts, boot_time_ts;
	int sysctl_fd, jit_enabled = 0, err = 0;
	int err = 0;
	__u64 array_value;
	uid_t my_uid = getuid();
	time_t now, load_time;

	sysctl_fd = open(jit_sysctl, 0, O_RDONLY);
	if (sysctl_fd != -1) {
		char tmpc;

		if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
			jit_enabled = (tmpc != '0');
		close(sysctl_fd);
	}

	err = bpf_prog_get_fd_by_id(0);
	CHECK(err >= 0 || errno != ENOENT,
	      "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno);

@@ -896,11 +906,47 @@ static int compare_map_keys(int map1_fd, int map2_fd)
	return 0;
}

static int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len)
{
	__u32 key, next_key, *cur_key_p, *next_key_p;
	char *val_buf1, *val_buf2;
	int i, err = 0;

	val_buf1 = malloc(stack_trace_len);
	val_buf2 = malloc(stack_trace_len);
	cur_key_p = NULL;
	next_key_p = &key;
	while (bpf_map_get_next_key(smap_fd, cur_key_p, next_key_p) == 0) {
		err = bpf_map_lookup_elem(smap_fd, next_key_p, val_buf1);
		if (err)
			goto out;
		err = bpf_map_lookup_elem(amap_fd, next_key_p, val_buf2);
		if (err)
			goto out;
		for (i = 0; i < stack_trace_len; i++) {
			if (val_buf1[i] != val_buf2[i]) {
				err = -1;
				goto out;
			}
		}
		key = *next_key_p;
		cur_key_p = &key;
		next_key_p = &next_key;
	}
	if (errno != ENOENT)
		err = -1;

out:
	free(val_buf1);
	free(val_buf2);
	return err;
}

static void test_stacktrace_map()
{
	int control_map_fd, stackid_hmap_fd, stackmap_fd;
	int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
	const char *file = "./test_stacktrace_map.o";
	int bytes, efd, err, pmu_fd, prog_fd;
	int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
	struct perf_event_attr attr = {};
	__u32 key, val, duration = 0;
	struct bpf_object *obj;

@@ -956,6 +1002,10 @@ static void test_stacktrace_map()
	if (stackmap_fd < 0)
		goto disable_pmu;

	stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
	if (stack_amap_fd < 0)
		goto disable_pmu;

	/* give some time for bpf program run */
	sleep(1);

@@ -977,6 +1027,12 @@ static void test_stacktrace_map()
		  "err %d errno %d\n", err, errno))
		goto disable_pmu_noerr;

	stack_trace_len = PERF_MAX_STACK_DEPTH * sizeof(__u64);
	err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
	if (CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu_noerr;

	goto disable_pmu_noerr;
disable_pmu:
	error_cnt++;

@@ -1070,9 +1126,9 @@ err:

static void test_stacktrace_build_id(void)
{
	int control_map_fd, stackid_hmap_fd, stackmap_fd;
	int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
	const char *file = "./test_stacktrace_build_id.o";
	int bytes, efd, err, pmu_fd, prog_fd;
	int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
	struct perf_event_attr attr = {};
	__u32 key, previous_key, val, duration = 0;
	struct bpf_object *obj;

@@ -1137,6 +1193,11 @@ static void test_stacktrace_build_id(void)
		  err, errno))
		goto disable_pmu;

	stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
	if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
	       == 0);
	assert(system("./urandom_read") == 0);

@@ -1188,8 +1249,15 @@ static void test_stacktrace_build_id(void)
		previous_key = key;
	} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);

	CHECK(build_id_matches < 1, "build id match",
	      "Didn't find expected build ID from the map\n");
	if (CHECK(build_id_matches < 1, "build id match",
		  "Didn't find expected build ID from the map\n"))
		goto disable_pmu;

	stack_trace_len = PERF_MAX_STACK_DEPTH
		* sizeof(struct bpf_stack_build_id);
	err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
	CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
	      "err %d errno %d\n", err, errno);

disable_pmu:
	ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);

@@ -1204,8 +1272,147 @@ out:
	return;
}

#define MAX_CNT_RAWTP 10ull
#define MAX_STACK_RAWTP 100
struct get_stack_trace_t {
	int pid;
	int kern_stack_size;
	int user_stack_size;
	int user_stack_buildid_size;
	__u64 kern_stack[MAX_STACK_RAWTP];
	__u64 user_stack[MAX_STACK_RAWTP];
	struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
};

static int get_stack_print_output(void *data, int size)
{
	bool good_kern_stack = false, good_user_stack = false;
	const char *nonjit_func = "___bpf_prog_run";
	struct get_stack_trace_t *e = data;
	int i, num_stack;
	static __u64 cnt;
	struct ksym *ks;

	cnt++;

	if (size < sizeof(struct get_stack_trace_t)) {
		__u64 *raw_data = data;
		bool found = false;

		num_stack = size / sizeof(__u64);
		/* If jit is enabled, we do not have a good way to
		 * verify the sanity of the kernel stack. So we
		 * just assume it is good if the stack is not empty.
		 * This could be improved in the future.
		 */
		if (jit_enabled) {
			found = num_stack > 0;
		} else {
			for (i = 0; i < num_stack; i++) {
				ks = ksym_search(raw_data[i]);
				if (strcmp(ks->name, nonjit_func) == 0) {
					found = true;
					break;
				}
			}
		}
		if (found) {
			good_kern_stack = true;
			good_user_stack = true;
		}
	} else {
		num_stack = e->kern_stack_size / sizeof(__u64);
		if (jit_enabled) {
			good_kern_stack = num_stack > 0;
		} else {
			for (i = 0; i < num_stack; i++) {
				ks = ksym_search(e->kern_stack[i]);
				if (strcmp(ks->name, nonjit_func) == 0) {
					good_kern_stack = true;
					break;
				}
			}
		}
		if (e->user_stack_size > 0 && e->user_stack_buildid_size > 0)
			good_user_stack = true;
	}
	if (!good_kern_stack || !good_user_stack)
		return PERF_EVENT_ERROR;

	if (cnt == MAX_CNT_RAWTP)
		return PERF_EVENT_DONE;

	return PERF_EVENT_CONT;
}

static void test_get_stack_raw_tp(void)
{
	const char *file = "./test_get_stack_rawtp.o";
	int i, efd, err, prog_fd, pmu_fd, perfmap_fd;
	struct perf_event_attr attr = {};
	struct timespec tv = {0, 10};
	__u32 key = 0, duration = 0;
	struct bpf_object *obj;

	err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
		return;

	efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
	if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
		goto close_prog;

	perfmap_fd = bpf_find_map(__func__, obj, "perfmap");
	if (CHECK(perfmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
		  perfmap_fd, errno))
		goto close_prog;

	err = load_kallsyms();
	if (CHECK(err < 0, "load_kallsyms", "err %d errno %d\n", err, errno))
		goto close_prog;

	attr.sample_type = PERF_SAMPLE_RAW;
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_BPF_OUTPUT;
	pmu_fd = syscall(__NR_perf_event_open, &attr, getpid()/*pid*/, -1/*cpu*/,
			 -1/*group_fd*/, 0);
	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
		  errno))
		goto close_prog;

	err = bpf_map_update_elem(perfmap_fd, &key, &pmu_fd, BPF_ANY);
	if (CHECK(err < 0, "bpf_map_update_elem", "err %d errno %d\n", err,
		  errno))
		goto close_prog;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	if (CHECK(err < 0, "ioctl PERF_EVENT_IOC_ENABLE", "err %d errno %d\n",
		  err, errno))
		goto close_prog;

	err = perf_event_mmap(pmu_fd);
	if (CHECK(err < 0, "perf_event_mmap", "err %d errno %d\n", err, errno))
		goto close_prog;

	/* trigger some syscall action */
	for (i = 0; i < MAX_CNT_RAWTP; i++)
		nanosleep(&tv, NULL);

	err = perf_event_poller(pmu_fd, get_stack_print_output);
	if (CHECK(err < 0, "perf_event_poller", "err %d errno %d\n", err, errno))
		goto close_prog;

	goto close_prog_noerr;
close_prog:
	error_cnt++;
close_prog_noerr:
	bpf_object__close(obj);
}

int main(void)
{
	jit_enabled = is_jit_enabled();

	test_pkt_access();
	test_xdp();
	test_xdp_adjust_tail();

@@ -1219,6 +1426,7 @@ int main(void)
	test_stacktrace_map();
	test_stacktrace_build_id();
	test_stacktrace_map_raw_tp();
	test_get_stack_raw_tp();

	printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
	return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;

tools/testing/selftests/bpf/test_stacktrace_build_id.c

@@ -19,7 +19,7 @@ struct bpf_map_def SEC("maps") stackid_hmap = {
	.type = BPF_MAP_TYPE_HASH,
	.key_size = sizeof(__u32),
	.value_size = sizeof(__u32),
	.max_entries = 10000,
	.max_entries = 16384,
};

struct bpf_map_def SEC("maps") stackmap = {

@@ -31,6 +31,14 @@ struct bpf_map_def SEC("maps") stackmap = {
	.map_flags = BPF_F_STACK_BUILD_ID,
};

struct bpf_map_def SEC("maps") stack_amap = {
	.type = BPF_MAP_TYPE_ARRAY,
	.key_size = sizeof(__u32),
	.value_size = sizeof(struct bpf_stack_build_id)
		* PERF_MAX_STACK_DEPTH,
	.max_entries = 128,
};

/* taken from /sys/kernel/debug/tracing/events/random/urandom_read/format */
struct random_urandom_args {
	unsigned long long pad;

@@ -42,7 +50,10 @@ struct random_urandom_args {
SEC("tracepoint/random/urandom_read")
int oncpu(struct random_urandom_args *args)
{
	__u32 max_len = sizeof(struct bpf_stack_build_id)
		* PERF_MAX_STACK_DEPTH;
	__u32 key = 0, val = 0, *value_p;
	void *stack_p;

	value_p = bpf_map_lookup_elem(&control_map, &key);
	if (value_p && *value_p)

@@ -50,8 +61,13 @@ int oncpu(struct random_urandom_args *args)

	/* The size of stackmap and stackid_hmap should be the same */
	key = bpf_get_stackid(args, &stackmap, BPF_F_USER_STACK);
	if ((int)key >= 0)
	if ((int)key >= 0) {
		bpf_map_update_elem(&stackid_hmap, &key, &val, 0);
		stack_p = bpf_map_lookup_elem(&stack_amap, &key);
		if (stack_p)
			bpf_get_stack(args, stack_p, max_len,
				      BPF_F_USER_STACK | BPF_F_USER_BUILD_ID);
	}

	return 0;
}

tools/testing/selftests/bpf/test_stacktrace_map.c

@@ -19,14 +19,21 @@ struct bpf_map_def SEC("maps") stackid_hmap = {
	.type = BPF_MAP_TYPE_HASH,
	.key_size = sizeof(__u32),
	.value_size = sizeof(__u32),
	.max_entries = 10000,
	.max_entries = 16384,
};

struct bpf_map_def SEC("maps") stackmap = {
	.type = BPF_MAP_TYPE_STACK_TRACE,
	.key_size = sizeof(__u32),
	.value_size = sizeof(__u64) * PERF_MAX_STACK_DEPTH,
	.max_entries = 10000,
	.max_entries = 16384,
};

struct bpf_map_def SEC("maps") stack_amap = {
	.type = BPF_MAP_TYPE_ARRAY,
	.key_size = sizeof(__u32),
	.value_size = sizeof(__u64) * PERF_MAX_STACK_DEPTH,
	.max_entries = 16384,
};

/* taken from /sys/kernel/debug/tracing/events/sched/sched_switch/format */

@@ -44,7 +51,9 @@ struct sched_switch_args {
SEC("tracepoint/sched/sched_switch")
int oncpu(struct sched_switch_args *ctx)
{
	__u32 max_len = PERF_MAX_STACK_DEPTH * sizeof(__u64);
	__u32 key = 0, val = 0, *value_p;
	void *stack_p;

	value_p = bpf_map_lookup_elem(&control_map, &key);
	if (value_p && *value_p)

@@ -52,8 +61,12 @@ int oncpu(struct sched_switch_args *ctx)

	/* The size of stackmap and stackid_hmap should be the same */
	key = bpf_get_stackid(ctx, &stackmap, 0);
	if ((int)key >= 0)
	if ((int)key >= 0) {
		bpf_map_update_elem(&stackid_hmap, &key, &val, 0);
		stack_p = bpf_map_lookup_elem(&stack_amap, &key);
		if (stack_p)
			bpf_get_stack(ctx, stack_p, max_len, 0);
	}

	return 0;
}

tools/testing/selftests/bpf/test_verifier.c

@@ -47,7 +47,7 @@
# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

#define MAX_INSNS 512
#define MAX_INSNS BPF_MAXINSNS
#define MAX_FIXUPS 8
#define MAX_NR_MAPS 4
#define POINTER_VALUE 0xcafe4all

@@ -77,6 +77,8 @@ struct bpf_test {
	} result, result_unpriv;
	enum bpf_prog_type prog_type;
	uint8_t flags;
	__u8 data[TEST_DATA_LEN];
	void (*fill_helper)(struct bpf_test *self);
};

/* Note we want this to be 64 bit aligned so that the end of our array is

@@ -94,6 +96,62 @@ struct other_val {
	long long bar;
};

static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
{
	/* test: {skb->data[0], vlan_push} x 68 + {skb->data[0], vlan_pop} x 68 */
#define PUSH_CNT 51
	unsigned int len = BPF_MAXINSNS;
	struct bpf_insn *insn = self->insns;
	int i = 0, j, k = 0;

	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
loop:
	for (j = 0; j < PUSH_CNT; j++) {
		insn[i++] = BPF_LD_ABS(BPF_B, 0);
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
		i++;
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
		insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
		insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_skb_vlan_push),
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
		i++;
	}

	for (j = 0; j < PUSH_CNT; j++) {
		insn[i++] = BPF_LD_ABS(BPF_B, 0);
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
		i++;
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_skb_vlan_pop),
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
		i++;
	}
	if (++k < 5)
		goto loop;

	for (; i < len - 1; i++)
		insn[i] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 0xbef);
	insn[len - 1] = BPF_EXIT_INSN();
}

static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
{
	struct bpf_insn *insn = self->insns;
	unsigned int len = BPF_MAXINSNS;
	int i = 0;

	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	insn[i++] = BPF_LD_ABS(BPF_B, 0);
	insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
	i++;
	while (i < len - 1)
		insn[i++] = BPF_LD_ABS(BPF_B, 1);
	insn[i] = BPF_EXIT_INSN();
}

static struct bpf_test tests[] = {
	{
		"add+sub+mul",

@@ -11680,6 +11738,242 @@ static struct bpf_test tests[] = {
		.errstr = "BPF_XADD stores into R2 packet",
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		"bpf_get_stack return R0 within range",
		.insns = {
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28),
			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
			BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
			BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)),
			BPF_MOV64_IMM(BPF_REG_4, 256),
			BPF_EMIT_CALL(BPF_FUNC_get_stack),
			BPF_MOV64_IMM(BPF_REG_1, 0),
			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32),
			BPF_JMP_REG(BPF_JSLT, BPF_REG_1, BPF_REG_8, 16),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
			BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_9),
			BPF_MOV64_IMM(BPF_REG_4, 0),
			BPF_EMIT_CALL(BPF_FUNC_get_stack),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 4 },
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},
	{
		"ld_abs: invalid op 1",
		.insns = {
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
			BPF_LD_ABS(BPF_DW, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = REJECT,
		.errstr = "unknown opcode",
	},
	{
		"ld_abs: invalid op 2",
		.insns = {
			BPF_MOV32_IMM(BPF_REG_0, 256),
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
			BPF_LD_IND(BPF_DW, BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = REJECT,
		.errstr = "unknown opcode",
	},
	{
		"ld_abs: nmap reduced",
		.insns = {
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
			BPF_LD_ABS(BPF_H, 12),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 28),
			BPF_LD_ABS(BPF_H, 12),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 26),
			BPF_MOV32_IMM(BPF_REG_0, 18),
			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -64),
			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -64),
			BPF_LD_IND(BPF_W, BPF_REG_7, 14),
			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -60),
			BPF_MOV32_IMM(BPF_REG_0, 280971478),
			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -60),
			BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 15),
			BPF_LD_ABS(BPF_H, 12),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 13),
			BPF_MOV32_IMM(BPF_REG_0, 22),
			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
			BPF_LD_IND(BPF_H, BPF_REG_7, 14),
			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -52),
			BPF_MOV32_IMM(BPF_REG_0, 17366),
			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -48),
			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -48),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -52),
			BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
			BPF_MOV32_IMM(BPF_REG_0, 256),
			BPF_EXIT_INSN(),
			BPF_MOV32_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.data = {
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0,
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
			0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 256,
	},
	{
		"ld_abs: div + abs, test 1",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
			BPF_LD_ABS(BPF_B, 3),
			BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
			BPF_LD_ABS(BPF_B, 4),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
			BPF_LD_IND(BPF_B, BPF_REG_8, -70),
			BPF_EXIT_INSN(),
		},
		.data = {
			10, 20, 30, 40, 50,
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 10,
	},
	{
		"ld_abs: div + abs, test 2",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
			BPF_LD_ABS(BPF_B, 3),
			BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
			BPF_LD_ABS(BPF_B, 128),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
			BPF_LD_IND(BPF_B, BPF_REG_8, -70),
			BPF_EXIT_INSN(),
		},
		.data = {
			10, 20, 30, 40, 50,
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 0,
	},
	{
		"ld_abs: div + abs, test 3",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
			BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
			BPF_LD_ABS(BPF_B, 3),
			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
			BPF_EXIT_INSN(),
		},
		.data = {
			10, 20, 30, 40, 50,
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 0,
	},
	{
		"ld_abs: div + abs, test 4",
		.insns = {
			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
			BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
			BPF_LD_ABS(BPF_B, 256),
			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
			BPF_EXIT_INSN(),
		},
		.data = {
			10, 20, 30, 40, 50,
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 0,
	},
	{
		"ld_abs: vlan + abs, test 1",
		.insns = { },
		.data = {
			0x34,
		},
		.fill_helper = bpf_fill_ld_abs_vlan_push_pop,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 0xbef,
	},
	{
		"ld_abs: vlan + abs, test 2",
		.insns = {
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
			BPF_LD_ABS(BPF_B, 0),
			BPF_LD_ABS(BPF_H, 0),
			BPF_LD_ABS(BPF_W, 0),
			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
			BPF_MOV64_IMM(BPF_REG_6, 0),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
			BPF_MOV64_IMM(BPF_REG_2, 1),
			BPF_MOV64_IMM(BPF_REG_3, 2),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_skb_vlan_push),
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
			BPF_LD_ABS(BPF_B, 0),
			BPF_LD_ABS(BPF_H, 0),
			BPF_LD_ABS(BPF_W, 0),
			BPF_MOV64_IMM(BPF_REG_0, 42),
			BPF_EXIT_INSN(),
		},
		.data = {
			0x34,
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 42,
	},
	{
		"ld_abs: jump around ld_abs",
		.insns = { },
		.data = {
			10, 11,
		},
		.fill_helper = bpf_fill_jump_around_ld_abs,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
		.retval = 10,
	},
};

static int probe_filter_length(const struct bpf_insn *fp)

@@ -11783,7 +12077,7 @@ static int create_map_in_map(void)
	return outer_map_fd;
}

static char bpf_vlog[32768];
static char bpf_vlog[UINT_MAX >> 8];

static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
			  int *map_fds)

@@ -11794,6 +12088,9 @@ static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
	int *fixup_prog = test->fixup_prog;
	int *fixup_map_in_map = test->fixup_map_in_map;

	if (test->fill_helper)
		test->fill_helper(test);

	/* Allocating HTs with 1 elem is fine here, since we only test
	 * for verifier and not do a runtime lookup, so the only thing
	 * that really matters is value size in this case.

@@ -11843,10 +12140,8 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
			   int *passes, int *errors)
{
	int fd_prog, expected_ret, reject_from_alignment;
	int prog_len, prog_type = test->prog_type;
	struct bpf_insn *prog = test->insns;
	int prog_len = probe_filter_length(prog);
	char data_in[TEST_DATA_LEN] = {};
	int prog_type = test->prog_type;
	int map_fds[MAX_NR_MAPS];
	const char *expected_err;
	uint32_t retval;

@@ -11856,6 +12151,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
		map_fds[i] = -1;

	do_test_fixup(test, prog, map_fds);
	prog_len = probe_filter_length(prog);

	fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
				     prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,

@@ -11895,8 +12191,9 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
	}

	if (fd_prog >= 0) {
		err = bpf_prog_test_run(fd_prog, 1, data_in, sizeof(data_in),
					NULL, NULL, &retval, NULL);
		err = bpf_prog_test_run(fd_prog, 1, test->data,
					sizeof(test->data), NULL, NULL,
					&retval, NULL);
		if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
			printf("Unexpected bpf_prog_test_run error\n");
			goto fail_log;

tools/testing/selftests/bpf/trace_helpers.c (new file, 180 lines)
@@ -0,0 +1,180 @@
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <poll.h>
#include <unistd.h>
#include <linux/perf_event.h>
#include <sys/mman.h>
#include "trace_helpers.h"

#define MAX_SYMS 300000
static struct ksym syms[MAX_SYMS];
static int sym_cnt;

static int ksym_cmp(const void *p1, const void *p2)
{
	return ((struct ksym *)p1)->addr - ((struct ksym *)p2)->addr;
}

int load_kallsyms(void)
{
	FILE *f = fopen("/proc/kallsyms", "r");
	char func[256], buf[256];
	char symbol;
	void *addr;
	int i = 0;

	if (!f)
		return -ENOENT;

	while (!feof(f)) {
		if (!fgets(buf, sizeof(buf), f))
			break;
		if (sscanf(buf, "%p %c %s", &addr, &symbol, func) != 3)
			break;
		if (!addr)
			continue;
		syms[i].addr = (long) addr;
		syms[i].name = strdup(func);
		i++;
	}
	sym_cnt = i;
	qsort(syms, sym_cnt, sizeof(struct ksym), ksym_cmp);
	return 0;
}

struct ksym *ksym_search(long key)
{
	int start = 0, end = sym_cnt;
	int result;

	while (start < end) {
		size_t mid = start + (end - start) / 2;

		result = key - syms[mid].addr;
		if (result < 0)
			end = mid;
		else if (result > 0)
			start = mid + 1;
		else
			return &syms[mid];
	}

	if (start >= 1 && syms[start - 1].addr < key &&
	    key < syms[start].addr)
		/* valid ksym */
		return &syms[start - 1];

	/* out of range. return _stext */
	return &syms[0];
}

static int page_size;
static int page_cnt = 8;
static volatile struct perf_event_mmap_page *header;

int perf_event_mmap(int fd)
{
	void *base;
	int mmap_size;

	page_size = getpagesize();
	mmap_size = page_size * (page_cnt + 1);

	base = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (base == MAP_FAILED) {
		printf("mmap err\n");
		return -1;
	}

	header = base;
	return 0;
}

static int perf_event_poll(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	return poll(&pfd, 1, 1000);
}

struct perf_event_sample {
	struct perf_event_header header;
	__u32 size;
	char data[];
};

static int perf_event_read(perf_event_print_fn fn)
{
	__u64 data_tail = header->data_tail;
	__u64 data_head = header->data_head;
	__u64 buffer_size = page_cnt * page_size;
	void *base, *begin, *end;
	char buf[256];
	int ret;

	asm volatile("" ::: "memory"); /* in real code it should be smp_rmb() */
	if (data_head == data_tail)
		return PERF_EVENT_CONT;

	base = ((char *)header) + page_size;

	begin = base + data_tail % buffer_size;
	end = base + data_head % buffer_size;

	while (begin != end) {
		struct perf_event_sample *e;

		e = begin;
		if (begin + e->header.size > base + buffer_size) {
			long len = base + buffer_size - begin;

			assert(len < e->header.size);
			memcpy(buf, begin, len);
			memcpy(buf + len, base, e->header.size - len);
			e = (void *) buf;
			begin = base + e->header.size - len;
		} else if (begin + e->header.size == base + buffer_size) {
			begin = base;
		} else {
			begin += e->header.size;
		}

		if (e->header.type == PERF_RECORD_SAMPLE) {
			ret = fn(e->data, e->size);
			if (ret != PERF_EVENT_CONT)
				return ret;
		} else if (e->header.type == PERF_RECORD_LOST) {
			struct {
				struct perf_event_header header;
				__u64 id;
				__u64 lost;
			} *lost = (void *) e;
			printf("lost %lld events\n", lost->lost);
		} else {
			printf("unknown event type=%d size=%d\n",
			       e->header.type, e->header.size);
		}
	}

	__sync_synchronize(); /* smp_mb() */
	header->data_tail = data_head;
	return PERF_EVENT_CONT;
}

int perf_event_poller(int fd, perf_event_print_fn output_fn)
{
	int ret;

	for (;;) {
		perf_event_poll(fd);
		ret = perf_event_read(output_fn);
		if (ret != PERF_EVENT_CONT)
			return ret;
	}

	return PERF_EVENT_DONE;
}

tools/testing/selftests/bpf/trace_helpers.h (new file, 23 lines)
@@ -0,0 +1,23 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __TRACE_HELPER_H
#define __TRACE_HELPER_H

struct ksym {
	long addr;
	char *name;
};

int load_kallsyms(void);
struct ksym *ksym_search(long key);

typedef int (*perf_event_print_fn)(void *data, int size);

/* return code for perf_event_print_fn */
#define PERF_EVENT_DONE 0
#define PERF_EVENT_ERROR -1
#define PERF_EVENT_CONT -2

int perf_event_mmap(int fd);
/* return PERF_EVENT_DONE or PERF_EVENT_ERROR */
int perf_event_poller(int fd, perf_event_print_fn output_fn);
#endif