Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Ingo Molnar:
 "On the kernel side there's two x86 PMU driver fixes and a uprobes fix, plus on the tooling side there's a number of fixes and some late updates"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (36 commits)
  perf sched timehist: Fix invalid period calculation
  perf sched timehist: Remove hardcoded 'comm_width' check at print_summary
  perf sched timehist: Enlarge default 'comm_width'
  perf sched timehist: Honour 'comm_width' when aligning the headers
  perf/x86: Fix overlap counter scheduling bug
  perf/x86/pebs: Fix handling of PEBS buffer overflows
  samples/bpf: Move open_raw_sock to separate header
  samples/bpf: Remove perf_event_open() declaration
  samples/bpf: Be consistent with bpf_load_program bpf_insn parameter
  tools lib bpf: Add bpf_prog_{attach,detach}
  samples/bpf: Switch over to libbpf
  perf diff: Do not overwrite valid build id
  perf annotate: Don't throw error for zero length symbols
  perf bench futex: Fix lock-pi help string
  perf trace: Check if MAP_32BIT is defined (again)
  samples/bpf: Make perf_event_read() static
  uprobes: Fix uprobes on MIPS, allow for a cache flush after ixol breakpoint creation
  samples/bpf: Make samples more libbpf-centric
  tools lib bpf: Add flags to bpf_create_map()
  tools lib bpf: use __u32 from linux/types.h
  ...
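Most of the samples/bpf churn below comes from "samples/bpf: Switch over to libbpf": the samples' local mini-library wrappers bpf_update_elem(), bpf_lookup_elem(), bpf_delete_elem() and bpf_get_next_key() are replaced by the tools/lib/bpf calls bpf_map_update_elem(), bpf_map_lookup_elem(), bpf_map_delete_elem() and bpf_map_get_next_key(), and bpf_create_map() gains a map_flags argument. A minimal sketch of the new map calls (not taken from the patch; assumes the host compiler gets -I$(srctree)/tools/lib as in the Makefile change below, error handling abbreviated):

/* Minimal sketch of the tools/lib/bpf map API the samples now call. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>	/* tools/lib/bpf, built as bpf.o by the samples Makefile */

int main(void)
{
	int fd, key = 0;
	long value = 42, out = 0;

	/* bpf_create_map() now takes an explicit map_flags argument (0 here) */
	fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(key), sizeof(value), 1, 0);
	if (fd < 0) {
		printf("bpf_create_map: %s\n", strerror(errno));
		return 1;
	}

	/* was: bpf_update_elem(fd, &key, &value, BPF_ANY) */
	if (bpf_map_update_elem(fd, &key, &value, BPF_ANY))
		printf("update: %s\n", strerror(errno));

	/* was: bpf_lookup_elem(fd, &key, &out) */
	if (!bpf_map_lookup_elem(fd, &key, &out))
		printf("value = %ld\n", out);

	return 0;
}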
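Program loading changes shape too: the old bpf_prog_load(type, insns, size-in-bytes, license, kern_version) wrapper becomes bpf_load_program(), which takes an instruction count and an explicit log buffer, which is why the converted samples declare their own char bpf_log_buf[BPF_LOG_BUF_SIZE]. A minimal sketch, assuming it is compiled inside samples/bpf so the trimmed-down libbpf.h still supplies the BPF_* instruction macros:

/* Minimal sketch of loading a trivial socket filter via bpf_load_program(). */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <linux/bpf.h>
#include "libbpf.h"	/* samples/bpf header: BPF_* insn macros + <bpf/bpf.h> */

char bpf_log_buf[BPF_LOG_BUF_SIZE];

int main(void)
{
	struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),	/* r0 = 0: drop the packet */
		BPF_EXIT_INSN(),
	};
	size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
	int fd;

	fd = bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog, insns_cnt,
			      "GPL", 0 /* kern_version */,
			      bpf_log_buf, BPF_LOG_BUF_SIZE);
	if (fd < 0) {
		printf("bpf_load_program() err=%d\n%s", errno, bpf_log_buf);
		return 1;
	}
	return 0;
}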
@@ -35,40 +35,43 @@ hostprogs-y += tc_l2_redirect
hostprogs-y += lwt_len_hist
hostprogs-y += xdp_tx_iptunnel
test_lru_dist-objs := test_lru_dist.o libbpf.o
sock_example-objs := sock_example.o libbpf.o
fds_example-objs := bpf_load.o libbpf.o fds_example.o
sockex1-objs := bpf_load.o libbpf.o sockex1_user.o
sockex2-objs := bpf_load.o libbpf.o sockex2_user.o
sockex3-objs := bpf_load.o libbpf.o sockex3_user.o
tracex1-objs := bpf_load.o libbpf.o tracex1_user.o
tracex2-objs := bpf_load.o libbpf.o tracex2_user.o
tracex3-objs := bpf_load.o libbpf.o tracex3_user.o
tracex4-objs := bpf_load.o libbpf.o tracex4_user.o
tracex5-objs := bpf_load.o libbpf.o tracex5_user.o
tracex6-objs := bpf_load.o libbpf.o tracex6_user.o
test_probe_write_user-objs := bpf_load.o libbpf.o test_probe_write_user_user.o
trace_output-objs := bpf_load.o libbpf.o trace_output_user.o
lathist-objs := bpf_load.o libbpf.o lathist_user.o
offwaketime-objs := bpf_load.o libbpf.o offwaketime_user.o
spintest-objs := bpf_load.o libbpf.o spintest_user.o
map_perf_test-objs := bpf_load.o libbpf.o map_perf_test_user.o
test_overhead-objs := bpf_load.o libbpf.o test_overhead_user.o
test_cgrp2_array_pin-objs := libbpf.o test_cgrp2_array_pin.o
test_cgrp2_attach-objs := libbpf.o test_cgrp2_attach.o
test_cgrp2_attach2-objs := libbpf.o test_cgrp2_attach2.o cgroup_helpers.o
test_cgrp2_sock-objs := libbpf.o test_cgrp2_sock.o
test_cgrp2_sock2-objs := bpf_load.o libbpf.o test_cgrp2_sock2.o
xdp1-objs := bpf_load.o libbpf.o xdp1_user.o
# Libbpf dependencies
LIBBPF := ../../tools/lib/bpf/bpf.o
test_lru_dist-objs := test_lru_dist.o $(LIBBPF)
sock_example-objs := sock_example.o $(LIBBPF)
fds_example-objs := bpf_load.o $(LIBBPF) fds_example.o
sockex1-objs := bpf_load.o $(LIBBPF) sockex1_user.o
sockex2-objs := bpf_load.o $(LIBBPF) sockex2_user.o
sockex3-objs := bpf_load.o $(LIBBPF) sockex3_user.o
tracex1-objs := bpf_load.o $(LIBBPF) tracex1_user.o
tracex2-objs := bpf_load.o $(LIBBPF) tracex2_user.o
tracex3-objs := bpf_load.o $(LIBBPF) tracex3_user.o
tracex4-objs := bpf_load.o $(LIBBPF) tracex4_user.o
tracex5-objs := bpf_load.o $(LIBBPF) tracex5_user.o
tracex6-objs := bpf_load.o $(LIBBPF) tracex6_user.o
test_probe_write_user-objs := bpf_load.o $(LIBBPF) test_probe_write_user_user.o
trace_output-objs := bpf_load.o $(LIBBPF) trace_output_user.o
lathist-objs := bpf_load.o $(LIBBPF) lathist_user.o
offwaketime-objs := bpf_load.o $(LIBBPF) offwaketime_user.o
spintest-objs := bpf_load.o $(LIBBPF) spintest_user.o
map_perf_test-objs := bpf_load.o $(LIBBPF) map_perf_test_user.o
test_overhead-objs := bpf_load.o $(LIBBPF) test_overhead_user.o
test_cgrp2_array_pin-objs := $(LIBBPF) test_cgrp2_array_pin.o
test_cgrp2_attach-objs := $(LIBBPF) test_cgrp2_attach.o
test_cgrp2_attach2-objs := $(LIBBPF) test_cgrp2_attach2.o cgroup_helpers.o
test_cgrp2_sock-objs := $(LIBBPF) test_cgrp2_sock.o
test_cgrp2_sock2-objs := bpf_load.o $(LIBBPF) test_cgrp2_sock2.o
xdp1-objs := bpf_load.o $(LIBBPF) xdp1_user.o
# reuse xdp1 source intentionally
xdp2-objs := bpf_load.o libbpf.o xdp1_user.o
test_current_task_under_cgroup-objs := bpf_load.o libbpf.o cgroup_helpers.o \
xdp2-objs := bpf_load.o $(LIBBPF) xdp1_user.o
test_current_task_under_cgroup-objs := bpf_load.o $(LIBBPF) cgroup_helpers.o \
test_current_task_under_cgroup_user.o
trace_event-objs := bpf_load.o libbpf.o trace_event_user.o
sampleip-objs := bpf_load.o libbpf.o sampleip_user.o
tc_l2_redirect-objs := bpf_load.o libbpf.o tc_l2_redirect_user.o
lwt_len_hist-objs := bpf_load.o libbpf.o lwt_len_hist_user.o
xdp_tx_iptunnel-objs := bpf_load.o libbpf.o xdp_tx_iptunnel_user.o
trace_event-objs := bpf_load.o $(LIBBPF) trace_event_user.o
sampleip-objs := bpf_load.o $(LIBBPF) sampleip_user.o
tc_l2_redirect-objs := bpf_load.o $(LIBBPF) tc_l2_redirect_user.o
lwt_len_hist-objs := bpf_load.o $(LIBBPF) lwt_len_hist_user.o
xdp_tx_iptunnel-objs := bpf_load.o $(LIBBPF) xdp_tx_iptunnel_user.o
# Tell kbuild to always build the programs
always := $(hostprogs-y)
@@ -104,7 +107,10 @@ always += lwt_len_hist_kern.o
always += xdp_tx_iptunnel_kern.o
HOSTCFLAGS += -I$(objtree)/usr/include
HOSTCFLAGS += -I$(srctree)/tools/lib/
HOSTCFLAGS += -I$(srctree)/tools/testing/selftests/bpf/
HOSTCFLAGS += -I$(srctree)/tools/lib/ -I$(srctree)/tools/include
HOSTCFLAGS += -I$(srctree)/tools/perf
HOSTCFLAGS_bpf_load.o += -I$(objtree)/usr/include -Wno-unused-variable
HOSTLOADLIBES_fds_example += -lelf
@@ -1,8 +1,8 @@
eBPF sample programs
====================
This directory contains a mini eBPF library, test stubs, verifier
test-suite and examples for using eBPF.
This directory contains a test stubs, verifier test-suite and examples
for using eBPF. The examples use libbpf from tools/lib/bpf.
Build dependencies
==================
@@ -22,25 +22,34 @@
#include <poll.h>
#include <ctype.h>
#include "libbpf.h"
#include "bpf_helpers.h"
#include "bpf_load.h"
#include "perf-sys.h"
#define DEBUGFS "/sys/kernel/debug/tracing/"
static char license[128];
static int kern_version;
static bool processed_sec[128];
char bpf_log_buf[BPF_LOG_BUF_SIZE];
int map_fd[MAX_MAPS];
int prog_fd[MAX_PROGS];
int event_fd[MAX_PROGS];
int prog_cnt;
int prog_array_fd = -1;
struct bpf_map_def {
unsigned int type;
unsigned int key_size;
unsigned int value_size;
unsigned int max_entries;
unsigned int map_flags;
};
static int populate_prog_array(const char *event, int prog_fd)
{
int ind = atoi(event), err;
err = bpf_update_elem(prog_array_fd, &ind, &prog_fd, BPF_ANY);
err = bpf_map_update_elem(prog_array_fd, &ind, &prog_fd, BPF_ANY);
if (err < 0) {
printf("failed to store prog_fd in prog_array\n");
return -1;
@@ -58,6 +67,7 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
bool is_perf_event = strncmp(event, "perf_event", 10) == 0;
bool is_cgroup_skb = strncmp(event, "cgroup/skb", 10) == 0;
bool is_cgroup_sk = strncmp(event, "cgroup/sock", 11) == 0;
size_t insns_cnt = size / sizeof(struct bpf_insn);
enum bpf_prog_type prog_type;
char buf[256];
int fd, efd, err, id;
@@ -87,9 +97,10 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
return -1;
}
fd = bpf_prog_load(prog_type, prog, size, license, kern_version);
fd = bpf_load_program(prog_type, prog, insns_cnt, license, kern_version,
bpf_log_buf, BPF_LOG_BUF_SIZE);
if (fd < 0) {
printf("bpf_prog_load() err=%d\n%s", errno, bpf_log_buf);
printf("bpf_load_program() err=%d\n%s", errno, bpf_log_buf);
return -1;
}
@@ -169,7 +180,7 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
id = atoi(buf);
attr.config = id;
efd = perf_event_open(&attr, -1/*pid*/, 0/*cpu*/, -1/*group_fd*/, 0);
efd = sys_perf_event_open(&attr, -1/*pid*/, 0/*cpu*/, -1/*group_fd*/, 0);
if (efd < 0) {
printf("event %d fd %d err %s\n", id, efd, strerror(errno));
return -1;
@@ -1,12 +1,15 @@
#ifndef __BPF_LOAD_H
#define __BPF_LOAD_H
#include "libbpf.h"
#define MAX_MAPS 32
#define MAX_PROGS 32
extern int map_fd[MAX_MAPS];
extern int prog_fd[MAX_PROGS];
extern int event_fd[MAX_PROGS];
extern char bpf_log_buf[BPF_LOG_BUF_SIZE];
extern int prog_cnt;
/* parses elf file compiled by llvm .c->.o
@@ -14,6 +14,7 @@
#include "bpf_load.h"
#include "libbpf.h"
#include "sock_example.h"
#define BPF_F_PIN (1 << 0)
#define BPF_F_GET (1 << 1)
@@ -49,17 +50,19 @@ static int bpf_map_create(void)
static int bpf_prog_create(const char *object)
{
static const struct bpf_insn insns[] = {
static struct bpf_insn insns[] = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
};
size_t insns_cnt = sizeof(insns) / sizeof(struct bpf_insn);
if (object) {
assert(!load_bpf_file((char *)object));
return prog_fd[0];
} else {
return bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER,
insns, sizeof(insns), "GPL", 0);
return bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER,
insns, insns_cnt, "GPL", 0,
bpf_log_buf, BPF_LOG_BUF_SIZE);
}
}
@@ -83,12 +86,12 @@ static int bpf_do_map(const char *file, uint32_t flags, uint32_t key,
}
if ((flags & BPF_F_KEY_VAL) == BPF_F_KEY_VAL) {
ret = bpf_update_elem(fd, &key, &value, 0);
ret = bpf_map_update_elem(fd, &key, &value, 0);
printf("bpf: fd:%d u->(%u:%u) ret:(%d,%s)\n", fd, key, value,
ret, strerror(errno));
assert(ret == 0);
} else if (flags & BPF_F_KEY) {
ret = bpf_lookup_elem(fd, &key, &value);
ret = bpf_map_lookup_elem(fd, &key, &value);
printf("bpf: fd:%d l->(%u):%u ret:(%d,%s)\n", fd, key, value,
ret, strerror(errno));
assert(ret == 0);
@@ -73,7 +73,7 @@ static void get_data(int fd)
for (c = 0; c < MAX_CPU; c++) {
for (i = 0; i < MAX_ENTRIES; i++) {
key = c * MAX_ENTRIES + i;
bpf_lookup_elem(fd, &key, &value);
bpf_map_lookup_elem(fd, &key, &value);
cpu_hist[c].data[i] = value;
if (value > cpu_hist[c].max)
@@ -1,176 +0,0 @@
/* eBPF mini library */
#include <stdlib.h>
#include <stdio.h>
#include <linux/unistd.h>
#include <unistd.h>
#include <string.h>
#include <linux/netlink.h>
#include <linux/bpf.h>
#include <errno.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <linux/if_packet.h>
#include <arpa/inet.h>
#include "libbpf.h"
static __u64 ptr_to_u64(void *ptr)
{
return (__u64) (unsigned long) ptr;
}
int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size,
int max_entries, int map_flags)
{
union bpf_attr attr = {
.map_type = map_type,
.key_size = key_size,
.value_size = value_size,
.max_entries = max_entries,
.map_flags = map_flags,
};
return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}
int bpf_update_elem(int fd, void *key, void *value, unsigned long long flags)
{
union bpf_attr attr = {
.map_fd = fd,
.key = ptr_to_u64(key),
.value = ptr_to_u64(value),
.flags = flags,
};
return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}
int bpf_lookup_elem(int fd, void *key, void *value)
{
union bpf_attr attr = {
.map_fd = fd,
.key = ptr_to_u64(key),
.value = ptr_to_u64(value),
};
return syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}
int bpf_delete_elem(int fd, void *key)
{
union bpf_attr attr = {
.map_fd = fd,
.key = ptr_to_u64(key),
};
return syscall(__NR_bpf, BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
}
int bpf_get_next_key(int fd, void *key, void *next_key)
{
union bpf_attr attr = {
.map_fd = fd,
.key = ptr_to_u64(key),
.next_key = ptr_to_u64(next_key),
};
return syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
}
#define ROUND_UP(x, n) (((x) + (n) - 1u) & ~((n) - 1u))
char bpf_log_buf[LOG_BUF_SIZE];
int bpf_prog_load(enum bpf_prog_type prog_type,
const struct bpf_insn *insns, int prog_len,
const char *license, int kern_version)
{
union bpf_attr attr = {
.prog_type = prog_type,
.insns = ptr_to_u64((void *) insns),
.insn_cnt = prog_len / sizeof(struct bpf_insn),
.license = ptr_to_u64((void *) license),
.log_buf = ptr_to_u64(bpf_log_buf),
.log_size = LOG_BUF_SIZE,
.log_level = 1,
};
/* assign one field outside of struct init to make sure any
* padding is zero initialized
*/
attr.kern_version = kern_version;
bpf_log_buf[0] = 0;
return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}
int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type)
{
union bpf_attr attr = {
.target_fd = target_fd,
.attach_bpf_fd = prog_fd,
.attach_type = type,
};
return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
}
int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
{
union bpf_attr attr = {
.target_fd = target_fd,
.attach_type = type,
};
return syscall(__NR_bpf, BPF_PROG_DETACH, &attr, sizeof(attr));
}
int bpf_obj_pin(int fd, const char *pathname)
{
union bpf_attr attr = {
.pathname = ptr_to_u64((void *)pathname),
.bpf_fd = fd,
};
return syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
}
int bpf_obj_get(const char *pathname)
{
union bpf_attr attr = {
.pathname = ptr_to_u64((void *)pathname),
};
return syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
}
int open_raw_sock(const char *name)
{
struct sockaddr_ll sll;
int sock;
sock = socket(PF_PACKET, SOCK_RAW | SOCK_NONBLOCK | SOCK_CLOEXEC, htons(ETH_P_ALL));
if (sock < 0) {
printf("cannot create raw socket\n");
return -1;
}
memset(&sll, 0, sizeof(sll));
sll.sll_family = AF_PACKET;
sll.sll_ifindex = if_nametoindex(name);
sll.sll_protocol = htons(ETH_P_ALL);
if (bind(sock, (struct sockaddr *)&sll, sizeof(sll)) < 0) {
printf("bind to %s: %s\n", name, strerror(errno));
close(sock);
return -1;
}
return sock;
}
int perf_event_open(struct perf_event_attr *attr, int pid, int cpu,
int group_fd, unsigned long flags)
{
return syscall(__NR_perf_event_open, attr, pid, cpu,
group_fd, flags);
}
@@ -2,28 +2,10 @@
#ifndef __LIBBPF_H
#define __LIBBPF_H
#include <bpf/bpf.h>
struct bpf_insn;
int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size,
int max_entries, int map_flags);
int bpf_update_elem(int fd, void *key, void *value, unsigned long long flags);
int bpf_lookup_elem(int fd, void *key, void *value);
int bpf_delete_elem(int fd, void *key);
int bpf_get_next_key(int fd, void *key, void *next_key);
int bpf_prog_load(enum bpf_prog_type prog_type,
const struct bpf_insn *insns, int insn_len,
const char *license, int kern_version);
int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type);
int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type);
int bpf_obj_pin(int fd, const char *pathname);
int bpf_obj_get(const char *pathname);
#define LOG_BUF_SIZE (256 * 1024)
extern char bpf_log_buf[LOG_BUF_SIZE];
/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
#define BPF_ALU64_REG(OP, DST, SRC) \
@@ -203,10 +185,4 @@ extern char bpf_log_buf[LOG_BUF_SIZE];
.off = 0, \
.imm = 0 })
/* create RAW socket and bind to interface 'name' */
int open_raw_sock(const char *name);
struct perf_event_attr;
int perf_event_open(struct perf_event_attr *attr, int pid, int cpu,
int group_fd, unsigned long flags);
#endif
@@ -14,6 +14,8 @@
#define MAX_INDEX 64
#define MAX_STARS 38
char bpf_log_buf[BPF_LOG_BUF_SIZE];
static void stars(char *str, long val, long max, int width)
{
int i;
@@ -41,13 +43,13 @@ int main(int argc, char **argv)
return -1;
}
while (bpf_get_next_key(map_fd, &key, &next_key) == 0) {
while (bpf_map_get_next_key(map_fd, &key, &next_key) == 0) {
if (next_key >= MAX_INDEX) {
fprintf(stderr, "Key %lu out of bounds\n", next_key);
continue;
}
bpf_lookup_elem(map_fd, &next_key, values);
bpf_map_lookup_elem(map_fd, &next_key, values);
sum = 0;
for (i = 0; i < nr_cpus; i++)
@@ -49,14 +49,14 @@ static void print_stack(struct key_t *key, __u64 count)
int i;
printf("%s;", key->target);
if (bpf_lookup_elem(map_fd[3], &key->tret, ip) != 0) {
if (bpf_map_lookup_elem(map_fd[3], &key->tret, ip) != 0) {
printf("---;");
} else {
for (i = PERF_MAX_STACK_DEPTH - 1; i >= 0; i--)
print_ksym(ip[i]);
}
printf("-;");
if (bpf_lookup_elem(map_fd[3], &key->wret, ip) != 0) {
if (bpf_map_lookup_elem(map_fd[3], &key->wret, ip) != 0) {
printf("---;");
} else {
for (i = 0; i < PERF_MAX_STACK_DEPTH; i++)
@@ -77,8 +77,8 @@ static void print_stacks(int fd)
struct key_t key = {}, next_key;
__u64 value;
while (bpf_get_next_key(fd, &key, &next_key) == 0) {
bpf_lookup_elem(fd, &next_key, &value);
while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
bpf_map_lookup_elem(fd, &next_key, &value);
print_stack(&next_key, value);
key = next_key;
}
@@ -21,6 +21,7 @@
#include <sys/ioctl.h>
#include "libbpf.h"
#include "bpf_load.h"
#include "perf-sys.h"
#define DEFAULT_FREQ 99
#define DEFAULT_SECS 5
@@ -49,7 +50,7 @@ static int sampling_start(int *pmu_fd, int freq)
};
for (i = 0; i < nr_cpus; i++) {
pmu_fd[i] = perf_event_open(&pe_sample_attr, -1 /* pid */, i,
pmu_fd[i] = sys_perf_event_open(&pe_sample_attr, -1 /* pid */, i,
-1 /* group_fd */, 0 /* flags */);
if (pmu_fd[i] < 0) {
fprintf(stderr, "ERROR: Initializing perf sampling\n");
@@ -95,8 +96,8 @@ static void print_ip_map(int fd)
/* fetch IPs and counts */
key = 0, i = 0;
while (bpf_get_next_key(fd, &key, &next_key) == 0) {
bpf_lookup_elem(fd, &next_key, &value);
while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
bpf_map_lookup_elem(fd, &next_key, &value);
counts[i].ip = next_key;
counts[i++].count = value;
key = next_key;
@@ -27,6 +27,9 @@
#include <linux/ip.h>
#include <stddef.h>
#include "libbpf.h"
#include "sock_example.h"
char bpf_log_buf[BPF_LOG_BUF_SIZE];
static int test_sock(void)
{
@@ -54,9 +57,10 @@ static int test_sock(void)
BPF_MOV64_IMM(BPF_REG_0, 0), /* r0 = 0 */
BPF_EXIT_INSN(),
};
size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, prog, sizeof(prog),
"GPL", 0);
prog_fd = bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog, insns_cnt,
"GPL", 0, bpf_log_buf, BPF_LOG_BUF_SIZE);
if (prog_fd < 0) {
printf("failed to load prog '%s'\n", strerror(errno));
goto cleanup;
@@ -72,13 +76,13 @@ static int test_sock(void)
for (i = 0; i < 10; i++) {
key = IPPROTO_TCP;
assert(bpf_lookup_elem(map_fd, &key, &tcp_cnt) == 0);
assert(bpf_map_lookup_elem(map_fd, &key, &tcp_cnt) == 0);
key = IPPROTO_UDP;
assert(bpf_lookup_elem(map_fd, &key, &udp_cnt) == 0);
assert(bpf_map_lookup_elem(map_fd, &key, &udp_cnt) == 0);
key = IPPROTO_ICMP;
assert(bpf_lookup_elem(map_fd, &key, &icmp_cnt) == 0);
assert(bpf_map_lookup_elem(map_fd, &key, &icmp_cnt) == 0);
printf("TCP %lld UDP %lld ICMP %lld packets\n",
tcp_cnt, udp_cnt, icmp_cnt);
samples/bpf/sock_example.h (new file, 35 lines)
@@ -0,0 +1,35 @@
#include <stdlib.h>
#include <stdio.h>
#include <linux/unistd.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <linux/if_packet.h>
#include <arpa/inet.h>
#include "libbpf.h"
static inline int open_raw_sock(const char *name)
{
struct sockaddr_ll sll;
int sock;
sock = socket(PF_PACKET, SOCK_RAW | SOCK_NONBLOCK | SOCK_CLOEXEC, htons(ETH_P_ALL));
if (sock < 0) {
printf("cannot create raw socket\n");
return -1;
}
memset(&sll, 0, sizeof(sll));
sll.sll_family = AF_PACKET;
sll.sll_ifindex = if_nametoindex(name);
sll.sll_protocol = htons(ETH_P_ALL);
if (bind(sock, (struct sockaddr *)&sll, sizeof(sll)) < 0) {
printf("bind to %s: %s\n", name, strerror(errno));
close(sock);
return -1;
}
return sock;
}
@@ -3,6 +3,7 @@
#include <linux/bpf.h>
#include "libbpf.h"
#include "bpf_load.h"
#include "sock_example.h"
#include <unistd.h>
#include <arpa/inet.h>
@@ -32,13 +33,13 @@ int main(int ac, char **argv)
int key;
key = IPPROTO_TCP;
assert(bpf_lookup_elem(map_fd[0], &key, &tcp_cnt) == 0);
assert(bpf_map_lookup_elem(map_fd[0], &key, &tcp_cnt) == 0);
key = IPPROTO_UDP;
assert(bpf_lookup_elem(map_fd[0], &key, &udp_cnt) == 0);
assert(bpf_map_lookup_elem(map_fd[0], &key, &udp_cnt) == 0);
key = IPPROTO_ICMP;
assert(bpf_lookup_elem(map_fd[0], &key, &icmp_cnt) == 0);
assert(bpf_map_lookup_elem(map_fd[0], &key, &icmp_cnt) == 0);
printf("TCP %lld UDP %lld ICMP %lld bytes\n",
tcp_cnt, udp_cnt, icmp_cnt);
@@ -3,6 +3,7 @@
#include <linux/bpf.h>
#include "libbpf.h"
#include "bpf_load.h"
#include "sock_example.h"
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/resource.h>
@@ -39,8 +40,8 @@ int main(int ac, char **argv)
int key = 0, next_key;
struct pair value;
while (bpf_get_next_key(map_fd[0], &key, &next_key) == 0) {
bpf_lookup_elem(map_fd[0], &next_key, &value);
while (bpf_map_get_next_key(map_fd[0], &key, &next_key) == 0) {
bpf_map_lookup_elem(map_fd[0], &next_key, &value);
printf("ip %s bytes %lld packets %lld\n",
inet_ntoa((struct in_addr){htonl(next_key)}),
value.bytes, value.packets);
@@ -3,6 +3,7 @@
#include <linux/bpf.h>
#include "libbpf.h"
#include "bpf_load.h"
#include "sock_example.h"
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/resource.h>
@@ -54,8 +55,8 @@ int main(int argc, char **argv)
sleep(1);
printf("IP src.port -> dst.port bytes packets\n");
while (bpf_get_next_key(map_fd[2], &key, &next_key) == 0) {
bpf_lookup_elem(map_fd[2], &next_key, &value);
while (bpf_map_get_next_key(map_fd[2], &key, &next_key) == 0) {
bpf_map_lookup_elem(map_fd[2], &next_key, &value);
printf("%s.%05d -> %s.%05d %12lld %12lld\n",
inet_ntoa((struct in_addr){htonl(next_key.src)}),
next_key.port16[0],
@@ -31,8 +31,8 @@ int main(int ac, char **argv)
for (i = 0; i < 5; i++) {
key = 0;
printf("kprobing funcs:");
while (bpf_get_next_key(map_fd[0], &key, &next_key) == 0) {
bpf_lookup_elem(map_fd[0], &next_key, &value);
while (bpf_map_get_next_key(map_fd[0], &key, &next_key) == 0) {
bpf_map_lookup_elem(map_fd[0], &next_key, &value);
assert(next_key == value);
sym = ksym_search(value);
printf(" %s", sym->name);
@@ -41,8 +41,8 @@ int main(int ac, char **argv)
if (key)
printf("\n");
key = 0;
while (bpf_get_next_key(map_fd[0], &key, &next_key) == 0)
bpf_delete_elem(map_fd[0], &next_key);
while (bpf_map_get_next_key(map_fd[0], &key, &next_key) == 0)
bpf_map_delete_elem(map_fd[0], &next_key);
sleep(1);
}
@@ -60,9 +60,9 @@ int main(int argc, char **argv)
}
/* bpf_tunnel_key.remote_ipv4 expects host byte orders */
ret = bpf_update_elem(array_fd, &array_key, &ifindex, 0);
ret = bpf_map_update_elem(array_fd, &array_key, &ifindex, 0);
if (ret) {
perror("bpf_update_elem");
perror("bpf_map_update_elem");
goto out;
}
@@ -85,9 +85,9 @@ int main(int argc, char **argv)
}
}
ret = bpf_update_elem(array_fd, &array_key, &cg2_fd, 0);
ret = bpf_map_update_elem(array_fd, &array_key, &cg2_fd, 0);
if (ret) {
perror("bpf_update_elem");
perror("bpf_map_update_elem");
goto out;
}
@@ -36,6 +36,8 @@ enum {
MAP_KEY_BYTES,
};
char bpf_log_buf[BPF_LOG_BUF_SIZE];
static int prog_load(int map_fd, int verdict)
{
struct bpf_insn prog[] = {
@@ -66,9 +68,11 @@ static int prog_load(int map_fd, int verdict)
BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
BPF_EXIT_INSN(),
};
size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
return bpf_prog_load(BPF_PROG_TYPE_CGROUP_SKB,
prog, sizeof(prog), "GPL", 0);
return bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB,
prog, insns_cnt, "GPL", 0,
bpf_log_buf, BPF_LOG_BUF_SIZE);
}
static int usage(const char *argv0)
@@ -108,10 +112,10 @@ static int attach_filter(int cg_fd, int type, int verdict)
}
while (1) {
key = MAP_KEY_PACKETS;
assert(bpf_lookup_elem(map_fd, &key, &pkt_cnt) == 0);
assert(bpf_map_lookup_elem(map_fd, &key, &pkt_cnt) == 0);
key = MAP_KEY_BYTES;
assert(bpf_lookup_elem(map_fd, &key, &byte_cnt) == 0);
assert(bpf_map_lookup_elem(map_fd, &key, &byte_cnt) == 0);
printf("cgroup received %lld packets, %lld bytes\n",
pkt_cnt, byte_cnt);
@@ -32,6 +32,8 @@
#define BAR "/foo/bar/"
#define PING_CMD "ping -c1 -w1 127.0.0.1"
char bpf_log_buf[BPF_LOG_BUF_SIZE];
static int prog_load(int verdict)
{
int ret;
@@ -39,9 +41,11 @@ static int prog_load(int verdict)
BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
BPF_EXIT_INSN(),
};
size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
ret = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SKB,
prog, sizeof(prog), "GPL", 0);
ret = bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB,
prog, insns_cnt, "GPL", 0,
bpf_log_buf, BPF_LOG_BUF_SIZE);
if (ret < 0) {
log_err("Loading program");
@@ -23,6 +23,8 @@
#include "libbpf.h"
char bpf_log_buf[BPF_LOG_BUF_SIZE];
static int prog_load(int idx)
{
struct bpf_insn prog[] = {
@@ -33,9 +35,10 @@ static int prog_load(int idx)
BPF_MOV64_IMM(BPF_REG_0, 1), /* r0 = verdict */
BPF_EXIT_INSN(),
};
size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
return bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, prog, sizeof(prog),
"GPL", 0);
return bpf_load_program(BPF_PROG_TYPE_CGROUP_SOCK, prog, insns_cnt,
"GPL", 0, bpf_log_buf, BPF_LOG_BUF_SIZE);
}
static int usage(const char *argv0)
@@ -36,7 +36,7 @@ int main(int argc, char **argv)
if (!cg2)
goto err;
if (bpf_update_elem(map_fd[0], &idx, &cg2, BPF_ANY)) {
if (bpf_map_update_elem(map_fd[0], &idx, &cg2, BPF_ANY)) {
log_err("Adding target cgroup to map");
goto err;
}
@@ -50,7 +50,7 @@ int main(int argc, char **argv)
*/
sync();
bpf_lookup_elem(map_fd[1], &idx, &remote_pid);
bpf_map_lookup_elem(map_fd[1], &idx, &remote_pid);
if (local_pid != remote_pid) {
fprintf(stderr,
@@ -64,10 +64,10 @@ int main(int argc, char **argv)
goto err;
remote_pid = 0;
bpf_update_elem(map_fd[1], &idx, &remote_pid, BPF_ANY);
bpf_map_update_elem(map_fd[1], &idx, &remote_pid, BPF_ANY);
sync();
bpf_lookup_elem(map_fd[1], &idx, &remote_pid);
bpf_map_lookup_elem(map_fd[1], &idx, &remote_pid);
if (local_pid == remote_pid) {
fprintf(stderr, "BPF cgroup negative test did not work\n");
@@ -134,7 +134,7 @@ static int pfect_lru_lookup_or_insert(struct pfect_lru *lru,
int seen = 0;
lru->total++;
if (!bpf_lookup_elem(lru->map_fd, &key, &node)) {
if (!bpf_map_lookup_elem(lru->map_fd, &key, &node)) {
if (node) {
list_move(&node->list, &lru->list);
return 1;
@@ -151,7 +151,7 @@ static int pfect_lru_lookup_or_insert(struct pfect_lru *lru,
node = list_last_entry(&lru->list,
struct pfect_lru_node,
list);
bpf_update_elem(lru->map_fd, &node->key, &null_node, BPF_EXIST);
bpf_map_update_elem(lru->map_fd, &node->key, &null_node, BPF_EXIST);
}
node->key = key;
@@ -159,10 +159,10 @@ static int pfect_lru_lookup_or_insert(struct pfect_lru *lru,
lru->nr_misses++;
if (seen) {
assert(!bpf_update_elem(lru->map_fd, &key, &node, BPF_EXIST));
assert(!bpf_map_update_elem(lru->map_fd, &key, &node, BPF_EXIST));
} else {
lru->nr_unique++;
assert(!bpf_update_elem(lru->map_fd, &key, &node, BPF_NOEXIST));
assert(!bpf_map_update_elem(lru->map_fd, &key, &node, BPF_NOEXIST));
}
return seen;
@@ -285,11 +285,11 @@ static void do_test_lru_dist(int task, void *data)
pfect_lru_lookup_or_insert(&pfect_lru, key);
if (!bpf_lookup_elem(lru_map_fd, &key, &value))
if (!bpf_map_lookup_elem(lru_map_fd, &key, &value))
continue;
if (bpf_update_elem(lru_map_fd, &key, &value, BPF_NOEXIST)) {
printf("bpf_update_elem(lru_map_fd, %llu): errno:%d\n",
if (bpf_map_update_elem(lru_map_fd, &key, &value, BPF_NOEXIST)) {
printf("bpf_map_update_elem(lru_map_fd, %llu): errno:%d\n",
key, errno);
assert(0);
}
@@ -358,19 +358,19 @@ static void test_lru_loss0(int map_type, int map_flags)
for (key = 1; key <= 1000; key++) {
int start_key, end_key;
assert(bpf_update_elem(map_fd, &key, value, BPF_NOEXIST) == 0);
assert(bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST) == 0);
start_key = 101;
end_key = min(key, 900);
while (start_key <= end_key) {
bpf_lookup_elem(map_fd, &start_key, value);
bpf_map_lookup_elem(map_fd, &start_key, value);
start_key++;
}
}
for (key = 1; key <= 1000; key++) {
if (bpf_lookup_elem(map_fd, &key, value)) {
if (bpf_map_lookup_elem(map_fd, &key, value)) {
if (key <= 100)
old_unused_losses++;
else if (key <= 900)
@@ -408,10 +408,10 @@ static void test_lru_loss1(int map_type, int map_flags)
value[0] = 1234;
for (key = 1; key <= 1000; key++)
assert(!bpf_update_elem(map_fd, &key, value, BPF_NOEXIST));
assert(!bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST));
for (key = 1; key <= 1000; key++) {
if (bpf_lookup_elem(map_fd, &key, value))
if (bpf_map_lookup_elem(map_fd, &key, value))
nr_losses++;
}
@@ -436,7 +436,7 @@ static void do_test_parallel_lru_loss(int task, void *data)
next_ins_key = stable_base;
value[0] = 1234;
for (i = 0; i < nr_stable_elems; i++) {
assert(bpf_update_elem(map_fd, &next_ins_key, value,
assert(bpf_map_update_elem(map_fd, &next_ins_key, value,
BPF_NOEXIST) == 0);
next_ins_key++;
}
@@ -448,9 +448,9 @@ static void do_test_parallel_lru_loss(int task, void *data)
if (rn % 10) {
key = rn % nr_stable_elems + stable_base;
bpf_lookup_elem(map_fd, &key, value);
bpf_map_lookup_elem(map_fd, &key, value);
} else {
bpf_update_elem(map_fd, &next_ins_key, value,
bpf_map_update_elem(map_fd, &next_ins_key, value,
BPF_NOEXIST);
next_ins_key++;
}
@@ -458,7 +458,7 @@ static void do_test_parallel_lru_loss(int task, void *data)
key = stable_base;
for (i = 0; i < nr_stable_elems; i++) {
if (bpf_lookup_elem(map_fd, &key, value))
if (bpf_map_lookup_elem(map_fd, &key, value))
nr_losses++;
key++;
}
@@ -50,7 +50,7 @@ int main(int ac, char **argv)
mapped_addr_in->sin_port = htons(5555);
mapped_addr_in->sin_addr.s_addr = inet_addr("255.255.255.255");
assert(!bpf_update_elem(map_fd[0], &mapped_addr, &serv_addr, BPF_ANY));
assert(!bpf_map_update_elem(map_fd[0], &mapped_addr, &serv_addr, BPF_ANY));
assert(listen(serverfd, 5) == 0);
@@ -20,6 +20,7 @@
#include <sys/resource.h>
#include "libbpf.h"
#include "bpf_load.h"
#include "perf-sys.h"
#define SAMPLE_FREQ 50
@@ -61,14 +62,14 @@ static void print_stack(struct key_t *key, __u64 count)
int i;
printf("%3lld %s;", count, key->comm);
if (bpf_lookup_elem(map_fd[1], &key->kernstack, ip) != 0) {
if (bpf_map_lookup_elem(map_fd[1], &key->kernstack, ip) != 0) {
printf("---;");
} else {
for (i = PERF_MAX_STACK_DEPTH - 1; i >= 0; i--)
print_ksym(ip[i]);
}
printf("-;");
if (bpf_lookup_elem(map_fd[1], &key->userstack, ip) != 0) {
if (bpf_map_lookup_elem(map_fd[1], &key->userstack, ip) != 0) {
printf("---;");
} else {
for (i = PERF_MAX_STACK_DEPTH - 1; i >= 0; i--)
@@ -98,10 +99,10 @@ static void print_stacks(void)
int fd = map_fd[0], stack_map = map_fd[1];
sys_read_seen = sys_write_seen = false;
while (bpf_get_next_key(fd, &key, &next_key) == 0) {
bpf_lookup_elem(fd, &next_key, &value);
while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
bpf_map_lookup_elem(fd, &next_key, &value);
print_stack(&next_key, value);
bpf_delete_elem(fd, &next_key);
bpf_map_delete_elem(fd, &next_key);
key = next_key;
}
@@ -111,8 +112,8 @@ static void print_stacks(void)
}
/* clear stack map */
while (bpf_get_next_key(stack_map, &stackid, &next_id) == 0) {
bpf_delete_elem(stack_map, &next_id);
while (bpf_map_get_next_key(stack_map, &stackid, &next_id) == 0) {
bpf_map_delete_elem(stack_map, &next_id);
stackid = next_id;
}
}
@@ -125,9 +126,9 @@ static void test_perf_event_all_cpu(struct perf_event_attr *attr)
/* open perf_event on all cpus */
for (i = 0; i < nr_cpus; i++) {
pmu_fd[i] = perf_event_open(attr, -1, i, -1, 0);
pmu_fd[i] = sys_perf_event_open(attr, -1, i, -1, 0);
if (pmu_fd[i] < 0) {
printf("perf_event_open failed\n");
printf("sys_perf_event_open failed\n");
goto all_cpu_err;
}
assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0);
@@ -146,9 +147,9 @@ static void test_perf_event_task(struct perf_event_attr *attr)
int pmu_fd;
/* open task bound event */
pmu_fd = perf_event_open(attr, 0, -1, -1, 0);
pmu_fd = sys_perf_event_open(attr, 0, -1, -1, 0);
if (pmu_fd < 0) {
printf("perf_event_open failed\n");
printf("sys_perf_event_open failed\n");
return;
}
assert(ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0);
@@ -21,6 +21,7 @@
#include <signal.h>
#include "libbpf.h"
#include "bpf_load.h"
#include "perf-sys.h"
static int pmu_fd;
@@ -61,7 +62,7 @@ struct perf_event_sample {
char data[];
};
void perf_event_read(print_fn fn)
static void perf_event_read(print_fn fn)
{
__u64 data_tail = header->data_tail;
__u64 data_head = header->data_head;
@@ -159,10 +160,10 @@ static void test_bpf_perf_event(void)
};
int key = 0;
pmu_fd = perf_event_open(&attr, -1/*pid*/, 0/*cpu*/, -1/*group_fd*/, 0);
pmu_fd = sys_perf_event_open(&attr, -1/*pid*/, 0/*cpu*/, -1/*group_fd*/, 0);
assert(pmu_fd >= 0);
assert(bpf_update_elem(map_fd[0], &key, &pmu_fd, BPF_ANY) == 0);
assert(bpf_map_update_elem(map_fd[0], &key, &pmu_fd, BPF_ANY) == 0);
ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
}
@@ -48,12 +48,12 @@ static void print_hist_for_pid(int fd, void *task)
long max_value = 0;
int i, ind;
while (bpf_get_next_key(fd, &key, &next_key) == 0) {
while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
if (memcmp(&next_key, task, SIZE)) {
key = next_key;
continue;
}
bpf_lookup_elem(fd, &next_key, values);
bpf_map_lookup_elem(fd, &next_key, values);
value = 0;
for (i = 0; i < nr_cpus; i++)
value += values[i];
@@ -83,7 +83,7 @@ static void print_hist(int fd)
int task_cnt = 0;
int i;
while (bpf_get_next_key(fd, &key, &next_key) == 0) {
while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
int found = 0;
for (i = 0; i < task_cnt; i++)
@@ -136,8 +136,8 @@ int main(int ac, char **argv)
for (i = 0; i < 5; i++) {
key = 0;
while (bpf_get_next_key(map_fd[0], &key, &next_key) == 0) {
bpf_lookup_elem(map_fd[0], &next_key, &value);
while (bpf_map_get_next_key(map_fd[0], &key, &next_key) == 0) {
bpf_map_lookup_elem(map_fd[0], &next_key, &value);
printf("location 0x%lx count %ld\n", next_key, value);
key = next_key;
}
@@ -28,7 +28,7 @@ static void clear_stats(int fd)
memset(values, 0, sizeof(values));
for (key = 0; key < SLOTS; key++)
bpf_update_elem(fd, &key, values, BPF_ANY);
bpf_map_update_elem(fd, &key, values, BPF_ANY);
}
const char *color[] = {
@@ -89,7 +89,7 @@ static void print_hist(int fd)
int i;
for (key = 0; key < SLOTS; key++) {
bpf_lookup_elem(fd, &key, values);
bpf_map_lookup_elem(fd, &key, values);
value = 0;
for (i = 0; i < nr_cpus; i++)
value += values[i];
@@ -37,8 +37,8 @@ static void print_old_objects(int fd)
key = write(1, "\e[1;1H\e[2J", 12); /* clear screen */
key = -1;
while (bpf_get_next_key(map_fd[0], &key, &next_key) == 0) {
bpf_lookup_elem(map_fd[0], &next_key, &v);
while (bpf_map_get_next_key(map_fd[0], &key, &next_key) == 0) {
bpf_map_lookup_elem(map_fd[0], &next_key, &v);
key = next_key;
if (val - v.val < 1000000000ll)
/* object was allocated more then 1 sec ago */
@@ -10,6 +10,7 @@
#include <linux/bpf.h>
#include "libbpf.h"
#include "bpf_load.h"
#include "perf-sys.h"
#define SAMPLE_PERIOD 0x7fffffffffffffffULL
@@ -30,13 +31,13 @@ static void test_bpf_perf_event(void)
};
for (i = 0; i < nr_cpus; i++) {
pmu_fd[i] = perf_event_open(&attr_insn_pmu, -1/*pid*/, i/*cpu*/, -1/*group_fd*/, 0);
pmu_fd[i] = sys_perf_event_open(&attr_insn_pmu, -1/*pid*/, i/*cpu*/, -1/*group_fd*/, 0);
if (pmu_fd[i] < 0) {
printf("event syscall failed\n");
goto exit;
}
bpf_update_elem(map_fd[0], &i, &pmu_fd[i], BPF_ANY);
bpf_map_update_elem(map_fd[0], &i, &pmu_fd[i], BPF_ANY);
ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
}
@@ -43,7 +43,7 @@ static void poll_stats(int interval)
for (key = 0; key < nr_keys; key++) {
__u64 sum = 0;
assert(bpf_lookup_elem(map_fd[0], &key, values) == 0);
assert(bpf_map_lookup_elem(map_fd[0], &key, values) == 0);
for (i = 0; i < nr_cpus; i++)
sum += (values[i] - prev[key][i]);
if (sum)
@@ -51,7 +51,7 @@ static void poll_stats(unsigned int kill_after_s)
for (proto = 0; proto < nr_protos; proto++) {
__u64 sum = 0;
assert(bpf_lookup_elem(map_fd[0], &proto, values) == 0);
assert(bpf_map_lookup_elem(map_fd[0], &proto, values) == 0);
for (i = 0; i < nr_cpus; i++)
sum += (values[i] - prev[proto][i]);
@@ -237,8 +237,8 @@ int main(int argc, char **argv)
while (min_port <= max_port) {
vip.dport = htons(min_port++);
if (bpf_update_elem(map_fd[1], &vip, &tnl, BPF_NOEXIST)) {
perror("bpf_update_elem(&vip2tnl)");
if (bpf_map_update_elem(map_fd[1], &vip, &tnl, BPF_NOEXIST)) {
perror("bpf_map_update_elem(&vip2tnl)");
return 1;
}
}