Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Alexei Starovoitov says:

====================
pull-request: bpf-next 2019-05-31

The following pull-request contains BPF updates for your *net-next* tree.

Lots of exciting new features in the first PR of this development cycle!
The main changes are:

1) misc verifier improvements, from Alexei.

2) bpftool can now convert btf to valid C, from Andrii.

3) verifier can insert explicit ZEXT insn when requested by 32-bit JITs.
   This feature greatly improves BPF speed on 32-bit architectures. From Jiong.

4) cgroups will now auto-detach bpf programs. This fixes the issue of
   thousands of bpf programs getting stuck in dying cgroups. From Roman.

5) new bpf_send_signal() helper, from Yonghong.

6) cgroup inet skb programs can signal CN to the stack, from Lawrence.

7) miscellaneous cleanups, from many developers.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
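Item 6 above widens the verdict a cgroup inet skb egress program may return: besides 0 (drop) and 1 (allow), the program can set bit 1 of its return value to signal congestion notification (CN) back to the stack, which is what the hbm sample in this merge does with "rv |= 2". Below is a minimal sketch of such a program; the SEC() name, the bpf_helpers.h include and the skb->len threshold are illustrative assumptions, not code from this merge.

/*
 * Sketch only: allow every packet, but signal CN (bit 1 of the return
 * value) for large packets, mirroring the hbm sample's "rv |= 2".
 */
#include <linux/bpf.h>
#include "bpf_helpers.h"

#define ALLOW_PKT	1	/* bit 0: let the packet through */
#define CN_BIT		2	/* bit 1: ask the stack to apply congestion control */

SEC("cgroup_skb/egress")
int signal_cn_example(struct __sk_buff *skb)
{
	int rv = ALLOW_PKT;

	if (skb->len > 1000)	/* illustrative threshold, not from the merge */
		rv |= CN_BIT;

	return rv;
}

char _license[] SEC("license") = "GPL";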
samples/bpf/.gitignore
@@ -1,6 +1,7 @@
cpustat
fds_example
hbm
ibumad
lathist
lwt_len_hist
map_perf_test
@@ -26,7 +26,6 @@ hostprogs-y += map_perf_test
hostprogs-y += test_overhead
hostprogs-y += test_cgrp2_array_pin
hostprogs-y += test_cgrp2_attach
hostprogs-y += test_cgrp2_attach2
hostprogs-y += test_cgrp2_sock
hostprogs-y += test_cgrp2_sock2
hostprogs-y += xdp1
@@ -81,7 +80,6 @@ map_perf_test-objs := bpf_load.o map_perf_test_user.o
test_overhead-objs := bpf_load.o test_overhead_user.o
test_cgrp2_array_pin-objs := test_cgrp2_array_pin.o
test_cgrp2_attach-objs := test_cgrp2_attach.o
test_cgrp2_attach2-objs := test_cgrp2_attach2.o $(CGROUP_HELPERS)
test_cgrp2_sock-objs := test_cgrp2_sock.o
test_cgrp2_sock2-objs := bpf_load.o test_cgrp2_sock2.o
xdp1-objs := xdp1_user.o
@@ -40,7 +40,7 @@ int prog_cnt;
int prog_array_fd = -1;

struct bpf_map_data map_data[MAX_MAPS];
int map_data_count = 0;
int map_data_count;

static int populate_prog_array(const char *event, int prog_fd)
{
@@ -65,7 +65,7 @@ static int write_kprobe_events(const char *val)
else
flags = O_WRONLY | O_APPEND;

fd = open("/sys/kernel/debug/tracing/kprobe_events", flags);
fd = open(DEBUGFS "kprobe_events", flags);

ret = write(fd, val, strlen(val));
close(fd);
@@ -490,8 +490,8 @@ static int load_elf_maps_section(struct bpf_map_data *maps, int maps_shndx,

/* Verify no newer features were requested */
if (validate_zero) {
addr = (unsigned char*) def + map_sz_copy;
end = (unsigned char*) def + map_sz_elf;
addr = (unsigned char *) def + map_sz_copy;
end = (unsigned char *) def + map_sz_elf;
for (; addr < end; addr++) {
if (*addr != 0) {
free(sym);
@@ -13,10 +13,10 @@ Usage() {
echo "egress or ingress bandwidht. It then uses iperf3 or netperf to create"
echo "loads. The output is the goodput in Mbps (unless -D was used)."
echo ""
echo "USAGE: $name [out] [-b=<prog>|--bpf=<prog>] [-c=<cc>|--cc=<cc>] [-D]"
echo " [-d=<delay>|--delay=<delay>] [--debug] [-E]"
echo "USAGE: $name [out] [-b=<prog>|--bpf=<prog>] [-c=<cc>|--cc=<cc>]"
echo " [-D] [-d=<delay>|--delay=<delay>] [--debug] [-E]"
echo " [-f=<#flows>|--flows=<#flows>] [-h] [-i=<id>|--id=<id >]"
echo " [-l] [-N] [-p=<port>|--port=<port>] [-P]"
echo " [-l] [-N] [--no_cn] [-p=<port>|--port=<port>] [-P]"
echo " [-q=<qdisc>] [-R] [-s=<server>|--server=<server]"
echo " [-S|--stats] -t=<time>|--time=<time>] [-w] [cubic|dctcp]"
echo " Where:"
@@ -33,6 +33,7 @@ Usage() {
echo " -f or --flows number of concurrent flows (default=1)"
echo " -i or --id cgroup id (an integer, default is 1)"
echo " -N use netperf instead of iperf3"
echo " --no_cn Do not return CN notifications"
echo " -l do not limit flows using loopback"
echo " -h Help"
echo " -p or --port iperf3 port (default is 5201)"
@@ -115,6 +116,9 @@ processArgs () {
-c=*|--cc=*)
cc="${i#*=}"
;;
--no_cn)
flags="$flags --no_cn"
;;
--debug)
flags="$flags -d"
debug_flag=1
@@ -16,6 +16,7 @@
* -l Also limit flows doing loopback
* -n <#> To create cgroup \"/hbm#\" and attach prog
* Default is /hbm1
* --no_cn Do not return cn notifications
* -r <rate> Rate limit in Mbps
* -s Get HBM stats (marked, dropped, etc.)
* -t <time> Exit after specified seconds (default is 0)
@@ -42,6 +43,7 @@

#include <linux/bpf.h>
#include <bpf/bpf.h>
#include <getopt.h>

#include "bpf_load.h"
#include "bpf_rlimit.h"
@@ -59,6 +61,7 @@ bool stats_flag;
bool loopback_flag;
bool debugFlag;
bool work_conserving_flag;
bool no_cn_flag;

static void Usage(void);
static void read_trace_pipe2(void);
@@ -185,6 +188,7 @@ static int run_bpf_prog(char *prog, int cg_id)
qstats.rate = rate;
qstats.stats = stats_flag ? 1 : 0;
qstats.loopback = loopback_flag ? 1 : 0;
qstats.no_cn = no_cn_flag ? 1 : 0;
if (bpf_map_update_elem(map_fd, &key, &qstats, BPF_ANY)) {
printf("ERROR: Could not update map element\n");
goto err;
@@ -312,6 +316,14 @@ static int run_bpf_prog(char *prog, int cg_id)
double percent_pkts, percent_bytes;
char fname[100];
FILE *fout;
int k;
static const char *returnValNames[] = {
"DROP_PKT",
"ALLOW_PKT",
"DROP_PKT_CWR",
"ALLOW_PKT_CWR"
};
#define RET_VAL_COUNT 4

// Future support of ingress
// if (!outFlag)
@@ -346,6 +358,31 @@ static int run_bpf_prog(char *prog, int cg_id)
(qstats.bytes_total + 1);
fprintf(fout, "pkts_dropped_percent:%6.2f\n", percent_pkts);
fprintf(fout, "bytes_dropped_percent:%6.2f\n", percent_bytes);

// ECN CE markings
percent_pkts = (qstats.pkts_ecn_ce * 100.0) /
(qstats.pkts_total + 1);
fprintf(fout, "pkts_ecn_ce:%6.2f (%d)\n", percent_pkts,
(int)qstats.pkts_ecn_ce);

// Average cwnd
fprintf(fout, "avg cwnd:%d\n",
(int)(qstats.sum_cwnd / (qstats.sum_cwnd_cnt + 1)));
// Average rtt
fprintf(fout, "avg rtt:%d\n",
(int)(qstats.sum_rtt / (qstats.pkts_total + 1)));
// Average credit
fprintf(fout, "avg credit:%d\n",
(int)(qstats.sum_credit /
(1500 * ((int)qstats.pkts_total) + 1)));

// Return values stats
for (k = 0; k < RET_VAL_COUNT; k++) {
percent_pkts = (qstats.returnValCount[k] * 100.0) /
(qstats.pkts_total + 1);
fprintf(fout, "%s:%6.2f (%d)\n", returnValNames[k],
percent_pkts, (int)qstats.returnValCount[k]);
}
fclose(fout);
}
@@ -366,14 +403,15 @@ static void Usage(void)
{
printf("This program loads a cgroup skb BPF program to enforce\n"
"cgroup output (egress) bandwidth limits.\n\n"
"USAGE: hbm [-o] [-d] [-l] [-n <id>] [-r <rate>] [-s]\n"
" [-t <secs>] [-w] [-h] [prog]\n"
"USAGE: hbm [-o] [-d] [-l] [-n <id>] [--no_cn] [-r <rate>]\n"
" [-s] [-t <secs>] [-w] [-h] [prog]\n"
" Where:\n"
" -o indicates egress direction (default)\n"
" -d print BPF trace debug buffer\n"
" -l also limit flows using loopback\n"
" -n <#> to create cgroup \"/hbm#\" and attach prog\n"
" Default is /hbm1\n"
" --no_cn disable CN notifcations\n"
" -r <rate> Rate in Mbps\n"
" -s Update HBM stats\n"
" -t <time> Exit after specified seconds (default is 0)\n"
@@ -393,9 +431,16 @@ int main(int argc, char **argv)
int k;
int cg_id = 1;
char *optstring = "iodln:r:st:wh";
struct option loptions[] = {
{"no_cn", 0, NULL, 1},
{NULL, 0, NULL, 0}
};

while ((k = getopt(argc, argv, optstring)) != -1) {
while ((k = getopt_long(argc, argv, optstring, loptions, NULL)) != -1) {
switch (k) {
case 1:
no_cn_flag = true;
break;
case'o':
break;
case 'd':
@@ -19,7 +19,8 @@ struct hbm_vqueue {
struct hbm_queue_stats {
unsigned long rate; /* in Mbps*/
unsigned long stats:1, /* get HBM stats (marked, dropped,..) */
loopback:1; /* also limit flows using loopback */
loopback:1, /* also limit flows using loopback */
no_cn:1; /* do not use cn flags */
unsigned long long pkts_marked;
unsigned long long bytes_marked;
unsigned long long pkts_dropped;
@@ -28,4 +29,10 @@ struct hbm_queue_stats {
unsigned long long bytes_total;
unsigned long long firstPacketTime;
unsigned long long lastPacketTime;
unsigned long long pkts_ecn_ce;
unsigned long long returnValCount[4];
unsigned long long sum_cwnd;
unsigned long long sum_rtt;
unsigned long long sum_cwnd_cnt;
long long sum_credit;
};
@@ -30,15 +30,8 @@
#define ALLOW_PKT 1
#define TCP_ECN_OK 1

#define HBM_DEBUG 0 // Set to 1 to enable debugging
#if HBM_DEBUG
#define bpf_printk(fmt, ...) \
({ \
char ____fmt[] = fmt; \
bpf_trace_printk(____fmt, sizeof(____fmt), \
##__VA_ARGS__); \
})
#else
#ifndef HBM_DEBUG // Define HBM_DEBUG to enable debugging
#undef bpf_printk
#define bpf_printk(fmt, ...)
#endif
@@ -72,17 +65,43 @@ struct bpf_map_def SEC("maps") queue_stats = {
BPF_ANNOTATE_KV_PAIR(queue_stats, int, struct hbm_queue_stats);

struct hbm_pkt_info {
int cwnd;
int rtt;
bool is_ip;
bool is_tcp;
short ecn;
};

static int get_tcp_info(struct __sk_buff *skb, struct hbm_pkt_info *pkti)
{
struct bpf_sock *sk;
struct bpf_tcp_sock *tp;

sk = skb->sk;
if (sk) {
sk = bpf_sk_fullsock(sk);
if (sk) {
if (sk->protocol == IPPROTO_TCP) {
tp = bpf_tcp_sock(sk);
if (tp) {
pkti->cwnd = tp->snd_cwnd;
pkti->rtt = tp->srtt_us >> 3;
return 0;
}
}
}
}
return 1;
}

static __always_inline void hbm_get_pkt_info(struct __sk_buff *skb,
struct hbm_pkt_info *pkti)
{
struct iphdr iph;
struct ipv6hdr *ip6h;

pkti->cwnd = 0;
pkti->rtt = 0;
bpf_skb_load_bytes(skb, 0, &iph, 12);
if (iph.version == 6) {
ip6h = (struct ipv6hdr *)&iph;
@@ -98,6 +117,8 @@ static __always_inline void hbm_get_pkt_info(struct __sk_buff *skb,
pkti->is_tcp = false;
pkti->ecn = 0;
}
if (pkti->is_tcp)
get_tcp_info(skb, pkti);
}

static __always_inline void hbm_init_vqueue(struct hbm_vqueue *qdp, int rate)
@@ -112,8 +133,14 @@ static __always_inline void hbm_update_stats(struct hbm_queue_stats *qsp,
int len,
unsigned long long curtime,
bool congestion_flag,
bool drop_flag)
bool drop_flag,
bool cwr_flag,
bool ecn_ce_flag,
struct hbm_pkt_info *pkti,
int credit)
{
int rv = ALLOW_PKT;

if (qsp != NULL) {
// Following is needed for work conserving
__sync_add_and_fetch(&(qsp->bytes_total), len);
@@ -123,7 +150,7 @@ static __always_inline void hbm_update_stats(struct hbm_queue_stats *qsp,
qsp->firstPacketTime = curtime;
qsp->lastPacketTime = curtime;
__sync_add_and_fetch(&(qsp->pkts_total), 1);
if (congestion_flag || drop_flag) {
if (congestion_flag) {
__sync_add_and_fetch(&(qsp->pkts_marked), 1);
__sync_add_and_fetch(&(qsp->bytes_marked), len);
}
@@ -132,6 +159,34 @@ static __always_inline void hbm_update_stats(struct hbm_queue_stats *qsp,
__sync_add_and_fetch(&(qsp->bytes_dropped),
len);
}
if (ecn_ce_flag)
__sync_add_and_fetch(&(qsp->pkts_ecn_ce), 1);
if (pkti->cwnd) {
__sync_add_and_fetch(&(qsp->sum_cwnd),
pkti->cwnd);
__sync_add_and_fetch(&(qsp->sum_cwnd_cnt), 1);
}
if (pkti->rtt)
__sync_add_and_fetch(&(qsp->sum_rtt),
pkti->rtt);
__sync_add_and_fetch(&(qsp->sum_credit), credit);

if (drop_flag)
rv = DROP_PKT;
if (cwr_flag)
rv |= 2;
if (rv == DROP_PKT)
__sync_add_and_fetch(&(qsp->returnValCount[0]),
1);
else if (rv == ALLOW_PKT)
__sync_add_and_fetch(&(qsp->returnValCount[1]),
1);
else if (rv == 2)
__sync_add_and_fetch(&(qsp->returnValCount[2]),
1);
else if (rv == 3)
__sync_add_and_fetch(&(qsp->returnValCount[3]),
1);
}
}
}
@@ -62,11 +62,12 @@ int _hbm_out_cg(struct __sk_buff *skb)
unsigned int queue_index = 0;
unsigned long long curtime;
int credit;
signed long long delta = 0, zero = 0;
signed long long delta = 0, new_credit;
int max_credit = MAX_CREDIT;
bool congestion_flag = false;
bool drop_flag = false;
bool cwr_flag = false;
bool ecn_ce_flag = false;
struct hbm_vqueue *qdp;
struct hbm_queue_stats *qsp = NULL;
int rv = ALLOW_PKT;
@@ -99,9 +100,11 @@ int _hbm_out_cg(struct __sk_buff *skb)
*/
if (delta > 0) {
qdp->lasttime = curtime;
credit += CREDIT_PER_NS(delta, qdp->rate);
if (credit > MAX_CREDIT)
new_credit = credit + CREDIT_PER_NS(delta, qdp->rate);
if (new_credit > MAX_CREDIT)
credit = MAX_CREDIT;
else
credit = new_credit;
}
credit -= len;
qdp->credit = credit;
@@ -119,13 +122,16 @@ int _hbm_out_cg(struct __sk_buff *skb)
// Set flags (drop, congestion, cwr)
// Dropping => we are congested, so ignore congestion flag
if (credit < -DROP_THRESH ||
(len > LARGE_PKT_THRESH &&
credit < -LARGE_PKT_DROP_THRESH)) {
// Very congested, set drop flag
(len > LARGE_PKT_THRESH && credit < -LARGE_PKT_DROP_THRESH)) {
// Very congested, set drop packet
drop_flag = true;
if (pkti.ecn)
congestion_flag = true;
else if (pkti.is_tcp)
cwr_flag = true;
} else if (credit < 0) {
// Congested, set congestion flag
if (pkti.ecn) {
if (pkti.ecn || pkti.is_tcp) {
if (credit < -MARK_THRESH)
congestion_flag = true;
else
@@ -136,22 +142,38 @@ int _hbm_out_cg(struct __sk_buff *skb)
}

if (congestion_flag) {
if (!bpf_skb_ecn_set_ce(skb)) {
if (len > LARGE_PKT_THRESH) {
if (bpf_skb_ecn_set_ce(skb)) {
ecn_ce_flag = true;
} else {
if (pkti.is_tcp) {
unsigned int rand = bpf_get_prandom_u32();

if (-credit >= MARK_THRESH +
(rand % MARK_REGION_SIZE)) {
// Do congestion control
cwr_flag = true;
}
} else if (len > LARGE_PKT_THRESH) {
// Problem if too many small packets?
drop_flag = true;
}
}
}

if (drop_flag)
rv = DROP_PKT;
if (qsp != NULL)
if (qsp->no_cn)
cwr_flag = false;

hbm_update_stats(qsp, len, curtime, congestion_flag, drop_flag);
hbm_update_stats(qsp, len, curtime, congestion_flag, drop_flag,
cwr_flag, ecn_ce_flag, &pkti, credit);

if (rv == DROP_PKT)
if (drop_flag) {
__sync_add_and_fetch(&(qdp->credit), len);
rv = DROP_PKT;
}

if (cwr_flag)
rv |= 2;
return rv;
}
char _license[] SEC("license") = "GPL";
@@ -21,13 +21,6 @@

#define DEBUG 1

#define bpf_printk(fmt, ...) \
({ \
char ____fmt[] = fmt; \
bpf_trace_printk(____fmt, sizeof(____fmt), \
##__VA_ARGS__); \
})

SEC("sockops")
int bpf_basertt(struct bpf_sock_ops *skops)
{
@@ -22,13 +22,6 @@

#define DEBUG 1

#define bpf_printk(fmt, ...) \
({ \
char ____fmt[] = fmt; \
bpf_trace_printk(____fmt, sizeof(____fmt), \
##__VA_ARGS__); \
})

SEC("sockops")
int bpf_bufs(struct bpf_sock_ops *skops)
{
@@ -22,13 +22,6 @@

#define DEBUG 1

#define bpf_printk(fmt, ...) \
({ \
char ____fmt[] = fmt; \
bpf_trace_printk(____fmt, sizeof(____fmt), \
##__VA_ARGS__); \
})

SEC("sockops")
int bpf_clamp(struct bpf_sock_ops *skops)
{
@@ -21,13 +21,6 @@

#define DEBUG 1

#define bpf_printk(fmt, ...) \
({ \
char ____fmt[] = fmt; \
bpf_trace_printk(____fmt, sizeof(____fmt), \
##__VA_ARGS__); \
})

SEC("sockops")
int bpf_cong(struct bpf_sock_ops *skops)
{
@@ -22,13 +22,6 @@

#define DEBUG 1

#define bpf_printk(fmt, ...) \
({ \
char ____fmt[] = fmt; \
bpf_trace_printk(____fmt, sizeof(____fmt), \
##__VA_ARGS__); \
})

SEC("sockops")
int bpf_iw(struct bpf_sock_ops *skops)
{
@@ -21,13 +21,6 @@

#define DEBUG 1

#define bpf_printk(fmt, ...) \
({ \
char ____fmt[] = fmt; \
bpf_trace_printk(____fmt, sizeof(____fmt), \
##__VA_ARGS__); \
})

SEC("sockops")
int bpf_rwnd(struct bpf_sock_ops *skops)
{
@@ -21,13 +21,6 @@

#define DEBUG 1

#define bpf_printk(fmt, ...) \
({ \
char ____fmt[] = fmt; \
bpf_trace_printk(____fmt, sizeof(____fmt), \
##__VA_ARGS__); \
})

SEC("sockops")
int bpf_synrto(struct bpf_sock_ops *skops)
{
@@ -20,13 +20,6 @@

#define DEBUG 1

#define bpf_printk(fmt, ...) \
({ \
char ____fmt[] = fmt; \
bpf_trace_printk(____fmt, sizeof(____fmt), \
##__VA_ARGS__); \
})

SEC("sockops")
int bpf_basertt(struct bpf_sock_ops *skops)
{
@@ -1,459 +0,0 @@
/* eBPF example program:
 *
 * - Creates arraymap in kernel with 4 bytes keys and 8 byte values
 *
 * - Loads eBPF program
 *
 * The eBPF program accesses the map passed in to store two pieces of
 * information. The number of invocations of the program, which maps
 * to the number of packets received, is stored to key 0. Key 1 is
 * incremented on each iteration by the number of bytes stored in
 * the skb. The program also stores the number of received bytes
 * in the cgroup storage.
 *
 * - Attaches the new program to a cgroup using BPF_PROG_ATTACH
 *
 * - Every second, reads map[0] and map[1] to see how many bytes and
 * packets were seen on any socket of tasks in the given cgroup.
 */

#define _GNU_SOURCE

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <sys/resource.h>
#include <sys/time.h>
#include <unistd.h>

#include <linux/bpf.h>
#include <bpf/bpf.h>

#include "bpf_insn.h"
#include "bpf_rlimit.h"
#include "cgroup_helpers.h"

#define FOO "/foo"
#define BAR "/foo/bar/"
#define PING_CMD "ping -c1 -w1 127.0.0.1 > /dev/null"

char bpf_log_buf[BPF_LOG_BUF_SIZE];

static int prog_load(int verdict)
{
int ret;
struct bpf_insn prog[] = {
BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
BPF_EXIT_INSN(),
};
size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);

ret = bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB,
prog, insns_cnt, "GPL", 0,
bpf_log_buf, BPF_LOG_BUF_SIZE);

if (ret < 0) {
log_err("Loading program");
printf("Output from verifier:\n%s\n-------\n", bpf_log_buf);
return 0;
}
return ret;
}

static int test_foo_bar(void)
{
int drop_prog, allow_prog, foo = 0, bar = 0, rc = 0;

allow_prog = prog_load(1);
if (!allow_prog)
goto err;

drop_prog = prog_load(0);
if (!drop_prog)
goto err;

if (setup_cgroup_environment())
goto err;

/* Create cgroup /foo, get fd, and join it */
foo = create_and_get_cgroup(FOO);
if (foo < 0)
goto err;

if (join_cgroup(FOO))
goto err;

if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_OVERRIDE)) {
log_err("Attaching prog to /foo");
goto err;
}

printf("Attached DROP prog. This ping in cgroup /foo should fail...\n");
assert(system(PING_CMD) != 0);

/* Create cgroup /foo/bar, get fd, and join it */
bar = create_and_get_cgroup(BAR);
if (bar < 0)
goto err;

if (join_cgroup(BAR))
goto err;

printf("Attached DROP prog. This ping in cgroup /foo/bar should fail...\n");
assert(system(PING_CMD) != 0);

if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_OVERRIDE)) {
log_err("Attaching prog to /foo/bar");
goto err;
}

printf("Attached PASS prog. This ping in cgroup /foo/bar should pass...\n");
assert(system(PING_CMD) == 0);

if (bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS)) {
log_err("Detaching program from /foo/bar");
goto err;
}

printf("Detached PASS from /foo/bar while DROP is attached to /foo.\n"
"This ping in cgroup /foo/bar should fail...\n");
assert(system(PING_CMD) != 0);

if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_OVERRIDE)) {
log_err("Attaching prog to /foo/bar");
goto err;
}

if (bpf_prog_detach(foo, BPF_CGROUP_INET_EGRESS)) {
log_err("Detaching program from /foo");
goto err;
}

printf("Attached PASS from /foo/bar and detached DROP from /foo.\n"
"This ping in cgroup /foo/bar should pass...\n");
assert(system(PING_CMD) == 0);

if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_OVERRIDE)) {
log_err("Attaching prog to /foo/bar");
goto err;
}

if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0)) {
errno = 0;
log_err("Unexpected success attaching prog to /foo/bar");
goto err;
}

if (bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS)) {
log_err("Detaching program from /foo/bar");
goto err;
}

if (!bpf_prog_detach(foo, BPF_CGROUP_INET_EGRESS)) {
errno = 0;
log_err("Unexpected success in double detach from /foo");
goto err;
}

if (bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, 0)) {
log_err("Attaching non-overridable prog to /foo");
goto err;
}

if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0)) {
errno = 0;
log_err("Unexpected success attaching non-overridable prog to /foo/bar");
goto err;
}

if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_OVERRIDE)) {
errno = 0;
log_err("Unexpected success attaching overridable prog to /foo/bar");
goto err;
}

if (!bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_OVERRIDE)) {
errno = 0;
log_err("Unexpected success attaching overridable prog to /foo");
goto err;
}

if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, 0)) {
log_err("Attaching different non-overridable prog to /foo");
goto err;
}

goto out;

err:
rc = 1;

out:
close(foo);
close(bar);
cleanup_cgroup_environment();
if (!rc)
printf("### override:PASS\n");
else
printf("### override:FAIL\n");
return rc;
}

static int map_fd = -1;

static int prog_load_cnt(int verdict, int val)
{
int cgroup_storage_fd, percpu_cgroup_storage_fd;

if (map_fd < 0)
map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0);
if (map_fd < 0) {
printf("failed to create map '%s'\n", strerror(errno));
return -1;
}

cgroup_storage_fd = bpf_create_map(BPF_MAP_TYPE_CGROUP_STORAGE,
sizeof(struct bpf_cgroup_storage_key), 8, 0, 0);
if (cgroup_storage_fd < 0) {
printf("failed to create map '%s'\n", strerror(errno));
return -1;
}

percpu_cgroup_storage_fd = bpf_create_map(
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
sizeof(struct bpf_cgroup_storage_key), 8, 0, 0);
if (percpu_cgroup_storage_fd < 0) {
printf("failed to create map '%s'\n", strerror(errno));
return -1;
}

struct bpf_insn prog[] = {
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */
BPF_LD_MAP_FD(BPF_REG_1, map_fd),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_1, val), /* r1 = 1 */
BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */

BPF_LD_MAP_FD(BPF_REG_1, cgroup_storage_fd),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
BPF_MOV64_IMM(BPF_REG_1, val),
BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_0, BPF_REG_1, 0, 0),

BPF_LD_MAP_FD(BPF_REG_1, percpu_cgroup_storage_fd),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1),
BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),

BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
BPF_EXIT_INSN(),
};
size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
int ret;

ret = bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB,
prog, insns_cnt, "GPL", 0,
bpf_log_buf, BPF_LOG_BUF_SIZE);

if (ret < 0) {
log_err("Loading program");
printf("Output from verifier:\n%s\n-------\n", bpf_log_buf);
return 0;
}
close(cgroup_storage_fd);
return ret;
}

static int test_multiprog(void)
{
__u32 prog_ids[4], prog_cnt = 0, attach_flags, saved_prog_id;
int cg1 = 0, cg2 = 0, cg3 = 0, cg4 = 0, cg5 = 0, key = 0;
int drop_prog, allow_prog[6] = {}, rc = 0;
unsigned long long value;
int i = 0;

for (i = 0; i < 6; i++) {
allow_prog[i] = prog_load_cnt(1, 1 << i);
if (!allow_prog[i])
goto err;
}
drop_prog = prog_load_cnt(0, 1);
if (!drop_prog)
goto err;

if (setup_cgroup_environment())
goto err;

cg1 = create_and_get_cgroup("/cg1");
if (cg1 < 0)
goto err;
cg2 = create_and_get_cgroup("/cg1/cg2");
if (cg2 < 0)
goto err;
cg3 = create_and_get_cgroup("/cg1/cg2/cg3");
if (cg3 < 0)
goto err;
cg4 = create_and_get_cgroup("/cg1/cg2/cg3/cg4");
if (cg4 < 0)
goto err;
cg5 = create_and_get_cgroup("/cg1/cg2/cg3/cg4/cg5");
if (cg5 < 0)
goto err;

if (join_cgroup("/cg1/cg2/cg3/cg4/cg5"))
goto err;

if (bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_MULTI)) {
log_err("Attaching prog to cg1");
goto err;
}
if (!bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_MULTI)) {
log_err("Unexpected success attaching the same prog to cg1");
goto err;
}
if (bpf_prog_attach(allow_prog[1], cg1, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_MULTI)) {
log_err("Attaching prog2 to cg1");
goto err;
}
if (bpf_prog_attach(allow_prog[2], cg2, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_OVERRIDE)) {
log_err("Attaching prog to cg2");
goto err;
}
if (bpf_prog_attach(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_MULTI)) {
log_err("Attaching prog to cg3");
goto err;
}
if (bpf_prog_attach(allow_prog[4], cg4, BPF_CGROUP_INET_EGRESS,
BPF_F_ALLOW_OVERRIDE)) {
log_err("Attaching prog to cg4");
goto err;
}
if (bpf_prog_attach(allow_prog[5], cg5, BPF_CGROUP_INET_EGRESS, 0)) {
log_err("Attaching prog to cg5");
goto err;
}
assert(system(PING_CMD) == 0);
assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0);
assert(value == 1 + 2 + 8 + 32);

/* query the number of effective progs in cg5 */
assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE,
NULL, NULL, &prog_cnt) == 0);
assert(prog_cnt == 4);
/* retrieve prog_ids of effective progs in cg5 */
assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE,
&attach_flags, prog_ids, &prog_cnt) == 0);
assert(prog_cnt == 4);
assert(attach_flags == 0);
saved_prog_id = prog_ids[0];
/* check enospc handling */
prog_ids[0] = 0;
prog_cnt = 2;
assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE,
&attach_flags, prog_ids, &prog_cnt) == -1 &&
errno == ENOSPC);
assert(prog_cnt == 4);
/* check that prog_ids are returned even when buffer is too small */
assert(prog_ids[0] == saved_prog_id);
/* retrieve prog_id of single attached prog in cg5 */
prog_ids[0] = 0;
assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, 0,
NULL, prog_ids, &prog_cnt) == 0);
assert(prog_cnt == 1);
assert(prog_ids[0] == saved_prog_id);

/* detach bottom program and ping again */
if (bpf_prog_detach2(-1, cg5, BPF_CGROUP_INET_EGRESS)) {
log_err("Detaching prog from cg5");
goto err;
}
value = 0;
assert(bpf_map_update_elem(map_fd, &key, &value, 0) == 0);
assert(system(PING_CMD) == 0);
assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0);
assert(value == 1 + 2 + 8 + 16);

/* detach 3rd from bottom program and ping again */
errno = 0;
if (!bpf_prog_detach2(0, cg3, BPF_CGROUP_INET_EGRESS)) {
log_err("Unexpected success on detach from cg3");
goto err;
}
if (bpf_prog_detach2(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS)) {
log_err("Detaching from cg3");
goto err;
}
value = 0;
assert(bpf_map_update_elem(map_fd, &key, &value, 0) == 0);
assert(system(PING_CMD) == 0);
assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0);
assert(value == 1 + 2 + 16);

/* detach 2nd from bottom program and ping again */
if (bpf_prog_detach2(-1, cg4, BPF_CGROUP_INET_EGRESS)) {
log_err("Detaching prog from cg4");
goto err;
}
value = 0;
assert(bpf_map_update_elem(map_fd, &key, &value, 0) == 0);
assert(system(PING_CMD) == 0);
assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0);
assert(value == 1 + 2 + 4);

prog_cnt = 4;
assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, BPF_F_QUERY_EFFECTIVE,
&attach_flags, prog_ids, &prog_cnt) == 0);
assert(prog_cnt == 3);
assert(attach_flags == 0);
assert(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, 0,
NULL, prog_ids, &prog_cnt) == 0);
assert(prog_cnt == 0);
goto out;
err:
rc = 1;

out:
for (i = 0; i < 6; i++)
if (allow_prog[i] > 0)
close(allow_prog[i]);
close(cg1);
close(cg2);
close(cg3);
close(cg4);
close(cg5);
cleanup_cgroup_environment();
if (!rc)
printf("### multi:PASS\n");
else
printf("### multi:FAIL\n");
return rc;
}

int main(int argc, char **argv)
{
int rc = 0;

rc = test_foo_bar();
if (rc)
return rc;

return test_multiprog();
}
@@ -7,13 +7,6 @@
#define SAMPLE_SIZE 64ul
#define MAX_CPUS 128

#define bpf_printk(fmt, ...) \
({ \
char ____fmt[] = fmt; \
bpf_trace_printk(____fmt, sizeof(____fmt), \
##__VA_ARGS__); \
})

struct bpf_map_def SEC("maps") my_map = {
.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
.key_size = sizeof(int),