bpf: offload: add infrastructure for loading programs for a specific netdev
The fact that we don't know which device the program is going to be used on is quite limiting in the current eBPF infrastructure. We have to reverse or limit the changes which the kernel makes to the loaded bytecode if we want it to be offloaded to a networking device. We also have to invent new APIs for debugging and troubleshooting support.

Make it possible to load programs for a specific netdev. This helps us bring the debug information closer to the core eBPF infrastructure (e.g. we will be able to reuse the verifier log in device JIT). It also allows device JITs to perform translation on the original bytecode.

__bpf_prog_get(), when called to get a reference for an attachment point, will now refuse to give it out if the program has a device assigned. Following patches will add a version of that function which passes the expected netdev in. The @type argument in __bpf_prog_get() is renamed to attach_type to make it clearer that it's only set on attachment.

All calls to ndo_bpf are protected by rtnl; only the verifier callbacks are not. We need a wait queue to make sure the netdev doesn't get destroyed while the verifier is still running and calling into its driver.

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Simon Horman <simon.horman@netronome.com>
Reviewed-by: Quentin Monnet <quentin.monnet@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
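Userspace exercises the new load path by filling the prog_target_ifindex field of union bpf_attr before issuing BPF_PROG_LOAD. The following is a minimal sketch, not taken from this commit, assuming a uapi linux/bpf.h that already carries the field added by this patch; load_dev_bound_prog() and the choice of the XDP program type are purely illustrative.

/* Minimal sketch, not part of this commit: request that the program be
 * bound to (and translated/offloaded for) a specific netdev by passing
 * a non-zero prog_target_ifindex at load time. Assumes a uapi
 * linux/bpf.h that already contains the field added by this patch;
 * load_dev_bound_prog() is a hypothetical helper.
 */
#include <unistd.h>
#include <sys/syscall.h>
#include <net/if.h>
#include <linux/bpf.h>

static int load_dev_bound_prog(const struct bpf_insn *insns, __u32 insn_cnt,
			       const char *ifname)
{
	union bpf_attr attr = {};

	attr.prog_type = BPF_PROG_TYPE_XDP;
	attr.insns = (__u64)(unsigned long)insns;
	attr.insn_cnt = insn_cnt;
	attr.license = (__u64)(unsigned long)"GPL";
	/* non-zero ifindex marks the program as device-bound */
	attr.prog_target_ifindex = if_nametoindex(ifname);

	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}

The BPF_PROG_LOAD_LAST_FIELD update in a later hunk is what lets the kernel accept the extra attribute in the first place.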
committed by David S. Miller
parent f4e63525ee
commit ab3f0063c4
@@ -824,7 +824,10 @@ static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
 	if (type >= ARRAY_SIZE(bpf_prog_types) || !bpf_prog_types[type])
 		return -EINVAL;
 
-	prog->aux->ops = bpf_prog_types[type];
+	if (!bpf_prog_is_dev_bound(prog->aux))
+		prog->aux->ops = bpf_prog_types[type];
+	else
+		prog->aux->ops = &bpf_offload_prog_ops;
 	prog->type = type;
 	return 0;
 }
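bpf_prog_is_dev_bound() and bpf_offload_prog_ops are introduced elsewhere in this commit and are not shown on this page. A plausible reading, consistent with the prog->aux->offload check in the __bpf_prog_get() hunk further down, is that the helper simply tests whether offload state has been attached to the program:

/* Sketch only - the helper's real body is outside the hunks shown here.
 * Assumes aux->offload is the per-program offload state set up by
 * bpf_prog_offload_init() when prog_target_ifindex is non-zero.
 */
static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
{
	return aux->offload;
}

This also explains the ordering visible in the last hunk: bpf_prog_offload_init() runs before find_prog_type(), so the ops selection above already sees the device binding.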
@@ -1054,7 +1057,7 @@ struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
 }
 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
 
-static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
+static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type)
 {
 	struct fd f = fdget(ufd);
 	struct bpf_prog *prog;
@@ -1062,7 +1065,7 @@ static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
 	prog = ____bpf_prog_get(f);
 	if (IS_ERR(prog))
 		return prog;
-	if (type && prog->type != *type) {
+	if (attach_type && (prog->type != *attach_type || prog->aux->offload)) {
 		prog = ERR_PTR(-EINVAL);
 		goto out;
 	}
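Per the commit message, an attachment point that resolves a program fd through this path now gets -EINVAL for device-bound programs even when the type matches; a later patch in the series adds a variant that takes the expected netdev. A rough kernel-side sketch of what a generic attach point sees (attach_xdp_prog_sketch() is illustrative, not a function from this commit):

/* Illustrative only: bpf_prog_get_type() funnels into __bpf_prog_get(),
 * so a program with prog->aux->offload set is refused here even if its
 * type is correct.
 */
static int attach_xdp_prog_sketch(u32 ufd)
{
	struct bpf_prog *prog;

	prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_XDP);
	if (IS_ERR(prog))
		return PTR_ERR(prog);	/* -EINVAL for device-bound programs */

	/* ... hand the reference over to the attach point ... */
	return 0;
}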
@@ -1089,7 +1092,7 @@ struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
 EXPORT_SYMBOL_GPL(bpf_prog_get_type);
 
 /* last field in 'union bpf_attr' used by this command */
-#define BPF_PROG_LOAD_LAST_FIELD prog_name
+#define BPF_PROG_LOAD_LAST_FIELD prog_target_ifindex
 
 static int bpf_prog_load(union bpf_attr *attr)
 {
@@ -1152,6 +1155,12 @@ static int bpf_prog_load(union bpf_attr *attr)
 	atomic_set(&prog->aux->refcnt, 1);
 	prog->gpl_compatible = is_gpl ? 1 : 0;
 
+	if (attr->prog_target_ifindex) {
+		err = bpf_prog_offload_init(prog, attr);
+		if (err)
+			goto free_prog;
+	}
+
 	/* find program type: socket_filter vs tracing_filter */
 	err = find_prog_type(type, prog);
 	if (err < 0)