bpf: fix redirect to map under tail calls
Commits 109980b894 ("bpf: don't select potentially stale ri->map from buggy xdp progs") and 7c30013133 ("bpf: fix ri->map_owner pointer on bpf_prog_realloc") tried to ensure that buggy programs using the bpf_redirect_map() helper do not leave stale maps behind. The idea was to add a map_owner cookie into the per-CPU struct redirect_info, set to prog->aux by the prog making the helper call, as proof that the map is not stale since the prog implicitly holds a reference to it. This owner cookie could later be compared against the program calling into BPF to check whether they match, in which case the redirect could safely proceed with processing the map.

In (obvious) hindsight, this approach breaks down when tail calls are involved, since the original caller's prog->aux pointer does not have to match that of any of the progs in the tail call chain, and therefore the xdp buffer is dropped instead of redirected.

The way around this is to fix the issue differently (which also allows removing the related work from the fast path): once the lifetime of a redirect map has come to an end, its map free callback waits on synchronize_rcu() for outstanding xdp buffers and removes such a map pointer from the redirect info if found to be present. At that point no program is using the map anymore, so we simply invalidate the map pointers to NULL iff they previously pointed to that instance, while making sure that the redirect path only reads out the map once.

Fixes: 97f91a7cf0 ("bpf: add bpf_redirect_map helper routine")
Fixes: 109980b894 ("bpf: don't select potentially stale ri->map from buggy xdp progs")
Reported-by: Sebastiano Miano <sebastiano.miano@polito.it>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
committed by Alexei Starovoitov

parent a85da34e97
commit f6069b9aa9
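Each of the map free callbacks patched below gains a call to bpf_clear_redirect_map() ahead of the RCU grace period; its definition lives in net/core/filter.c, outside the hunks shown here, and scrubs stale per-CPU map pointers. A minimal sketch of that idea, assuming the per-CPU struct bpf_redirect_info of this kernel generation and simplified for illustration:

#include <linux/filter.h>
#include <linux/percpu.h>

/* Sketch: walk all CPUs and NULL out any per-CPU ri->map that still
 * points at the dying map. Names (struct bpf_redirect_info, the
 * bpf_redirect_info per-CPU variable) are assumptions based on the
 * kernel of this era.
 */
void bpf_clear_redirect_map(struct bpf_map *map)
{
	struct bpf_redirect_info *ri;
	int cpu;

	for_each_possible_cpu(cpu) {
		ri = per_cpu_ptr(&bpf_redirect_info, cpu);
		/* READ_ONCE() avoids dirtying the remote cacheline when
		 * no stale pointer is present; once the test passes,
		 * cmpxchg() makes sure the remote CPU did not swap in a
		 * different map in the meantime.
		 */
		if (unlikely(READ_ONCE(ri->map) == map))
			cmpxchg(&ri->map, map, NULL);
	}
}

The synchronize_rcu() (or synchronize_net() in the xskmap case) that follows each call then guarantees that xdp buffers still in flight under the old pointer have drained before the map itself is freed.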
kernel/bpf/cpumap.c
@@ -479,6 +479,8 @@ static void cpu_map_free(struct bpf_map *map)
 	 * It does __not__ ensure pending flush operations (if any) are
 	 * complete.
 	 */
+
+	bpf_clear_redirect_map(map);
 	synchronize_rcu();
 
 	/* To ensure all pending flush operations have completed wait for flush
kernel/bpf/devmap.c
@@ -161,6 +161,7 @@ static void dev_map_free(struct bpf_map *map)
 	list_del_rcu(&dtab->list);
 	spin_unlock(&dev_map_lock);
 
+	bpf_clear_redirect_map(map);
 	synchronize_rcu();
 
 	/* To ensure all pending flush operations have completed wait for flush
kernel/bpf/verifier.c
@@ -5844,27 +5844,6 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 			goto patch_call_imm;
 		}
 
-		if (insn->imm == BPF_FUNC_redirect_map) {
-			/* Note, we cannot use prog directly as imm as subsequent
-			 * rewrites would still change the prog pointer. The only
-			 * stable address we can use is aux, which also works with
-			 * prog clones during blinding.
-			 */
-			u64 addr = (unsigned long)prog->aux;
-			struct bpf_insn r4_ld[] = {
-				BPF_LD_IMM64(BPF_REG_4, addr),
-				*insn,
-			};
-			cnt = ARRAY_SIZE(r4_ld);
-
-			new_prog = bpf_patch_insn_data(env, i + delta, r4_ld, cnt);
-			if (!new_prog)
-				return -ENOMEM;
-
-			delta += cnt - 1;
-			env->prog = prog = new_prog;
-			insn = new_prog->insnsi + i + delta;
-		}
 patch_call_imm:
 		fn = env->ops->get_func_proto(insn->imm, env->prog);
 		/* all functions that have prototype and verifier allowed
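For context on what the deleted verifier rewrite was feeding: the injected BPF_LD_IMM64 passed prog->aux in R4 as a hidden fourth argument to bpf_redirect_map(), and the redirect path rejected the map when that cookie did not match the running program. A sketch of that now-removed check, with names as they appeared in the pre-fix net/core/filter.c (simplified, shown only to illustrate why tail calls broke it):

/* aux is the R4 cookie stored by the helper call site. */
static inline bool xdp_map_invalid(const struct bpf_prog *xdp_prog,
				   unsigned long aux)
{
	/* With tail calls, xdp_prog is the program attached to the
	 * device, while aux was written by the (possibly different)
	 * tail-called program that invoked the helper, so this
	 * comparison spuriously fails and the buffer is dropped.
	 */
	return (unsigned long)xdp_prog->aux != aux;
}

Dropping this cookie machinery from the fast path is what the map-free-time invalidation above replaces.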
kernel/bpf/xskmap.c
@@ -75,6 +75,7 @@ static void xsk_map_free(struct bpf_map *map)
 	struct xsk_map *m = container_of(map, struct xsk_map, map);
 	int i;
 
+	bpf_clear_redirect_map(map);
 	synchronize_net();
 
 	for (i = 0; i < map->max_entries; i++) {