bpf: write back the verifier log buffer as it gets filled

The verifier log buffer can be quite large (up to 16MB currently).
As Eric Dumazet points out, if we allow multiple verification
requests to proceed simultaneously, a malicious user may use the
verifier as a way of allocating large amounts of unswappable
memory to OOM the host.

Switch to a strategy of allocating a smaller buffer (1024B)
and writing it out into the user buffer after every print.

While at it, remove the old BUG_ON().

This is in preparation for the global verifier lock removal.
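
For illustration only (not part of the patch): below is a minimal userspace
sketch of the write-through strategy described above, assuming a plain
in-memory destination buffer in place of the user mapping and copy_to_user().
The names used here (struct tmp_log, log_append, TMP_LOG_SIZE) are invented
for the sketch and do not mirror the kernel API.

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

#define TMP_LOG_SIZE 1024	/* small, fixed scratch buffer per print */

struct tmp_log {
	char kbuf[TMP_LOG_SIZE];	/* scratch for one formatted message */
	char *ubuf;			/* caller-supplied destination buffer */
	size_t len_total;		/* size of the destination buffer */
	size_t len_used;		/* bytes already written back */
};

static int log_full(const struct tmp_log *log)
{
	return log->len_used >= log->len_total - 1;
}

/* Format one message into the scratch buffer, then immediately write it
 * back to the destination, clamping to the space left and keeping the
 * result NUL-terminated.
 */
static void log_append(struct tmp_log *log, const char *fmt, ...)
{
	va_list args;
	int n;

	if (!log->ubuf || log_full(log))
		return;

	va_start(args, fmt);
	n = vsnprintf(log->kbuf, sizeof(log->kbuf), fmt, args);
	va_end(args);
	if (n < 0)
		return;
	if ((size_t)n >= sizeof(log->kbuf))
		n = sizeof(log->kbuf) - 1;	/* message truncated by scratch buffer */
	if ((size_t)n > log->len_total - log->len_used - 1)
		n = log->len_total - log->len_used - 1;

	memcpy(log->ubuf + log->len_used, log->kbuf, n);
	log->len_used += n;
	log->ubuf[log->len_used] = '\0';
}

int main(void)
{
	char dest[64];	/* stand-in for the (much larger) user log buffer */
	struct tmp_log log = { .ubuf = dest, .len_total = sizeof(dest) };

	log_append(&log, "insn %d: r%d = %d\n", 0, 1, 42);
	log_append(&log, "processed %d insns\n", 1);
	printf("%s", dest);
	return 0;
}

The kernel patch expresses the same idea with vscnprintf() into log->kbuf and
copy_to_user() into log->ubuf, setting log->ubuf to NULL on a failed copy so
that subsequent prints become no-ops.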

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Simon Horman <simon.horman@netronome.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author: Jakub Kicinski
Date:   2017-10-09 10:30:15 -07:00
Committed by: David S. Miller
Parent: d66f2b91f9
Commit: a2a7d57010
2 changed files with 22 additions and 23 deletions

--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c

@@ -165,15 +165,26 @@ static __printf(2, 3) void verbose(struct bpf_verifier_env *env,
 				   const char *fmt, ...)
 {
 	struct bpf_verifer_log *log = &env->log;
+	unsigned int n;
 	va_list args;
 
-	if (!log->level || bpf_verifier_log_full(log))
+	if (!log->level || !log->ubuf || bpf_verifier_log_full(log))
 		return;
 
 	va_start(args, fmt);
-	log->len_used += vscnprintf(log->kbuf + log->len_used,
-				    log->len_total - log->len_used, fmt, args);
+	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
 	va_end(args);
+
+	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
+		  "verifier log line truncated - local buffer too short\n");
+
+	n = min(log->len_total - log->len_used - 1, n);
+	log->kbuf[n] = '\0';
+
+	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
+		log->len_used += n;
+	else
+		log->ubuf = NULL;
 }
 
 static bool type_is_pkt_pointer(enum bpf_reg_type type)
@@ -4263,11 +4274,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
 		if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
 		    !log->level || !log->ubuf)
 			goto err_unlock;
-
-		ret = -ENOMEM;
-		log->kbuf = vmalloc(log->len_total);
-		if (!log->kbuf)
-			goto err_unlock;
 	}
 
 	env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
@@ -4304,18 +4310,11 @@ skip_full_check:
 	if (ret == 0)
 		ret = fixup_bpf_calls(env);
 
-	if (log->level && bpf_verifier_log_full(log)) {
-		BUG_ON(log->len_used >= log->len_total);
-		/* verifier log exceeded user supplied buffer */
+	if (log->level && bpf_verifier_log_full(log))
 		ret = -ENOSPC;
-		/* fall through to return what was recorded */
-	}
-
-	/* copy verifier log back to user space including trailing zero */
-	if (log->level && copy_to_user(log->ubuf, log->kbuf,
-				       log->len_used + 1) != 0) {
+	if (log->level && !log->ubuf) {
 		ret = -EFAULT;
-		goto free_log_buf;
+		goto err_release_maps;
 	}
 
 	if (ret == 0 && env->used_map_cnt) {
@@ -4326,7 +4325,7 @@ skip_full_check:
 
 		if (!env->prog->aux->used_maps) {
 			ret = -ENOMEM;
-			goto free_log_buf;
+			goto err_release_maps;
 		}
 
 		memcpy(env->prog->aux->used_maps, env->used_maps,
@@ -4339,9 +4338,7 @@ skip_full_check:
 		convert_pseudo_ld_imm64(env);
 	}
 
-free_log_buf:
-	if (log->level)
-		vfree(log->kbuf);
+err_release_maps:
 	if (!env->prog->aux->used_maps)
 		/* if we didn't copy map pointers into bpf_prog_info, release
 		 * them now. Otherwise free_bpf_prog_info() will release them.