Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Alexei Starovoitov says:

====================
pull-request: bpf-next 2020-01-22

The following pull-request contains BPF updates for your *net-next* tree.

We've added 92 non-merge commits during the last 16 day(s) which contain
a total of 320 files changed, 7532 insertions(+), 1448 deletions(-).

The main changes are:

1) function by function verification and program extensions from Alexei.

2) massive cleanup of selftests/bpf from Toke and Andrii.

3) batched bpf map operations from Brian and Yonghong.

4) tcp congestion control in bpf from Martin.

5) bulking for non-map xdp_redirect from Toke.

6) bpf_send_signal_thread helper from Yonghong.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
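The headline change, function-by-function verification, lets the verifier check a non-static BPF function once against its BTF prototype instead of re-verifying it inline at every call site. A minimal sketch of a program exercising this, assuming a libbpf-style build; the function and section names are illustrative, not taken from the patches:

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Global (non-static) function: with this series it is verified separately
 * for any arguments matching its prototype, and calls to it are checked for
 * type match only (see do_check_subprogs() in the diff below).
 */
__attribute__((noinline))
int accumulate(int x)
{
	return x + 1;
}

SEC("xdp")
int xdp_count(struct xdp_md *ctx)
{
	return accumulate(1) == 2 ? XDP_PASS : XDP_DROP;
}

char _license[] SEC("license") = "GPL";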
@@ -1122,10 +1122,6 @@ static void init_reg_state(struct bpf_verifier_env *env,
 	regs[BPF_REG_FP].type = PTR_TO_STACK;
 	mark_reg_known_zero(env, regs, BPF_REG_FP);
 	regs[BPF_REG_FP].frameno = state->frameno;
-
-	/* 1st arg to a function */
-	regs[BPF_REG_1].type = PTR_TO_CTX;
-	mark_reg_known_zero(env, regs, BPF_REG_1);
 }
 
 #define BPF_MAIN_FUNC (-1)
@@ -1916,6 +1912,7 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
 	case PTR_TO_TCP_SOCK:
 	case PTR_TO_TCP_SOCK_OR_NULL:
 	case PTR_TO_XDP_SOCK:
+	case PTR_TO_BTF_ID:
 		return true;
 	default:
 		return false;
@@ -2738,8 +2735,8 @@ static int get_callee_stack_depth(struct bpf_verifier_env *env,
 }
 #endif
 
-static int check_ctx_reg(struct bpf_verifier_env *env,
-			 const struct bpf_reg_state *reg, int regno)
+int check_ctx_reg(struct bpf_verifier_env *env,
+		  const struct bpf_reg_state *reg, int regno)
 {
 	/* Access to ctx or passing it to a helper is only allowed in
 	 * its original, unmodified form.
@@ -2858,11 +2855,6 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
 	u32 btf_id;
 	int ret;
 
-	if (atype != BPF_READ) {
-		verbose(env, "only read is supported\n");
-		return -EACCES;
-	}
-
 	if (off < 0) {
 		verbose(env,
 			"R%d is ptr_%s invalid negative access: off=%d\n",
@@ -2879,17 +2871,32 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
 		return -EACCES;
 	}
 
-	ret = btf_struct_access(&env->log, t, off, size, atype, &btf_id);
+	if (env->ops->btf_struct_access) {
+		ret = env->ops->btf_struct_access(&env->log, t, off, size,
+						  atype, &btf_id);
+	} else {
+		if (atype != BPF_READ) {
+			verbose(env, "only read is supported\n");
+			return -EACCES;
+		}
+
+		ret = btf_struct_access(&env->log, t, off, size, atype,
+					&btf_id);
+	}
+
 	if (ret < 0)
 		return ret;
 
-	if (ret == SCALAR_VALUE) {
-		mark_reg_unknown(env, regs, value_regno);
-		return 0;
+	if (atype == BPF_READ) {
+		if (ret == SCALAR_VALUE) {
+			mark_reg_unknown(env, regs, value_regno);
+			return 0;
+		}
+		mark_reg_known_zero(env, regs, value_regno);
+		regs[value_regno].type = PTR_TO_BTF_ID;
+		regs[value_regno].btf_id = btf_id;
 	}
-	mark_reg_known_zero(env, regs, value_regno);
-	regs[value_regno].type = PTR_TO_BTF_ID;
-	regs[value_regno].btf_id = btf_id;
 
 	return 0;
 }
@@ -3945,12 +3952,26 @@ static int release_reference(struct bpf_verifier_env *env,
 	return 0;
 }
 
+static void clear_caller_saved_regs(struct bpf_verifier_env *env,
+				    struct bpf_reg_state *regs)
+{
+	int i;
+
+	/* after the call registers r0 - r5 were scratched */
+	for (i = 0; i < CALLER_SAVED_REGS; i++) {
+		mark_reg_not_init(env, regs, caller_saved[i]);
+		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
+	}
+}
+
 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 			   int *insn_idx)
 {
 	struct bpf_verifier_state *state = env->cur_state;
+	struct bpf_func_info_aux *func_info_aux;
 	struct bpf_func_state *caller, *callee;
 	int i, err, subprog, target_insn;
+	bool is_global = false;
 
 	if (state->curframe + 1 >= MAX_CALL_FRAMES) {
 		verbose(env, "the call stack of %d frames is too deep\n",
@@ -3973,6 +3994,32 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 		return -EFAULT;
 	}
 
+	func_info_aux = env->prog->aux->func_info_aux;
+	if (func_info_aux)
+		is_global = func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
+	err = btf_check_func_arg_match(env, subprog, caller->regs);
+	if (err == -EFAULT)
+		return err;
+	if (is_global) {
+		if (err) {
+			verbose(env, "Caller passes invalid args into func#%d\n",
+				subprog);
+			return err;
+		} else {
+			if (env->log.level & BPF_LOG_LEVEL)
+				verbose(env,
+					"Func#%d is global and valid. Skipping.\n",
+					subprog);
+			clear_caller_saved_regs(env, caller->regs);
+
+			/* All global functions return SCALAR_VALUE */
+			mark_reg_unknown(env, caller->regs, BPF_REG_0);
+
+			/* continue with next insn after call */
+			return 0;
+		}
+	}
+
 	callee = kzalloc(sizeof(*callee), GFP_KERNEL);
 	if (!callee)
 		return -ENOMEM;
@@ -3999,18 +4046,11 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 	for (i = BPF_REG_1; i <= BPF_REG_5; i++)
 		callee->regs[i] = caller->regs[i];
 
-	/* after the call registers r0 - r5 were scratched */
-	for (i = 0; i < CALLER_SAVED_REGS; i++) {
-		mark_reg_not_init(env, caller->regs, caller_saved[i]);
-		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
-	}
+	clear_caller_saved_regs(env, caller->regs);
 
 	/* only increment it after check_reg_arg() finished */
 	state->curframe++;
 
-	if (btf_check_func_arg_match(env, subprog))
-		return -EINVAL;
-
 	/* and go analyze first insn of the callee */
 	*insn_idx = target_insn;
 
@@ -6360,8 +6400,30 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
 static int check_return_code(struct bpf_verifier_env *env)
 {
 	struct tnum enforce_attach_type_range = tnum_unknown;
+	const struct bpf_prog *prog = env->prog;
 	struct bpf_reg_state *reg;
 	struct tnum range = tnum_range(0, 1);
+	int err;
+
+	/* The struct_ops func-ptr's return type could be "void" */
+	if (env->prog->type == BPF_PROG_TYPE_STRUCT_OPS &&
+	    !prog->aux->attach_func_proto->type)
+		return 0;
+
+	/* eBPF calling convention is such that R0 is used
+	 * to return the value from eBPF program.
+	 * Make sure that it's readable at this time
+	 * of bpf_exit, which means that program wrote
+	 * something into it earlier
+	 */
+	err = check_reg_arg(env, BPF_REG_0, SRC_OP);
+	if (err)
+		return err;
+
+	if (is_pointer_value(env, BPF_REG_0)) {
+		verbose(env, "R0 leaks addr as return value\n");
+		return -EACCES;
+	}
 
 	switch (env->prog->type) {
 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
@@ -6750,12 +6812,13 @@ static int check_btf_func(struct bpf_verifier_env *env,
 
 		/* check type_id */
 		type = btf_type_by_id(btf, krecord[i].type_id);
-		if (!type || BTF_INFO_KIND(type->info) != BTF_KIND_FUNC) {
+		if (!type || !btf_type_is_func(type)) {
 			verbose(env, "invalid type id %d in func info",
 				krecord[i].type_id);
 			ret = -EINVAL;
 			goto err_free;
 		}
+		info_aux[i].linkage = BTF_INFO_VLEN(type->info);
 		prev_offset = krecord[i].insn_off;
 		urecord += urec_size;
 	}
@@ -7735,35 +7798,13 @@ static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
 
 static int do_check(struct bpf_verifier_env *env)
 {
-	struct bpf_verifier_state *state;
+	struct bpf_verifier_state *state = env->cur_state;
 	struct bpf_insn *insns = env->prog->insnsi;
 	struct bpf_reg_state *regs;
 	int insn_cnt = env->prog->len;
 	bool do_print_state = false;
 	int prev_insn_idx = -1;
 
-	env->prev_linfo = NULL;
-
-	state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
-	if (!state)
-		return -ENOMEM;
-	state->curframe = 0;
-	state->speculative = false;
-	state->branches = 1;
-	state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
-	if (!state->frame[0]) {
-		kfree(state);
-		return -ENOMEM;
-	}
-	env->cur_state = state;
-	init_func_state(env, state->frame[0],
-			BPF_MAIN_FUNC /* callsite */,
-			0 /* frameno */,
-			0 /* subprogno, zero == main subprog */);
-
-	if (btf_check_func_arg_match(env, 0))
-		return -EINVAL;
-
 	for (;;) {
 		struct bpf_insn *insn;
 		u8 class;
@@ -7841,7 +7882,7 @@ static int do_check(struct bpf_verifier_env *env)
 		}
 
 		regs = cur_regs(env);
-		env->insn_aux_data[env->insn_idx].seen = true;
+		env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
 		prev_insn_idx = env->insn_idx;
 
 		if (class == BPF_ALU || class == BPF_ALU64) {
@@ -8027,21 +8068,6 @@ static int do_check(struct bpf_verifier_env *env)
 			if (err)
 				return err;
 
-			/* eBPF calling convention is such that R0 is used
-			 * to return the value from eBPF program.
-			 * Make sure that it's readable at this time
-			 * of bpf_exit, which means that program wrote
-			 * something into it earlier
-			 */
-			err = check_reg_arg(env, BPF_REG_0, SRC_OP);
-			if (err)
-				return err;
-
-			if (is_pointer_value(env, BPF_REG_0)) {
-				verbose(env, "R0 leaks addr as return value\n");
-				return -EACCES;
-			}
-
 			err = check_return_code(env);
 			if (err)
 				return err;
@@ -8076,7 +8102,7 @@ process_bpf_exit:
 					return err;
 
 				env->insn_idx++;
-				env->insn_aux_data[env->insn_idx].seen = true;
+				env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
 			} else {
 				verbose(env, "invalid BPF_LD mode\n");
 				return -EINVAL;
@@ -8089,7 +8115,6 @@ process_bpf_exit:
 		env->insn_idx++;
 	}
 
-	env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
 	return 0;
 }
 
@@ -8149,6 +8174,11 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
 		return -EINVAL;
 	}
 
+	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
+		verbose(env, "bpf_struct_ops map cannot be used in prog\n");
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
@@ -8361,7 +8391,7 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env,
 	memcpy(new_data + off + cnt - 1, old_data + off,
 	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
 	for (i = off; i < off + cnt - 1; i++) {
-		new_data[i].seen = true;
+		new_data[i].seen = env->pass_cnt;
 		new_data[i].zext_dst = insn_has_def32(env, insn + i);
 	}
 	env->insn_aux_data = new_data;
@@ -8840,12 +8870,14 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 			convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
 			break;
 		case PTR_TO_BTF_ID:
-			if (type == BPF_WRITE) {
+			if (type == BPF_READ) {
+				insn->code = BPF_LDX | BPF_PROBE_MEM |
+					BPF_SIZE((insn)->code);
+				env->prog->aux->num_exentries++;
+			} else if (env->prog->type != BPF_PROG_TYPE_STRUCT_OPS) {
 				verbose(env, "Writes through BTF pointers are not allowed\n");
 				return -EINVAL;
 			}
-			insn->code = BPF_LDX | BPF_PROBE_MEM | BPF_SIZE((insn)->code);
-			env->prog->aux->num_exentries++;
 			continue;
 		default:
 			continue;
@@ -9425,6 +9457,30 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 			goto patch_call_imm;
 		}
 
+		if (prog->jit_requested && BITS_PER_LONG == 64 &&
+		    insn->imm == BPF_FUNC_jiffies64) {
+			struct bpf_insn ld_jiffies_addr[2] = {
+				BPF_LD_IMM64(BPF_REG_0,
+					     (unsigned long)&jiffies),
+			};
+
+			insn_buf[0] = ld_jiffies_addr[0];
+			insn_buf[1] = ld_jiffies_addr[1];
+			insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0,
+						  BPF_REG_0, 0);
+			cnt = 3;
+
+			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
+						       cnt);
+			if (!new_prog)
+				return -ENOMEM;
+
+			delta    += cnt - 1;
+			env->prog = prog = new_prog;
+			insn      = new_prog->insnsi + i + delta;
+			continue;
+		}
+
 patch_call_imm:
 		fn = env->ops->get_func_proto(insn->imm, env->prog);
 		/* all functions that have prototype and verifier allowed
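The hunk above inlines the new bpf_jiffies64() helper: on a 64-bit JIT the call is patched into a two-insn BPF_LD_IMM64 of &jiffies followed by a single BPF_DW load, avoiding the helper call entirely. A minimal usage sketch, assuming the helper is exposed through libbpf's generated helper definitions; the section and program names are illustrative:

// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("socket")
int sample_jiffies(struct __sk_buff *skb)
{
	/* on 64-bit JITs fixup_bpf_calls() rewrites this call into a
	 * direct load of the kernel's jiffies variable
	 */
	__u64 now = bpf_jiffies64();

	return now & 1;	/* use the value so the call is not dead code */
}

char _license[] SEC("license") = "GPL";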
@@ -9471,6 +9527,7 @@ static void free_states(struct bpf_verifier_env *env)
 		kfree(sl);
 		sl = sln;
 	}
+	env->free_list = NULL;
 
 	if (!env->explored_states)
 		return;
@@ -9484,11 +9541,164 @@ static void free_states(struct bpf_verifier_env *env)
 			kfree(sl);
 			sl = sln;
 		}
+		env->explored_states[i] = NULL;
 	}
-
-	kvfree(env->explored_states);
 }
 
+/* The verifier is using insn_aux_data[] to store temporary data during
+ * verification and to store information for passes that run after the
+ * verification like dead code sanitization. do_check_common() for subprogram N
+ * may analyze many other subprograms. sanitize_insn_aux_data() clears all
+ * temporary data after do_check_common() finds that subprogram N cannot be
+ * verified independently. pass_cnt counts the number of times
+ * do_check_common() was run and insn->aux->seen tells the pass number
+ * insn_aux_data was touched. These variables are compared to clear temporary
+ * data from failed pass. For testing and experiments do_check_common() can be
+ * run multiple times even when prior attempt to verify is unsuccessful.
+ */
+static void sanitize_insn_aux_data(struct bpf_verifier_env *env)
+{
+	struct bpf_insn *insn = env->prog->insnsi;
+	struct bpf_insn_aux_data *aux;
+	int i, class;
+
+	for (i = 0; i < env->prog->len; i++) {
+		class = BPF_CLASS(insn[i].code);
+		if (class != BPF_LDX && class != BPF_STX)
+			continue;
+		aux = &env->insn_aux_data[i];
+		if (aux->seen != env->pass_cnt)
+			continue;
+		memset(aux, 0, offsetof(typeof(*aux), orig_idx));
+	}
+}
+
+static int do_check_common(struct bpf_verifier_env *env, int subprog)
+{
+	struct bpf_verifier_state *state;
+	struct bpf_reg_state *regs;
+	int ret, i;
+
+	env->prev_linfo = NULL;
+	env->pass_cnt++;
+
+	state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+	state->curframe = 0;
+	state->speculative = false;
+	state->branches = 1;
+	state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
+	if (!state->frame[0]) {
+		kfree(state);
+		return -ENOMEM;
+	}
+	env->cur_state = state;
+	init_func_state(env, state->frame[0],
+			BPF_MAIN_FUNC /* callsite */,
+			0 /* frameno */,
+			subprog);
+
+	regs = state->frame[state->curframe]->regs;
+	if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) {
+		ret = btf_prepare_func_args(env, subprog, regs);
+		if (ret)
+			goto out;
+		for (i = BPF_REG_1; i <= BPF_REG_5; i++) {
+			if (regs[i].type == PTR_TO_CTX)
+				mark_reg_known_zero(env, regs, i);
+			else if (regs[i].type == SCALAR_VALUE)
+				mark_reg_unknown(env, regs, i);
+		}
+	} else {
+		/* 1st arg to a function */
+		regs[BPF_REG_1].type = PTR_TO_CTX;
+		mark_reg_known_zero(env, regs, BPF_REG_1);
+		ret = btf_check_func_arg_match(env, subprog, regs);
+		if (ret == -EFAULT)
+			/* unlikely verifier bug. abort.
+			 * ret == 0 and ret < 0 are sadly acceptable for
+			 * main() function due to backward compatibility.
+			 * Like socket filter program may be written as:
+			 * int bpf_prog(struct pt_regs *ctx)
+			 * and never dereference that ctx in the program.
+			 * 'struct pt_regs' is a type mismatch for socket
+			 * filter that should be using 'struct __sk_buff'.
+			 */
+			goto out;
+	}
+
+	ret = do_check(env);
+out:
+	/* check for NULL is necessary, since cur_state can be freed inside
+	 * do_check() under memory pressure.
+	 */
+	if (env->cur_state) {
+		free_verifier_state(env->cur_state, true);
+		env->cur_state = NULL;
+	}
+	while (!pop_stack(env, NULL, NULL));
+	free_states(env);
+	if (ret)
+		/* clean aux data in case subprog was rejected */
+		sanitize_insn_aux_data(env);
+	return ret;
+}
+
+/* Verify all global functions in a BPF program one by one based on their BTF.
+ * All global functions must pass verification. Otherwise the whole program is rejected.
+ * Consider:
+ * int bar(int);
+ * int foo(int f)
+ * {
+ *    return bar(f);
+ * }
+ * int bar(int b)
+ * {
+ *    ...
+ * }
+ * foo() will be verified first for R1=any_scalar_value. During verification it
+ * will be assumed that bar() already verified successfully and call to bar()
+ * from foo() will be checked for type match only. Later bar() will be verified
+ * independently to check that it's safe for R1=any_scalar_value.
+ */
+static int do_check_subprogs(struct bpf_verifier_env *env)
+{
+	struct bpf_prog_aux *aux = env->prog->aux;
+	int i, ret;
+
+	if (!aux->func_info)
+		return 0;
+
+	for (i = 1; i < env->subprog_cnt; i++) {
+		if (aux->func_info_aux[i].linkage != BTF_FUNC_GLOBAL)
+			continue;
+		env->insn_idx = env->subprog_info[i].start;
+		WARN_ON_ONCE(env->insn_idx == 0);
+		ret = do_check_common(env, i);
+		if (ret) {
+			return ret;
+		} else if (env->log.level & BPF_LOG_LEVEL) {
+			verbose(env,
+				"Func#%d is safe for any args that match its prototype\n",
+				i);
+		}
+	}
+	return 0;
+}
+
+static int do_check_main(struct bpf_verifier_env *env)
+{
+	int ret;
+
+	env->insn_idx = 0;
+	ret = do_check_common(env, 0);
+	if (!ret)
+		env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
+	return ret;
+}
+
+
 static void print_verification_stats(struct bpf_verifier_env *env)
 {
 	int i;
@@ -9513,9 +9723,62 @@ static void print_verification_stats(struct bpf_verifier_env *env)
 		env->peak_states, env->longest_mark_read_walk);
 }
 
+static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
+{
+	const struct btf_type *t, *func_proto;
+	const struct bpf_struct_ops *st_ops;
+	const struct btf_member *member;
+	struct bpf_prog *prog = env->prog;
+	u32 btf_id, member_idx;
+	const char *mname;
+
+	btf_id = prog->aux->attach_btf_id;
+	st_ops = bpf_struct_ops_find(btf_id);
+	if (!st_ops) {
+		verbose(env, "attach_btf_id %u is not a supported struct\n",
+			btf_id);
+		return -ENOTSUPP;
+	}
+
+	t = st_ops->type;
+	member_idx = prog->expected_attach_type;
+	if (member_idx >= btf_type_vlen(t)) {
+		verbose(env, "attach to invalid member idx %u of struct %s\n",
+			member_idx, st_ops->name);
+		return -EINVAL;
+	}
+
+	member = &btf_type_member(t)[member_idx];
+	mname = btf_name_by_offset(btf_vmlinux, member->name_off);
+	func_proto = btf_type_resolve_func_ptr(btf_vmlinux, member->type,
+					       NULL);
+	if (!func_proto) {
+		verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n",
+			mname, member_idx, st_ops->name);
+		return -EINVAL;
+	}
+
+	if (st_ops->check_member) {
+		int err = st_ops->check_member(t, member);
+
+		if (err) {
+			verbose(env, "attach to unsupported member %s of struct %s\n",
+				mname, st_ops->name);
+			return err;
+		}
+	}
+
+	prog->aux->attach_func_proto = func_proto;
+	prog->aux->attach_func_name = mname;
+	env->ops = st_ops->verifier_ops;
+
+	return 0;
+}
+
 static int check_attach_btf_id(struct bpf_verifier_env *env)
 {
 	struct bpf_prog *prog = env->prog;
+	bool prog_extension = prog->type == BPF_PROG_TYPE_EXT;
 	struct bpf_prog *tgt_prog = prog->aux->linked_prog;
 	u32 btf_id = prog->aux->attach_btf_id;
 	const char prefix[] = "btf_trace_";
@@ -9528,7 +9791,10 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
 	long addr;
 	u64 key;
 
-	if (prog->type != BPF_PROG_TYPE_TRACING)
+	if (prog->type == BPF_PROG_TYPE_STRUCT_OPS)
+		return check_struct_ops_btf_id(env);
+
+	if (prog->type != BPF_PROG_TYPE_TRACING && !prog_extension)
 		return 0;
 
 	if (!btf_id) {
@@ -9564,8 +9830,59 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
 			return -EINVAL;
 		}
 		conservative = aux->func_info_aux[subprog].unreliable;
+		if (prog_extension) {
+			if (conservative) {
+				verbose(env,
+					"Cannot replace static functions\n");
+				return -EINVAL;
+			}
+			if (!prog->jit_requested) {
+				verbose(env,
+					"Extension programs should be JITed\n");
+				return -EINVAL;
+			}
+			env->ops = bpf_verifier_ops[tgt_prog->type];
+		}
+		if (!tgt_prog->jited) {
+			verbose(env, "Can attach to only JITed progs\n");
+			return -EINVAL;
+		}
+		if (tgt_prog->type == prog->type) {
+			/* Cannot fentry/fexit another fentry/fexit program.
+			 * Cannot attach program extension to another extension.
+			 * It's ok to attach fentry/fexit to extension program.
+			 */
+			verbose(env, "Cannot recursively attach\n");
+			return -EINVAL;
+		}
+		if (tgt_prog->type == BPF_PROG_TYPE_TRACING &&
+		    prog_extension &&
+		    (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY ||
+		     tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) {
+			/* Program extensions can extend all program types
+			 * except fentry/fexit. The reason is the following.
+			 * The fentry/fexit programs are used for performance
+			 * analysis, stats and can be attached to any program
+			 * type except themselves. When extension program is
+			 * replacing XDP function it is necessary to allow
+			 * performance analysis of all functions. Both original
+			 * XDP program and its program extension. Hence
+			 * attaching fentry/fexit to BPF_PROG_TYPE_EXT is
+			 * allowed. If extending of fentry/fexit was allowed it
+			 * would be possible to create long call chain
+			 * fentry->extension->fentry->extension beyond
+			 * reasonable stack size. Hence extending fentry is not
+			 * allowed.
+			 */
+			verbose(env, "Cannot extend fentry/fexit\n");
+			return -EINVAL;
+		}
 		key = ((u64)aux->id) << 32 | btf_id;
 	} else {
+		if (prog_extension) {
+			verbose(env, "Cannot replace kernel functions\n");
+			return -EINVAL;
+		}
 		key = btf_id;
 	}
 
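The comment block above spells out the attach rules for program extensions (BPF_PROG_TYPE_EXT): they may replace a global function of an already loaded, JITed program, but cannot replace static functions, kernel functions, or fentry/fexit programs. A minimal sketch of such an extension, assuming the "freplace/<func>" section convention this series adds on the libbpf side; the target function name handle_packet is hypothetical:

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Replaces the global function handle_packet() in a previously loaded
 * XDP program; the kernel type-checks the two prototypes via BTF
 * (btf_check_type_match() below) before allowing the attach.
 */
SEC("freplace/handle_packet")
int new_handle_packet(struct xdp_md *ctx)
{
	return XDP_PASS;	/* replacement behavior */
}

char _license[] SEC("license") = "GPL";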
@@ -9603,6 +9920,10 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
 		prog->aux->attach_func_proto = t;
 		prog->aux->attach_btf_trace = true;
 		return 0;
+	default:
+		if (!prog_extension)
+			return -EINVAL;
+		/* fallthrough */
 	case BPF_TRACE_FENTRY:
 	case BPF_TRACE_FEXIT:
 		if (!btf_type_is_func(t)) {
@@ -9610,6 +9931,9 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
 				btf_id);
 			return -EINVAL;
 		}
+		if (prog_extension &&
+		    btf_check_type_match(env, prog, btf, t))
+			return -EINVAL;
 		t = btf_type_by_id(btf, t->type);
 		if (!btf_type_is_func_proto(t))
 			return -EINVAL;
@@ -9633,18 +9957,6 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
 		if (ret < 0)
 			goto out;
 		if (tgt_prog) {
-			if (!tgt_prog->jited) {
-				/* for now */
-				verbose(env, "Can trace only JITed BPF progs\n");
-				ret = -EINVAL;
-				goto out;
-			}
-			if (tgt_prog->type == BPF_PROG_TYPE_TRACING) {
-				/* prevent cycles */
-				verbose(env, "Cannot recursively attach\n");
-				ret = -EINVAL;
-				goto out;
-			}
 			if (subprog == 0)
 				addr = (long) tgt_prog->bpf_func;
 			else
@@ -9666,8 +9978,6 @@ out:
 		if (ret)
 			bpf_trampoline_put(tr);
 		return ret;
-	default:
-		return -EINVAL;
 	}
 }
 
@@ -9737,10 +10047,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
 		goto skip_full_check;
 	}
 
-	ret = check_attach_btf_id(env);
-	if (ret)
-		goto skip_full_check;
-
 	env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
 		env->strict_alignment = true;
@@ -9777,22 +10083,22 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
 	if (ret < 0)
 		goto skip_full_check;
 
+	ret = check_attach_btf_id(env);
+	if (ret)
+		goto skip_full_check;
+
 	ret = check_cfg(env);
 	if (ret < 0)
 		goto skip_full_check;
 
-	ret = do_check(env);
-	if (env->cur_state) {
-		free_verifier_state(env->cur_state, true);
-		env->cur_state = NULL;
-	}
+	ret = do_check_subprogs(env);
+	ret = ret ?: do_check_main(env);
 
 	if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux))
 		ret = bpf_prog_offload_finalize(env);
 
 skip_full_check:
-	while (!pop_stack(env, NULL, NULL));
-	free_states(env);
+	kvfree(env->explored_states);
 
 	if (ret == 0)
 		ret = check_max_stack_depth(env);