Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Alexei Starovoitov says:

====================
pull-request: bpf-next 2019-02-16

The following pull-request contains BPF updates for your *net-next* tree.

The main changes are:

1) numerous libbpf API improvements, from Andrii, Andrey, Yonghong.

2) test all bpf progs in alu32 mode, from Jiong.

3) skb->sk access and bpf_sk_fullsock(), bpf_tcp_sock() helpers, from Martin.

4) support for IP encap in lwt bpf progs, from Peter.

5) remove XDP_QUERY_XSK_UMEM dead code, from Jan.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
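As background for item 3), the sketch below shows how the new __sk_buff->sk field and the bpf_sk_fullsock()/bpf_tcp_sock() helpers are meant to be chained from a cgroup/skb program, mirroring the selftest pattern added in this series. The section name, include paths and the snd_cwnd check are illustrative assumptions, not code from this merge.

/* Minimal sketch, assuming a libbpf-style build where these helper
 * declarations are available. Return value 1 means "allow the packet".
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup_skb/egress")
int read_tcp_sock_fields(struct __sk_buff *skb)
{
	struct bpf_tcp_sock *tp;
	struct bpf_sock *sk;

	sk = skb->sk;			/* new field; PTR_TO_SOCK_COMMON_OR_NULL */
	if (!sk)
		return 1;

	sk = bpf_sk_fullsock(sk);	/* NULL for request/time-wait sockets */
	if (!sk)
		return 1;

	tp = bpf_tcp_sock(sk);		/* NULL if this is not a TCP socket */
	if (!tp)
		return 1;

	if (tp->snd_cwnd == 0)		/* direct read of a bpf_tcp_sock field */
		return 1;

	return 1;
}

char _license[] SEC("license") = "GPL";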
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -35,6 +35,7 @@ static DECLARE_RWSEM(bpf_devs_lock);
 struct bpf_offload_dev {
 	const struct bpf_prog_offload_ops *ops;
 	struct list_head netdevs;
+	void *priv;
 };
 
 struct bpf_offload_netdev {
@@ -669,7 +670,7 @@ unlock:
 EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);
 
 struct bpf_offload_dev *
-bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops)
+bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv)
 {
 	struct bpf_offload_dev *offdev;
 	int err;
@@ -688,6 +689,7 @@ bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops)
 		return ERR_PTR(-ENOMEM);
 
 	offdev->ops = ops;
+	offdev->priv = priv;
 	INIT_LIST_HEAD(&offdev->netdevs);
 
 	return offdev;
@@ -700,3 +702,9 @@ void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev)
 	kfree(offdev);
 }
 EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy);
+
+void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev)
+{
+	return offdev->priv;
+}
+EXPORT_SYMBOL_GPL(bpf_offload_dev_priv);
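The offload.c hunks above let a driver stash a private pointer in the offload device and retrieve it later. A hedged sketch of the intended use follows: only bpf_offload_dev_create(), bpf_offload_dev_priv() and bpf_offload_dev_destroy() are the real exported API; the my_drv_* names and the empty ops table are hypothetical.

/* Illustrative driver glue, not taken from an in-tree driver. */
#include <linux/bpf.h>
#include <linux/err.h>

struct my_drv {				/* hypothetical per-device driver state */
	struct bpf_offload_dev *bpf_dev;
	/* ... */
};

static const struct bpf_prog_offload_ops my_bpf_dev_ops = {
	/* .prepare, .translate, .destroy, ... driver callbacks go here */
};

static int my_drv_bpf_init(struct my_drv *drv)
{
	/* New second argument: opaque data kept by the BPF core. */
	drv->bpf_dev = bpf_offload_dev_create(&my_bpf_dev_ops, drv);
	return PTR_ERR_OR_ZERO(drv->bpf_dev);
}

static struct my_drv *my_drv_from_offdev(struct bpf_offload_dev *bpf_dev)
{
	/* New accessor: recover the pointer passed at create time. */
	return bpf_offload_dev_priv(bpf_dev);
}

static void my_drv_bpf_uninit(struct my_drv *drv)
{
	bpf_offload_dev_destroy(drv->bpf_dev);
}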
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -331,10 +331,19 @@ static bool type_is_pkt_pointer(enum bpf_reg_type type)
 	       type == PTR_TO_PACKET_META;
 }
 
+static bool type_is_sk_pointer(enum bpf_reg_type type)
+{
+	return type == PTR_TO_SOCKET ||
+		type == PTR_TO_SOCK_COMMON ||
+		type == PTR_TO_TCP_SOCK;
+}
+
 static bool reg_type_may_be_null(enum bpf_reg_type type)
 {
 	return type == PTR_TO_MAP_VALUE_OR_NULL ||
-	       type == PTR_TO_SOCKET_OR_NULL;
+	       type == PTR_TO_SOCKET_OR_NULL ||
+	       type == PTR_TO_SOCK_COMMON_OR_NULL ||
+	       type == PTR_TO_TCP_SOCK_OR_NULL;
 }
 
 static bool type_is_refcounted(enum bpf_reg_type type)
@@ -377,6 +386,12 @@ static bool is_release_function(enum bpf_func_id func_id)
 	return func_id == BPF_FUNC_sk_release;
 }
 
+static bool is_acquire_function(enum bpf_func_id func_id)
+{
+	return func_id == BPF_FUNC_sk_lookup_tcp ||
+		func_id == BPF_FUNC_sk_lookup_udp;
+}
+
 /* string representation of 'enum bpf_reg_type' */
 static const char * const reg_type_str[] = {
 	[NOT_INIT]		= "?",
@@ -392,6 +407,10 @@ static const char * const reg_type_str[] = {
 	[PTR_TO_FLOW_KEYS]	= "flow_keys",
 	[PTR_TO_SOCKET]		= "sock",
 	[PTR_TO_SOCKET_OR_NULL] = "sock_or_null",
+	[PTR_TO_SOCK_COMMON]	= "sock_common",
+	[PTR_TO_SOCK_COMMON_OR_NULL] = "sock_common_or_null",
+	[PTR_TO_TCP_SOCK]	= "tcp_sock",
+	[PTR_TO_TCP_SOCK_OR_NULL] = "tcp_sock_or_null",
 };
 
 static char slot_type_char[] = {
@@ -618,13 +637,10 @@ static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
 }
 
 /* release function corresponding to acquire_reference_state(). Idempotent. */
-static int __release_reference_state(struct bpf_func_state *state, int ptr_id)
+static int release_reference_state(struct bpf_func_state *state, int ptr_id)
 {
 	int i, last_idx;
 
-	if (!ptr_id)
-		return -EFAULT;
-
 	last_idx = state->acquired_refs - 1;
 	for (i = 0; i < state->acquired_refs; i++) {
 		if (state->refs[i].id == ptr_id) {
@@ -636,21 +652,7 @@ static int __release_reference_state(struct bpf_func_state *state, int ptr_id)
 			return 0;
 		}
 	}
-	return -EFAULT;
-}
-
-/* variation on the above for cases where we expect that there must be an
- * outstanding reference for the specified ptr_id.
- */
-static int release_reference_state(struct bpf_verifier_env *env, int ptr_id)
-{
-	struct bpf_func_state *state = cur_func(env);
-	int err;
-
-	err = __release_reference_state(state, ptr_id);
-	if (WARN_ON_ONCE(err != 0))
-		verbose(env, "verifier internal error: can't release reference\n");
-	return err;
+	return -EINVAL;
 }
 
 static int transfer_reference_state(struct bpf_func_state *dst,
@@ -1209,6 +1211,10 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
 	case CONST_PTR_TO_MAP:
 	case PTR_TO_SOCKET:
 	case PTR_TO_SOCKET_OR_NULL:
+	case PTR_TO_SOCK_COMMON:
+	case PTR_TO_SOCK_COMMON_OR_NULL:
+	case PTR_TO_TCP_SOCK:
+	case PTR_TO_TCP_SOCK_OR_NULL:
 		return true;
 	default:
 		return false;
@@ -1640,12 +1646,14 @@ static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
 	return 0;
 }
 
-static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off,
-			     int size, enum bpf_access_type t)
+static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
+			     u32 regno, int off, int size,
+			     enum bpf_access_type t)
 {
 	struct bpf_reg_state *regs = cur_regs(env);
 	struct bpf_reg_state *reg = &regs[regno];
-	struct bpf_insn_access_aux info;
+	struct bpf_insn_access_aux info = {};
+	bool valid;
 
 	if (reg->smin_value < 0) {
 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
@@ -1653,13 +1661,31 @@ static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off,
 		return -EACCES;
 	}
 
-	if (!bpf_sock_is_valid_access(off, size, t, &info)) {
-		verbose(env, "invalid bpf_sock access off=%d size=%d\n",
-			off, size);
-		return -EACCES;
+	switch (reg->type) {
+	case PTR_TO_SOCK_COMMON:
+		valid = bpf_sock_common_is_valid_access(off, size, t, &info);
+		break;
+	case PTR_TO_SOCKET:
+		valid = bpf_sock_is_valid_access(off, size, t, &info);
+		break;
+	case PTR_TO_TCP_SOCK:
+		valid = bpf_tcp_sock_is_valid_access(off, size, t, &info);
+		break;
+	default:
+		valid = false;
 	}
 
-	return 0;
+	if (valid) {
+		env->insn_aux_data[insn_idx].ctx_field_size =
+			info.ctx_field_size;
+		return 0;
+	}
+
+	verbose(env, "R%d invalid %s access off=%d size=%d\n",
+		regno, reg_type_str[reg->type], off, size);
+
+	return -EACCES;
 }
 
 static bool __is_pointer_value(bool allow_ptr_leaks,
@@ -1685,8 +1711,14 @@ static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
 {
 	const struct bpf_reg_state *reg = reg_state(env, regno);
 
-	return reg->type == PTR_TO_CTX ||
-	       reg->type == PTR_TO_SOCKET;
+	return reg->type == PTR_TO_CTX;
+}
+
+static bool is_sk_reg(struct bpf_verifier_env *env, int regno)
+{
+	const struct bpf_reg_state *reg = reg_state(env, regno);
+
+	return type_is_sk_pointer(reg->type);
 }
 
 static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
@@ -1797,6 +1829,12 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
 	case PTR_TO_SOCKET:
 		pointer_desc = "sock ";
 		break;
+	case PTR_TO_SOCK_COMMON:
+		pointer_desc = "sock_common ";
+		break;
+	case PTR_TO_TCP_SOCK:
+		pointer_desc = "tcp_sock ";
+		break;
 	default:
 		break;
 	}
@@ -2000,11 +2038,14 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 		 * PTR_TO_PACKET[_META,_END]. In the latter
 		 * case, we know the offset is zero.
 		 */
-		if (reg_type == SCALAR_VALUE)
+		if (reg_type == SCALAR_VALUE) {
 			mark_reg_unknown(env, regs, value_regno);
-		else
+		} else {
 			mark_reg_known_zero(env, regs,
 					    value_regno);
+			if (reg_type_may_be_null(reg_type))
+				regs[value_regno].id = ++env->id_gen;
+		}
 		regs[value_regno].type = reg_type;
 	}
 
@@ -2050,12 +2091,13 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 		err = check_flow_keys_access(env, off, size);
 		if (!err && t == BPF_READ && value_regno >= 0)
 			mark_reg_unknown(env, regs, value_regno);
-	} else if (reg->type == PTR_TO_SOCKET) {
+	} else if (type_is_sk_pointer(reg->type)) {
 		if (t == BPF_WRITE) {
-			verbose(env, "cannot write into socket\n");
+			verbose(env, "R%d cannot write into %s\n",
+				regno, reg_type_str[reg->type]);
 			return -EACCES;
 		}
-		err = check_sock_access(env, regno, off, size, t);
+		err = check_sock_access(env, insn_idx, regno, off, size, t);
 		if (!err && value_regno >= 0)
 			mark_reg_unknown(env, regs, value_regno);
 	} else {
@@ -2099,7 +2141,8 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
 
 	if (is_ctx_reg(env, insn->dst_reg) ||
 	    is_pkt_reg(env, insn->dst_reg) ||
-	    is_flow_key_reg(env, insn->dst_reg)) {
+	    is_flow_key_reg(env, insn->dst_reg) ||
+	    is_sk_reg(env, insn->dst_reg)) {
 		verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
 			insn->dst_reg,
 			reg_type_str[reg_state(env, insn->dst_reg)->type]);
@@ -2366,6 +2409,11 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
 		err = check_ctx_reg(env, reg, regno);
 		if (err < 0)
 			return err;
+	} else if (arg_type == ARG_PTR_TO_SOCK_COMMON) {
+		expected_type = PTR_TO_SOCK_COMMON;
+		/* Any sk pointer can be ARG_PTR_TO_SOCK_COMMON */
+		if (!type_is_sk_pointer(type))
+			goto err_type;
 	} else if (arg_type == ARG_PTR_TO_SOCKET) {
 		expected_type = PTR_TO_SOCKET;
 		if (type != expected_type)
@@ -2780,7 +2828,7 @@ static int release_reference(struct bpf_verifier_env *env,
 	for (i = 0; i <= vstate->curframe; i++)
 		release_reg_references(env, vstate->frame[i], meta->ptr_id);
 
-	return release_reference_state(env, meta->ptr_id);
+	return release_reference_state(cur_func(env), meta->ptr_id);
 }
 
 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
@@ -3046,8 +3094,11 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
 		}
 	} else if (is_release_function(func_id)) {
 		err = release_reference(env, &meta);
-		if (err)
+		if (err) {
+			verbose(env, "func %s#%d reference has not been acquired before\n",
+				func_id_name(func_id), func_id);
 			return err;
+		}
 	}
 
 	regs = cur_regs(env);
@@ -3096,12 +3147,23 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
 			regs[BPF_REG_0].id = ++env->id_gen;
 		}
 	} else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) {
-		int id = acquire_reference_state(env, insn_idx);
-		if (id < 0)
-			return id;
 		mark_reg_known_zero(env, regs, BPF_REG_0);
 		regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL;
-		regs[BPF_REG_0].id = id;
+		if (is_acquire_function(func_id)) {
+			int id = acquire_reference_state(env, insn_idx);
+
+			if (id < 0)
+				return id;
+			/* For release_reference() */
+			regs[BPF_REG_0].id = id;
+		} else {
+			/* For mark_ptr_or_null_reg() */
+			regs[BPF_REG_0].id = ++env->id_gen;
+		}
+	} else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) {
+		mark_reg_known_zero(env, regs, BPF_REG_0);
+		regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL;
+		regs[BPF_REG_0].id = ++env->id_gen;
 	} else {
 		verbose(env, "unknown return type %d of func %s#%d\n",
 			fn->ret_type, func_id_name(func_id), func_id);
@@ -3361,6 +3423,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 	case PTR_TO_PACKET_END:
 	case PTR_TO_SOCKET:
 	case PTR_TO_SOCKET_OR_NULL:
+	case PTR_TO_SOCK_COMMON:
+	case PTR_TO_SOCK_COMMON_OR_NULL:
+	case PTR_TO_TCP_SOCK:
+	case PTR_TO_TCP_SOCK_OR_NULL:
 		verbose(env, "R%d pointer arithmetic on %s prohibited\n",
 			dst, reg_type_str[ptr_reg->type]);
 		return -EACCES;
@@ -4594,6 +4660,10 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state,
 		}
 	} else if (reg->type == PTR_TO_SOCKET_OR_NULL) {
 		reg->type = PTR_TO_SOCKET;
+	} else if (reg->type == PTR_TO_SOCK_COMMON_OR_NULL) {
+		reg->type = PTR_TO_SOCK_COMMON;
+	} else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) {
+		reg->type = PTR_TO_TCP_SOCK;
 	}
 	if (is_null || !(reg_is_refcounted(reg) ||
 			 reg_may_point_to_spin_lock(reg))) {
@@ -4618,7 +4688,7 @@ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
 	int i, j;
 
 	if (reg_is_refcounted_or_null(&regs[regno]) && is_null)
-		__release_reference_state(state, id);
+		release_reference_state(state, id);
 
 	for (i = 0; i < MAX_BPF_REG; i++)
 		mark_ptr_or_null_reg(state, &regs[i], id, is_null);
@@ -5787,6 +5857,10 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
 	case PTR_TO_FLOW_KEYS:
 	case PTR_TO_SOCKET:
 	case PTR_TO_SOCKET_OR_NULL:
+	case PTR_TO_SOCK_COMMON:
+	case PTR_TO_SOCK_COMMON_OR_NULL:
+	case PTR_TO_TCP_SOCK:
+	case PTR_TO_TCP_SOCK_OR_NULL:
 		/* Only valid matches are exact, which memcmp() above
 		 * would have accepted
 		 */
@@ -6107,6 +6181,10 @@ static bool reg_type_mismatch_ok(enum bpf_reg_type type)
 	case PTR_TO_CTX:
 	case PTR_TO_SOCKET:
 	case PTR_TO_SOCKET_OR_NULL:
+	case PTR_TO_SOCK_COMMON:
+	case PTR_TO_SOCK_COMMON_OR_NULL:
+	case PTR_TO_TCP_SOCK:
+	case PTR_TO_TCP_SOCK_OR_NULL:
 		return false;
 	default:
 		return true;
@@ -7109,8 +7187,12 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 		convert_ctx_access = ops->convert_ctx_access;
 		break;
 	case PTR_TO_SOCKET:
+	case PTR_TO_SOCK_COMMON:
 		convert_ctx_access = bpf_sock_convert_ctx_access;
 		break;
+	case PTR_TO_TCP_SOCK:
+		convert_ctx_access = bpf_tcp_sock_convert_ctx_access;
+		break;
 	default:
 		continue;
 	}
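A consequence of the is_acquire_function()/RET_PTR_TO_SOCKET_OR_NULL changes above: only the socket-lookup helpers return reference-tracked pointers that must be paired with bpf_sk_release(), while skb->sk, bpf_sk_fullsock() and bpf_tcp_sock() results only need a NULL check. A sketch from the program side, assuming a classifier (tc) program where bpf_sk_lookup_tcp() is available; the tuple values are illustrative.

/* Sketch only; assumes a libbpf-style build. */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("classifier")
int sk_lookup_vs_sk_access(struct __sk_buff *skb)
{
	struct bpf_sock_tuple tuple = {
		.ipv4.daddr = bpf_htonl(0x7f000001),	/* 127.0.0.1, illustrative */
		.ipv4.dport = bpf_htons(80),
	};
	struct bpf_sock *sk;

	/* Acquire function: the verifier rejects the program unless every
	 * path releases the returned reference with bpf_sk_release(). */
	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
			       BPF_F_CURRENT_NETNS, 0);
	if (!sk)
		return TC_ACT_OK;

	/* Pointers from skb->sk / bpf_sk_fullsock() / bpf_tcp_sock() are not
	 * reference-tracked; a NULL check alone satisfies the verifier. */

	bpf_sk_release(sk);
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";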