|
@@ -2978,7 +2978,9 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
|
|
|
regno);
|
|
|
return -EACCES;
|
|
|
}
|
|
|
- err = __check_mem_access(env, regno, off, size, reg->range,
|
|
|
+
|
|
|
+ err = reg->range < 0 ? -EINVAL :
|
|
|
+ __check_mem_access(env, regno, off, size, reg->range,
|
|
|
zero_size_allowed);
|
|
|
if (err) {
|
|
|
verbose(env, "R%d offset is outside of the packet\n", regno);
|
|
@@ -4991,50 +4993,41 @@ static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
|
|
|
/* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
|
|
|
* are now invalid, so turn them into unknown SCALAR_VALUE.
|
|
|
*/
|
|
|
-static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
|
|
|
- struct bpf_func_state *state)
|
|
|
+static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
|
|
|
{
|
|
|
- struct bpf_reg_state *regs = state->regs, *reg;
|
|
|
- int i;
|
|
|
-
|
|
|
- for (i = 0; i < MAX_BPF_REG; i++)
|
|
|
- if (reg_is_pkt_pointer_any(®s[i]))
|
|
|
- mark_reg_unknown(env, regs, i);
|
|
|
+ struct bpf_func_state *state;
|
|
|
+ struct bpf_reg_state *reg;
|
|
|
|
|
|
- bpf_for_each_spilled_reg(i, state, reg) {
|
|
|
- if (!reg)
|
|
|
- continue;
|
|
|
+ bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
|
|
|
if (reg_is_pkt_pointer_any(reg))
|
|
|
__mark_reg_unknown(env, reg);
|
|
|
- }
|
|
|
+ }));
|
|
|
}
|
|
|
|
|
|
-static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
|
|
|
-{
|
|
|
- struct bpf_verifier_state *vstate = env->cur_state;
|
|
|
- int i;
|
|
|
-
|
|
|
- for (i = 0; i <= vstate->curframe; i++)
|
|
|
- __clear_all_pkt_pointers(env, vstate->frame[i]);
|
|
|
-}
|
|
|
+enum {
|
|
|
+ AT_PKT_END = -1,
|
|
|
+ BEYOND_PKT_END = -2,
|
|
|
+};
|
|
|
|
|
|
-static void release_reg_references(struct bpf_verifier_env *env,
|
|
|
- struct bpf_func_state *state,
|
|
|
- int ref_obj_id)
|
|
|
+static void mark_pkt_end(struct bpf_verifier_state *vstate, int regn, bool range_open)
|
|
|
{
|
|
|
- struct bpf_reg_state *regs = state->regs, *reg;
|
|
|
- int i;
|
|
|
+ struct bpf_func_state *state = vstate->frame[vstate->curframe];
|
|
|
+ struct bpf_reg_state *reg = &state->regs[regn];
|
|
|
|
|
|
- for (i = 0; i < MAX_BPF_REG; i++)
|
|
|
- if (regs[i].ref_obj_id == ref_obj_id)
|
|
|
- mark_reg_unknown(env, regs, i);
|
|
|
+ if (reg->type != PTR_TO_PACKET)
|
|
|
+ /* PTR_TO_PACKET_META is not supported yet */
|
|
|
+ return;
|
|
|
|
|
|
- bpf_for_each_spilled_reg(i, state, reg) {
|
|
|
- if (!reg)
|
|
|
- continue;
|
|
|
- if (reg->ref_obj_id == ref_obj_id)
|
|
|
- __mark_reg_unknown(env, reg);
|
|
|
- }
|
|
|
+ /* The 'reg' is pkt > pkt_end or pkt >= pkt_end.
|
|
|
+ * How far beyond pkt_end it goes is unknown.
|
|
|
+ * if (!range_open) it's the case of pkt >= pkt_end
|
|
|
+ * if (range_open) it's the case of pkt > pkt_end
|
|
|
+ * hence this pointer is at least 1 byte bigger than pkt_end
|
|
|
+ */
|
|
|
+ if (range_open)
|
|
|
+ reg->range = BEYOND_PKT_END;
|
|
|
+ else
|
|
|
+ reg->range = AT_PKT_END;
|
|
|
}
|
|
|
|
|
|
/* The pointer with the specified id has released its reference to kernel
|
|
@@ -5043,16 +5036,22 @@ static void release_reg_references(struct bpf_verifier_env *env,
|
|
|
static int release_reference(struct bpf_verifier_env *env,
|
|
|
int ref_obj_id)
|
|
|
{
|
|
|
- struct bpf_verifier_state *vstate = env->cur_state;
|
|
|
+ struct bpf_func_state *state;
|
|
|
+ struct bpf_reg_state *reg;
|
|
|
int err;
|
|
|
- int i;
|
|
|
|
|
|
err = release_reference_state(cur_func(env), ref_obj_id);
|
|
|
if (err)
|
|
|
return err;
|
|
|
|
|
|
- for (i = 0; i <= vstate->curframe; i++)
|
|
|
- release_reg_references(env, vstate->frame[i], ref_obj_id);
|
|
|
+ bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
|
|
|
+ if (reg->ref_obj_id == ref_obj_id) {
|
|
|
+ if (!env->allow_ptr_leaks)
|
|
|
+ __mark_reg_not_init(env, reg);
|
|
|
+ else
|
|
|
+ __mark_reg_unknown(env, reg);
|
|
|
+ }
|
|
|
+ }));
|
|
|
|
|
|
return 0;
|
|
|
}
|
|
@@ -7191,35 +7190,14 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
-static void __find_good_pkt_pointers(struct bpf_func_state *state,
|
|
|
- struct bpf_reg_state *dst_reg,
|
|
|
- enum bpf_reg_type type, u16 new_range)
|
|
|
-{
|
|
|
- struct bpf_reg_state *reg;
|
|
|
- int i;
|
|
|
-
|
|
|
- for (i = 0; i < MAX_BPF_REG; i++) {
|
|
|
- reg = &state->regs[i];
|
|
|
- if (reg->type == type && reg->id == dst_reg->id)
|
|
|
- /* keep the maximum range already checked */
|
|
|
- reg->range = max(reg->range, new_range);
|
|
|
- }
|
|
|
-
|
|
|
- bpf_for_each_spilled_reg(i, state, reg) {
|
|
|
- if (!reg)
|
|
|
- continue;
|
|
|
- if (reg->type == type && reg->id == dst_reg->id)
|
|
|
- reg->range = max(reg->range, new_range);
|
|
|
- }
|
|
|
-}
|
|
|
-
|
|
|
static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
|
|
|
struct bpf_reg_state *dst_reg,
|
|
|
enum bpf_reg_type type,
|
|
|
bool range_right_open)
|
|
|
{
|
|
|
- u16 new_range;
|
|
|
- int i;
|
|
|
+ struct bpf_func_state *state;
|
|
|
+ struct bpf_reg_state *reg;
|
|
|
+ int new_range;
|
|
|
|
|
|
if (dst_reg->off < 0 ||
|
|
|
(dst_reg->off == 0 && range_right_open))
|
|
@@ -7284,9 +7262,11 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
|
|
|
* the range won't allow anything.
|
|
|
* dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
|
|
|
*/
|
|
|
- for (i = 0; i <= vstate->curframe; i++)
|
|
|
- __find_good_pkt_pointers(vstate->frame[i], dst_reg, type,
|
|
|
- new_range);
|
|
|
+ bpf_for_each_reg_in_vstate(vstate, state, reg, ({
|
|
|
+ if (reg->type == type && reg->id == dst_reg->id)
|
|
|
+ /* keep the maximum range already checked */
|
|
|
+ reg->range = max(reg->range, new_range);
|
|
|
+ }));
|
|
|
}
|
|
|
|
|
|
static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode)
|
|
@@ -7470,6 +7450,67 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
|
|
|
return is_branch64_taken(reg, val, opcode);
|
|
|
}
|
|
|
|
|
|
+static int flip_opcode(u32 opcode)
|
|
|
+{
|
|
|
+ /* How can we transform "a <op> b" into "b <op> a"? */
|
|
|
+ static const u8 opcode_flip[16] = {
|
|
|
+ /* these stay the same */
|
|
|
+ [BPF_JEQ >> 4] = BPF_JEQ,
|
|
|
+ [BPF_JNE >> 4] = BPF_JNE,
|
|
|
+ [BPF_JSET >> 4] = BPF_JSET,
|
|
|
+ /* these swap "lesser" and "greater" (L and G in the opcodes) */
|
|
|
+ [BPF_JGE >> 4] = BPF_JLE,
|
|
|
+ [BPF_JGT >> 4] = BPF_JLT,
|
|
|
+ [BPF_JLE >> 4] = BPF_JGE,
|
|
|
+ [BPF_JLT >> 4] = BPF_JGT,
|
|
|
+ [BPF_JSGE >> 4] = BPF_JSLE,
|
|
|
+ [BPF_JSGT >> 4] = BPF_JSLT,
|
|
|
+ [BPF_JSLE >> 4] = BPF_JSGE,
|
|
|
+ [BPF_JSLT >> 4] = BPF_JSGT
|
|
|
+ };
|
|
|
+ return opcode_flip[opcode >> 4];
|
|
|
+}
|
|
|
+
|
|
|
+static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg,
|
|
|
+ struct bpf_reg_state *src_reg,
|
|
|
+ u8 opcode)
|
|
|
+{
|
|
|
+ struct bpf_reg_state *pkt;
|
|
|
+
|
|
|
+ if (src_reg->type == PTR_TO_PACKET_END) {
|
|
|
+ pkt = dst_reg;
|
|
|
+ } else if (dst_reg->type == PTR_TO_PACKET_END) {
|
|
|
+ pkt = src_reg;
|
|
|
+ opcode = flip_opcode(opcode);
|
|
|
+ } else {
|
|
|
+ return -1;
|
|
|
+ }
|
|
|
+
|
|
|
+ if (pkt->range >= 0)
|
|
|
+ return -1;
|
|
|
+
|
|
|
+ switch (opcode) {
|
|
|
+ case BPF_JLE:
|
|
|
+ /* pkt <= pkt_end */
|
|
|
+ fallthrough;
|
|
|
+ case BPF_JGT:
|
|
|
+ /* pkt > pkt_end */
|
|
|
+ if (pkt->range == BEYOND_PKT_END)
|
|
|
+ /* pkt has at least one extra byte beyond pkt_end */
|
|
|
+ return opcode == BPF_JGT;
|
|
|
+ break;
|
|
|
+ case BPF_JLT:
|
|
|
+ /* pkt < pkt_end */
|
|
|
+ fallthrough;
|
|
|
+ case BPF_JGE:
|
|
|
+ /* pkt >= pkt_end */
|
|
|
+ if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END)
|
|
|
+ return opcode == BPF_JGE;
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ return -1;
|
|
|
+}
|
|
|
+
|
|
|
/* Adjusts the register min/max values in the case that the dst_reg is the
|
|
|
* variable register that we are working on, and src_reg is a constant or we're
|
|
|
* simply doing a BPF_K check.
|
|
@@ -7640,23 +7681,7 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
|
|
|
u64 val, u32 val32,
|
|
|
u8 opcode, bool is_jmp32)
|
|
|
{
|
|
|
- /* How can we transform "a <op> b" into "b <op> a"? */
|
|
|
- static const u8 opcode_flip[16] = {
|
|
|
- /* these stay the same */
|
|
|
- [BPF_JEQ >> 4] = BPF_JEQ,
|
|
|
- [BPF_JNE >> 4] = BPF_JNE,
|
|
|
- [BPF_JSET >> 4] = BPF_JSET,
|
|
|
- /* these swap "lesser" and "greater" (L and G in the opcodes) */
|
|
|
- [BPF_JGE >> 4] = BPF_JLE,
|
|
|
- [BPF_JGT >> 4] = BPF_JLT,
|
|
|
- [BPF_JLE >> 4] = BPF_JGE,
|
|
|
- [BPF_JLT >> 4] = BPF_JGT,
|
|
|
- [BPF_JSGE >> 4] = BPF_JSLE,
|
|
|
- [BPF_JSGT >> 4] = BPF_JSLT,
|
|
|
- [BPF_JSLE >> 4] = BPF_JSGE,
|
|
|
- [BPF_JSLT >> 4] = BPF_JSGT
|
|
|
- };
|
|
|
- opcode = opcode_flip[opcode >> 4];
|
|
|
+ opcode = flip_opcode(opcode);
|
|
|
/* This uses zero as "not present in table"; luckily the zero opcode,
|
|
|
* BPF_JA, can't get here.
|
|
|
*/
|
|
@@ -7754,7 +7779,7 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state,
|
|
|
reg->ref_obj_id = 0;
|
|
|
} else if (!reg_may_point_to_spin_lock(reg)) {
|
|
|
/* For not-NULL ptr, reg->ref_obj_id will be reset
|
|
|
- * in release_reg_references().
|
|
|
+ * in release_reference().
|
|
|
*
|
|
|
* reg->id is still used by spin_lock ptr. Other
|
|
|
* than spin_lock ptr type, reg->id can be reset.
|
|
@@ -7764,22 +7789,6 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state,
|
|
|
}
|
|
|
}
|
|
|
|
|
|
-static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id,
|
|
|
- bool is_null)
|
|
|
-{
|
|
|
- struct bpf_reg_state *reg;
|
|
|
- int i;
|
|
|
-
|
|
|
- for (i = 0; i < MAX_BPF_REG; i++)
|
|
|
- mark_ptr_or_null_reg(state, &state->regs[i], id, is_null);
|
|
|
-
|
|
|
- bpf_for_each_spilled_reg(i, state, reg) {
|
|
|
- if (!reg)
|
|
|
- continue;
|
|
|
- mark_ptr_or_null_reg(state, reg, id, is_null);
|
|
|
- }
|
|
|
-}
|
|
|
-
|
|
|
/* The logic is similar to find_good_pkt_pointers(), both could eventually
|
|
|
* be folded together at some point.
|
|
|
*/
|
|
@@ -7787,10 +7796,9 @@ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
|
|
|
bool is_null)
|
|
|
{
|
|
|
struct bpf_func_state *state = vstate->frame[vstate->curframe];
|
|
|
- struct bpf_reg_state *regs = state->regs;
|
|
|
+ struct bpf_reg_state *regs = state->regs, *reg;
|
|
|
u32 ref_obj_id = regs[regno].ref_obj_id;
|
|
|
u32 id = regs[regno].id;
|
|
|
- int i;
|
|
|
|
|
|
if (ref_obj_id && ref_obj_id == id && is_null)
|
|
|
/* regs[regno] is in the " == NULL" branch.
|
|
@@ -7799,8 +7807,9 @@ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
|
|
|
*/
|
|
|
WARN_ON_ONCE(release_reference_state(state, id));
|
|
|
|
|
|
- for (i = 0; i <= vstate->curframe; i++)
|
|
|
- __mark_ptr_or_null_regs(vstate->frame[i], id, is_null);
|
|
|
+ bpf_for_each_reg_in_vstate(vstate, state, reg, ({
|
|
|
+ mark_ptr_or_null_reg(state, reg, id, is_null);
|
|
|
+ }));
|
|
|
}
|
|
|
|
|
|
static bool try_match_pkt_pointers(const struct bpf_insn *insn,
|
|
@@ -7825,6 +7834,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
|
|
|
/* pkt_data' > pkt_end, pkt_meta' > pkt_data */
|
|
|
find_good_pkt_pointers(this_branch, dst_reg,
|
|
|
dst_reg->type, false);
|
|
|
+ mark_pkt_end(other_branch, insn->dst_reg, true);
|
|
|
} else if ((dst_reg->type == PTR_TO_PACKET_END &&
|
|
|
src_reg->type == PTR_TO_PACKET) ||
|
|
|
(reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
|
|
@@ -7832,6 +7842,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
|
|
|
/* pkt_end > pkt_data', pkt_data > pkt_meta' */
|
|
|
find_good_pkt_pointers(other_branch, src_reg,
|
|
|
src_reg->type, true);
|
|
|
+ mark_pkt_end(this_branch, insn->src_reg, false);
|
|
|
} else {
|
|
|
return false;
|
|
|
}
|
|
@@ -7844,6 +7855,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
|
|
|
/* pkt_data' < pkt_end, pkt_meta' < pkt_data */
|
|
|
find_good_pkt_pointers(other_branch, dst_reg,
|
|
|
dst_reg->type, true);
|
|
|
+ mark_pkt_end(this_branch, insn->dst_reg, false);
|
|
|
} else if ((dst_reg->type == PTR_TO_PACKET_END &&
|
|
|
src_reg->type == PTR_TO_PACKET) ||
|
|
|
(reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
|
|
@@ -7851,6 +7863,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
|
|
|
/* pkt_end < pkt_data', pkt_data > pkt_meta' */
|
|
|
find_good_pkt_pointers(this_branch, src_reg,
|
|
|
src_reg->type, false);
|
|
|
+ mark_pkt_end(other_branch, insn->src_reg, true);
|
|
|
} else {
|
|
|
return false;
|
|
|
}
|
|
@@ -7863,6 +7876,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
|
|
|
/* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
|
|
|
find_good_pkt_pointers(this_branch, dst_reg,
|
|
|
dst_reg->type, true);
|
|
|
+ mark_pkt_end(other_branch, insn->dst_reg, false);
|
|
|
} else if ((dst_reg->type == PTR_TO_PACKET_END &&
|
|
|
src_reg->type == PTR_TO_PACKET) ||
|
|
|
(reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
|
|
@@ -7870,6 +7884,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
|
|
|
/* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
|
|
|
find_good_pkt_pointers(other_branch, src_reg,
|
|
|
src_reg->type, false);
|
|
|
+ mark_pkt_end(this_branch, insn->src_reg, true);
|
|
|
} else {
|
|
|
return false;
|
|
|
}
|
|
@@ -7882,6 +7897,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
|
|
|
/* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
|
|
|
find_good_pkt_pointers(other_branch, dst_reg,
|
|
|
dst_reg->type, false);
|
|
|
+ mark_pkt_end(this_branch, insn->dst_reg, true);
|
|
|
} else if ((dst_reg->type == PTR_TO_PACKET_END &&
|
|
|
src_reg->type == PTR_TO_PACKET) ||
|
|
|
(reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
|
|
@@ -7889,6 +7905,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
|
|
|
/* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
|
|
|
find_good_pkt_pointers(this_branch, src_reg,
|
|
|
src_reg->type, true);
|
|
|
+ mark_pkt_end(other_branch, insn->src_reg, false);
|
|
|
} else {
|
|
|
return false;
|
|
|
}
|
|
@@ -7905,23 +7922,11 @@ static void find_equal_scalars(struct bpf_verifier_state *vstate,
|
|
|
{
|
|
|
struct bpf_func_state *state;
|
|
|
struct bpf_reg_state *reg;
|
|
|
- int i, j;
|
|
|
-
|
|
|
- for (i = 0; i <= vstate->curframe; i++) {
|
|
|
- state = vstate->frame[i];
|
|
|
- for (j = 0; j < MAX_BPF_REG; j++) {
|
|
|
- reg = &state->regs[j];
|
|
|
- if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
|
|
|
- *reg = *known_reg;
|
|
|
- }
|
|
|
|
|
|
- bpf_for_each_spilled_reg(j, state, reg) {
|
|
|
- if (!reg)
|
|
|
- continue;
|
|
|
- if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
|
|
|
- *reg = *known_reg;
|
|
|
- }
|
|
|
- }
|
|
|
+ bpf_for_each_reg_in_vstate(vstate, state, reg, ({
|
|
|
+ if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
|
|
|
+ *reg = *known_reg;
|
|
|
+ }));
|
|
|
}
|
|
|
|
|
|
static int check_cond_jmp_op(struct bpf_verifier_env *env,
|
|
@@ -7988,6 +7993,10 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
|
|
|
src_reg->var_off.value,
|
|
|
opcode,
|
|
|
is_jmp32);
|
|
|
+ } else if (reg_is_pkt_pointer_any(dst_reg) &&
|
|
|
+ reg_is_pkt_pointer_any(src_reg) &&
|
|
|
+ !is_jmp32) {
|
|
|
+ pred = is_pkt_ptr_branch_taken(dst_reg, src_reg, opcode);
|
|
|
}
|
|
|
|
|
|
if (pred >= 0) {
|
|
@@ -7996,7 +8005,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
|
|
|
*/
|
|
|
if (!__is_pointer_value(false, dst_reg))
|
|
|
err = mark_chain_precision(env, insn->dst_reg);
|
|
|
- if (BPF_SRC(insn->code) == BPF_X && !err)
|
|
|
+ if (BPF_SRC(insn->code) == BPF_X && !err &&
|
|
|
+ !__is_pointer_value(false, src_reg))
|
|
|
err = mark_chain_precision(env, insn->src_reg);
|
|
|
if (err)
|
|
|
return err;
|