bpf: Check the other end of slot_type for STACK_SPILL
[ Upstream commit 27113c59b6d0a587b29ae72d4ff3f832f58b0651 ]

Every 8 bytes of the stack is tracked by a bpf_stack_state.  Within each
bpf_stack_state, there is a 'u8 slot_type[8]' to track the type of each
byte.  Verifier tests slot_type[0] == STACK_SPILL to decide if the
spilled reg state is saved.  Verifier currently only saves the reg state
if the whole 8 bytes are spilled to the stack, so checking the
slot_type[7] is the same as checking slot_type[0].

The later patch will allow verifier to save the bounded scalar reg also
for <8 bytes spill.  There is a llvm patch [1] to ensure the <8 bytes
spill will be 8-byte aligned, so checking slot_type[7] instead of
slot_type[0] is required.

While at it, this patch refactors the slot_type[0] == STACK_SPILL test
into a new function is_spilled_reg() and changes the slot_type[0] check
to a slot_type[7] check in there also.

[1] https://reviews.llvm.org/D109073

Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20210922004934.624194-1-kafai@fb.com
Stable-dep-of: 529409ea92d5 ("bpf: propagate precision across all frames, not just the last one")
Signed-off-by: Sasha Levin <sashal@kernel.org>
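For illustration only, below is a minimal standalone C sketch of the layout
and check the message describes. BPF_REG_SIZE, the slot-type constants, and
struct bpf_stack_state are simplified stand-ins for the kernel definitions
(trimmed to what the check needs); the is_spilled_reg() body matches the one
added by the diff, and the 4-byte spill case is a hypothetical illustration
of the <8-byte spill the later patch enables.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define BPF_REG_SIZE 8	/* each bpf_stack_state tracks 8 stack bytes */

typedef unsigned char u8;

/* Simplified stand-in; the kernel enum has more members. */
enum bpf_stack_slot_type {
	STACK_INVALID,	/* nothing was stored in this byte */
	STACK_SPILL,	/* byte belongs to a spilled register */
	STACK_MISC,	/* program wrote some data into this byte */
};

struct bpf_stack_state {
	u8 slot_type[BPF_REG_SIZE];	/* one type per tracked byte */
};

/* The new helper: test the LAST byte of the slot.  For a full 8-byte
 * spill every byte is STACK_SPILL, so this agrees with the old
 * slot_type[0] test.  For an 8-byte-aligned <8-byte spill, only the
 * trailing bytes are STACK_SPILL, so slot_type[7] is the end that is
 * always marked (per the commit message and the llvm patch it cites).
 */
static bool is_spilled_reg(const struct bpf_stack_state *stack)
{
	return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL;
}

int main(void)
{
	struct bpf_stack_state slot;

	/* Full 8-byte spill: old and new checks agree. */
	memset(slot.slot_type, STACK_SPILL, BPF_REG_SIZE);
	printf("full spill:    %d\n", is_spilled_reg(&slot));	/* 1 */

	/* Hypothetical 4-byte spill in an 8-byte-aligned slot: the
	 * spilled bytes end at slot_type[7], while slot_type[0] stays
	 * STACK_MISC, so only the new check sees the spill.
	 */
	memset(slot.slot_type, STACK_MISC, BPF_REG_SIZE);
	memset(&slot.slot_type[BPF_REG_SIZE - 4], STACK_SPILL, 4);
	printf("partial spill: %d\n", is_spilled_reg(&slot));	/* 1 */

	return 0;
}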
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
@@ -562,6 +562,14 @@ const char *kernel_type_name(u32 id)
 				  btf_type_by_id(btf_vmlinux, id)->name_off);
 }
 
+/* The reg state of a pointer or a bounded scalar was saved when
+ * it was spilled to the stack.
+ */
+static bool is_spilled_reg(const struct bpf_stack_state *stack)
+{
+	return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL;
+}
+
 static void print_verifier_state(struct bpf_verifier_env *env,
 				 const struct bpf_func_state *state)
 {
@@ -666,7 +674,7 @@ static void print_verifier_state(struct bpf_verifier_env *env,
 			continue;
 		verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
 		print_liveness(env, state->stack[i].spilled_ptr.live);
-		if (state->stack[i].slot_type[0] == STACK_SPILL) {
+		if (is_spilled_reg(&state->stack[i])) {
 			reg = &state->stack[i].spilled_ptr;
 			t = reg->type;
 			verbose(env, "=%s", reg_type_str[t]);
@@ -2009,7 +2017,7 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env,
 			reg->precise = true;
 		}
 		for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
-			if (func->stack[j].slot_type[0] != STACK_SPILL)
+			if (!is_spilled_reg(&func->stack[j]))
 				continue;
 			reg = &func->stack[j].spilled_ptr;
 			if (reg->type != SCALAR_VALUE)
@@ -2051,7 +2059,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
 	}
 
 	while (spi >= 0) {
-		if (func->stack[spi].slot_type[0] != STACK_SPILL) {
+		if (!is_spilled_reg(&func->stack[spi])) {
 			stack_mask = 0;
 			break;
 		}
@@ -2150,7 +2158,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
 				return 0;
 			}
 
-			if (func->stack[i].slot_type[0] != STACK_SPILL) {
+			if (!is_spilled_reg(&func->stack[i])) {
 				stack_mask &= ~(1ull << i);
 				continue;
 			}
@@ -2348,7 +2356,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
 		/* regular write of data into stack destroys any spilled ptr */
 		state->stack[spi].spilled_ptr.type = NOT_INIT;
 		/* Mark slots as STACK_MISC if they belonged to spilled ptr. */
-		if (state->stack[spi].slot_type[0] == STACK_SPILL)
+		if (is_spilled_reg(&state->stack[spi]))
 			for (i = 0; i < BPF_REG_SIZE; i++)
 				state->stack[spi].slot_type[i] = STACK_MISC;
 
@@ -2562,7 +2570,7 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
 	stype = reg_state->stack[spi].slot_type;
 	reg = &reg_state->stack[spi].spilled_ptr;
 
-	if (stype[0] == STACK_SPILL) {
+	if (is_spilled_reg(&reg_state->stack[spi])) {
 		if (size != BPF_REG_SIZE) {
 			if (reg->type != SCALAR_VALUE) {
 				verbose_linfo(env, env->insn_idx, "; ");
@@ -4081,11 +4089,11 @@ static int check_stack_range_initialized(
 			goto mark;
 		}
 
-		if (state->stack[spi].slot_type[0] == STACK_SPILL &&
+		if (is_spilled_reg(&state->stack[spi]) &&
 		    state->stack[spi].spilled_ptr.type == PTR_TO_BTF_ID)
 			goto mark;
 
-		if (state->stack[spi].slot_type[0] == STACK_SPILL &&
+		if (is_spilled_reg(&state->stack[spi]) &&
 		    (state->stack[spi].spilled_ptr.type == SCALAR_VALUE ||
 		     env->allow_ptr_leaks)) {
 			if (clobber) {
@@ -9282,9 +9290,9 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
 			 * return false to continue verification of this path
 			 */
 			return false;
-		if (i % BPF_REG_SIZE)
+		if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1)
 			continue;
-		if (old->stack[spi].slot_type[0] != STACK_SPILL)
+		if (!is_spilled_reg(&old->stack[spi]))
 			continue;
 		if (!regsafe(env, &old->stack[spi].spilled_ptr,
 			     &cur->stack[spi].spilled_ptr, idmap))
@@ -9491,7 +9499,7 @@ static int propagate_precision(struct bpf_verifier_env *env,
 	}
 
 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
-		if (state->stack[i].slot_type[0] != STACK_SPILL)
+		if (!is_spilled_reg(&state->stack[i]))
 			continue;
 		state_reg = &state->stack[i].spilled_ptr;
 		if (state_reg->type != SCALAR_VALUE ||
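One hunk above is not a pure substitution: stacksafe() walks the old stack
byte by byte and previously fell through to its per-slot spill comparison
only at the first byte of each 8-byte slot (`i % BPF_REG_SIZE` is zero
there).  Since the spill marker now lives at the last byte, the loop skips
until `i % BPF_REG_SIZE == BPF_REG_SIZE - 1` instead.  A runnable sketch of
just that iteration pattern (the loop bound and the printout are simplified
stand-ins for stacksafe()'s real per-slot work):

#include <stdio.h>

#define BPF_REG_SIZE 8

int main(void)
{
	int i;

	/* Walk 16 bytes (two 8-byte slots) the way the updated loop
	 * does: per-byte work would run on every iteration, per-slot
	 * work only at the last byte of each slot, where the
	 * STACK_SPILL marker now lives.
	 */
	for (i = 0; i < 2 * BPF_REG_SIZE; i++) {
		if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1)
			continue;
		printf("per-slot check at byte %d (slot %d)\n",
		       i, i / BPF_REG_SIZE);	/* bytes 7 and 15 */
	}
	return 0;
}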