bpf: fix the return value of push_stack

In [1] Eduard mentioned that on push_stack() failure the verifier code
should return -ENOMEM instead of -EFAULT. After checking the other
call sites I found that the code inconsistently returns either -ENOMEM
or -EFAULT. This patch makes push_stack() (and the similar
push_async_cb()) return an ERR_PTR() encoding the actual error, and
updates all call sites to propagate it, so that error codes are always
assigned properly.

  [1] https://lore.kernel.org/bpf/20250615085943.3871208-1-a.s.protopopov@gmail.com
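
The fix builds on the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() idiom from
<linux/err.h>: a function returning a pointer encodes the errno in the
pointer value itself, and the caller decodes and propagates it. A
minimal sketch of the idiom, using a hypothetical struct foo (not part
of this patch):

	#include <linux/err.h>
	#include <linux/slab.h>

	struct foo { int x; };

	/* On failure, encode the errno in the returned pointer. */
	static struct foo *alloc_foo(void)
	{
		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (!f)
			return ERR_PTR(-ENOMEM);
		return f;
	}

	static int use_foo(void)
	{
		struct foo *f = alloc_foo();

		/* Decode and propagate the original error code. */
		if (IS_ERR(f))
			return PTR_ERR(f);
		kfree(f);
		return 0;
	}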

Signed-off-by: Anton Protopopov <a.s.protopopov@gmail.com>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20251019202145.3944697-2-a.s.protopopov@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2109,7 +2109,7 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
 
 	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL_ACCOUNT);
 	if (!elem)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	elem->insn_idx = insn_idx;
 	elem->prev_insn_idx = prev_insn_idx;
@@ -2119,12 +2119,12 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
 	env->stack_size++;
 	err = copy_verifier_state(&elem->st, cur);
 	if (err)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	elem->st.speculative |= speculative;
 	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
 		verbose(env, "The sequence of %d jumps is too complex.\n",
 			env->stack_size);
-		return NULL;
+		return ERR_PTR(-E2BIG);
 	}
 	if (elem->st.parent) {
 		++elem->st.parent->branches;
@@ -2919,7 +2919,7 @@ static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
 
 	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL_ACCOUNT);
 	if (!elem)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	elem->insn_idx = insn_idx;
 	elem->prev_insn_idx = prev_insn_idx;
@@ -2931,7 +2931,7 @@ static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
 		verbose(env,
 			"The sequence of %d jumps is too complex for async cb.\n",
 			env->stack_size);
-		return NULL;
+		return ERR_PTR(-E2BIG);
 	}
 	/* Unlike push_stack() do not copy_verifier_state().
 	 * The caller state doesn't matter.
@@ -2942,7 +2942,7 @@ static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
 	elem->st.in_sleepable = is_sleepable;
 	frame = kzalloc(sizeof(*frame), GFP_KERNEL_ACCOUNT);
 	if (!frame)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	init_func_state(env, frame,
 			BPF_MAIN_FUNC /* callsite */,
 			0 /* frameno within this callchain */,
@@ -9045,8 +9045,8 @@ static int process_iter_next_call(struct bpf_verifier_env *env, int insn_idx,
 		prev_st = find_prev_entry(env, cur_st->parent, insn_idx);
 		/* branch out active iter state */
 		queued_st = push_stack(env, insn_idx + 1, insn_idx, false);
-		if (!queued_st)
-			return -ENOMEM;
+		if (IS_ERR(queued_st))
+			return PTR_ERR(queued_st);
 
 		queued_iter = get_iter_from_state(queued_st, meta);
 		queued_iter->iter.state = BPF_ITER_STATE_ACTIVE;
@@ -10616,8 +10616,8 @@ static int push_callback_call(struct bpf_verifier_env *env, struct bpf_insn *ins
 		async_cb = push_async_cb(env, env->subprog_info[subprog].start,
 					 insn_idx, subprog,
 					 is_async_cb_sleepable(env, insn));
-		if (!async_cb)
-			return -EFAULT;
+		if (IS_ERR(async_cb))
+			return PTR_ERR(async_cb);
 
 		callee = async_cb->frame[0];
 		callee->async_entry_cnt = caller->async_entry_cnt + 1;
@@ -10633,8 +10633,8 @@ static int push_callback_call(struct bpf_verifier_env *env, struct bpf_insn *ins
 	 * proceed with next instruction within current frame.
 	 */
 	callback_state = push_stack(env, env->subprog_info[subprog].start, insn_idx, false);
-	if (!callback_state)
-		return -ENOMEM;
+	if (IS_ERR(callback_state))
+		return PTR_ERR(callback_state);
 
 	err = setup_func_entry(env, subprog, insn_idx, set_callee_state_cb,
 			       callback_state);
@@ -13859,9 +13859,9 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 		struct bpf_reg_state *regs;
 
 		branch = push_stack(env, env->insn_idx + 1, env->insn_idx, false);
-		if (!branch) {
+		if (IS_ERR(branch)) {
 			verbose(env, "failed to push state for failed lock acquisition\n");
-			return -ENOMEM;
+			return PTR_ERR(branch);
 		}
 
 		regs = branch->frame[branch->curframe]->regs;
@@ -14316,16 +14316,15 @@ struct bpf_sanitize_info {
 	bool mask_to_left;
 };
 
-static struct bpf_verifier_state *
-sanitize_speculative_path(struct bpf_verifier_env *env,
-			  const struct bpf_insn *insn,
-			  u32 next_idx, u32 curr_idx)
+static int sanitize_speculative_path(struct bpf_verifier_env *env,
+				     const struct bpf_insn *insn,
+				     u32 next_idx, u32 curr_idx)
 {
 	struct bpf_verifier_state *branch;
 	struct bpf_reg_state *regs;
 
 	branch = push_stack(env, next_idx, curr_idx, true);
-	if (branch && insn) {
+	if (!IS_ERR(branch) && insn) {
 		regs = branch->frame[branch->curframe]->regs;
 		if (BPF_SRC(insn->code) == BPF_K) {
 			mark_reg_unknown(env, regs, insn->dst_reg);
@@ -14334,7 +14333,7 @@ sanitize_speculative_path(struct bpf_verifier_env *env,
 			mark_reg_unknown(env, regs, insn->src_reg);
 		}
 	}
-	return branch;
+	return PTR_ERR_OR_ZERO(branch);
 }
 
 static int sanitize_ptr_alu(struct bpf_verifier_env *env,
@@ -14353,7 +14352,6 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
 	u8 opcode = BPF_OP(insn->code);
 	u32 alu_state, alu_limit;
 	struct bpf_reg_state tmp;
-	bool ret;
 	int err;
 
 	if (can_skip_alu_sanitation(env, insn))
@@ -14426,11 +14424,12 @@ do_sim:
 		tmp = *dst_reg;
 		copy_register_state(dst_reg, ptr_reg);
 	}
-	ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
-					env->insn_idx);
-	if (!ptr_is_dst_reg && ret)
+	err = sanitize_speculative_path(env, NULL, env->insn_idx + 1, env->insn_idx);
+	if (err < 0)
+		return REASON_STACK;
+	if (!ptr_is_dst_reg)
 		*dst_reg = tmp;
-	return !ret ? REASON_STACK : 0;
+	return 0;
 }
 
 static void sanitize_mark_insn_seen(struct bpf_verifier_env *env)
@@ -16750,8 +16749,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 
 			/* branch out 'fallthrough' insn as a new state to explore */
 			queued_st = push_stack(env, idx + 1, idx, false);
-			if (!queued_st)
-				return -ENOMEM;
+			if (IS_ERR(queued_st))
+				return PTR_ERR(queued_st);
 
 			queued_st->may_goto_depth++;
 			if (prev_st)
@@ -16829,10 +16828,11 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 			 * the fall-through branch for simulation under speculative
 			 * execution.
 			 */
-			if (!env->bypass_spec_v1 &&
-			    !sanitize_speculative_path(env, insn, *insn_idx + 1,
-						       *insn_idx))
-				return -EFAULT;
+			if (!env->bypass_spec_v1) {
+				err = sanitize_speculative_path(env, insn, *insn_idx + 1, *insn_idx);
+				if (err < 0)
+					return err;
+			}
 			if (env->log.level & BPF_LOG_LEVEL)
 				print_insn_state(env, this_branch, this_branch->curframe);
 			*insn_idx += insn->off;
@@ -16842,11 +16842,12 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 			 * program will go. If needed, push the goto branch for
 			 * simulation under speculative execution.
 			 */
-			if (!env->bypass_spec_v1 &&
-			    !sanitize_speculative_path(env, insn,
-						       *insn_idx + insn->off + 1,
-						       *insn_idx))
-				return -EFAULT;
+			if (!env->bypass_spec_v1) {
+				err = sanitize_speculative_path(env, insn, *insn_idx + insn->off + 1,
+								*insn_idx);
+				if (err < 0)
+					return err;
+			}
 			if (env->log.level & BPF_LOG_LEVEL)
 				print_insn_state(env, this_branch, this_branch->curframe);
 			return 0;
@@ -16867,10 +16868,9 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 			return err;
 	}
 
-	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
-				  false);
-	if (!other_branch)
-		return -EFAULT;
+	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx, false);
+	if (IS_ERR(other_branch))
+		return PTR_ERR(other_branch);
 
 	other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
 	if (BPF_SRC(insn->code) == BPF_X) {
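
The net effect at every call site is a switch from a NULL check with a
locally chosen errno to propagation of the encoded error; schematically
(a sketch of the before/after pattern, not an excerpt from the patch):

	/* before: each caller picked its own errno on failure */
	st = push_stack(env, insn_idx + 1, insn_idx, false);
	if (!st)
		return -ENOMEM;	/* or -EFAULT, depending on the site */

	/* after: push_stack() itself encodes -ENOMEM or -E2BIG */
	st = push_stack(env, insn_idx + 1, insn_idx, false);
	if (IS_ERR(st))
		return PTR_ERR(st);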