bpf: Use bpf_prog_run_pin_on_cpu() at simple call sites.
All of these cases are strictly of the form:

	preempt_disable();
	BPF_PROG_RUN(...);
	preempt_enable();

Replace this with bpf_prog_run_pin_on_cpu() which wraps BPF_PROG_RUN()
with:

	migrate_disable();
	BPF_PROG_RUN(...);
	migrate_enable();

On non-RT enabled kernels this maps to preempt_disable/enable() and on RT
enabled kernels this solely prevents migration, which is sufficient as
there is no requirement to prevent reentrancy to any BPF program from a
preempting task. The only requirement is that the program stays on the
same CPU. Therefore, this is a trivially correct transformation.

The seccomp loop does not need protection over the loop. It only needs
protection per BPF filter program.

[ tglx: Converted to bpf_prog_run_pin_on_cpu() ]

Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200224145643.691493094@linutronix.de
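For reference, a minimal sketch of what the replacement wrapper does,
reconstructed purely from the description above (the helper comes from the
parent change; treat the exact body and signature as an approximation, not
the authoritative definition):

	static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
						  const void *ctx)
	{
		u32 ret;

		/* On non-RT kernels migrate_disable() maps to
		 * preempt_disable(), so these call sites behave exactly as
		 * before. On RT kernels it only pins the task to the current
		 * CPU, which is all the callers below require.
		 */
		migrate_disable();
		ret = BPF_PROG_RUN(prog, ctx);
		migrate_enable();
		return ret;
	}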
commit 3d9f773cf2
parent 37e1d92022
committed by Alexei Starovoitov
net/core/flow_dissector.c
@@ -920,9 +920,7 @@ bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
 		      (int)FLOW_DISSECTOR_F_STOP_AT_ENCAP);
 	flow_keys->flags = flags;
 
-	preempt_disable();
-	result = BPF_PROG_RUN(prog, ctx);
-	preempt_enable();
+	result = bpf_prog_run_pin_on_cpu(prog, ctx);
 
 	flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, nhoff, hlen);
 	flow_keys->thoff = clamp_t(u16, flow_keys->thoff,
net/core/skmsg.c
@@ -628,7 +628,6 @@ int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
 	struct bpf_prog *prog;
 	int ret;
 
-	preempt_disable();
 	rcu_read_lock();
 	prog = READ_ONCE(psock->progs.msg_parser);
 	if (unlikely(!prog)) {
@@ -638,7 +637,7 @@ int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
 
 	sk_msg_compute_data_pointers(msg);
 	msg->sk = sk;
-	ret = BPF_PROG_RUN(prog, msg);
+	ret = bpf_prog_run_pin_on_cpu(prog, msg);
 	ret = sk_psock_map_verd(ret, msg->sk_redir);
 	psock->apply_bytes = msg->apply_bytes;
 	if (ret == __SK_REDIRECT) {
@@ -653,7 +652,6 @@ int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
 	}
 out:
 	rcu_read_unlock();
-	preempt_enable();
 	return ret;
 }
 EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);
@@ -665,9 +663,7 @@ static int sk_psock_bpf_run(struct sk_psock *psock, struct bpf_prog *prog,
 
 	skb->sk = psock->sk;
 	bpf_compute_data_end_sk_skb(skb);
-	preempt_disable();
-	ret = BPF_PROG_RUN(prog, skb);
-	preempt_enable();
+	ret = bpf_prog_run_pin_on_cpu(prog, skb);
 	/* strparser clones the skb before handing it to a upper layer,
 	 * meaning skb_orphan has been called. We NULL sk on the way out
 	 * to ensure we don't trigger a BUG_ON() in skb/sk operations
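The seccomp change referred to in the message is not among the hunks shown
here. As a hedged illustration of "protection per BPF filter program", the
filter walk could look roughly like the sketch below; the field names
(prog, prev) and the ACTION_ONLY() helper are assumptions modeled on
seccomp_run_filters(), not taken from this diff:

	/* Hypothetical sketch: only each individual program run is pinned
	 * to the current CPU; the walk over the filter chain needs no
	 * preemption protection of its own.
	 */
	static u32 seccomp_run_filters_sketch(const struct seccomp_data *sd,
					      struct seccomp_filter *f)
	{
		u32 ret = SECCOMP_RET_ALLOW;

		for (; f; f = f->prev) {
			u32 cur_ret = bpf_prog_run_pin_on_cpu(f->prog, sd);

			/* Keep the most restrictive action seen so far. */
			if (ACTION_ONLY(cur_ret) < ACTION_ONLY(ret))
				ret = cur_ret;
		}
		return ret;
	}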