net/tcp_fastopen: remove tcp_fastopen_ctx_lock

Remove the (per netns) spinlock in favor of xchg() atomic operations.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Yuchung Cheng <ycheng@google.com>
Cc: Neal Cardwell <ncardwell@google.com>
Acked-by: Wei Wang <weiwan@google.com>
Link: https://lore.kernel.org/r/20210719101107.3203943-1-eric.dumazet@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit e93abb840a (parent fef773fc81)
Author: Eric Dumazet, 2021-07-19 03:11:07 -07:00
Committed by: Jakub Kicinski
3 changed files with 3 additions and 16 deletions

net/ipv4/tcp_fastopen.c

@@ -55,12 +55,7 @@ void tcp_fastopen_ctx_destroy(struct net *net)
 {
 	struct tcp_fastopen_context *ctxt;
 
-	spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);
-
-	ctxt = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
-			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
-	rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, NULL);
-	spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);
+	ctxt = xchg((__force struct tcp_fastopen_context **)&net->ipv4.tcp_fastopen_ctx, NULL);
 
 	if (ctxt)
 		call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free);
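
The destroy path above collapses a lock/dereference/assign/unlock sequence into a single atomic swap: xchg() detaches the context and hands back the old pointer, and call_rcu() still defers the free past any in-flight readers. Below is a minimal userspace sketch of that detach pattern, assuming C11 atomic_exchange() as a stand-in for the kernel's xchg(); the grace period call_rcu() provides is modeled by an immediate free, and all names are illustrative rather than kernel API.

/*
 * Sketch only: C11 atomic_exchange() models the kernel's xchg();
 * free() models the deferred tcp_fastopen_ctx_free() via call_rcu().
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct fo_ctx { int num; };

static _Atomic(struct fo_ctx *) ctx_ptr;

int main(void)
{
	struct fo_ctx *ctx = malloc(sizeof(*ctx));
	struct fo_ctx *old;

	ctx->num = 1;
	old = atomic_exchange(&ctx_ptr, ctx);	/* install; old == NULL here */
	printf("displaced: %p\n", (void *)old);

	old = atomic_exchange(&ctx_ptr, NULL);	/* detach, as the destroy path does */
	free(old);				/* kernel would call_rcu() instead */
	return 0;
}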
@@ -89,18 +84,12 @@ int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
 		ctx->num = 1;
 	}
 
-	spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);
 	if (sk) {
 		q = &inet_csk(sk)->icsk_accept_queue.fastopenq;
-		octx = rcu_dereference_protected(q->ctx,
-			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
-		rcu_assign_pointer(q->ctx, ctx);
+		octx = xchg((__force struct tcp_fastopen_context **)&q->ctx, ctx);
 	} else {
-		octx = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
-			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
-		rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, ctx);
+		octx = xchg((__force struct tcp_fastopen_context **)&net->ipv4.tcp_fastopen_ctx, ctx);
 	}
-	spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);
 
 	if (octx)
 		call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
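
Why the lock can go entirely: even if two writers race in tcp_fastopen_reset_cipher(), each xchg() displaces exactly one old context, so every context reaches call_rcu() exactly once, and xchg() is fully ordered, so it also provides the publish barrier that rcu_assign_pointer() supplied for readers under rcu_read_lock(). A hedged sketch of that exactly-once property with two racing writers, again using C11 atomics and pthreads in place of the kernel primitives (names are illustrative):

/*
 * Sketch only: two racing writers each displace exactly one old
 * pointer via atomic_exchange(), so nothing leaks and nothing is
 * freed twice - the property that made the spinlock unnecessary.
 */
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static _Atomic(int *) ctx_ptr;

static void *writer(void *arg)
{
	int *mine = malloc(sizeof(*mine));
	int *old;

	*mine = (int)(long)arg;
	/* Atomic swap: no two writers can displace the same old context. */
	old = atomic_exchange(&ctx_ptr, mine);
	free(old);		/* kernel defers this via call_rcu() */
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, writer, (void *)1L);
	pthread_create(&t2, NULL, writer, (void *)2L);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	free(atomic_exchange(&ctx_ptr, NULL));	/* detach the survivor */
	puts("no leak, no double free");
	return 0;
}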