mptcp: grafting MPJ subflow earlier

Later patches need to ensure that all MPJ subflows are grafted to the
msk socket before accept() completion.

Currently the grafting happens under the msk socket lock: potentially
at msk release_cb time, which makes satisfying the above condition a bit
tricky.

Move the MPJ subflow grafting earlier, under the msk data lock, so that
we can use such lock as a synchronization point.

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
Link: https://patch.msgid.link/20251121-net-next-mptcp-memcg-backlog-imp-v1-3-1f34b6c1e0b1@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Paolo Abeni
2025-11-21 18:02:02 +01:00
committed by Jakub Kicinski
parent bd92dd8e03
commit e777a7fb06

View File

@@ -895,12 +895,6 @@ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
mptcp_subflow_joined(msk, ssk);
spin_unlock_bh(&msk->fallback_lock);
/* attach to msk socket only after we are sure we will deal with it
* at close time
*/
if (sk->sk_socket && !ssk->sk_socket)
mptcp_sock_graft(ssk, sk->sk_socket);
mptcp_subflow_ctx(ssk)->subflow_id = msk->subflow_id++;
mptcp_sockopt_sync_locked(msk, ssk);
mptcp_stop_tout_timer(sk);
@@ -3647,6 +3641,20 @@ void mptcp_sock_graft(struct sock *sk, struct socket *parent)
write_unlock_bh(&sk->sk_callback_lock);
}
/* Safe to invoke without holding the msk socket lock: grabbing the
 * callback lock lets us read sk_socket without needing
 * {READ_,WRITE_}ONCE annotations on it.
 */
static void mptcp_sock_check_graft(struct sock *sk, struct sock *ssk)
{
	struct socket *parent_sock;

	/* Snapshot the parent's socket pointer under the callback lock. */
	write_lock_bh(&sk->sk_callback_lock);
	parent_sock = sk->sk_socket;
	write_unlock_bh(&sk->sk_callback_lock);

	/* Nothing to graft onto if the msk is not attached to a socket. */
	if (!parent_sock)
		return;

	mptcp_sock_graft(ssk, parent_sock);
}
bool mptcp_finish_join(struct sock *ssk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
@@ -3662,7 +3670,9 @@ bool mptcp_finish_join(struct sock *ssk)
return false;
}
/* active subflow, already present inside the conn_list */
/* Active subflow, already present inside the conn_list; is grafted
* either by __mptcp_subflow_connect() or accept.
*/
if (!list_empty(&subflow->node)) {
spin_lock_bh(&msk->fallback_lock);
if (!msk->allow_subflows) {
@@ -3689,11 +3699,17 @@ bool mptcp_finish_join(struct sock *ssk)
if (ret) {
sock_hold(ssk);
list_add_tail(&subflow->node, &msk->conn_list);
mptcp_sock_check_graft(parent, ssk);
}
} else {
sock_hold(ssk);
list_add_tail(&subflow->node, &msk->join_list);
__set_bit(MPTCP_FLUSH_JOIN_LIST, &msk->cb_flags);
/* In case of later failures, __mptcp_flush_join_list() will
* properly orphan the ssk via mptcp_close_ssk().
*/
mptcp_sock_check_graft(parent, ssk);
}
mptcp_data_unlock(parent);