tcp: better handle TCP_TX_DELAY on established flows

Some applications use the TCP_TX_DELAY socket option after the TCP flow
is established.

Some metrics then need to be updated; otherwise TCP might take a long
time to adapt to the new (emulated) RTT.

This patch adjusts tp->srtt_us, tp->rtt_min, icsk_rto
and sk->sk_pacing_rate.

This is best effort; for instance, icsk_rto is reset
without taking icsk_backoff into account.
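
For illustration (not part of the patch), a minimal userspace sketch of
the pattern this change targets: enabling TCP_TX_DELAY on an
already-established connection. The option value is the extra emulated
delay in microseconds; the fallback define matches the uapi value.

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    #ifndef TCP_TX_DELAY
    #define TCP_TX_DELAY 37    /* include/uapi/linux/tcp.h */
    #endif

    /* Add 25 ms of emulated delay to an already-connected socket. */
    static int set_tx_delay(int fd)
    {
        int delay_us = 25000;

        return setsockopt(fd, IPPROTO_TCP, TCP_TX_DELAY,
                          &delay_us, sizeof(delay_us));
    }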

Signed-off-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20251013145926.833198-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 1c51450f1a (parent 6378e25ee1)
Author:    Eric Dumazet
Date:      2025-10-13 14:59:26 +00:00
Committer: Jakub Kicinski

 3 files changed, 31 insertions(+), 6 deletions(-)

--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -461,6 +461,8 @@ enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
 void tcp_enter_loss(struct sock *sk);
 void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
 void tcp_clear_retrans(struct tcp_sock *tp);
+void tcp_update_pacing_rate(struct sock *sk);
+void tcp_set_rto(struct sock *sk);
 void tcp_update_metrics(struct sock *sk);
 void tcp_init_metrics(struct sock *sk);
 void tcp_metrics_init(void);

--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3583,9 +3583,12 @@ static int tcp_repair_options_est(struct sock *sk, sockptr_t optbuf,
 DEFINE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
 EXPORT_IPV6_MOD(tcp_tx_delay_enabled);
 
-static void tcp_enable_tx_delay(void)
+static void tcp_enable_tx_delay(struct sock *sk, int val)
 {
-	if (!static_branch_unlikely(&tcp_tx_delay_enabled)) {
+	struct tcp_sock *tp = tcp_sk(sk);
+	s32 delta = (val - tp->tcp_tx_delay) << 3;
+
+	if (val && !static_branch_unlikely(&tcp_tx_delay_enabled)) {
 		static int __tcp_tx_delay_enabled = 0;
 
 		if (cmpxchg(&__tcp_tx_delay_enabled, 0, 1) == 0) {
@@ -3593,6 +3596,22 @@ static void tcp_enable_tx_delay(void)
 			pr_info("TCP_TX_DELAY enabled\n");
 		}
 	}
+
+	/* If we change tcp_tx_delay on a live flow, adjust tp->srtt_us,
+	 * tp->rtt_min, icsk_rto and sk->sk_pacing_rate.
+	 * This is best effort.
+	 */
+	if (delta && sk->sk_state == TCP_ESTABLISHED) {
+		s64 srtt = (s64)tp->srtt_us + delta;
+
+		tp->srtt_us = clamp_t(s64, srtt, 1, ~0U);
+		/* Note: does not deal with non zero icsk_backoff */
+		tcp_set_rto(sk);
+		minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U);
+		tcp_update_pacing_rate(sk);
+	}
 }
 
 /* When set indicates to always queue non-full frames.  Later the user clears
@@ -4119,8 +4138,12 @@ ao_parse:
 		tp->recvmsg_inq = val;
 		break;
 	case TCP_TX_DELAY:
-		if (val)
-			tcp_enable_tx_delay();
+		/* tp->srtt_us is u32, and is shifted by 3 */
+		if (val < 0 || val >= (1U << (31 - 3))) {
+			err = -EINVAL;
+			break;
+		}
+		tcp_enable_tx_delay(sk, val);
 		WRITE_ONCE(tp->tcp_tx_delay, val);
 		break;
 	default:
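
Both the (val - tp->tcp_tx_delay) << 3 delta and the (1U << (31 - 3))
bound follow from tp->srtt_us being a u32 that stores the smoothed RTT
in units of usec/8. A standalone userspace model of the adjustment,
with illustrative values (a sketch, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t srtt_us = 100000u << 3;    /* 100 ms smoothed RTT, stored <<3 */
        int32_t old_us = 0, new_us = 25000; /* old and new tcp_tx_delay */
        int32_t delta = (new_us - old_us) << 3;
        int64_t srtt = (int64_t)srtt_us + delta;

        /* mirror clamp_t(s64, srtt, 1, ~0U) */
        if (srtt < 1)
            srtt = 1;
        else if (srtt > (int64_t)UINT32_MAX)
            srtt = UINT32_MAX;
        srtt_us = (uint32_t)srtt;
        printf("srtt is now %u us\n", srtt_us >> 3);    /* prints 125000 */
        return 0;
    }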

--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1095,7 +1095,7 @@ static void tcp_rtt_estimator(struct sock *sk, long mrtt_us)
 		tp->srtt_us = max(1U, srtt);
 	}
 
-static void tcp_update_pacing_rate(struct sock *sk)
+void tcp_update_pacing_rate(struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	u64 rate;
@@ -1132,7 +1132,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
 /* Calculate rto without backoff. This is the second half of Van Jacobson's
  * routine referred to above.
  */
-static void tcp_set_rto(struct sock *sk)
+void tcp_set_rto(struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 
 	/* Old crap is replaced with new one. 8)
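
For context on the two helpers made global above, a rough userspace
model (simplified assumptions, not the kernel's exact arithmetic) of
how srtt feeds both values:

    #include <stdint.h>

    /* Van Jacobson RTO: srtt_us is stored <<3 (usec/8), and rttvar_us
     * already carries the 4*mdev factor, so RTO ~= srtt + 4*mdev. */
    static uint32_t rto_usecs(uint32_t srtt_us, uint32_t rttvar_us)
    {
        return (srtt_us >> 3) + rttvar_us;
    }

    /* Pacing: send about ratio% of a cwnd of data per smoothed RTT.
     * srtt_us is in usec/8 units, hence the extra factor of 8. */
    static uint64_t pacing_bytes_per_sec(uint32_t mss, uint32_t cwnd,
                                         uint32_t srtt_us, unsigned int ratio)
    {
        uint64_t rate = (uint64_t)mss * cwnd * ratio / 100;

        rate *= 8ULL * 1000000;
        return srtt_us ? rate / srtt_us : rate;
    }

A larger srtt therefore raises the RTO and lowers the pacing rate at
the same time, which is why the patch recomputes both right after
adjusting tp->srtt_us. (The kernel additionally scales the pacing rate
by a sysctl ratio, by default 200% in slow start and 120% afterwards.)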