Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Cross-merge networking fixes after downstream PR.

No conflicts.

Adjacent changes:

  e3f02f32a0 ("ionic: fix kernel panic due to multi-buffer handling")
  d9c0420999 ("ionic: Mark error paths in the data path as unlikely")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
@@ -2782,13 +2782,37 @@ static void tcp_mtup_probe_success(struct sock *sk)
 	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS);
 }
 
+/* Sometimes we deduce that packets have been dropped due to reasons other than
+ * congestion, like path MTU reductions or failed client TFO attempts. In these
+ * cases we call this function to retransmit as many packets as cwnd allows,
+ * without reducing cwnd. Given that retransmits will set retrans_stamp to a
+ * non-zero value (and may do so in a later calling context due to TSQ), we
+ * also enter CA_Loss so that we track when all retransmitted packets are ACKed
+ * and clear retrans_stamp when that happens (to ensure later recurring RTOs
+ * are using the correct retrans_stamp and don't declare ETIMEDOUT
+ * prematurely).
+ */
+static void tcp_non_congestion_loss_retransmit(struct sock *sk)
+{
+	const struct inet_connection_sock *icsk = inet_csk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	if (icsk->icsk_ca_state != TCP_CA_Loss) {
+		tp->high_seq = tp->snd_nxt;
+		tp->snd_ssthresh = tcp_current_ssthresh(sk);
+		tp->prior_ssthresh = 0;
+		tp->undo_marker = 0;
+		tcp_set_ca_state(sk, TCP_CA_Loss);
+	}
+	tcp_xmit_retransmit_queue(sk);
+}
+
 /* Do a simple retransmit without using the backoff mechanisms in
  * tcp_timer. This is used for path mtu discovery.
  * The socket is already locked here.
  */
 void tcp_simple_retransmit(struct sock *sk)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
 	int mss;
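The new helper factors out the "enter loss recovery without reducing cwnd" sequence so that both call sites later in this diff share one implementation. As a rough illustration of its shape, here is a minimal userspace C sketch; the conn structure, its field names, and the xmit_rtx_queue callback are invented stand-ins for the tcp_sock state and tcp_xmit_retransmit_queue(), not kernel API:

#include <stdint.h>

enum ca_state { CA_OPEN, CA_LOSS };	/* stand-in for icsk_ca_state */

struct conn {
	enum ca_state ca_state;
	uint32_t snd_nxt;
	uint32_t high_seq;
	uint32_t prior_ssthresh;
	uint32_t undo_marker;
};

void non_congestion_loss_retransmit(struct conn *c,
				    void (*xmit_rtx_queue)(struct conn *))
{
	/* Enter the loss state at most once, so repeated calls (e.g.
	 * several successive MTU reductions) do not re-snapshot state. */
	if (c->ca_state != CA_LOSS) {
		c->high_seq = c->snd_nxt;	/* recovery ends once this is ACKed */
		/* In the kernel, snd_ssthresh is refreshed from its current
		 * value here, i.e. deliberately not reduced. */
		c->prior_ssthresh = 0;		/* nothing was reduced, so ... */
		c->undo_marker = 0;		/* ... disable the undo machinery */
		c->ca_state = CA_LOSS;
	}
	xmit_rtx_queue(c);			/* retransmit what cwnd allows; cwnd untouched */
}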
@@ -2828,14 +2852,7 @@ void tcp_simple_retransmit(struct sock *sk)
 	 * in network, but units changed and effective
 	 * cwnd/ssthresh really reduced now.
 	 */
-	if (icsk->icsk_ca_state != TCP_CA_Loss) {
-		tp->high_seq = tp->snd_nxt;
-		tp->snd_ssthresh = tcp_current_ssthresh(sk);
-		tp->prior_ssthresh = 0;
-		tp->undo_marker = 0;
-		tcp_set_ca_state(sk, TCP_CA_Loss);
-	}
-	tcp_xmit_retransmit_queue(sk);
+	tcp_non_congestion_loss_retransmit(sk);
 }
 EXPORT_SYMBOL(tcp_simple_retransmit);
 
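Why the fuss about retrans_stamp in the helper's comment? The RTO path decides ETIMEDOUT by measuring how long retransmissions have been outstanding, counted from retrans_stamp. A hedged sketch of that check (the function name and the millisecond units here are illustrative, not the kernel's actual retransmits_timed_out() code):

#include <stdbool.h>
#include <stdint.h>

/* retrans_stamp is the timestamp of the FIRST retransmission of the
 * current episode; 0 means "no retransmission outstanding". */
bool retransmits_timed_out_sketch(uint32_t retrans_stamp,
				  uint32_t now_ms, uint32_t budget_ms)
{
	if (!retrans_stamp)		/* nothing outstanding: cannot time out */
		return false;
	return now_ms - retrans_stamp >= budget_ms;
}

If a stale retrans_stamp from an earlier episode were left behind, now_ms - retrans_stamp would overstate the elapsed time and abort the connection too early. Entering CA_Loss lets the ACK processing notice when every retransmitted packet has been ACKed and zero the stamp, which is exactly the behavior the comment above describes.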
@@ -6304,8 +6321,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
 		tp->fastopen_client_fail = TFO_DATA_NOT_ACKED;
 		skb_rbtree_walk_from(data)
 			tcp_mark_skb_lost(sk, data);
-		tcp_xmit_retransmit_queue(sk);
-		tp->retrans_stamp = 0;
+		tcp_non_congestion_loss_retransmit(sk);
 		NET_INC_STATS(sock_net(sk),
 				LINUX_MIB_TCPFASTOPENACTIVEFAIL);
 		return true;
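In this TFO failure path the client has deduced that the data sent on the SYN was never ACKed: each of those segments is marked lost and then retransmitted via the helper. Previously this call site zeroed retrans_stamp by hand after tcp_xmit_retransmit_queue(); routing through tcp_non_congestion_loss_retransmit() makes the CA_Loss machinery responsible for clearing it instead. A list-based sketch of the pattern (seg, tfo_fallback_sketch, and retransmit_all are invented for illustration; the kernel walks an rbtree with skb_rbtree_walk_from()):

#include <stdbool.h>
#include <stddef.h>

struct seg {
	struct seg *next;
	bool lost;
};

/* Mark every data segment from the first unacked one onward as lost,
 * then retransmit them without any cwnd reduction. */
void tfo_fallback_sketch(struct seg *first_unacked,
			 void (*retransmit_all)(void))
{
	for (struct seg *s = first_unacked; s; s = s->next)
		s->lost = true;		/* plays the role of tcp_mark_skb_lost() */
	retransmit_all();		/* plays the role of the new helper call */
}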
@@ -7241,7 +7257,12 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 	tcp_rsk(req)->tfo_listener = false;
 	if (!want_cookie) {
 		req->timeout = tcp_timeout_init((struct sock *)req);
-		inet_csk_reqsk_queue_hash_add(sk, req, req->timeout);
+		if (unlikely(!inet_csk_reqsk_queue_hash_add(sk, req,
+							    req->timeout))) {
+			reqsk_free(req);
+			return 0;
+		}
+
 	}
 	af_ops->send_synack(sk, dst, &fl, req, &foc,
 			    !want_cookie ? TCP_SYNACK_NORMAL :
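The last hunk turns a fire-and-forget hash insert into a checked one: when inet_csk_reqsk_queue_hash_add() reports that the request socket was not inserted (presumably because an equivalent request won a race to the table), the request is freed rather than leaked, and tcp_conn_request() returns without sending a SYN-ACK for the loser. The generic insert-or-free shape, sketched with a hypothetical table API (table_insert_unique and queue_request_sketch are invented names):

#include <stdbool.h>
#include <stdlib.h>

/* Hypothetical hash-table API, for illustration only. */
struct table;
bool table_insert_unique(struct table *t, void *obj);	/* false if not inserted */

int queue_request_sketch(struct table *t, void *req)
{
	if (!table_insert_unique(t, req)) {
		free(req);	/* mirrors reqsk_free(req): do not leak the loser */
		return 0;	/* report "handled"; no reply for the duplicate */
	}
	return 1;		/* inserted; caller goes on to send the SYN-ACK */
}

Returning 0 rather than an error drops the duplicate silently; the peer's SYN retransmission, or the reply to the winning request, keeps the handshake alive.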