author | Yuchung Cheng <ycheng@google.com> | 2018-05-16 16:40:14 -0700
committer | David S. Miller <davem@davemloft.net> | 2018-05-17 15:41:29 -0400
commit | 2ad55f5660158d8a12c06486d32a7c47fa5f116b (patch)
tree | 7808086db33bbd720587b803323447da526ae176 /net/ipv4/tcp_input.c
parent | d716bfdb10b4250617783c94253e48b0e85adcb1 (diff)
download | lwn-2ad55f5660158d8a12c06486d32a7c47fa5f116b.tar.gz lwn-2ad55f5660158d8a12c06486d32a7c47fa5f116b.zip
tcp: new helper tcp_timeout_mark_lost
Refactor using a new helper, tcp_timeout_mark_lost(), that marks packets
lost upon RTO.
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Soheil Hassas Yeganeh <soheil@google.com>
Reviewed-by: Priyaranjan Jha <priyarjha@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r-- | net/ipv4/tcp_input.c | 50
1 file changed, 29 insertions(+), 21 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 6fb0a28977a0..af32accda2a9 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1917,18 +1917,43 @@ static inline void tcp_init_undo(struct tcp_sock *tp)
 	tp->undo_retrans = tp->retrans_out ? : -1;
 }
 
-/* Enter Loss state. If we detect SACK reneging, forget all SACK information
+/* If we detect SACK reneging, forget all SACK information
  * and reset tags completely, otherwise preserve SACKs. If receiver
  * dropped its ofo queue, we will know this due to reneging detection.
  */
+static void tcp_timeout_mark_lost(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct sk_buff *skb;
+	bool is_reneg;			/* is receiver reneging on SACKs? */
+
+	skb = tcp_rtx_queue_head(sk);
+	is_reneg = skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED);
+	if (is_reneg) {
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
+		tp->sacked_out = 0;
+		/* Mark SACK reneging until we recover from this loss event. */
+		tp->is_sack_reneg = 1;
+	} else if (tcp_is_reno(tp)) {
+		tcp_reset_reno_sack(tp);
+	}
+
+	skb_rbtree_walk_from(skb) {
+		if (is_reneg)
+			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
+		tcp_mark_skb_lost(sk, skb);
+	}
+	tcp_verify_left_out(tp);
+	tcp_clear_all_retrans_hints(tp);
+}
+
+/* Enter Loss state. */
 void tcp_enter_loss(struct sock *sk)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct net *net = sock_net(sk);
-	struct sk_buff *skb;
 	bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
-	bool is_reneg;			/* is receiver reneging on SACKs? */
 
 	/* Reduce ssthresh if it has not yet been made inside this window. */
 	if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
@@ -1944,24 +1969,7 @@ void tcp_enter_loss(struct sock *sk)
 	tp->snd_cwnd_cnt = 0;
 	tp->snd_cwnd_stamp = tcp_jiffies32;
 
-	if (tcp_is_reno(tp))
-		tcp_reset_reno_sack(tp);
-
-	skb = tcp_rtx_queue_head(sk);
-	is_reneg = skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED);
-	if (is_reneg) {
-		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
-		tp->sacked_out = 0;
-		/* Mark SACK reneging until we recover from this loss event. */
-		tp->is_sack_reneg = 1;
-	}
-	skb_rbtree_walk_from(skb) {
-		if (is_reneg)
-			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
-		tcp_mark_skb_lost(sk, skb);
-	}
-	tcp_verify_left_out(tp);
-	tcp_clear_all_retrans_hints(tp);
+	tcp_timeout_mark_lost(sk);
 
 	/* Timeout in disordered state after receiving substantial DUPACKs
 	 * suggests that the degree of reordering is over-estimated.
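For readers following the helper's logic outside the kernel tree, the sketch below is a minimal, self-contained C model of what tcp_timeout_mark_lost() does on an RTO: check the head of the retransmit queue for SACK reneging, then walk the queue marking every outstanding segment lost, clearing SACK tags only when the receiver reneged. The names fake_skb and timeout_mark_lost are hypothetical stand-ins, a plain array replaces the rbtree retransmit queue, and the Reno branch plus the left_out/retrans-hint bookkeeping are deliberately omitted; this is not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for one skb on the retransmit queue; the real code walks an
 * rbtree of struct sk_buff and tests TCP_SKB_CB(skb)->sacked flags. */
struct fake_skb {
	bool sacked_acked;	/* models TCPCB_SACKED_ACKED */
	bool lost;		/* models TCPCB_LOST */
};

/* Simplified model of tcp_timeout_mark_lost(): detect reneging from the
 * queue head, then mark every segment lost; drop SACK tags only if the
 * receiver appears to have reneged. */
static void timeout_mark_lost(struct fake_skb *queue, int len,
			      int *sacked_out, bool *is_sack_reneg)
{
	bool is_reneg = len > 0 && queue[0].sacked_acked;

	if (is_reneg) {
		*sacked_out = 0;	/* forget all SACK information */
		*is_sack_reneg = true;	/* remember until recovery ends */
	}
	for (int i = 0; i < len; i++) {
		if (is_reneg)
			queue[i].sacked_acked = false;
		queue[i].lost = true;
	}
}

int main(void)
{
	/* Head of the queue was SACKed, so an RTO implies reneging. */
	struct fake_skb q[3] = { { .sacked_acked = true }, { 0 }, { 0 } };
	int sacked_out = 1;
	bool reneg = false;

	timeout_mark_lost(q, 3, &sacked_out, &reneg);
	printf("reneg=%d sacked_out=%d head lost=%d\n",
	       reneg, sacked_out, q[0].lost);
	return 0;
}

Compiled with any C99 compiler, this prints reneg=1 sacked_out=0 head lost=1, mirroring the reneging case the helper handles before tcp_enter_loss() proceeds with the rest of its congestion-control bookkeeping.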