path: root/net/ipv4/tcp_input.c
author    Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>  2007-08-09 14:53:36 +0300
committer David S. Miller <davem@sunset.davemloft.net>  2007-10-10 16:47:58 -0700
commit    1b6d427bb7eb69e6dc4f194a5b0f4a382a16ff82 (patch)
tree      d67f6ea9a5f581f83b4d8228fc2964c70f940d5a /net/ipv4/tcp_input.c
parent    d02596e32925edaeccee0af8eb6c229b5615de42 (diff)
[TCP]: Reduce sacked_out with reno when purging write_queue
Previously TCP had a transitional state during which reno counted segments that are already below the current window into sacked_out; this is now prevented. In addition, the unconditional catching of S+L skbs (an skb marked both SACKed and lost) is now re-tried.

This approach conservatively calls just remove_sack and leaves the reset_sack() calls alone. The best solution to the whole problem would be to first calculate the new sacked_out fully (this patch does not move the reno_sack_reset calls from their original sites and thus does not implement this). However, that would require a very invasive change to fastretrans_alert (perhaps even slicing it into two halves). Alternatively, all callers of tcp_packets_in_flight (i.e., users that depend on sacked_out) could be postponed until the new sacked_out has been calculated, but that is no simpler an alternative.

Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
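For readers outside the TCP stack: plain NewReno has no SACK blocks, so tp->sacked_out is only a duplicate-ACK driven estimate of segments that have left the network, and tcp_packets_in_flight() subtracts it from packets_out. The sketch below is a minimal, self-contained illustration of that bookkeeping and of the ordering this patch establishes; struct flow and the helper names are hypothetical stand-ins for struct tcp_sock and the kernel helpers, and the decrement is simplified relative to the kernel's tcp_remove_reno_sacks (which also handles reordering detection).

#include <stdio.h>

/* Hypothetical stand-in for the relevant struct tcp_sock fields. */
struct flow {
	unsigned int packets_out;	/* segments currently in the window */
	unsigned int sacked_out;	/* reno: duplicate-ACK based estimate */
};

/* Each duplicate ACK suggests one more segment has left the network. */
static void reno_add_sack(struct flow *f)
{
	f->sacked_out++;
}

/* When a cumulative ACK purges 'acked' segments from the write queue,
 * the estimate must shrink with them, or segments already below the
 * window keep being counted as "sacked". The patch moves this step
 * into tcp_clean_rtx_queue() so it runs in the same pass that purges
 * the queue, before anyone reads sacked_out. */
static void reno_remove_sacks(struct flow *f, unsigned int acked)
{
	if (acked >= f->sacked_out)
		f->sacked_out = 0;
	else
		f->sacked_out -= acked;
}

/* Simplified model of tcp_packets_in_flight(). */
static unsigned int packets_in_flight(const struct flow *f)
{
	return f->packets_out - f->sacked_out;
}

int main(void)
{
	struct flow f = { .packets_out = 10, .sacked_out = 0 };

	/* Three duplicate ACKs arrive. */
	reno_add_sack(&f);
	reno_add_sack(&f);
	reno_add_sack(&f);

	/* A cumulative ACK covers 4 segments: purge them and fix the
	 * estimate before computing in-flight, mirroring the patched
	 * ordering. */
	f.packets_out -= 4;
	reno_remove_sacks(&f, 4);

	printf("in flight: %u\n", packets_in_flight(&f));	/* 6 */
	return 0;
}

Without the reno_remove_sacks() call, sacked_out would still be 3 after the purge and packets_in_flight() would report 3 instead of 6; that understated in-flight count is the transitional state the commit message describes, and it is why the adjustment now happens inside tcp_clean_rtx_queue() rather than later in tcp_fastretrans_alert().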
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--  net/ipv4/tcp_input.c | 15
1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index bf4fc3516fb9..f8af018dd224 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2187,7 +2187,7 @@ static void tcp_mtup_probe_success(struct sock *sk, struct sk_buff *skb)
  * tcp_xmit_retransmit_queue().
  */
 static void
-tcp_fastretrans_alert(struct sock *sk, int prior_packets, int flag)
+tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -2273,12 +2273,8 @@ tcp_fastretrans_alert(struct sock *sk, int prior_packets, int flag)
 		if (!(flag & FLAG_SND_UNA_ADVANCED)) {
 			if (IsReno(tp) && is_dupack)
 				tcp_add_reno_sack(sk);
-		} else {
-			int acked = prior_packets - tp->packets_out;
-			if (IsReno(tp))
-				tcp_remove_reno_sacks(sk, acked);
-			do_lost = tcp_try_undo_partial(sk, acked);
-		}
+		} else
+			do_lost = tcp_try_undo_partial(sk, pkts_acked);
 		break;
 	case TCP_CA_Loss:
 		if (flag&FLAG_DATA_ACKED)
@@ -2577,6 +2573,9 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
 		tcp_ack_update_rtt(sk, acked, seq_rtt);
 		tcp_ack_packets_out(sk);
 
+		if (IsReno(tp))
+			tcp_remove_reno_sacks(sk, pkts_acked);
+
 		if (ca_ops->pkts_acked) {
 			s32 rtt_us = -1;
@@ -2927,7 +2926,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 		if ((flag & FLAG_DATA_ACKED) && !frto_cwnd &&
 		    tcp_may_raise_cwnd(sk, flag))
 			tcp_cong_avoid(sk, ack, prior_in_flight, 0);
-		tcp_fastretrans_alert(sk, prior_packets, flag);
+		tcp_fastretrans_alert(sk, prior_packets - tp->packets_out, flag);
 	} else {
 		if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
 			tcp_cong_avoid(sk, ack, prior_in_flight, 1);