author	Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>	2008-12-05 22:41:26 -0800
committer	David S. Miller <davem@davemloft.net>	2008-12-05 22:41:26 -0800
commit	775ffabf77a648d78fe1d20cb3a620e771abb921 (patch)
tree	92d953047db446134ddae8facf209fa71d14c992 /net/ipv4/tcp_input.c
parent	9969ca5f205988fb96461075cb4914c55cf166b5 (diff)
tcp: make mtu probe failure to not break gso'ed skbs unnecessarily
I noticed that since skb->len has nothing to do with actual segment
length with gso, we need to figure it out separately, reuse a function
from the recent shifting stuff (generalize it).

Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
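As a rough sketch of the idea (not part of the patch, and using simplified
stand-in names rather than the kernel's real sk_buff/skb_shared_info layout):
a GSO skb carries several segments, so its total length and its per-segment
length diverge, and the per-segment value has to be derived from the GSO
bookkeeping rather than from skb->len.

/* Standalone sketch under the assumptions above; struct and field names
 * here are illustrative only, not the kernel's.
 */
struct sketch_skb {
	unsigned int len;        /* total payload carried by the skb         */
	unsigned int gso_segs;   /* how many segments this skb represents    */
	unsigned int gso_size;   /* per-segment (MSS) size when gso_segs > 1 */
};

/* Per-segment length: for a single-segment skb it is simply skb->len,
 * otherwise it is the gso_size used to split the skb on the wire.
 */
static unsigned int sketch_seglen(const struct sketch_skb *skb)
{
	return skb->gso_segs == 1 ? skb->len : skb->gso_size;
}

This mirrors what the patch's tcp_skb_seglen() returns via
tcp_skb_pcount() and tcp_skb_mss().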
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--	net/ipv4/tcp_input.c	19
1 file changed, 7 insertions(+), 12 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 33902f6799c3..21c670190780 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1445,14 +1445,9 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
/* I wish gso_size would have a bit more sane initialization than
* something-or-zero which complicates things
*/
-static int tcp_shift_mss(struct sk_buff *skb)
+static int tcp_skb_seglen(struct sk_buff *skb)
{
- int mss = tcp_skb_mss(skb);
-
- if (!mss)
- mss = skb->len;
-
- return mss;
+ return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb);
}
/* Shifting pages past head area doesn't work */
@@ -1503,12 +1498,12 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
if (in_sack) {
len = skb->len;
pcount = tcp_skb_pcount(skb);
- mss = tcp_shift_mss(skb);
+ mss = tcp_skb_seglen(skb);
/* TODO: Fix DSACKs to not fragment already SACKed and we can
* drop this restriction as unnecessary
*/
- if (mss != tcp_shift_mss(prev))
+ if (mss != tcp_skb_seglen(prev))
goto fallback;
} else {
if (!after(TCP_SKB_CB(skb)->end_seq, start_seq))
@@ -1549,7 +1544,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
/* TODO: Fix DSACKs to not fragment already SACKed and we can
* drop this restriction as unnecessary
*/
- if (mss != tcp_shift_mss(prev))
+ if (mss != tcp_skb_seglen(prev))
goto fallback;
if (len == mss) {
@@ -1578,7 +1573,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
if (!skb_can_shift(skb) ||
(skb == tcp_send_head(sk)) ||
((TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) ||
- (mss != tcp_shift_mss(skb)))
+ (mss != tcp_skb_seglen(skb)))
goto out;
len = skb->len;
@@ -2853,7 +2848,7 @@ void tcp_simple_retransmit(struct sock *sk)
tcp_for_write_queue(skb, sk) {
if (skb == tcp_send_head(sk))
break;
- if (skb->len > mss &&
+ if (tcp_skb_seglen(skb) > mss &&
!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;