author	David S. Miller <davem@davemloft.net>	2005-08-09 19:25:21 -0700
committer	David S. Miller <davem@sunset.davemloft.net>	2005-08-29 15:31:14 -0700
commit	8728b834b226ffcf2c94a58530090e292af2a7bf (patch)
tree	2fd51ff3b7097eb3ffc41ea3a1d8b3ba04715b4c /net/ipv4/tcp_input.c
parent	6869c4d8e066e21623c812c448a05f1ed931c9c6 (diff)
[NET]: Kill skb->list
Remove the "list" member of struct sk_buff, as it is entirely redundant. All SKB list removal callers know which list the SKB is on, so storing this in sk_buff does nothing other than taking up some space. Two tricky bits were SCTP, which I took care of, and two ATM drivers which Francois Romieu <romieu@fr.zoreil.com> fixed up. Signed-off-by: David S. Miller <davem@davemloft.net> Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--	net/ipv4/tcp_input.c	| 29
1 file changed, 16 insertions(+), 13 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 53a8a5399f1e..ffa24025cd02 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2085,7 +2085,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt
seq_rtt = now - scb->when;
tcp_dec_pcount_approx(&tp->fackets_out, skb);
tcp_packets_out_dec(tp, skb);
- __skb_unlink(skb, skb->list);
+ __skb_unlink(skb, &sk->sk_write_queue);
sk_stream_free_skb(sk, skb);
}
@@ -2853,7 +2853,7 @@ static void tcp_ofo_queue(struct sock *sk)
if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
SOCK_DEBUG(sk, "ofo packet was already received \n");
- __skb_unlink(skb, skb->list);
+ __skb_unlink(skb, &tp->out_of_order_queue);
__kfree_skb(skb);
continue;
}
@@ -2861,7 +2861,7 @@ static void tcp_ofo_queue(struct sock *sk)
tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
TCP_SKB_CB(skb)->end_seq);
- __skb_unlink(skb, skb->list);
+ __skb_unlink(skb, &tp->out_of_order_queue);
__skb_queue_tail(&sk->sk_receive_queue, skb);
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
if(skb->h.th->fin)
@@ -3027,7 +3027,7 @@ drop:
u32 end_seq = TCP_SKB_CB(skb)->end_seq;
if (seq == TCP_SKB_CB(skb1)->end_seq) {
- __skb_append(skb1, skb);
+ __skb_append(skb1, skb, &tp->out_of_order_queue);
if (!tp->rx_opt.num_sacks ||
tp->selective_acks[0].end_seq != seq)
@@ -3071,7 +3071,7 @@ drop:
tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, end_seq);
break;
}
- __skb_unlink(skb1, skb1->list);
+ __skb_unlink(skb1, &tp->out_of_order_queue);
tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, TCP_SKB_CB(skb1)->end_seq);
__kfree_skb(skb1);
}
@@ -3088,8 +3088,9 @@ add_sack:
* simplifies code)
*/
static void
-tcp_collapse(struct sock *sk, struct sk_buff *head,
- struct sk_buff *tail, u32 start, u32 end)
+tcp_collapse(struct sock *sk, struct sk_buff_head *list,
+ struct sk_buff *head, struct sk_buff *tail,
+ u32 start, u32 end)
{
struct sk_buff *skb;
@@ -3099,7 +3100,7 @@ tcp_collapse(struct sock *sk, struct sk_buff *head,
/* No new bits? It is possible on ofo queue. */
if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
struct sk_buff *next = skb->next;
- __skb_unlink(skb, skb->list);
+ __skb_unlink(skb, list);
__kfree_skb(skb);
NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
skb = next;
@@ -3145,7 +3146,7 @@ tcp_collapse(struct sock *sk, struct sk_buff *head,
nskb->mac.raw = nskb->head + (skb->mac.raw-skb->head);
memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
- __skb_insert(nskb, skb->prev, skb, skb->list);
+ __skb_insert(nskb, skb->prev, skb, list);
sk_stream_set_owner_r(nskb, sk);
/* Copy data, releasing collapsed skbs. */
@@ -3164,7 +3165,7 @@ tcp_collapse(struct sock *sk, struct sk_buff *head,
}
if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
struct sk_buff *next = skb->next;
- __skb_unlink(skb, skb->list);
+ __skb_unlink(skb, list);
__kfree_skb(skb);
NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
skb = next;
@@ -3200,7 +3201,8 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
if (skb == (struct sk_buff *)&tp->out_of_order_queue ||
after(TCP_SKB_CB(skb)->seq, end) ||
before(TCP_SKB_CB(skb)->end_seq, start)) {
- tcp_collapse(sk, head, skb, start, end);
+ tcp_collapse(sk, &tp->out_of_order_queue,
+ head, skb, start, end);
head = skb;
if (skb == (struct sk_buff *)&tp->out_of_order_queue)
break;
@@ -3237,7 +3239,8 @@ static int tcp_prune_queue(struct sock *sk)
tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
tcp_collapse_ofo_queue(sk);
- tcp_collapse(sk, sk->sk_receive_queue.next,
+ tcp_collapse(sk, &sk->sk_receive_queue,
+ sk->sk_receive_queue.next,
(struct sk_buff*)&sk->sk_receive_queue,
tp->copied_seq, tp->rcv_nxt);
sk_stream_mem_reclaim(sk);
@@ -3462,7 +3465,7 @@ static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
tp->copied_seq++;
if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) {
- __skb_unlink(skb, skb->list);
+ __skb_unlink(skb, &sk->sk_receive_queue);
__kfree_skb(skb);
}
}