author     Michal Kubecek <mkubecek@suse.cz>   2016-05-09 11:01:04 +0200
committer  Jiri Slaby <jslaby@suse.cz>         2016-09-29 11:14:19 +0200
commit     af29d5b57acef4c573c8361a509908115b9ced68 (patch)
tree       ae35552b74232ae65ea0d351447dcd27df023898 /net
parent     dea85278d68898838020b9f9edfa159f0a2b7eea (diff)
download   lwn-af29d5b57acef4c573c8361a509908115b9ced68.tar.gz
           lwn-af29d5b57acef4c573c8361a509908115b9ced68.zip
net: disable fragment reassembly if high_thresh is set to zero
commit 30759219f562cfaaebe7b9c1d1c0e6b5445c69b0 upstream.
Before commit 6d7b857d541e ("net: use lib/percpu_counter API for
fragmentation mem accounting"), setting the high threshold to 0
prevented fragment reassembly: the first fragment was always evicted
before the second could be added to the queue. While inefficient, some
users apparently relied on this behavior.
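Disabling reassembly this way is purely a configuration matter; as a
minimal user-space sketch (not part of the patch), a program could
write 0 to the IPv4 threshold through its standard procfs entry:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Assumes the usual procfs location of net.ipv4.ipfrag_high_thresh. */
	FILE *f = fopen("/proc/sys/net/ipv4/ipfrag_high_thresh", "w");

	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}
	/* Write 0: with the pre-6d7b857d541e behavior this stopped reassembly. */
	fprintf(f, "0\n");
	fclose(f);
	return EXIT_SUCCESS;
}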
Since the commit mentioned above, a percpu counter is used for
reassembly memory accounting, and its large batch size avoids taking
the slow path in the most common scenarios. As a result, a whole
full-sized packet can be reassembled without the percpu counter's main
counter changing its value, so even with high_thresh set to 0,
fragmented packets can still be reassembled and processed.
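To make the accounting argument concrete, the following is a simplified
user-space model of a batched per-CPU counter (an illustration only,
not the kernel's lib/percpu_counter; the batch value and fragment sizes
are chosen arbitrarily): additions smaller than the batch stay in a
per-CPU delta, the main counter keeps reading 0, and a
"usage > high_thresh" check with high_thresh == 0 therefore never fires.

#include <stdio.h>

#define BATCH	131072	/* illustrative large batch; the kernel's value differs */

struct batched_counter {
	long main;	/* "slow path" global value, read by the threshold check */
	long cpu_delta;	/* a single "CPU" delta, for simplicity */
};

static void counter_add(struct batched_counter *c, long amount)
{
	c->cpu_delta += amount;
	if (c->cpu_delta >= BATCH || c->cpu_delta <= -BATCH) {
		c->main += c->cpu_delta;	/* fold the delta into the main counter */
		c->cpu_delta = 0;
	}
}

static long counter_read(const struct batched_counter *c)
{
	return c->main;	/* fast read ignores the pending per-CPU delta */
}

int main(void)
{
	struct batched_counter mem = { 0, 0 };
	long high_thresh = 0;
	int i;

	/* Account roughly 64 KiB worth of fragments of ~1500 bytes each. */
	for (i = 0; i < 45; i++)
		counter_add(&mem, 1500);

	/* Prints "reassembly allowed": the main counter still reads 0. */
	printf("main counter reads %ld, high_thresh %ld: reassembly %s\n",
	       counter_read(&mem), high_thresh,
	       counter_read(&mem) > high_thresh ? "blocked" : "allowed");
	return 0;
}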
Add explicit checks preventing reassembly if the high threshold is zero.
[mk] backport to 3.12
Signed-off-by: Michal Kubecek <mkubecek@suse.cz>
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Diffstat (limited to 'net')
 net/ipv4/ip_fragment.c                  | 4 ++++
 net/ipv6/netfilter/nf_conntrack_reasm.c | 3 +++
 net/ipv6/reassembly.c                   | 4 ++++
 3 files changed, 11 insertions(+), 0 deletions(-)
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 4d98a6b80b04..04c7e4618008 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -656,6 +656,9 @@ int ip_defrag(struct sk_buff *skb, u32 user)
 	net = skb->dev ? dev_net(skb->dev) : dev_net(skb_dst(skb)->dev);
 	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
 
+	if (!net->ipv4.frags.high_thresh)
+		goto fail;
+
 	/* Start by cleaning up the memory. */
 	ip_evictor(net);
 
@@ -672,6 +675,7 @@ int ip_defrag(struct sk_buff *skb, u32 user)
 		return ret;
 	}
 
+fail:
 	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
 	kfree_skb(skb);
 	return -ENOMEM;
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 7cd623588532..c11a40caf5b6 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -569,6 +569,9 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
 	if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0)
 		return skb;
 
+	if (!net->nf_frag.frags.high_thresh)
+		return skb;
+
 	clone = skb_clone(skb, GFP_ATOMIC);
 	if (clone == NULL) {
 		pr_debug("Can't clone skb\n");
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index a1fb511da3b5..1a5318efa31c 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -556,6 +556,9 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
 		return 1;
 	}
 
+	if (!net->ipv6.frags.high_thresh)
+		goto fail_mem;
+
 	evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags, false);
 	if (evicted)
 		IP6_ADD_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
@@ -575,6 +578,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
 		return ret;
 	}
 
+fail_mem:
 	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
 	kfree_skb(skb);
 	return -1;