author	Eric Dumazet <edumazet@google.com>	2014-11-21 11:47:16 -0800
committer	David S. Miller <davem@davemloft.net>	2014-11-21 15:26:32 -0500
commit	e7820e39b7d19b9fe1928fc19de9361b44150ca6
tree	15599feec720efe469184ed07d9eefc91c32a086
parent	892d6eb1245b771987afb8667a65344e568d3439
net: Revert "net: avoid one atomic operation in skb_clone()"
Not sure what I was thinking, but doing anything after releasing a
refcount is suicidal or/and embarrassing.

By the time we set skb->fclone to SKB_FCLONE_FREE, another cpu could
have released last reference and freed whole skb.

We potentially corrupt memory or trap if CONFIG_DEBUG_PAGEALLOC is set.

Reported-by: Chris Mason <clm@fb.com>
Fixes: ce1a4ea3f1258 ("net: avoid one atomic operation in skb_clone()")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Sabrina Dubroca <sd@queasysnail.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
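The pattern being reverted is the classic refcount mistake: touching an
object after dropping the reference that kept it alive. A minimal
user-space sketch of the two orderings, with a hypothetical struct obj
and obj_put_* helpers and C11 atomics standing in for the kernel's
atomic_t (this is an illustration, not the kernel code):

	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Hypothetical refcounted object; 'state' stands in for skb->fclone. */
	struct obj {
		atomic_int ref;
		int state;
	};

	enum { STATE_FREE, STATE_IN_USE };

	/* Buggy shape of the reverted code: the store to o->state happens
	 * after our reference is dropped. If another thread releases the
	 * last reference in between, the store is a use-after-free.
	 */
	void obj_put_buggy(struct obj *o)
	{
		if (atomic_fetch_sub(&o->ref, 1) == 1) {
			free(o);
			return;
		}
		o->state = STATE_FREE;	/* BUG: object may already be gone */
	}

	/* Safe shape, which the revert restores: touch the object while our
	 * reference still pins it, then drop the reference.
	 */
	void obj_put_fixed(struct obj *o)
	{
		o->state = STATE_FREE;	/* safe: we still hold a reference */
		if (atomic_fetch_sub(&o->ref, 1) == 1)
			free(o);
	}

	int main(void)
	{
		struct obj *o = malloc(sizeof(*o));
		atomic_init(&o->ref, 2);
		o->state = STATE_IN_USE;
		obj_put_fixed(o);	/* one reference left, object survives */
		printf("state=%d ref=%d\n", o->state, atomic_load(&o->ref));
		obj_put_fixed(o);	/* last reference, object freed */
		return 0;
	}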
Diffstat (limited to 'net')
-rw-r--r--	net/core/skbuff.c	23
1 file changed, 6 insertions(+), 17 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index c16615bfb61e..32e31c299631 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -552,20 +552,13 @@ static void kfree_skbmem(struct sk_buff *skb)
 	case SKB_FCLONE_CLONE:
 		fclones = container_of(skb, struct sk_buff_fclones, skb2);
 
-		/* Warning : We must perform the atomic_dec_and_test() before
-		 * setting skb->fclone back to SKB_FCLONE_FREE, otherwise
-		 * skb_clone() could set clone_ref to 2 before our decrement.
-		 * Anyway, if we are going to free the structure, no need to
-		 * rewrite skb->fclone.
+		/* The clone portion is available for
+		 * fast-cloning again.
 		 */
-		if (atomic_dec_and_test(&fclones->fclone_ref)) {
+		skb->fclone = SKB_FCLONE_FREE;
+
+		if (atomic_dec_and_test(&fclones->fclone_ref))
 			kmem_cache_free(skbuff_fclone_cache, fclones);
-		} else {
-			/* The clone portion is available for
-			 * fast-cloning again.
-			 */
-			skb->fclone = SKB_FCLONE_FREE;
-		}
 		break;
 	}
 }
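Reassembled from the context and '+' lines of the hunk above, the
SKB_FCLONE_CLONE branch of kfree_skbmem() after the revert reads as
follows; the write to skb->fclone now happens while the caller still
holds its fclone_ref, so the sk_buff_fclones cannot be freed underneath
it:

	case SKB_FCLONE_CLONE:
		fclones = container_of(skb, struct sk_buff_fclones, skb2);

		/* The clone portion is available for
		 * fast-cloning again.
		 */
		skb->fclone = SKB_FCLONE_FREE;

		if (atomic_dec_and_test(&fclones->fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, fclones);
		break;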
@@ -887,11 +880,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 	if (skb->fclone == SKB_FCLONE_ORIG &&
 	    n->fclone == SKB_FCLONE_FREE) {
 		n->fclone = SKB_FCLONE_CLONE;
-		/* As our fastclone was free, clone_ref must be 1 at this point.
-		 * We could use atomic_inc() here, but it is faster
-		 * to set the final value.
-		 */
-		atomic_set(&fclones->fclone_ref, 2);
+		atomic_inc(&fclones->fclone_ref);
 	} else {
 		if (skb_pfmemalloc(skb))
 			gfp_mask |= __GFP_MEMALLOC;
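Why atomic_inc() rather than atomic_set(..., 2): with the restored
kfree_skbmem() ordering, skb->fclone becomes SKB_FCLONE_FREE before
fclone_ref is decremented, so skb_clone() can observe a free slot while
the count is still 2; atomic_set() would erase the in-flight decrement,
while atomic_inc() composes with it. A deterministic user-space replay
of that interleaving (illustrative names, C11 atomics; not kernel API):

	#include <stdatomic.h>
	#include <stdio.h>

	int main(void)
	{
		atomic_int fclone_ref;
		atomic_init(&fclone_ref, 2);	/* ORIG + dying CLONE both counted */

		/* CPU0, kfree_skbmem(): has already done
		 *   skb->fclone = SKB_FCLONE_FREE;
		 * but has not decremented fclone_ref yet.
		 */

		/* CPU1, skb_clone(): sees the free slot and reclaims it. The
		 * reverted code assumed the count was 1 and forced it to 2:
		 */
		atomic_store(&fclone_ref, 2);	/* buggy: overwrites live state */

		/* CPU0 now finishes its put; its decrement lands on the forced
		 * value, so one reference is lost.
		 */
		atomic_fetch_sub(&fclone_ref, 1);

		/* Two users (ORIG + new CLONE) remain, but the count says 1:
		 * the next put frees memory that is still in use.
		 */
		printf("fclone_ref = %d, expected 2\n", atomic_load(&fclone_ref));

		/* atomic_fetch_add(&fclone_ref, 1) -- the atomic_inc() the
		 * revert restores -- composes with the in-flight decrement
		 * and would have left the count at 2.
		 */
		return 0;
	}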