author	Radu Iliescu <radui@bricsys.com>	2012-01-19 03:57:57 +0000
committer	David S. Miller <davem@davemloft.net>	2012-01-24 15:33:19 -0500
commit	56ac11cf2f21366ad48b356f7a0d1af8cff3588e (patch)
tree	dd4471a9df32ddda044718d6d88cbb78cf941bea /net/llc/af_llc.c
parent	90b9a5454fd2e626aa1614fe9ece6b63a0dc37af (diff)
llc: Fix race condition in llc_ui_recvmsg
There is a race on sk_receive_queue between llc_ui_recvmsg and
sock_queue_rcv_skb: the receive path can queue a new skb while
llc_ui_recvmsg is unlinking one. The fix is to protect the
sk_eat_skb() call in llc_ui_recvmsg with the receive queue's
spinlock.

Signed-off-by: Radu Iliescu <riliescu@ixiacom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
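For context, the pattern the patch applies is the usual one of taking the
queue's own lock around the unlink so the consumer cannot race a concurrent
enqueue. Below is a minimal userspace sketch of that pattern, not kernel
code: a pthread mutex stands in for sk_receive_queue.lock, and the names
pkt, pkt_queue, queue_rcv and eat_front are hypothetical.

/*
 * Sketch of the locking pattern: a producer thread enqueues packets
 * (playing the role of sock_queue_rcv_skb) while the consumer unlinks
 * the head (playing the role of sk_eat_skb in llc_ui_recvmsg). Both
 * sides update the list pointers only while holding the queue lock.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct pkt {
	struct pkt *next;
	int data;
};

struct pkt_queue {
	struct pkt *head, *tail;
	pthread_mutex_t lock;	/* stands in for sk_receive_queue.lock */
};

/* Producer side: append under the queue lock. */
static void queue_rcv(struct pkt_queue *q, struct pkt *p)
{
	pthread_mutex_lock(&q->lock);
	p->next = NULL;
	if (q->tail)
		q->tail->next = p;
	else
		q->head = p;
	q->tail = p;
	pthread_mutex_unlock(&q->lock);
}

/* Consumer side: unlink the head under the same lock, as the patch does
 * around sk_eat_skb(); dropping the lock here reintroduces the race. */
static struct pkt *eat_front(struct pkt_queue *q)
{
	struct pkt *p;

	pthread_mutex_lock(&q->lock);
	p = q->head;
	if (p) {
		q->head = p->next;
		if (!q->head)
			q->tail = NULL;
	}
	pthread_mutex_unlock(&q->lock);
	return p;
}

static void *producer(void *arg)
{
	struct pkt_queue *q = arg;

	for (int i = 0; i < 1000; i++) {
		struct pkt *p = malloc(sizeof(*p));
		p->data = i;
		queue_rcv(q, p);
	}
	return NULL;
}

int main(void)
{
	struct pkt_queue q = { .lock = PTHREAD_MUTEX_INITIALIZER };
	pthread_t tid;
	long eaten = 0;

	pthread_create(&tid, NULL, producer, &q);
	while (eaten < 1000) {
		struct pkt *p = eat_front(&q);	/* concurrent with queue_rcv() */
		if (p) {
			eaten++;
			free(p);
		}
	}
	pthread_join(tid, NULL);
	printf("consumed %ld packets\n", eaten);
	return 0;
}

In the kernel patch the same role is played by spin_lock_irqsave() and
spin_unlock_irqrestore() on sk->sk_receive_queue.lock around sk_eat_skb(),
matching the lock that sock_queue_rcv_skb takes on the enqueue side.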
Diffstat (limited to 'net/llc/af_llc.c')
-rw-r--r--	net/llc/af_llc.c	5
1 file changed, 5 insertions(+), 0 deletions(-)
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index a18e6c3d36e3..b9bef2c75026 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -713,6 +713,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
 	struct sk_buff *skb = NULL;
 	struct sock *sk = sock->sk;
 	struct llc_sock *llc = llc_sk(sk);
+	unsigned long cpu_flags;
 	size_t copied = 0;
 	u32 peek_seq = 0;
 	u32 *seq;
@@ -838,7 +839,9 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
 			goto copy_uaddr;
 
 		if (!(flags & MSG_PEEK)) {
+			spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
 			sk_eat_skb(sk, skb, 0);
+			spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
 			*seq = 0;
 		}
 
@@ -859,7 +862,9 @@ copy_uaddr:
 		llc_cmsg_rcv(msg, skb);
 
 	if (!(flags & MSG_PEEK)) {
+		spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
 		sk_eat_skb(sk, skb, 0);
+		spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
 		*seq = 0;
 	}
 