author     Patrick McHardy <kaber@trash.net>      2007-11-13 03:03:00 -0800
committer  Greg Kroah-Hartman <gregkh@suse.de>    2007-11-21 09:25:54 -0800
commit     d6eb5c86ed40fe57e411c4ee72b38d0a83642302 (patch)
tree       9ba9965b5a2fce1738d19533ed7d1057768c3ba0
parent     2d23541246308b5d2867c6592ad2b09e9a1ae344 (diff)
Fix netlink timeouts.
[NETLINK]: Fix unicast timeouts
[ Upstream commit: c3d8d1e30cace31fed6186a4b8c6b1401836d89c ]
Commit ed6dcf4a in the history.git tree broke netlink_unicast timeouts
by moving the schedule_timeout() call to a new function that doesn't
propagate the remaining timeout back to the caller. This means on each
retry we start with the full timeout again.
ipc/mqueue.c actually appears to want to wait indefinitely, so that
behaviour is retained there.
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
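
For readers following along, here is a minimal user-space sketch (not part of the patch; fake_schedule_timeout(), wait_by_value() and wait_by_pointer() are invented names for illustration) of the difference the commit message describes: when the wait helper takes the timeout by value, the caller never sees how much of it was consumed and every retry starts from the full budget again, whereas passing it by pointer lets the remainder carry across retries, which is what the change to netlink_attachskb() restores.

#include <stdio.h>

/* Stand-in for schedule_timeout(): pretend every sleep consumes 40 "ticks"
 * of the requested timeout and report how much is left. */
static long fake_schedule_timeout(long timeo)
{
	const long consumed = 40;
	return timeo > consumed ? timeo - consumed : 0;
}

/* Broken pattern: the timeout is taken by value, so the caller never
 * learns how much was used up and every retry starts from scratch. */
static int wait_by_value(long timeo)
{
	return fake_schedule_timeout(timeo) != 0;	/* 1 = retry, 0 = timed out */
}

/* Fixed pattern (analogous to what netlink_attachskb() now does): the
 * timeout is taken by pointer, so the remainder reaches the retry loop. */
static int wait_by_pointer(long *timeo)
{
	*timeo = fake_schedule_timeout(*timeo);
	return *timeo != 0;
}

int main(void)
{
	long budget = 100;
	int retries = 0;

	/* By value: the helper consumed 40 ticks, but the caller's budget is
	 * untouched, so a retry loop would keep passing the full 100 ticks. */
	printf("by-value:   helper says %s, caller still holds %ld ticks\n",
	       wait_by_value(budget) ? "retry" : "timed out", budget);

	/* By pointer: the budget shrinks on every pass and the loop ends. */
	while (wait_by_pointer(&budget))
		retries++;
	printf("by-pointer: gave up after %d retries, %ld ticks left\n",
	       retries, budget);
	return 0;
}

Built with something like gcc -Wall, the by-pointer loop should report its budget reaching zero after two simulated retries, while the by-value call leaves the caller believing the full budget is still available.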
-rw-r--r--  include/linux/netlink.h   |  2 +-
-rw-r--r--  ipc/mqueue.c              |  6 ++++--
-rw-r--r--  net/netlink/af_netlink.c  | 10 +++++-----
3 files changed, 10 insertions(+), 8 deletions(-)
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 2e23353c28a5..b2834d8219e6 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -173,7 +173,7 @@ extern int netlink_unregister_notifier(struct notifier_block *nb);
 
 /* finegrained unicast helpers: */
 struct sock *netlink_getsockbyfilp(struct file *filp);
 int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
-		long timeo, struct sock *ssk);
+		long *timeo, struct sock *ssk);
 void netlink_detachskb(struct sock *sk, struct sk_buff *skb);
 int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol);
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index a242c83d89d6..1eef14b24a68 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -1014,6 +1014,8 @@ asmlinkage long sys_mq_notify(mqd_t mqdes,
 			return -EINVAL;
 		}
 		if (notification.sigev_notify == SIGEV_THREAD) {
+			long timeo;
+
 			/* create the notify skb */
 			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
 			ret = -ENOMEM;
@@ -1042,8 +1044,8 @@ retry:
 				goto out;
 			}
 
-			ret = netlink_attachskb(sock, nc, 0,
-					MAX_SCHEDULE_TIMEOUT, NULL);
+			timeo = MAX_SCHEDULE_TIMEOUT;
+			ret = netlink_attachskb(sock, nc, 0, &timeo, NULL);
 			if (ret == 1)
 				goto retry;
 			if (ret) {
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 1f15821c8da4..6ac83c2752eb 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -732,7 +732,7 @@ struct sock *netlink_getsockbyfilp(struct file *filp)
  *	1: repeat lookup - reference dropped while waiting for socket memory.
  */
 int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
-		long timeo, struct sock *ssk)
+		long *timeo, struct sock *ssk)
 {
 	struct netlink_sock *nlk;
 
@@ -741,7 +741,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
 	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
 	    test_bit(0, &nlk->state)) {
 		DECLARE_WAITQUEUE(wait, current);
-		if (!timeo) {
+		if (!*timeo) {
 			if (!ssk || nlk_sk(ssk)->pid == 0)
 				netlink_overrun(sk);
 			sock_put(sk);
@@ -755,7 +755,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
 		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
 		     test_bit(0, &nlk->state)) &&
 		    !sock_flag(sk, SOCK_DEAD))
-			timeo = schedule_timeout(timeo);
+			*timeo = schedule_timeout(*timeo);
 
 		__set_current_state(TASK_RUNNING);
 		remove_wait_queue(&nlk->wait, &wait);
@@ -763,7 +763,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
 
 		if (signal_pending(current)) {
 			kfree_skb(skb);
-			return sock_intr_errno(timeo);
+			return sock_intr_errno(*timeo);
 		}
 		return 1;
 	}
@@ -827,7 +827,7 @@ retry:
 		kfree_skb(skb);
 		return PTR_ERR(sk);
 	}
-	err = netlink_attachskb(sk, skb, nonblock, timeo, ssk);
+	err = netlink_attachskb(sk, skb, nonblock, &timeo, ssk);
 	if (err == 1)
 		goto retry;
 	if (err)