| author | Davide Libenzi <davidel@xmailserver.org> | 2007-05-18 12:02:33 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-05-18 13:09:34 -0700 |
| commit | d48eb2331595224ffe89665e79721d44b40bb047 (patch) | |
| tree | b4e398ec71e0775a441329b60cb0771c43e92c54 /fs/eventfd.c | |
| parent | 347b4599dd6ffef27e18c227532d1ec66556000b (diff) | |
eventfd use waitqueue lock ...
eventfd was using the unlocked waitqueue operations while holding a different lock (its own private spinlock rather than the waitqueue's), so poll_wait(), which serializes on the waitqueue lock, could race with it.
This makes eventfd use the waitqueue lock directly.
Signed-off-by: Davide Libenzi <davidel@xmailserver.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
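
The race, concretely: the "unlocked" waitqueue helpers (__add_wait_queue(), __remove_wait_queue(), wake_up_locked()) assume the caller already holds the waitqueue's embedded spinlock, wqh.lock. eventfd was calling them under its private ctx->lock instead, while poll_wait() and the core wakeup paths take wqh.lock, so the two sides could manipulate the wait list concurrently. A minimal sketch of the pattern the patch adopts, with wqh.lock doubling as the lock for the counter; the names my_ctx and my_ctx_signal are hypothetical, not code from the patch:

```c
#include <linux/wait.h>
#include <linux/spinlock.h>

struct my_ctx {
	wait_queue_head_t wqh;	/* wqh.lock also guards "count" */
	unsigned long count;
};

static void my_ctx_init(struct my_ctx *ctx)
{
	/* init_waitqueue_head() initializes wqh.lock as well */
	init_waitqueue_head(&ctx->wqh);
	ctx->count = 0;
}

static void my_ctx_signal(struct my_ctx *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	ctx->count++;
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked(&ctx->wqh);	/* wqh.lock already held */
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);
}
```

Because init_waitqueue_head() already initializes wqh.lock, the patch can also delete the spin_lock_init(&ctx->lock) call in sys_eventfd(), as the last hunk below shows.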
Diffstat (limited to 'fs/eventfd.c')
| -rw-r--r-- | fs/eventfd.c | 26 |
1 file changed, 12 insertions(+), 14 deletions(-)
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 480e2b3c4166..2ce19c000d2a 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -17,7 +17,6 @@
 #include <linux/eventfd.h>
 
 struct eventfd_ctx {
-	spinlock_t lock;
 	wait_queue_head_t wqh;
 	/*
 	 * Every time that a write(2) is performed on an eventfd, the
@@ -45,13 +44,13 @@ int eventfd_signal(struct file *file, int n)
 
 	if (n < 0)
 		return -EINVAL;
-	spin_lock_irqsave(&ctx->lock, flags);
+	spin_lock_irqsave(&ctx->wqh.lock, flags);
 	if (ULLONG_MAX - ctx->count < n)
 		n = (int) (ULLONG_MAX - ctx->count);
 	ctx->count += n;
 	if (waitqueue_active(&ctx->wqh))
 		wake_up_locked(&ctx->wqh);
-	spin_unlock_irqrestore(&ctx->lock, flags);
+	spin_unlock_irqrestore(&ctx->wqh.lock, flags);
 
 	return n;
 }
@@ -70,14 +69,14 @@ static unsigned int eventfd_poll(struct file *file, poll_table *wait)
 
 	poll_wait(file, &ctx->wqh, wait);
 
-	spin_lock_irqsave(&ctx->lock, flags);
+	spin_lock_irqsave(&ctx->wqh.lock, flags);
 	if (ctx->count > 0)
 		events |= POLLIN;
 	if (ctx->count == ULLONG_MAX)
 		events |= POLLERR;
 	if (ULLONG_MAX - 1 > ctx->count)
 		events |= POLLOUT;
-	spin_unlock_irqrestore(&ctx->lock, flags);
+	spin_unlock_irqrestore(&ctx->wqh.lock, flags);
 
 	return events;
 }
@@ -92,7 +91,7 @@ static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
 
 	if (count < sizeof(ucnt))
 		return -EINVAL;
-	spin_lock_irq(&ctx->lock);
+	spin_lock_irq(&ctx->wqh.lock);
 	res = -EAGAIN;
 	ucnt = ctx->count;
 	if (ucnt > 0)
@@ -110,9 +109,9 @@ static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
 				res = -ERESTARTSYS;
 				break;
 			}
-			spin_unlock_irq(&ctx->lock);
+			spin_unlock_irq(&ctx->wqh.lock);
 			schedule();
-			spin_lock_irq(&ctx->lock);
+			spin_lock_irq(&ctx->wqh.lock);
 		}
 		__remove_wait_queue(&ctx->wqh, &wait);
 		__set_current_state(TASK_RUNNING);
@@ -122,7 +121,7 @@ static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
 		if (waitqueue_active(&ctx->wqh))
 			wake_up_locked(&ctx->wqh);
 	}
-	spin_unlock_irq(&ctx->lock);
+	spin_unlock_irq(&ctx->wqh.lock);
 	if (res > 0 && put_user(ucnt, (__u64 __user *) buf))
 		return -EFAULT;
 
@@ -143,7 +142,7 @@ static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t c
 		return -EFAULT;
 	if (ucnt == ULLONG_MAX)
 		return -EINVAL;
-	spin_lock_irq(&ctx->lock);
+	spin_lock_irq(&ctx->wqh.lock);
 	res = -EAGAIN;
 	if (ULLONG_MAX - ctx->count > ucnt)
 		res = sizeof(ucnt);
@@ -159,9 +158,9 @@ static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t c
 				res = -ERESTARTSYS;
 				break;
 			}
-			spin_unlock_irq(&ctx->lock);
+			spin_unlock_irq(&ctx->wqh.lock);
 			schedule();
-			spin_lock_irq(&ctx->lock);
+			spin_lock_irq(&ctx->wqh.lock);
 		}
 		__remove_wait_queue(&ctx->wqh, &wait);
 		__set_current_state(TASK_RUNNING);
@@ -171,7 +170,7 @@ static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t c
 		if (waitqueue_active(&ctx->wqh))
 			wake_up_locked(&ctx->wqh);
 	}
-	spin_unlock_irq(&ctx->lock);
+	spin_unlock_irq(&ctx->wqh.lock);
 
 	return res;
 }
@@ -210,7 +209,6 @@ asmlinkage long sys_eventfd(unsigned int count)
 		return -ENOMEM;
 
 	init_waitqueue_head(&ctx->wqh);
-	spin_lock_init(&ctx->lock);
 	ctx->count = count;
 	/*
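
For readers unfamiliar with the interface being patched: as the read/write paths above show, write(2) adds the written 8-byte value to ctx->count, and read(2) blocks while the counter is zero, then returns the value and resets it. A userspace sketch of those semantics follows; note it uses the two-argument glibc eventfd() wrapper, which postdates this patch (the syscall added by this series took only an initial count).

```c
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
	uint64_t val;
	int efd = eventfd(0, 0);	/* kernel-side counter starts at 0 */

	if (efd < 0) {
		perror("eventfd");
		return 1;
	}

	val = 3;
	if (write(efd, &val, sizeof(val)) != sizeof(val))	/* counter += 3 */
		perror("write");
	val = 4;
	if (write(efd, &val, sizeof(val)) != sizeof(val))	/* counter += 4 */
		perror("write");

	/* returns the accumulated value (7) and resets the counter to 0;
	 * would block here if the counter were still zero */
	if (read(efd, &val, sizeof(val)) == sizeof(val))
		printf("counter was %llu\n", (unsigned long long) val);

	close(efd);
	return 0;
}
```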