author	Christian Brauner <brauner@kernel.org>	2023-11-22 13:48:25 +0100
committer	Christian Brauner <brauner@kernel.org>	2023-11-28 14:08:46 +0100
commit	b7638ad0c7802ea854599ce753d0e6d20690f7e2 (patch)
tree	4f9e2d2484b58b488fb09fab17b4abbfa5b1bfcc /fs/eventfd.c
parent	120ae58593630819209a011a3f9c89f73bcc9894 (diff)
eventfd: make eventfd_signal{_mask}() void
No caller cares about the return value.

Link: https://lore.kernel.org/r/20231122-vfs-eventfd-signal-v2-4-bd549b14ce0c@kernel.org
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Christian Brauner <brauner@kernel.org>
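For context, a minimal sketch of the kind of call site the message refers to: the counter is simply kicked and the old __u64 return value is dropped, so nothing is lost by making the function void. This is an illustration, not code from the patch; foo_dev and foo_complete are made-up names, and only eventfd_signal() itself (in its one-argument form, as seen in the diff below) is part of the kernel API.

#include <linux/eventfd.h>

struct foo_dev {
	struct eventfd_ctx *trigger;	/* obtained earlier, e.g. via eventfd_ctx_fdget() */
};

/* Hypothetical completion path: signal the eventfd and move on. */
static void foo_complete(struct foo_dev *dev)
{
	if (dev->trigger)
		eventfd_signal(dev->trigger);	/* return value was never checked */
}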
Diffstat (limited to 'fs/eventfd.c')
-rw-r--r--	fs/eventfd.c	40
1 file changed, 15 insertions(+), 25 deletions(-)
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 41109ba6bbe0..16bea05a7c78 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -43,10 +43,19 @@ struct eventfd_ctx {
 	int id;
 };
 
-__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask)
+/**
+ * eventfd_signal_mask - Increment the event counter
+ * @ctx: [in] Pointer to the eventfd context.
+ * @mask: [in] poll mask
+ *
+ * This function is supposed to be called by the kernel in paths that do not
+ * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
+ * value, and we signal this as overflow condition by returning a EPOLLERR
+ * to poll(2).
+ */
+void eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask)
 {
 	unsigned long flags;
-	__u64 n = 1;
 
 	/*
 	 * Deadlock or stack overflow issues can happen if we recurse here
@@ -57,37 +66,18 @@ __u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask)
 	 * safe context.
 	 */
 	if (WARN_ON_ONCE(current->in_eventfd))
-		return 0;
+		return;
 
 	spin_lock_irqsave(&ctx->wqh.lock, flags);
 	current->in_eventfd = 1;
-	if (ULLONG_MAX - ctx->count < n)
-		n = ULLONG_MAX - ctx->count;
-	ctx->count += n;
+	if (ctx->count < ULLONG_MAX)
+		ctx->count++;
 	if (waitqueue_active(&ctx->wqh))
 		wake_up_locked_poll(&ctx->wqh, EPOLLIN | mask);
 	current->in_eventfd = 0;
 	spin_unlock_irqrestore(&ctx->wqh.lock, flags);
-
-	return n == 1;
-}
-
-/**
- * eventfd_signal - Increment the event counter
- * @ctx: [in] Pointer to the eventfd context.
- *
- * This function is supposed to be called by the kernel in paths that do not
- * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
- * value, and we signal this as overflow condition by returning a EPOLLERR
- * to poll(2).
- *
- * Returns the amount by which the counter was incremented.
- */
-__u64 eventfd_signal(struct eventfd_ctx *ctx)
-{
-	return eventfd_signal_mask(ctx, 0);
 }
-EXPORT_SYMBOL_GPL(eventfd_signal);
+EXPORT_SYMBOL_GPL(eventfd_signal_mask);
 
 static void eventfd_free_ctx(struct eventfd_ctx *ctx)
 {
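Note that the diff above is limited to fs/eventfd.c, so any matching header change is not shown here. The out-of-line eventfd_signal() disappears from this file; presumably the one-argument helper survives elsewhere as a thin wrapper around eventfd_signal_mask(). A sketch of what such a wrapper would look like, offered as an assumption rather than as part of the patch shown:

/* Assumed shape of the surviving helper, e.g. in include/linux/eventfd.h; not part of this diff. */
static inline void eventfd_signal(struct eventfd_ctx *ctx)
{
	eventfd_signal_mask(ctx, 0);
}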