path: root/fs/io_uring.c
author	Pavel Begunkov <asml.silence@gmail.com>	2020-07-06 17:59:30 +0300
committer	Jens Axboe <axboe@kernel.dk>	2020-07-06 09:06:20 -0600
commit	eba0a4dd2aa5c47ca5b0c56ffb6d6665e047ff72 (patch)
tree	e36780c1ba1eb115af4a80853ce48c5b25618b37 /fs/io_uring.c
parent	3aadc23e6054353ca056bf14e87250c79efbd7ed (diff)
io_uring: fix stopping iopoll'ing too early
Nobody adjusts *nr_events (the number of completed requests) before calling io_iopoll_getevents(), so the @min passed in shouldn't be adjusted either. Otherwise it can return fewer completions than the @min originally asked for, without hitting need_resched().

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
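The effect is easier to see with concrete numbers. The sketch below is not kernel code; it simply replays the arithmetic of the old and new checks with a hypothetical request for 8 completions of which 3 were already reaped on an earlier pass. Because *nr_events is cumulative and never reset, shrinking @min in the caller lowers the threshold the callee compares that cumulative counter against, so polling stops short of the original target.

#include <stdio.h>

int main(void)
{
	unsigned int min = 8;		/* completions the caller asked for      */
	unsigned int nr_events = 3;	/* already reaped on an earlier pass     */

	/* Old code: the caller passed tmin = min - nr_events = 5, but
	 * io_iopoll_getevents() still tested the cumulative counter
	 * against it, so "nr_events >= tmin" became true after only 2
	 * more completions (3 + 2 = 5), 3 short of the requested 8.   */
	unsigned int tmin = min - nr_events;
	printf("old threshold: %u (polling can stop at %u total completions)\n",
	       tmin, tmin);

	/* New code: pass the unmodified min, so polling continues until
	 * the cumulative counter really reaches 8.                     */
	printf("new threshold: %u\n", min);
	return 0;
}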
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--	fs/io_uring.c	9
1 file changed, 2 insertions(+), 7 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 60f1a81c6c35..332008f346e3 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2044,7 +2044,7 @@ static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
ret = io_do_iopoll(ctx, nr_events, min);
if (ret < 0)
return ret;
- if (!min || *nr_events >= min)
+ if (*nr_events >= min)
return 0;
}
@@ -2087,8 +2087,6 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
*/
mutex_lock(&ctx->uring_lock);
do {
- int tmin = 0;
-
/*
* Don't enter poll loop if we already have events pending.
* If we do, we can potentially be spinning for commands that
@@ -2113,10 +2111,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
mutex_lock(&ctx->uring_lock);
}
- if (*nr_events < min)
- tmin = min - *nr_events;
-
- ret = io_iopoll_getevents(ctx, nr_events, tmin);
+ ret = io_iopoll_getevents(ctx, nr_events, min);
if (ret <= 0)
break;
ret = 0;