author     Stefan Bühler <source@stbuehler.de>    2019-04-27 20:34:19 +0200
committer  Jens Axboe <axboe@kernel.dk>           2019-04-30 09:40:02 -0600
commit     8449eedaa1da6a51d67190c905b1b54243e095f6
tree       469de67ca696245db15b75bc6d2396071e1f07d9 /fs/io_uring.c
parent     37624b58542fb9f2d9a70e6ea006ef8a5f66c30b
io_uring: fix handling SQEs requesting NOWAIT
Not all request types set REQ_F_FORCE_NONBLOCK when they needed async
punting; reverse the logic instead and set REQ_F_NOWAIT if the request
must not be punted.

Signed-off-by: Stefan Bühler <source@stbuehler.de>

Merged with my previous patch for this.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
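For context, a minimal userspace sketch of the behaviour this patch fixes, assuming liburing and a hypothetical input file "testfile" (neither is part of the commit): when an SQE carries RWF_NOWAIT in sqe->rw_flags, io_prep_rw now sets REQ_F_NOWAIT, so a request that would block completes with -EAGAIN in its CQE instead of being punted to an async worker.

/*
 * Sketch only: submit a readv with RWF_NOWAIT in sqe->rw_flags.
 * With REQ_F_NOWAIT set by the kernel, a request that would block
 * completes with -EAGAIN in the CQE rather than being punted to a
 * worker thread.  "testfile" is a hypothetical input file.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	char buf[4096];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	int fd;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	fd = open("testfile", O_RDONLY);	/* hypothetical input file */
	if (fd < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_readv(sqe, fd, &iov, 1, 0);
	sqe->rw_flags = RWF_NOWAIT;	/* RWF_NOWAIT -> IOCB_NOWAIT -> REQ_F_NOWAIT */

	io_uring_submit(&ring);

	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		if (cqe->res == -EAGAIN)
			printf("would block; request was not punted to a worker\n");
		else
			printf("readv completed: %d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}

Before this fix, such a request could still be handed to the async workqueue on -EAGAIN, defeating the point of RWF_NOWAIT; with REQ_F_NOWAIT checked in io_submit_sqe(), the -EAGAIN is propagated to the completion instead.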
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--  fs/io_uring.c | 17 ++++++++++-------
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 0e9fb2cb1984..d5e23a6dd6aa 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -221,7 +221,7 @@ struct io_kiocb {
struct list_head list;
unsigned int flags;
refcount_t refs;
-#define REQ_F_FORCE_NONBLOCK 1 /* inline submission attempt */
+#define REQ_F_NOWAIT 1 /* must not punt to workers */
#define REQ_F_IOPOLL_COMPLETED 2 /* polled IO has completed */
#define REQ_F_FIXED_FILE 4 /* ctx owns file */
#define REQ_F_SEQ_PREV 8 /* sequential with previous */
@@ -774,10 +774,14 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
if (unlikely(ret))
return ret;
- if (force_nonblock) {
+
+ /* don't allow async punt if RWF_NOWAIT was requested */
+ if (kiocb->ki_flags & IOCB_NOWAIT)
+ req->flags |= REQ_F_NOWAIT;
+
+ if (force_nonblock)
kiocb->ki_flags |= IOCB_NOWAIT;
- req->flags |= REQ_F_FORCE_NONBLOCK;
- }
+
if (ctx->flags & IORING_SETUP_IOPOLL) {
if (!(kiocb->ki_flags & IOCB_DIRECT) ||
!kiocb->ki_filp->f_op->iopoll)
@@ -1436,8 +1440,7 @@ restart:
struct sqe_submit *s = &req->submit;
const struct io_uring_sqe *sqe = s->sqe;
- /* Ensure we clear previously set forced non-block flag */
- req->flags &= ~REQ_F_FORCE_NONBLOCK;
+ /* Ensure we clear previously set non-block flag */
req->rw.ki_flags &= ~IOCB_NOWAIT;
ret = 0;
@@ -1623,7 +1626,7 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
goto out;
ret = __io_submit_sqe(ctx, req, s, true);
- if (ret == -EAGAIN) {
+ if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
struct io_uring_sqe *sqe_copy;
sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL);