author | Pavel Begunkov <asml.silence@gmail.com> | 2021-02-04 13:52:03 +0000
committer | Jens Axboe <axboe@kernel.dk> | 2021-02-04 08:05:46 -0700
commit | 5ea5dd45844d1b727ab2a76f47d6e9aa65d1e921 (patch)
tree | e0c98b883f1850c61a85d6b3ae9e93639d8db74a /fs/io_uring.c
parent | 7335e3bf9d0a92be09bb4f38d06ab22c40f0fead (diff)
download | lwn-5ea5dd45844d1b727ab2a76f47d6e9aa65d1e921.tar.gz, lwn-5ea5dd45844d1b727ab2a76f47d6e9aa65d1e921.zip
io_uring: inline io_read()'s iovec freeing
io_read() does not have the simplest control flow, with a lot of jumps,
and it's hard to read. One of those jumps is to the out_free: label,
which frees the iovec. However, from the middle of io_read() onwards the
iovec is NULL'ed, so kfree(iovec) there is a no-op; that leaves us with
two places where we can inline the free and further clean up the code.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
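
To illustrate the pattern outside the kernel, here is a minimal userspace sketch (malloc/free stand in for kmalloc/kfree, and do_read_step()/read_like_io_read() are hypothetical names, not io_uring APIs): instead of routing every exit through a shared out_free: label, the free is inlined at the only exits where the buffer can still be live.

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* Stand-in for io_iter_do_read(): fills the buffer, returns bytes "read". */
static int do_read_step(char *buf, size_t len)
{
	memset(buf, 'x', len);
	return (int)len;
}

/*
 * Mirrors io_read()'s shape: small reads use an on-stack buffer (like
 * io_uring's inline fast_iov) and leave iovec NULL; large reads allocate.
 */
static int read_like_io_read(size_t len)
{
	char stackbuf[32];
	char *buf = stackbuf;
	char *iovec = NULL;	/* plays the role of io_read()'s iovec */
	int ret;

	if (len > sizeof(stackbuf)) {
		iovec = malloc(len);
		if (!iovec)
			return -1;
		buf = iovec;
	}

	ret = do_read_step(buf, len);
	if (ret < 0) {
		/* error exit: free inline rather than "goto out_free" */
		free(iovec);	/* free(NULL) is a legal no-op */
		return ret;
	}

	/*
	 * Completion exit: as in the patch, an explicit NULL check skips
	 * the call entirely on the common no-allocation path; free() and
	 * kfree() both accept NULL, but the call itself isn't free.
	 */
	if (iovec)
		free(iovec);
	return ret;
}

int main(void)
{
	printf("read %d bytes\n", read_like_io_read(16));
	printf("read %d bytes\n", read_like_io_read(64));
	return 0;
}

The same trade-off drives the patch: with the free duplicated at the two exits instead of shared behind a label, each exit can do only the work its own path needs.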
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r-- | fs/io_uring.c | 31
1 file changed, 13 insertions(+), 18 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 25fffff27c76..35ad889afaec 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -3530,14 +3530,18 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 	}
 
 	ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
-	if (unlikely(ret))
-		goto out_free;
+	if (unlikely(ret)) {
+		kfree(iovec);
+		return ret;
+	}
 
 	ret = io_iter_do_read(req, iter);
 
 	if (ret == -EIOCBQUEUED) {
-		ret = 0;
-		goto out_free;
+		/* it's faster to check here then delegate to kfree */
+		if (iovec)
+			kfree(iovec);
+		return 0;
 	} else if (ret == -EAGAIN) {
 		/* IOPOLL retry should happen for io-wq threads */
 		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
@@ -3560,8 +3564,6 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 		return ret2;
 
 	rw = req->async_data;
-	/* it's copied and will be cleaned with ->io */
-	iovec = NULL;
 	/* now use our persistent iterator, if we aren't already */
 	iter = &rw->iter;
 retry:
@@ -3580,21 +3582,14 @@ retry:
 	 * do, then just retry at the new offset.
 	 */
 	ret = io_iter_do_read(req, iter);
-	if (ret == -EIOCBQUEUED) {
-		ret = 0;
-		goto out_free;
-	} else if (ret > 0 && ret < io_size) {
-		/* we got some bytes, but not all. retry. */
+	if (ret == -EIOCBQUEUED)
+		return 0;
+	/* we got some bytes, but not all. retry. */
+	if (ret > 0 && ret < io_size)
 		goto retry;
-	}
 done:
 	kiocb_done(kiocb, ret, cs);
-	ret = 0;
-out_free:
-	/* it's reportedly faster than delegating the null check to kfree() */
-	if (iovec)
-		kfree(iovec);
-	return ret;
+	return 0;
 }
 
 static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)