author	Pavel Begunkov <asml.silence@gmail.com>	2021-03-19 17:22:41 +0000
committer	Jens Axboe <axboe@kernel.dk>	2021-04-11 17:41:59 -0600
commit	68fb897966febe814f89f9462aa819abae00725f (patch)
tree	aa91b2f3fce7cc68d42e61227e9c8e62e96d40cb	/fs/io_uring.c
parent	2593553a01c803e01e7c5c2131993885879efbec (diff)
io_uring: inline io_clean_op()'s fast path
Inline io_clean_op(), leaving __io_clean_op() but renaming it. This will
be used in following patches.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
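For readers skimming the diff below, here is a minimal, self-contained C sketch of the pattern: the REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED test that used to live in the inline wrapper is hoisted into each call site, and the former __io_clean_op() takes over the io_clean_op() name. The struct layout, flag values, and helper names in the sketch are hypothetical stand-ins, not the kernel's definitions.

/*
 * Self-contained sketch (not kernel code) of the transformation in this
 * patch: the cheap flags test moves into each caller, so the out-of-line
 * cleanup function is only called when a request actually has something
 * to clean. All names and flag values below are hypothetical.
 */
#include <stdio.h>

#define REQ_F_NEED_CLEANUP	(1U << 0)
#define REQ_F_BUFFER_SELECTED	(1U << 1)

struct req {
	unsigned int flags;
};

/* Slow path; plays the role of __io_clean_op() before, io_clean_op() after. */
static void clean_op(struct req *req)
{
	printf("cleaning request, flags 0x%x\n", req->flags);
	req->flags &= ~(REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED);
}

/* Caller now performs the inlined fast-path check itself. */
static void complete_req(struct req *req)
{
	if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
		clean_op(req);
	/* ... fill in result/cflags here ... */
}

int main(void)
{
	struct req fast = { .flags = 0 };			/* no call made */
	struct req slow = { .flags = REQ_F_NEED_CLEANUP };	/* clean_op() runs */

	complete_req(&fast);
	complete_req(&slow);
	return 0;
}

In the fast (and most common) case a request has neither flag set, so after this change no function call is made at all; only requests that actually selected a buffer or need cleanup pay for the out-of-line call.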
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--	fs/io_uring.c	21
1 file changed, 9 insertions(+), 12 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 9f9eb853a083..00860dc04e82 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1032,7 +1032,7 @@ static void io_queue_linked_timeout(struct io_kiocb *req);
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
struct io_uring_rsrc_update *ip,
unsigned nr_args);
-static void __io_clean_op(struct io_kiocb *req);
+static void io_clean_op(struct io_kiocb *req);
static struct file *io_file_get(struct io_submit_state *state,
struct io_kiocb *req, int fd, bool fixed);
static void __io_queue_sqe(struct io_kiocb *req);
@@ -1063,12 +1063,6 @@ EXPORT_SYMBOL(io_uring_get_socket);
#define io_for_each_link(pos, head) \
for (pos = (head); pos; pos = pos->link)
-static inline void io_clean_op(struct io_kiocb *req)
-{
- if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
- __io_clean_op(req);
-}
-
static inline void io_set_resource_node(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
@@ -1544,7 +1538,9 @@ static void __io_cqring_fill_event(struct io_kiocb *req, long res,
set_bit(0, &ctx->cq_check_overflow);
ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
}
- io_clean_op(req);
+ if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
+ io_clean_op(req);
+
req->result = res;
req->compl.cflags = cflags;
req_ref_get(req);
@@ -1600,7 +1596,8 @@ static void io_req_complete_post(struct io_kiocb *req, long res,
static void io_req_complete_state(struct io_kiocb *req, long res,
unsigned int cflags)
{
- io_clean_op(req);
+ if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
+ io_clean_op(req);
req->result = res;
req->compl.cflags = cflags;
req->flags |= REQ_F_COMPLETE_INLINE;
@@ -1708,8 +1705,8 @@ static inline void io_put_file(struct io_kiocb *req, struct file *file,
static void io_dismantle_req(struct io_kiocb *req)
{
- io_clean_op(req);
-
+ if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
+ io_clean_op(req);
if (req->async_data)
kfree(req->async_data);
if (req->file)
@@ -5949,7 +5946,7 @@ static int io_req_defer(struct io_kiocb *req)
return -EIOCBQUEUED;
}
-static void __io_clean_op(struct io_kiocb *req)
+static void io_clean_op(struct io_kiocb *req)
{
if (req->flags & REQ_F_BUFFER_SELECTED) {
switch (req->opcode) {