diff options
author | Pavel Begunkov <asml.silence@gmail.com> | 2022-04-15 22:08:22 +0100 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2022-04-24 18:02:49 -0600 |
commit | 971cf9c19e97bad172bdb7142a23e6489d1b38f0 (patch) | |
tree | 6035c0a3aa932ea8d562a1a610ae5cf46b3d84f0 /fs/io_uring.c | |
parent | f22190570b213dcc84216ac07cfd0eeada010013 (diff) | |
download | lwn-971cf9c19e97bad172bdb7142a23e6489d1b38f0.tar.gz lwn-971cf9c19e97bad172bdb7142a23e6489d1b38f0.zip |
io_uring: minor refactoring for some tw handlers
Get rid of some useless local variables
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/7798327b684b7015f7e4300420142ddfcd317297.1650056133.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r-- | fs/io_uring.c | 14 |
1 file changed, 5 insertions(+), 9 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c index 76d51c6e3ab9..acb7978a31a5 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1729,7 +1729,6 @@ static inline void io_req_add_compl_list(struct io_kiocb *req) static void io_queue_async_work(struct io_kiocb *req, bool *dont_use) { - struct io_ring_ctx *ctx = req->ctx; struct io_kiocb *link = io_prep_linked_timeout(req); struct io_uring_task *tctx = req->task->io_uring; @@ -1749,8 +1748,9 @@ static void io_queue_async_work(struct io_kiocb *req, bool *dont_use) if (WARN_ON_ONCE(!same_thread_group(req->task, current))) req->work.flags |= IO_WQ_WORK_CANCEL; - trace_io_uring_queue_async_work(ctx, req, req->cqe.user_data, req->opcode, req->flags, - &req->work, io_wq_is_hashed(&req->work)); + trace_io_uring_queue_async_work(req->ctx, req, req->cqe.user_data, + req->opcode, req->flags, &req->work, + io_wq_is_hashed(&req->work)); io_wq_enqueue(tctx->io_wq, &req->work); if (link) io_queue_linked_timeout(link); @@ -2642,18 +2642,14 @@ static void io_req_task_work_add(struct io_kiocb *req, bool priority) static void io_req_task_cancel(struct io_kiocb *req, bool *locked) { - struct io_ring_ctx *ctx = req->ctx; - /* not needed for normal modes, but SQPOLL depends on it */ - io_tw_lock(ctx, locked); + io_tw_lock(req->ctx, locked); io_req_complete_failed(req, req->cqe.res); } static void io_req_task_submit(struct io_kiocb *req, bool *locked) { - struct io_ring_ctx *ctx = req->ctx; - - io_tw_lock(ctx, locked); + io_tw_lock(req->ctx, locked); /* req->task == current here, checking PF_EXITING is safe */ if (likely(!(req->task->flags & PF_EXITING))) __io_queue_sqe(req); |