author    | Jens Axboe <axboe@kernel.dk> | 2022-04-10 19:05:09 -0600
committer | Jens Axboe <axboe@kernel.dk> | 2022-04-11 17:06:20 -0600
commit    | 82733d168cbd3fe9dab603f05894316b99008924 (patch)
tree      | eb34dfacd2df84b3683b2cc7bd66c0f2d231a57b /fs/io_uring.c
parent    | 2804ecd8d3e3730b4f999cc1ff4b2441e1f4d513 (diff)
io_uring: stop using io_wq_work as an fd placeholder
There are two reasons why this isn't the best idea:
- It's an odd place to grab a bit of storage space from.
- It puts the 3rd io_kiocb cacheline into the hot path, where normal hot
path just needs the first two.
Use 'cflags' for joint fd/cflags storage. We only need fd until we
successfully issue, and we only need cflags once a request is done and is
completed.
Fixes: 6bf9c47a3989 ("io_uring: defer file assignment")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r-- | fs/io_uring.c | 12
1 file changed, 8 insertions, 4 deletions
```diff
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 3a97535d0550..38e62b1c6297 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -907,7 +907,11 @@ struct io_kiocb {
 
 	u64				user_data;
 	u32				result;
-	u32				cflags;
+	/* fd initially, then cflags for completion */
+	union {
+		u32			cflags;
+		int			fd;
+	};
 
 	struct io_ring_ctx		*ctx;
 	struct task_struct		*task;
@@ -7090,9 +7094,9 @@ static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags)
 		return true;
 
 	if (req->flags & REQ_F_FIXED_FILE)
-		req->file = io_file_get_fixed(req, req->work.fd, issue_flags);
+		req->file = io_file_get_fixed(req, req->fd, issue_flags);
 	else
-		req->file = io_file_get_normal(req, req->work.fd);
+		req->file = io_file_get_normal(req, req->fd);
 	if (req->file)
 		return true;
 
@@ -7630,7 +7634,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	if (io_op_defs[opcode].needs_file) {
 		struct io_submit_state *state = &ctx->submit_state;
 
-		req->work.fd = READ_ONCE(sqe->fd);
+		req->fd = READ_ONCE(sqe->fd);
 
 		/*
 		 * Plug now if we have more than 2 IO left after this, and the
```
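To make the lifetime argument in the commit message concrete, here is a minimal userspace sketch of the same overlapping-storage pattern: a request needs its fd only until issue succeeds, and its completion flags only once it completes, so a single union slot can serve both in turn. This is an illustration only, not the kernel code; `struct sketch_req`, `sketch_issue()`, and `sketch_complete()` are made-up names.

```c
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical request type, not the kernel's io_kiocb. */
struct sketch_req {
	uint8_t opcode;
	/* fd initially, then cflags for completion -- never needed at the same time */
	union {
		int      fd;     /* valid from init until the request is issued */
		uint32_t cflags; /* valid once the request has completed */
	};
};

/* Issue consumes the fd; after this the union slot is free to be reused. */
static int sketch_issue(struct sketch_req *req)
{
	char buf[16];

	return read(req->fd, buf, sizeof(buf)) < 0 ? -1 : 0;
}

/* Completion reuses the same storage for the completion flags. */
static void sketch_complete(struct sketch_req *req, uint32_t cflags)
{
	req->cflags = cflags;
}

int main(void)
{
	struct sketch_req req = { .opcode = 1, .fd = open("/dev/zero", O_RDONLY) };
	int fd = req.fd;

	if (fd < 0 || sketch_issue(&req) != 0)
		return 1;

	close(fd);
	sketch_complete(&req, 0);	/* req.fd must not be read past this point */
	printf("cflags after completion: %u\n", (unsigned)req.cflags);
	return 0;
}
```

The commit makes the same design choice: instead of borrowing space in io_wq_work (and pulling the third io_kiocb cacheline into the hot path), the fd overlays cflags, a field that only becomes meaningful after the request no longer needs the fd.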