author     Pavel Begunkov <asml.silence@gmail.com>    2020-06-29 13:13:02 +0300
committer  Jens Axboe <axboe@kernel.dk>               2020-06-30 08:39:59 -0600
commit     8eb7e2d00763367f345ef0b2a2eb4f8001ae40ce (patch)
tree       2e64d97502458098d3940bdbc03c787361e57493 /fs/io_uring.c
parent     a1a4661691c5f1a3af4c04f56ad68e2d1dbee3af (diff)
io_uring: kill REQ_F_TIMEOUT_NOSEQ
There are too many useless flags; kill REQ_F_TIMEOUT_NOSEQ, which can easily be inferred from req.timeout itself.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
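For reference, the whole change boils down to the small helper added by the patch below: a pure timeout is simply one whose sequence offset is zero, so req->timeout.off already carries the information the flag used to duplicate (the helper is copied from the hunk further down, with an explanatory comment added here):

static inline bool io_is_timeout_noseq(struct io_kiocb *req)
{
	/* off == 0: pure timeout, not tied to any CQE sequence */
	return !req->timeout.off;
}

Both former users of the flag, io_flush_timeouts() and io_timeout(), switch to this helper, and the REQ_F_TIMEOUT_NOSEQ bit is removed from the request-flags enum.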
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--  fs/io_uring.c  15
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index e9c8f52daf8f..8495c17b53d6 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -532,7 +532,6 @@ enum {
REQ_F_NOWAIT_BIT,
REQ_F_LINK_TIMEOUT_BIT,
REQ_F_ISREG_BIT,
- REQ_F_TIMEOUT_NOSEQ_BIT,
REQ_F_COMP_LOCKED_BIT,
REQ_F_NEED_CLEANUP_BIT,
REQ_F_OVERFLOW_BIT,
@@ -575,8 +574,6 @@ enum {
REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT),
/* regular file */
REQ_F_ISREG = BIT(REQ_F_ISREG_BIT),
- /* no timeout sequence */
- REQ_F_TIMEOUT_NOSEQ = BIT(REQ_F_TIMEOUT_NOSEQ_BIT),
/* completion under lock */
REQ_F_COMP_LOCKED = BIT(REQ_F_COMP_LOCKED_BIT),
/* needs cleanup */
@@ -1010,6 +1007,11 @@ static void io_ring_ctx_ref_free(struct percpu_ref *ref)
complete(&ctx->ref_comp);
}
+static inline bool io_is_timeout_noseq(struct io_kiocb *req)
+{
+ return !req->timeout.off;
+}
+
static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
struct io_ring_ctx *ctx;
@@ -1222,7 +1224,7 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx)
struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
struct io_kiocb, list);
- if (req->flags & REQ_F_TIMEOUT_NOSEQ)
+ if (io_is_timeout_noseq(req))
break;
if (req->timeout.target_seq != ctx->cached_cq_tail
				- atomic_read(&ctx->cq_timeouts))
@@ -5087,8 +5089,7 @@ static int io_timeout(struct io_kiocb *req)
* timeout event to be satisfied. If it isn't set, then this is
* a pure timeout request, sequence isn't used.
*/
- if (!off) {
- req->flags |= REQ_F_TIMEOUT_NOSEQ;
+ if (io_is_timeout_noseq(req)) {
entry = ctx->timeout_list.prev;
goto add;
}
@@ -5103,7 +5104,7 @@ static int io_timeout(struct io_kiocb *req)
list_for_each_prev(entry, &ctx->timeout_list) {
struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
- if (nxt->flags & REQ_F_TIMEOUT_NOSEQ)
+ if (io_is_timeout_noseq(nxt))
continue;
/* nxt.seq is behind @tail, otherwise would've been completed */
if (off >= nxt->timeout.target_seq - tail)
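Putting the two io_timeout() hunks together, the insertion path after this patch reads roughly as follows. This is a condensed, illustrative sketch, not the full kernel function: the wrapper name is invented, and locking, hrtimer arming and error handling are elided.

/* Illustrative only: how a timeout request is placed on ctx->timeout_list. */
static void io_timeout_insert_sketch(struct io_ring_ctx *ctx,
				     struct io_kiocb *req)
{
	u32 off = req->timeout.off;
	struct list_head *entry;
	u32 tail;

	if (io_is_timeout_noseq(req)) {
		/* pure timeout: nothing to sequence against, append at the tail */
		entry = ctx->timeout_list.prev;
		goto add;
	}

	tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
	req->timeout.target_seq = tail + off;

	/* walk backwards, skipping pure timeouts, to keep sequenced ones ordered */
	list_for_each_prev(entry, &ctx->timeout_list) {
		struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);

		if (io_is_timeout_noseq(nxt))
			continue;
		/* nxt.seq is behind @tail, otherwise would've been completed */
		if (off >= nxt->timeout.target_seq - tail)
			break;
	}
add:
	list_add(&req->list, entry);
}

Pure timeouts go straight to the tail of ctx->timeout_list, while sequenced timeouts are kept ordered by how many completions remain before they fire.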