commit 6df1db6b542436c6d429caa66e1045862fa36155
tree d89e1b5a8d3dba341bef5a0c4a0035b8ce9424fd
parent c2c4c83c58cbca23527fee93b49738a5a84272a1
author Pavel Begunkov <asml.silence@gmail.com> 2020-07-03 22:15:06 +0300
committer Jens Axboe <axboe@kernel.dk> 2020-07-05 15:07:35 -0600
io_uring: fix mis-refcounting linked timeouts
io_prep_linked_timeout() sets REQ_F_LINK_TIMEOUT, altering the refcounting of the following linked request. After that, someone should call io_queue_linked_timeout(), otherwise the submission reference of the linked timeout will never be dropped. That's what happens in io_steal_work() if io-wq decides to postpone the linked request with io_wqe_enqueue(). io_queue_linked_timeout() can also potentially be called twice without synchronisation during re-submission, e.g. in io_rw_resubmit().

The rule is: whoever did io_prep_linked_timeout() must also call io_queue_linked_timeout(). To avoid doing it twice, io_prep_linked_timeout() now returns non-NULL only on the first call; that is controlled by the REQ_F_LINK_TIMEOUT flag.

Also kill REQ_F_QUEUE_TIMEOUT.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
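The ownership rule the patch establishes can be illustrated with a minimal user-space C sketch (hypothetical names and a simplified request struct, not a drop-in kernel excerpt): a flag recorded on the request makes the prep step hand out the timeout at most once, so exactly one caller becomes responsible for queueing it.

	/*
	 * Sketch of "whoever preps must queue", under the assumption of a
	 * simplified struct req with a single linked timeout. The flag makes
	 * prep_linked_timeout() return non-NULL at most once per request.
	 */
	#include <stddef.h>

	#define REQ_F_LINK_TIMEOUT	(1U << 0)

	struct req {
		unsigned int flags;
		struct req *linked_timeout;	/* hypothetical: first linked request */
	};

	static struct req *prep_linked_timeout(struct req *req)
	{
		if (!req->linked_timeout)
			return NULL;
		if (req->flags & REQ_F_LINK_TIMEOUT)
			return NULL;	/* an earlier caller already took ownership */
		req->flags |= REQ_F_LINK_TIMEOUT;
		return req->linked_timeout;	/* caller must now queue it */
	}

	static void queue_linked_timeout(struct req *timeout)
	{
		/* arm the timer; its reference is dropped when it fires or is cancelled */
	}

	static void submit_work(struct req *req)
	{
		struct req *timeout = prep_linked_timeout(req);

		if (timeout)
			queue_linked_timeout(timeout);	/* happens exactly once per request */
		/* ... issue the request ... */
	}

With this shape there is no separate "needs queueing later" state to carry around, which is why the patch can delete REQ_F_QUEUE_TIMEOUT outright.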
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--	fs/io_uring.c	33
1 file changed, 7 insertions(+), 26 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 2b849984bae5..cf1b3d4ac241 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -538,7 +538,6 @@ enum {
REQ_F_POLLED_BIT,
REQ_F_BUFFER_SELECTED_BIT,
REQ_F_NO_FILE_TABLE_BIT,
- REQ_F_QUEUE_TIMEOUT_BIT,
REQ_F_WORK_INITIALIZED_BIT,
REQ_F_TASK_PINNED_BIT,
@@ -586,8 +585,6 @@ enum {
REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT),
/* doesn't need file table for this request */
REQ_F_NO_FILE_TABLE = BIT(REQ_F_NO_FILE_TABLE_BIT),
- /* needs to queue linked timeout */
- REQ_F_QUEUE_TIMEOUT = BIT(REQ_F_QUEUE_TIMEOUT_BIT),
/* io_wq_work is initialized */
REQ_F_WORK_INITIALIZED = BIT(REQ_F_WORK_INITIALIZED_BIT),
/* req->task is refcounted */
@@ -1842,7 +1839,7 @@ static void io_put_req(struct io_kiocb *req)
static struct io_wq_work *io_steal_work(struct io_kiocb *req)
{
- struct io_kiocb *timeout, *nxt = NULL;
+ struct io_kiocb *nxt;
/*
* A ref is owned by io-wq in which context we're. So, if that's the
@@ -1853,13 +1850,7 @@ static struct io_wq_work *io_steal_work(struct io_kiocb *req)
return NULL;
nxt = io_req_find_next(req);
- if (!nxt)
- return NULL;
-
- timeout = io_prep_linked_timeout(nxt);
- if (timeout)
- nxt->flags |= REQ_F_QUEUE_TIMEOUT;
- return &nxt->work;
+ return nxt ? &nxt->work : NULL;
}
/*
@@ -5702,24 +5693,15 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
return 0;
}
-static void io_arm_async_linked_timeout(struct io_kiocb *req)
-{
- struct io_kiocb *link;
-
- /* link head's timeout is queued in io_queue_async_work() */
- if (!(req->flags & REQ_F_QUEUE_TIMEOUT))
- return;
-
- link = list_first_entry(&req->link_list, struct io_kiocb, link_list);
- io_queue_linked_timeout(link);
-}
-
static struct io_wq_work *io_wq_submit_work(struct io_wq_work *work)
{
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+ struct io_kiocb *timeout;
int ret = 0;
- io_arm_async_linked_timeout(req);
+ timeout = io_prep_linked_timeout(req);
+ if (timeout)
+ io_queue_linked_timeout(timeout);
/* if NO_CANCEL is set, we must still run the work */
if ((work->flags & (IO_WQ_WORK_CANCEL|IO_WQ_WORK_NO_CANCEL)) ==
@@ -5893,8 +5875,7 @@ static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
if (!(req->flags & REQ_F_LINK_HEAD))
return NULL;
- /* for polled retry, if flag is set, we already went through here */
- if (req->flags & REQ_F_POLLED)
+ if (req->flags & REQ_F_LINK_TIMEOUT)
return NULL;
nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb,