author	Jens Axboe <axboe@kernel.dk>	2021-02-16 10:33:53 -0700
committer	Jens Axboe <axboe@kernel.dk>	2021-02-16 11:11:20 -0700
commit	0b81e80c813f92520667c872d499a2dba8377be6 (patch)
tree	de0d4fdfc6b131cf8e9f52aaf2b1dbb1d2fd645e /fs/io_uring.c
parent	0d4370cfe36b7f1719123b621a4ec4d9c7a25f89 (diff)
io_uring: tctx->task_lock should be IRQ safe
We add task_work from any context, hence we need to ensure that we can
tolerate it being from IRQ context as well.

Fixes: 7cbf1722d5fc ("io_uring: provide FIFO ordering for task_work")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
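For context, here is a minimal sketch of the locking rule the patch applies to tctx->task_lock. It is not part of the patch, and the example_* names are hypothetical: a spinlock that can be taken from interrupt context must be acquired with the IRQ-disabling variants, otherwise an interrupt arriving on the same CPU while the lock is held can re-enter the locking path and deadlock.

/*
 * Illustrative sketch only (not from the patch); example_lock, example_list,
 * example_item, example_add and example_drain are made-up names.
 */
#include <linux/spinlock.h>
#include <linux/list.h>

static DEFINE_SPINLOCK(example_lock);
static LIST_HEAD(example_list);

struct example_item {
	struct list_head node;
};

/*
 * May be called from process context *or* from hard-IRQ context
 * (analogous to io_task_work_add() being reachable from any context).
 */
static void example_add(struct example_item *item)
{
	unsigned long flags;

	/*
	 * spin_lock_irqsave() disables local interrupts while the lock is
	 * held and remembers the previous interrupt state.  A plain
	 * spin_lock() here would let an IRQ fire on this CPU, re-enter
	 * example_add(), and spin forever on example_lock.
	 */
	spin_lock_irqsave(&example_lock, flags);
	list_add_tail(&item->node, &example_list);
	spin_unlock_irqrestore(&example_lock, flags);
}

/*
 * Runs only in process context with interrupts known to be enabled, so the
 * cheaper _irq variant (unconditional disable/enable) is sufficient.
 */
static void example_drain(struct list_head *out)
{
	spin_lock_irq(&example_lock);
	list_splice_init(&example_list, out);
	spin_unlock_irq(&example_lock);
}

This mirrors the split in the patch: __tctx_task_work() runs only in task context and uses spin_lock_irq()/spin_unlock_irq(), while io_task_work_add() can be reached from contexts where the interrupt state is unknown and therefore has to save and restore it with spin_lock_irqsave()/spin_unlock_irqrestore().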
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--	fs/io_uring.c	13
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index a9d094f7060f..58dd10481106 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2186,10 +2186,10 @@ static bool __tctx_task_work(struct io_uring_task *tctx)
 	if (wq_list_empty(&tctx->task_list))
 		return false;
 
-	spin_lock(&tctx->task_lock);
+	spin_lock_irq(&tctx->task_lock);
 	list = tctx->task_list;
 	INIT_WQ_LIST(&tctx->task_list);
-	spin_unlock(&tctx->task_lock);
+	spin_unlock_irq(&tctx->task_lock);
 
 	node = list.first;
 	while (node) {
@@ -2236,13 +2236,14 @@ static int io_task_work_add(struct task_struct *tsk, struct io_kiocb *req,
 {
 	struct io_uring_task *tctx = tsk->io_uring;
 	struct io_wq_work_node *node, *prev;
+	unsigned long flags;
 	int ret;
 
 	WARN_ON_ONCE(!tctx);
 
-	spin_lock(&tctx->task_lock);
+	spin_lock_irqsave(&tctx->task_lock, flags);
 	wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
-	spin_unlock(&tctx->task_lock);
+	spin_unlock_irqrestore(&tctx->task_lock, flags);
 
 	/* task_work already pending, we're done */
 	if (test_bit(0, &tctx->task_state) ||
@@ -2257,7 +2258,7 @@ static int io_task_work_add(struct task_struct *tsk, struct io_kiocb *req,
 	 * in the list, it got run and we're fine.
 	 */
 	ret = 0;
-	spin_lock(&tctx->task_lock);
+	spin_lock_irqsave(&tctx->task_lock, flags);
 	wq_list_for_each(node, prev, &tctx->task_list) {
 		if (&req->io_task_work.node == node) {
 			wq_list_del(&tctx->task_list, node, prev);
@@ -2265,7 +2266,7 @@ static int io_task_work_add(struct task_struct *tsk, struct io_kiocb *req,
 			break;
 		}
 	}
-	spin_unlock(&tctx->task_lock);
+	spin_unlock_irqrestore(&tctx->task_lock, flags);
 	clear_bit(0, &tctx->task_state);
 	return ret;
 }