path: root/fs/io-wq.c
author    Jens Axboe <axboe@kernel.dk>  2019-12-17 08:46:33 -0700
committer Jens Axboe <axboe@kernel.dk>  2020-01-20 17:03:59 -0700
commit    895e2ca0f693c672902191747b548bdc56f0c7de (patch)
tree      f8584b4eb5a901df37e5c40aea54081e681a624f /fs/io-wq.c
parent    eddc7ef52a6b37b7ba3d1c8a8fbb63d5d9914f8a (diff)
io-wq: support concurrent non-blocking work
io-wq assumes that work will complete fast (and not block), so it doesn't
create a new worker when work is enqueued, if we already have at least one
worker running. This is done on the assumption that if work is running,
then it will complete fast.

Add an option to force io-wq to fork a new worker for work queued. This is
signaled by setting IO_WQ_WORK_CONCURRENT on the work item. For that case,
io-wq will create a new worker, even though workers are already running.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
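[Editor's note: callers opt in per work item by setting the flag before
handing the work to io-wq. A minimal sketch, assuming the io_wq_work,
INIT_IO_WORK(), and io_wq_enqueue() declarations from fs/io-wq.h of this
era; my_work_handler and queue_concurrent_work are hypothetical names:]

	/* Hypothetical handler; any io_wq_work callback fits here. */
	static void my_work_handler(struct io_wq_work **workptr)
	{
		struct io_wq_work *work = *workptr;

		/* ... potentially slow or blocking work ... */
	}

	static void queue_concurrent_work(struct io_wq *wq,
					  struct io_wq_work *work)
	{
		INIT_IO_WORK(work, my_work_handler);
		/*
		 * Ask io-wq for a dedicated worker even if other workers
		 * are already running; without this flag, enqueue only
		 * wakes a worker when acct->nr_running is zero.
		 */
		work->flags |= IO_WQ_WORK_CONCURRENT;
		io_wq_enqueue(wq, work);
	}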
Diffstat (limited to 'fs/io-wq.c')
-rw-r--r-- fs/io-wq.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 79eae29983ca..4d902c19ee5f 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -724,6 +724,7 @@ static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct,
 static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
 {
 	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
+	int work_flags;
 	unsigned long flags;
 
 	/*
@@ -738,12 +739,14 @@ static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
 		return;
 	}
 
+	work_flags = work->flags;
 	spin_lock_irqsave(&wqe->lock, flags);
 	wq_list_add_tail(&work->list, &wqe->work_list);
 	wqe->flags &= ~IO_WQE_FLAG_STALLED;
 	spin_unlock_irqrestore(&wqe->lock, flags);
 
-	if (!atomic_read(&acct->nr_running))
+	if ((work_flags & IO_WQ_WORK_CONCURRENT) ||
+	    !atomic_read(&acct->nr_running))
 		io_wqe_wake_worker(wqe, acct);
 }
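
[Editor's note: the ordering in the second hunk appears deliberate.
work->flags is copied into the local work_flags before the item is added
to wqe->work_list; once it is visible on the list, a running worker may
execute and free it, so dereferencing work after the unlock would risk a
use-after-free. The wake decision therefore tests the saved copy.]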