author | Hao Xu <haoxu@linux.alibaba.com> | 2022-02-06 17:52:40 +0800
committer | Jens Axboe <axboe@kernel.dk> | 2022-03-10 06:32:49 -0700
commit | e13fb1fe1483f6cd6452f25b866ffadf5ee0eff6 (patch)
tree | 5222b2a47c1c29d1be37f95716ae67e877b0b8dc /fs/io-wq.c
parent | 42abc95f05bff5180ac40c7ba5726b73c1d5e2f4 (diff)
download | lwn-e13fb1fe1483f6cd6452f25b866ffadf5ee0eff6.tar.gz lwn-e13fb1fe1483f6cd6452f25b866ffadf5ee0eff6.zip
io-wq: reduce acct->lock crossing functions lock/unlock
Reduce the places where acct->lock is taken in one function and released in
another; each function now locks and unlocks acct->lock itself, which makes
the code clearer (a standalone sketch of the resulting pattern follows below).
Signed-off-by: Hao Xu <haoxu@linux.alibaba.com>
Link: https://lore.kernel.org/r/20220206095241.121485-3-haoxu@linux.alibaba.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
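The locking pattern the patch converges on can be illustrated outside the kernel. The sketch below is a userspace analogue built on assumptions, not the io-wq code itself: struct queue, queue_has_work(), handle_work(), and worker_loop() are hypothetical stand-ins for io_wqe_acct, io_acct_run_queue(), io_worker_handle_work(), and the io_wqe_worker() loop, and a pthread mutex stands in for the raw spinlock acct->lock.

```c
/*
 * Minimal sketch of "lock and unlock inside the same function".
 * Assumed/hypothetical names: struct queue, queue_has_work(),
 * handle_work(), worker_loop() -- none of these exist in fs/io-wq.c.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct work {
	struct work *next;
};

struct queue {
	pthread_mutex_t lock;	/* plays the role of acct->lock */
	struct work *head;	/* pending work items */
	bool stalled;		/* plays the role of IO_ACCT_STALLED_BIT */
};

/*
 * After the patch: the check takes and drops the lock itself and just
 * returns a boolean.  Before the patch, callers had to lock, call the
 * check, and remember which return paths still held the lock.
 */
static bool queue_has_work(struct queue *q)
{
	bool ret = false;

	pthread_mutex_lock(&q->lock);
	if (q->head != NULL && !q->stalled)
		ret = true;
	pthread_mutex_unlock(&q->lock);

	return ret;
}

static void handle_work(struct queue *q)
{
	/* would pop one item under q->lock and run it; elided here */
	(void)q;
}

/* The worker loop becomes a plain while loop instead of lock/goto/unlock. */
static void worker_loop(struct queue *q)
{
	while (queue_has_work(q))
		handle_work(q);
}

int main(void)
{
	static struct queue q = { .lock = PTHREAD_MUTEX_INITIALIZER };

	worker_loop(&q);	/* returns immediately: the queue is empty */
	return 0;
}
```

The point of the shape above is that every critical section on the lock begins and ends within a single function, so no caller has to track which paths return with the lock still held.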
Diffstat (limited to 'fs/io-wq.c')
-rw-r--r-- | fs/io-wq.c | 32
1 file changed, 12 insertions, 20 deletions
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 9595616ccaa3..f7b7fa396faf 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -239,10 +239,15 @@ static void io_worker_exit(struct io_worker *worker)
 
 static inline bool io_acct_run_queue(struct io_wqe_acct *acct)
 {
+        bool ret = false;
+
+        raw_spin_lock(&acct->lock);
         if (!wq_list_empty(&acct->work_list) &&
             !test_bit(IO_ACCT_STALLED_BIT, &acct->flags))
-                return true;
-        return false;
+                ret = true;
+        raw_spin_unlock(&acct->lock);
+
+        return ret;
 }
 
 /*
@@ -395,13 +400,9 @@ static void io_wqe_dec_running(struct io_worker *worker)
         if (!atomic_dec_and_test(&acct->nr_running))
                 return;
 
-        raw_spin_lock(&wqe->lock);
-        if (!io_acct_run_queue(acct)) {
-                raw_spin_unlock(&wqe->lock);
+        if (!io_acct_run_queue(acct))
                 return;
-        }
 
-        raw_spin_unlock(&wqe->lock);
         atomic_inc(&acct->nr_running);
         atomic_inc(&wqe->wq->worker_refs);
         io_queue_worker_create(worker, acct, create_worker_cb);
@@ -544,7 +545,6 @@ static void io_assign_current_work(struct io_worker *worker,
 static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);
 
 static void io_worker_handle_work(struct io_worker *worker)
-        __releases(acct->lock)
 {
         struct io_wqe_acct *acct = io_wqe_get_acct(worker);
         struct io_wqe *wqe = worker->wqe;
@@ -561,6 +561,7 @@ static void io_worker_handle_work(struct io_worker *worker)
                  * can't make progress, any work completion or insertion will
                  * clear the stalled flag.
                  */
+                raw_spin_lock(&acct->lock);
                 work = io_get_next_work(acct, worker);
                 raw_spin_unlock(&acct->lock);
                 if (work) {
@@ -614,8 +615,6 @@ static void io_worker_handle_work(struct io_worker *worker)
                                 wake_up(&wq->hash->wait);
                         }
                 } while (work);
-
-                raw_spin_lock(&acct->lock);
         } while (1);
 }
 
@@ -639,14 +638,9 @@ static int io_wqe_worker(void *data)
                 long ret;
 
                 set_current_state(TASK_INTERRUPTIBLE);
-loop:
-                raw_spin_lock(&acct->lock);
-                if (io_acct_run_queue(acct)) {
+                while (io_acct_run_queue(acct))
                         io_worker_handle_work(worker);
-                        goto loop;
-                } else {
-                        raw_spin_unlock(&acct->lock);
-                }
+
                 raw_spin_lock(&wqe->lock);
                 /* timed out, exit unless we're the last worker */
                 if (last_timeout && acct->nr_workers > 1) {
@@ -671,10 +665,8 @@ loop:
                 last_timeout = !ret;
         }
 
-        if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
-                raw_spin_lock(&acct->lock);
+        if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
                 io_worker_handle_work(worker);
-        }
 
         audit_free(current);
         io_worker_exit(worker);