author | Pavel Begunkov <asml.silence@gmail.com> | 2021-05-16 22:58:00 +0100
committer | Jens Axboe <axboe@kernel.dk> | 2021-06-14 08:23:04 -0600
commit | e4b6d902a9e38f424ce118106ea4d1665b7951b5 (patch)
tree | a8ae910156c5c4ac75c809ac76bf180a699e2144 /fs/io_uring.c
parent | 009c9aa5be652675a06d5211e1640e02bbb1c33d (diff)
download | lwn-e4b6d902a9e38f424ce118106ea4d1665b7951b5.tar.gz lwn-e4b6d902a9e38f424ce118106ea4d1665b7951b5.zip
io_uring: improve sqpoll event/state handling
As sqd->state changes rarely, don't check events one by one but look at
them all at once. Add a helper function for that. Also, don't go to sleep
waiting for events with the STOP flag set.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/645025f95c7eeec97f88ff497785f4f1d6f3966f.1621201931.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r-- | fs/io_uring.c | 45
1 file changed, 28 insertions(+), 17 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index fa8794c61af7..24c0042b0de7 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6757,6 +6757,11 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
 	return submitted;
 }
 
+static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
+{
+	return READ_ONCE(sqd->state);
+}
+
 static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
 {
 	/* Tell userspace we may need a wakeup call */
@@ -6815,6 +6820,24 @@ static void io_sqd_update_thread_idle(struct io_sq_data *sqd)
 	sqd->sq_thread_idle = sq_thread_idle;
 }
 
+static bool io_sqd_handle_event(struct io_sq_data *sqd)
+{
+	bool did_sig = false;
+	struct ksignal ksig;
+
+	if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
+	    signal_pending(current)) {
+		mutex_unlock(&sqd->lock);
+		if (signal_pending(current))
+			did_sig = get_signal(&ksig);
+		cond_resched();
+		mutex_lock(&sqd->lock);
+	}
+	io_run_task_work();
+	io_run_task_work_head(&sqd->park_task_work);
+	return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
+}
+
 static int io_sq_thread(void *data)
 {
 	struct io_sq_data *sqd = data;
@@ -6836,29 +6859,17 @@ static int io_sq_thread(void *data)
 	/* a user may had exited before the thread started */
 	io_run_task_work_head(&sqd->park_task_work);
 
-	while (!test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state)) {
+	while (1) {
 		int ret;
 		bool cap_entries, sqt_spin, needs_sched;
 
-		if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
-		    signal_pending(current)) {
-			bool did_sig = false;
-
-			mutex_unlock(&sqd->lock);
-			if (signal_pending(current)) {
-				struct ksignal ksig;
-
-				did_sig = get_signal(&ksig);
-			}
-			cond_resched();
-			mutex_lock(&sqd->lock);
-			io_run_task_work();
-			io_run_task_work_head(&sqd->park_task_work);
-			if (did_sig)
+		if (io_sqd_events_pending(sqd) || signal_pending(current)) {
+			if (io_sqd_handle_event(sqd))
 				break;
 			timeout = jiffies + sqd->sq_thread_idle;
 			continue;
 		}
+
 		sqt_spin = false;
 		cap_entries = !list_is_singular(&sqd->ctx_list);
 		list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
@@ -6882,7 +6893,7 @@ static int io_sq_thread(void *data)
 		}
 
 		prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
-		if (!test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state)) {
+		if (!io_sqd_events_pending(sqd)) {
 			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
 				io_ring_set_wakeup_flag(ctx);
 
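For readers who want the pattern outside the kernel tree: the commit keeps all rare control events as bits in a single state word, tests that word once per loop iteration, and only decodes the individual bits in a slow-path helper that also reports whether the loop must stop, so the thread never goes back to waiting with the stop bit set. Below is a minimal userspace C sketch of that shape; every name in it (worker_state, events_pending, handle_events, the WORKER_SHOULD_* bits) is invented for illustration and is not the kernel's sqpoll API.

/*
 * Standalone sketch of the pattern applied by this commit, NOT kernel code.
 * Rare control events live as bits in one state word; the hot loop does a
 * single load to ask "anything pending?", and only a slow-path helper
 * decodes the individual bits and reports "stop".
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum {
	WORKER_SHOULD_STOP = 1u << 0,	/* analogous to IO_SQ_THREAD_SHOULD_STOP */
	WORKER_SHOULD_PARK = 1u << 1,	/* analogous to IO_SQ_THREAD_SHOULD_PARK */
};

static atomic_uint worker_state;

/* fast path: one load instead of one test per possible event */
static inline bool events_pending(void)
{
	return atomic_load_explicit(&worker_state, memory_order_relaxed) != 0;
}

/* slow path: decode individual events; returns true if the loop must exit */
static bool handle_events(void)
{
	unsigned int state = atomic_load_explicit(&worker_state,
						  memory_order_acquire);

	if (state & WORKER_SHOULD_PARK) {
		/* a real park/unpark handshake would happen here */
		atomic_fetch_and(&worker_state, ~(unsigned int)WORKER_SHOULD_PARK);
	}
	/* report STOP so the caller breaks out instead of sleeping again */
	return state & WORKER_SHOULD_STOP;
}

static void worker_loop(void)
{
	while (1) {
		if (events_pending()) {
			if (handle_events())
				break;		/* stop requested */
			continue;		/* re-arm idle timeout, retry */
		}
		/* ... one round of real work, then possibly sleep ... */
	}
}

int main(void)
{
	/* simulate another thread asking the worker to park and then stop */
	atomic_fetch_or(&worker_state, WORKER_SHOULD_PARK | WORKER_SHOULD_STOP);
	worker_loop();
	puts("worker exited on STOP");
	return 0;
}

The design point mirrored here is that the fast path pays only a single relaxed load when nothing is pending, while the rarely taken slow path is free to do heavier work such as dropping and retaking locks, just as io_sqd_handle_event() does around get_signal().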