| author | Takashi Iwai <tiwai@suse.de> | 2021-03-30 17:42:40 +0200 |
|---|---|---|
| committer | Takashi Iwai <tiwai@suse.de> | 2021-03-30 17:42:40 +0200 |
| commit | 5b1ed7df01335ecf686edf490948054078d5766d (patch) | |
| tree | 06245eabf0eb1441b2f9a8c93e73915b5a54ed0f /fs/io-wq.c | |
| parent | abc21649b3e5c34b143bf86f0c78e33d5815e250 (diff) | |
| parent | a135dfb5de1501327895729b4f513370d2555b4d (diff) | |
Merge tag 'tags/mute-led-rework' into for-next
ALSA: control - add generic LED API
This patchset resolves the diversity in audio LED
control among the ALSA drivers. A new control layer registration
is introduced which allows running additional operations on
top of the elementary ALSA sound controls.
A new control access group (three bits in the access flags)
was introduced to carry the LED group information for
the sound controls. The low-level sound drivers can simply
mark those controls using this access group. This information
is not exported to user space, but user space can
manage the LED sound control associations through sysfs
(last patch), per Mark's request. This makes things fully
configurable from both the kernel and user space (UCM).
The actual state ('route') evaluation is really easy:
a minimum-value check across all channels / controls / cards.
If more complicated logic is required for a given piece of hardware,
the card driver may eventually export a new read-only
sound control for the LED group and do the logic itself.
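Concretely, the generic evaluation can be pictured like this (a minimal sketch, not the actual kernel code; led_group_muted() and the flat value array are illustrative assumptions):

```c
#include <stdbool.h>

/*
 * Sketch of the "route" evaluation described above: the group counts
 * as muted only when every channel of every grouped control sits at
 * its minimum value. The trigger layer would then map this aggregate
 * state onto the LED according to its configured mode.
 */
static bool led_group_muted(const long *values, int count, long min)
{
	int i;

	for (i = 0; i < count; i++)
		if (values[i] > min)	/* any unmuted channel flips the result */
			return false;
	return true;
}
```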
The new LED trigger control code is completely separate
and optional (there is no symbol dependency).
The full code separation would eventually allow moving this
LED trigger control to user space. For now, it replaces the
functionality already present in kernel space (the HDA drivers)
and allows quick adoption for recent hardware
(ASoC codecs including SoundWire).
When loaded, the optional module stands alone, as lsmod shows:

    snd_ctl_led             24576  0
The sound driver implementation is really easy (a sketch follows the list):
1) call snd_ctl_led_request() when the control LED layer should be
automatically activated
(it calls request_module("snd-ctl-led") on demand)
2) mark all related kcontrols with
SNDRV_CTL_ELEM_ACCESS_SPK_LED or
SNDRV_CTL_ELEM_ACCESS_MIC_LED
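As a rough illustration of those two steps in a driver (hypothetical code: the my_* callbacks and function are placeholders; only snd_ctl_led_request() and the SNDRV_CTL_ELEM_ACCESS_*_LED flags come from this patchset):

```c
#include <sound/core.h>
#include <sound/control.h>

/* Placeholder element callbacks; real implementations elided. */
static int my_switch_info(struct snd_kcontrol *kc, struct snd_ctl_elem_info *ui);
static int my_switch_get(struct snd_kcontrol *kc, struct snd_ctl_elem_value *uv);
static int my_switch_put(struct snd_kcontrol *kc, struct snd_ctl_elem_value *uv);

/* Step 2: tag the mute control so the generic LED layer tracks it. */
static const struct snd_kcontrol_new my_spk_switch = {
	.iface	= SNDRV_CTL_ELEM_IFACE_MIXER,
	.name	= "Speaker Playback Switch",
	.access	= SNDRV_CTL_ELEM_ACCESS_READWRITE |
		  SNDRV_CTL_ELEM_ACCESS_SPK_LED,
	.info	= my_switch_info,
	.get	= my_switch_get,
	.put	= my_switch_put,
};

static int my_driver_build_controls(struct snd_card *card)
{
	/* Step 1: activate the LED layer; loads snd-ctl-led on demand. */
	snd_ctl_led_request();

	return snd_ctl_add(card, snd_ctl_new1(&my_spk_switch, NULL));
}
```

The driver's whole contract is the access-flag marking; grouping, state evaluation, and the LED binding live in the optional snd-ctl-led module.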
Link: https://lore.kernel.org/r/20210317172945.842280-1-perex@perex.cz
Signed-off-by: Takashi Iwai <tiwai@suse.de>
Diffstat (limited to 'fs/io-wq.c')
-rw-r--r-- fs/io-wq.c | 65
1 file changed, 41 insertions(+), 24 deletions(-)
```diff
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 28868eb4cd09..7434eb40ca8c 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -16,7 +16,6 @@
 #include <linux/rculist_nulls.h>
 #include <linux/cpu.h>
 #include <linux/tracehook.h>
-#include <linux/freezer.h>
 
 #include "../kernel/sched/sched.h"
 #include "io-wq.h"
@@ -110,7 +109,6 @@ struct io_wq {
 	io_wq_work_fn *do_work;
 
 	struct task_struct *manager;
-	struct user_struct *user;
 
 	struct io_wq_hash *hash;
@@ -387,13 +385,14 @@ static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
 	return NULL;
 }
 
-static void io_flush_signals(void)
+static bool io_flush_signals(void)
 {
-	if (unlikely(test_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL))) {
-		if (current->task_works)
-			task_work_run();
-		clear_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL);
+	if (unlikely(test_thread_flag(TIF_NOTIFY_SIGNAL))) {
+		__set_current_state(TASK_RUNNING);
+		tracehook_notify_signal();
+		return true;
 	}
+	return false;
 }
 
 static void io_assign_current_work(struct io_worker *worker,
@@ -489,6 +488,8 @@ static int io_wqe_worker(void *data)
 	set_task_comm(current, buf);
 
 	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
+		long ret;
+
 		set_current_state(TASK_INTERRUPTIBLE);
 loop:
 		raw_spin_lock_irq(&wqe->lock);
@@ -498,11 +499,18 @@ loop:
 		}
 		__io_worker_idle(wqe, worker);
 		raw_spin_unlock_irq(&wqe->lock);
-		io_flush_signals();
-		if (schedule_timeout(WORKER_IDLE_TIMEOUT))
+		if (io_flush_signals())
 			continue;
-		if (fatal_signal_pending(current))
+		ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
+		if (signal_pending(current)) {
+			struct ksignal ksig;
+
+			if (!get_signal(&ksig))
+				continue;
 			break;
+		}
+		if (ret)
+			continue;
 		/* timed out, exit unless we're the fixed worker */
 		if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
 		    !(worker->flags & IO_WORKER_F_FIXED))
@@ -592,7 +600,7 @@ static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
 	tsk->pf_io_worker = worker;
 	worker->task = tsk;
 	set_cpus_allowed_ptr(tsk, cpumask_of_node(wqe->node));
-	tsk->flags |= PF_NOFREEZE | PF_NO_SETAFFINITY;
+	tsk->flags |= PF_NO_SETAFFINITY;
 
 	raw_spin_lock_irq(&wqe->lock);
 	hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
@@ -710,9 +718,13 @@ static int io_wq_manager(void *data)
 		set_current_state(TASK_INTERRUPTIBLE);
 		io_wq_check_workers(wq);
 		schedule_timeout(HZ);
-		try_to_freeze();
-		if (fatal_signal_pending(current))
+		if (signal_pending(current)) {
+			struct ksignal ksig;
+
+			if (!get_signal(&ksig))
+				continue;
 			set_bit(IO_WQ_BIT_EXIT, &wq->state);
+		}
 	} while (!test_bit(IO_WQ_BIT_EXIT, &wq->state));
 
 	io_wq_check_workers(wq);
@@ -722,9 +734,9 @@ static int io_wq_manager(void *data)
 		io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
 	rcu_read_unlock();
 
-	/* we might not ever have created any workers */
-	if (atomic_read(&wq->worker_refs))
-		wait_for_completion(&wq->worker_done);
+	if (atomic_dec_and_test(&wq->worker_refs))
+		complete(&wq->worker_done);
+	wait_for_completion(&wq->worker_done);
 
 	spin_lock_irq(&wq->hash->wait.lock);
 	for_each_node(node)
@@ -774,7 +786,10 @@ static int io_wq_fork_manager(struct io_wq *wq)
 	if (wq->manager)
 		return 0;
 
-	reinit_completion(&wq->worker_done);
+	WARN_ON_ONCE(test_bit(IO_WQ_BIT_EXIT, &wq->state));
+
+	init_completion(&wq->worker_done);
+	atomic_set(&wq->worker_refs, 1);
 	tsk = create_io_thread(io_wq_manager, wq, NUMA_NO_NODE);
 	if (!IS_ERR(tsk)) {
 		wq->manager = get_task_struct(tsk);
@@ -782,6 +797,9 @@ static int io_wq_fork_manager(struct io_wq *wq)
 		return 0;
 	}
 
+	if (atomic_dec_and_test(&wq->worker_refs))
+		complete(&wq->worker_done);
+
 	return PTR_ERR(tsk);
 }
@@ -794,8 +812,7 @@ static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
 	/* Can only happen if manager creation fails after exec */
 	if (io_wq_fork_manager(wqe->wq) ||
 	    test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state)) {
-		work->flags |= IO_WQ_WORK_CANCEL;
-		wqe->wq->do_work(work);
+		io_run_cancel(work, wqe);
 		return;
 	}
@@ -1018,13 +1035,9 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
 	init_completion(&wq->exited);
 	refcount_set(&wq->refs, 1);
 
-	init_completion(&wq->worker_done);
-	atomic_set(&wq->worker_refs, 0);
-
 	ret = io_wq_fork_manager(wq);
 	if (!ret)
 		return wq;
-
 err:
 	io_wq_put_hash(data->hash);
 	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
@@ -1058,7 +1071,11 @@ static void io_wq_destroy(struct io_wq *wq)
 
 	for_each_node(node) {
 		struct io_wqe *wqe = wq->wqes[node];
-		WARN_ON_ONCE(!wq_list_empty(&wqe->work_list));
+		struct io_cb_cancel_data match = {
+			.fn		= io_wq_work_match_all,
+			.cancel_all	= true,
+		};
+		io_wqe_cancel_pending_work(wqe, &match);
 		kfree(wqe);
 	}
 	io_wq_put_hash(wq->hash);
```