author		Marc Zyngier <maz@kernel.org>	2022-04-05 19:50:39 +0100
committer	Thomas Gleixner <tglx@linutronix.de>	2022-04-10 21:06:30 +0200
commit		33de0aa4bae982ed6f7c777f86b5af3e627ac937 (patch)
tree		b4443df13aec797af540398751b16d269a12c099
parent		d802057c7c553ad426520a053da9f9fe08e2c35a (diff)
genirq: Always limit the affinity to online CPUs
When booting with maxcpus=<small number> (or even loading a driver while
most CPUs are offline), it is pretty easy to observe managed affinities
containing a mix of online and offline CPUs being passed to the irqchip
driver.

This means that the irqchip cannot trust the affinity passed down from the
core code, which is a bit annoying and requires (at least in theory) all
drivers to implement some sort of affinity narrowing.

In order to address this, always limit the cpumask to the set of online
CPUs.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20220405185040.206297-3-maz@kernel.org
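To make the "affinity narrowing" the message refers to concrete, here is a rough driver-side sketch (hypothetical callback and names, not taken from any in-tree irqchip) of what every driver would otherwise have to do before programming the hardware:

#include <linux/cpumask.h>
#include <linux/irq.h>

/*
 * Hypothetical .irq_set_affinity callback: drop offline CPUs from the
 * requested mask before touching the controller. After this patch the
 * core performs this step once in irq_do_set_affinity(), so the irqchip
 * can trust that the mask it receives only contains online CPUs.
 */
static int example_irq_set_affinity(struct irq_data *d,
				    const struct cpumask *mask,
				    bool force)
{
	struct cpumask online;

	/* Keep only the online CPUs from the requested affinity */
	cpumask_and(&online, mask, cpu_online_mask);
	if (cpumask_empty(&online))
		return -EINVAL;

	/* ... pick a CPU from 'online' and program the controller ... */

	return IRQ_SET_MASK_OK;
}

Note that an on-stack struct cpumask does not scale to large NR_CPUS configurations, which is one reason the core-side change below keeps using a static tmp_mask protected by a raw spinlock rather than a local mask.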
-rw-r--r--	kernel/irq/manage.c	25
1 file changed, 17 insertions, 8 deletions
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index c03f71d5ec10..f71ecc100545 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -222,11 +222,16 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 {
 	struct irq_desc *desc = irq_data_to_desc(data);
 	struct irq_chip *chip = irq_data_get_irq_chip(data);
+	const struct cpumask *prog_mask;
 	int ret;
+	static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
+	static struct cpumask tmp_mask;
+
 	if (!chip || !chip->irq_set_affinity)
 		return -EINVAL;
+	raw_spin_lock(&tmp_mask_lock);
 	/*
 	 * If this is a managed interrupt and housekeeping is enabled on
 	 * it check whether the requested affinity mask intersects with
@@ -248,24 +253,28 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	 */
 	if (irqd_affinity_is_managed(data) &&
 	    housekeeping_enabled(HK_TYPE_MANAGED_IRQ)) {
-		const struct cpumask *hk_mask, *prog_mask;
-
-		static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
-		static struct cpumask tmp_mask;
+		const struct cpumask *hk_mask;
 		hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ);
-		raw_spin_lock(&tmp_mask_lock);
 		cpumask_and(&tmp_mask, mask, hk_mask);
 		if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
 			prog_mask = mask;
 		else
 			prog_mask = &tmp_mask;
-		ret = chip->irq_set_affinity(data, prog_mask, force);
-		raw_spin_unlock(&tmp_mask_lock);
 	} else {
-		ret = chip->irq_set_affinity(data, mask, force);
+		prog_mask = mask;
 	}
+
+	/* Make sure we only provide online CPUs to the irqchip */
+	cpumask_and(&tmp_mask, prog_mask, cpu_online_mask);
+	if (!cpumask_empty(&tmp_mask))
+		ret = chip->irq_set_affinity(data, &tmp_mask, force);
+	else
+		ret = -EINVAL;
+
+	raw_spin_unlock(&tmp_mask_lock);
+
 	switch (ret) {
 	case IRQ_SET_MASK_OK:
 	case IRQ_SET_MASK_OK_DONE:
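Read as the resulting code rather than as a diff, the affinity path now looks roughly like this (condensed sketch of the two hunks above; declarations and the return-value handling below the switch are omitted):

	raw_spin_lock(&tmp_mask_lock);

	if (irqd_affinity_is_managed(data) &&
	    housekeeping_enabled(HK_TYPE_MANAGED_IRQ)) {
		/* Prefer housekeeping CPUs when the requested mask covers them */
		cpumask_and(&tmp_mask, mask, housekeeping_cpumask(HK_TYPE_MANAGED_IRQ));
		if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
			prog_mask = mask;
		else
			prog_mask = &tmp_mask;
	} else {
		prog_mask = mask;
	}

	/* Whatever was selected above, only hand online CPUs to the irqchip */
	cpumask_and(&tmp_mask, prog_mask, cpu_online_mask);
	if (!cpumask_empty(&tmp_mask))
		ret = chip->irq_set_affinity(data, &tmp_mask, force);
	else
		ret = -EINVAL;

	raw_spin_unlock(&tmp_mask_lock);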