author    David Daney <david.daney@cavium.com>    2013-09-11 14:23:29 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2013-09-11 15:58:25 -0700
commit    202da400570d991bacda4a06e878cb901e96a783 (patch)
tree      135d76acc021c2a67b8c24dc1084b3d6cf8e01e5 /kernel
parent    f9597f24c089dcbddbd2d9e99fbf00df57fb70c6 (diff)
kernel/smp.c: quit unconditionally enabling irqs in on_each_cpu_mask().
As in commit f21afc25f9ed ("smp.h: Use local_irq_{save,restore}() in !SMP
version of on_each_cpu()"), we don't want to enable irqs if they are not
already enabled.  I don't know of any bugs currently caused by this
unconditional local_irq_enable(), but I want to use this function in
MIPS/OCTEON early boot (when we have early_boot_irqs_disabled).

This also makes this function have similar semantics to on_each_cpu()
which is good in itself.

Signed-off-by: David Daney <david.daney@cavium.com>
Cc: Gilad Ben-Yossef <gilad@benyossef.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
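To make the motivation concrete, here is a minimal, hypothetical sketch (not part of the patch) of the kind of early-boot caller the commit message describes: a function run across a CPU mask while interrupts are still globally disabled. The names example_early_setup() and init_percpu_state() are invented for illustration; only on_each_cpu_mask(), cpu_online_mask and the IRQ-flag helpers are real kernel APIs. With the old local_irq_disable()/local_irq_enable() pair, the call would return with interrupts force-enabled on the calling CPU; with local_irq_save()/local_irq_restore() the caller's IRQ state is preserved.

#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpumask.h>

/* Hypothetical per-CPU callback; runs with IRQs off on the local CPU. */
static void init_percpu_state(void *info)
{
	/* per-CPU setup work would go here */
}

/* Hypothetical early-boot path, entered while IRQs are still disabled. */
static void __init example_early_setup(void)
{
	on_each_cpu_mask(cpu_online_mask, init_percpu_state, NULL, true);
	/*
	 * Before this patch: if the current CPU is in the mask, IRQs are
	 * now unconditionally enabled behind the caller's back.
	 * After this patch: the caller's IRQ state is restored, so early
	 * boot can keep running with IRQs disabled as it expects.
	 */
}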
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/smp.c  11
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/kernel/smp.c b/kernel/smp.c
index 3bb6ae533cdf..0564571dcdf7 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -575,8 +575,10 @@ EXPORT_SYMBOL(on_each_cpu);
  *
  * If @wait is true, then returns once @func has returned.
  *
- * You must not call this function with disabled interrupts or
- * from a hardware interrupt handler or from a bottom half handler.
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler. The
+ * exception is that it may be used during early boot while
+ * early_boot_irqs_disabled is set.
  */
 void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
 			void *info, bool wait)
@@ -585,9 +587,10 @@ void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
 
 	smp_call_function_many(mask, func, info, wait);
 	if (cpumask_test_cpu(cpu, mask)) {
-		local_irq_disable();
+		unsigned long flags;
+		local_irq_save(flags);
 		func(info);
-		local_irq_enable();
+		local_irq_restore(flags);
 	}
 	put_cpu();
 }
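For reference, this is how on_each_cpu_mask() reads once the two hunks above are applied; the opening lines are surrounding context that the diff does not show, and the inline comments are added here for explanation. The only behavioral change is that the IRQ state around the local invocation of func() is saved and restored instead of being unconditionally enabled afterwards.

void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
			void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		unsigned long flags;
		local_irq_save(flags);		/* remember the caller's IRQ state */
		func(info);
		local_irq_restore(flags);	/* put it back rather than forcing IRQs on */
	}
	put_cpu();
}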