author    Ingo Molnar <mingo@kernel.org>  2014-07-16 15:10:07 +0200
committer Ingo Molnar <mingo@kernel.org>  2014-07-16 15:10:07 +0200
commit    d26fad5b38e1c4667d4f2604936e59c837caa54d (patch)
tree      04b524a69a0129c181567445bff18847a1b44721 /kernel/smp.c
parent    e720fff6341fe4b95e5a93c939bd3c77fa55ced4 (diff)
parent    1795cd9b3a91d4b5473c97f491d63892442212ab (diff)
Merge tag 'v3.16-rc5' into sched/core, to refresh the branch before applying bigger tree-wide changes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/smp.c')
-rw-r--r--  kernel/smp.c | 57
1 file changed, 49 insertions(+), 8 deletions(-)
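The change merged in below factors the body of the IPI handler out into a new helper, flush_smp_call_function_queue(), so that a CPU on its way offline can drain its own call_single_queue directly instead of waiting for an IPI that may never arrive. The queue itself is a lock-free llist: senders push entries atomically, and the consumer detaches the whole list in one atomic exchange, reverses it to restore FIFO order, and then runs each callback. A minimal user-space model of that pattern is sketched here for orientation; the names (struct csd, csd_push, csd_del_all, flush_queue) are illustrative stand-ins, not the kernel's llist API.

/*
 * User-space model of the lock-free queue pattern used in the diff
 * below.  Illustrative only; not the kernel's llist implementation.
 */
#include <stdatomic.h>
#include <stdio.h>

struct csd {				/* stand-in for struct call_single_data */
	struct csd *next;
	void (*func)(void *info);
	void *info;
};

static _Atomic(struct csd *) queue_head;	/* stand-in for call_single_queue */

/* Like llist_add(): lock-free push, safe against concurrent producers. */
static void csd_push(struct csd *node)
{
	struct csd *old = atomic_load(&queue_head);

	do {
		node->next = old;
	} while (!atomic_compare_exchange_weak(&queue_head, &old, node));
}

/* Like llist_del_all(): detach the entire list in one atomic exchange. */
static struct csd *csd_del_all(void)
{
	return atomic_exchange(&queue_head, NULL);
}

/* Like llist_reverse_order(): pushes yield LIFO order; reverse to FIFO. */
static struct csd *csd_reverse(struct csd *head)
{
	struct csd *prev = NULL;

	while (head) {
		struct csd *next = head->next;

		head->next = prev;
		prev = head;
		head = next;
	}
	return prev;
}

/* The core of flush_smp_call_function_queue(): run all pending callbacks. */
static void flush_queue(void)
{
	struct csd *entry = csd_reverse(csd_del_all());

	while (entry) {
		struct csd *next = entry->next;	/* func may reuse entry */

		entry->func(entry->info);
		entry = next;
	}
}

static void say(void *info) { puts(info); }

int main(void)
{
	struct csd a = { .func = say, .info = "first" };
	struct csd b = { .func = say, .info = "second" };

	csd_push(&a);
	csd_push(&b);
	flush_queue();		/* prints "first", then "second" */
	return 0;
}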
diff --git a/kernel/smp.c b/kernel/smp.c
index a1812d184aed..487653b5844f 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -30,6 +30,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
+static void flush_smp_call_function_queue(bool warn_cpu_offline);
+
static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
@@ -52,12 +54,27 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
#ifdef CONFIG_HOTPLUG_CPU
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
+ /* Fall-through to the CPU_DEAD[_FROZEN] case. */
case CPU_DEAD:
case CPU_DEAD_FROZEN:
free_cpumask_var(cfd->cpumask);
free_percpu(cfd->csd);
break;
+
+ case CPU_DYING:
+ case CPU_DYING_FROZEN:
+ /*
+ * The IPIs for the smp-call-function callbacks queued by other
+ * CPUs might arrive late, either due to hardware latencies or
+ * because this CPU disabled interrupts (inside stop-machine)
+ * before the IPIs were sent. So flush out any pending callbacks
+ * explicitly (without waiting for the IPIs to arrive), to
+ * ensure that the outgoing CPU doesn't go offline with work
+ * still pending.
+ */
+ flush_smp_call_function_queue(false);
+ break;
#endif
};
@@ -178,23 +195,47 @@ static int generic_exec_single(int cpu, struct call_single_data *csd,
return 0;
}
-/*
- * Invoked by arch to handle an IPI for call function single. Must be
- * called from the arch with interrupts disabled.
+/**
+ * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
+ *
+ * Invoked by arch to handle an IPI for call function single.
+ * Must be called with interrupts disabled.
*/
void generic_smp_call_function_single_interrupt(void)
{
+ flush_smp_call_function_queue(true);
+}
+
+/**
+ * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
+ *
+ * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
+ * offline CPU. Skip this check if set to 'false'.
+ *
+ * Flush any pending smp-call-function callbacks queued on this CPU. This is
+ * invoked by the generic IPI handler, as well as by a CPU about to go offline,
+ * to ensure that all pending IPI callbacks are run before it goes completely
+ * offline.
+ *
+ * Loop through the call_single_queue and run all the queued callbacks.
+ * Must be called with interrupts disabled.
+ */
+static void flush_smp_call_function_queue(bool warn_cpu_offline)
+{
+ struct llist_head *head;
struct llist_node *entry;
struct call_single_data *csd, *csd_next;
static bool warned;
- entry = llist_del_all(&__get_cpu_var(call_single_queue));
+ WARN_ON(!irqs_disabled());
+
+ head = &__get_cpu_var(call_single_queue);
+ entry = llist_del_all(head);
entry = llist_reverse_order(entry);
- /*
- * Shouldn't receive this interrupt on a cpu that is not yet online.
- */
- if (unlikely(!cpu_online(smp_processor_id()) && !warned)) {
+ /* There shouldn't be any pending callbacks on an offline CPU. */
+ if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
+ !warned && !llist_empty(head))) {
warned = true;
WARN(1, "IPI on offline CPU %d\n", smp_processor_id());
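The hunk is truncated here; upstream, the function continues by walking the detached, reversed list and invoking each callback, as modeled by flush_queue() in the sketch above. The companion piece is the CPU_DYING case from the first hunk: it runs on the outgoing CPU with interrupts already disabled by stop-machine, so an IPI queued by another CPU may never be delivered, and the queue must be drained by direct call. A rough model of that dispatch, building on the earlier sketch, follows; the enum and dispatcher are illustrative, not the kernel notifier API.

/* Extends the model above; illustrative only, not the kernel notifier API. */
enum hotplug_action { CPU_UP_CANCELED, CPU_DEAD, CPU_DYING };

static void hotplug_model(enum hotplug_action action)
{
	switch (action) {
	case CPU_UP_CANCELED:
		/* Fall through: cleanup is shared with the dead case. */
	case CPU_DEAD:
		/* Free per-CPU resources (cfd->cpumask, cfd->csd upstream). */
		break;

	case CPU_DYING:
		/*
		 * Runs on the dying CPU with interrupts off, so a pending
		 * IPI may never arrive.  Drain the queue by direct call
		 * rather than waiting for the interrupt handler.
		 */
		flush_queue();
		break;
	}
}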