author		Linus Torvalds <torvalds@linux-foundation.org>	2020-10-12 11:34:32 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-10-12 11:34:32 -0700
commit		c457cc800e892315d428a724ede09a085c4501be (patch)
tree		e8da4a306761a98f9cc02e3a2129e6af46598c35 /arch/arm
parent		f5f59336a9ae8f683772d6b8cb2d6732b5e567ea (diff)
parent		863bae1fbcfa0420e5f51389218a9532542aa00f (diff)
Merge tag 'irq-core-2020-10-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq updates from Thomas Gleixner:
 "Updates for the interrupt subsystem:

  Core:
   - Allow trimming of interrupt hierarchy to support odd hardware
     setups where only a subset of the interrupts requires the full
     hierarchy.
   - Allow the retrigger mechanism to follow a hierarchy to simplify
     driver code.
   - Provide a mechanism to force enable wakeup interrupts on suspend.
   - More infrastructure to handle IPIs in the core code

  Architectures:
   - Convert ARM/ARM64 IPI handling to utilize the interrupt core code.

  Drivers:
   - The usual pile of new interrupt chips (MStar, Actions Owl, TI
     PRUSS, Designware ICTL)
   - ARM(64) IPI related conversions
   - Wakeup support for Qualcomm PDC
   - Prevent hierarchy corruption in the NVIDIA Tegra driver
   - The usual small fixes, improvements and cleanups all over the
     place"

* tag 'irq-core-2020-10-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (59 commits)
  dt-bindings: interrupt-controller: Add MStar interrupt controller
  irqchip/irq-mst: Add MStar interrupt controller support
  soc/tegra: pmc: Don't create fake interrupt hierarchy levels
  soc/tegra: pmc: Allow optional irq parent callbacks
  gpio: tegra186: Allow optional irq parent callbacks
  genirq/irqdomain: Allow partial trimming of irq_data hierarchy
  irqchip/qcom-pdc: Reset PDC interrupts during init
  irqchip/qcom-pdc: Set IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND flag
  pinctrl: qcom: Set IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND flag
  genirq/PM: Introduce IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND flag
  pinctrl: qcom: Use return value from irq_set_wake() call
  pinctrl: qcom: Set IRQCHIP_SET_TYPE_MASKED and IRQCHIP_MASK_ON_SUSPEND flags
  ARM: Handle no IPI being registered in show_ipi_list()
  MAINTAINERS: Add entries for Actions Semi Owl SIRQ controller
  irqchip: Add Actions Semi Owl SIRQ controller
  dt-bindings: interrupt-controller: Add Actions SIRQ controller binding
  dt-bindings: dw-apb-ictl: Update binding to describe use as primary interrupt controller
  irqchip/dw-apb-ictl: Add primary interrupt controller support
  irqchip/dw-apb-ictl: Refactor priot to introducing hierarchical irq domains
  genirq: Add stub for set_handle_irq() when !GENERIC_IRQ_MULTI_HANDLER
  ...
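As an illustration of the "force enable wakeup interrupts on suspend" mechanism mentioned above, a driver opts in by setting the new IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND flag on its irq_chip, roughly the combination the qcom pinctrl/PDC commits in the shortlog apply. The sketch below is not taken from this merge; every "example_*" identifier is a hypothetical placeholder.

#include <linux/irq.h>

/* Hypothetical hardware mask/unmask handlers */
static void example_irq_mask(struct irq_data *d)   { }
static void example_irq_unmask(struct irq_data *d) { }

static int example_irq_set_wake(struct irq_data *d, unsigned int on)
{
	/* record the wake state; the core keeps wake IRQs enabled over suspend */
	return 0;
}

static struct irq_chip example_irq_chip = {
	.name		= "example",
	.irq_mask	= example_irq_mask,
	.irq_unmask	= example_irq_unmask,
	.irq_set_wake	= example_irq_set_wake,
	.flags		= IRQCHIP_SET_TYPE_MASKED |
			  IRQCHIP_MASK_ON_SUSPEND |
			  IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND,
};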
Diffstat (limited to 'arch/arm')
-rw-r--r--	arch/arm/Kconfig                 |   1
-rw-r--r--	arch/arm/include/asm/hardirq.h   |  17
-rw-r--r--	arch/arm/include/asm/smp.h       |   5
-rw-r--r--	arch/arm/kernel/irq.c            |   1
-rw-r--r--	arch/arm/kernel/smp.c            | 140
5 files changed, 97 insertions, 67 deletions
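The arch/arm diff below supplies the consumer side of the new IPI handshake: set_smp_ipi_range() takes a block of per-CPU interrupts and request_percpu_irq()s each one. For orientation, here is a hedged sketch of the irqchip side that would hand such a range over (the GIC conversions in this merge follow this general pattern); example_ipi_domain, example_smp_init and the count of 8 are assumptions, not code from this merge.

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/numa.h>
#include <linux/smp.h>

static struct irq_domain *example_ipi_domain;	/* hypothetical per-CPU IPI domain */

static void __init example_smp_init(void)
{
	int base_ipi;

	/* allocate a contiguous block of per-CPU IPI interrupts */
	base_ipi = __irq_domain_alloc_irqs(example_ipi_domain, -1, 8,
					   NUMA_NO_NODE, NULL, false, NULL);
	if (WARN_ON(base_ipi <= 0))
		return;

	/* hand the range to the architecture SMP code (see smp.c below) */
	set_smp_ipi_range(base_ipi, 8);
}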
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index e00d94b16658..e67ef15c800f 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -49,6 +49,7 @@ config ARM
select GENERIC_ARCH_TOPOLOGY if ARM_CPU_TOPOLOGY
select GENERIC_ATOMIC64 if CPU_V7M || CPU_V6 || !CPU_32v6K || !AEABI
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
+ select GENERIC_IRQ_IPI if SMP
select GENERIC_CPU_AUTOPROBE
select GENERIC_EARLY_IOREMAP
select GENERIC_IDLE_POLL_SETUP
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index 7a88f160b1fb..b95848ed2bc7 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -6,29 +6,12 @@
#include <linux/threads.h>
#include <asm/irq.h>
-/* number of IPIS _not_ including IPI_CPU_BACKTRACE */
-#define NR_IPI 7
-
typedef struct {
unsigned int __softirq_pending;
-#ifdef CONFIG_SMP
- unsigned int ipi_irqs[NR_IPI];
-#endif
} ____cacheline_aligned irq_cpustat_t;
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-#define __inc_irq_stat(cpu, member) __IRQ_STAT(cpu, member)++
-#define __get_irq_stat(cpu, member) __IRQ_STAT(cpu, member)
-
-#ifdef CONFIG_SMP
-u64 smp_irq_stat_cpu(unsigned int cpu);
-#else
-#define smp_irq_stat_cpu(cpu) 0
-#endif
-
-#define arch_irq_stat_cpu smp_irq_stat_cpu
-
#define __ARCH_IRQ_EXIT_IRQS_DISABLED 1
#endif /* __ASM_HARDIRQ_H */
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index a91f21e3c5b5..0ca55a607d0a 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -39,11 +39,10 @@ void handle_IPI(int ipinr, struct pt_regs *regs);
*/
extern void smp_init_cpus(void);
-
/*
- * Provide a function to raise an IPI cross call on CPUs in callmap.
+ * Register IPI interrupts with the arch SMP code
*/
-extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int));
+extern void set_smp_ipi_range(int ipi_base, int nr_ipi);
/*
* Called from platform specific assembly code, this is the
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index ee514034c0a1..698b6f636156 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -18,7 +18,6 @@
* IRQ's are in fact implemented a bit like signal handlers for the kernel.
* Naturally it's not a 1:1 relation, but there are similarities.
*/
-#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 5d9da61eff62..48099c6e1e4a 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -26,6 +26,7 @@
#include <linux/completion.h>
#include <linux/cpufreq.h>
#include <linux/irq_work.h>
+#include <linux/kernel_stat.h>
#include <linux/atomic.h>
#include <asm/bugs.h>
@@ -65,18 +66,26 @@ enum ipi_msg_type {
IPI_CPU_STOP,
IPI_IRQ_WORK,
IPI_COMPLETION,
+ NR_IPI,
/*
* CPU_BACKTRACE is special and not included in NR_IPI
* or tracable with trace_ipi_*
*/
- IPI_CPU_BACKTRACE,
+ IPI_CPU_BACKTRACE = NR_IPI,
/*
* SGI8-15 can be reserved by secure firmware, and thus may
* not be usable by the kernel. Please keep the above limited
* to at most 8 entries.
*/
+ MAX_IPI
};
+static int ipi_irq_base __read_mostly;
+static int nr_ipi __read_mostly = NR_IPI;
+static struct irq_desc *ipi_desc[MAX_IPI] __read_mostly;
+
+static void ipi_setup(int cpu);
+
static DECLARE_COMPLETION(cpu_running);
static struct smp_operations smp_ops __ro_after_init;
@@ -226,6 +235,17 @@ int platform_can_hotplug_cpu(unsigned int cpu)
return cpu != 0;
}
+static void ipi_teardown(int cpu)
+{
+ int i;
+
+ if (WARN_ON_ONCE(!ipi_irq_base))
+ return;
+
+ for (i = 0; i < nr_ipi; i++)
+ disable_percpu_irq(ipi_irq_base + i);
+}
+
/*
* __cpu_disable runs on the processor to be shutdown.
*/
@@ -247,6 +267,7 @@ int __cpu_disable(void)
* and we must not schedule until we're ready to give up the cpu.
*/
set_cpu_online(cpu, false);
+ ipi_teardown(cpu);
/*
* OK - migrate IRQs away from this CPU
@@ -422,6 +443,8 @@ asmlinkage void secondary_start_kernel(void)
notify_cpu_starting(cpu);
+ ipi_setup(cpu);
+
calibrate_delay();
smp_store_cpu_info(cpu);
@@ -500,14 +523,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
}
}
-static void (*__smp_cross_call)(const struct cpumask *, unsigned int);
-
-void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
-{
- if (!__smp_cross_call)
- __smp_cross_call = fn;
-}
-
static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s) [x] = s
S(IPI_WAKEUP, "CPU wakeup interrupts"),
@@ -519,38 +534,28 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
S(IPI_COMPLETION, "completion interrupts"),
};
-static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
-{
- trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
- __smp_cross_call(target, ipinr);
-}
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);
void show_ipi_list(struct seq_file *p, int prec)
{
unsigned int cpu, i;
for (i = 0; i < NR_IPI; i++) {
+ unsigned int irq;
+
+ if (!ipi_desc[i])
+ continue;
+
+ irq = irq_desc_get_irq(ipi_desc[i]);
seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
for_each_online_cpu(cpu)
- seq_printf(p, "%10u ",
- __get_irq_stat(cpu, ipi_irqs[i]));
+ seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
seq_printf(p, " %s\n", ipi_types[i]);
}
}
-u64 smp_irq_stat_cpu(unsigned int cpu)
-{
- u64 sum = 0;
- int i;
-
- for (i = 0; i < NR_IPI; i++)
- sum += __get_irq_stat(cpu, ipi_irqs[i]);
-
- return sum;
-}
-
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
smp_cross_call(mask, IPI_CALL_FUNC);
@@ -627,15 +632,12 @@ asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
handle_IPI(ipinr, regs);
}
-void handle_IPI(int ipinr, struct pt_regs *regs)
+static void do_handle_IPI(int ipinr)
{
unsigned int cpu = smp_processor_id();
- struct pt_regs *old_regs = set_irq_regs(regs);
- if ((unsigned)ipinr < NR_IPI) {
+ if ((unsigned)ipinr < NR_IPI)
trace_ipi_entry_rcuidle(ipi_types[ipinr]);
- __inc_irq_stat(cpu, ipi_irqs[ipinr]);
- }
switch (ipinr) {
case IPI_WAKEUP:
@@ -643,9 +645,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
case IPI_TIMER:
- irq_enter();
tick_receive_broadcast();
- irq_exit();
break;
#endif
@@ -654,36 +654,26 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
break;
case IPI_CALL_FUNC:
- irq_enter();
generic_smp_call_function_interrupt();
- irq_exit();
break;
case IPI_CPU_STOP:
- irq_enter();
ipi_cpu_stop(cpu);
- irq_exit();
break;
#ifdef CONFIG_IRQ_WORK
case IPI_IRQ_WORK:
- irq_enter();
irq_work_run();
- irq_exit();
break;
#endif
case IPI_COMPLETION:
- irq_enter();
ipi_complete(cpu);
- irq_exit();
break;
case IPI_CPU_BACKTRACE:
printk_nmi_enter();
- irq_enter();
- nmi_cpu_backtrace(regs);
- irq_exit();
+ nmi_cpu_backtrace(get_irq_regs());
printk_nmi_exit();
break;
@@ -695,9 +685,67 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
if ((unsigned)ipinr < NR_IPI)
trace_ipi_exit_rcuidle(ipi_types[ipinr]);
+}
+
+/* Legacy version, should go away once all irqchips have been converted */
+void handle_IPI(int ipinr, struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
+ irq_enter();
+ do_handle_IPI(ipinr);
+ irq_exit();
+
set_irq_regs(old_regs);
}
+static irqreturn_t ipi_handler(int irq, void *data)
+{
+ do_handle_IPI(irq - ipi_irq_base);
+ return IRQ_HANDLED;
+}
+
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+ trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
+ __ipi_send_mask(ipi_desc[ipinr], target);
+}
+
+static void ipi_setup(int cpu)
+{
+ int i;
+
+ if (WARN_ON_ONCE(!ipi_irq_base))
+ return;
+
+ for (i = 0; i < nr_ipi; i++)
+ enable_percpu_irq(ipi_irq_base + i, 0);
+}
+
+void __init set_smp_ipi_range(int ipi_base, int n)
+{
+ int i;
+
+ WARN_ON(n < MAX_IPI);
+ nr_ipi = min(n, MAX_IPI);
+
+ for (i = 0; i < nr_ipi; i++) {
+ int err;
+
+ err = request_percpu_irq(ipi_base + i, ipi_handler,
+ "IPI", &irq_stat);
+ WARN_ON(err);
+
+ ipi_desc[i] = irq_to_desc(ipi_base + i);
+ irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
+ }
+
+ ipi_irq_base = ipi_base;
+
+ /* Setup the boot CPU immediately */
+ ipi_setup(smp_processor_id());
+}
+
void smp_send_reschedule(int cpu)
{
smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
@@ -805,7 +853,7 @@ core_initcall(register_cpufreq_notifier);
static void raise_nmi(cpumask_t *mask)
{
- __smp_cross_call(mask, IPI_CPU_BACKTRACE);
+ __ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask);
}
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)