Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/include/asm/cacheflush.h | 2
-rw-r--r--  arch/arm/mach-shmobile/board-ag5evm.c | 10
-rw-r--r--  arch/arm/mach-shmobile/board-ap4evb.c | 13
-rw-r--r--  arch/arm/mach-shmobile/board-mackerel.c | 13
-rw-r--r--  arch/arm/mach-ux500/mbox-db5500.c | 2
-rw-r--r--  arch/arm/plat-omap/Kconfig | 2
-rw-r--r--  arch/avr32/mm/cache.c | 2
-rw-r--r--  arch/cris/Kconfig | 1
-rw-r--r--  arch/cris/arch-v10/kernel/irq.c | 2
-rw-r--r--  arch/cris/arch-v10/mm/init.c | 2
-rw-r--r--  arch/cris/arch-v32/kernel/irq.c | 8
-rw-r--r--  arch/cris/kernel/irq.c | 39
-rw-r--r--  arch/ia64/include/asm/perfmon.h | 2
-rw-r--r--  arch/powerpc/Kconfig | 1
-rw-r--r--  arch/powerpc/boot/dts/canyonlands.dts | 24
-rw-r--r--  arch/powerpc/boot/dts/kmeter1.dts | 69
-rw-r--r--  arch/powerpc/boot/dts/mgcoge.dts | 47
-rw-r--r--  arch/powerpc/boot/dts/mgsuvd.dts | 163
-rw-r--r--  arch/powerpc/boot/dts/p1022ds.dts | 4
-rw-r--r--  arch/powerpc/configs/83xx/kmeter1_defconfig | 7
-rw-r--r--  arch/powerpc/configs/mgcoge_defconfig | 9
-rw-r--r--  arch/powerpc/configs/mgsuvd_defconfig | 81
-rw-r--r--  arch/powerpc/include/asm/cputable.h | 3
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h | 2
-rw-r--r--  arch/powerpc/include/asm/mpic.h | 6
-rw-r--r--  arch/powerpc/include/asm/nvram.h | 3
-rw-r--r--  arch/powerpc/include/asm/pgtable.h | 1
-rw-r--r--  arch/powerpc/include/asm/qe_ic.h | 19
-rw-r--r--  arch/powerpc/include/asm/reg.h | 13
-rw-r--r--  arch/powerpc/include/asm/reg_booke.h | 3
-rw-r--r--  arch/powerpc/kernel/cputable.c | 22
-rw-r--r--  arch/powerpc/kernel/irq.c | 55
-rw-r--r--  arch/powerpc/kernel/machine_kexec.c | 21
-rw-r--r--  arch/powerpc/kernel/nvram_64.c | 31
-rw-r--r--  arch/powerpc/kernel/perf_event.c | 24
-rw-r--r--  arch/powerpc/kernel/prom.c | 2
-rw-r--r--  arch/powerpc/kernel/rtasd.c | 3
-rw-r--r--  arch/powerpc/math-emu/math_efp.c | 65
-rw-r--r--  arch/powerpc/mm/init_32.c | 2
-rw-r--r--  arch/powerpc/mm/tlb_nohash_low.S | 35
-rw-r--r--  arch/powerpc/platforms/44x/44x.h | 4
-rw-r--r--  arch/powerpc/platforms/44x/Kconfig | 1
-rw-r--r--  arch/powerpc/platforms/44x/Makefile | 1
-rw-r--r--  arch/powerpc/platforms/44x/canyonlands.c | 134
-rw-r--r--  arch/powerpc/platforms/44x/ppc44x_simple.c | 1
-rw-r--r--  arch/powerpc/platforms/512x/mpc5121_ads_cpld.c | 14
-rw-r--r--  arch/powerpc/platforms/52xx/media5200.c | 21
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_gpt.c | 26
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_pic.c | 80
-rw-r--r--  arch/powerpc/platforms/82xx/Makefile | 2
-rw-r--r--  arch/powerpc/platforms/82xx/km82xx.c (renamed from arch/powerpc/platforms/82xx/mgcoge.c) | 62
-rw-r--r--  arch/powerpc/platforms/82xx/pq2ads-pci-pic.c | 27
-rw-r--r--  arch/powerpc/platforms/83xx/Makefile | 2
-rw-r--r--  arch/powerpc/platforms/83xx/km83xx.c (renamed from arch/powerpc/platforms/83xx/kmeter1.c) | 46
-rw-r--r--  arch/powerpc/platforms/85xx/ksi8560.c | 3
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_ads.c | 3
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_ds.c | 3
-rw-r--r--  arch/powerpc/platforms/85xx/sbc8560.c | 3
-rw-r--r--  arch/powerpc/platforms/85xx/smp.c | 6
-rw-r--r--  arch/powerpc/platforms/85xx/socrates_fpga_pic.c | 40
-rw-r--r--  arch/powerpc/platforms/85xx/stx_gp3.c | 3
-rw-r--r--  arch/powerpc/platforms/85xx/tqm85xx.c | 3
-rw-r--r--  arch/powerpc/platforms/86xx/gef_pic.c | 22
-rw-r--r--  arch/powerpc/platforms/86xx/pic.c | 5
-rw-r--r--  arch/powerpc/platforms/8xx/Kconfig | 6
-rw-r--r--  arch/powerpc/platforms/8xx/Makefile | 1
-rw-r--r--  arch/powerpc/platforms/8xx/m8xx_setup.c | 9
-rw-r--r--  arch/powerpc/platforms/8xx/mgsuvd.c | 92
-rw-r--r--  arch/powerpc/platforms/cell/axon_msi.c | 3
-rw-r--r--  arch/powerpc/platforms/cell/beat_interrupt.c | 36
-rw-r--r--  arch/powerpc/platforms/cell/interrupt.c | 30
-rw-r--r--  arch/powerpc/platforms/cell/setup.c | 6
-rw-r--r--  arch/powerpc/platforms/cell/spider-pic.c | 43
-rw-r--r--  arch/powerpc/platforms/chrp/setup.c | 5
-rw-r--r--  arch/powerpc/platforms/embedded6xx/flipper-pic.c | 32
-rw-r--r--  arch/powerpc/platforms/embedded6xx/hlwd-pic.c | 41
-rw-r--r--  arch/powerpc/platforms/iseries/irq.c | 43
-rw-r--r--  arch/powerpc/platforms/pasemi/setup.c | 4
-rw-r--r--  arch/powerpc/platforms/powermac/pic.c | 48
-rw-r--r--  arch/powerpc/platforms/ps3/interrupt.c | 40
-rw-r--r--  arch/powerpc/platforms/pseries/cmm.c | 14
-rw-r--r--  arch/powerpc/platforms/pseries/eeh.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c | 587
-rw-r--r--  arch/powerpc/platforms/pseries/msi.c | 14
-rw-r--r--  arch/powerpc/platforms/pseries/nvram.c | 255
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c | 5
-rw-r--r--  arch/powerpc/platforms/pseries/xics.c | 89
-rw-r--r--  arch/powerpc/sysdev/cpm1.c | 18
-rw-r--r--  arch/powerpc/sysdev/cpm2_pic.c | 32
-rw-r--r--  arch/powerpc/sysdev/fsl_85xx_l2ctlr.c | 4
-rw-r--r--  arch/powerpc/sysdev/fsl_msi.c | 111
-rw-r--r--  arch/powerpc/sysdev/fsl_pci.c | 15
-rw-r--r--  arch/powerpc/sysdev/fsl_pci.h | 17
-rw-r--r--  arch/powerpc/sysdev/i8259.c | 42
-rw-r--r--  arch/powerpc/sysdev/ipic.c | 54
-rw-r--r--  arch/powerpc/sysdev/mpc8xx_pic.c | 32
-rw-r--r--  arch/powerpc/sysdev/mpc8xxx_gpio.c | 46
-rw-r--r--  arch/powerpc/sysdev/mpic.c | 137
-rw-r--r--  arch/powerpc/sysdev/mpic.h | 5
-rw-r--r--  arch/powerpc/sysdev/mpic_pasemi_msi.c | 18
-rw-r--r--  arch/powerpc/sysdev/mpic_u3msi.c | 18
-rw-r--r--  arch/powerpc/sysdev/mv64x60_dev.c | 2
-rw-r--r--  arch/powerpc/sysdev/mv64x60_pic.c | 46
-rw-r--r--  arch/powerpc/sysdev/qe_lib/qe_ic.c | 25
-rw-r--r--  arch/powerpc/sysdev/tsi108_pci.c | 41
-rw-r--r--  arch/powerpc/sysdev/uic.c | 59
-rw-r--r--  arch/powerpc/sysdev/xilinx_intc.c | 48
-rw-r--r--  arch/s390/oprofile/Makefile | 2
-rw-r--r--  arch/s390/oprofile/hwsampler.c | 1256
-rw-r--r--  arch/s390/oprofile/hwsampler.h | 113
-rw-r--r--  arch/s390/oprofile/init.c | 165
-rw-r--r--  arch/sh/boards/mach-ecovec24/setup.c | 6
-rw-r--r--  arch/sh/boards/mach-se/7724/setup.c | 6
-rw-r--r--  arch/x86/Kconfig.cpu | 4
-rw-r--r--  arch/x86/crypto/aesni-intel_asm.S | 6
-rw-r--r--  arch/x86/include/asm/cacheflush.h | 2
-rw-r--r--  arch/x86/include/asm/kdebug.h | 2
-rw-r--r--  arch/x86/include/asm/nmi.h | 4
-rw-r--r--  arch/x86/include/asm/nops.h | 2
-rw-r--r--  arch/x86/include/asm/olpc.h | 2
-rw-r--r--  arch/x86/include/asm/perf_event_p4.h | 4
-rw-r--r--  arch/x86/include/asm/pgtable-3level.h | 11
-rw-r--r--  arch/x86/include/asm/processor-flags.h | 2
-rw-r--r--  arch/x86/include/asm/ptrace-abi.h | 2
-rw-r--r--  arch/x86/include/asm/ptrace.h | 4
-rw-r--r--  arch/x86/include/asm/stacktrace.h | 6
-rw-r--r--  arch/x86/include/asm/tsc.h | 2
-rw-r--r--  arch/x86/include/asm/xen/interface.h | 2
-rw-r--r--  arch/x86/kernel/alternative.c | 2
-rw-r--r--  arch/x86/kernel/aperture_64.c | 2
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 6
-rw-r--r--  arch/x86/kernel/apm_32.c | 2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/longhaul.c | 4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-smi.c | 4
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce-inject.c | 2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c | 2
-rw-r--r--  arch/x86/kernel/cpu/mtrr/generic.c | 2
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 6
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_ds.c | 112
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p4.c | 8
-rw-r--r--  arch/x86/kernel/cpu/vmware.c | 2
-rw-r--r--  arch/x86/kernel/dumpstack.c | 14
-rw-r--r--  arch/x86/kernel/dumpstack_32.c | 15
-rw-r--r--  arch/x86/kernel/dumpstack_64.c | 14
-rw-r--r--  arch/x86/kernel/entry_64.S | 4
-rw-r--r--  arch/x86/kernel/i387.c | 2
-rw-r--r--  arch/x86/kernel/irq_32.c | 2
-rw-r--r--  arch/x86/kernel/kgdb.c | 2
-rw-r--r--  arch/x86/kernel/mca_32.c | 2
-rw-r--r--  arch/x86/kernel/mpparse.c | 4
-rw-r--r--  arch/x86/kernel/pci-calgary_64.c | 4
-rw-r--r--  arch/x86/kernel/process.c | 2
-rw-r--r--  arch/x86/kernel/stacktrace.c | 6
-rw-r--r--  arch/x86/kernel/step.c | 2
-rw-r--r--  arch/x86/kernel/topology.c | 2
-rw-r--r--  arch/x86/kernel/tsc.c | 4
-rw-r--r--  arch/x86/kernel/verify_cpu.S | 2
-rw-r--r--  arch/x86/kernel/xsave.c | 2
-rw-r--r--  arch/x86/kvm/paging_tmpl.h | 2
-rw-r--r--  arch/x86/kvm/timer.c | 2
-rw-r--r--  arch/x86/kvm/x86.c | 2
-rw-r--r--  arch/x86/lguest/boot.c | 2
-rw-r--r--  arch/x86/lib/copy_user_64.S | 2
-rw-r--r--  arch/x86/lib/csum-copy_64.S | 242
-rw-r--r--  arch/x86/lib/csum-partial_64.c | 2
-rw-r--r--  arch/x86/mm/hugetlbpage.c | 2
-rw-r--r--  arch/x86/mm/init_32.c | 2
-rw-r--r--  arch/x86/mm/numa_64.c | 2
-rw-r--r--  arch/x86/mm/pageattr.c | 2
-rw-r--r--  arch/x86/mm/pgtable.c | 3
-rw-r--r--  arch/x86/oprofile/backtrace.c | 2
-rw-r--r--  arch/x86/oprofile/op_model_p4.c | 2
-rw-r--r--  arch/x86/pci/i386.c | 4
-rw-r--r--  arch/x86/pci/irq.c | 15
-rw-r--r--  arch/x86/xen/mmu.c | 2
-rw-r--r--  arch/xtensa/configs/s6105_defconfig | 2
177 files changed, 3961 insertions, 1673 deletions
diff --git a/arch/alpha/include/asm/cacheflush.h b/arch/alpha/include/asm/cacheflush.h
index 012f1243b1c1..a9cb6aa447aa 100644
--- a/arch/alpha/include/asm/cacheflush.h
+++ b/arch/alpha/include/asm/cacheflush.h
@@ -63,7 +63,7 @@ extern void flush_icache_user_range(struct vm_area_struct *vma,
struct page *page, unsigned long addr, int len);
#endif
-/* This is used only in do_no_page and do_swap_page. */
+/* This is used only in __do_fault and do_swap_page. */
#define flush_icache_page(vma, page) \
flush_icache_user_range((vma), (page), 0, 0)
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c
index 4303a86e6e38..3e6f0aab460b 100644
--- a/arch/arm/mach-shmobile/board-ag5evm.c
+++ b/arch/arm/mach-shmobile/board-ag5evm.c
@@ -119,13 +119,6 @@ static struct platform_device keysc_device = {
};
/* FSI A */
-static struct sh_fsi_platform_info fsi_info = {
- .porta_flags = SH_FSI_OUT_SLAVE_MODE |
- SH_FSI_IN_SLAVE_MODE |
- SH_FSI_OFMT(I2S) |
- SH_FSI_IFMT(I2S),
-};
-
static struct resource fsi_resources[] = {
[0] = {
.name = "FSI",
@@ -144,9 +137,6 @@ static struct platform_device fsi_device = {
.id = -1,
.num_resources = ARRAY_SIZE(fsi_resources),
.resource = fsi_resources,
- .dev = {
- .platform_data = &fsi_info,
- },
};
static struct resource sh_mmcif_resources[] = {
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index 81d6536552a9..1a8118c929be 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -673,16 +673,12 @@ static int fsi_set_rate(struct device *dev, int is_porta, int rate, int enable)
}
static struct sh_fsi_platform_info fsi_info = {
- .porta_flags = SH_FSI_BRS_INV |
- SH_FSI_OUT_SLAVE_MODE |
- SH_FSI_IN_SLAVE_MODE |
- SH_FSI_OFMT(PCM) |
- SH_FSI_IFMT(PCM),
+ .porta_flags = SH_FSI_BRS_INV,
.portb_flags = SH_FSI_BRS_INV |
SH_FSI_BRM_INV |
SH_FSI_LRS_INV |
- SH_FSI_OFMT(SPDIF),
+ SH_FSI_FMT_SPDIF,
.set_rate = fsi_set_rate,
};
@@ -783,6 +779,10 @@ static struct platform_device hdmi_device = {
},
};
+static struct platform_device fsi_hdmi_device = {
+ .name = "sh_fsi2_b_hdmi",
+};
+
static long ap4evb_clk_optimize(unsigned long target, unsigned long *best_freq,
unsigned long *parent_freq)
{
@@ -936,6 +936,7 @@ static struct platform_device *ap4evb_devices[] __initdata = {
&usb1_host_device,
&fsi_device,
&fsi_ak4643_device,
+ &fsi_hdmi_device,
&sh_mmcif_device,
&lcdc1_device,
&lcdc_device,
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index 1657eac5dde2..1a63c213e45d 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -399,6 +399,10 @@ static struct platform_device hdmi_device = {
},
};
+static struct platform_device fsi_hdmi_device = {
+ .name = "sh_fsi2_b_hdmi",
+};
+
static int __init hdmi_init_pm_clock(void)
{
struct clk *hdmi_ick = clk_get(&hdmi_device.dev, "ick");
@@ -609,16 +613,12 @@ fsi_set_rate_end:
}
static struct sh_fsi_platform_info fsi_info = {
- .porta_flags = SH_FSI_BRS_INV |
- SH_FSI_OUT_SLAVE_MODE |
- SH_FSI_IN_SLAVE_MODE |
- SH_FSI_OFMT(PCM) |
- SH_FSI_IFMT(PCM),
+ .porta_flags = SH_FSI_BRS_INV,
.portb_flags = SH_FSI_BRS_INV |
SH_FSI_BRM_INV |
SH_FSI_LRS_INV |
- SH_FSI_OFMT(SPDIF),
+ SH_FSI_FMT_SPDIF,
.set_rate = fsi_set_rate,
};
@@ -921,6 +921,7 @@ static struct platform_device *mackerel_devices[] __initdata = {
&leds_device,
&fsi_device,
&fsi_ak4643_device,
+ &fsi_hdmi_device,
&sdhi0_device,
#if !defined(CONFIG_MMC_SH_MMCIF)
&sdhi1_device,
diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
index cbf15718fc3c..a4ffb9f4f461 100644
--- a/arch/arm/mach-ux500/mbox-db5500.c
+++ b/arch/arm/mach-ux500/mbox-db5500.c
@@ -498,7 +498,7 @@ struct mbox *mbox_setup(u8 mbox_id, mbox_recv_cb_t *mbox_cb, void *priv)
#endif
dev_info(&(mbox->pdev->dev),
- "Mailbox driver with index %d initated!\n", mbox_id);
+ "Mailbox driver with index %d initiated!\n", mbox_id);
exit:
return mbox;
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index b6333ae3f92a..cd5f993612fd 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -54,7 +54,7 @@ config OMAP_SMARTREFLEX
user must write 1 to
/debug/voltage/vdd_<X>/smartreflex/autocomp,
where X is mpu or core for OMAP3.
- Optionallly autocompensation can be enabled in the kernel
+ Optionally autocompensation can be enabled in the kernel
by default during system init via the enable_on_init flag
which an be passed as platform data to the smartreflex driver.
diff --git a/arch/avr32/mm/cache.c b/arch/avr32/mm/cache.c
index 24a74d1ca7d9..6a46ecd56cfd 100644
--- a/arch/avr32/mm/cache.c
+++ b/arch/avr32/mm/cache.c
@@ -113,7 +113,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
}
/*
- * This one is called from do_no_page(), do_swap_page() and install_page().
+ * This one is called from __do_fault() and do_swap_page().
*/
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 0a7a4c11d8b1..4db5b46e1eff 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -56,6 +56,7 @@ config CRIS
select HAVE_IDE
select HAVE_GENERIC_HARDIRQS
select GENERIC_HARDIRQS_NO_DEPRECATED
+ select GENERIC_IRQ_SHOW
config HZ
int
diff --git a/arch/cris/arch-v10/kernel/irq.c b/arch/cris/arch-v10/kernel/irq.c
index 7328a7cf7449..907cfb5a873d 100644
--- a/arch/cris/arch-v10/kernel/irq.c
+++ b/arch/cris/arch-v10/kernel/irq.c
@@ -199,7 +199,7 @@ init_IRQ(void)
/* Initialize IRQ handler descriptors. */
for(i = 2; i < NR_IRQS; i++) {
- set_irq_desc_and_handler(i, &crisv10_irq_type,
+ irq_set_chip_and_handler(i, &crisv10_irq_type,
handle_simple_irq);
set_int_vector(i, interrupt[i]);
}
diff --git a/arch/cris/arch-v10/mm/init.c b/arch/cris/arch-v10/mm/init.c
index baa746ce4e74..e7f8066105aa 100644
--- a/arch/cris/arch-v10/mm/init.c
+++ b/arch/cris/arch-v10/mm/init.c
@@ -241,7 +241,7 @@ flush_etrax_cacherange(void *startadr, int length)
}
/* Due to a bug in Etrax100(LX) all versions, receiving DMA buffers
- * will occationally corrupt certain CPU writes if the DMA buffers
+ * will occasionally corrupt certain CPU writes if the DMA buffers
* happen to be hot in the cache.
*
* As a workaround, we have to flush the relevant parts of the cache
diff --git a/arch/cris/arch-v32/kernel/irq.c b/arch/cris/arch-v32/kernel/irq.c
index 0ad9db5126c7..8023176e19b2 100644
--- a/arch/cris/arch-v32/kernel/irq.c
+++ b/arch/cris/arch-v32/kernel/irq.c
@@ -451,16 +451,16 @@ init_IRQ(void)
/* Point all IRQ's to bad handlers. */
for (i = FIRST_IRQ, j = 0; j < NR_IRQS; i++, j++) {
- set_irq_chip_and_handler(j, &crisv32_irq_type,
+ irq_set_chip_and_handler(j, &crisv32_irq_type,
handle_simple_irq);
set_exception_vector(i, interrupt[j]);
}
- /* Mark Timer and IPI IRQs as CPU local */
+ /* Mark Timer and IPI IRQs as CPU local */
irq_allocations[TIMER0_INTR_VECT - FIRST_IRQ].cpu = CPU_FIXED;
- irq_desc[TIMER0_INTR_VECT].status |= IRQ_PER_CPU;
+ irq_set_status_flags(TIMER0_INTR_VECT, IRQ_PER_CPU);
irq_allocations[IPI_INTR_VECT - FIRST_IRQ].cpu = CPU_FIXED;
- irq_desc[IPI_INTR_VECT].status |= IRQ_PER_CPU;
+ irq_set_status_flags(IPI_INTR_VECT, IRQ_PER_CPU);
set_exception_vector(0x00, nmi_interrupt);
set_exception_vector(0x30, multiple_interrupt);
diff --git a/arch/cris/kernel/irq.c b/arch/cris/kernel/irq.c
index c346952f06dc..788eb2248916 100644
--- a/arch/cris/kernel/irq.c
+++ b/arch/cris/kernel/irq.c
@@ -37,45 +37,6 @@
#include <asm/io.h>
-int show_interrupts(struct seq_file *p, void *v)
-{
- int i = *(loff_t *) v, j;
- struct irqaction * action;
- unsigned long flags;
-
- if (i == 0) {
- seq_printf(p, " ");
- for_each_online_cpu(j)
- seq_printf(p, "CPU%d ",j);
- seq_putc(p, '\n');
- }
-
- if (i < NR_IRQS) {
- raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
- action = irq_desc[i].action;
- if (!action)
- goto skip;
- seq_printf(p, "%3d: ",i);
-#ifndef CONFIG_SMP
- seq_printf(p, "%10u ", kstat_irqs(i));
-#else
- for_each_online_cpu(j)
- seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
-#endif
- seq_printf(p, " %14s", irq_desc[i].irq_data.chip->name);
- seq_printf(p, " %s", action->name);
-
- for (action=action->next; action; action = action->next)
- seq_printf(p, ", %s", action->name);
-
- seq_putc(p, '\n');
-skip:
- raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
- }
- return 0;
-}
-
-
/* called by the assembler IRQ entry functions defined in irq.h
* to dispatch the interrupts to registered handlers
* interrupts are disabled upon entry - depending on if the
diff --git a/arch/ia64/include/asm/perfmon.h b/arch/ia64/include/asm/perfmon.h
index 7f3333dd00e4..d551183fee90 100644
--- a/arch/ia64/include/asm/perfmon.h
+++ b/arch/ia64/include/asm/perfmon.h
@@ -7,7 +7,7 @@
#define _ASM_IA64_PERFMON_H
/*
- * perfmon comamnds supported on all CPU models
+ * perfmon commands supported on all CPU models
*/
#define PFM_WRITE_PMCS 0x01
#define PFM_WRITE_PMDS 0x02
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 7d69e9bf5e64..71ba04721beb 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -134,6 +134,7 @@ config PPC
select HAVE_GENERIC_HARDIRQS
select HAVE_SPARSE_IRQ
select IRQ_PER_CPU
+ select GENERIC_HARDIRQS_NO_DEPRECATED
config EARLY_PRINTK
bool
diff --git a/arch/powerpc/boot/dts/canyonlands.dts b/arch/powerpc/boot/dts/canyonlands.dts
index 5b27a4b74b79..2779f08313a5 100644
--- a/arch/powerpc/boot/dts/canyonlands.dts
+++ b/arch/powerpc/boot/dts/canyonlands.dts
@@ -172,6 +172,19 @@
interrupts = <0x1e 4>;
};
+ USBOTG0: usbotg@bff80000 {
+ compatible = "amcc,dwc-otg";
+ reg = <0x4 0xbff80000 0x10000>;
+ interrupt-parent = <&USBOTG0>;
+ #interrupt-cells = <1>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ interrupts = <0x0 0x1 0x2>;
+ interrupt-map = </* USB-OTG */ 0x0 &UIC2 0x1c 0x4
+ /* HIGH-POWER */ 0x1 &UIC1 0x1a 0x8
+ /* DMA */ 0x2 &UIC0 0xc 0x4>;
+ };
+
SATA0: sata@bffd1000 {
compatible = "amcc,sata-460ex";
reg = <4 0xbffd1000 0x800 4 0xbffd0800 0x400>;
@@ -233,6 +246,11 @@
};
};
+ cpld@2,0 {
+ compatible = "amcc,ppc460ex-bcsr";
+ reg = <2 0x0 0x9>;
+ };
+
ndfc@3,0 {
compatible = "ibm,ndfc";
reg = <0x00000003 0x00000000 0x00002000>;
@@ -307,6 +325,12 @@
interrupts = <0x3 0x4>;
};
+ GPIO0: gpio@ef600b00 {
+ compatible = "ibm,ppc4xx-gpio";
+ reg = <0xef600b00 0x00000048>;
+ gpio-controller;
+ };
+
ZMII0: emac-zmii@ef600d00 {
compatible = "ibm,zmii-460ex", "ibm,zmii";
reg = <0xef600d00 0x0000000c>;
diff --git a/arch/powerpc/boot/dts/kmeter1.dts b/arch/powerpc/boot/dts/kmeter1.dts
index d8b5d12fb663..d16bae1230f7 100644
--- a/arch/powerpc/boot/dts/kmeter1.dts
+++ b/arch/powerpc/boot/dts/kmeter1.dts
@@ -1,7 +1,7 @@
/*
* Keymile KMETER1 Device Tree Source
*
- * 2008 DENX Software Engineering GmbH
+ * 2008-2011 DENX Software Engineering GmbH
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -70,11 +70,11 @@
#address-cells = <1>;
#size-cells = <0>;
cell-index = <0>;
- compatible = "fsl-i2c";
+ compatible = "fsl,mpc8313-i2c","fsl-i2c";
reg = <0x3000 0x100>;
interrupts = <14 0x8>;
interrupt-parent = <&ipic>;
- dfsrr;
+ clock-frequency = <400000>;
};
serial0: serial@4500 {
@@ -137,6 +137,13 @@
compatible = "fsl,mpc8360-par_io";
num-ports = <7>;
+ qe_pio_c: gpio-controller@30 {
+ #gpio-cells = <2>;
+ compatible = "fsl,mpc8360-qe-pario-bank",
+ "fsl,mpc8323-qe-pario-bank";
+ reg = <0x1430 0x18>;
+ gpio-controller;
+ };
pio_ucc1: ucc_pin@0 {
reg = <0>;
@@ -472,7 +479,17 @@
#address-cells = <0>;
#interrupt-cells = <1>;
reg = <0x80 0x80>;
- interrupts = <32 8 33 8>;
+ big-endian;
+ interrupts = <
+ 32 0x8
+ 33 0x8
+ 34 0x8
+ 35 0x8
+ 40 0x8
+ 41 0x8
+ 42 0x8
+ 43 0x8
+ >;
interrupt-parent = <&ipic>;
};
};
@@ -484,43 +501,31 @@
compatible = "fsl,mpc8360-localbus", "fsl,pq2pro-localbus",
"simple-bus";
reg = <0xe0005000 0xd8>;
- ranges = <0 0 0xf0000000 0x04000000>; /* Filled in by U-Boot */
+ ranges = <0 0 0xf0000000 0x04000000 /* LB 0 */
+ 1 0 0xe8000000 0x01000000 /* LB 1 */
+ 3 0 0xa0000000 0x10000000>; /* LB 3 */
- flash@f0000000,0 {
+ flash@0,0 {
compatible = "cfi-flash";
- /*
- * The Intel P30 chip has 2 non-identical chips on
- * one die, so we need to define 2 separate regions
- * that are scanned by physmap_of independantly.
- */
- reg = <0 0x00000000 0x02000000
- 0 0x02000000 0x02000000>; /* Filled in by U-Boot */
- bank-width = <2>;
+ reg = <0 0 0x04000000>;
#address-cells = <1>;
#size-cells = <1>;
- partition@0 {
+ bank-width = <2>;
+ partition@0 { /* 768KB */
label = "u-boot";
- reg = <0 0x40000>;
+ reg = <0 0xC0000>;
};
- partition@40000 {
+ partition@c0000 { /* 128KB */
label = "env";
- reg = <0x40000 0x40000>;
- };
- partition@80000 {
- label = "dtb";
- reg = <0x80000 0x20000>;
- };
- partition@a0000 {
- label = "kernel";
- reg = <0xa0000 0x300000>;
+ reg = <0xC0000 0x20000>;
};
- partition@3a0000 {
- label = "ramdisk";
- reg = <0x3a0000 0x800000>;
+ partition@e0000 { /* 128KB */
+ label = "envred";
+ reg = <0xE0000 0x20000>;
};
- partition@ba0000 {
- label = "user";
- reg = <0xba0000 0x3460000>;
+ partition@100000 { /* 64512KB */
+ label = "ubi0";
+ reg = <0x100000 0x3F00000>;
};
};
};
diff --git a/arch/powerpc/boot/dts/mgcoge.dts b/arch/powerpc/boot/dts/mgcoge.dts
index 0ce96644176d..1360d2f69024 100644
--- a/arch/powerpc/boot/dts/mgcoge.dts
+++ b/arch/powerpc/boot/dts/mgcoge.dts
@@ -13,7 +13,7 @@
/dts-v1/;
/ {
model = "MGCOGE";
- compatible = "keymile,mgcoge";
+ compatible = "keymile,km82xx";
#address-cells = <1>;
#size-cells = <1>;
@@ -48,8 +48,10 @@
reg = <0xf0010100 0x40>;
ranges = <0 0 0xfe000000 0x00400000
- 5 0 0x50000000 0x20000000
- >; /* Filled in by U-Boot */
+ 1 0 0x30000000 0x00010000
+ 2 0 0x40000000 0x00010000
+ 5 0 0x50000000 0x04000000
+ >;
flash@0,0 {
compatible = "cfi-flash";
@@ -60,36 +62,32 @@
device-width = <1>;
partition@0 {
label = "u-boot";
- reg = <0 0x40000>;
+ reg = <0x00000 0xC0000>;
};
- partition@40000 {
+ partition@1 {
label = "env";
- reg = <0x40000 0x20000>;
+ reg = <0xC0000 0x20000>;
};
- partition@60000 {
- label = "kernel";
- reg = <0x60000 0x220000>;
+ partition@2 {
+ label = "envred";
+ reg = <0xE0000 0x20000>;
};
- partition@280000 {
- label = "dtb";
- reg = <0x280000 0x20000>;
+ partition@3 {
+ label = "free";
+ reg = <0x100000 0x300000>;
};
};
flash@5,0 {
compatible = "cfi-flash";
- reg = <5 0x0 0x2000000>;
+ reg = <5 0x00000000 0x02000000
+ 5 0x02000000 0x02000000>;
#address-cells = <1>;
#size-cells = <1>;
bank-width = <2>;
- device-width = <2>;
- partition@0 {
- label = "ramdisk";
- reg = <0 0x7a0000>;
- };
- partition@7a0000 {
- label = "user";
- reg = <0x7a0000 0x1860000>;
+ partition@app { /* 64 MBytes */
+ label = "ubi0";
+ reg = <0x00000000 0x04000000>;
};
};
};
@@ -217,6 +215,13 @@
};
};
+ cpm2_pio_c: gpio-controller@10d40 {
+ #gpio-cells = <2>;
+ compatible = "fsl,cpm2-pario-bank";
+ reg = <0x10d40 0x14>;
+ gpio-controller;
+ };
+
PIC: interrupt-controller@10c00 {
#interrupt-cells = <2>;
interrupt-controller;
diff --git a/arch/powerpc/boot/dts/mgsuvd.dts b/arch/powerpc/boot/dts/mgsuvd.dts
deleted file mode 100644
index e4fc53ab42bd..000000000000
--- a/arch/powerpc/boot/dts/mgsuvd.dts
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * MGSUVD Device Tree Source
- *
- * Copyright 2008 DENX Software Engineering GmbH
- * Heiko Schocher <hs@denx.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-/dts-v1/;
-/ {
- model = "MGSUVD";
- compatible = "keymile,mgsuvd";
- #address-cells = <1>;
- #size-cells = <1>;
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- PowerPC,852@0 {
- device_type = "cpu";
- reg = <0>;
- d-cache-line-size = <16>;
- i-cache-line-size = <16>;
- d-cache-size = <8192>;
- i-cache-size = <8192>;
- timebase-frequency = <0>; /* Filled in by u-boot */
- bus-frequency = <0>; /* Filled in by u-boot */
- clock-frequency = <0>; /* Filled in by u-boot */
- interrupts = <15 2>; /* decrementer interrupt */
- interrupt-parent = <&PIC>;
- };
- };
-
- memory {
- device_type = "memory";
- reg = <00000000 0x4000000>; /* Filled in by u-boot */
- };
-
- localbus@fff00100 {
- compatible = "fsl,mpc852-localbus", "fsl,pq1-localbus", "simple-bus";
- #address-cells = <2>;
- #size-cells = <1>;
- reg = <0xfff00100 0x40>;
-
- ranges = <0 0 0xf0000000 0x01000000>; /* Filled in by u-boot */
-
- flash@0,0 {
- compatible = "cfi-flash";
- reg = <0 0 0x1000000>;
- #address-cells = <1>;
- #size-cells = <1>;
- bank-width = <1>;
- device-width = <1>;
- partition@0 {
- label = "u-boot";
- reg = <0 0x80000>;
- };
- partition@80000 {
- label = "env";
- reg = <0x80000 0x20000>;
- };
- partition@a0000 {
- label = "kernel";
- reg = <0xa0000 0x1e0000>;
- };
- partition@280000 {
- label = "dtb";
- reg = <0x280000 0x20000>;
- };
- partition@2a0000 {
- label = "root";
- reg = <0x2a0000 0x500000>;
- };
- partition@7a0000 {
- label = "user";
- reg = <0x7a0000 0x860000>;
- };
- };
- };
-
- soc@fff00000 {
- compatible = "fsl,mpc852", "fsl,pq1-soc", "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "soc";
- ranges = <0 0xfff00000 0x00004000>;
-
- PIC: interrupt-controller@0 {
- interrupt-controller;
- #interrupt-cells = <2>;
- reg = <0 24>;
- compatible = "fsl,mpc852-pic", "fsl,pq1-pic";
- };
-
- cpm@9c0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,mpc852-cpm", "fsl,cpm1", "simple-bus";
- interrupts = <0>; /* cpm error interrupt */
- interrupt-parent = <&CPM_PIC>;
- reg = <0x9c0 10>;
- ranges;
-
- muram@2000 {
- compatible = "fsl,cpm-muram";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0x2000 0x2000>;
-
- data@0 {
- compatible = "fsl,cpm-muram-data";
- reg = <0x800 0x1800>;
- };
- };
-
- brg@9f0 {
- compatible = "fsl,mpc852-brg",
- "fsl,cpm1-brg",
- "fsl,cpm-brg";
- reg = <0x9f0 0x10>;
- clock-frequency = <0>; /* Filled in by u-boot */
- };
-
- CPM_PIC: interrupt-controller@930 {
- interrupt-controller;
- #interrupt-cells = <1>;
- interrupts = <5 2 0 2>;
- interrupt-parent = <&PIC>;
- reg = <0x930 0x20>;
- compatible = "fsl,cpm1-pic";
- };
-
- /* MON-1 */
- serial@a80 {
- device_type = "serial";
- compatible = "fsl,cpm1-smc-uart";
- reg = <0xa80 0x10 0x3fc0 0x40>;
- interrupts = <4>;
- interrupt-parent = <&CPM_PIC>;
- fsl,cpm-brg = <1>;
- fsl,cpm-command = <0x0090>;
- current-speed = <0>; /* Filled in by u-boot */
- };
-
- ethernet@a40 {
- device_type = "network";
- compatible = "fsl,mpc866-scc-enet",
- "fsl,cpm1-scc-enet";
- reg = <0xa40 0x18 0x3e00 0x100>;
- local-mac-address = [ 00 00 00 00 00 00 ]; /* Filled in by u-boot */
- interrupts = <28>;
- interrupt-parent = <&CPM_PIC>;
- fsl,cpm-command = <0x80>;
- fixed-link = <0 0 10 0 0>;
- };
- };
- };
-};
diff --git a/arch/powerpc/boot/dts/p1022ds.dts b/arch/powerpc/boot/dts/p1022ds.dts
index 69422eb24d97..59ef405c1c91 100644
--- a/arch/powerpc/boot/dts/p1022ds.dts
+++ b/arch/powerpc/boot/dts/p1022ds.dts
@@ -475,14 +475,14 @@
};
sata@18000 {
- compatible = "fsl,mpc8536-sata", "fsl,pq-sata";
+ compatible = "fsl,p1022-sata", "fsl,pq-sata-v2";
reg = <0x18000 0x1000>;
cell-index = <1>;
interrupts = <74 0x2>;
};
sata@19000 {
- compatible = "fsl,mpc8536-sata", "fsl,pq-sata";
+ compatible = "fsl,p1022-sata", "fsl,pq-sata-v2";
reg = <0x19000 0x1000>;
cell-index = <2>;
interrupts = <41 0x2>;
diff --git a/arch/powerpc/configs/83xx/kmeter1_defconfig b/arch/powerpc/configs/83xx/kmeter1_defconfig
index 7a7b731c5735..07e1bbadebfe 100644
--- a/arch/powerpc/configs/83xx/kmeter1_defconfig
+++ b/arch/powerpc/configs/83xx/kmeter1_defconfig
@@ -2,6 +2,7 @@ CONFIG_EXPERIMENTAL=y
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_SPARSE_IRQ=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_EXPERT=y
# CONFIG_HOTPLUG is not set
@@ -18,7 +19,6 @@ CONFIG_KMETER1=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT=y
-CONFIG_SPARSE_IRQ=y
# CONFIG_SECCOMP is not set
CONFIG_NET=y
CONFIG_PACKET=y
@@ -37,7 +37,6 @@ CONFIG_MTD=y
CONFIG_MTD_CONCAT=y
CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_OF_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
@@ -49,13 +48,12 @@ CONFIG_MTD_UBI=y
CONFIG_MTD_UBI_GLUEBI=y
CONFIG_MTD_UBI_DEBUG=y
CONFIG_PROC_DEVICETREE=y
-# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
CONFIG_TUN=y
+CONFIG_MII=y
CONFIG_MARVELL_PHY=y
CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
CONFIG_UCC_GETH=y
# CONFIG_NETDEV_10000 is not set
CONFIG_WAN=y
@@ -77,7 +75,6 @@ CONFIG_I2C_MPC=y
# CONFIG_USB_SUPPORT is not set
CONFIG_UIO=y
# CONFIG_DNOTIFY is not set
-CONFIG_INOTIFY=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_NFS_FS=y
diff --git a/arch/powerpc/configs/mgcoge_defconfig b/arch/powerpc/configs/mgcoge_defconfig
index 39518e91822f..6cb588a7d425 100644
--- a/arch/powerpc/configs/mgcoge_defconfig
+++ b/arch/powerpc/configs/mgcoge_defconfig
@@ -1,4 +1,5 @@
CONFIG_SYSVIPC=y
+CONFIG_SPARSE_IRQ=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -10,7 +11,6 @@ CONFIG_SLAB=y
CONFIG_PPC_82xx=y
CONFIG_MGCOGE=y
CONFIG_BINFMT_MISC=y
-CONFIG_SPARSE_IRQ=y
# CONFIG_SECCOMP is not set
CONFIG_NET=y
CONFIG_PACKET=y
@@ -30,7 +30,6 @@ CONFIG_MTD=y
CONFIG_MTD_CONCAT=y
CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_OF_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLKDEVS=y
CONFIG_MTD_CFI=y
@@ -43,7 +42,6 @@ CONFIG_MTD_PHYSMAP_OF=y
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
-# CONFIG_MISC_DEVICES is not set
# CONFIG_MACINTOSH_DRIVERS is not set
CONFIG_NETDEVICES=y
CONFIG_FIXED_PHY=y
@@ -67,7 +65,6 @@ CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
-CONFIG_INOTIFY=y
CONFIG_AUTOFS4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
@@ -88,13 +85,9 @@ CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_BDI_SWITCH=y
-CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
-CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_DES=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/powerpc/configs/mgsuvd_defconfig b/arch/powerpc/configs/mgsuvd_defconfig
deleted file mode 100644
index 2a490626015c..000000000000
--- a/arch/powerpc/configs/mgsuvd_defconfig
+++ /dev/null
@@ -1,81 +0,0 @@
-CONFIG_PPC_8xx=y
-CONFIG_EXPERIMENTAL=y
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EXPERT=y
-# CONFIG_SYSCTL_SYSCALL is not set
-# CONFIG_HOTPLUG is not set
-# CONFIG_BUG is not set
-# CONFIG_BASE_FULL is not set
-# CONFIG_EPOLL is not set
-# CONFIG_VM_EVENT_COUNTERS is not set
-CONFIG_SLAB=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PPC_MGSUVD=y
-CONFIG_8xx_COPYBACK=y
-CONFIG_8xx_CPU6=y
-CONFIG_I2C_SPI_SMC1_UCODE_PATCH=y
-CONFIG_HZ_1000=y
-CONFIG_MATH_EMULATION=y
-CONFIG_SPARSE_IRQ=y
-# CONFIG_SECCOMP is not set
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
-# CONFIG_IPV6 is not set
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_ADV_OPTIONS=y
-CONFIG_MTD_CFI_GEOMETRY=y
-# CONFIG_MTD_MAP_BANK_WIDTH_4 is not set
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_CFI_STAA=y
-CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-# CONFIG_MISC_DEVICES is not set
-CONFIG_NETDEVICES=y
-CONFIG_FIXED_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_FS_ENET=y
-# CONFIG_FS_ENET_HAS_FEC is not set
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
-# CONFIG_INPUT is not set
-# CONFIG_SERIO is not set
-# CONFIG_VT is not set
-CONFIG_SERIAL_CPM=y
-CONFIG_SERIAL_CPM_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_GEN_RTC=y
-# CONFIG_HWMON is not set
-# CONFIG_USB_SUPPORT is not set
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_CRAMFS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_CRC_CCITT=y
-CONFIG_DEBUG_FS=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index f0a211d96923..be3cdf9134ce 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -154,6 +154,7 @@ extern const char *powerpc_base_platform;
#define CPU_FTR_NAP_DISABLE_L2_PR ASM_CONST(0x0000000000002000)
#define CPU_FTR_DUAL_PLL_750FX ASM_CONST(0x0000000000004000)
#define CPU_FTR_NO_DPM ASM_CONST(0x0000000000008000)
+#define CPU_FTR_476_DD2 ASM_CONST(0x0000000000010000)
#define CPU_FTR_NEED_COHERENT ASM_CONST(0x0000000000020000)
#define CPU_FTR_NO_BTIC ASM_CONST(0x0000000000040000)
#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000000000100000)
@@ -465,7 +466,7 @@ enum {
CPU_FTRS_44X | CPU_FTRS_440x6 |
#endif
#ifdef CONFIG_PPC_47x
- CPU_FTRS_47X |
+ CPU_FTRS_47X | CPU_FTR_476_DD2 |
#endif
#ifdef CONFIG_E200
CPU_FTRS_E200 |
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index ff08b70b36d4..bb712c9488b3 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -141,6 +141,8 @@ static inline bool arch_irqs_disabled(void)
#endif /* CONFIG_PPC64 */
+#define ARCH_IRQ_INIT_FLAGS IRQ_NOREQUEST
+
/*
* interrupt-retrigger: should we handle this via lost interrupts and IPIs
* or should we not care like we do now ? --BenH.
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
index e000cce8f6dd..946ec4947da2 100644
--- a/arch/powerpc/include/asm/mpic.h
+++ b/arch/powerpc/include/asm/mpic.h
@@ -467,11 +467,11 @@ extern void mpic_request_ipis(void);
void smp_mpic_message_pass(int target, int msg);
/* Unmask a specific virq */
-extern void mpic_unmask_irq(unsigned int irq);
+extern void mpic_unmask_irq(struct irq_data *d);
/* Mask a specific virq */
-extern void mpic_mask_irq(unsigned int irq);
+extern void mpic_mask_irq(struct irq_data *d);
/* EOI a specific virq */
-extern void mpic_end_irq(unsigned int irq);
+extern void mpic_end_irq(struct irq_data *d);
/* Fetch interrupt from a given mpic */
extern unsigned int mpic_get_one_irq(struct mpic *mpic);
diff --git a/arch/powerpc/include/asm/nvram.h b/arch/powerpc/include/asm/nvram.h
index 92efe67d1c57..9d1aafe607c7 100644
--- a/arch/powerpc/include/asm/nvram.h
+++ b/arch/powerpc/include/asm/nvram.h
@@ -51,7 +51,8 @@ static inline int mmio_nvram_init(void)
extern int __init nvram_scan_partitions(void);
extern loff_t nvram_create_partition(const char *name, int sig,
int req_size, int min_size);
-extern int nvram_remove_partition(const char *name, int sig);
+extern int nvram_remove_partition(const char *name, int sig,
+ const char *exceptions[]);
extern int nvram_get_partition_size(loff_t data_index);
extern loff_t nvram_find_partition(const char *name, int sig, int *out_size);
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 89f158731ce3..88b0bd925a8b 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -170,6 +170,7 @@ extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addre
#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
_PAGE_COHERENT | _PAGE_WRITETHRU))
+#define pgprot_writecombine pgprot_noncached_wc
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
diff --git a/arch/powerpc/include/asm/qe_ic.h b/arch/powerpc/include/asm/qe_ic.h
index cf519663a791..9e2cb2019161 100644
--- a/arch/powerpc/include/asm/qe_ic.h
+++ b/arch/powerpc/include/asm/qe_ic.h
@@ -81,7 +81,7 @@ int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high);
static inline void qe_ic_cascade_low_ipic(unsigned int irq,
struct irq_desc *desc)
{
- struct qe_ic *qe_ic = desc->handler_data;
+ struct qe_ic *qe_ic = get_irq_desc_data(desc);
unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
if (cascade_irq != NO_IRQ)
@@ -91,7 +91,7 @@ static inline void qe_ic_cascade_low_ipic(unsigned int irq,
static inline void qe_ic_cascade_high_ipic(unsigned int irq,
struct irq_desc *desc)
{
- struct qe_ic *qe_ic = desc->handler_data;
+ struct qe_ic *qe_ic = get_irq_desc_data(desc);
unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
if (cascade_irq != NO_IRQ)
@@ -101,32 +101,35 @@ static inline void qe_ic_cascade_high_ipic(unsigned int irq,
static inline void qe_ic_cascade_low_mpic(unsigned int irq,
struct irq_desc *desc)
{
- struct qe_ic *qe_ic = desc->handler_data;
+ struct qe_ic *qe_ic = get_irq_desc_data(desc);
unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
+ struct irq_chip *chip = get_irq_desc_chip(desc);
if (cascade_irq != NO_IRQ)
generic_handle_irq(cascade_irq);
- desc->chip->eoi(irq);
+ chip->irq_eoi(&desc->irq_data);
}
static inline void qe_ic_cascade_high_mpic(unsigned int irq,
struct irq_desc *desc)
{
- struct qe_ic *qe_ic = desc->handler_data;
+ struct qe_ic *qe_ic = get_irq_desc_data(desc);
unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
+ struct irq_chip *chip = get_irq_desc_chip(desc);
if (cascade_irq != NO_IRQ)
generic_handle_irq(cascade_irq);
- desc->chip->eoi(irq);
+ chip->irq_eoi(&desc->irq_data);
}
static inline void qe_ic_cascade_muxed_mpic(unsigned int irq,
struct irq_desc *desc)
{
- struct qe_ic *qe_ic = desc->handler_data;
+ struct qe_ic *qe_ic = get_irq_desc_data(desc);
unsigned int cascade_irq;
+ struct irq_chip *chip = get_irq_desc_chip(desc);
cascade_irq = qe_ic_get_high_irq(qe_ic);
if (cascade_irq == NO_IRQ)
@@ -135,7 +138,7 @@ static inline void qe_ic_cascade_muxed_mpic(unsigned int irq,
if (cascade_irq != NO_IRQ)
generic_handle_irq(cascade_irq);
- desc->chip->eoi(irq);
+ chip->irq_eoi(&desc->irq_data);
}
#endif /* _ASM_POWERPC_QE_IC_H */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 125fc1ad665d..7e4abebe76c0 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -170,6 +170,16 @@
#define SPEFSCR_FRMC 0x00000003 /* Embedded FP rounding mode control */
/* Special Purpose Registers (SPRNs)*/
+
+#ifdef CONFIG_40x
+#define SPRN_PID 0x3B1 /* Process ID */
+#else
+#define SPRN_PID 0x030 /* Process ID */
+#ifdef CONFIG_BOOKE
+#define SPRN_PID0 SPRN_PID/* Process ID Register 0 */
+#endif
+#endif
+
#define SPRN_CTR 0x009 /* Count Register */
#define SPRN_DSCR 0x11
#define SPRN_CTRLF 0x088
@@ -852,6 +862,8 @@
#define PVR_7450 0x80000000
#define PVR_8540 0x80200000
#define PVR_8560 0x80200000
+#define PVR_VER_E500V1 0x8020
+#define PVR_VER_E500V2 0x8021
/*
* For the 8xx processors, all of them report the same PVR family for
* the PowerPC core. The various versions of these processors must be
@@ -880,6 +892,7 @@
#define PV_970 0x0039
#define PV_POWER5 0x003A
#define PV_POWER5p 0x003B
+#define PV_POWER7 0x003F
#define PV_970FX 0x003C
#define PV_630 0x0040
#define PV_630p 0x0041
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index e68c69bf741a..86ad8128963a 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -150,8 +150,6 @@
* or IBM 40x.
*/
#ifdef CONFIG_BOOKE
-#define SPRN_PID 0x030 /* Process ID */
-#define SPRN_PID0 SPRN_PID/* Process ID Register 0 */
#define SPRN_CSRR0 0x03A /* Critical Save and Restore Register 0 */
#define SPRN_CSRR1 0x03B /* Critical Save and Restore Register 1 */
#define SPRN_DEAR 0x03D /* Data Error Address Register */
@@ -168,7 +166,6 @@
#define SPRN_TCR 0x154 /* Timer Control Register */
#endif /* Book E */
#ifdef CONFIG_40x
-#define SPRN_PID 0x3B1 /* Process ID */
#define SPRN_DBCR1 0x3BD /* Debug Control Register 1 */
#define SPRN_ESR 0x3D4 /* Exception Syndrome Register */
#define SPRN_DEAR 0x3D5 /* Data Error Address Register */
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index e8e915ce3d8d..c9b68d07ac4f 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1811,11 +1811,11 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_440A,
.platform = "ppc440",
},
- { /* 476 core */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x11a50000,
+ { /* 476 DD2 core */
+ .pvr_mask = 0xffffffff,
+ .pvr_value = 0x11a52080,
.cpu_name = "476",
- .cpu_features = CPU_FTRS_47X,
+ .cpu_features = CPU_FTRS_47X | CPU_FTR_476_DD2,
.cpu_user_features = COMMON_USER_BOOKE |
PPC_FEATURE_HAS_FPU,
.mmu_features = MMU_FTR_TYPE_47x |
@@ -1839,6 +1839,20 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_47x,
.platform = "ppc470",
},
+ { /* 476 others */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x11a50000,
+ .cpu_name = "476",
+ .cpu_features = CPU_FTRS_47X,
+ .cpu_user_features = COMMON_USER_BOOKE |
+ PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_47x |
+ MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL,
+ .icache_bsize = 32,
+ .dcache_bsize = 128,
+ .machine_check = machine_check_47x,
+ .platform = "ppc470",
+ },
{ /* default match */
.pvr_mask = 0x00000000,
.pvr_value = 0x00000000,
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index ce557f6f00fc..0a5570338b96 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -237,6 +237,7 @@ int show_interrupts(struct seq_file *p, void *v)
int i = *(loff_t *) v, j, prec;
struct irqaction *action;
struct irq_desc *desc;
+ struct irq_chip *chip;
if (i > nr_irqs)
return 0;
@@ -270,8 +271,9 @@ int show_interrupts(struct seq_file *p, void *v)
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
- if (desc->chip)
- seq_printf(p, " %-16s", desc->chip->name);
+ chip = get_irq_desc_chip(desc);
+ if (chip)
+ seq_printf(p, " %-16s", chip->name);
else
seq_printf(p, " %-16s", "None");
seq_printf(p, " %-8s", (desc->status & IRQ_LEVEL) ? "Level" : "Edge");
@@ -313,6 +315,8 @@ void fixup_irqs(const struct cpumask *map)
alloc_cpumask_var(&mask, GFP_KERNEL);
for_each_irq(irq) {
+ struct irq_chip *chip;
+
desc = irq_to_desc(irq);
if (!desc)
continue;
@@ -320,13 +324,15 @@ void fixup_irqs(const struct cpumask *map)
if (desc->status & IRQ_PER_CPU)
continue;
- cpumask_and(mask, desc->affinity, map);
+ chip = get_irq_desc_chip(desc);
+
+ cpumask_and(mask, desc->irq_data.affinity, map);
if (cpumask_any(mask) >= nr_cpu_ids) {
printk("Breaking affinity for irq %i\n", irq);
cpumask_copy(mask, map);
}
- if (desc->chip->set_affinity)
- desc->chip->set_affinity(irq, mask);
+ if (chip->irq_set_affinity)
+ chip->irq_set_affinity(&desc->irq_data, mask, true);
else if (desc->action && !(warned++))
printk("Cannot set affinity for irq %i\n", irq);
}
@@ -678,16 +684,15 @@ void irq_set_virq_count(unsigned int count)
static int irq_setup_virq(struct irq_host *host, unsigned int virq,
irq_hw_number_t hwirq)
{
- struct irq_desc *desc;
+ int res;
- desc = irq_to_desc_alloc_node(virq, 0);
- if (!desc) {
+ res = irq_alloc_desc_at(virq, 0);
+ if (res != virq) {
pr_debug("irq: -> allocating desc failed\n");
goto error;
}
- /* Clear IRQ_NOREQUEST flag */
- desc->status &= ~IRQ_NOREQUEST;
+ irq_clear_status_flags(virq, IRQ_NOREQUEST);
/* map it */
smp_wmb();
@@ -696,11 +701,13 @@ static int irq_setup_virq(struct irq_host *host, unsigned int virq,
if (host->ops->map(host, virq, hwirq)) {
pr_debug("irq: -> mapping failed, freeing\n");
- goto error;
+ goto errdesc;
}
return 0;
+errdesc:
+ irq_free_descs(virq, 1);
error:
irq_free_virt(virq, 1);
return -1;
@@ -879,9 +886,9 @@ void irq_dispose_mapping(unsigned int virq)
smp_mb();
irq_map[virq].hwirq = host->inval_irq;
- /* Set some flags */
- irq_to_desc(virq)->status |= IRQ_NOREQUEST;
+ irq_set_status_flags(virq, IRQ_NOREQUEST);
+ irq_free_descs(virq, 1);
/* Free it */
irq_free_virt(virq, 1);
}
@@ -1074,21 +1081,6 @@ void irq_free_virt(unsigned int virq, unsigned int count)
int arch_early_irq_init(void)
{
- struct irq_desc *desc;
- int i;
-
- for (i = 0; i < NR_IRQS; i++) {
- desc = irq_to_desc(i);
- if (desc)
- desc->status |= IRQ_NOREQUEST;
- }
-
- return 0;
-}
-
-int arch_init_chip_data(struct irq_desc *desc, int node)
-{
- desc->status |= IRQ_NOREQUEST;
return 0;
}
@@ -1159,11 +1151,14 @@ static int virq_debug_show(struct seq_file *m, void *private)
raw_spin_lock_irqsave(&desc->lock, flags);
if (desc->action && desc->action->handler) {
+ struct irq_chip *chip;
+
seq_printf(m, "%5d ", i);
seq_printf(m, "0x%05lx ", virq_to_hw(i));
- if (desc->chip && desc->chip->name)
- p = desc->chip->name;
+ chip = get_irq_desc_chip(desc);
+ if (chip && chip->name)
+ p = chip->name;
else
p = none;
seq_printf(m, "%-15s ", p);
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index a5f8672eeff3..bd1e1ff17b2d 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -26,20 +26,23 @@ void machine_kexec_mask_interrupts(void) {
for_each_irq(i) {
struct irq_desc *desc = irq_to_desc(i);
+ struct irq_chip *chip;
- if (!desc || !desc->chip)
+ if (!desc)
continue;
- if (desc->chip->eoi &&
- desc->status & IRQ_INPROGRESS)
- desc->chip->eoi(i);
+ chip = get_irq_desc_chip(desc);
+ if (!chip)
+ continue;
+
+ if (chip->irq_eoi && desc->status & IRQ_INPROGRESS)
+ chip->irq_eoi(&desc->irq_data);
- if (desc->chip->mask)
- desc->chip->mask(i);
+ if (chip->irq_mask)
+ chip->irq_mask(&desc->irq_data);
- if (desc->chip->disable &&
- !(desc->status & IRQ_DISABLED))
- desc->chip->disable(i);
+ if (chip->irq_disable && !(desc->status & IRQ_DISABLED))
+ chip->irq_disable(&desc->irq_data);
}
}
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index bb12b3248f13..bec1e930ed73 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -237,22 +237,45 @@ static unsigned char __init nvram_checksum(struct nvram_header *p)
return c_sum;
}
+/*
+ * Per the criteria passed via nvram_remove_partition(), should this
+ * partition be removed? 1=remove, 0=keep
+ */
+static int nvram_can_remove_partition(struct nvram_partition *part,
+ const char *name, int sig, const char *exceptions[])
+{
+ if (part->header.signature != sig)
+ return 0;
+ if (name) {
+ if (strncmp(name, part->header.name, 12))
+ return 0;
+ } else if (exceptions) {
+ const char **except;
+ for (except = exceptions; *except; except++) {
+ if (!strncmp(*except, part->header.name, 12))
+ return 0;
+ }
+ }
+ return 1;
+}
+
/**
* nvram_remove_partition - Remove one or more partitions in nvram
* @name: name of the partition to remove, or NULL for a
* signature only match
* @sig: signature of the partition(s) to remove
+ * @exceptions: When removing all partitions with a matching signature,
+ * leave these alone.
*/
-int __init nvram_remove_partition(const char *name, int sig)
+int __init nvram_remove_partition(const char *name, int sig,
+ const char *exceptions[])
{
struct nvram_partition *part, *prev, *tmp;
int rc;
list_for_each_entry(part, &nvram_partitions, partition) {
- if (part->header.signature != sig)
- continue;
- if (name && strncmp(name, part->header.name, 12))
+ if (!nvram_can_remove_partition(part, name, sig, exceptions))
continue;
/* Make partition a free partition */
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index ab6f6beadb57..97e0ae414940 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -1269,6 +1269,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs)
return ip;
}
+static bool pmc_overflow(unsigned long val)
+{
+ if ((int)val < 0)
+ return true;
+
+ /*
+ * Events on POWER7 can roll back if a speculative event doesn't
+ * eventually complete. Unfortunately in some rare cases they will
+ * raise a performance monitor exception. We need to catch this to
+ * ensure we reset the PMC. In all cases the PMC will be 256 or less
+ * cycles from overflow.
+ *
+ * We only do this if the first pass fails to find any overflowing
+ * PMCs because a user might set a period of less than 256 and we
+ * don't want to mistakenly reset them.
+ */
+ if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256))
+ return true;
+
+ return false;
+}
+
/*
* Performance monitor interrupt stuff
*/
@@ -1316,7 +1338,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
if (is_limited_pmc(i + 1))
continue;
val = read_pmc(i + 1);
- if ((int)val < 0)
+ if (pmc_overflow(val))
write_pmc(i + 1, 0);
}
}
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 7185f0da7dc3..05b7139d6a27 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -97,7 +97,7 @@ static void __init move_device_tree(void)
start = __pa(initial_boot_params);
size = be32_to_cpu(initial_boot_params->totalsize);
- if ((memory_limit && (start + size) > memory_limit) ||
+ if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) ||
overlaps_crashkernel(start, size)) {
p = __va(memblock_alloc(size, PAGE_SIZE));
memcpy(p, initial_boot_params, size);
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index 049dbecb5dbc..7980ec0e1e1a 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -412,7 +412,8 @@ static void rtas_event_scan(struct work_struct *w)
get_online_cpus();
- cpu = cpumask_next(smp_processor_id(), cpu_online_mask);
+ /* raw_ OK because just using CPU as starting point. */
+ cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
if (cpu >= nr_cpu_ids) {
cpu = cpumask_first(cpu_online_mask);
diff --git a/arch/powerpc/math-emu/math_efp.c b/arch/powerpc/math-emu/math_efp.c
index 41f4ef30e480..62279200d965 100644
--- a/arch/powerpc/math-emu/math_efp.c
+++ b/arch/powerpc/math-emu/math_efp.c
@@ -1,7 +1,7 @@
/*
* arch/powerpc/math-emu/math_efp.c
*
- * Copyright (C) 2006-2008 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (C) 2006-2008, 2010 Freescale Semiconductor, Inc.
*
* Author: Ebony Zhu, <ebony.zhu@freescale.com>
* Yu Liu, <yu.liu@freescale.com>
@@ -104,6 +104,8 @@
#define FP_EX_MASK (FP_EX_INEXACT | FP_EX_INVALID | FP_EX_DIVZERO | \
FP_EX_UNDERFLOW | FP_EX_OVERFLOW)
+static int have_e500_cpu_a005_erratum;
+
union dw_union {
u64 dp[1];
u32 wp[2];
@@ -320,7 +322,8 @@ int do_spe_mathemu(struct pt_regs *regs)
} else {
_FP_ROUND_ZERO(1, SB);
}
- FP_TO_INT_S(vc.wp[1], SB, 32, ((func & 0x3) != 0));
+ FP_TO_INT_S(vc.wp[1], SB, 32,
+ (((func & 0x3) != 0) || SB_s));
goto update_regs;
default:
@@ -458,7 +461,8 @@ cmp_s:
} else {
_FP_ROUND_ZERO(2, DB);
}
- FP_TO_INT_D(vc.wp[1], DB, 32, ((func & 0x3) != 0));
+ FP_TO_INT_D(vc.wp[1], DB, 32,
+ (((func & 0x3) != 0) || DB_s));
goto update_regs;
default:
@@ -589,8 +593,10 @@ cmp_d:
_FP_ROUND_ZERO(1, SB0);
_FP_ROUND_ZERO(1, SB1);
}
- FP_TO_INT_S(vc.wp[0], SB0, 32, ((func & 0x3) != 0));
- FP_TO_INT_S(vc.wp[1], SB1, 32, ((func & 0x3) != 0));
+ FP_TO_INT_S(vc.wp[0], SB0, 32,
+ (((func & 0x3) != 0) || SB0_s));
+ FP_TO_INT_S(vc.wp[1], SB1, 32,
+ (((func & 0x3) != 0) || SB1_s));
goto update_regs;
default:
@@ -652,6 +658,15 @@ update_regs:
return 0;
illegal:
+ if (have_e500_cpu_a005_erratum) {
+ /* according to e500 cpu a005 erratum, reissue efp inst */
+ regs->nip -= 4;
+#ifdef DEBUG
+ printk(KERN_DEBUG "re-issue efp inst: %08lx\n", speinsn);
+#endif
+ return 0;
+ }
+
printk(KERN_ERR "\nOoops! IEEE-754 compliance handler encountered un-supported instruction.\ninst code: %08lx\n", speinsn);
return -ENOSYS;
}
@@ -718,3 +733,43 @@ int speround_handler(struct pt_regs *regs)
return 0;
}
+
+int __init spe_mathemu_init(void)
+{
+ u32 pvr, maj, min;
+
+ pvr = mfspr(SPRN_PVR);
+
+ if ((PVR_VER(pvr) == PVR_VER_E500V1) ||
+ (PVR_VER(pvr) == PVR_VER_E500V2)) {
+ maj = PVR_MAJ(pvr);
+ min = PVR_MIN(pvr);
+
+ /*
+ * E500 revision below 1.1, 2.3, 3.1, 4.1, 5.1
+ * need cpu a005 errata workaround
+ */
+ switch (maj) {
+ case 1:
+ if (min < 1)
+ have_e500_cpu_a005_erratum = 1;
+ break;
+ case 2:
+ if (min < 3)
+ have_e500_cpu_a005_erratum = 1;
+ break;
+ case 3:
+ case 4:
+ case 5:
+ if (min < 1)
+ have_e500_cpu_a005_erratum = 1;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+module_init(spe_mathemu_init);
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 742da43b4ab6..d65b591e5556 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -148,7 +148,7 @@ void __init MMU_init(void)
lowmem_end_addr = memstart_addr + total_lowmem;
#ifndef CONFIG_HIGHMEM
total_memory = total_lowmem;
- memblock_enforce_memory_limit(lowmem_end_addr);
+ memblock_enforce_memory_limit(total_lowmem);
memblock_analyze();
#endif /* CONFIG_HIGHMEM */
}
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
index af405eefe48d..7c63c0ed4f1b 100644
--- a/arch/powerpc/mm/tlb_nohash_low.S
+++ b/arch/powerpc/mm/tlb_nohash_low.S
@@ -189,6 +189,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
blr
#ifdef CONFIG_PPC_47x
+
+/*
+ * 47x variant of icbt
+ */
+# define ICBT(CT,RA,RB) \
+ .long 0x7c00002c | ((CT) << 21) | ((RA) << 16) | ((RB) << 11)
+
/*
* _tlbivax_bcast is only on 47x. We don't bother doing a runtime
* check though, it will blow up soon enough if we mistakenly try
@@ -206,7 +213,35 @@ _GLOBAL(_tlbivax_bcast)
isync
eieio
tlbsync
+BEGIN_FTR_SECTION
+ b 1f
+END_FTR_SECTION_IFSET(CPU_FTR_476_DD2)
+ sync
+ wrtee r10
+ blr
+/*
+ * DD2 HW could hang if in instruction fetch happens before msync completes.
+ * Touch enough instruction cache lines to ensure cache hits
+ */
+1: mflr r9
+ bl 2f
+2: mflr r6
+ li r7,32
+ ICBT(0,r6,r7) /* touch next cache line */
+ add r6,r6,r7
+ ICBT(0,r6,r7) /* touch next cache line */
+ add r6,r6,r7
+ ICBT(0,r6,r7) /* touch next cache line */
sync
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ mtlr r9
wrtee r10
blr
#endif /* CONFIG_PPC_47x */
diff --git a/arch/powerpc/platforms/44x/44x.h b/arch/powerpc/platforms/44x/44x.h
index dbc4d2b4301a..63f703ecd23c 100644
--- a/arch/powerpc/platforms/44x/44x.h
+++ b/arch/powerpc/platforms/44x/44x.h
@@ -4,4 +4,8 @@
extern u8 as1_readb(volatile u8 __iomem *addr);
extern void as1_writeb(u8 data, volatile u8 __iomem *addr);
+#define GPIO0_OSRH 0xC
+#define GPIO0_TSRH 0x14
+#define GPIO0_ISR1H 0x34
+
#endif /* __POWERPC_PLATFORMS_44X_44X_H */
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 0f979c5c756b..f485fc5f6d5e 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -115,7 +115,6 @@ config CANYONLANDS
bool "Canyonlands"
depends on 44x
default n
- select PPC44x_SIMPLE
select 460EX
select PCI
select PPC4xx_PCI_EXPRESS
diff --git a/arch/powerpc/platforms/44x/Makefile b/arch/powerpc/platforms/44x/Makefile
index c04d16df8488..553db6007217 100644
--- a/arch/powerpc/platforms/44x/Makefile
+++ b/arch/powerpc/platforms/44x/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_WARP) += warp.o
obj-$(CONFIG_XILINX_VIRTEX_5_FXT) += virtex.o
obj-$(CONFIG_XILINX_ML510) += virtex_ml510.o
obj-$(CONFIG_ISS4xx) += iss4xx.o
+obj-$(CONFIG_CANYONLANDS)+= canyonlands.o
diff --git a/arch/powerpc/platforms/44x/canyonlands.c b/arch/powerpc/platforms/44x/canyonlands.c
new file mode 100644
index 000000000000..afc5e8ea3775
--- /dev/null
+++ b/arch/powerpc/platforms/44x/canyonlands.c
@@ -0,0 +1,134 @@
+/*
+ * This contain platform specific code for APM PPC460EX based Canyonlands
+ * board.
+ *
+ * Copyright (c) 2010, Applied Micro Circuits Corporation
+ * Author: Rupjyoti Sarmah <rsarmah@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/pci-bridge.h>
+#include <asm/ppc4xx.h>
+#include <asm/udbg.h>
+#include <asm/uic.h>
+#include <linux/of_platform.h>
+#include <linux/delay.h>
+#include "44x.h"
+
+#define BCSR_USB_EN 0x11
+
+static __initdata struct of_device_id ppc460ex_of_bus[] = {
+ { .compatible = "ibm,plb4", },
+ { .compatible = "ibm,opb", },
+ { .compatible = "ibm,ebc", },
+ { .compatible = "simple-bus", },
+ {},
+};
+
+static int __init ppc460ex_device_probe(void)
+{
+ of_platform_bus_probe(NULL, ppc460ex_of_bus, NULL);
+
+ return 0;
+}
+machine_device_initcall(canyonlands, ppc460ex_device_probe);
+
+/* Using this code only for the Canyonlands board. */
+
+static int __init ppc460ex_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+ if (of_flat_dt_is_compatible(root, "amcc,canyonlands")) {
+ ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC);
+ return 1;
+ }
+ return 0;
+}
+
+/* USB PHY fixup code on Canyonlands kit. */
+
+static int __init ppc460ex_canyonlands_fixup(void)
+{
+ u8 __iomem *bcsr;
+ void __iomem *vaddr;
+ struct device_node *np;
+ int ret = 0;
+
+ np = of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-bcsr");
+ if (!np) {
+ printk(KERN_ERR "failed did not find amcc, ppc460ex bcsr node\n");
+ return -ENODEV;
+ }
+
+ bcsr = of_iomap(np, 0);
+ of_node_put(np);
+
+ if (!bcsr) {
+ printk(KERN_CRIT "Could not remap bcsr\n");
+ ret = -ENODEV;
+ goto err_bcsr;
+ }
+
+ np = of_find_compatible_node(NULL, NULL, "ibm,ppc4xx-gpio");
+ if (!np) {
+ printk(KERN_ERR "failed did not find ibm,ppc4xx-gpio node\n");
+ return -ENODEV;
+ }
+
+ vaddr = of_iomap(np, 0);
+ of_node_put(np);
+
+ if (!vaddr) {
+ printk(KERN_CRIT "Could not get gpio node address\n");
+ ret = -ENODEV;
+ goto err_gpio;
+ }
+ /* Disable USB through the BCSR7 bits */
+ setbits8(&bcsr[7], BCSR_USB_EN);
+
+ /* Wait for a while after reset */
+ msleep(100);
+
+ /* Enable USB here */
+ clrbits8(&bcsr[7], BCSR_USB_EN);
+
+ /*
+ * Configure multiplexed gpio16 and gpio19 as alternate1 output
+ * source after USB reset. In this configuration gpio16 will be
+ * USB2HStop and gpio19 will be USB2DStop. For more details refer to
+ * table 34-7 of PPC460EX user manual.
+ */
+ setbits32((vaddr + GPIO0_OSRH), 0x42000000);
+ setbits32((vaddr + GPIO0_TSRH), 0x42000000);
+err_gpio:
+ iounmap(vaddr);
+err_bcsr:
+ iounmap(bcsr);
+ return ret;
+}
+machine_device_initcall(canyonlands, ppc460ex_canyonlands_fixup);
+define_machine(canyonlands) {
+ .name = "Canyonlands",
+ .probe = ppc460ex_probe,
+ .progress = udbg_progress,
+ .init_IRQ = uic_init_tree,
+ .get_irq = uic_get_irq,
+ .restart = ppc4xx_reset_system,
+ .calibrate_decr = generic_calibrate_decr,
+};
diff --git a/arch/powerpc/platforms/44x/ppc44x_simple.c b/arch/powerpc/platforms/44x/ppc44x_simple.c
index 7ddcba3b9397..c81c19c0b3d4 100644
--- a/arch/powerpc/platforms/44x/ppc44x_simple.c
+++ b/arch/powerpc/platforms/44x/ppc44x_simple.c
@@ -53,7 +53,6 @@ static char *board[] __initdata = {
"amcc,arches",
"amcc,bamboo",
"amcc,bluestone",
- "amcc,canyonlands",
"amcc,glacier",
"ibm,ebony",
"amcc,eiger",
diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
index 4ecf4cf9a51b..fde0ea50c97d 100644
--- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
+++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
@@ -59,9 +59,9 @@ irq_to_pic_bit(unsigned int irq)
}
static void
-cpld_mask_irq(unsigned int irq)
+cpld_mask_irq(struct irq_data *d)
{
- unsigned int cpld_irq = (unsigned int)irq_map[irq].hwirq;
+ unsigned int cpld_irq = (unsigned int)irq_map[d->irq].hwirq;
void __iomem *pic_mask = irq_to_pic_mask(cpld_irq);
out_8(pic_mask,
@@ -69,9 +69,9 @@ cpld_mask_irq(unsigned int irq)
}
static void
-cpld_unmask_irq(unsigned int irq)
+cpld_unmask_irq(struct irq_data *d)
{
- unsigned int cpld_irq = (unsigned int)irq_map[irq].hwirq;
+ unsigned int cpld_irq = (unsigned int)irq_map[d->irq].hwirq;
void __iomem *pic_mask = irq_to_pic_mask(cpld_irq);
out_8(pic_mask,
@@ -80,9 +80,9 @@ cpld_unmask_irq(unsigned int irq)
static struct irq_chip cpld_pic = {
.name = "CPLD PIC",
- .mask = cpld_mask_irq,
- .ack = cpld_mask_irq,
- .unmask = cpld_unmask_irq,
+ .irq_mask = cpld_mask_irq,
+ .irq_ack = cpld_mask_irq,
+ .irq_unmask = cpld_unmask_irq,
};
static int
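
The hunks above all follow one mechanical pattern: irq_chip callbacks that used to take a bare virq number (.mask, .unmask, .ack, .set_type, ...) become .irq_* callbacks that take a struct irq_data *, with the virq available as d->irq, the hardware number via virq_to_hw(), and per-interrupt driver data via irq_data_get_irq_chip_data(). As a rough, illustrative sketch (not part of this patch — the mask-register layout and the my_pic_* names are assumptions), a chip written directly against the new interface looks like this:

#include <linux/irq.h>
#include <asm/io.h>

struct my_pic {
	u32 __iomem *mask_reg;	/* hypothetical register: a set bit masks the source */
};

static void my_pic_mask(struct irq_data *d)
{
	struct my_pic *pic = irq_data_get_irq_chip_data(d);
	unsigned int hw = virq_to_hw(d->irq);

	setbits32(pic->mask_reg, 1 << hw);
}

static void my_pic_unmask(struct irq_data *d)
{
	struct my_pic *pic = irq_data_get_irq_chip_data(d);
	unsigned int hw = virq_to_hw(d->irq);

	clrbits32(pic->mask_reg, 1 << hw);
}

static struct irq_chip my_pic_chip = {
	.name		= "MY-PIC",
	.irq_mask	= my_pic_mask,
	.irq_mask_ack	= my_pic_mask,
	.irq_unmask	= my_pic_unmask,
};
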
diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c
index 2c7780cb68e5..2bd1e6cf1f58 100644
--- a/arch/powerpc/platforms/52xx/media5200.c
+++ b/arch/powerpc/platforms/52xx/media5200.c
@@ -49,45 +49,46 @@ struct media5200_irq {
};
struct media5200_irq media5200_irq;
-static void media5200_irq_unmask(unsigned int virq)
+static void media5200_irq_unmask(struct irq_data *d)
{
unsigned long flags;
u32 val;
spin_lock_irqsave(&media5200_irq.lock, flags);
val = in_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE);
- val |= 1 << (MEDIA5200_IRQ_SHIFT + irq_map[virq].hwirq);
+ val |= 1 << (MEDIA5200_IRQ_SHIFT + irq_map[d->irq].hwirq);
out_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE, val);
spin_unlock_irqrestore(&media5200_irq.lock, flags);
}
-static void media5200_irq_mask(unsigned int virq)
+static void media5200_irq_mask(struct irq_data *d)
{
unsigned long flags;
u32 val;
spin_lock_irqsave(&media5200_irq.lock, flags);
val = in_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE);
- val &= ~(1 << (MEDIA5200_IRQ_SHIFT + irq_map[virq].hwirq));
+ val &= ~(1 << (MEDIA5200_IRQ_SHIFT + irq_map[d->irq].hwirq));
out_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE, val);
spin_unlock_irqrestore(&media5200_irq.lock, flags);
}
static struct irq_chip media5200_irq_chip = {
.name = "Media5200 FPGA",
- .unmask = media5200_irq_unmask,
- .mask = media5200_irq_mask,
- .mask_ack = media5200_irq_mask,
+ .irq_unmask = media5200_irq_unmask,
+ .irq_mask = media5200_irq_mask,
+ .irq_mask_ack = media5200_irq_mask,
};
void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc)
{
+ struct irq_chip *chip = get_irq_desc_chip(desc);
int sub_virq, val;
u32 status, enable;
/* Mask off the cascaded IRQ */
raw_spin_lock(&desc->lock);
- desc->chip->mask(virq);
+ chip->irq_mask(&desc->irq_data);
raw_spin_unlock(&desc->lock);
/* Ask the FPGA for IRQ status. If 'val' is 0, then no irqs
@@ -105,9 +106,9 @@ void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc)
/* Processing done; can reenable the cascade now */
raw_spin_lock(&desc->lock);
- desc->chip->ack(virq);
+ chip->irq_ack(&desc->irq_data);
if (!(desc->status & IRQ_DISABLED))
- desc->chip->unmask(virq);
+ chip->irq_unmask(&desc->irq_data);
raw_spin_unlock(&desc->lock);
}
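
Cascade handlers get the matching treatment: rather than dereferencing desc->chip and calling the old hooks with the virq, the flow handler fetches the parent chip with get_irq_desc_chip() and passes &desc->irq_data. For reference, a minimal sketch of such a cascade and its wiring is shown below, assuming <linux/irq.h> and <linux/init.h>; my_pic_get_irq() and the parent_virq handling are assumptions for the example, not code from this patch.

extern unsigned int my_pic_get_irq(void);	/* hypothetical: returns a virq or NO_IRQ */

static void my_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = get_irq_desc_chip(desc);
	unsigned int virq = my_pic_get_irq();

	if (virq != NO_IRQ)
		generic_handle_irq(virq);

	/* EOI the parent interrupt through the new irq_data interface */
	chip->irq_eoi(&desc->irq_data);
}

static void __init my_pic_hook_cascade(unsigned int parent_virq, void *priv)
{
	set_irq_data(parent_virq, priv);	/* later retrieved via get_irq_desc_data() */
	set_irq_chained_handler(parent_virq, my_cascade);
}
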
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
index 859abf1c6d4b..6da44f0f2934 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
@@ -135,9 +135,9 @@ DEFINE_MUTEX(mpc52xx_gpt_list_mutex);
* Cascaded interrupt controller hooks
*/
-static void mpc52xx_gpt_irq_unmask(unsigned int virq)
+static void mpc52xx_gpt_irq_unmask(struct irq_data *d)
{
- struct mpc52xx_gpt_priv *gpt = get_irq_chip_data(virq);
+ struct mpc52xx_gpt_priv *gpt = irq_data_get_irq_chip_data(d);
unsigned long flags;
spin_lock_irqsave(&gpt->lock, flags);
@@ -145,9 +145,9 @@ static void mpc52xx_gpt_irq_unmask(unsigned int virq)
spin_unlock_irqrestore(&gpt->lock, flags);
}
-static void mpc52xx_gpt_irq_mask(unsigned int virq)
+static void mpc52xx_gpt_irq_mask(struct irq_data *d)
{
- struct mpc52xx_gpt_priv *gpt = get_irq_chip_data(virq);
+ struct mpc52xx_gpt_priv *gpt = irq_data_get_irq_chip_data(d);
unsigned long flags;
spin_lock_irqsave(&gpt->lock, flags);
@@ -155,20 +155,20 @@ static void mpc52xx_gpt_irq_mask(unsigned int virq)
spin_unlock_irqrestore(&gpt->lock, flags);
}
-static void mpc52xx_gpt_irq_ack(unsigned int virq)
+static void mpc52xx_gpt_irq_ack(struct irq_data *d)
{
- struct mpc52xx_gpt_priv *gpt = get_irq_chip_data(virq);
+ struct mpc52xx_gpt_priv *gpt = irq_data_get_irq_chip_data(d);
out_be32(&gpt->regs->status, MPC52xx_GPT_STATUS_IRQMASK);
}
-static int mpc52xx_gpt_irq_set_type(unsigned int virq, unsigned int flow_type)
+static int mpc52xx_gpt_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
- struct mpc52xx_gpt_priv *gpt = get_irq_chip_data(virq);
+ struct mpc52xx_gpt_priv *gpt = irq_data_get_irq_chip_data(d);
unsigned long flags;
u32 reg;
- dev_dbg(gpt->dev, "%s: virq=%i type=%x\n", __func__, virq, flow_type);
+ dev_dbg(gpt->dev, "%s: virq=%i type=%x\n", __func__, d->irq, flow_type);
spin_lock_irqsave(&gpt->lock, flags);
reg = in_be32(&gpt->regs->mode) & ~MPC52xx_GPT_MODE_ICT_MASK;
@@ -184,10 +184,10 @@ static int mpc52xx_gpt_irq_set_type(unsigned int virq, unsigned int flow_type)
static struct irq_chip mpc52xx_gpt_irq_chip = {
.name = "MPC52xx GPT",
- .unmask = mpc52xx_gpt_irq_unmask,
- .mask = mpc52xx_gpt_irq_mask,
- .ack = mpc52xx_gpt_irq_ack,
- .set_type = mpc52xx_gpt_irq_set_type,
+ .irq_unmask = mpc52xx_gpt_irq_unmask,
+ .irq_mask = mpc52xx_gpt_irq_mask,
+ .irq_ack = mpc52xx_gpt_irq_ack,
+ .irq_set_type = mpc52xx_gpt_irq_set_type,
};
void mpc52xx_gpt_irq_cascade(unsigned int virq, struct irq_desc *desc)
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
index 4bf4bf7b063e..9f3ed582d082 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
@@ -155,47 +155,47 @@ static inline void io_be_clrbit(u32 __iomem *addr, int bitno)
/*
* IRQ[0-3] interrupt irq_chip
*/
-static void mpc52xx_extirq_mask(unsigned int virq)
+static void mpc52xx_extirq_mask(struct irq_data *d)
{
int irq;
int l2irq;
- irq = irq_map[virq].hwirq;
+ irq = irq_map[d->irq].hwirq;
l2irq = irq & MPC52xx_IRQ_L2_MASK;
io_be_clrbit(&intr->ctrl, 11 - l2irq);
}
-static void mpc52xx_extirq_unmask(unsigned int virq)
+static void mpc52xx_extirq_unmask(struct irq_data *d)
{
int irq;
int l2irq;
- irq = irq_map[virq].hwirq;
+ irq = irq_map[d->irq].hwirq;
l2irq = irq & MPC52xx_IRQ_L2_MASK;
io_be_setbit(&intr->ctrl, 11 - l2irq);
}
-static void mpc52xx_extirq_ack(unsigned int virq)
+static void mpc52xx_extirq_ack(struct irq_data *d)
{
int irq;
int l2irq;
- irq = irq_map[virq].hwirq;
+ irq = irq_map[d->irq].hwirq;
l2irq = irq & MPC52xx_IRQ_L2_MASK;
io_be_setbit(&intr->ctrl, 27-l2irq);
}
-static int mpc52xx_extirq_set_type(unsigned int virq, unsigned int flow_type)
+static int mpc52xx_extirq_set_type(struct irq_data *d, unsigned int flow_type)
{
u32 ctrl_reg, type;
int irq;
int l2irq;
void *handler = handle_level_irq;
- irq = irq_map[virq].hwirq;
+ irq = irq_map[d->irq].hwirq;
l2irq = irq & MPC52xx_IRQ_L2_MASK;
pr_debug("%s: irq=%x. l2=%d flow_type=%d\n", __func__, irq, l2irq, flow_type);
@@ -214,44 +214,44 @@ static int mpc52xx_extirq_set_type(unsigned int virq, unsigned int flow_type)
ctrl_reg |= (type << (22 - (l2irq * 2)));
out_be32(&intr->ctrl, ctrl_reg);
- __set_irq_handler_unlocked(virq, handler);
+ __set_irq_handler_unlocked(d->irq, handler);
return 0;
}
static struct irq_chip mpc52xx_extirq_irqchip = {
.name = "MPC52xx External",
- .mask = mpc52xx_extirq_mask,
- .unmask = mpc52xx_extirq_unmask,
- .ack = mpc52xx_extirq_ack,
- .set_type = mpc52xx_extirq_set_type,
+ .irq_mask = mpc52xx_extirq_mask,
+ .irq_unmask = mpc52xx_extirq_unmask,
+ .irq_ack = mpc52xx_extirq_ack,
+ .irq_set_type = mpc52xx_extirq_set_type,
};
/*
* Main interrupt irq_chip
*/
-static int mpc52xx_null_set_type(unsigned int virq, unsigned int flow_type)
+static int mpc52xx_null_set_type(struct irq_data *d, unsigned int flow_type)
{
return 0; /* Do nothing so that the sense mask will get updated */
}
-static void mpc52xx_main_mask(unsigned int virq)
+static void mpc52xx_main_mask(struct irq_data *d)
{
int irq;
int l2irq;
- irq = irq_map[virq].hwirq;
+ irq = irq_map[d->irq].hwirq;
l2irq = irq & MPC52xx_IRQ_L2_MASK;
io_be_setbit(&intr->main_mask, 16 - l2irq);
}
-static void mpc52xx_main_unmask(unsigned int virq)
+static void mpc52xx_main_unmask(struct irq_data *d)
{
int irq;
int l2irq;
- irq = irq_map[virq].hwirq;
+ irq = irq_map[d->irq].hwirq;
l2irq = irq & MPC52xx_IRQ_L2_MASK;
io_be_clrbit(&intr->main_mask, 16 - l2irq);
@@ -259,32 +259,32 @@ static void mpc52xx_main_unmask(unsigned int virq)
static struct irq_chip mpc52xx_main_irqchip = {
.name = "MPC52xx Main",
- .mask = mpc52xx_main_mask,
- .mask_ack = mpc52xx_main_mask,
- .unmask = mpc52xx_main_unmask,
- .set_type = mpc52xx_null_set_type,
+ .irq_mask = mpc52xx_main_mask,
+ .irq_mask_ack = mpc52xx_main_mask,
+ .irq_unmask = mpc52xx_main_unmask,
+ .irq_set_type = mpc52xx_null_set_type,
};
/*
* Peripherals interrupt irq_chip
*/
-static void mpc52xx_periph_mask(unsigned int virq)
+static void mpc52xx_periph_mask(struct irq_data *d)
{
int irq;
int l2irq;
- irq = irq_map[virq].hwirq;
+ irq = irq_map[d->irq].hwirq;
l2irq = irq & MPC52xx_IRQ_L2_MASK;
io_be_setbit(&intr->per_mask, 31 - l2irq);
}
-static void mpc52xx_periph_unmask(unsigned int virq)
+static void mpc52xx_periph_unmask(struct irq_data *d)
{
int irq;
int l2irq;
- irq = irq_map[virq].hwirq;
+ irq = irq_map[d->irq].hwirq;
l2irq = irq & MPC52xx_IRQ_L2_MASK;
io_be_clrbit(&intr->per_mask, 31 - l2irq);
@@ -292,43 +292,43 @@ static void mpc52xx_periph_unmask(unsigned int virq)
static struct irq_chip mpc52xx_periph_irqchip = {
.name = "MPC52xx Peripherals",
- .mask = mpc52xx_periph_mask,
- .mask_ack = mpc52xx_periph_mask,
- .unmask = mpc52xx_periph_unmask,
- .set_type = mpc52xx_null_set_type,
+ .irq_mask = mpc52xx_periph_mask,
+ .irq_mask_ack = mpc52xx_periph_mask,
+ .irq_unmask = mpc52xx_periph_unmask,
+ .irq_set_type = mpc52xx_null_set_type,
};
/*
* SDMA interrupt irq_chip
*/
-static void mpc52xx_sdma_mask(unsigned int virq)
+static void mpc52xx_sdma_mask(struct irq_data *d)
{
int irq;
int l2irq;
- irq = irq_map[virq].hwirq;
+ irq = irq_map[d->irq].hwirq;
l2irq = irq & MPC52xx_IRQ_L2_MASK;
io_be_setbit(&sdma->IntMask, l2irq);
}
-static void mpc52xx_sdma_unmask(unsigned int virq)
+static void mpc52xx_sdma_unmask(struct irq_data *d)
{
int irq;
int l2irq;
- irq = irq_map[virq].hwirq;
+ irq = irq_map[d->irq].hwirq;
l2irq = irq & MPC52xx_IRQ_L2_MASK;
io_be_clrbit(&sdma->IntMask, l2irq);
}
-static void mpc52xx_sdma_ack(unsigned int virq)
+static void mpc52xx_sdma_ack(struct irq_data *d)
{
int irq;
int l2irq;
- irq = irq_map[virq].hwirq;
+ irq = irq_map[d->irq].hwirq;
l2irq = irq & MPC52xx_IRQ_L2_MASK;
out_be32(&sdma->IntPend, 1 << l2irq);
@@ -336,10 +336,10 @@ static void mpc52xx_sdma_ack(unsigned int virq)
static struct irq_chip mpc52xx_sdma_irqchip = {
.name = "MPC52xx SDMA",
- .mask = mpc52xx_sdma_mask,
- .unmask = mpc52xx_sdma_unmask,
- .ack = mpc52xx_sdma_ack,
- .set_type = mpc52xx_null_set_type,
+ .irq_mask = mpc52xx_sdma_mask,
+ .irq_unmask = mpc52xx_sdma_unmask,
+ .irq_ack = mpc52xx_sdma_ack,
+ .irq_set_type = mpc52xx_null_set_type,
};
/**
diff --git a/arch/powerpc/platforms/82xx/Makefile b/arch/powerpc/platforms/82xx/Makefile
index d982793f4dbd..455fe21e37c4 100644
--- a/arch/powerpc/platforms/82xx/Makefile
+++ b/arch/powerpc/platforms/82xx/Makefile
@@ -6,4 +6,4 @@ obj-$(CONFIG_CPM2) += pq2.o
obj-$(CONFIG_PQ2_ADS_PCI_PIC) += pq2ads-pci-pic.o
obj-$(CONFIG_PQ2FADS) += pq2fads.o
obj-$(CONFIG_EP8248E) += ep8248e.o
-obj-$(CONFIG_MGCOGE) += mgcoge.o
+obj-$(CONFIG_MGCOGE) += km82xx.o
diff --git a/arch/powerpc/platforms/82xx/mgcoge.c b/arch/powerpc/platforms/82xx/km82xx.c
index 7a5de9eb3c73..428c5e0a0e75 100644
--- a/arch/powerpc/platforms/82xx/mgcoge.c
+++ b/arch/powerpc/platforms/82xx/km82xx.c
@@ -1,6 +1,6 @@
/*
- * Keymile mgcoge support
- * Copyright 2008 DENX Software Engineering GmbH
+ * Keymile km82xx support
+ * Copyright 2008-2011 DENX Software Engineering GmbH
* Author: Heiko Schocher <hs@denx.de>
*
* based on code from:
@@ -31,9 +31,10 @@
#include "pq2.h"
-static void __init mgcoge_pic_init(void)
+static void __init km82xx_pic_init(void)
{
- struct device_node *np = of_find_compatible_node(NULL, NULL, "fsl,pq2-pic");
+ struct device_node *np = of_find_compatible_node(NULL, NULL,
+ "fsl,pq2-pic");
if (!np) {
printk(KERN_ERR "PIC init: can not find cpm-pic node\n");
return;
@@ -47,12 +48,18 @@ struct cpm_pin {
int port, pin, flags;
};
-static __initdata struct cpm_pin mgcoge_pins[] = {
+static __initdata struct cpm_pin km82xx_pins[] = {
/* SMC2 */
{0, 8, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{0, 9, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
+ /* SCC1 */
+ {2, 21, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
+ {2, 15, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
+ {3, 31, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
+ {3, 30, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
+
/* SCC4 */
{2, 25, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{2, 24, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
@@ -107,30 +114,49 @@ static __initdata struct cpm_pin mgcoge_pins[] = {
{3, 14, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_OPENDRAIN},
{3, 15, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_OPENDRAIN},
#endif
+
+ /* USB */
+ {0, 10, CPM_PIN_OUTPUT | CPM_PIN_GPIO}, /* FULL_SPEED */
+ {0, 11, CPM_PIN_OUTPUT | CPM_PIN_GPIO}, /* /SLAVE */
+ {2, 10, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* RXN */
+ {2, 11, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* RXP */
+ {2, 20, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, /* /OE */
+ {2, 27, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* RXCLK */
+ {3, 23, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, /* TXP */
+ {3, 24, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, /* TXN */
+ {3, 25, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* RXD */
};
static void __init init_ioports(void)
{
int i;
- for (i = 0; i < ARRAY_SIZE(mgcoge_pins); i++) {
- const struct cpm_pin *pin = &mgcoge_pins[i];
+ for (i = 0; i < ARRAY_SIZE(km82xx_pins); i++) {
+ const struct cpm_pin *pin = &km82xx_pins[i];
cpm2_set_pin(pin->port, pin->pin, pin->flags);
}
cpm2_smc_clk_setup(CPM_CLK_SMC2, CPM_BRG8);
+ cpm2_clk_setup(CPM_CLK_SCC1, CPM_CLK11, CPM_CLK_RX);
+ cpm2_clk_setup(CPM_CLK_SCC1, CPM_CLK11, CPM_CLK_TX);
+ cpm2_clk_setup(CPM_CLK_SCC3, CPM_CLK5, CPM_CLK_RTX);
cpm2_clk_setup(CPM_CLK_SCC4, CPM_CLK7, CPM_CLK_RX);
cpm2_clk_setup(CPM_CLK_SCC4, CPM_CLK8, CPM_CLK_TX);
cpm2_clk_setup(CPM_CLK_FCC1, CPM_CLK10, CPM_CLK_RX);
cpm2_clk_setup(CPM_CLK_FCC1, CPM_CLK9, CPM_CLK_TX);
cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK13, CPM_CLK_RX);
cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK14, CPM_CLK_TX);
+
+ /* Force USB FULL SPEED bit to '1' */
+ setbits32(&cpm2_immr->im_ioport.iop_pdata, 1 << (31 - 10));
+ /* clear USB_SLAVE */
+ clrbits32(&cpm2_immr->im_ioport.iop_pdata, 1 << (31 - 11));
}
-static void __init mgcoge_setup_arch(void)
+static void __init km82xx_setup_arch(void)
{
if (ppc_md.progress)
- ppc_md.progress("mgcoge_setup_arch()", 0);
+ ppc_md.progress("km82xx_setup_arch()", 0);
cpm2_reset();
@@ -142,7 +168,7 @@ static void __init mgcoge_setup_arch(void)
init_ioports();
if (ppc_md.progress)
- ppc_md.progress("mgcoge_setup_arch(), finish", 0);
+ ppc_md.progress("km82xx_setup_arch(), finish", 0);
}
static __initdata struct of_device_id of_bus_ids[] = {
@@ -156,23 +182,23 @@ static int __init declare_of_platform_devices(void)
return 0;
}
-machine_device_initcall(mgcoge, declare_of_platform_devices);
+machine_device_initcall(km82xx, declare_of_platform_devices);
/*
* Called very early, device-tree isn't unflattened
*/
-static int __init mgcoge_probe(void)
+static int __init km82xx_probe(void)
{
unsigned long root = of_get_flat_dt_root();
- return of_flat_dt_is_compatible(root, "keymile,mgcoge");
+ return of_flat_dt_is_compatible(root, "keymile,km82xx");
}
-define_machine(mgcoge)
+define_machine(km82xx)
{
- .name = "Keymile MGCOGE",
- .probe = mgcoge_probe,
- .setup_arch = mgcoge_setup_arch,
- .init_IRQ = mgcoge_pic_init,
+ .name = "Keymile km82xx",
+ .probe = km82xx_probe,
+ .setup_arch = km82xx_setup_arch,
+ .init_IRQ = km82xx_pic_init,
.get_irq = cpm2_get_irq,
.calibrate_decr = generic_calibrate_decr,
.restart = pq2_restart,
diff --git a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
index 5a55d87d6bd6..926dfdaaf57a 100644
--- a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
+++ b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
@@ -39,10 +39,10 @@ struct pq2ads_pci_pic {
#define NUM_IRQS 32
-static void pq2ads_pci_mask_irq(unsigned int virq)
+static void pq2ads_pci_mask_irq(struct irq_data *d)
{
- struct pq2ads_pci_pic *priv = get_irq_chip_data(virq);
- int irq = NUM_IRQS - virq_to_hw(virq) - 1;
+ struct pq2ads_pci_pic *priv = irq_data_get_irq_chip_data(d);
+ int irq = NUM_IRQS - virq_to_hw(d->irq) - 1;
if (irq != -1) {
unsigned long flags;
@@ -55,10 +55,10 @@ static void pq2ads_pci_mask_irq(unsigned int virq)
}
}
-static void pq2ads_pci_unmask_irq(unsigned int virq)
+static void pq2ads_pci_unmask_irq(struct irq_data *d)
{
- struct pq2ads_pci_pic *priv = get_irq_chip_data(virq);
- int irq = NUM_IRQS - virq_to_hw(virq) - 1;
+ struct pq2ads_pci_pic *priv = irq_data_get_irq_chip_data(d);
+ int irq = NUM_IRQS - virq_to_hw(d->irq) - 1;
if (irq != -1) {
unsigned long flags;
@@ -71,18 +71,17 @@ static void pq2ads_pci_unmask_irq(unsigned int virq)
static struct irq_chip pq2ads_pci_ic = {
.name = "PQ2 ADS PCI",
- .end = pq2ads_pci_unmask_irq,
- .mask = pq2ads_pci_mask_irq,
- .mask_ack = pq2ads_pci_mask_irq,
- .ack = pq2ads_pci_mask_irq,
- .unmask = pq2ads_pci_unmask_irq,
- .enable = pq2ads_pci_unmask_irq,
- .disable = pq2ads_pci_mask_irq
+ .irq_mask = pq2ads_pci_mask_irq,
+ .irq_mask_ack = pq2ads_pci_mask_irq,
+ .irq_ack = pq2ads_pci_mask_irq,
+ .irq_unmask = pq2ads_pci_unmask_irq,
+ .irq_enable = pq2ads_pci_unmask_irq,
+ .irq_disable = pq2ads_pci_mask_irq
};
static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc)
{
- struct pq2ads_pci_pic *priv = desc->handler_data;
+ struct pq2ads_pci_pic *priv = get_irq_desc_data(desc);
u32 stat, mask, pend;
int bit;
diff --git a/arch/powerpc/platforms/83xx/Makefile b/arch/powerpc/platforms/83xx/Makefile
index 6e8bbbbcfdf8..ed95bfcbcbff 100644
--- a/arch/powerpc/platforms/83xx/Makefile
+++ b/arch/powerpc/platforms/83xx/Makefile
@@ -16,4 +16,4 @@ obj-$(CONFIG_MPC837x_MDS) += mpc837x_mds.o
obj-$(CONFIG_SBC834x) += sbc834x.o
obj-$(CONFIG_MPC837x_RDB) += mpc837x_rdb.o
obj-$(CONFIG_ASP834x) += asp834x.o
-obj-$(CONFIG_KMETER1) += kmeter1.o
+obj-$(CONFIG_KMETER1) += km83xx.o
diff --git a/arch/powerpc/platforms/83xx/kmeter1.c b/arch/powerpc/platforms/83xx/km83xx.c
index 903acfd851ac..a2b9b9ef1240 100644
--- a/arch/powerpc/platforms/83xx/kmeter1.c
+++ b/arch/powerpc/platforms/83xx/km83xx.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2008 DENX Software Engineering GmbH
+ * Copyright 2008-2011 DENX Software Engineering GmbH
* Author: Heiko Schocher <hs@denx.de>
*
* Description:
@@ -49,12 +49,12 @@
* Setup the architecture
*
*/
-static void __init kmeter1_setup_arch(void)
+static void __init mpc83xx_km_setup_arch(void)
{
struct device_node *np;
if (ppc_md.progress)
- ppc_md.progress("kmeter1_setup_arch()", 0);
+ ppc_md.progress("kmpbec83xx_setup_arch()", 0);
#ifdef CONFIG_PCI
for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
@@ -69,6 +69,9 @@ static void __init kmeter1_setup_arch(void)
par_io_init(np);
of_node_put(np);
+ for_each_node_by_name(np, "spi")
+ par_io_of_config(np);
+
for (np = NULL; (np = of_find_node_by_name(np, "ucc")) != NULL;)
par_io_of_config(np);
}
@@ -119,7 +122,7 @@ static void __init kmeter1_setup_arch(void)
#endif /* CONFIG_QUICC_ENGINE */
}
-static struct of_device_id kmeter_ids[] = {
+static struct of_device_id kmpbec83xx_ids[] = {
{ .type = "soc", },
{ .compatible = "soc", },
{ .compatible = "simple-bus", },
@@ -131,13 +134,13 @@ static struct of_device_id kmeter_ids[] = {
static int __init kmeter_declare_of_platform_devices(void)
{
/* Publish the QE devices */
- of_platform_bus_probe(NULL, kmeter_ids, NULL);
+ of_platform_bus_probe(NULL, kmpbec83xx_ids, NULL);
return 0;
}
-machine_device_initcall(kmeter1, kmeter_declare_of_platform_devices);
+machine_device_initcall(mpc83xx_km, kmeter_declare_of_platform_devices);
-static void __init kmeter1_init_IRQ(void)
+static void __init mpc83xx_km_init_IRQ(void)
{
struct device_node *np;
@@ -168,21 +171,34 @@ static void __init kmeter1_init_IRQ(void)
#endif /* CONFIG_QUICC_ENGINE */
}
+/* list of the supported boards */
+static char *board[] __initdata = {
+ "Keymile,KMETER1",
+ "Keymile,kmpbec8321",
+ NULL
+};
+
/*
* Called very early, MMU is off, device-tree isn't unflattened
*/
-static int __init kmeter1_probe(void)
+static int __init mpc83xx_km_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
+ unsigned long node = of_get_flat_dt_root();
+ int i = 0;
- return of_flat_dt_is_compatible(root, "keymile,KMETER1");
+ while (board[i]) {
+ if (of_flat_dt_is_compatible(node, board[i]))
+ break;
+ i++;
+ }
+ return (board[i] != NULL);
}
-define_machine(kmeter1) {
- .name = "KMETER1",
- .probe = kmeter1_probe,
- .setup_arch = kmeter1_setup_arch,
- .init_IRQ = kmeter1_init_IRQ,
+define_machine(mpc83xx_km) {
+ .name = "mpc83xx-km-platform",
+ .probe = mpc83xx_km_probe,
+ .setup_arch = mpc83xx_km_setup_arch,
+ .init_IRQ = mpc83xx_km_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
.time_init = mpc83xx_time_init,
diff --git a/arch/powerpc/platforms/85xx/ksi8560.c b/arch/powerpc/platforms/85xx/ksi8560.c
index f4d36b5a2e00..64447e48f3d5 100644
--- a/arch/powerpc/platforms/85xx/ksi8560.c
+++ b/arch/powerpc/platforms/85xx/ksi8560.c
@@ -56,12 +56,13 @@ static void machine_restart(char *cmd)
static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
{
+ struct irq_chip *chip = get_irq_desc_chip(desc);
int cascade_irq;
while ((cascade_irq = cpm2_get_irq()) >= 0)
generic_handle_irq(cascade_irq);
- desc->chip->eoi(irq);
+ chip->irq_eoi(&desc->irq_data);
}
static void __init ksi8560_pic_init(void)
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
index 9438a892afc4..1352d1107bfd 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
@@ -50,12 +50,13 @@ static int mpc85xx_exclude_device(struct pci_controller *hose,
static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
{
+ struct irq_chip *chip = get_irq_desc_chip(desc);
int cascade_irq;
while ((cascade_irq = cpm2_get_irq()) >= 0)
generic_handle_irq(cascade_irq);
- desc->chip->eoi(irq);
+ chip->irq_eoi(&desc->irq_data);
}
#endif /* CONFIG_CPM2 */
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
index 8190bc25bf27..793ead7993ab 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
@@ -47,12 +47,13 @@
#ifdef CONFIG_PPC_I8259
static void mpc85xx_8259_cascade(unsigned int irq, struct irq_desc *desc)
{
+ struct irq_chip *chip = get_irq_desc_chip(desc);
unsigned int cascade_irq = i8259_irq();
if (cascade_irq != NO_IRQ) {
generic_handle_irq(cascade_irq);
}
- desc->chip->eoi(irq);
+ chip->irq_eoi(&desc->irq_data);
}
#endif /* CONFIG_PPC_I8259 */
diff --git a/arch/powerpc/platforms/85xx/sbc8560.c b/arch/powerpc/platforms/85xx/sbc8560.c
index a5ad1c7794bf..d7e28ec3e072 100644
--- a/arch/powerpc/platforms/85xx/sbc8560.c
+++ b/arch/powerpc/platforms/85xx/sbc8560.c
@@ -41,12 +41,13 @@
static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
{
+ struct irq_chip *chip = get_irq_desc_chip(desc);
int cascade_irq;
while ((cascade_irq = cpm2_get_irq()) >= 0)
generic_handle_irq(cascade_irq);
- desc->chip->eoi(irq);
+ chip->irq_eoi(&desc->irq_data);
}
#endif /* CONFIG_CPM2 */
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 5c91a992f02b..0d00ff9d05a0 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -91,10 +91,14 @@ smp_85xx_kick_cpu(int nr)
while ((__secondary_hold_acknowledge != nr) && (++n < 1000))
mdelay(1);
#else
+ smp_generic_kick_cpu(nr);
+
out_be64((u64 *)(bptr_vaddr + BOOT_ENTRY_ADDR_UPPER),
__pa((u64)*((unsigned long long *) generic_secondary_smp_init)));
- smp_generic_kick_cpu(nr);
+ if (!ioremappable)
+ flush_dcache_range((ulong)bptr_vaddr,
+ (ulong)(bptr_vaddr + SIZE_BOOT_ENTRY));
#endif
local_irq_restore(flags);
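
The smp_85xx_kick_cpu() change above adds the flush because, when the boot-entry page lives in ordinary lowmem (ioremappable == 0), the entry address written by the boot CPU may still sit only in its data cache while the freshly kicked secondary polls that memory without the benefit of cache coherency, so the update has to be pushed out to memory explicitly. A minimal sketch of that publish-then-flush pattern, with hypothetical names, might look like:

/* Illustrative only: make 'len' bytes just written at 'vaddr' visible to a
 * CPU that reads the backing memory without going through this CPU's dcache.
 */
static void publish_to_uncached_reader(void *vaddr, unsigned long len)
{
	flush_dcache_range((unsigned long)vaddr, (unsigned long)vaddr + len);
}
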
diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
index d48527ffc425..79d85aca4767 100644
--- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
+++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
@@ -93,6 +93,7 @@ static inline unsigned int socrates_fpga_pic_get_irq(unsigned int irq)
void socrates_fpga_pic_cascade(unsigned int irq, struct irq_desc *desc)
{
+ struct irq_chip *chip = get_irq_desc_chip(desc);
unsigned int cascade_irq;
/*
@@ -103,17 +104,16 @@ void socrates_fpga_pic_cascade(unsigned int irq, struct irq_desc *desc)
if (cascade_irq != NO_IRQ)
generic_handle_irq(cascade_irq);
- desc->chip->eoi(irq);
-
+ chip->irq_eoi(&desc->irq_data);
}
-static void socrates_fpga_pic_ack(unsigned int virq)
+static void socrates_fpga_pic_ack(struct irq_data *d)
{
unsigned long flags;
unsigned int hwirq, irq_line;
uint32_t mask;
- hwirq = socrates_fpga_irq_to_hw(virq);
+ hwirq = socrates_fpga_irq_to_hw(d->irq);
irq_line = fpga_irqs[hwirq].irq_line;
raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
@@ -124,14 +124,14 @@ static void socrates_fpga_pic_ack(unsigned int virq)
raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);
}
-static void socrates_fpga_pic_mask(unsigned int virq)
+static void socrates_fpga_pic_mask(struct irq_data *d)
{
unsigned long flags;
unsigned int hwirq;
int irq_line;
u32 mask;
- hwirq = socrates_fpga_irq_to_hw(virq);
+ hwirq = socrates_fpga_irq_to_hw(d->irq);
irq_line = fpga_irqs[hwirq].irq_line;
raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
@@ -142,14 +142,14 @@ static void socrates_fpga_pic_mask(unsigned int virq)
raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);
}
-static void socrates_fpga_pic_mask_ack(unsigned int virq)
+static void socrates_fpga_pic_mask_ack(struct irq_data *d)
{
unsigned long flags;
unsigned int hwirq;
int irq_line;
u32 mask;
- hwirq = socrates_fpga_irq_to_hw(virq);
+ hwirq = socrates_fpga_irq_to_hw(d->irq);
irq_line = fpga_irqs[hwirq].irq_line;
raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
@@ -161,14 +161,14 @@ static void socrates_fpga_pic_mask_ack(unsigned int virq)
raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);
}
-static void socrates_fpga_pic_unmask(unsigned int virq)
+static void socrates_fpga_pic_unmask(struct irq_data *d)
{
unsigned long flags;
unsigned int hwirq;
int irq_line;
u32 mask;
- hwirq = socrates_fpga_irq_to_hw(virq);
+ hwirq = socrates_fpga_irq_to_hw(d->irq);
irq_line = fpga_irqs[hwirq].irq_line;
raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
@@ -179,14 +179,14 @@ static void socrates_fpga_pic_unmask(unsigned int virq)
raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);
}
-static void socrates_fpga_pic_eoi(unsigned int virq)
+static void socrates_fpga_pic_eoi(struct irq_data *d)
{
unsigned long flags;
unsigned int hwirq;
int irq_line;
u32 mask;
- hwirq = socrates_fpga_irq_to_hw(virq);
+ hwirq = socrates_fpga_irq_to_hw(d->irq);
irq_line = fpga_irqs[hwirq].irq_line;
raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
@@ -197,7 +197,7 @@ static void socrates_fpga_pic_eoi(unsigned int virq)
raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);
}
-static int socrates_fpga_pic_set_type(unsigned int virq,
+static int socrates_fpga_pic_set_type(struct irq_data *d,
unsigned int flow_type)
{
unsigned long flags;
@@ -205,7 +205,7 @@ static int socrates_fpga_pic_set_type(unsigned int virq,
int polarity;
u32 mask;
- hwirq = socrates_fpga_irq_to_hw(virq);
+ hwirq = socrates_fpga_irq_to_hw(d->irq);
if (fpga_irqs[hwirq].type != IRQ_TYPE_NONE)
return -EINVAL;
@@ -233,12 +233,12 @@ static int socrates_fpga_pic_set_type(unsigned int virq,
static struct irq_chip socrates_fpga_pic_chip = {
.name = "FPGA-PIC",
- .ack = socrates_fpga_pic_ack,
- .mask = socrates_fpga_pic_mask,
- .mask_ack = socrates_fpga_pic_mask_ack,
- .unmask = socrates_fpga_pic_unmask,
- .eoi = socrates_fpga_pic_eoi,
- .set_type = socrates_fpga_pic_set_type,
+ .irq_ack = socrates_fpga_pic_ack,
+ .irq_mask = socrates_fpga_pic_mask,
+ .irq_mask_ack = socrates_fpga_pic_mask_ack,
+ .irq_unmask = socrates_fpga_pic_unmask,
+ .irq_eoi = socrates_fpga_pic_eoi,
+ .irq_set_type = socrates_fpga_pic_set_type,
};
static int socrates_fpga_pic_host_map(struct irq_host *h, unsigned int virq,
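
The chip data that irq_data_get_irq_chip_data() hands back in callbacks like socrates_fpga_pic_ack() is installed when a virq is first mapped into the controller's irq_host. For reference, a map callback of this era typically looks like the sketch below; my_pic_host_map and my_pic_chip are illustrative names (my_pic_chip being the kind of chip sketched earlier), not code from this patch.

static int my_pic_host_map(struct irq_host *h, unsigned int virq,
			   irq_hw_number_t hw)
{
	/* host_data is assumed to carry the controller's private state */
	set_irq_chip_data(virq, h->host_data);
	set_irq_chip_and_handler(virq, &my_pic_chip, handle_level_irq);
	return 0;
}

static struct irq_host_ops my_pic_host_ops = {
	.map	= my_pic_host_map,
};

At PIC init time the controller would then register these ops, typically through irq_alloc_host() with the controller's device node and &my_pic_host_ops.
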
diff --git a/arch/powerpc/platforms/85xx/stx_gp3.c b/arch/powerpc/platforms/85xx/stx_gp3.c
index bc33d1859ae7..2b62b064eac7 100644
--- a/arch/powerpc/platforms/85xx/stx_gp3.c
+++ b/arch/powerpc/platforms/85xx/stx_gp3.c
@@ -46,12 +46,13 @@
static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
{
+ struct irq_chip *chip = get_irq_desc_chip(desc);
int cascade_irq;
while ((cascade_irq = cpm2_get_irq()) >= 0)
generic_handle_irq(cascade_irq);
- desc->chip->eoi(irq);
+ chip->irq_eoi(&desc->irq_data);
}
#endif /* CONFIG_CPM2 */
diff --git a/arch/powerpc/platforms/85xx/tqm85xx.c b/arch/powerpc/platforms/85xx/tqm85xx.c
index 5e847d0b47c8..2265b68e3279 100644
--- a/arch/powerpc/platforms/85xx/tqm85xx.c
+++ b/arch/powerpc/platforms/85xx/tqm85xx.c
@@ -44,12 +44,13 @@
static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
{
+ struct irq_chip *chip = get_irq_desc_chip(desc);
int cascade_irq;
while ((cascade_irq = cpm2_get_irq()) >= 0)
generic_handle_irq(cascade_irq);
- desc->chip->eoi(irq);
+ chip->irq_eoi(&desc->irq_data);
}
#endif /* CONFIG_CPM2 */
diff --git a/arch/powerpc/platforms/86xx/gef_pic.c b/arch/powerpc/platforms/86xx/gef_pic.c
index 6df9e2561c06..0adfe3b740cd 100644
--- a/arch/powerpc/platforms/86xx/gef_pic.c
+++ b/arch/powerpc/platforms/86xx/gef_pic.c
@@ -95,6 +95,7 @@ static int gef_pic_cascade_irq;
void gef_pic_cascade(unsigned int irq, struct irq_desc *desc)
{
+ struct irq_chip *chip = get_irq_desc_chip(desc);
unsigned int cascade_irq;
/*
@@ -106,17 +107,16 @@ void gef_pic_cascade(unsigned int irq, struct irq_desc *desc)
if (cascade_irq != NO_IRQ)
generic_handle_irq(cascade_irq);
- desc->chip->eoi(irq);
-
+ chip->irq_eoi(&desc->irq_data);
}
-static void gef_pic_mask(unsigned int virq)
+static void gef_pic_mask(struct irq_data *d)
{
unsigned long flags;
unsigned int hwirq;
u32 mask;
- hwirq = gef_irq_to_hw(virq);
+ hwirq = gef_irq_to_hw(d->irq);
raw_spin_lock_irqsave(&gef_pic_lock, flags);
mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0));
@@ -125,21 +125,21 @@ static void gef_pic_mask(unsigned int virq)
raw_spin_unlock_irqrestore(&gef_pic_lock, flags);
}
-static void gef_pic_mask_ack(unsigned int virq)
+static void gef_pic_mask_ack(struct irq_data *d)
{
/* Don't think we actually have to do anything to ack an interrupt,
* we just need to clear down the device's interrupt and it will go away
*/
- gef_pic_mask(virq);
+ gef_pic_mask(d);
}
-static void gef_pic_unmask(unsigned int virq)
+static void gef_pic_unmask(struct irq_data *d)
{
unsigned long flags;
unsigned int hwirq;
u32 mask;
- hwirq = gef_irq_to_hw(virq);
+ hwirq = gef_irq_to_hw(d->irq);
raw_spin_lock_irqsave(&gef_pic_lock, flags);
mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0));
@@ -150,9 +150,9 @@ static void gef_pic_unmask(unsigned int virq)
static struct irq_chip gef_pic_chip = {
.name = "gefp",
- .mask = gef_pic_mask,
- .mask_ack = gef_pic_mask_ack,
- .unmask = gef_pic_unmask,
+ .irq_mask = gef_pic_mask,
+ .irq_mask_ack = gef_pic_mask_ack,
+ .irq_unmask = gef_pic_unmask,
};
diff --git a/arch/powerpc/platforms/86xx/pic.c b/arch/powerpc/platforms/86xx/pic.c
index 668275d9e668..cbe33639b478 100644
--- a/arch/powerpc/platforms/86xx/pic.c
+++ b/arch/powerpc/platforms/86xx/pic.c
@@ -19,10 +19,13 @@
#ifdef CONFIG_PPC_I8259
static void mpc86xx_8259_cascade(unsigned int irq, struct irq_desc *desc)
{
+ struct irq_chip *chip = get_irq_desc_chip(desc);
unsigned int cascade_irq = i8259_irq();
+
if (cascade_irq != NO_IRQ)
generic_handle_irq(cascade_irq);
- desc->chip->eoi(irq);
+
+ chip->irq_eoi(&desc->irq_data);
}
#endif /* CONFIG_PPC_I8259 */
diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig
index dd35ce081cff..ee56a9ea6a79 100644
--- a/arch/powerpc/platforms/8xx/Kconfig
+++ b/arch/powerpc/platforms/8xx/Kconfig
@@ -49,12 +49,6 @@ config PPC_ADDER875
This enables support for the Analogue & Micro Adder 875
board.
-config PPC_MGSUVD
- bool "MGSUVD"
- select CPM1
- help
- This enables support for the Keymile MGSUVD board.
-
config TQM8XX
bool "TQM8XX"
select CPM1
diff --git a/arch/powerpc/platforms/8xx/Makefile b/arch/powerpc/platforms/8xx/Makefile
index a491fe6b94fc..76a81c3350a8 100644
--- a/arch/powerpc/platforms/8xx/Makefile
+++ b/arch/powerpc/platforms/8xx/Makefile
@@ -6,5 +6,4 @@ obj-$(CONFIG_MPC885ADS) += mpc885ads_setup.o
obj-$(CONFIG_MPC86XADS) += mpc86xads_setup.o
obj-$(CONFIG_PPC_EP88XC) += ep88xc.o
obj-$(CONFIG_PPC_ADDER875) += adder875.o
-obj-$(CONFIG_PPC_MGSUVD) += mgsuvd.o
obj-$(CONFIG_TQM8XX) += tqm8xx_setup.o
diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c
index 60168c1f98fe..fabb108e8744 100644
--- a/arch/powerpc/platforms/8xx/m8xx_setup.c
+++ b/arch/powerpc/platforms/8xx/m8xx_setup.c
@@ -218,15 +218,20 @@ void mpc8xx_restart(char *cmd)
static void cpm_cascade(unsigned int irq, struct irq_desc *desc)
{
+ struct irq_chip *chip;
int cascade_irq;
if ((cascade_irq = cpm_get_irq()) >= 0) {
struct irq_desc *cdesc = irq_to_desc(cascade_irq);
generic_handle_irq(cascade_irq);
- cdesc->chip->eoi(cascade_irq);
+
+ chip = get_irq_desc_chip(cdesc);
+ chip->irq_eoi(&cdesc->irq_data);
}
- desc->chip->eoi(irq);
+
+ chip = get_irq_desc_chip(desc);
+ chip->irq_eoi(&desc->irq_data);
}
/* Initialize the internal interrupt controllers. The number of
diff --git a/arch/powerpc/platforms/8xx/mgsuvd.c b/arch/powerpc/platforms/8xx/mgsuvd.c
deleted file mode 100644
index ca3cb071772c..000000000000
--- a/arch/powerpc/platforms/8xx/mgsuvd.c
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- *
- * Platform setup for the Keymile mgsuvd board
- *
- * Heiko Schocher <hs@denx.de>
- *
- * Copyright 2008 DENX Software Engineering GmbH
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#include <linux/ioport.h>
-#include <linux/of_platform.h>
-
-#include <asm/io.h>
-#include <asm/machdep.h>
-#include <asm/processor.h>
-#include <asm/cpm1.h>
-#include <asm/prom.h>
-#include <asm/fs_pd.h>
-
-#include "mpc8xx.h"
-
-struct cpm_pin {
- int port, pin, flags;
-};
-
-static __initdata struct cpm_pin mgsuvd_pins[] = {
- /* SMC1 */
- {CPM_PORTB, 24, CPM_PIN_INPUT}, /* RX */
- {CPM_PORTB, 25, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, /* TX */
-
- /* SCC3 */
- {CPM_PORTA, 10, CPM_PIN_INPUT},
- {CPM_PORTA, 11, CPM_PIN_INPUT},
- {CPM_PORTA, 3, CPM_PIN_INPUT},
- {CPM_PORTA, 2, CPM_PIN_INPUT},
- {CPM_PORTC, 13, CPM_PIN_INPUT},
-};
-
-static void __init init_ioports(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(mgsuvd_pins); i++) {
- struct cpm_pin *pin = &mgsuvd_pins[i];
- cpm1_set_pin(pin->port, pin->pin, pin->flags);
- }
-
- setbits16(&mpc8xx_immr->im_ioport.iop_pcso, 0x300);
- cpm1_clk_setup(CPM_CLK_SCC3, CPM_CLK5, CPM_CLK_RX);
- cpm1_clk_setup(CPM_CLK_SCC3, CPM_CLK6, CPM_CLK_TX);
- cpm1_clk_setup(CPM_CLK_SMC1, CPM_BRG1, CPM_CLK_RTX);
-}
-
-static void __init mgsuvd_setup_arch(void)
-{
- cpm_reset();
- init_ioports();
-}
-
-static __initdata struct of_device_id of_bus_ids[] = {
- { .compatible = "simple-bus" },
- {},
-};
-
-static int __init declare_of_platform_devices(void)
-{
- of_platform_bus_probe(NULL, of_bus_ids, NULL);
- return 0;
-}
-machine_device_initcall(mgsuvd, declare_of_platform_devices);
-
-static int __init mgsuvd_probe(void)
-{
- unsigned long root = of_get_flat_dt_root();
- return of_flat_dt_is_compatible(root, "keymile,mgsuvd");
-}
-
-define_machine(mgsuvd) {
- .name = "MGSUVD",
- .probe = mgsuvd_probe,
- .setup_arch = mgsuvd_setup_arch,
- .init_IRQ = mpc8xx_pics_init,
- .get_irq = mpc8xx_get_irq,
- .restart = mpc8xx_restart,
- .calibrate_decr = mpc8xx_calibrate_decr,
- .set_rtc_time = mpc8xx_set_rtc_time,
- .get_rtc_time = mpc8xx_get_rtc_time,
-};
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index c35099af340e..c48b66a67e42 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -93,6 +93,7 @@ static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val)
static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
{
+ struct irq_chip *chip = get_irq_desc_chip(desc);
struct axon_msic *msic = get_irq_data(irq);
u32 write_offset, msi;
int idx;
@@ -145,7 +146,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
msic->read_offset &= MSIC_FIFO_SIZE_MASK;
}
- desc->chip->eoi(irq);
+ chip->irq_eoi(&desc->irq_data);
}
static struct axon_msic *find_msi_translator(struct pci_dev *dev)
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c
index 682af97321a8..0b8f7d7135c5 100644
--- a/arch/powerpc/platforms/cell/beat_interrupt.c
+++ b/arch/powerpc/platforms/cell/beat_interrupt.c
@@ -61,59 +61,59 @@ static inline void beatic_update_irq_mask(unsigned int irq_plug)
panic("Failed to set mask IRQ!");
}
-static void beatic_mask_irq(unsigned int irq_plug)
+static void beatic_mask_irq(struct irq_data *d)
{
unsigned long flags;
raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags);
- beatic_irq_mask_enable[irq_plug/64] &= ~(1UL << (63 - (irq_plug%64)));
- beatic_update_irq_mask(irq_plug);
+ beatic_irq_mask_enable[d->irq/64] &= ~(1UL << (63 - (d->irq%64)));
+ beatic_update_irq_mask(d->irq);
raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
}
-static void beatic_unmask_irq(unsigned int irq_plug)
+static void beatic_unmask_irq(struct irq_data *d)
{
unsigned long flags;
raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags);
- beatic_irq_mask_enable[irq_plug/64] |= 1UL << (63 - (irq_plug%64));
- beatic_update_irq_mask(irq_plug);
+ beatic_irq_mask_enable[d->irq/64] |= 1UL << (63 - (d->irq%64));
+ beatic_update_irq_mask(d->irq);
raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
}
-static void beatic_ack_irq(unsigned int irq_plug)
+static void beatic_ack_irq(struct irq_data *d)
{
unsigned long flags;
raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags);
- beatic_irq_mask_ack[irq_plug/64] &= ~(1UL << (63 - (irq_plug%64)));
- beatic_update_irq_mask(irq_plug);
+ beatic_irq_mask_ack[d->irq/64] &= ~(1UL << (63 - (d->irq%64)));
+ beatic_update_irq_mask(d->irq);
raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
}
-static void beatic_end_irq(unsigned int irq_plug)
+static void beatic_end_irq(struct irq_data *d)
{
s64 err;
unsigned long flags;
- err = beat_downcount_of_interrupt(irq_plug);
+ err = beat_downcount_of_interrupt(d->irq);
if (err != 0) {
if ((err & 0xFFFFFFFF) != 0xFFFFFFF5) /* -11: wrong state */
panic("Failed to downcount IRQ! Error = %16llx", err);
- printk(KERN_ERR "IRQ over-downcounted, plug %d\n", irq_plug);
+ printk(KERN_ERR "IRQ over-downcounted, plug %d\n", d->irq);
}
raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags);
- beatic_irq_mask_ack[irq_plug/64] |= 1UL << (63 - (irq_plug%64));
- beatic_update_irq_mask(irq_plug);
+ beatic_irq_mask_ack[d->irq/64] |= 1UL << (63 - (d->irq%64));
+ beatic_update_irq_mask(d->irq);
raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
}
static struct irq_chip beatic_pic = {
.name = "CELL-BEAT",
- .unmask = beatic_unmask_irq,
- .mask = beatic_mask_irq,
- .eoi = beatic_end_irq,
+ .irq_unmask = beatic_unmask_irq,
+ .irq_mask = beatic_mask_irq,
+ .irq_eoi = beatic_end_irq,
};
/*
@@ -232,7 +232,7 @@ unsigned int beatic_get_irq(void)
ret = beatic_get_irq_plug();
if (ret != NO_IRQ)
- beatic_ack_irq(ret);
+ beatic_ack_irq(irq_get_irq_data(ret));
return ret;
}
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 10eb1a443626..624d26e72f1d 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -72,15 +72,15 @@ static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits)
return (node << IIC_IRQ_NODE_SHIFT) | (class << 4) | unit;
}
-static void iic_mask(unsigned int irq)
+static void iic_mask(struct irq_data *d)
{
}
-static void iic_unmask(unsigned int irq)
+static void iic_unmask(struct irq_data *d)
{
}
-static void iic_eoi(unsigned int irq)
+static void iic_eoi(struct irq_data *d)
{
struct iic *iic = &__get_cpu_var(cpu_iic);
out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]);
@@ -89,19 +89,21 @@ static void iic_eoi(unsigned int irq)
static struct irq_chip iic_chip = {
.name = "CELL-IIC",
- .mask = iic_mask,
- .unmask = iic_unmask,
- .eoi = iic_eoi,
+ .irq_mask = iic_mask,
+ .irq_unmask = iic_unmask,
+ .irq_eoi = iic_eoi,
};
-static void iic_ioexc_eoi(unsigned int irq)
+static void iic_ioexc_eoi(struct irq_data *d)
{
}
static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct cbe_iic_regs __iomem *node_iic = (void __iomem *)desc->handler_data;
+ struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct cbe_iic_regs __iomem *node_iic =
+ (void __iomem *)get_irq_desc_data(desc);
unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC;
unsigned long bits, ack;
int cascade;
@@ -128,15 +130,15 @@ static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc)
if (ack)
out_be64(&node_iic->iic_is, ack);
}
- desc->chip->eoi(irq);
+ chip->irq_eoi(&desc->irq_data);
}
static struct irq_chip iic_ioexc_chip = {
.name = "CELL-IOEX",
- .mask = iic_mask,
- .unmask = iic_unmask,
- .eoi = iic_ioexc_eoi,
+ .irq_mask = iic_mask,
+ .irq_unmask = iic_unmask,
+ .irq_eoi = iic_ioexc_eoi,
};
/* Get an IRQ number from the pending state register of the IIC */
@@ -237,6 +239,8 @@ extern int noirqdebug;
static void handle_iic_irq(unsigned int irq, struct irq_desc *desc)
{
+ struct irq_chip *chip = get_irq_desc_chip(desc);
+
raw_spin_lock(&desc->lock);
desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
@@ -275,7 +279,7 @@ static void handle_iic_irq(unsigned int irq, struct irq_desc *desc)
desc->status &= ~IRQ_INPROGRESS;
out_eoi:
- desc->chip->eoi(irq);
+ chip->irq_eoi(&desc->irq_data);
raw_spin_unlock(&desc->lock);
}
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index 691995761b3d..6a28d027d959 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -187,13 +187,15 @@ machine_subsys_initcall(cell, cell_publish_devices);
static void cell_mpic_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct mpic *mpic = desc->handler_data;
+ struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct mpic *mpic = get_irq_desc_data(desc);
unsigned int virq;
virq = mpic_get_one_irq(mpic);
if (virq != NO_IRQ)
generic_handle_irq(virq);
- desc->chip->eoi(irq);
+
+ chip->irq_eoi(&desc->irq_data);
}
static void __init mpic_init_IRQ(void)
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index 3f2e557344a3..b38cdfc1deb8 100644
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -79,30 +79,30 @@ static void __iomem *spider_get_irq_config(struct spider_pic *pic,
return pic->regs + TIR_CFGA + 8 * src;
}
-static void spider_unmask_irq(unsigned int virq)
+static void spider_unmask_irq(struct irq_data *d)
{
- struct spider_pic *pic = spider_virq_to_pic(virq);
- void __iomem *cfg = spider_get_irq_config(pic, irq_map[virq].hwirq);
+ struct spider_pic *pic = spider_virq_to_pic(d->irq);
+ void __iomem *cfg = spider_get_irq_config(pic, irq_map[d->irq].hwirq);
out_be32(cfg, in_be32(cfg) | 0x30000000u);
}
-static void spider_mask_irq(unsigned int virq)
+static void spider_mask_irq(struct irq_data *d)
{
- struct spider_pic *pic = spider_virq_to_pic(virq);
- void __iomem *cfg = spider_get_irq_config(pic, irq_map[virq].hwirq);
+ struct spider_pic *pic = spider_virq_to_pic(d->irq);
+ void __iomem *cfg = spider_get_irq_config(pic, irq_map[d->irq].hwirq);
out_be32(cfg, in_be32(cfg) & ~0x30000000u);
}
-static void spider_ack_irq(unsigned int virq)
+static void spider_ack_irq(struct irq_data *d)
{
- struct spider_pic *pic = spider_virq_to_pic(virq);
- unsigned int src = irq_map[virq].hwirq;
+ struct spider_pic *pic = spider_virq_to_pic(d->irq);
+ unsigned int src = irq_map[d->irq].hwirq;
/* Reset edge detection logic if necessary
*/
- if (irq_to_desc(virq)->status & IRQ_LEVEL)
+ if (irq_to_desc(d->irq)->status & IRQ_LEVEL)
return;
/* Only interrupts 47 to 50 can be set to edge */
@@ -113,13 +113,13 @@ static void spider_ack_irq(unsigned int virq)
out_be32(pic->regs + TIR_EDC, 0x100 | (src & 0xf));
}
-static int spider_set_irq_type(unsigned int virq, unsigned int type)
+static int spider_set_irq_type(struct irq_data *d, unsigned int type)
{
unsigned int sense = type & IRQ_TYPE_SENSE_MASK;
- struct spider_pic *pic = spider_virq_to_pic(virq);
- unsigned int hw = irq_map[virq].hwirq;
+ struct spider_pic *pic = spider_virq_to_pic(d->irq);
+ unsigned int hw = irq_map[d->irq].hwirq;
void __iomem *cfg = spider_get_irq_config(pic, hw);
- struct irq_desc *desc = irq_to_desc(virq);
+ struct irq_desc *desc = irq_to_desc(d->irq);
u32 old_mask;
u32 ic;
@@ -169,10 +169,10 @@ static int spider_set_irq_type(unsigned int virq, unsigned int type)
static struct irq_chip spider_pic = {
.name = "SPIDER",
- .unmask = spider_unmask_irq,
- .mask = spider_mask_irq,
- .ack = spider_ack_irq,
- .set_type = spider_set_irq_type,
+ .irq_unmask = spider_unmask_irq,
+ .irq_mask = spider_mask_irq,
+ .irq_ack = spider_ack_irq,
+ .irq_set_type = spider_set_irq_type,
};
static int spider_host_map(struct irq_host *h, unsigned int virq,
@@ -207,7 +207,8 @@ static struct irq_host_ops spider_host_ops = {
static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct spider_pic *pic = desc->handler_data;
+ struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct spider_pic *pic = get_irq_desc_data(desc);
unsigned int cs, virq;
cs = in_be32(pic->regs + TIR_CS) >> 24;
@@ -215,9 +216,11 @@ static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc)
virq = NO_IRQ;
else
virq = irq_linear_revmap(pic->host, cs);
+
if (virq != NO_IRQ)
generic_handle_irq(virq);
- desc->chip->eoi(irq);
+
+ chip->irq_eoi(&desc->irq_data);
}
/* For hooking up the cascade we have a problem. Our device-tree is
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index 8553cc49e0d6..4c1288451a21 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -365,10 +365,13 @@ void __init chrp_setup_arch(void)
static void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc)
{
+ struct irq_chip *chip = get_irq_desc_chip(desc);
unsigned int cascade_irq = i8259_irq();
+
if (cascade_irq != NO_IRQ)
generic_handle_irq(cascade_irq);
- desc->chip->eoi(irq);
+
+ chip->irq_eoi(&desc->irq_data);
}
/*
diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
index c278bd3a8fec..0aca0e28a8e5 100644
--- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
@@ -46,10 +46,10 @@
*
*/
-static void flipper_pic_mask_and_ack(unsigned int virq)
+static void flipper_pic_mask_and_ack(struct irq_data *d)
{
- int irq = virq_to_hw(virq);
- void __iomem *io_base = get_irq_chip_data(virq);
+ int irq = virq_to_hw(d->irq);
+ void __iomem *io_base = irq_data_get_irq_chip_data(d);
u32 mask = 1 << irq;
clrbits32(io_base + FLIPPER_IMR, mask);
@@ -57,27 +57,27 @@ static void flipper_pic_mask_and_ack(unsigned int virq)
out_be32(io_base + FLIPPER_ICR, mask);
}
-static void flipper_pic_ack(unsigned int virq)
+static void flipper_pic_ack(struct irq_data *d)
{
- int irq = virq_to_hw(virq);
- void __iomem *io_base = get_irq_chip_data(virq);
+ int irq = virq_to_hw(d->irq);
+ void __iomem *io_base = irq_data_get_irq_chip_data(d);
/* this is at least needed for RSW */
out_be32(io_base + FLIPPER_ICR, 1 << irq);
}
-static void flipper_pic_mask(unsigned int virq)
+static void flipper_pic_mask(struct irq_data *d)
{
- int irq = virq_to_hw(virq);
- void __iomem *io_base = get_irq_chip_data(virq);
+ int irq = virq_to_hw(d->irq);
+ void __iomem *io_base = irq_data_get_irq_chip_data(d);
clrbits32(io_base + FLIPPER_IMR, 1 << irq);
}
-static void flipper_pic_unmask(unsigned int virq)
+static void flipper_pic_unmask(struct irq_data *d)
{
- int irq = virq_to_hw(virq);
- void __iomem *io_base = get_irq_chip_data(virq);
+ int irq = virq_to_hw(d->irq);
+ void __iomem *io_base = irq_data_get_irq_chip_data(d);
setbits32(io_base + FLIPPER_IMR, 1 << irq);
}
@@ -85,10 +85,10 @@ static void flipper_pic_unmask(unsigned int virq)
static struct irq_chip flipper_pic = {
.name = "flipper-pic",
- .ack = flipper_pic_ack,
- .mask_ack = flipper_pic_mask_and_ack,
- .mask = flipper_pic_mask,
- .unmask = flipper_pic_unmask,
+ .irq_ack = flipper_pic_ack,
+ .irq_mask_ack = flipper_pic_mask_and_ack,
+ .irq_mask = flipper_pic_mask,
+ .irq_unmask = flipper_pic_unmask,
};
/*
diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
index a771f91e215b..35e448bd8479 100644
--- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
@@ -41,36 +41,36 @@
*
*/
-static void hlwd_pic_mask_and_ack(unsigned int virq)
+static void hlwd_pic_mask_and_ack(struct irq_data *d)
{
- int irq = virq_to_hw(virq);
- void __iomem *io_base = get_irq_chip_data(virq);
+ int irq = virq_to_hw(d->irq);
+ void __iomem *io_base = irq_data_get_irq_chip_data(d);
u32 mask = 1 << irq;
clrbits32(io_base + HW_BROADWAY_IMR, mask);
out_be32(io_base + HW_BROADWAY_ICR, mask);
}
-static void hlwd_pic_ack(unsigned int virq)
+static void hlwd_pic_ack(struct irq_data *d)
{
- int irq = virq_to_hw(virq);
- void __iomem *io_base = get_irq_chip_data(virq);
+ int irq = virq_to_hw(d->irq);
+ void __iomem *io_base = irq_data_get_irq_chip_data(d);
out_be32(io_base + HW_BROADWAY_ICR, 1 << irq);
}
-static void hlwd_pic_mask(unsigned int virq)
+static void hlwd_pic_mask(struct irq_data *d)
{
- int irq = virq_to_hw(virq);
- void __iomem *io_base = get_irq_chip_data(virq);
+ int irq = virq_to_hw(d->irq);
+ void __iomem *io_base = irq_data_get_irq_chip_data(d);
clrbits32(io_base + HW_BROADWAY_IMR, 1 << irq);
}
-static void hlwd_pic_unmask(unsigned int virq)
+static void hlwd_pic_unmask(struct irq_data *d)
{
- int irq = virq_to_hw(virq);
- void __iomem *io_base = get_irq_chip_data(virq);
+ int irq = virq_to_hw(d->irq);
+ void __iomem *io_base = irq_data_get_irq_chip_data(d);
setbits32(io_base + HW_BROADWAY_IMR, 1 << irq);
}
@@ -78,10 +78,10 @@ static void hlwd_pic_unmask(unsigned int virq)
static struct irq_chip hlwd_pic = {
.name = "hlwd-pic",
- .ack = hlwd_pic_ack,
- .mask_ack = hlwd_pic_mask_and_ack,
- .mask = hlwd_pic_mask,
- .unmask = hlwd_pic_unmask,
+ .irq_ack = hlwd_pic_ack,
+ .irq_mask_ack = hlwd_pic_mask_and_ack,
+ .irq_mask = hlwd_pic_mask,
+ .irq_unmask = hlwd_pic_unmask,
};
/*
@@ -129,11 +129,12 @@ static unsigned int __hlwd_pic_get_irq(struct irq_host *h)
static void hlwd_pic_irq_cascade(unsigned int cascade_virq,
struct irq_desc *desc)
{
+ struct irq_chip *chip = get_irq_desc_chip(desc);
struct irq_host *irq_host = get_irq_data(cascade_virq);
unsigned int virq;
raw_spin_lock(&desc->lock);
- desc->chip->mask(cascade_virq); /* IRQ_LEVEL */
+ chip->irq_mask(&desc->irq_data); /* IRQ_LEVEL */
raw_spin_unlock(&desc->lock);
virq = __hlwd_pic_get_irq(irq_host);
@@ -143,9 +144,9 @@ static void hlwd_pic_irq_cascade(unsigned int cascade_virq,
pr_err("spurious interrupt!\n");
raw_spin_lock(&desc->lock);
- desc->chip->ack(cascade_virq); /* IRQ_LEVEL */
- if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
- desc->chip->unmask(cascade_virq);
+ chip->irq_ack(&desc->irq_data); /* IRQ_LEVEL */
+ if (!(desc->status & IRQ_DISABLED) && chip->irq_unmask)
+ chip->irq_unmask(&desc->irq_data);
raw_spin_unlock(&desc->lock);
}
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c
index ba446bf355a9..4fb96f0b2df6 100644
--- a/arch/powerpc/platforms/iseries/irq.c
+++ b/arch/powerpc/platforms/iseries/irq.c
@@ -167,11 +167,11 @@ static void pci_event_handler(struct HvLpEvent *event)
* This will be called by device drivers (via enable_IRQ)
* to enable INTA in the bridge interrupt status register.
*/
-static void iseries_enable_IRQ(unsigned int irq)
+static void iseries_enable_IRQ(struct irq_data *d)
{
u32 bus, dev_id, function, mask;
const u32 sub_bus = 0;
- unsigned int rirq = (unsigned int)irq_map[irq].hwirq;
+ unsigned int rirq = (unsigned int)irq_map[d->irq].hwirq;
/* The IRQ has already been locked by the caller */
bus = REAL_IRQ_TO_BUS(rirq);
@@ -184,23 +184,23 @@ static void iseries_enable_IRQ(unsigned int irq)
}
/* This is called by iseries_activate_IRQs */
-static unsigned int iseries_startup_IRQ(unsigned int irq)
+static unsigned int iseries_startup_IRQ(struct irq_data *d)
{
u32 bus, dev_id, function, mask;
const u32 sub_bus = 0;
- unsigned int rirq = (unsigned int)irq_map[irq].hwirq;
+ unsigned int rirq = (unsigned int)irq_map[d->irq].hwirq;
bus = REAL_IRQ_TO_BUS(rirq);
function = REAL_IRQ_TO_FUNC(rirq);
dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
/* Link the IRQ number to the bridge */
- HvCallXm_connectBusUnit(bus, sub_bus, dev_id, irq);
+ HvCallXm_connectBusUnit(bus, sub_bus, dev_id, d->irq);
/* Unmask bridge interrupts in the FISR */
mask = 0x01010000 << function;
HvCallPci_unmaskFisr(bus, sub_bus, dev_id, mask);
- iseries_enable_IRQ(irq);
+ iseries_enable_IRQ(d);
return 0;
}
@@ -215,21 +215,26 @@ void __init iSeries_activate_IRQs()
for_each_irq (irq) {
struct irq_desc *desc = irq_to_desc(irq);
+ struct irq_chip *chip;
- if (desc && desc->chip && desc->chip->startup) {
+ if (!desc)
+ continue;
+
+ chip = get_irq_desc_chip(desc);
+ if (chip && chip->irq_startup) {
raw_spin_lock_irqsave(&desc->lock, flags);
- desc->chip->startup(irq);
+ chip->irq_startup(&desc->irq_data);
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
}
/* this is not called anywhere currently */
-static void iseries_shutdown_IRQ(unsigned int irq)
+static void iseries_shutdown_IRQ(struct irq_data *d)
{
u32 bus, dev_id, function, mask;
const u32 sub_bus = 0;
- unsigned int rirq = (unsigned int)irq_map[irq].hwirq;
+ unsigned int rirq = (unsigned int)irq_map[d->irq].hwirq;
/* irq should be locked by the caller */
bus = REAL_IRQ_TO_BUS(rirq);
@@ -248,11 +253,11 @@ static void iseries_shutdown_IRQ(unsigned int irq)
* This will be called by device drivers (via disable_IRQ)
* to disable INTA in the bridge interrupt status register.
*/
-static void iseries_disable_IRQ(unsigned int irq)
+static void iseries_disable_IRQ(struct irq_data *d)
{
u32 bus, dev_id, function, mask;
const u32 sub_bus = 0;
- unsigned int rirq = (unsigned int)irq_map[irq].hwirq;
+ unsigned int rirq = (unsigned int)irq_map[d->irq].hwirq;
/* The IRQ has already been locked by the caller */
bus = REAL_IRQ_TO_BUS(rirq);
@@ -264,9 +269,9 @@ static void iseries_disable_IRQ(unsigned int irq)
HvCallPci_maskInterrupts(bus, sub_bus, dev_id, mask);
}
-static void iseries_end_IRQ(unsigned int irq)
+static void iseries_end_IRQ(struct irq_data *d)
{
- unsigned int rirq = (unsigned int)irq_map[irq].hwirq;
+ unsigned int rirq = (unsigned int)irq_map[d->irq].hwirq;
HvCallPci_eoi(REAL_IRQ_TO_BUS(rirq), REAL_IRQ_TO_SUBBUS(rirq),
(REAL_IRQ_TO_IDSEL(rirq) << 4) + REAL_IRQ_TO_FUNC(rirq));
@@ -274,11 +279,11 @@ static void iseries_end_IRQ(unsigned int irq)
static struct irq_chip iseries_pic = {
.name = "iSeries",
- .startup = iseries_startup_IRQ,
- .shutdown = iseries_shutdown_IRQ,
- .unmask = iseries_enable_IRQ,
- .mask = iseries_disable_IRQ,
- .eoi = iseries_end_IRQ
+ .irq_startup = iseries_startup_IRQ,
+ .irq_shutdown = iseries_shutdown_IRQ,
+ .irq_unmask = iseries_enable_IRQ,
+ .irq_mask = iseries_disable_IRQ,
+ .irq_eoi = iseries_end_IRQ
};
/*
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index f372ec1691a3..a6067b38d2ca 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -240,7 +240,7 @@ static __init void pas_init_IRQ(void)
nmi_virq = irq_create_mapping(NULL, *nmiprop);
mpic_irq_set_priority(nmi_virq, 15);
set_irq_type(nmi_virq, IRQ_TYPE_EDGE_RISING);
- mpic_unmask_irq(nmi_virq);
+ mpic_unmask_irq(irq_get_irq_data(nmi_virq));
}
of_node_put(mpic_node);
@@ -266,7 +266,7 @@ static int pas_machine_check_handler(struct pt_regs *regs)
if (nmi_virq != NO_IRQ && mpic_get_mcirq() == nmi_virq) {
printk(KERN_ERR "NMI delivered\n");
debugger(regs);
- mpic_end_irq(nmi_virq);
+ mpic_end_irq(irq_get_irq_data(nmi_virq));
goto out;
}
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 890d5f72b198..c55812bb6a51 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -82,9 +82,9 @@ static void __pmac_retrigger(unsigned int irq_nr)
}
}
-static void pmac_mask_and_ack_irq(unsigned int virq)
+static void pmac_mask_and_ack_irq(struct irq_data *d)
{
- unsigned int src = irq_map[virq].hwirq;
+ unsigned int src = irq_map[d->irq].hwirq;
unsigned long bit = 1UL << (src & 0x1f);
int i = src >> 5;
unsigned long flags;
@@ -104,9 +104,9 @@ static void pmac_mask_and_ack_irq(unsigned int virq)
raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
}
-static void pmac_ack_irq(unsigned int virq)
+static void pmac_ack_irq(struct irq_data *d)
{
- unsigned int src = irq_map[virq].hwirq;
+ unsigned int src = irq_map[d->irq].hwirq;
unsigned long bit = 1UL << (src & 0x1f);
int i = src >> 5;
unsigned long flags;
@@ -149,15 +149,15 @@ static void __pmac_set_irq_mask(unsigned int irq_nr, int nokicklost)
/* When an irq gets requested for the first client, if it's an
* edge interrupt, we clear any previous one on the controller
*/
-static unsigned int pmac_startup_irq(unsigned int virq)
+static unsigned int pmac_startup_irq(struct irq_data *d)
{
unsigned long flags;
- unsigned int src = irq_map[virq].hwirq;
+ unsigned int src = irq_map[d->irq].hwirq;
unsigned long bit = 1UL << (src & 0x1f);
int i = src >> 5;
raw_spin_lock_irqsave(&pmac_pic_lock, flags);
- if ((irq_to_desc(virq)->status & IRQ_LEVEL) == 0)
+ if ((irq_to_desc(d->irq)->status & IRQ_LEVEL) == 0)
out_le32(&pmac_irq_hw[i]->ack, bit);
__set_bit(src, ppc_cached_irq_mask);
__pmac_set_irq_mask(src, 0);
@@ -166,10 +166,10 @@ static unsigned int pmac_startup_irq(unsigned int virq)
return 0;
}
-static void pmac_mask_irq(unsigned int virq)
+static void pmac_mask_irq(struct irq_data *d)
{
unsigned long flags;
- unsigned int src = irq_map[virq].hwirq;
+ unsigned int src = irq_map[d->irq].hwirq;
raw_spin_lock_irqsave(&pmac_pic_lock, flags);
__clear_bit(src, ppc_cached_irq_mask);
@@ -177,10 +177,10 @@ static void pmac_mask_irq(unsigned int virq)
raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
}
-static void pmac_unmask_irq(unsigned int virq)
+static void pmac_unmask_irq(struct irq_data *d)
{
unsigned long flags;
- unsigned int src = irq_map[virq].hwirq;
+ unsigned int src = irq_map[d->irq].hwirq;
raw_spin_lock_irqsave(&pmac_pic_lock, flags);
__set_bit(src, ppc_cached_irq_mask);
@@ -188,24 +188,24 @@ static void pmac_unmask_irq(unsigned int virq)
raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
}
-static int pmac_retrigger(unsigned int virq)
+static int pmac_retrigger(struct irq_data *d)
{
unsigned long flags;
raw_spin_lock_irqsave(&pmac_pic_lock, flags);
- __pmac_retrigger(irq_map[virq].hwirq);
+ __pmac_retrigger(irq_map[d->irq].hwirq);
raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
return 1;
}
static struct irq_chip pmac_pic = {
.name = "PMAC-PIC",
- .startup = pmac_startup_irq,
- .mask = pmac_mask_irq,
- .ack = pmac_ack_irq,
- .mask_ack = pmac_mask_and_ack_irq,
- .unmask = pmac_unmask_irq,
- .retrigger = pmac_retrigger,
+ .irq_startup = pmac_startup_irq,
+ .irq_mask = pmac_mask_irq,
+ .irq_ack = pmac_ack_irq,
+ .irq_mask_ack = pmac_mask_and_ack_irq,
+ .irq_unmask = pmac_unmask_irq,
+ .irq_retrigger = pmac_retrigger,
};
static irqreturn_t gatwick_action(int cpl, void *dev_id)
@@ -472,12 +472,14 @@ int of_irq_map_oldworld(struct device_node *device, int index,
static void pmac_u3_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct mpic *mpic = desc->handler_data;
-
+ struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct mpic *mpic = get_irq_desc_data(desc);
unsigned int cascade_irq = mpic_get_one_irq(mpic);
+
if (cascade_irq != NO_IRQ)
generic_handle_irq(cascade_irq);
- desc->chip->eoi(irq);
+
+ chip->irq_eoi(&desc->irq_data);
}
static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic)
@@ -707,7 +709,7 @@ static int pmacpic_resume(struct sys_device *sysdev)
mb();
for (i = 0; i < max_real_irqs; ++i)
if (test_bit(i, sleep_save_mask))
- pmac_unmask_irq(i);
+ pmac_unmask_irq(irq_get_irq_data(i));
return 0;
}
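
The pmacpic_resume() hunk above is the other half of the conversion: a caller that only has a Linux interrupt number fetches the matching irq_data with irq_get_irq_data() before invoking the new-style handler. A hedged sketch of that bridge, with a hypothetical helper name:

/* Mask a virq through its chip's irq_data-based callback. */
static void example_mask_by_virq(unsigned int virq)
{
        struct irq_data *d = irq_get_irq_data(virq);

        if (d && d->chip && d->chip->irq_mask)
                d->chip->irq_mask(d);
}
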
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index 92290ff4761a..3988c86682a5 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -99,16 +99,16 @@ static DEFINE_PER_CPU(struct ps3_private, ps3_private);
* Sets ps3_bmp.mask and calls lv1_did_update_interrupt_mask().
*/
-static void ps3_chip_mask(unsigned int virq)
+static void ps3_chip_mask(struct irq_data *d)
{
- struct ps3_private *pd = get_irq_chip_data(virq);
+ struct ps3_private *pd = irq_data_get_irq_chip_data(d);
unsigned long flags;
pr_debug("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__,
- pd->thread_id, virq);
+ pd->thread_id, d->irq);
local_irq_save(flags);
- clear_bit(63 - virq, &pd->bmp.mask);
+ clear_bit(63 - d->irq, &pd->bmp.mask);
lv1_did_update_interrupt_mask(pd->ppe_id, pd->thread_id);
local_irq_restore(flags);
}
@@ -120,16 +120,16 @@ static void ps3_chip_mask(unsigned int virq)
* Clears ps3_bmp.mask and calls lv1_did_update_interrupt_mask().
*/
-static void ps3_chip_unmask(unsigned int virq)
+static void ps3_chip_unmask(struct irq_data *d)
{
- struct ps3_private *pd = get_irq_chip_data(virq);
+ struct ps3_private *pd = irq_data_get_irq_chip_data(d);
unsigned long flags;
pr_debug("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__,
- pd->thread_id, virq);
+ pd->thread_id, d->irq);
local_irq_save(flags);
- set_bit(63 - virq, &pd->bmp.mask);
+ set_bit(63 - d->irq, &pd->bmp.mask);
lv1_did_update_interrupt_mask(pd->ppe_id, pd->thread_id);
local_irq_restore(flags);
}
@@ -141,10 +141,10 @@ static void ps3_chip_unmask(unsigned int virq)
* Calls lv1_end_of_interrupt_ext().
*/
-static void ps3_chip_eoi(unsigned int virq)
+static void ps3_chip_eoi(struct irq_data *d)
{
- const struct ps3_private *pd = get_irq_chip_data(virq);
- lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, virq);
+ const struct ps3_private *pd = irq_data_get_irq_chip_data(d);
+ lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, d->irq);
}
/**
@@ -153,9 +153,9 @@ static void ps3_chip_eoi(unsigned int virq)
static struct irq_chip ps3_irq_chip = {
.name = "ps3",
- .mask = ps3_chip_mask,
- .unmask = ps3_chip_unmask,
- .eoi = ps3_chip_eoi,
+ .irq_mask = ps3_chip_mask,
+ .irq_unmask = ps3_chip_unmask,
+ .irq_eoi = ps3_chip_eoi,
};
/**
@@ -202,7 +202,7 @@ static int ps3_virq_setup(enum ps3_cpu_binding cpu, unsigned long outlet,
goto fail_set;
}
- ps3_chip_mask(*virq);
+ ps3_chip_mask(irq_get_irq_data(*virq));
return result;
@@ -296,7 +296,7 @@ int ps3_irq_plug_destroy(unsigned int virq)
pr_debug("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__,
__LINE__, pd->ppe_id, pd->thread_id, virq);
- ps3_chip_mask(virq);
+ ps3_chip_mask(irq_get_irq_data(virq));
result = lv1_disconnect_irq_plug_ext(pd->ppe_id, pd->thread_id, virq);
@@ -357,7 +357,7 @@ int ps3_event_receive_port_destroy(unsigned int virq)
pr_debug(" -> %s:%d virq %u\n", __func__, __LINE__, virq);
- ps3_chip_mask(virq);
+ ps3_chip_mask(irq_get_irq_data(virq));
result = lv1_destruct_event_receive_port(virq_to_hw(virq));
@@ -492,7 +492,7 @@ int ps3_io_irq_destroy(unsigned int virq)
int result;
unsigned long outlet = virq_to_hw(virq);
- ps3_chip_mask(virq);
+ ps3_chip_mask(irq_get_irq_data(virq));
/*
* lv1_destruct_io_irq_outlet() will destroy the IRQ plug,
@@ -553,7 +553,7 @@ int ps3_vuart_irq_destroy(unsigned int virq)
{
int result;
- ps3_chip_mask(virq);
+ ps3_chip_mask(irq_get_irq_data(virq));
result = lv1_deconfigure_virtual_uart_irq();
if (result) {
@@ -605,7 +605,7 @@ int ps3_spe_irq_destroy(unsigned int virq)
{
int result;
- ps3_chip_mask(virq);
+ ps3_chip_mask(irq_get_irq_data(virq));
result = ps3_irq_plug_destroy(virq);
BUG_ON(result);
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index f4803868642c..3cafc306b971 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -508,12 +508,7 @@ static int cmm_memory_isolate_cb(struct notifier_block *self,
if (action == MEM_ISOLATE_COUNT)
ret = cmm_count_pages(arg);
- if (ret)
- ret = notifier_from_errno(ret);
- else
- ret = NOTIFY_OK;
-
- return ret;
+ return notifier_from_errno(ret);
}
static struct notifier_block cmm_mem_isolate_nb = {
@@ -635,12 +630,7 @@ static int cmm_memory_cb(struct notifier_block *self,
break;
}
- if (ret)
- ret = notifier_from_errno(ret);
- else
- ret = NOTIFY_OK;
-
- return ret;
+ return notifier_from_errno(ret);
}
static struct notifier_block cmm_mem_nb = {
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 17a11c82e6f8..3cc4d102b1f1 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -876,7 +876,7 @@ void eeh_restore_bars(struct pci_dn *pdn)
*
* Save the values of the device bars. Unlike the restore
* routine, this routine is *not* recursive. This is because
- * PCI devices are added individuallly; but, for the restore,
+ * PCI devices are added individually; but, for the restore,
* an entire slot is reset at a time.
*/
static void eeh_save_bars(struct pci_dn *pdn)
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index edea60b7ee90..154c464cdca5 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -33,6 +33,7 @@
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/crash_dump.h>
+#include <linux/memory.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/rtas.h>
@@ -45,6 +46,7 @@
#include <asm/tce.h>
#include <asm/ppc-pci.h>
#include <asm/udbg.h>
+#include <asm/mmzone.h>
#include "plpar_wrappers.h"
@@ -270,6 +272,152 @@ static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum)
return tce_ret;
}
+/* this is compatible with the cells of the device tree property */
+struct dynamic_dma_window_prop {
+ __be32 liobn; /* tce table number */
+ __be64 dma_base; /* address hi,lo */
+ __be32 tce_shift; /* ilog2(tce_page_size) */
+ __be32 window_shift; /* ilog2(tce_window_size) */
+};
+
+struct direct_window {
+ struct device_node *device;
+ const struct dynamic_dma_window_prop *prop;
+ struct list_head list;
+};
+
+/* Dynamic DMA Window support */
+struct ddw_query_response {
+ u32 windows_available;
+ u32 largest_available_block;
+ u32 page_size;
+ u32 migration_capable;
+};
+
+struct ddw_create_response {
+ u32 liobn;
+ u32 addr_hi;
+ u32 addr_lo;
+};
+
+static LIST_HEAD(direct_window_list);
+/* prevents races between memory on/offline and window creation */
+static DEFINE_SPINLOCK(direct_window_list_lock);
+/* protects initializing window twice for same device */
+static DEFINE_MUTEX(direct_window_init_mutex);
+#define DIRECT64_PROPNAME "linux,direct64-ddr-window-info"
+
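
The linux,direct64-ddr-window-info property added here is stored as big-endian cells laid out exactly like struct dynamic_dma_window_prop. A sketch of decoding it from a device node follows; the helper name is hypothetical and error handling is trimmed.

static int example_read_ddw_prop(struct device_node *np, u64 *dma_base,
                                 u32 *tce_shift, u32 *window_shift)
{
        const struct dynamic_dma_window_prop *dwp;

        dwp = of_get_property(np, DIRECT64_PROPNAME, NULL);
        if (!dwp)
                return -ENODEV;

        /* convert the big-endian cells to host-endian values */
        *dma_base = be64_to_cpu(dwp->dma_base);
        *tce_shift = be32_to_cpu(dwp->tce_shift);
        *window_shift = be32_to_cpu(dwp->window_shift);
        return 0;
}
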
+static int tce_clearrange_multi_pSeriesLP(unsigned long start_pfn,
+ unsigned long num_pfn, const void *arg)
+{
+ const struct dynamic_dma_window_prop *maprange = arg;
+ int rc;
+ u64 tce_size, num_tce, dma_offset, next;
+ u32 tce_shift;
+ long limit;
+
+ tce_shift = be32_to_cpu(maprange->tce_shift);
+ tce_size = 1ULL << tce_shift;
+ next = start_pfn << PAGE_SHIFT;
+ num_tce = num_pfn << PAGE_SHIFT;
+
+ /* round back to the beginning of the tce page size */
+ num_tce += next & (tce_size - 1);
+ next &= ~(tce_size - 1);
+
+ /* convert to number of tces */
+ num_tce |= tce_size - 1;
+ num_tce >>= tce_shift;
+
+ do {
+ /*
+ * Set up the page with TCE data, looping through and setting
+ * the values.
+ */
+ limit = min_t(long, num_tce, 512);
+ dma_offset = next + be64_to_cpu(maprange->dma_base);
+
+ rc = plpar_tce_stuff((u64)be32_to_cpu(maprange->liobn),
+ dma_offset,
+ 0, limit);
+ num_tce -= limit;
+ } while (num_tce > 0 && !rc);
+
+ return rc;
+}
+
+static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
+ unsigned long num_pfn, const void *arg)
+{
+ const struct dynamic_dma_window_prop *maprange = arg;
+ u64 *tcep, tce_size, num_tce, dma_offset, next, proto_tce, liobn;
+ u32 tce_shift;
+ u64 rc = 0;
+ long l, limit;
+
+ local_irq_disable(); /* to protect tcep and the page behind it */
+ tcep = __get_cpu_var(tce_page);
+
+ if (!tcep) {
+ tcep = (u64 *)__get_free_page(GFP_ATOMIC);
+ if (!tcep) {
+ local_irq_enable();
+ return -ENOMEM;
+ }
+ __get_cpu_var(tce_page) = tcep;
+ }
+
+ proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;
+
+ liobn = (u64)be32_to_cpu(maprange->liobn);
+ tce_shift = be32_to_cpu(maprange->tce_shift);
+ tce_size = 1ULL << tce_shift;
+ next = start_pfn << PAGE_SHIFT;
+ num_tce = num_pfn << PAGE_SHIFT;
+
+ /* round back to the beginning of the tce page size */
+ num_tce += next & (tce_size - 1);
+ next &= ~(tce_size - 1);
+
+ /* convert to number of tces */
+ num_tce |= tce_size - 1;
+ num_tce >>= tce_shift;
+
+ /* We can map max one pageful of TCEs at a time */
+ do {
+ /*
+ * Set up the page with TCE data, looping through and setting
+ * the values.
+ */
+ limit = min_t(long, num_tce, 4096/TCE_ENTRY_SIZE);
+ dma_offset = next + be64_to_cpu(maprange->dma_base);
+
+ for (l = 0; l < limit; l++) {
+ tcep[l] = proto_tce | next;
+ next += tce_size;
+ }
+
+ rc = plpar_tce_put_indirect(liobn,
+ dma_offset,
+ (u64)virt_to_abs(tcep),
+ limit);
+
+ num_tce -= limit;
+ } while (num_tce > 0 && !rc);
+
+ /* error cleanup: caller will clear whole range */
+
+ local_irq_enable();
+ return rc;
+}
+
+static int tce_setrange_multi_pSeriesLP_walk(unsigned long start_pfn,
+ unsigned long num_pfn, void *arg)
+{
+ return tce_setrange_multi_pSeriesLP(start_pfn, num_pfn, arg);
+}
+
+
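
Both walkers above round the pfn range out to whole TCE pages before looping. A self-contained sketch of that arithmetic (the shifts are parameters here purely for illustration; the kernel code uses PAGE_SHIFT and the property's tce_shift):

/* Count the TCEs needed to cover num_pfn pages starting at start_pfn. */
static u64 example_count_tces(u64 start_pfn, u64 num_pfn,
                              u32 page_shift, u32 tce_shift)
{
        u64 tce_size = 1ULL << tce_shift;
        u64 next = start_pfn << page_shift;     /* first byte of the range */
        u64 num_tce = num_pfn << page_shift;    /* length in bytes */

        /* round the start back to a TCE boundary */
        num_tce += next & (tce_size - 1);
        next &= ~(tce_size - 1);

        /* round the length up and convert bytes to a TCE count */
        num_tce |= tce_size - 1;
        num_tce >>= tce_shift;

        return num_tce;
}
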
#ifdef CONFIG_PCI
static void iommu_table_setparms(struct pci_controller *phb,
struct device_node *dn,
@@ -495,6 +643,329 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
pci_name(dev));
}
+static int __read_mostly disable_ddw;
+
+static int __init disable_ddw_setup(char *str)
+{
+ disable_ddw = 1;
+ printk(KERN_INFO "ppc iommu: disabling ddw.\n");
+
+ return 0;
+}
+
+early_param("disable_ddw", disable_ddw_setup);
+
+static void remove_ddw(struct device_node *np)
+{
+ struct dynamic_dma_window_prop *dwp;
+ struct property *win64;
+ const u32 *ddr_avail;
+ u64 liobn;
+ int len, ret;
+
+ ddr_avail = of_get_property(np, "ibm,ddw-applicable", &len);
+ win64 = of_find_property(np, DIRECT64_PROPNAME, NULL);
+ if (!win64 || !ddr_avail || len < 3 * sizeof(u32))
+ return;
+
+ dwp = win64->value;
+ liobn = (u64)be32_to_cpu(dwp->liobn);
+
+ /* clear the whole window, note the arg is in kernel pages */
+ ret = tce_clearrange_multi_pSeriesLP(0,
+ 1ULL << (be32_to_cpu(dwp->window_shift) - PAGE_SHIFT), dwp);
+ if (ret)
+ pr_warning("%s failed to clear tces in window.\n",
+ np->full_name);
+ else
+ pr_debug("%s successfully cleared tces in window.\n",
+ np->full_name);
+
+ ret = rtas_call(ddr_avail[2], 1, 1, NULL, liobn);
+ if (ret)
+ pr_warning("%s: failed to remove direct window: rtas returned "
+ "%d to ibm,remove-pe-dma-window(%x) %llx\n",
+ np->full_name, ret, ddr_avail[2], liobn);
+ else
+ pr_debug("%s: successfully removed direct window: rtas returned "
+ "%d to ibm,remove-pe-dma-window(%x) %llx\n",
+ np->full_name, ret, ddr_avail[2], liobn);
+}
+
+
+static int dupe_ddw_if_already_created(struct pci_dev *dev, struct device_node *pdn)
+{
+ struct device_node *dn;
+ struct pci_dn *pcidn;
+ struct direct_window *window;
+ const struct dynamic_dma_window_prop *direct64;
+ u64 dma_addr = 0;
+
+ dn = pci_device_to_OF_node(dev);
+ pcidn = PCI_DN(dn);
+ spin_lock(&direct_window_list_lock);
+ /* check if we already created a window and dupe that config if so */
+ list_for_each_entry(window, &direct_window_list, list) {
+ if (window->device == pdn) {
+ direct64 = window->prop;
+ dma_addr = direct64->dma_base;
+ break;
+ }
+ }
+ spin_unlock(&direct_window_list_lock);
+
+ return dma_addr;
+}
+
+static u64 dupe_ddw_if_kexec(struct pci_dev *dev, struct device_node *pdn)
+{
+ struct device_node *dn;
+ struct pci_dn *pcidn;
+ int len;
+ struct direct_window *window;
+ const struct dynamic_dma_window_prop *direct64;
+ u64 dma_addr = 0;
+
+ dn = pci_device_to_OF_node(dev);
+ pcidn = PCI_DN(dn);
+ direct64 = of_get_property(pdn, DIRECT64_PROPNAME, &len);
+ if (direct64) {
+ window = kzalloc(sizeof(*window), GFP_KERNEL);
+ if (!window) {
+ remove_ddw(pdn);
+ } else {
+ window->device = pdn;
+ window->prop = direct64;
+ spin_lock(&direct_window_list_lock);
+ list_add(&window->list, &direct_window_list);
+ spin_unlock(&direct_window_list_lock);
+ dma_addr = direct64->dma_base;
+ }
+ }
+
+ return dma_addr;
+}
+
+static int query_ddw(struct pci_dev *dev, const u32 *ddr_avail,
+ struct ddw_query_response *query)
+{
+ struct device_node *dn;
+ struct pci_dn *pcidn;
+ u32 cfg_addr;
+ u64 buid;
+ int ret;
+
+ /*
+ * Get the config address and phb buid of the PE window.
+ * Rely on eeh to retrieve this for us.
+ * Retrieve them from the pci device, not the node with the
+ * dma-window property
+ */
+ dn = pci_device_to_OF_node(dev);
+ pcidn = PCI_DN(dn);
+ cfg_addr = pcidn->eeh_config_addr;
+ if (pcidn->eeh_pe_config_addr)
+ cfg_addr = pcidn->eeh_pe_config_addr;
+ buid = pcidn->phb->buid;
+ ret = rtas_call(ddr_avail[0], 3, 5, (u32 *)query,
+ cfg_addr, BUID_HI(buid), BUID_LO(buid));
+ dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x"
+ " returned %d\n", ddr_avail[0], cfg_addr, BUID_HI(buid),
+ BUID_LO(buid), ret);
+ return ret;
+}
+
+static int create_ddw(struct pci_dev *dev, const u32 *ddr_avail,
+ struct ddw_create_response *create, int page_shift,
+ int window_shift)
+{
+ struct device_node *dn;
+ struct pci_dn *pcidn;
+ u32 cfg_addr;
+ u64 buid;
+ int ret;
+
+ /*
+ * Get the config address and phb buid of the PE window.
+ * Rely on eeh to retrieve this for us.
+ * Retrieve them from the pci device, not the node with the
+ * dma-window property
+ */
+ dn = pci_device_to_OF_node(dev);
+ pcidn = PCI_DN(dn);
+ cfg_addr = pcidn->eeh_config_addr;
+ if (pcidn->eeh_pe_config_addr)
+ cfg_addr = pcidn->eeh_pe_config_addr;
+ buid = pcidn->phb->buid;
+
+ do {
+ /* extra outputs are LIOBN and dma-addr (hi, lo) */
+ ret = rtas_call(ddr_avail[1], 5, 4, (u32 *)create, cfg_addr,
+ BUID_HI(buid), BUID_LO(buid), page_shift, window_shift);
+ } while (rtas_busy_delay(ret));
+ dev_info(&dev->dev,
+ "ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d "
+ "(liobn = 0x%x starting addr = %x %x)\n", ddr_avail[1],
+ cfg_addr, BUID_HI(buid), BUID_LO(buid), page_shift,
+ window_shift, ret, create->liobn, create->addr_hi, create->addr_lo);
+
+ return ret;
+}
+
+/*
+ * If the PE supports dynamic dma windows, and there is space for a table
+ * that can map all pages in a linear offset, then setup such a table,
+ * and record the dma-offset in the struct device.
+ *
+ * dev: the pci device we are checking
+ * pdn: the parent pe node with the ibm,dma-window property
+ * Future: also check if we can remap the base window for our base page size
+ *
+ * returns the dma offset for use by dma_set_mask
+ */
+static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
+{
+ int len, ret;
+ struct ddw_query_response query;
+ struct ddw_create_response create;
+ int page_shift;
+ u64 dma_addr, max_addr;
+ struct device_node *dn;
+ const u32 *uninitialized_var(ddr_avail);
+ struct direct_window *window;
+ struct property *uninitialized_var(win64);
+ struct dynamic_dma_window_prop *ddwprop;
+
+ mutex_lock(&direct_window_init_mutex);
+
+ dma_addr = dupe_ddw_if_already_created(dev, pdn);
+ if (dma_addr != 0)
+ goto out_unlock;
+
+ dma_addr = dupe_ddw_if_kexec(dev, pdn);
+ if (dma_addr != 0)
+ goto out_unlock;
+
+ /*
+ * the ibm,ddw-applicable property holds the tokens for:
+ * ibm,query-pe-dma-window
+ * ibm,create-pe-dma-window
+ * ibm,remove-pe-dma-window
+ * for the given node in that order.
+ * the property is actually in the parent, not the PE
+ */
+ ddr_avail = of_get_property(pdn, "ibm,ddw-applicable", &len);
+ if (!ddr_avail || len < 3 * sizeof(u32))
+ goto out_unlock;
+
+ /*
+ * Query if there is a second window of size to map the
+ * whole partition. Query returns number of windows, largest
+ * block assigned to PE (partition endpoint), and two bitmasks
+ * of page sizes: supported and supported for migrate-dma.
+ */
+ dn = pci_device_to_OF_node(dev);
+ ret = query_ddw(dev, ddr_avail, &query);
+ if (ret != 0)
+ goto out_unlock;
+
+ if (query.windows_available == 0) {
+ /*
+ * no additional windows are available for this device.
+ * We might be able to reallocate the existing window,
+ * trading in for a larger page size.
+ */
+ dev_dbg(&dev->dev, "no free dynamic windows");
+ goto out_unlock;
+ }
+ if (query.page_size & 4) {
+ page_shift = 24; /* 16MB */
+ } else if (query.page_size & 2) {
+ page_shift = 16; /* 64kB */
+ } else if (query.page_size & 1) {
+ page_shift = 12; /* 4kB */
+ } else {
+ dev_dbg(&dev->dev, "no supported direct page size in mask %x",
+ query.page_size);
+ goto out_unlock;
+ }
+ /* verify the window * number of ptes will map the partition */
+ /* check largest block * page size > max memory hotplug addr */
+ max_addr = memory_hotplug_max();
+ if (query.largest_available_block < (max_addr >> page_shift)) {
+ dev_dbg(&dev->dev, "can't map partition max 0x%llx with %u "
+ "%llu-sized pages\n", max_addr, query.largest_available_block,
+ 1ULL << page_shift);
+ goto out_unlock;
+ }
+ len = order_base_2(max_addr);
+ win64 = kzalloc(sizeof(struct property), GFP_KERNEL);
+ if (!win64) {
+ dev_info(&dev->dev,
+ "couldn't allocate property for 64bit dma window\n");
+ goto out_unlock;
+ }
+ win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL);
+ win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL);
+ if (!win64->name || !win64->value) {
+ dev_info(&dev->dev,
+ "couldn't allocate property name and value\n");
+ goto out_free_prop;
+ }
+
+ ret = create_ddw(dev, ddr_avail, &create, page_shift, len);
+ if (ret != 0)
+ goto out_free_prop;
+
+ ddwprop->liobn = cpu_to_be32(create.liobn);
+ ddwprop->dma_base = cpu_to_be64(of_read_number(&create.addr_hi, 2));
+ ddwprop->tce_shift = cpu_to_be32(page_shift);
+ ddwprop->window_shift = cpu_to_be32(len);
+
+ dev_dbg(&dev->dev, "created tce table LIOBN 0x%x for %s\n",
+ create.liobn, dn->full_name);
+
+ window = kzalloc(sizeof(*window), GFP_KERNEL);
+ if (!window)
+ goto out_clear_window;
+
+ ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT,
+ win64->value, tce_setrange_multi_pSeriesLP_walk);
+ if (ret) {
+ dev_info(&dev->dev, "failed to map direct window for %s: %d\n",
+ dn->full_name, ret);
+ goto out_clear_window;
+ }
+
+ ret = prom_add_property(pdn, win64);
+ if (ret) {
+ dev_err(&dev->dev, "unable to add dma window property for %s: %d",
+ pdn->full_name, ret);
+ goto out_clear_window;
+ }
+
+ window->device = pdn;
+ window->prop = ddwprop;
+ spin_lock(&direct_window_list_lock);
+ list_add(&window->list, &direct_window_list);
+ spin_unlock(&direct_window_list_lock);
+
+ dma_addr = of_read_number(&create.addr_hi, 2);
+ goto out_unlock;
+
+out_clear_window:
+ remove_ddw(pdn);
+
+out_free_prop:
+ kfree(win64->name);
+ kfree(win64->value);
+ kfree(win64);
+
+out_unlock:
+ mutex_unlock(&direct_window_init_mutex);
+ return dma_addr;
+}
+
static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
{
struct device_node *pdn, *dn;
@@ -541,23 +1012,137 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
set_iommu_table_base(&dev->dev, pci->iommu_table);
}
+
+static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
+{
+ bool ddw_enabled = false;
+ struct device_node *pdn, *dn;
+ struct pci_dev *pdev;
+ const void *dma_window = NULL;
+ u64 dma_offset;
+
+ if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+ return -EIO;
+
+ /* only attempt to use a new window if 64-bit DMA is requested */
+ if (!disable_ddw && dma_mask == DMA_BIT_MASK(64)) {
+ pdev = to_pci_dev(dev);
+
+ dn = pci_device_to_OF_node(pdev);
+ dev_dbg(dev, "node is %s\n", dn->full_name);
+
+ /*
+ * the device tree might contain the dma-window properties
+ * per-device and not necessarily for the bus. So we need to
+ * search upwards in the tree until we either hit a dma-window
+ * property, OR find a parent with a table already allocated.
+ */
+ for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table;
+ pdn = pdn->parent) {
+ dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
+ if (dma_window)
+ break;
+ }
+ if (pdn && PCI_DN(pdn)) {
+ dma_offset = enable_ddw(pdev, pdn);
+ if (dma_offset != 0) {
+ dev_info(dev, "Using 64-bit direct DMA at offset %llx\n", dma_offset);
+ set_dma_offset(dev, dma_offset);
+ set_dma_ops(dev, &dma_direct_ops);
+ ddw_enabled = true;
+ }
+ }
+ }
+
+ /* fall-through to iommu ops */
+ if (!ddw_enabled) {
+ dev_info(dev, "Using 32-bit DMA via iommu\n");
+ set_dma_ops(dev, &dma_iommu_ops);
+ }
+
+ *dev->dma_mask = dma_mask;
+ return 0;
+}
+
#else /* CONFIG_PCI */
#define pci_dma_bus_setup_pSeries NULL
#define pci_dma_dev_setup_pSeries NULL
#define pci_dma_bus_setup_pSeriesLP NULL
#define pci_dma_dev_setup_pSeriesLP NULL
+#define dma_set_mask_pSeriesLP NULL
#endif /* !CONFIG_PCI */
+static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct direct_window *window;
+ struct memory_notify *arg = data;
+ int ret = 0;
+
+ switch (action) {
+ case MEM_GOING_ONLINE:
+ spin_lock(&direct_window_list_lock);
+ list_for_each_entry(window, &direct_window_list, list) {
+ ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn,
+ arg->nr_pages, window->prop);
+ /* XXX log error */
+ }
+ spin_unlock(&direct_window_list_lock);
+ break;
+ case MEM_CANCEL_ONLINE:
+ case MEM_OFFLINE:
+ spin_lock(&direct_window_list_lock);
+ list_for_each_entry(window, &direct_window_list, list) {
+ ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn,
+ arg->nr_pages, window->prop);
+ /* XXX log error */
+ }
+ spin_unlock(&direct_window_list_lock);
+ break;
+ default:
+ break;
+ }
+ if (ret && action != MEM_CANCEL_ONLINE)
+ return NOTIFY_BAD;
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block iommu_mem_nb = {
+ .notifier_call = iommu_mem_notifier,
+};
+
static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node)
{
int err = NOTIFY_OK;
struct device_node *np = node;
struct pci_dn *pci = PCI_DN(np);
+ struct direct_window *window;
switch (action) {
case PSERIES_RECONFIG_REMOVE:
if (pci && pci->iommu_table)
iommu_free_table(pci->iommu_table, np->full_name);
+
+ spin_lock(&direct_window_list_lock);
+ list_for_each_entry(window, &direct_window_list, list) {
+ if (window->device == np) {
+ list_del(&window->list);
+ kfree(window);
+ break;
+ }
+ }
+ spin_unlock(&direct_window_list_lock);
+
+ /*
+ * Because the notifier runs after isolation of the
+ * slot, we are guaranteed any DMA window has already
+ * been revoked and the TCEs have been marked invalid,
+ * so we don't need a call to remove_ddw(np). However,
+ * if an additional notifier action is added before the
+ * isolate call, we should update this code for
+ * completeness with such a call.
+ */
break;
default:
err = NOTIFY_DONE;
@@ -587,6 +1172,7 @@ void iommu_init_early_pSeries(void)
ppc_md.tce_get = tce_get_pSeriesLP;
ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
+ ppc_md.dma_set_mask = dma_set_mask_pSeriesLP;
} else {
ppc_md.tce_build = tce_build_pSeries;
ppc_md.tce_free = tce_free_pSeries;
@@ -597,6 +1183,7 @@ void iommu_init_early_pSeries(void)
pSeries_reconfig_notifier_register(&iommu_reconfig_nb);
+ register_memory_notifier(&iommu_mem_nb);
set_pci_dma_ops(&dma_iommu_ops);
}
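
With ppc_md.dma_set_mask wired up to dma_set_mask_pSeriesLP(), a PCI driver opts into the direct window simply by requesting a 64-bit DMA mask; if the platform refuses, 32-bit DMA through the IOMMU still works. A hedged sketch of the driver-side calling convention (driver name hypothetical):

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        int err;

        /* ask for 64-bit DMA; on pseries this may switch to a direct window */
        err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (err) {
                /* fall back to 32-bit DMA via the iommu ops */
                err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (err)
                        return err;
        }
        return 0;
}
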
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index 1164c3430f2c..18ac801f8e90 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -93,8 +93,18 @@ static void rtas_disable_msi(struct pci_dev *pdev)
if (!pdn)
return;
- if (rtas_change_msi(pdn, RTAS_CHANGE_FN, 0) != 0)
- pr_debug("rtas_msi: Setting MSIs to 0 failed!\n");
+ /*
+ * disabling MSI with the explicit interface also disables MSI-X
+ */
+ if (rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, 0) != 0) {
+ /*
+ * may have failed because explicit interface is not
+ * present
+ */
+ if (rtas_change_msi(pdn, RTAS_CHANGE_FN, 0) != 0) {
+ pr_debug("rtas_msi: Setting MSIs to 0 failed!\n");
+ }
+ }
}
static int rtas_query_irq_number(struct pci_dn *pdn, int offset)
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 7e828ba29bc3..419707b07248 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -16,6 +16,8 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/kmsg_dump.h>
#include <asm/uaccess.h>
#include <asm/nvram.h>
#include <asm/rtas.h>
@@ -30,17 +32,54 @@ static int nvram_fetch, nvram_store;
static char nvram_buf[NVRW_CNT]; /* assume this is in the first 4GB */
static DEFINE_SPINLOCK(nvram_lock);
-static long nvram_error_log_index = -1;
-static long nvram_error_log_size = 0;
-
struct err_log_info {
int error_type;
unsigned int seq_num;
};
-#define NVRAM_MAX_REQ 2079
-#define NVRAM_MIN_REQ 1055
-#define NVRAM_LOG_PART_NAME "ibm,rtas-log"
+struct nvram_os_partition {
+ const char *name;
+ int req_size; /* desired size, in bytes */
+ int min_size; /* minimum acceptable size (0 means req_size) */
+ long size; /* size of data portion (excluding err_log_info) */
+ long index; /* offset of data portion of partition */
+};
+
+static struct nvram_os_partition rtas_log_partition = {
+ .name = "ibm,rtas-log",
+ .req_size = 2079,
+ .min_size = 1055,
+ .index = -1
+};
+
+static struct nvram_os_partition oops_log_partition = {
+ .name = "lnx,oops-log",
+ .req_size = 4000,
+ .min_size = 2000,
+ .index = -1
+};
+
+static const char *pseries_nvram_os_partitions[] = {
+ "ibm,rtas-log",
+ "lnx,oops-log",
+ NULL
+};
+
+static void oops_to_nvram(struct kmsg_dumper *dumper,
+ enum kmsg_dump_reason reason,
+ const char *old_msgs, unsigned long old_len,
+ const char *new_msgs, unsigned long new_len);
+
+static struct kmsg_dumper nvram_kmsg_dumper = {
+ .dump = oops_to_nvram
+};
+
+/* See clobbering_unread_rtas_event() */
+#define NVRAM_RTAS_READ_TIMEOUT 5 /* seconds */
+static unsigned long last_unread_rtas_event; /* timestamp */
+
+/* We preallocate oops_buf during init to avoid kmalloc during oops/panic. */
+static char *oops_buf;
static ssize_t pSeries_nvram_read(char *buf, size_t count, loff_t *index)
{
@@ -134,7 +173,7 @@ static ssize_t pSeries_nvram_get_size(void)
}
-/* nvram_write_error_log
+/* nvram_write_os_partition, nvram_write_error_log
*
* We need to buffer the error logs into nvram to ensure that we have
* the failure information to decode. If we have a severe error there
@@ -156,48 +195,58 @@ static ssize_t pSeries_nvram_get_size(void)
* The 'data' section would look like (in bytes):
* +--------------+------------+-----------------------------------+
* | event_logged | sequence # | error log |
- * |0 3|4 7|8 nvram_error_log_size-1|
+ * |0 3|4 7|8 error_log_size-1|
* +--------------+------------+-----------------------------------+
*
* event_logged: 0 if event has not been logged to syslog, 1 if it has
* sequence #: The unique sequence # for each event. (until it wraps)
* error log: The error log from event_scan
*/
-int nvram_write_error_log(char * buff, int length,
- unsigned int err_type, unsigned int error_log_cnt)
+int nvram_write_os_partition(struct nvram_os_partition *part, char * buff,
+ int length, unsigned int err_type, unsigned int error_log_cnt)
{
int rc;
loff_t tmp_index;
struct err_log_info info;
- if (nvram_error_log_index == -1) {
+ if (part->index == -1) {
return -ESPIPE;
}
- if (length > nvram_error_log_size) {
- length = nvram_error_log_size;
+ if (length > part->size) {
+ length = part->size;
}
info.error_type = err_type;
info.seq_num = error_log_cnt;
- tmp_index = nvram_error_log_index;
+ tmp_index = part->index;
rc = ppc_md.nvram_write((char *)&info, sizeof(struct err_log_info), &tmp_index);
if (rc <= 0) {
- printk(KERN_ERR "nvram_write_error_log: Failed nvram_write (%d)\n", rc);
+ pr_err("%s: Failed nvram_write (%d)\n", __FUNCTION__, rc);
return rc;
}
rc = ppc_md.nvram_write(buff, length, &tmp_index);
if (rc <= 0) {
- printk(KERN_ERR "nvram_write_error_log: Failed nvram_write (%d)\n", rc);
+ pr_err("%s: Failed nvram_write (%d)\n", __FUNCTION__, rc);
return rc;
}
return 0;
}
+int nvram_write_error_log(char * buff, int length,
+ unsigned int err_type, unsigned int error_log_cnt)
+{
+ int rc = nvram_write_os_partition(&rtas_log_partition, buff, length,
+ err_type, error_log_cnt);
+ if (!rc)
+ last_unread_rtas_event = get_seconds();
+ return rc;
+}
+
/* nvram_read_error_log
*
* Reads nvram for error log for at most 'length'
@@ -209,13 +258,13 @@ int nvram_read_error_log(char * buff, int length,
loff_t tmp_index;
struct err_log_info info;
- if (nvram_error_log_index == -1)
+ if (rtas_log_partition.index == -1)
return -1;
- if (length > nvram_error_log_size)
- length = nvram_error_log_size;
+ if (length > rtas_log_partition.size)
+ length = rtas_log_partition.size;
- tmp_index = nvram_error_log_index;
+ tmp_index = rtas_log_partition.index;
rc = ppc_md.nvram_read((char *)&info, sizeof(struct err_log_info), &tmp_index);
if (rc <= 0) {
@@ -244,37 +293,40 @@ int nvram_clear_error_log(void)
int clear_word = ERR_FLAG_ALREADY_LOGGED;
int rc;
- if (nvram_error_log_index == -1)
+ if (rtas_log_partition.index == -1)
return -1;
- tmp_index = nvram_error_log_index;
+ tmp_index = rtas_log_partition.index;
rc = ppc_md.nvram_write((char *)&clear_word, sizeof(int), &tmp_index);
if (rc <= 0) {
printk(KERN_ERR "nvram_clear_error_log: Failed nvram_write (%d)\n", rc);
return rc;
}
+ last_unread_rtas_event = 0;
return 0;
}
-/* pseries_nvram_init_log_partition
+/* pseries_nvram_init_os_partition
*
- * This will setup the partition we need for buffering the
- * error logs and cleanup partitions if needed.
+ * This sets up a partition with an "OS" signature.
*
* The general strategy is the following:
- * 1.) If there is log partition large enough then use it.
- * 2.) If there is none large enough, search
- * for a free partition that is large enough.
- * 3.) If there is not a free partition large enough remove
- * _all_ OS partitions and consolidate the space.
- * 4.) Will first try getting a chunk that will satisfy the maximum
- * error log size (NVRAM_MAX_REQ).
- * 5.) If the max chunk cannot be allocated then try finding a chunk
- * that will satisfy the minum needed (NVRAM_MIN_REQ).
+ * 1.) If a partition with the indicated name already exists...
+ * - If it's large enough, use it.
+ * - Otherwise, recycle it and keep going.
+ * 2.) Search for a free partition that is large enough.
+ * 3.) If there's not a free partition large enough, recycle any obsolete
+ * OS partitions and try again.
+ * 4.) Will first try getting a chunk that will satisfy the requested size.
+ * 5.) If a chunk of the requested size cannot be allocated, then try finding
+ * a chunk that will satisfy the minimum needed.
+ *
+ * Returns 0 on success, else -1.
*/
-static int __init pseries_nvram_init_log_partition(void)
+static int __init pseries_nvram_init_os_partition(struct nvram_os_partition
+ *part)
{
loff_t p;
int size;
@@ -282,47 +334,76 @@ static int __init pseries_nvram_init_log_partition(void)
/* Scan nvram for partitions */
nvram_scan_partitions();
- /* Lookg for ours */
- p = nvram_find_partition(NVRAM_LOG_PART_NAME, NVRAM_SIG_OS, &size);
+ /* Look for ours */
+ p = nvram_find_partition(part->name, NVRAM_SIG_OS, &size);
/* Found one but too small, remove it */
- if (p && size < NVRAM_MIN_REQ) {
- pr_info("nvram: Found too small "NVRAM_LOG_PART_NAME" partition"
- ",removing it...");
- nvram_remove_partition(NVRAM_LOG_PART_NAME, NVRAM_SIG_OS);
+ if (p && size < part->min_size) {
+ pr_info("nvram: Found too small %s partition,"
+ " removing it...\n", part->name);
+ nvram_remove_partition(part->name, NVRAM_SIG_OS, NULL);
p = 0;
}
/* Create one if we didn't find */
if (!p) {
- p = nvram_create_partition(NVRAM_LOG_PART_NAME, NVRAM_SIG_OS,
- NVRAM_MAX_REQ, NVRAM_MIN_REQ);
- /* No room for it, try to get rid of any OS partition
- * and try again
- */
+ p = nvram_create_partition(part->name, NVRAM_SIG_OS,
+ part->req_size, part->min_size);
if (p == -ENOSPC) {
- pr_info("nvram: No room to create "NVRAM_LOG_PART_NAME
- " partition, deleting all OS partitions...");
- nvram_remove_partition(NULL, NVRAM_SIG_OS);
- p = nvram_create_partition(NVRAM_LOG_PART_NAME,
- NVRAM_SIG_OS, NVRAM_MAX_REQ,
- NVRAM_MIN_REQ);
+ pr_info("nvram: No room to create %s partition, "
+ "deleting any obsolete OS partitions...\n",
+ part->name);
+ nvram_remove_partition(NULL, NVRAM_SIG_OS,
+ pseries_nvram_os_partitions);
+ p = nvram_create_partition(part->name, NVRAM_SIG_OS,
+ part->req_size, part->min_size);
}
}
if (p <= 0) {
- pr_err("nvram: Failed to find or create "NVRAM_LOG_PART_NAME
- " partition, err %d\n", (int)p);
- return 0;
+ pr_err("nvram: Failed to find or create %s"
+ " partition, err %d\n", part->name, (int)p);
+ return -1;
}
- nvram_error_log_index = p;
- nvram_error_log_size = nvram_get_partition_size(p) -
- sizeof(struct err_log_info);
+ part->index = p;
+ part->size = nvram_get_partition_size(p) - sizeof(struct err_log_info);
return 0;
}
-machine_arch_initcall(pseries, pseries_nvram_init_log_partition);
+
+static void __init nvram_init_oops_partition(int rtas_partition_exists)
+{
+ int rc;
+
+ rc = pseries_nvram_init_os_partition(&oops_log_partition);
+ if (rc != 0) {
+ if (!rtas_partition_exists)
+ return;
+ pr_notice("nvram: Using %s partition to log both"
+ " RTAS errors and oops/panic reports\n",
+ rtas_log_partition.name);
+ memcpy(&oops_log_partition, &rtas_log_partition,
+ sizeof(rtas_log_partition));
+ }
+ oops_buf = kmalloc(oops_log_partition.size, GFP_KERNEL);
+ rc = kmsg_dump_register(&nvram_kmsg_dumper);
+ if (rc != 0) {
+ pr_err("nvram: kmsg_dump_register() failed; returned %d\n", rc);
+ kfree(oops_buf);
+ return;
+ }
+}
+
+static int __init pseries_nvram_init_log_partitions(void)
+{
+ int rc;
+
+ rc = pseries_nvram_init_os_partition(&rtas_log_partition);
+ nvram_init_oops_partition(rc == 0);
+ return 0;
+}
+machine_arch_initcall(pseries, pseries_nvram_init_log_partitions);
int __init pSeries_nvram_init(void)
{
@@ -353,3 +434,59 @@ int __init pSeries_nvram_init(void)
return 0;
}
+
+/*
+ * Try to capture the last capture_len bytes of the printk buffer. Return
+ * the amount actually captured.
+ */
+static size_t capture_last_msgs(const char *old_msgs, size_t old_len,
+ const char *new_msgs, size_t new_len,
+ char *captured, size_t capture_len)
+{
+ if (new_len >= capture_len) {
+ memcpy(captured, new_msgs + (new_len - capture_len),
+ capture_len);
+ return capture_len;
+ } else {
+ /* Grab the end of old_msgs. */
+ size_t old_tail_len = min(old_len, capture_len - new_len);
+ memcpy(captured, old_msgs + (old_len - old_tail_len),
+ old_tail_len);
+ memcpy(captured + old_tail_len, new_msgs, new_len);
+ return old_tail_len + new_len;
+ }
+}
+
+/*
+ * Are we using the ibm,rtas-log for oops/panic reports? And if so,
+ * would logging this oops/panic overwrite an RTAS event that rtas_errd
+ * hasn't had a chance to read and process? Return 1 if so, else 0.
+ *
+ * We assume that if rtas_errd hasn't read the RTAS event in
+ * NVRAM_RTAS_READ_TIMEOUT seconds, it's probably not going to.
+ */
+static int clobbering_unread_rtas_event(void)
+{
+ return (oops_log_partition.index == rtas_log_partition.index
+ && last_unread_rtas_event
+ && get_seconds() - last_unread_rtas_event <=
+ NVRAM_RTAS_READ_TIMEOUT);
+}
+
+/* our kmsg_dump callback */
+static void oops_to_nvram(struct kmsg_dumper *dumper,
+ enum kmsg_dump_reason reason,
+ const char *old_msgs, unsigned long old_len,
+ const char *new_msgs, unsigned long new_len)
+{
+ static unsigned int oops_count = 0;
+ size_t text_len;
+
+ if (clobbering_unread_rtas_event())
+ return;
+
+ text_len = capture_last_msgs(old_msgs, old_len, new_msgs, new_len,
+ oops_buf, oops_log_partition.size);
+ (void) nvram_write_os_partition(&oops_log_partition, oops_buf,
+ (int) text_len, ERR_TYPE_KERNEL_PANIC, ++oops_count);
+}
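
Each OS-signature partition written here starts with an err_log_info header followed by the payload, so reading the captured oops back follows the same two-step pattern as nvram_read_error_log(). A sketch, with a hypothetical helper name and minimal error handling:

static int example_read_os_partition(struct nvram_os_partition *part,
                                     char *buf, int length)
{
        struct err_log_info info;
        loff_t tmp_index;
        int rc;

        if (part->index == -1)
                return -ESPIPE;
        if (length > part->size)
                length = part->size;

        tmp_index = part->index;
        rc = ppc_md.nvram_read((char *)&info, sizeof(info), &tmp_index);
        if (rc <= 0)
                return rc;

        /* the payload immediately follows the header */
        return ppc_md.nvram_read(buf, length, &tmp_index);
}
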
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index d345bfd56bbe..2a0089a2c829 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -114,10 +114,13 @@ static void __init fwnmi_init(void)
static void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc)
{
+ struct irq_chip *chip = get_irq_desc_chip(desc);
unsigned int cascade_irq = i8259_irq();
+
if (cascade_irq != NO_IRQ)
generic_handle_irq(cascade_irq);
- desc->chip->eoi(irq);
+
+ chip->irq_eoi(&desc->irq_data);
}
static void __init pseries_setup_i8259_cascade(void)
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 7b96e5a270ce..01fea46c0335 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -202,20 +202,20 @@ static int get_irq_server(unsigned int virq, const struct cpumask *cpumask,
#define get_irq_server(virq, cpumask, strict_check) (default_server)
#endif
-static void xics_unmask_irq(unsigned int virq)
+static void xics_unmask_irq(struct irq_data *d)
{
unsigned int irq;
int call_status;
int server;
- pr_devel("xics: unmask virq %d\n", virq);
+ pr_devel("xics: unmask virq %d\n", d->irq);
- irq = (unsigned int)irq_map[virq].hwirq;
+ irq = (unsigned int)irq_map[d->irq].hwirq;
pr_devel(" -> map to hwirq 0x%x\n", irq);
if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
return;
- server = get_irq_server(virq, irq_to_desc(virq)->affinity, 0);
+ server = get_irq_server(d->irq, d->affinity, 0);
call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
DEFAULT_PRIORITY);
@@ -235,61 +235,61 @@ static void xics_unmask_irq(unsigned int virq)
}
}
-static unsigned int xics_startup(unsigned int virq)
+static unsigned int xics_startup(struct irq_data *d)
{
/*
* The generic MSI code returns with the interrupt disabled on the
* card, using the MSI mask bits. Firmware doesn't appear to unmask
* at that level, so we do it here by hand.
*/
- if (irq_to_desc(virq)->msi_desc)
- unmask_msi_irq(irq_get_irq_data(virq));
+ if (d->msi_desc)
+ unmask_msi_irq(d);
/* unmask it */
- xics_unmask_irq(virq);
+ xics_unmask_irq(d);
return 0;
}
-static void xics_mask_real_irq(unsigned int irq)
+static void xics_mask_real_irq(struct irq_data *d)
{
int call_status;
- if (irq == XICS_IPI)
+ if (d->irq == XICS_IPI)
return;
- call_status = rtas_call(ibm_int_off, 1, 1, NULL, irq);
+ call_status = rtas_call(ibm_int_off, 1, 1, NULL, d->irq);
if (call_status != 0) {
printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n",
- __func__, irq, call_status);
+ __func__, d->irq, call_status);
return;
}
/* Have to set XIVE to 0xff to be able to remove a slot */
- call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq,
+ call_status = rtas_call(ibm_set_xive, 3, 1, NULL, d->irq,
default_server, 0xff);
if (call_status != 0) {
printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n",
- __func__, irq, call_status);
+ __func__, d->irq, call_status);
return;
}
}
-static void xics_mask_irq(unsigned int virq)
+static void xics_mask_irq(struct irq_data *d)
{
unsigned int irq;
- pr_devel("xics: mask virq %d\n", virq);
+ pr_devel("xics: mask virq %d\n", d->irq);
- irq = (unsigned int)irq_map[virq].hwirq;
+ irq = (unsigned int)irq_map[d->irq].hwirq;
if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
return;
- xics_mask_real_irq(irq);
+ xics_mask_real_irq(d);
}
static void xics_mask_unknown_vec(unsigned int vec)
{
printk(KERN_ERR "Interrupt %u (real) is invalid, disabling it.\n", vec);
- xics_mask_real_irq(vec);
+ xics_mask_real_irq(irq_get_irq_data(vec));
}
static inline unsigned int xics_xirr_vector(unsigned int xirr)
@@ -371,30 +371,31 @@ static unsigned char pop_cppr(void)
return os_cppr->stack[--os_cppr->index];
}
-static void xics_eoi_direct(unsigned int virq)
+static void xics_eoi_direct(struct irq_data *d)
{
- unsigned int irq = (unsigned int)irq_map[virq].hwirq;
+ unsigned int irq = (unsigned int)irq_map[d->irq].hwirq;
iosync();
direct_xirr_info_set((pop_cppr() << 24) | irq);
}
-static void xics_eoi_lpar(unsigned int virq)
+static void xics_eoi_lpar(struct irq_data *d)
{
- unsigned int irq = (unsigned int)irq_map[virq].hwirq;
+ unsigned int irq = (unsigned int)irq_map[d->irq].hwirq;
iosync();
lpar_xirr_info_set((pop_cppr() << 24) | irq);
}
-static int xics_set_affinity(unsigned int virq, const struct cpumask *cpumask)
+static int
+xics_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force)
{
unsigned int irq;
int status;
int xics_status[2];
int irq_server;
- irq = (unsigned int)irq_map[virq].hwirq;
+ irq = (unsigned int)irq_map[d->irq].hwirq;
if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
return -1;
@@ -406,13 +407,13 @@ static int xics_set_affinity(unsigned int virq, const struct cpumask *cpumask)
return -1;
}
- irq_server = get_irq_server(virq, cpumask, 1);
+ irq_server = get_irq_server(d->irq, cpumask, 1);
if (irq_server == -1) {
char cpulist[128];
cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
printk(KERN_WARNING
"%s: No online cpus in the mask %s for irq %d\n",
- __func__, cpulist, virq);
+ __func__, cpulist, d->irq);
return -1;
}
@@ -430,20 +431,20 @@ static int xics_set_affinity(unsigned int virq, const struct cpumask *cpumask)
static struct irq_chip xics_pic_direct = {
.name = "XICS",
- .startup = xics_startup,
- .mask = xics_mask_irq,
- .unmask = xics_unmask_irq,
- .eoi = xics_eoi_direct,
- .set_affinity = xics_set_affinity
+ .irq_startup = xics_startup,
+ .irq_mask = xics_mask_irq,
+ .irq_unmask = xics_unmask_irq,
+ .irq_eoi = xics_eoi_direct,
+ .irq_set_affinity = xics_set_affinity
};
static struct irq_chip xics_pic_lpar = {
.name = "XICS",
- .startup = xics_startup,
- .mask = xics_mask_irq,
- .unmask = xics_unmask_irq,
- .eoi = xics_eoi_lpar,
- .set_affinity = xics_set_affinity
+ .irq_startup = xics_startup,
+ .irq_mask = xics_mask_irq,
+ .irq_unmask = xics_unmask_irq,
+ .irq_eoi = xics_eoi_lpar,
+ .irq_set_affinity = xics_set_affinity
};
@@ -890,6 +891,7 @@ void xics_migrate_irqs_away(void)
for_each_irq(virq) {
struct irq_desc *desc;
+ struct irq_chip *chip;
int xics_status[2];
int status;
unsigned long flags;
@@ -903,12 +905,15 @@ void xics_migrate_irqs_away(void)
/* We need to get IPIs still. */
if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
continue;
+
desc = irq_to_desc(virq);
/* We only need to migrate enabled IRQS */
- if (desc == NULL || desc->chip == NULL
- || desc->action == NULL
- || desc->chip->set_affinity == NULL)
+ if (desc == NULL || desc->action == NULL)
+ continue;
+
+ chip = get_irq_desc_chip(desc);
+ if (chip == NULL || chip->irq_set_affinity == NULL)
continue;
raw_spin_lock_irqsave(&desc->lock, flags);
@@ -934,8 +939,8 @@ void xics_migrate_irqs_away(void)
virq, cpu);
/* Reset affinity to all cpus */
- cpumask_setall(irq_to_desc(virq)->affinity);
- desc->chip->set_affinity(virq, cpu_all_mask);
+ cpumask_setall(desc->irq_data.affinity);
+ chip->irq_set_affinity(&desc->irq_data, cpu_all_mask, true);
unlock:
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c
index 00852124ff4a..0476bcc7c3e1 100644
--- a/arch/powerpc/sysdev/cpm1.c
+++ b/arch/powerpc/sysdev/cpm1.c
@@ -56,32 +56,32 @@ static cpic8xx_t __iomem *cpic_reg;
static struct irq_host *cpm_pic_host;
-static void cpm_mask_irq(unsigned int irq)
+static void cpm_mask_irq(struct irq_data *d)
{
- unsigned int cpm_vec = (unsigned int)irq_map[irq].hwirq;
+ unsigned int cpm_vec = (unsigned int)irq_map[d->irq].hwirq;
clrbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec));
}
-static void cpm_unmask_irq(unsigned int irq)
+static void cpm_unmask_irq(struct irq_data *d)
{
- unsigned int cpm_vec = (unsigned int)irq_map[irq].hwirq;
+ unsigned int cpm_vec = (unsigned int)irq_map[d->irq].hwirq;
setbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec));
}
-static void cpm_end_irq(unsigned int irq)
+static void cpm_end_irq(struct irq_data *d)
{
- unsigned int cpm_vec = (unsigned int)irq_map[irq].hwirq;
+ unsigned int cpm_vec = (unsigned int)irq_map[d->irq].hwirq;
out_be32(&cpic_reg->cpic_cisr, (1 << cpm_vec));
}
static struct irq_chip cpm_pic = {
.name = "CPM PIC",
- .mask = cpm_mask_irq,
- .unmask = cpm_unmask_irq,
- .eoi = cpm_end_irq,
+ .irq_mask = cpm_mask_irq,
+ .irq_unmask = cpm_unmask_irq,
+ .irq_eoi = cpm_end_irq,
};
int cpm_get_irq(void)
diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c
index fcea4ff825dd..473032556715 100644
--- a/arch/powerpc/sysdev/cpm2_pic.c
+++ b/arch/powerpc/sysdev/cpm2_pic.c
@@ -78,10 +78,10 @@ static const u_char irq_to_siubit[] = {
24, 25, 26, 27, 28, 29, 30, 31,
};
-static void cpm2_mask_irq(unsigned int virq)
+static void cpm2_mask_irq(struct irq_data *d)
{
int bit, word;
- unsigned int irq_nr = virq_to_hw(virq);
+ unsigned int irq_nr = virq_to_hw(d->irq);
bit = irq_to_siubit[irq_nr];
word = irq_to_siureg[irq_nr];
@@ -90,10 +90,10 @@ static void cpm2_mask_irq(unsigned int virq)
out_be32(&cpm2_intctl->ic_simrh + word, ppc_cached_irq_mask[word]);
}
-static void cpm2_unmask_irq(unsigned int virq)
+static void cpm2_unmask_irq(struct irq_data *d)
{
int bit, word;
- unsigned int irq_nr = virq_to_hw(virq);
+ unsigned int irq_nr = virq_to_hw(d->irq);
bit = irq_to_siubit[irq_nr];
word = irq_to_siureg[irq_nr];
@@ -102,10 +102,10 @@ static void cpm2_unmask_irq(unsigned int virq)
out_be32(&cpm2_intctl->ic_simrh + word, ppc_cached_irq_mask[word]);
}
-static void cpm2_ack(unsigned int virq)
+static void cpm2_ack(struct irq_data *d)
{
int bit, word;
- unsigned int irq_nr = virq_to_hw(virq);
+ unsigned int irq_nr = virq_to_hw(d->irq);
bit = irq_to_siubit[irq_nr];
word = irq_to_siureg[irq_nr];
@@ -113,11 +113,11 @@ static void cpm2_ack(unsigned int virq)
out_be32(&cpm2_intctl->ic_sipnrh + word, 1 << bit);
}
-static void cpm2_end_irq(unsigned int virq)
+static void cpm2_end_irq(struct irq_data *d)
{
struct irq_desc *desc;
int bit, word;
- unsigned int irq_nr = virq_to_hw(virq);
+ unsigned int irq_nr = virq_to_hw(d->irq);
desc = irq_to_desc(irq_nr);
if (!(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS))
@@ -137,10 +137,10 @@ static void cpm2_end_irq(unsigned int virq)
}
}
-static int cpm2_set_irq_type(unsigned int virq, unsigned int flow_type)
+static int cpm2_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
- unsigned int src = virq_to_hw(virq);
- struct irq_desc *desc = irq_to_desc(virq);
+ unsigned int src = virq_to_hw(d->irq);
+ struct irq_desc *desc = irq_to_desc(d->irq);
unsigned int vold, vnew, edibit;
/* Port C interrupts are either IRQ_TYPE_EDGE_FALLING or
@@ -199,11 +199,11 @@ err_sense:
static struct irq_chip cpm2_pic = {
.name = "CPM2 SIU",
- .mask = cpm2_mask_irq,
- .unmask = cpm2_unmask_irq,
- .ack = cpm2_ack,
- .eoi = cpm2_end_irq,
- .set_type = cpm2_set_irq_type,
+ .irq_mask = cpm2_mask_irq,
+ .irq_unmask = cpm2_unmask_irq,
+ .irq_ack = cpm2_ack,
+ .irq_eoi = cpm2_end_irq,
+ .irq_set_type = cpm2_set_irq_type,
};
unsigned int cpm2_get_irq(void)
diff --git a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
index 2b9f0c925326..5f88797dce73 100644
--- a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
+++ b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
@@ -93,14 +93,14 @@ static int __devinit mpc85xx_l2ctlr_of_probe(struct platform_device *dev)
l2cache_size = *prop;
sram_params.sram_size = get_cache_sram_size();
- if (sram_params.sram_size <= 0) {
+ if ((int)sram_params.sram_size <= 0) {
dev_err(&dev->dev,
"Entire L2 as cache, Aborting Cache-SRAM stuff\n");
return -EINVAL;
}
sram_params.sram_offset = get_cache_sram_offset();
- if (sram_params.sram_offset <= 0) {
+ if ((int64_t)sram_params.sram_offset <= 0) {
dev_err(&dev->dev,
"Entire L2 as cache, provide a valid sram offset\n");
return -EINVAL;
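
The casts above matter because sram_size and sram_offset are unsigned: a negative error value returned by the lookup helpers wraps to a huge positive number, so an unsigned "<= 0" test can only ever catch zero. A minimal illustration (assumed types, standalone):

/* Treat an unsigned field as signed so negative error codes are caught. */
static bool example_sram_size_invalid(unsigned int sram_size)
{
        return (int)sram_size <= 0;
}
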
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index ee6a8a52ac71..58e09b2833f2 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 Freescale Semiconductor, Inc.
+ * Copyright (C) 2007-2011 Freescale Semiconductor, Inc.
*
* Author: Tony Li <tony.li@freescale.com>
* Jason Jin <Jason.jin@freescale.com>
@@ -47,14 +47,14 @@ static inline u32 fsl_msi_read(u32 __iomem *base, unsigned int reg)
* We do not need this actually. The MSIR register has been read once
* in the cascade interrupt. So, this MSI interrupt has been acked
*/
-static void fsl_msi_end_irq(unsigned int virq)
+static void fsl_msi_end_irq(struct irq_data *d)
{
}
static struct irq_chip fsl_msi_chip = {
.irq_mask = mask_msi_irq,
.irq_unmask = unmask_msi_irq,
- .ack = fsl_msi_end_irq,
+ .irq_ack = fsl_msi_end_irq,
.name = "FSL-MSI",
};
@@ -183,6 +183,7 @@ out_free:
static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
{
+ struct irq_chip *chip = get_irq_desc_chip(desc);
unsigned int cascade_irq;
struct fsl_msi *msi_data;
int msir_index = -1;
@@ -196,11 +197,11 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
raw_spin_lock(&desc->lock);
if ((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) {
- if (desc->chip->mask_ack)
- desc->chip->mask_ack(irq);
+ if (chip->irq_mask_ack)
+ chip->irq_mask_ack(&desc->irq_data);
else {
- desc->chip->mask(irq);
- desc->chip->ack(irq);
+ chip->irq_mask(&desc->irq_data);
+ chip->irq_ack(&desc->irq_data);
}
}
@@ -238,11 +239,11 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
switch (msi_data->feature & FSL_PIC_IP_MASK) {
case FSL_PIC_IP_MPIC:
- desc->chip->eoi(irq);
+ chip->irq_eoi(&desc->irq_data);
break;
case FSL_PIC_IP_IPIC:
- if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
- desc->chip->unmask(irq);
+ if (!(desc->status & IRQ_DISABLED) && chip->irq_unmask)
+ chip->irq_unmask(&desc->irq_data);
break;
}
unlock:
@@ -273,18 +274,46 @@ static int fsl_of_msi_remove(struct platform_device *ofdev)
return 0;
}
+static int __devinit fsl_msi_setup_hwirq(struct fsl_msi *msi,
+ struct platform_device *dev,
+ int offset, int irq_index)
+{
+ struct fsl_msi_cascade_data *cascade_data = NULL;
+ int virt_msir;
+
+ virt_msir = irq_of_parse_and_map(dev->dev.of_node, irq_index);
+ if (virt_msir == NO_IRQ) {
+ dev_err(&dev->dev, "%s: Cannot translate IRQ index %d\n",
+ __func__, irq_index);
+ return 0;
+ }
+
+ cascade_data = kzalloc(sizeof(struct fsl_msi_cascade_data), GFP_KERNEL);
+ if (!cascade_data) {
+ dev_err(&dev->dev, "No memory for MSI cascade data\n");
+ return -ENOMEM;
+ }
+
+ msi->msi_virqs[irq_index] = virt_msir;
+ cascade_data->index = offset + irq_index;
+ cascade_data->msi_data = msi;
+ set_irq_data(virt_msir, cascade_data);
+ set_irq_chained_handler(virt_msir, fsl_msi_cascade);
+
+ return 0;
+}
+
static int __devinit fsl_of_msi_probe(struct platform_device *dev)
{
struct fsl_msi *msi;
struct resource res;
- int err, i, count;
+ int err, i, j, irq_index, count;
int rc;
- int virt_msir;
const u32 *p;
struct fsl_msi_feature *features;
- struct fsl_msi_cascade_data *cascade_data = NULL;
int len;
u32 offset;
+ static const u32 all_avail[] = { 0, NR_MSI_IRQS };
if (!dev->dev.of_match)
return -EINVAL;
@@ -335,42 +364,34 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev)
goto error_out;
}
- p = of_get_property(dev->dev.of_node, "interrupts", &count);
- if (!p) {
- dev_err(&dev->dev, "no interrupts property found on %s\n",
- dev->dev.of_node->full_name);
- err = -ENODEV;
- goto error_out;
- }
- if (count % 8 != 0) {
- dev_err(&dev->dev, "Malformed interrupts property on %s\n",
- dev->dev.of_node->full_name);
+ p = of_get_property(dev->dev.of_node, "msi-available-ranges", &len);
+ if (p && len % (2 * sizeof(u32)) != 0) {
+ dev_err(&dev->dev, "%s: Malformed msi-available-ranges property\n",
+ __func__);
err = -EINVAL;
goto error_out;
}
- offset = 0;
- p = of_get_property(dev->dev.of_node, "msi-available-ranges", &len);
- if (p)
- offset = *p / IRQS_PER_MSI_REG;
-
- count /= sizeof(u32);
- for (i = 0; i < min(count / 2, NR_MSI_REG); i++) {
- virt_msir = irq_of_parse_and_map(dev->dev.of_node, i);
- if (virt_msir != NO_IRQ) {
- cascade_data = kzalloc(
- sizeof(struct fsl_msi_cascade_data),
- GFP_KERNEL);
- if (!cascade_data) {
- dev_err(&dev->dev,
- "No memory for MSI cascade data\n");
- err = -ENOMEM;
+
+ if (!p)
+ p = all_avail;
+
+ for (irq_index = 0, i = 0; i < len / (2 * sizeof(u32)); i++) {
+ if (p[i * 2] % IRQS_PER_MSI_REG ||
+ p[i * 2 + 1] % IRQS_PER_MSI_REG) {
+ printk(KERN_WARNING "%s: %s: msi available range of %u at %u is not IRQ-aligned\n",
+ __func__, dev->dev.of_node->full_name,
+ p[i * 2 + 1], p[i * 2]);
+ err = -EINVAL;
+ goto error_out;
+ }
+
+ offset = p[i * 2] / IRQS_PER_MSI_REG;
+ count = p[i * 2 + 1] / IRQS_PER_MSI_REG;
+
+ for (j = 0; j < count; j++, irq_index++) {
+ err = fsl_msi_setup_hwirq(msi, dev, offset, irq_index);
+ if (err)
goto error_out;
- }
- msi->msi_virqs[i] = virt_msir;
- cascade_data->index = i + offset;
- cascade_data->msi_data = msi;
- set_irq_data(virt_msir, (void *)cascade_data);
- set_irq_chained_handler(virt_msir, fsl_msi_cascade);
}
}
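The rewritten probe reads msi-available-ranges as <start count> u32 pairs counted in MSI numbers and converts each pair to whole MSIR registers before wiring up one cascade interrupt per register. A standalone sketch of that walk with made-up values (hypothetical names, assuming 32 MSIs per MSIR register as IRQS_PER_MSI_REG defines):

#include <linux/kernel.h>

#define EXAMPLE_IRQS_PER_MSI_REG 32

/* <start count> pairs in MSI numbers: here MSIs 0-127 and 192-255 are available */
static const u32 example_ranges[] = { 0, 128, 192, 64 };

static void example_walk_ranges(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(example_ranges) / 2; i++) {
		u32 offset = example_ranges[i * 2] / EXAMPLE_IRQS_PER_MSI_REG;
		u32 regs = example_ranges[i * 2 + 1] / EXAMPLE_IRQS_PER_MSI_REG;

		/* the driver calls fsl_msi_setup_hwirq() once per register here */
		pr_info("range %d covers MSIR registers %u..%u\n",
			i, offset, offset + regs - 1);
	}
}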
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 818f7c6c8fa1..f8f7f28c6343 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -1,7 +1,7 @@
/*
* MPC83xx/85xx/86xx PCI/PCIE support routing.
*
- * Copyright 2007-2010 Freescale Semiconductor, Inc.
+ * Copyright 2007-2011 Freescale Semiconductor, Inc.
* Copyright 2008-2009 MontaVista Software, Inc.
*
* Initial author: Xianghua Xiao <x.xiao@freescale.com>
@@ -99,7 +99,7 @@ static void __init setup_pci_atmu(struct pci_controller *hose,
struct resource *rsrc)
{
struct ccsr_pci __iomem *pci;
- int i, j, n, mem_log, win_idx = 2;
+ int i, j, n, mem_log, win_idx = 3, start_idx = 1, end_idx = 4;
u64 mem, sz, paddr_hi = 0;
u64 paddr_lo = ULLONG_MAX;
u32 pcicsrbar = 0, pcicsrbar_sz;
@@ -109,6 +109,13 @@ static void __init setup_pci_atmu(struct pci_controller *hose,
pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n",
(u64)rsrc->start, (u64)rsrc->end - (u64)rsrc->start + 1);
+
+ if (of_device_is_compatible(hose->dn, "fsl,qoriq-pcie-v2.2")) {
+ win_idx = 2;
+ start_idx = 0;
+ end_idx = 3;
+ }
+
pci = ioremap(rsrc->start, rsrc->end - rsrc->start + 1);
if (!pci) {
dev_err(hose->parent, "Unable to map ATMU registers\n");
@@ -118,7 +125,7 @@ static void __init setup_pci_atmu(struct pci_controller *hose,
/* Disable all windows (except powar0 since it's ignored) */
for(i = 1; i < 5; i++)
out_be32(&pci->pow[i].powar, 0);
- for(i = 0; i < 3; i++)
+ for (i = start_idx; i < end_idx; i++)
out_be32(&pci->piw[i].piwar, 0);
/* Setup outbound MEM window */
@@ -204,7 +211,7 @@ static void __init setup_pci_atmu(struct pci_controller *hose,
mem_log++;
}
- piwar |= (mem_log - 1);
+ piwar |= ((mem_log - 1) & PIWAR_SZ_MASK);
/* Setup inbound memory window */
out_be32(&pci->piw[win_idx].pitar, 0x00000000);
diff --git a/arch/powerpc/sysdev/fsl_pci.h b/arch/powerpc/sysdev/fsl_pci.h
index 8ad72a11f77b..a39ed5cc2c5a 100644
--- a/arch/powerpc/sysdev/fsl_pci.h
+++ b/arch/powerpc/sysdev/fsl_pci.h
@@ -1,7 +1,7 @@
/*
* MPC85xx/86xx PCI Express structure define
*
- * Copyright 2007 Freescale Semiconductor, Inc
+ * Copyright 2007,2011 Freescale Semiconductor, Inc
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -21,6 +21,7 @@
#define PIWAR_TGI_LOCAL 0x00f00000 /* target - local memory */
#define PIWAR_READ_SNOOP 0x00050000
#define PIWAR_WRITE_SNOOP 0x00005000
+#define PIWAR_SZ_MASK 0x0000003f
/* PCI/PCI Express outbound window reg */
struct pci_outbound_window_regs {
@@ -49,7 +50,9 @@ struct ccsr_pci {
__be32 int_ack; /* 0x.008 - PCI Interrupt Acknowledge Register */
__be32 pex_otb_cpl_tor; /* 0x.00c - PCIE Outbound completion timeout register */
__be32 pex_conf_tor; /* 0x.010 - PCIE configuration timeout register */
- u8 res2[12];
+ __be32 pex_config; /* 0x.014 - PCIE CONFIG Register */
+ __be32 pex_int_status; /* 0x.018 - PCIE interrupt status */
+ u8 res2[4];
__be32 pex_pme_mes_dr; /* 0x.020 - PCIE PME and message detect register */
__be32 pex_pme_mes_disr; /* 0x.024 - PCIE PME and message disable register */
__be32 pex_pme_mes_ier; /* 0x.028 - PCIE PME and message interrupt enable register */
@@ -62,14 +65,14 @@ struct ccsr_pci {
* in all of the other outbound windows.
*/
struct pci_outbound_window_regs pow[5];
-
- u8 res14[256];
-
-/* PCI/PCI Express inbound window 3-1
+ u8 res14[96];
+ struct pci_inbound_window_regs pmit; /* 0xd00 - 0xd9c Inbound MSI */
+ u8 res6[96];
+/* PCI/PCI Express inbound window 3-0
* inbound window 1 supports only a 32-bit base address and does not
* define an inbound window base extended address register.
*/
- struct pci_inbound_window_regs piw[3];
+ struct pci_inbound_window_regs piw[4];
__be32 pex_err_dr; /* 0x.e00 - PCI/PCIE error detect register */
u8 res21[4];
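PIWAR_SZ_MASK confines the inbound-window size field to its low six bits; the field encodes the window size as log2(size) - 1. A small hypothetical helper showing that encoding, mirroring what setup_pci_atmu() does with mem_log:

#include <linux/log2.h>

#define EXAMPLE_PIWAR_SZ_MASK	0x0000003f	/* same value as PIWAR_SZ_MASK above */

static inline u32 example_piwar_size_bits(u64 window_size)
{
	u32 mem_log = ilog2(window_size);

	/* round up to a power of two, as the window setup code does */
	if ((1ull << mem_log) != window_size)
		mem_log++;

	return (mem_log - 1) & EXAMPLE_PIWAR_SZ_MASK;
}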
diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c
index 6323e70e6bf4..aeda4c8d0a0a 100644
--- a/arch/powerpc/sysdev/i8259.c
+++ b/arch/powerpc/sysdev/i8259.c
@@ -78,19 +78,19 @@ unsigned int i8259_irq(void)
return irq;
}
-static void i8259_mask_and_ack_irq(unsigned int irq_nr)
+static void i8259_mask_and_ack_irq(struct irq_data *d)
{
unsigned long flags;
raw_spin_lock_irqsave(&i8259_lock, flags);
- if (irq_nr > 7) {
- cached_A1 |= 1 << (irq_nr-8);
+ if (d->irq > 7) {
+ cached_A1 |= 1 << (d->irq-8);
inb(0xA1); /* DUMMY */
outb(cached_A1, 0xA1);
outb(0x20, 0xA0); /* Non-specific EOI */
outb(0x20, 0x20); /* Non-specific EOI to cascade */
} else {
- cached_21 |= 1 << irq_nr;
+ cached_21 |= 1 << d->irq;
inb(0x21); /* DUMMY */
outb(cached_21, 0x21);
outb(0x20, 0x20); /* Non-specific EOI */
@@ -104,42 +104,42 @@ static void i8259_set_irq_mask(int irq_nr)
outb(cached_21,0x21);
}
-static void i8259_mask_irq(unsigned int irq_nr)
+static void i8259_mask_irq(struct irq_data *d)
{
unsigned long flags;
- pr_debug("i8259_mask_irq(%d)\n", irq_nr);
+ pr_debug("i8259_mask_irq(%d)\n", d->irq);
raw_spin_lock_irqsave(&i8259_lock, flags);
- if (irq_nr < 8)
- cached_21 |= 1 << irq_nr;
+ if (d->irq < 8)
+ cached_21 |= 1 << d->irq;
else
- cached_A1 |= 1 << (irq_nr-8);
- i8259_set_irq_mask(irq_nr);
+ cached_A1 |= 1 << (d->irq-8);
+ i8259_set_irq_mask(d->irq);
raw_spin_unlock_irqrestore(&i8259_lock, flags);
}
-static void i8259_unmask_irq(unsigned int irq_nr)
+static void i8259_unmask_irq(struct irq_data *d)
{
unsigned long flags;
- pr_debug("i8259_unmask_irq(%d)\n", irq_nr);
+ pr_debug("i8259_unmask_irq(%d)\n", d->irq);
raw_spin_lock_irqsave(&i8259_lock, flags);
- if (irq_nr < 8)
- cached_21 &= ~(1 << irq_nr);
+ if (d->irq < 8)
+ cached_21 &= ~(1 << d->irq);
else
- cached_A1 &= ~(1 << (irq_nr-8));
- i8259_set_irq_mask(irq_nr);
+ cached_A1 &= ~(1 << (d->irq-8));
+ i8259_set_irq_mask(d->irq);
raw_spin_unlock_irqrestore(&i8259_lock, flags);
}
static struct irq_chip i8259_pic = {
.name = "i8259",
- .mask = i8259_mask_irq,
- .disable = i8259_mask_irq,
- .unmask = i8259_unmask_irq,
- .mask_ack = i8259_mask_and_ack_irq,
+ .irq_mask = i8259_mask_irq,
+ .irq_disable = i8259_mask_irq,
+ .irq_unmask = i8259_unmask_irq,
+ .irq_mask_ack = i8259_mask_and_ack_irq,
};
static struct resource pic1_iores = {
@@ -188,7 +188,7 @@ static int i8259_host_map(struct irq_host *h, unsigned int virq,
static void i8259_host_unmap(struct irq_host *h, unsigned int virq)
{
/* Make sure irq is masked in hardware */
- i8259_mask_irq(virq);
+ i8259_mask_irq(irq_get_irq_data(virq));
/* remove chip and handler */
set_irq_chip_and_handler(virq, NULL, NULL);
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index d7b9b9c69287..497047dc986e 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -523,10 +523,10 @@ static inline struct ipic * ipic_from_irq(unsigned int virq)
#define ipic_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq)
-static void ipic_unmask_irq(unsigned int virq)
+static void ipic_unmask_irq(struct irq_data *d)
{
- struct ipic *ipic = ipic_from_irq(virq);
- unsigned int src = ipic_irq_to_hw(virq);
+ struct ipic *ipic = ipic_from_irq(d->irq);
+ unsigned int src = ipic_irq_to_hw(d->irq);
unsigned long flags;
u32 temp;
@@ -539,10 +539,10 @@ static void ipic_unmask_irq(unsigned int virq)
raw_spin_unlock_irqrestore(&ipic_lock, flags);
}
-static void ipic_mask_irq(unsigned int virq)
+static void ipic_mask_irq(struct irq_data *d)
{
- struct ipic *ipic = ipic_from_irq(virq);
- unsigned int src = ipic_irq_to_hw(virq);
+ struct ipic *ipic = ipic_from_irq(d->irq);
+ unsigned int src = ipic_irq_to_hw(d->irq);
unsigned long flags;
u32 temp;
@@ -559,10 +559,10 @@ static void ipic_mask_irq(unsigned int virq)
raw_spin_unlock_irqrestore(&ipic_lock, flags);
}
-static void ipic_ack_irq(unsigned int virq)
+static void ipic_ack_irq(struct irq_data *d)
{
- struct ipic *ipic = ipic_from_irq(virq);
- unsigned int src = ipic_irq_to_hw(virq);
+ struct ipic *ipic = ipic_from_irq(d->irq);
+ unsigned int src = ipic_irq_to_hw(d->irq);
unsigned long flags;
u32 temp;
@@ -578,10 +578,10 @@ static void ipic_ack_irq(unsigned int virq)
raw_spin_unlock_irqrestore(&ipic_lock, flags);
}
-static void ipic_mask_irq_and_ack(unsigned int virq)
+static void ipic_mask_irq_and_ack(struct irq_data *d)
{
- struct ipic *ipic = ipic_from_irq(virq);
- unsigned int src = ipic_irq_to_hw(virq);
+ struct ipic *ipic = ipic_from_irq(d->irq);
+ unsigned int src = ipic_irq_to_hw(d->irq);
unsigned long flags;
u32 temp;
@@ -601,11 +601,11 @@ static void ipic_mask_irq_and_ack(unsigned int virq)
raw_spin_unlock_irqrestore(&ipic_lock, flags);
}
-static int ipic_set_irq_type(unsigned int virq, unsigned int flow_type)
+static int ipic_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
- struct ipic *ipic = ipic_from_irq(virq);
- unsigned int src = ipic_irq_to_hw(virq);
- struct irq_desc *desc = irq_to_desc(virq);
+ struct ipic *ipic = ipic_from_irq(d->irq);
+ unsigned int src = ipic_irq_to_hw(d->irq);
+ struct irq_desc *desc = irq_to_desc(d->irq);
unsigned int vold, vnew, edibit;
if (flow_type == IRQ_TYPE_NONE)
@@ -630,10 +630,10 @@ static int ipic_set_irq_type(unsigned int virq, unsigned int flow_type)
if (flow_type & IRQ_TYPE_LEVEL_LOW) {
desc->status |= IRQ_LEVEL;
desc->handle_irq = handle_level_irq;
- desc->chip = &ipic_level_irq_chip;
+ desc->irq_data.chip = &ipic_level_irq_chip;
} else {
desc->handle_irq = handle_edge_irq;
- desc->chip = &ipic_edge_irq_chip;
+ desc->irq_data.chip = &ipic_edge_irq_chip;
}
/* only EXT IRQ senses are programmable on ipic
@@ -661,19 +661,19 @@ static int ipic_set_irq_type(unsigned int virq, unsigned int flow_type)
/* level interrupts and edge interrupts have different ack operations */
static struct irq_chip ipic_level_irq_chip = {
.name = "IPIC",
- .unmask = ipic_unmask_irq,
- .mask = ipic_mask_irq,
- .mask_ack = ipic_mask_irq,
- .set_type = ipic_set_irq_type,
+ .irq_unmask = ipic_unmask_irq,
+ .irq_mask = ipic_mask_irq,
+ .irq_mask_ack = ipic_mask_irq,
+ .irq_set_type = ipic_set_irq_type,
};
static struct irq_chip ipic_edge_irq_chip = {
.name = "IPIC",
- .unmask = ipic_unmask_irq,
- .mask = ipic_mask_irq,
- .mask_ack = ipic_mask_irq_and_ack,
- .ack = ipic_ack_irq,
- .set_type = ipic_set_irq_type,
+ .irq_unmask = ipic_unmask_irq,
+ .irq_mask = ipic_mask_irq,
+ .irq_mask_ack = ipic_mask_irq_and_ack,
+ .irq_ack = ipic_ack_irq,
+ .irq_set_type = ipic_set_irq_type,
};
static int ipic_host_match(struct irq_host *h, struct device_node *node)
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c
index 8c27d261aba8..1a75a7fb4a99 100644
--- a/arch/powerpc/sysdev/mpc8xx_pic.c
+++ b/arch/powerpc/sysdev/mpc8xx_pic.c
@@ -25,10 +25,10 @@ static sysconf8xx_t __iomem *siu_reg;
int cpm_get_irq(struct pt_regs *regs);
-static void mpc8xx_unmask_irq(unsigned int virq)
+static void mpc8xx_unmask_irq(struct irq_data *d)
{
int bit, word;
- unsigned int irq_nr = (unsigned int)irq_map[virq].hwirq;
+ unsigned int irq_nr = (unsigned int)irq_map[d->irq].hwirq;
bit = irq_nr & 0x1f;
word = irq_nr >> 5;
@@ -37,10 +37,10 @@ static void mpc8xx_unmask_irq(unsigned int virq)
out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]);
}
-static void mpc8xx_mask_irq(unsigned int virq)
+static void mpc8xx_mask_irq(struct irq_data *d)
{
int bit, word;
- unsigned int irq_nr = (unsigned int)irq_map[virq].hwirq;
+ unsigned int irq_nr = (unsigned int)irq_map[d->irq].hwirq;
bit = irq_nr & 0x1f;
word = irq_nr >> 5;
@@ -49,19 +49,19 @@ static void mpc8xx_mask_irq(unsigned int virq)
out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]);
}
-static void mpc8xx_ack(unsigned int virq)
+static void mpc8xx_ack(struct irq_data *d)
{
int bit;
- unsigned int irq_nr = (unsigned int)irq_map[virq].hwirq;
+ unsigned int irq_nr = (unsigned int)irq_map[d->irq].hwirq;
bit = irq_nr & 0x1f;
out_be32(&siu_reg->sc_sipend, 1 << (31-bit));
}
-static void mpc8xx_end_irq(unsigned int virq)
+static void mpc8xx_end_irq(struct irq_data *d)
{
int bit, word;
- unsigned int irq_nr = (unsigned int)irq_map[virq].hwirq;
+ unsigned int irq_nr = (unsigned int)irq_map[d->irq].hwirq;
bit = irq_nr & 0x1f;
word = irq_nr >> 5;
@@ -70,9 +70,9 @@ static void mpc8xx_end_irq(unsigned int virq)
out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]);
}
-static int mpc8xx_set_irq_type(unsigned int virq, unsigned int flow_type)
+static int mpc8xx_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
- struct irq_desc *desc = irq_to_desc(virq);
+ struct irq_desc *desc = irq_to_desc(d->irq);
desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
@@ -80,7 +80,7 @@ static int mpc8xx_set_irq_type(unsigned int virq, unsigned int flow_type)
desc->status |= IRQ_LEVEL;
if (flow_type & IRQ_TYPE_EDGE_FALLING) {
- irq_hw_number_t hw = (unsigned int)irq_map[virq].hwirq;
+ irq_hw_number_t hw = (unsigned int)irq_map[d->irq].hwirq;
unsigned int siel = in_be32(&siu_reg->sc_siel);
/* only external IRQ senses are programmable */
@@ -95,11 +95,11 @@ static int mpc8xx_set_irq_type(unsigned int virq, unsigned int flow_type)
static struct irq_chip mpc8xx_pic = {
.name = "MPC8XX SIU",
- .unmask = mpc8xx_unmask_irq,
- .mask = mpc8xx_mask_irq,
- .ack = mpc8xx_ack,
- .eoi = mpc8xx_end_irq,
- .set_type = mpc8xx_set_irq_type,
+ .irq_unmask = mpc8xx_unmask_irq,
+ .irq_mask = mpc8xx_mask_irq,
+ .irq_ack = mpc8xx_ack,
+ .irq_eoi = mpc8xx_end_irq,
+ .irq_set_type = mpc8xx_set_irq_type,
};
unsigned int mpc8xx_get_irq(void)
diff --git a/arch/powerpc/sysdev/mpc8xxx_gpio.c b/arch/powerpc/sysdev/mpc8xxx_gpio.c
index c48cd8178079..232e701245d7 100644
--- a/arch/powerpc/sysdev/mpc8xxx_gpio.c
+++ b/arch/powerpc/sysdev/mpc8xxx_gpio.c
@@ -155,43 +155,43 @@ static void mpc8xxx_gpio_irq_cascade(unsigned int irq, struct irq_desc *desc)
32 - ffs(mask)));
}
-static void mpc8xxx_irq_unmask(unsigned int virq)
+static void mpc8xxx_irq_unmask(struct irq_data *d)
{
- struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_chip_data(virq);
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
unsigned long flags;
spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- setbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(virq_to_hw(virq)));
+ setbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(virq_to_hw(d->irq)));
spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
}
-static void mpc8xxx_irq_mask(unsigned int virq)
+static void mpc8xxx_irq_mask(struct irq_data *d)
{
- struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_chip_data(virq);
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
unsigned long flags;
spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- clrbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(virq_to_hw(virq)));
+ clrbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(virq_to_hw(d->irq)));
spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
}
-static void mpc8xxx_irq_ack(unsigned int virq)
+static void mpc8xxx_irq_ack(struct irq_data *d)
{
- struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_chip_data(virq);
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
- out_be32(mm->regs + GPIO_IER, mpc8xxx_gpio2mask(virq_to_hw(virq)));
+ out_be32(mm->regs + GPIO_IER, mpc8xxx_gpio2mask(virq_to_hw(d->irq)));
}
-static int mpc8xxx_irq_set_type(unsigned int virq, unsigned int flow_type)
+static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
- struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_chip_data(virq);
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
unsigned long flags;
@@ -199,14 +199,14 @@ static int mpc8xxx_irq_set_type(unsigned int virq, unsigned int flow_type)
case IRQ_TYPE_EDGE_FALLING:
spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
setbits32(mm->regs + GPIO_ICR,
- mpc8xxx_gpio2mask(virq_to_hw(virq)));
+ mpc8xxx_gpio2mask(virq_to_hw(d->irq)));
spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
case IRQ_TYPE_EDGE_BOTH:
spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
clrbits32(mm->regs + GPIO_ICR,
- mpc8xxx_gpio2mask(virq_to_hw(virq)));
+ mpc8xxx_gpio2mask(virq_to_hw(d->irq)));
spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
@@ -217,11 +217,11 @@ static int mpc8xxx_irq_set_type(unsigned int virq, unsigned int flow_type)
return 0;
}
-static int mpc512x_irq_set_type(unsigned int virq, unsigned int flow_type)
+static int mpc512x_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
- struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_chip_data(virq);
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
- unsigned long gpio = virq_to_hw(virq);
+ unsigned long gpio = virq_to_hw(d->irq);
void __iomem *reg;
unsigned int shift;
unsigned long flags;
@@ -264,10 +264,10 @@ static int mpc512x_irq_set_type(unsigned int virq, unsigned int flow_type)
static struct irq_chip mpc8xxx_irq_chip = {
.name = "mpc8xxx-gpio",
- .unmask = mpc8xxx_irq_unmask,
- .mask = mpc8xxx_irq_mask,
- .ack = mpc8xxx_irq_ack,
- .set_type = mpc8xxx_irq_set_type,
+ .irq_unmask = mpc8xxx_irq_unmask,
+ .irq_mask = mpc8xxx_irq_mask,
+ .irq_ack = mpc8xxx_irq_ack,
+ .irq_set_type = mpc8xxx_irq_set_type,
};
static int mpc8xxx_gpio_irq_map(struct irq_host *h, unsigned int virq,
@@ -276,7 +276,7 @@ static int mpc8xxx_gpio_irq_map(struct irq_host *h, unsigned int virq,
struct mpc8xxx_gpio_chip *mpc8xxx_gc = h->host_data;
if (mpc8xxx_gc->of_dev_id_data)
- mpc8xxx_irq_chip.set_type = mpc8xxx_gc->of_dev_id_data;
+ mpc8xxx_irq_chip.irq_set_type = mpc8xxx_gc->of_dev_id_data;
set_irq_chip_data(virq, h->host_data);
set_irq_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_level_irq);
@@ -310,6 +310,7 @@ static struct of_device_id mpc8xxx_gpio_ids[] __initdata = {
{ .compatible = "fsl,mpc8572-gpio", },
{ .compatible = "fsl,mpc8610-gpio", },
{ .compatible = "fsl,mpc5121-gpio", .data = mpc512x_irq_set_type, },
+ { .compatible = "fsl,qoriq-gpio", },
{}
};
@@ -389,9 +390,6 @@ static int __init mpc8xxx_add_gpiochips(void)
for_each_matching_node(np, mpc8xxx_gpio_ids)
mpc8xxx_add_controller(np);
- for_each_compatible_node(np, NULL, "fsl,qoriq-gpio")
- mpc8xxx_add_controller(np);
-
return 0;
}
arch_initcall(mpc8xxx_add_gpiochips);
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index b0c8469e5ddd..eb7021815e2d 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -611,7 +611,7 @@ static struct mpic *mpic_find(unsigned int irq)
if (irq < NUM_ISA_INTERRUPTS)
return NULL;
- return irq_to_desc(irq)->chip_data;
+ return get_irq_chip_data(irq);
}
/* Determine if the linux irq is an IPI */
@@ -636,16 +636,22 @@ static inline u32 mpic_physmask(u32 cpumask)
#ifdef CONFIG_SMP
/* Get the mpic structure from the IPI number */
-static inline struct mpic * mpic_from_ipi(unsigned int ipi)
+static inline struct mpic * mpic_from_ipi(struct irq_data *d)
{
- return irq_to_desc(ipi)->chip_data;
+ return irq_data_get_irq_chip_data(d);
}
#endif
/* Get the mpic structure from the irq number */
static inline struct mpic * mpic_from_irq(unsigned int irq)
{
- return irq_to_desc(irq)->chip_data;
+ return get_irq_chip_data(irq);
+}
+
+/* Get the mpic structure from the irq data */
+static inline struct mpic * mpic_from_irq_data(struct irq_data *d)
+{
+ return irq_data_get_irq_chip_data(d);
}
/* Send an EOI */
@@ -660,13 +666,13 @@ static inline void mpic_eoi(struct mpic *mpic)
*/
-void mpic_unmask_irq(unsigned int irq)
+void mpic_unmask_irq(struct irq_data *d)
{
unsigned int loops = 100000;
- struct mpic *mpic = mpic_from_irq(irq);
- unsigned int src = mpic_irq_to_hw(irq);
+ struct mpic *mpic = mpic_from_irq_data(d);
+ unsigned int src = mpic_irq_to_hw(d->irq);
- DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, irq, src);
+ DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, d->irq, src);
mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI),
mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) &
@@ -681,13 +687,13 @@ void mpic_unmask_irq(unsigned int irq)
} while(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK);
}
-void mpic_mask_irq(unsigned int irq)
+void mpic_mask_irq(struct irq_data *d)
{
unsigned int loops = 100000;
- struct mpic *mpic = mpic_from_irq(irq);
- unsigned int src = mpic_irq_to_hw(irq);
+ struct mpic *mpic = mpic_from_irq_data(d);
+ unsigned int src = mpic_irq_to_hw(d->irq);
- DBG("%s: disable_irq: %d (src %d)\n", mpic->name, irq, src);
+ DBG("%s: disable_irq: %d (src %d)\n", mpic->name, d->irq, src);
mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI),
mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) |
@@ -703,12 +709,12 @@ void mpic_mask_irq(unsigned int irq)
} while(!(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK));
}
-void mpic_end_irq(unsigned int irq)
+void mpic_end_irq(struct irq_data *d)
{
- struct mpic *mpic = mpic_from_irq(irq);
+ struct mpic *mpic = mpic_from_irq_data(d);
#ifdef DEBUG_IRQ
- DBG("%s: end_irq: %d\n", mpic->name, irq);
+ DBG("%s: end_irq: %d\n", mpic->name, d->irq);
#endif
/* We always EOI on end_irq() even for edge interrupts since that
* should only lower the priority, the MPIC should have properly
@@ -720,51 +726,51 @@ void mpic_end_irq(unsigned int irq)
#ifdef CONFIG_MPIC_U3_HT_IRQS
-static void mpic_unmask_ht_irq(unsigned int irq)
+static void mpic_unmask_ht_irq(struct irq_data *d)
{
- struct mpic *mpic = mpic_from_irq(irq);
- unsigned int src = mpic_irq_to_hw(irq);
+ struct mpic *mpic = mpic_from_irq_data(d);
+ unsigned int src = mpic_irq_to_hw(d->irq);
- mpic_unmask_irq(irq);
+ mpic_unmask_irq(d);
- if (irq_to_desc(irq)->status & IRQ_LEVEL)
+ if (irq_to_desc(d->irq)->status & IRQ_LEVEL)
mpic_ht_end_irq(mpic, src);
}
-static unsigned int mpic_startup_ht_irq(unsigned int irq)
+static unsigned int mpic_startup_ht_irq(struct irq_data *d)
{
- struct mpic *mpic = mpic_from_irq(irq);
- unsigned int src = mpic_irq_to_hw(irq);
+ struct mpic *mpic = mpic_from_irq_data(d);
+ unsigned int src = mpic_irq_to_hw(d->irq);
- mpic_unmask_irq(irq);
- mpic_startup_ht_interrupt(mpic, src, irq_to_desc(irq)->status);
+ mpic_unmask_irq(d);
+ mpic_startup_ht_interrupt(mpic, src, irq_to_desc(d->irq)->status);
return 0;
}
-static void mpic_shutdown_ht_irq(unsigned int irq)
+static void mpic_shutdown_ht_irq(struct irq_data *d)
{
- struct mpic *mpic = mpic_from_irq(irq);
- unsigned int src = mpic_irq_to_hw(irq);
+ struct mpic *mpic = mpic_from_irq_data(d);
+ unsigned int src = mpic_irq_to_hw(d->irq);
- mpic_shutdown_ht_interrupt(mpic, src, irq_to_desc(irq)->status);
- mpic_mask_irq(irq);
+ mpic_shutdown_ht_interrupt(mpic, src, irq_to_desc(d->irq)->status);
+ mpic_mask_irq(d);
}
-static void mpic_end_ht_irq(unsigned int irq)
+static void mpic_end_ht_irq(struct irq_data *d)
{
- struct mpic *mpic = mpic_from_irq(irq);
- unsigned int src = mpic_irq_to_hw(irq);
+ struct mpic *mpic = mpic_from_irq_data(d);
+ unsigned int src = mpic_irq_to_hw(d->irq);
#ifdef DEBUG_IRQ
- DBG("%s: end_irq: %d\n", mpic->name, irq);
+ DBG("%s: end_irq: %d\n", mpic->name, d->irq);
#endif
/* We always EOI on end_irq() even for edge interrupts since that
* should only lower the priority, the MPIC should have properly
* latched another edge interrupt coming in anyway
*/
- if (irq_to_desc(irq)->status & IRQ_LEVEL)
+ if (irq_to_desc(d->irq)->status & IRQ_LEVEL)
mpic_ht_end_irq(mpic, src);
mpic_eoi(mpic);
}
@@ -772,23 +778,23 @@ static void mpic_end_ht_irq(unsigned int irq)
#ifdef CONFIG_SMP
-static void mpic_unmask_ipi(unsigned int irq)
+static void mpic_unmask_ipi(struct irq_data *d)
{
- struct mpic *mpic = mpic_from_ipi(irq);
- unsigned int src = mpic_irq_to_hw(irq) - mpic->ipi_vecs[0];
+ struct mpic *mpic = mpic_from_ipi(d);
+ unsigned int src = mpic_irq_to_hw(d->irq) - mpic->ipi_vecs[0];
- DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, irq, src);
+ DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, d->irq, src);
mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK);
}
-static void mpic_mask_ipi(unsigned int irq)
+static void mpic_mask_ipi(struct irq_data *d)
{
/* NEVER disable an IPI... that's just plain wrong! */
}
-static void mpic_end_ipi(unsigned int irq)
+static void mpic_end_ipi(struct irq_data *d)
{
- struct mpic *mpic = mpic_from_ipi(irq);
+ struct mpic *mpic = mpic_from_ipi(d);
/*
* IPIs are marked IRQ_PER_CPU. This has the side effect of
@@ -802,10 +808,11 @@ static void mpic_end_ipi(unsigned int irq)
#endif /* CONFIG_SMP */
-int mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
+int mpic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
+ bool force)
{
- struct mpic *mpic = mpic_from_irq(irq);
- unsigned int src = mpic_irq_to_hw(irq);
+ struct mpic *mpic = mpic_from_irq_data(d);
+ unsigned int src = mpic_irq_to_hw(d->irq);
if (mpic->flags & MPIC_SINGLE_DEST_CPU) {
int cpuid = irq_choose_cpu(cpumask);
@@ -848,15 +855,15 @@ static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type)
}
}
-int mpic_set_irq_type(unsigned int virq, unsigned int flow_type)
+int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
- struct mpic *mpic = mpic_from_irq(virq);
- unsigned int src = mpic_irq_to_hw(virq);
- struct irq_desc *desc = irq_to_desc(virq);
+ struct mpic *mpic = mpic_from_irq_data(d);
+ unsigned int src = mpic_irq_to_hw(d->irq);
+ struct irq_desc *desc = irq_to_desc(d->irq);
unsigned int vecpri, vold, vnew;
DBG("mpic: set_irq_type(mpic:@%p,virq:%d,src:0x%x,type:0x%x)\n",
- mpic, virq, src, flow_type);
+ mpic, d->irq, src, flow_type);
if (src >= mpic->irq_count)
return -EINVAL;
@@ -907,28 +914,28 @@ void mpic_set_vector(unsigned int virq, unsigned int vector)
}
static struct irq_chip mpic_irq_chip = {
- .mask = mpic_mask_irq,
- .unmask = mpic_unmask_irq,
- .eoi = mpic_end_irq,
- .set_type = mpic_set_irq_type,
+ .irq_mask = mpic_mask_irq,
+ .irq_unmask = mpic_unmask_irq,
+ .irq_eoi = mpic_end_irq,
+ .irq_set_type = mpic_set_irq_type,
};
#ifdef CONFIG_SMP
static struct irq_chip mpic_ipi_chip = {
- .mask = mpic_mask_ipi,
- .unmask = mpic_unmask_ipi,
- .eoi = mpic_end_ipi,
+ .irq_mask = mpic_mask_ipi,
+ .irq_unmask = mpic_unmask_ipi,
+ .irq_eoi = mpic_end_ipi,
};
#endif /* CONFIG_SMP */
#ifdef CONFIG_MPIC_U3_HT_IRQS
static struct irq_chip mpic_irq_ht_chip = {
- .startup = mpic_startup_ht_irq,
- .shutdown = mpic_shutdown_ht_irq,
- .mask = mpic_mask_irq,
- .unmask = mpic_unmask_ht_irq,
- .eoi = mpic_end_ht_irq,
- .set_type = mpic_set_irq_type,
+ .irq_startup = mpic_startup_ht_irq,
+ .irq_shutdown = mpic_shutdown_ht_irq,
+ .irq_mask = mpic_mask_irq,
+ .irq_unmask = mpic_unmask_ht_irq,
+ .irq_eoi = mpic_end_ht_irq,
+ .irq_set_type = mpic_set_irq_type,
};
#endif /* CONFIG_MPIC_U3_HT_IRQS */
@@ -1060,12 +1067,12 @@ struct mpic * __init mpic_alloc(struct device_node *node,
mpic->hc_irq = mpic_irq_chip;
mpic->hc_irq.name = name;
if (flags & MPIC_PRIMARY)
- mpic->hc_irq.set_affinity = mpic_set_affinity;
+ mpic->hc_irq.irq_set_affinity = mpic_set_affinity;
#ifdef CONFIG_MPIC_U3_HT_IRQS
mpic->hc_ht_irq = mpic_irq_ht_chip;
mpic->hc_ht_irq.name = name;
if (flags & MPIC_PRIMARY)
- mpic->hc_ht_irq.set_affinity = mpic_set_affinity;
+ mpic->hc_ht_irq.irq_set_affinity = mpic_set_affinity;
#endif /* CONFIG_MPIC_U3_HT_IRQS */
#ifdef CONFIG_SMP
diff --git a/arch/powerpc/sysdev/mpic.h b/arch/powerpc/sysdev/mpic.h
index e4a6df77b8d7..13f3e8913a93 100644
--- a/arch/powerpc/sysdev/mpic.h
+++ b/arch/powerpc/sysdev/mpic.h
@@ -34,9 +34,10 @@ static inline int mpic_pasemi_msi_init(struct mpic *mpic)
}
#endif
-extern int mpic_set_irq_type(unsigned int virq, unsigned int flow_type);
+extern int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type);
extern void mpic_set_vector(unsigned int virq, unsigned int vector);
-extern int mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask);
+extern int mpic_set_affinity(struct irq_data *d,
+ const struct cpumask *cpumask, bool force);
extern void mpic_reset_core(int cpu);
#endif /* _POWERPC_SYSDEV_MPIC_H */
diff --git a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c
index 320ad5a9a25d..0b7794acfce1 100644
--- a/arch/powerpc/sysdev/mpic_pasemi_msi.c
+++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c
@@ -43,24 +43,24 @@ static void mpic_pasemi_msi_mask_irq(struct irq_data *data)
{
pr_debug("mpic_pasemi_msi_mask_irq %d\n", data->irq);
mask_msi_irq(data);
- mpic_mask_irq(data->irq);
+ mpic_mask_irq(data);
}
static void mpic_pasemi_msi_unmask_irq(struct irq_data *data)
{
pr_debug("mpic_pasemi_msi_unmask_irq %d\n", data->irq);
- mpic_unmask_irq(data->irq);
+ mpic_unmask_irq(data);
unmask_msi_irq(data);
}
static struct irq_chip mpic_pasemi_msi_chip = {
- .irq_shutdown = mpic_pasemi_msi_mask_irq,
- .irq_mask = mpic_pasemi_msi_mask_irq,
- .irq_unmask = mpic_pasemi_msi_unmask_irq,
- .eoi = mpic_end_irq,
- .set_type = mpic_set_irq_type,
- .set_affinity = mpic_set_affinity,
- .name = "PASEMI-MSI",
+ .irq_shutdown = mpic_pasemi_msi_mask_irq,
+ .irq_mask = mpic_pasemi_msi_mask_irq,
+ .irq_unmask = mpic_pasemi_msi_unmask_irq,
+ .irq_eoi = mpic_end_irq,
+ .irq_set_type = mpic_set_irq_type,
+ .irq_set_affinity = mpic_set_affinity,
+ .name = "PASEMI-MSI",
};
static int pasemi_msi_check_device(struct pci_dev *pdev, int nvec, int type)
diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c
index a2b028b4a202..71900ac78270 100644
--- a/arch/powerpc/sysdev/mpic_u3msi.c
+++ b/arch/powerpc/sysdev/mpic_u3msi.c
@@ -26,23 +26,23 @@ static struct mpic *msi_mpic;
static void mpic_u3msi_mask_irq(struct irq_data *data)
{
mask_msi_irq(data);
- mpic_mask_irq(data->irq);
+ mpic_mask_irq(data);
}
static void mpic_u3msi_unmask_irq(struct irq_data *data)
{
- mpic_unmask_irq(data->irq);
+ mpic_unmask_irq(data);
unmask_msi_irq(data);
}
static struct irq_chip mpic_u3msi_chip = {
- .irq_shutdown = mpic_u3msi_mask_irq,
- .irq_mask = mpic_u3msi_mask_irq,
- .irq_unmask = mpic_u3msi_unmask_irq,
- .eoi = mpic_end_irq,
- .set_type = mpic_set_irq_type,
- .set_affinity = mpic_set_affinity,
- .name = "MPIC-U3MSI",
+ .irq_shutdown = mpic_u3msi_mask_irq,
+ .irq_mask = mpic_u3msi_mask_irq,
+ .irq_unmask = mpic_u3msi_unmask_irq,
+ .irq_eoi = mpic_end_irq,
+ .irq_set_type = mpic_set_irq_type,
+ .irq_set_affinity = mpic_set_affinity,
+ .name = "MPIC-U3MSI",
};
static u64 read_ht_magic_addr(struct pci_dev *pdev, unsigned int pos)
diff --git a/arch/powerpc/sysdev/mv64x60_dev.c b/arch/powerpc/sysdev/mv64x60_dev.c
index feaee402e2d6..0f6af41ebb44 100644
--- a/arch/powerpc/sysdev/mv64x60_dev.c
+++ b/arch/powerpc/sysdev/mv64x60_dev.c
@@ -346,7 +346,7 @@ static int __init mv64x60_i2c_device_setup(struct device_node *np, int id)
if (prop)
pdata.freq_m = *prop;
- pdata.freq_m = 3; /* default */
+ pdata.freq_n = 3; /* default */
prop = of_get_property(np, "freq_n", NULL);
if (prop)
pdata.freq_n = *prop;
diff --git a/arch/powerpc/sysdev/mv64x60_pic.c b/arch/powerpc/sysdev/mv64x60_pic.c
index 485b92477d7c..bc61ebb8987c 100644
--- a/arch/powerpc/sysdev/mv64x60_pic.c
+++ b/arch/powerpc/sysdev/mv64x60_pic.c
@@ -76,9 +76,9 @@ static struct irq_host *mv64x60_irq_host;
* mv64x60_chip_low functions
*/
-static void mv64x60_mask_low(unsigned int virq)
+static void mv64x60_mask_low(struct irq_data *d)
{
- int level2 = irq_map[virq].hwirq & MV64x60_LEVEL2_MASK;
+ int level2 = irq_map[d->irq].hwirq & MV64x60_LEVEL2_MASK;
unsigned long flags;
spin_lock_irqsave(&mv64x60_lock, flags);
@@ -89,9 +89,9 @@ static void mv64x60_mask_low(unsigned int virq)
(void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO);
}
-static void mv64x60_unmask_low(unsigned int virq)
+static void mv64x60_unmask_low(struct irq_data *d)
{
- int level2 = irq_map[virq].hwirq & MV64x60_LEVEL2_MASK;
+ int level2 = irq_map[d->irq].hwirq & MV64x60_LEVEL2_MASK;
unsigned long flags;
spin_lock_irqsave(&mv64x60_lock, flags);
@@ -104,18 +104,18 @@ static void mv64x60_unmask_low(unsigned int virq)
static struct irq_chip mv64x60_chip_low = {
.name = "mv64x60_low",
- .mask = mv64x60_mask_low,
- .mask_ack = mv64x60_mask_low,
- .unmask = mv64x60_unmask_low,
+ .irq_mask = mv64x60_mask_low,
+ .irq_mask_ack = mv64x60_mask_low,
+ .irq_unmask = mv64x60_unmask_low,
};
/*
* mv64x60_chip_high functions
*/
-static void mv64x60_mask_high(unsigned int virq)
+static void mv64x60_mask_high(struct irq_data *d)
{
- int level2 = irq_map[virq].hwirq & MV64x60_LEVEL2_MASK;
+ int level2 = irq_map[d->irq].hwirq & MV64x60_LEVEL2_MASK;
unsigned long flags;
spin_lock_irqsave(&mv64x60_lock, flags);
@@ -126,9 +126,9 @@ static void mv64x60_mask_high(unsigned int virq)
(void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI);
}
-static void mv64x60_unmask_high(unsigned int virq)
+static void mv64x60_unmask_high(struct irq_data *d)
{
- int level2 = irq_map[virq].hwirq & MV64x60_LEVEL2_MASK;
+ int level2 = irq_map[d->irq].hwirq & MV64x60_LEVEL2_MASK;
unsigned long flags;
spin_lock_irqsave(&mv64x60_lock, flags);
@@ -141,18 +141,18 @@ static void mv64x60_unmask_high(unsigned int virq)
static struct irq_chip mv64x60_chip_high = {
.name = "mv64x60_high",
- .mask = mv64x60_mask_high,
- .mask_ack = mv64x60_mask_high,
- .unmask = mv64x60_unmask_high,
+ .irq_mask = mv64x60_mask_high,
+ .irq_mask_ack = mv64x60_mask_high,
+ .irq_unmask = mv64x60_unmask_high,
};
/*
* mv64x60_chip_gpp functions
*/
-static void mv64x60_mask_gpp(unsigned int virq)
+static void mv64x60_mask_gpp(struct irq_data *d)
{
- int level2 = irq_map[virq].hwirq & MV64x60_LEVEL2_MASK;
+ int level2 = irq_map[d->irq].hwirq & MV64x60_LEVEL2_MASK;
unsigned long flags;
spin_lock_irqsave(&mv64x60_lock, flags);
@@ -163,9 +163,9 @@ static void mv64x60_mask_gpp(unsigned int virq)
(void)in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK);
}
-static void mv64x60_mask_ack_gpp(unsigned int virq)
+static void mv64x60_mask_ack_gpp(struct irq_data *d)
{
- int level2 = irq_map[virq].hwirq & MV64x60_LEVEL2_MASK;
+ int level2 = irq_map[d->irq].hwirq & MV64x60_LEVEL2_MASK;
unsigned long flags;
spin_lock_irqsave(&mv64x60_lock, flags);
@@ -178,9 +178,9 @@ static void mv64x60_mask_ack_gpp(unsigned int virq)
(void)in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_CAUSE);
}
-static void mv64x60_unmask_gpp(unsigned int virq)
+static void mv64x60_unmask_gpp(struct irq_data *d)
{
- int level2 = irq_map[virq].hwirq & MV64x60_LEVEL2_MASK;
+ int level2 = irq_map[d->irq].hwirq & MV64x60_LEVEL2_MASK;
unsigned long flags;
spin_lock_irqsave(&mv64x60_lock, flags);
@@ -193,9 +193,9 @@ static void mv64x60_unmask_gpp(unsigned int virq)
static struct irq_chip mv64x60_chip_gpp = {
.name = "mv64x60_gpp",
- .mask = mv64x60_mask_gpp,
- .mask_ack = mv64x60_mask_ack_gpp,
- .unmask = mv64x60_unmask_gpp,
+ .irq_mask = mv64x60_mask_gpp,
+ .irq_mask_ack = mv64x60_mask_ack_gpp,
+ .irq_unmask = mv64x60_unmask_gpp,
};
/*
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c
index 541ba9863647..8c9ded8ea07c 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c
@@ -189,15 +189,20 @@ static inline void qe_ic_write(volatile __be32 __iomem * base, unsigned int reg
static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
{
- return irq_to_desc(virq)->chip_data;
+ return get_irq_chip_data(virq);
+}
+
+static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d)
+{
+ return irq_data_get_irq_chip_data(d);
}
#define virq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq)
-static void qe_ic_unmask_irq(unsigned int virq)
+static void qe_ic_unmask_irq(struct irq_data *d)
{
- struct qe_ic *qe_ic = qe_ic_from_irq(virq);
- unsigned int src = virq_to_hw(virq);
+ struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
+ unsigned int src = virq_to_hw(d->irq);
unsigned long flags;
u32 temp;
@@ -210,10 +215,10 @@ static void qe_ic_unmask_irq(unsigned int virq)
raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
}
-static void qe_ic_mask_irq(unsigned int virq)
+static void qe_ic_mask_irq(struct irq_data *d)
{
- struct qe_ic *qe_ic = qe_ic_from_irq(virq);
- unsigned int src = virq_to_hw(virq);
+ struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
+ unsigned int src = virq_to_hw(d->irq);
unsigned long flags;
u32 temp;
@@ -238,9 +243,9 @@ static void qe_ic_mask_irq(unsigned int virq)
static struct irq_chip qe_ic_irq_chip = {
.name = "QEIC",
- .unmask = qe_ic_unmask_irq,
- .mask = qe_ic_mask_irq,
- .mask_ack = qe_ic_mask_irq,
+ .irq_unmask = qe_ic_unmask_irq,
+ .irq_mask = qe_ic_mask_irq,
+ .irq_mask_ack = qe_ic_mask_irq,
};
static int qe_ic_host_match(struct irq_host *h, struct device_node *node)
diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c
index 0ab9281e49ae..02c91db90037 100644
--- a/arch/powerpc/sysdev/tsi108_pci.c
+++ b/arch/powerpc/sysdev/tsi108_pci.c
@@ -343,24 +343,9 @@ static inline unsigned int get_pci_source(void)
* Linux descriptor level callbacks
*/
-static void tsi108_pci_irq_enable(u_int irq)
+static void tsi108_pci_irq_unmask(struct irq_data *d)
{
- tsi108_pci_int_unmask(irq);
-}
-
-static void tsi108_pci_irq_disable(u_int irq)
-{
- tsi108_pci_int_mask(irq);
-}
-
-static void tsi108_pci_irq_ack(u_int irq)
-{
- tsi108_pci_int_mask(irq);
-}
-
-static void tsi108_pci_irq_end(u_int irq)
-{
- tsi108_pci_int_unmask(irq);
+ tsi108_pci_int_unmask(d->irq);
/* Enable interrupts from PCI block */
tsi108_write_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_ENABLE,
@@ -370,16 +355,25 @@ static void tsi108_pci_irq_end(u_int irq)
mb();
}
+static void tsi108_pci_irq_mask(struct irq_data *d)
+{
+ tsi108_pci_int_mask(d->irq);
+}
+
+static void tsi108_pci_irq_ack(struct irq_data *d)
+{
+ tsi108_pci_int_mask(d->irq);
+}
+
/*
* Interrupt controller descriptor for cascaded PCI interrupt controller.
*/
static struct irq_chip tsi108_pci_irq = {
.name = "tsi108_PCI_int",
- .mask = tsi108_pci_irq_disable,
- .ack = tsi108_pci_irq_ack,
- .end = tsi108_pci_irq_end,
- .unmask = tsi108_pci_irq_enable,
+ .irq_mask = tsi108_pci_irq_mask,
+ .irq_ack = tsi108_pci_irq_ack,
+ .irq_unmask = tsi108_pci_irq_unmask,
};
static int pci_irq_host_xlate(struct irq_host *h, struct device_node *ct,
@@ -437,8 +431,11 @@ void __init tsi108_pci_int_init(struct device_node *node)
void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc)
{
+ struct irq_chip *chip = get_irq_desc_chip(desc);
unsigned int cascade_irq = get_pci_source();
+
if (cascade_irq != NO_IRQ)
generic_handle_irq(cascade_irq);
- desc->chip->eoi(irq);
+
+ chip->irq_eoi(&desc->irq_data);
}
diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c
index 0038fb78f094..835f7958b237 100644
--- a/arch/powerpc/sysdev/uic.c
+++ b/arch/powerpc/sysdev/uic.c
@@ -55,11 +55,11 @@ struct uic {
struct irq_host *irqhost;
};
-static void uic_unmask_irq(unsigned int virq)
+static void uic_unmask_irq(struct irq_data *d)
{
- struct irq_desc *desc = irq_to_desc(virq);
- struct uic *uic = get_irq_chip_data(virq);
- unsigned int src = uic_irq_to_hw(virq);
+ struct irq_desc *desc = irq_to_desc(d->irq);
+ struct uic *uic = irq_data_get_irq_chip_data(d);
+ unsigned int src = uic_irq_to_hw(d->irq);
unsigned long flags;
u32 er, sr;
@@ -74,10 +74,10 @@ static void uic_unmask_irq(unsigned int virq)
spin_unlock_irqrestore(&uic->lock, flags);
}
-static void uic_mask_irq(unsigned int virq)
+static void uic_mask_irq(struct irq_data *d)
{
- struct uic *uic = get_irq_chip_data(virq);
- unsigned int src = uic_irq_to_hw(virq);
+ struct uic *uic = irq_data_get_irq_chip_data(d);
+ unsigned int src = uic_irq_to_hw(d->irq);
unsigned long flags;
u32 er;
@@ -88,10 +88,10 @@ static void uic_mask_irq(unsigned int virq)
spin_unlock_irqrestore(&uic->lock, flags);
}
-static void uic_ack_irq(unsigned int virq)
+static void uic_ack_irq(struct irq_data *d)
{
- struct uic *uic = get_irq_chip_data(virq);
- unsigned int src = uic_irq_to_hw(virq);
+ struct uic *uic = irq_data_get_irq_chip_data(d);
+ unsigned int src = uic_irq_to_hw(d->irq);
unsigned long flags;
spin_lock_irqsave(&uic->lock, flags);
@@ -99,11 +99,11 @@ static void uic_ack_irq(unsigned int virq)
spin_unlock_irqrestore(&uic->lock, flags);
}
-static void uic_mask_ack_irq(unsigned int virq)
+static void uic_mask_ack_irq(struct irq_data *d)
{
- struct irq_desc *desc = irq_to_desc(virq);
- struct uic *uic = get_irq_chip_data(virq);
- unsigned int src = uic_irq_to_hw(virq);
+ struct irq_desc *desc = irq_to_desc(d->irq);
+ struct uic *uic = irq_data_get_irq_chip_data(d);
+ unsigned int src = uic_irq_to_hw(d->irq);
unsigned long flags;
u32 er, sr;
@@ -125,18 +125,18 @@ static void uic_mask_ack_irq(unsigned int virq)
spin_unlock_irqrestore(&uic->lock, flags);
}
-static int uic_set_irq_type(unsigned int virq, unsigned int flow_type)
+static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
- struct uic *uic = get_irq_chip_data(virq);
- unsigned int src = uic_irq_to_hw(virq);
- struct irq_desc *desc = irq_to_desc(virq);
+ struct uic *uic = irq_data_get_irq_chip_data(d);
+ unsigned int src = uic_irq_to_hw(d->irq);
+ struct irq_desc *desc = irq_to_desc(d->irq);
unsigned long flags;
int trigger, polarity;
u32 tr, pr, mask;
switch (flow_type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_NONE:
- uic_mask_irq(virq);
+ uic_mask_irq(d);
return 0;
case IRQ_TYPE_EDGE_RISING:
@@ -178,11 +178,11 @@ static int uic_set_irq_type(unsigned int virq, unsigned int flow_type)
static struct irq_chip uic_irq_chip = {
.name = "UIC",
- .unmask = uic_unmask_irq,
- .mask = uic_mask_irq,
- .mask_ack = uic_mask_ack_irq,
- .ack = uic_ack_irq,
- .set_type = uic_set_irq_type,
+ .irq_unmask = uic_unmask_irq,
+ .irq_mask = uic_mask_irq,
+ .irq_mask_ack = uic_mask_ack_irq,
+ .irq_ack = uic_ack_irq,
+ .irq_set_type = uic_set_irq_type,
};
static int uic_host_map(struct irq_host *h, unsigned int virq,
@@ -220,6 +220,7 @@ static struct irq_host_ops uic_host_ops = {
void uic_irq_cascade(unsigned int virq, struct irq_desc *desc)
{
+ struct irq_chip *chip = get_irq_desc_chip(desc);
struct uic *uic = get_irq_data(virq);
u32 msr;
int src;
@@ -227,9 +228,9 @@ void uic_irq_cascade(unsigned int virq, struct irq_desc *desc)
raw_spin_lock(&desc->lock);
if (desc->status & IRQ_LEVEL)
- desc->chip->mask(virq);
+ chip->irq_mask(&desc->irq_data);
else
- desc->chip->mask_ack(virq);
+ chip->irq_mask_ack(&desc->irq_data);
raw_spin_unlock(&desc->lock);
msr = mfdcr(uic->dcrbase + UIC_MSR);
@@ -244,9 +245,9 @@ void uic_irq_cascade(unsigned int virq, struct irq_desc *desc)
uic_irq_ret:
raw_spin_lock(&desc->lock);
if (desc->status & IRQ_LEVEL)
- desc->chip->ack(virq);
- if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
- desc->chip->unmask(virq);
+ chip->irq_ack(&desc->irq_data);
+ if (!(desc->status & IRQ_DISABLED) && chip->irq_unmask)
+ chip->irq_unmask(&desc->irq_data);
raw_spin_unlock(&desc->lock);
}
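Cascade handlers such as uic_irq_cascade() now look the parent chip up once with get_irq_desc_chip() and hand it &desc->irq_data rather than indirecting through desc->chip with an irq number. A compact sketch of that flow, with a hypothetical stub standing in for the child-controller read:

#include <linux/irq.h>
#include <asm/irq.h>	/* NO_IRQ */

static unsigned int example_read_pending_child(void)
{
	return NO_IRQ;	/* a real driver reads its controller's status register here */
}

static void example_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = get_irq_desc_chip(desc);
	unsigned int child;

	raw_spin_lock(&desc->lock);
	if (desc->status & IRQ_LEVEL)
		chip->irq_mask(&desc->irq_data);	/* quiesce the parent source */
	else
		chip->irq_mask_ack(&desc->irq_data);
	raw_spin_unlock(&desc->lock);

	child = example_read_pending_child();
	if (child != NO_IRQ)
		generic_handle_irq(child);

	raw_spin_lock(&desc->lock);
	if (!(desc->status & IRQ_DISABLED) && chip->irq_unmask)
		chip->irq_unmask(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}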
diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c
index 1e0ccfaf403e..7436f3ed4df6 100644
--- a/arch/powerpc/sysdev/xilinx_intc.c
+++ b/arch/powerpc/sysdev/xilinx_intc.c
@@ -69,17 +69,17 @@ static unsigned char xilinx_intc_map_senses[] = {
*
* IRQ Chip common (across level and edge) operations
*/
-static void xilinx_intc_mask(unsigned int virq)
+static void xilinx_intc_mask(struct irq_data *d)
{
- int irq = virq_to_hw(virq);
- void * regs = get_irq_chip_data(virq);
+ int irq = virq_to_hw(d->irq);
+ void * regs = irq_data_get_irq_chip_data(d);
pr_debug("mask: %d\n", irq);
out_be32(regs + XINTC_CIE, 1 << irq);
}
-static int xilinx_intc_set_type(unsigned int virq, unsigned int flow_type)
+static int xilinx_intc_set_type(struct irq_data *d, unsigned int flow_type)
{
- struct irq_desc *desc = irq_to_desc(virq);
+ struct irq_desc *desc = irq_to_desc(d->irq);
desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
@@ -91,10 +91,10 @@ static int xilinx_intc_set_type(unsigned int virq, unsigned int flow_type)
/*
* IRQ Chip level operations
*/
-static void xilinx_intc_level_unmask(unsigned int virq)
+static void xilinx_intc_level_unmask(struct irq_data *d)
{
- int irq = virq_to_hw(virq);
- void * regs = get_irq_chip_data(virq);
+ int irq = virq_to_hw(d->irq);
+ void * regs = irq_data_get_irq_chip_data(d);
pr_debug("unmask: %d\n", irq);
out_be32(regs + XINTC_SIE, 1 << irq);
@@ -107,37 +107,37 @@ static void xilinx_intc_level_unmask(unsigned int virq)
static struct irq_chip xilinx_intc_level_irqchip = {
.name = "Xilinx Level INTC",
- .mask = xilinx_intc_mask,
- .mask_ack = xilinx_intc_mask,
- .unmask = xilinx_intc_level_unmask,
- .set_type = xilinx_intc_set_type,
+ .irq_mask = xilinx_intc_mask,
+ .irq_mask_ack = xilinx_intc_mask,
+ .irq_unmask = xilinx_intc_level_unmask,
+ .irq_set_type = xilinx_intc_set_type,
};
/*
* IRQ Chip edge operations
*/
-static void xilinx_intc_edge_unmask(unsigned int virq)
+static void xilinx_intc_edge_unmask(struct irq_data *d)
{
- int irq = virq_to_hw(virq);
- void *regs = get_irq_chip_data(virq);
+ int irq = virq_to_hw(d->irq);
+ void *regs = irq_data_get_irq_chip_data(d);
pr_debug("unmask: %d\n", irq);
out_be32(regs + XINTC_SIE, 1 << irq);
}
-static void xilinx_intc_edge_ack(unsigned int virq)
+static void xilinx_intc_edge_ack(struct irq_data *d)
{
- int irq = virq_to_hw(virq);
- void * regs = get_irq_chip_data(virq);
+ int irq = virq_to_hw(d->irq);
+ void * regs = irq_data_get_irq_chip_data(d);
pr_debug("ack: %d\n", irq);
out_be32(regs + XINTC_IAR, 1 << irq);
}
static struct irq_chip xilinx_intc_edge_irqchip = {
.name = "Xilinx Edge INTC",
- .mask = xilinx_intc_mask,
- .unmask = xilinx_intc_edge_unmask,
- .ack = xilinx_intc_edge_ack,
- .set_type = xilinx_intc_set_type,
+ .irq_mask = xilinx_intc_mask,
+ .irq_unmask = xilinx_intc_edge_unmask,
+ .irq_ack = xilinx_intc_edge_ack,
+ .irq_set_type = xilinx_intc_set_type,
};
/*
@@ -229,12 +229,14 @@ int xilinx_intc_get_irq(void)
*/
static void xilinx_i8259_cascade(unsigned int irq, struct irq_desc *desc)
{
+ struct irq_chip *chip = get_irq_desc_chip(desc);
unsigned int cascade_irq = i8259_irq();
+
if (cascade_irq)
generic_handle_irq(cascade_irq);
/* Let xilinx_intc end the interrupt */
- desc->chip->unmask(irq);
+ chip->irq_unmask(&desc->irq_data);
}
static void __init xilinx_i8259_setup_cascade(void)
diff --git a/arch/s390/oprofile/Makefile b/arch/s390/oprofile/Makefile
index 537b2d840e69..d698cddcfbdd 100644
--- a/arch/s390/oprofile/Makefile
+++ b/arch/s390/oprofile/Makefile
@@ -6,4 +6,4 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
oprofilefs.o oprofile_stats.o \
timer_int.o )
-oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
+oprofile-y := $(DRIVER_OBJS) init.o backtrace.o hwsampler.o
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c
new file mode 100644
index 000000000000..3d48f4db246d
--- /dev/null
+++ b/arch/s390/oprofile/hwsampler.c
@@ -0,0 +1,1256 @@
+/**
+ * arch/s390/oprofile/hwsampler.c
+ *
+ * Copyright IBM Corp. 2010
+ * Author: Heinz Graalfs <graalfs@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+#include <linux/errno.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/semaphore.h>
+#include <linux/oom.h>
+#include <linux/oprofile.h>
+
+#include <asm/lowcore.h>
+#include <asm/s390_ext.h>
+
+#include "hwsampler.h"
+
+#define MAX_NUM_SDB 511
+#define MIN_NUM_SDB 1
+
+#define ALERT_REQ_MASK 0x4000000000000000ul
+#define BUFFER_FULL_MASK 0x8000000000000000ul
+
+#define EI_IEA (1 << 31) /* invalid entry address */
+#define EI_ISE (1 << 30) /* incorrect SDBT entry */
+#define EI_PRA (1 << 29) /* program request alert */
+#define EI_SACA (1 << 23) /* sampler authorization change alert */
+#define EI_LSDA (1 << 22) /* loss of sample data alert */
+
+DECLARE_PER_CPU(struct hws_cpu_buffer, sampler_cpu_buffer);
+
+struct hws_execute_parms {
+ void *buffer;
+ signed int rc;
+};
+
+DEFINE_PER_CPU(struct hws_cpu_buffer, sampler_cpu_buffer);
+EXPORT_PER_CPU_SYMBOL(sampler_cpu_buffer);
+
+static DEFINE_MUTEX(hws_sem);
+static DEFINE_MUTEX(hws_sem_oom);
+
+static unsigned char hws_flush_all;
+static unsigned int hws_oom;
+static struct workqueue_struct *hws_wq;
+
+static unsigned int hws_state;
+enum {
+ HWS_INIT = 1,
+ HWS_DEALLOCATED,
+ HWS_STOPPED,
+ HWS_STARTED,
+ HWS_STOPPING };
+
+/* set to 1 if called by kernel during memory allocation */
+static unsigned char oom_killer_was_active;
+/* size of SDBT and SDB as of allocate API */
+static unsigned long num_sdbt = 100;
+static unsigned long num_sdb = 511;
+/* sampling interval (machine cycles) */
+static unsigned long interval;
+
+static unsigned long min_sampler_rate;
+static unsigned long max_sampler_rate;
+
+static int ssctl(void *buffer)
+{
+ int cc;
+
+ /* set in order to detect a program check */
+ cc = 1;
+
+ asm volatile(
+ "0: .insn s,0xB2870000,0(%1)\n"
+ "1: ipm %0\n"
+ " srl %0,28\n"
+ "2:\n"
+ EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
+ : "+d" (cc), "+a" (buffer)
+ : "m" (*((struct hws_ssctl_request_block *)buffer))
+ : "cc", "memory");
+
+ return cc ? -EINVAL : 0;
+}
+
+static int qsi(void *buffer)
+{
+ int cc;
+ cc = 1;
+
+ asm volatile(
+ "0: .insn s,0xB2860000,0(%1)\n"
+ "1: lhi %0,0\n"
+ "2:\n"
+ EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
+ : "=d" (cc), "+a" (buffer)
+ : "m" (*((struct hws_qsi_info_block *)buffer))
+ : "cc", "memory");
+
+ return cc ? -EINVAL : 0;
+}
+
+static void execute_qsi(void *parms)
+{
+ struct hws_execute_parms *ep = parms;
+
+ ep->rc = qsi(ep->buffer);
+}
+
+static void execute_ssctl(void *parms)
+{
+ struct hws_execute_parms *ep = parms;
+
+ ep->rc = ssctl(ep->buffer);
+}
+
+static int smp_ctl_ssctl_stop(int cpu)
+{
+ int rc;
+ struct hws_execute_parms ep;
+ struct hws_cpu_buffer *cb;
+
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+
+ cb->ssctl.es = 0;
+ cb->ssctl.cs = 0;
+
+ ep.buffer = &cb->ssctl;
+ smp_call_function_single(cpu, execute_ssctl, &ep, 1);
+ rc = ep.rc;
+ if (rc) {
+ printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu);
+ dump_stack();
+ }
+
+ ep.buffer = &cb->qsi;
+ smp_call_function_single(cpu, execute_qsi, &ep, 1);
+
+ if (cb->qsi.es || cb->qsi.cs) {
+ printk(KERN_EMERG "CPUMF sampling did not stop properly.\n");
+ dump_stack();
+ }
+
+ return rc;
+}
+
+static int smp_ctl_ssctl_deactivate(int cpu)
+{
+ int rc;
+ struct hws_execute_parms ep;
+ struct hws_cpu_buffer *cb;
+
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+
+ cb->ssctl.es = 1;
+ cb->ssctl.cs = 0;
+
+ ep.buffer = &cb->ssctl;
+ smp_call_function_single(cpu, execute_ssctl, &ep, 1);
+ rc = ep.rc;
+ if (rc)
+ printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu);
+
+ ep.buffer = &cb->qsi;
+ smp_call_function_single(cpu, execute_qsi, &ep, 1);
+
+ if (cb->qsi.cs)
+ printk(KERN_EMERG "CPUMF sampling was not set inactive.\n");
+
+ return rc;
+}
+
+static int smp_ctl_ssctl_enable_activate(int cpu, unsigned long interval)
+{
+ int rc;
+ struct hws_execute_parms ep;
+ struct hws_cpu_buffer *cb;
+
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+
+ cb->ssctl.h = 1;
+ cb->ssctl.tear = cb->first_sdbt;
+ cb->ssctl.dear = *(unsigned long *) cb->first_sdbt;
+ cb->ssctl.interval = interval;
+ cb->ssctl.es = 1;
+ cb->ssctl.cs = 1;
+
+ ep.buffer = &cb->ssctl;
+ smp_call_function_single(cpu, execute_ssctl, &ep, 1);
+ rc = ep.rc;
+ if (rc)
+ printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu);
+
+ ep.buffer = &cb->qsi;
+ smp_call_function_single(cpu, execute_qsi, &ep, 1);
+ if (ep.rc)
+ printk(KERN_ERR "hwsampler: CPU %d CPUMF QSI failed.\n", cpu);
+
+ return rc;
+}
+
+static int smp_ctl_qsi(int cpu)
+{
+ struct hws_execute_parms ep;
+ struct hws_cpu_buffer *cb;
+
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+
+ ep.buffer = &cb->qsi;
+ smp_call_function_single(cpu, execute_qsi, &ep, 1);
+
+ return ep.rc;
+}
+
+static inline unsigned long *trailer_entry_ptr(unsigned long v)
+{
+ void *ret;
+
+ ret = (void *)v;
+ ret += PAGE_SIZE;
+ ret -= sizeof(struct hws_trailer_entry);
+
+ return (unsigned long *) ret;
+}
+
+/* prototypes for external interrupt handler and worker */
+static void hws_ext_handler(unsigned int ext_int_code,
+ unsigned int param32, unsigned long param64);
+
+static void worker(struct work_struct *work);
+
+static void add_samples_to_oprofile(unsigned cpu, unsigned long *,
+ unsigned long *dear);
+
+static void init_all_cpu_buffers(void)
+{
+ int cpu;
+ struct hws_cpu_buffer *cb;
+
+ for_each_online_cpu(cpu) {
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+ memset(cb, 0, sizeof(struct hws_cpu_buffer));
+ }
+}
+
+static int is_link_entry(unsigned long *s)
+{
+ return *s & 0x1ul ? 1 : 0;
+}
+
+static unsigned long *get_next_sdbt(unsigned long *s)
+{
+ return (unsigned long *) (*s & ~0x1ul);
+}
+
+static int prepare_cpu_buffers(void)
+{
+ int cpu;
+ int rc;
+ struct hws_cpu_buffer *cb;
+
+ rc = 0;
+ for_each_online_cpu(cpu) {
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+ atomic_set(&cb->ext_params, 0);
+ cb->worker_entry = 0;
+ cb->sample_overflow = 0;
+ cb->req_alert = 0;
+ cb->incorrect_sdbt_entry = 0;
+ cb->invalid_entry_address = 0;
+ cb->loss_of_sample_data = 0;
+ cb->sample_auth_change_alert = 0;
+ cb->finish = 0;
+ cb->oom = 0;
+ cb->stop_mode = 0;
+ }
+
+ return rc;
+}
+
+/*
+ * allocate_sdbt() - allocate sampler memory
+ * @cpu: the cpu for which sampler memory is allocated
+ *
+ * A 4K page is allocated for each requested SDBT.
+ * A maximum of 511 4K pages are allocated for the SDBs in each of the SDBTs.
+ * Set ALERT_REQ mask in each SDB's trailer.
+ * Returns zero if successful, <0 otherwise.
+ */
+static int allocate_sdbt(int cpu)
+{
+ int j, k, rc;
+ unsigned long *sdbt;
+ unsigned long sdb;
+ unsigned long *tail;
+ unsigned long *trailer;
+ struct hws_cpu_buffer *cb;
+
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+
+ if (cb->first_sdbt)
+ return -EINVAL;
+
+ sdbt = NULL;
+ tail = sdbt;
+
+ for (j = 0; j < num_sdbt; j++) {
+ sdbt = (unsigned long *)get_zeroed_page(GFP_KERNEL);
+
+ mutex_lock(&hws_sem_oom);
+ /* OOM killer might have been activated */
+ barrier();
+ if (oom_killer_was_active || !sdbt) {
+ if (sdbt)
+ free_page((unsigned long)sdbt);
+
+ goto allocate_sdbt_error;
+ }
+ if (cb->first_sdbt == 0)
+ cb->first_sdbt = (unsigned long)sdbt;
+
+ /* link current page to tail of chain */
+ if (tail)
+ *tail = (unsigned long)(void *)sdbt + 1;
+
+ mutex_unlock(&hws_sem_oom);
+
+ for (k = 0; k < num_sdb; k++) {
+ /* get and set SDB page */
+ sdb = get_zeroed_page(GFP_KERNEL);
+
+ mutex_lock(&hws_sem_oom);
+ /* OOM killer might have been activated */
+ barrier();
+ if (oom_killer_was_active || !sdb) {
+ if (sdb)
+ free_page(sdb);
+
+ goto allocate_sdbt_error;
+ }
+ *sdbt = sdb;
+ trailer = trailer_entry_ptr(*sdbt);
+ *trailer = ALERT_REQ_MASK;
+ sdbt++;
+ mutex_unlock(&hws_sem_oom);
+ }
+ tail = sdbt;
+ }
+ mutex_lock(&hws_sem_oom);
+ if (oom_killer_was_active)
+ goto allocate_sdbt_error;
+
+ rc = 0;
+ if (tail)
+ *tail = (unsigned long)
+ ((void *)cb->first_sdbt) + 1;
+
+allocate_sdbt_exit:
+ mutex_unlock(&hws_sem_oom);
+ return rc;
+
+allocate_sdbt_error:
+ rc = -ENOMEM;
+ goto allocate_sdbt_exit;
+}
+
+/*
+ * deallocate_sdbt() - deallocate all sampler memory
+ *
+ * For each online CPU all SDBT trees are deallocated.
+ * Returns the number of freed pages.
+ */
+static int deallocate_sdbt(void)
+{
+ int cpu;
+ int counter;
+
+ counter = 0;
+
+ for_each_online_cpu(cpu) {
+ unsigned long start;
+ unsigned long sdbt;
+ unsigned long *curr;
+ struct hws_cpu_buffer *cb;
+
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+
+ if (!cb->first_sdbt)
+ continue;
+
+ sdbt = cb->first_sdbt;
+ curr = (unsigned long *) sdbt;
+ start = sdbt;
+
+ /* we'll free the SDBT after all SDBs are processed... */
+ while (1) {
+ if (!*curr || !sdbt)
+ break;
+
+ /* watch for link entry reset if found */
+ if (is_link_entry(curr)) {
+ curr = get_next_sdbt(curr);
+ if (sdbt)
+ free_page(sdbt);
+
+ /* we are done if we reach the start */
+ if ((unsigned long) curr == start)
+ break;
+ else
+ sdbt = (unsigned long) curr;
+ } else {
+ /* process SDB pointer */
+ if (*curr) {
+ free_page(*curr);
+ curr++;
+ }
+ }
+ counter++;
+ }
+ cb->first_sdbt = 0;
+ }
+ return counter;
+}
+
+static int start_sampling(int cpu)
+{
+ int rc;
+ struct hws_cpu_buffer *cb;
+
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+ rc = smp_ctl_ssctl_enable_activate(cpu, interval);
+ if (rc) {
+ printk(KERN_INFO "hwsampler: CPU %d ssctl failed.\n", cpu);
+ goto start_exit;
+ }
+
+ rc = -EINVAL;
+ if (!cb->qsi.es) {
+ printk(KERN_INFO "hwsampler: CPU %d ssctl not enabled.\n", cpu);
+ goto start_exit;
+ }
+
+ if (!cb->qsi.cs) {
+ printk(KERN_INFO "hwsampler: CPU %d ssctl not active.\n", cpu);
+ goto start_exit;
+ }
+
+ printk(KERN_INFO
+ "hwsampler: CPU %d, CPUMF Sampling started, interval %lu.\n",
+ cpu, interval);
+
+ rc = 0;
+
+start_exit:
+ return rc;
+}
+
+static int stop_sampling(int cpu)
+{
+ unsigned long v;
+ int rc;
+ struct hws_cpu_buffer *cb;
+
+ rc = smp_ctl_qsi(cpu);
+ WARN_ON(rc);
+
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+ if (!rc && !cb->qsi.es)
+ printk(KERN_INFO "hwsampler: CPU %d, already stopped.\n", cpu);
+
+ rc = smp_ctl_ssctl_stop(cpu);
+ if (rc) {
+ printk(KERN_INFO "hwsampler: CPU %d, ssctl stop error %d.\n",
+ cpu, rc);
+ goto stop_exit;
+ }
+
+ printk(KERN_INFO "hwsampler: CPU %d, CPUMF Sampling stopped.\n", cpu);
+
+stop_exit:
+ v = cb->req_alert;
+ if (v)
+ printk(KERN_ERR "hwsampler: CPU %d CPUMF Request alert,"
+ " count=%lu.\n", cpu, v);
+
+ v = cb->loss_of_sample_data;
+ if (v)
+ printk(KERN_ERR "hwsampler: CPU %d CPUMF Loss of sample data,"
+ " count=%lu.\n", cpu, v);
+
+ v = cb->invalid_entry_address;
+ if (v)
+ printk(KERN_ERR "hwsampler: CPU %d CPUMF Invalid entry address,"
+ " count=%lu.\n", cpu, v);
+
+ v = cb->incorrect_sdbt_entry;
+ if (v)
+ printk(KERN_ERR
+ "hwsampler: CPU %d CPUMF Incorrect SDBT address,"
+ " count=%lu.\n", cpu, v);
+
+ v = cb->sample_auth_change_alert;
+ if (v)
+ printk(KERN_ERR
+ "hwsampler: CPU %d CPUMF Sample authorization change,"
+ " count=%lu.\n", cpu, v);
+
+ return rc;
+}
+
+static int check_hardware_prerequisites(void)
+{
+ unsigned long long facility_bits[2];
+
+ memcpy(facility_bits, S390_lowcore.stfle_fac_list, 32);
+ if (!(facility_bits[1] & (1ULL << 59)))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+/*
+ * hws_oom_callback() - the OOM callback function
+ *
+ * In case the callback is invoked during memory allocation for the
+ * hw sampler, all obtained memory is deallocated and a flag is set
+ * so main sampler memory allocation can exit with a failure code.
+ * In case the callback is invoked during sampling the hw sampler
+ * is deactivated for all CPUs.
+ */
+static int hws_oom_callback(struct notifier_block *nfb,
+ unsigned long dummy, void *parm)
+{
+ unsigned long *freed;
+ int cpu;
+ struct hws_cpu_buffer *cb;
+
+ freed = parm;
+
+ mutex_lock(&hws_sem_oom);
+
+ if (hws_state == HWS_DEALLOCATED) {
+ /* during memory allocation */
+ if (oom_killer_was_active == 0) {
+ oom_killer_was_active = 1;
+ *freed += deallocate_sdbt();
+ }
+ } else {
+ int i;
+ cpu = get_cpu();
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+
+ if (!cb->oom) {
+ for_each_online_cpu(i) {
+ smp_ctl_ssctl_deactivate(i);
+ cb->oom = 1;
+ }
+ cb->finish = 1;
+
+ printk(KERN_INFO
+ "hwsampler: CPU %d, OOM notify during CPUMF Sampling.\n",
+ cpu);
+ }
+ }
+
+ mutex_unlock(&hws_sem_oom);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block hws_oom_notifier = {
+ .notifier_call = hws_oom_callback
+};
+
+static int hws_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ /* We do not have sampler space available for all possible CPUs.
+ All CPUs should be online when hw sampling is activated. */
+ return NOTIFY_BAD;
+}
+
+static struct notifier_block hws_cpu_notifier = {
+ .notifier_call = hws_cpu_callback
+};
+
+/**
+ * hwsampler_deactivate() - set hardware sampling temporarily inactive
+ * @cpu: specifies the CPU to be set inactive.
+ *
+ * Returns 0 on success, !0 on failure.
+ */
+int hwsampler_deactivate(unsigned int cpu)
+{
+ /*
+ * Deactivate hw sampling temporarily and flush the buffer
+ * by pushing all the pending samples to the oprofile buffer.
+ *
+ * This function can be called under one of the following conditions:
+ * Memory unmap, task is exiting.
+ */
+ int rc;
+ struct hws_cpu_buffer *cb;
+
+ rc = 0;
+ mutex_lock(&hws_sem);
+
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+ if (hws_state == HWS_STARTED) {
+ rc = smp_ctl_qsi(cpu);
+ WARN_ON(rc);
+ if (cb->qsi.cs) {
+ rc = smp_ctl_ssctl_deactivate(cpu);
+ if (rc) {
+ printk(KERN_INFO
+ "hwsampler: CPU %d, CPUMF Deactivation failed.\n", cpu);
+ cb->finish = 1;
+ hws_state = HWS_STOPPING;
+ } else {
+ hws_flush_all = 1;
+ /* Add work to queue to read pending samples.*/
+ queue_work_on(cpu, hws_wq, &cb->worker);
+ }
+ }
+ }
+ mutex_unlock(&hws_sem);
+
+ if (hws_wq)
+ flush_workqueue(hws_wq);
+
+ return rc;
+}
+
+/**
+ * hwsampler_activate() - activate/resume hardware sampling which was deactivated
+ * @cpu: specifies the CPU to be set active.
+ *
+ * Returns 0 on success, !0 on failure.
+ */
+int hwsampler_activate(unsigned int cpu)
+{
+ /*
+ * Re-activate hw sampling. This should be called in tandem with
+ * hwsampler_deactivate().
+ */
+ int rc;
+ struct hws_cpu_buffer *cb;
+
+ rc = 0;
+ mutex_lock(&hws_sem);
+
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+ if (hws_state == HWS_STARTED) {
+ rc = smp_ctl_qsi(cpu);
+ WARN_ON(rc);
+ if (!cb->qsi.cs) {
+ hws_flush_all = 0;
+ rc = smp_ctl_ssctl_enable_activate(cpu, interval);
+ if (rc) {
+ printk(KERN_ERR
+ "CPU %d, CPUMF activate sampling failed.\n",
+ cpu);
+ }
+ }
+ }
+
+ mutex_unlock(&hws_sem);
+
+ return rc;
+}
+
+static void hws_ext_handler(unsigned int ext_int_code,
+ unsigned int param32, unsigned long param64)
+{
+ int cpu;
+ struct hws_cpu_buffer *cb;
+
+ cpu = smp_processor_id();
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+
+ atomic_xchg(
+ &cb->ext_params,
+ atomic_read(&cb->ext_params)
+ | S390_lowcore.ext_params);
+
+ if (hws_wq)
+ queue_work(hws_wq, &cb->worker);
+}
+
+static int check_qsi_on_setup(void)
+{
+ int rc;
+ unsigned int cpu;
+ struct hws_cpu_buffer *cb;
+
+ for_each_online_cpu(cpu) {
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+ rc = smp_ctl_qsi(cpu);
+ WARN_ON(rc);
+ if (rc)
+ return -EOPNOTSUPP;
+
+ if (!cb->qsi.as) {
+ printk(KERN_INFO "hwsampler: CPUMF sampling is not authorized.\n");
+ return -EINVAL;
+ }
+
+ if (cb->qsi.es) {
+ printk(KERN_WARNING "hwsampler: CPUMF is still enabled.\n");
+ rc = smp_ctl_ssctl_stop(cpu);
+ if (rc)
+ return -EINVAL;
+
+ printk(KERN_INFO
+ "CPU %d, CPUMF Sampling stopped now.\n", cpu);
+ }
+ }
+ return 0;
+}
+
+static int check_qsi_on_start(void)
+{
+ unsigned int cpu;
+ int rc;
+ struct hws_cpu_buffer *cb;
+
+ for_each_online_cpu(cpu) {
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+ rc = smp_ctl_qsi(cpu);
+ WARN_ON(rc);
+
+ if (!cb->qsi.as)
+ return -EINVAL;
+
+ if (cb->qsi.es)
+ return -EINVAL;
+
+ if (cb->qsi.cs)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void worker_on_start(unsigned int cpu)
+{
+ struct hws_cpu_buffer *cb;
+
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+ cb->worker_entry = cb->first_sdbt;
+}
+
+static int worker_check_error(unsigned int cpu, int ext_params)
+{
+ int rc;
+ unsigned long *sdbt;
+ struct hws_cpu_buffer *cb;
+
+ rc = 0;
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+ sdbt = (unsigned long *) cb->worker_entry;
+
+ if (!sdbt || !*sdbt)
+ return -EINVAL;
+
+ if (ext_params & EI_PRA)
+ cb->req_alert++;
+
+ if (ext_params & EI_LSDA)
+ cb->loss_of_sample_data++;
+
+ if (ext_params & EI_IEA) {
+ cb->invalid_entry_address++;
+ rc = -EINVAL;
+ }
+
+ if (ext_params & EI_ISE) {
+ cb->incorrect_sdbt_entry++;
+ rc = -EINVAL;
+ }
+
+ if (ext_params & EI_SACA) {
+ cb->sample_auth_change_alert++;
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static void worker_on_finish(unsigned int cpu)
+{
+ int rc, i;
+ struct hws_cpu_buffer *cb;
+
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+
+ if (cb->finish) {
+ rc = smp_ctl_qsi(cpu);
+ WARN_ON(rc);
+ if (cb->qsi.es) {
+ printk(KERN_INFO
+ "hwsampler: CPU %d, CPUMF Stop/Deactivate sampling.\n",
+ cpu);
+ rc = smp_ctl_ssctl_stop(cpu);
+ if (rc)
+ printk(KERN_INFO
+ "hwsampler: CPU %d, CPUMF Deactivation failed.\n",
+ cpu);
+
+ for_each_online_cpu(i) {
+ if (i == cpu)
+ continue;
+ if (!cb->finish) {
+ cb->finish = 1;
+ queue_work_on(i, hws_wq,
+ &cb->worker);
+ }
+ }
+ }
+ }
+}
+
+static void worker_on_interrupt(unsigned int cpu)
+{
+ unsigned long *sdbt;
+ unsigned char done;
+ struct hws_cpu_buffer *cb;
+
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+
+ sdbt = (unsigned long *) cb->worker_entry;
+
+ done = 0;
+ /* do not proceed if stop was entered,
+ * forget the buffers not yet processed */
+ while (!done && !cb->stop_mode) {
+ unsigned long *trailer;
+ struct hws_trailer_entry *te;
+ unsigned long *dear = 0;
+
+ trailer = trailer_entry_ptr(*sdbt);
+ /* leave loop if no more work to do */
+ if (!(*trailer & BUFFER_FULL_MASK)) {
+ done = 1;
+ if (!hws_flush_all)
+ continue;
+ }
+
+ te = (struct hws_trailer_entry *)trailer;
+ cb->sample_overflow += te->overflow;
+
+ add_samples_to_oprofile(cpu, sdbt, dear);
+
+ /* reset trailer */
+ xchg((unsigned char *) te, 0x40);
+
+ /* advance to next sdb slot in current sdbt */
+ sdbt++;
+ /* in case link bit is set use address w/o link bit */
+ if (is_link_entry(sdbt))
+ sdbt = get_next_sdbt(sdbt);
+
+ cb->worker_entry = (unsigned long)sdbt;
+ }
+}
+
+static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt,
+ unsigned long *dear)
+{
+ struct hws_data_entry *sample_data_ptr;
+ unsigned long *trailer;
+
+ trailer = trailer_entry_ptr(*sdbt);
+ if (dear) {
+ if (dear > trailer)
+ return;
+ trailer = dear;
+ }
+
+ sample_data_ptr = (struct hws_data_entry *)(*sdbt);
+
+ while ((unsigned long *)sample_data_ptr < trailer) {
+ struct pt_regs *regs = NULL;
+ struct task_struct *tsk = NULL;
+
+ /*
+ * Check sampling mode, 1 indicates basic (=customer) sampling
+ * mode.
+ */
+ if (sample_data_ptr->def != 1) {
+ /* sample slot is not yet written */
+ break;
+ } else {
+ /* make sure we don't use it twice,
+ * the next time the sampler will set it again */
+ sample_data_ptr->def = 0;
+ }
+
+ /* Get pt_regs. */
+ if (sample_data_ptr->P == 1) {
+ /* userspace sample */
+ unsigned int pid = sample_data_ptr->prim_asn;
+ rcu_read_lock();
+ tsk = pid_task(find_vpid(pid), PIDTYPE_PID);
+ if (tsk)
+ regs = task_pt_regs(tsk);
+ rcu_read_unlock();
+ } else {
+ /* kernelspace sample */
+ regs = task_pt_regs(current);
+ }
+
+ mutex_lock(&hws_sem);
+ oprofile_add_ext_hw_sample(sample_data_ptr->ia, regs, 0,
+ !sample_data_ptr->P, tsk);
+ mutex_unlock(&hws_sem);
+
+ sample_data_ptr++;
+ }
+}
+
+static void worker(struct work_struct *work)
+{
+ unsigned int cpu;
+ int ext_params;
+ struct hws_cpu_buffer *cb;
+
+ cb = container_of(work, struct hws_cpu_buffer, worker);
+ cpu = smp_processor_id();
+ ext_params = atomic_xchg(&cb->ext_params, 0);
+
+ if (!cb->worker_entry)
+ worker_on_start(cpu);
+
+ if (worker_check_error(cpu, ext_params))
+ return;
+
+ if (!cb->finish)
+ worker_on_interrupt(cpu);
+
+ if (cb->finish)
+ worker_on_finish(cpu);
+}
+
+/**
+ * hwsampler_allocate() - allocate memory for the hardware sampler
+ * @sdbt: number of SDBTs per online CPU (must be > 0)
+ * @sdb: number of SDBs per SDBT (minimum 1, maximum 511)
+ *
+ * Returns 0 on success, !0 on failure.
+ */
+int hwsampler_allocate(unsigned long sdbt, unsigned long sdb)
+{
+ int cpu, rc;
+ mutex_lock(&hws_sem);
+
+ rc = -EINVAL;
+ if (hws_state != HWS_DEALLOCATED)
+ goto allocate_exit;
+
+ if (sdbt < 1)
+ goto allocate_exit;
+
+ if (sdb > MAX_NUM_SDB || sdb < MIN_NUM_SDB)
+ goto allocate_exit;
+
+ num_sdbt = sdbt;
+ num_sdb = sdb;
+
+ oom_killer_was_active = 0;
+ register_oom_notifier(&hws_oom_notifier);
+
+ for_each_online_cpu(cpu) {
+ if (allocate_sdbt(cpu)) {
+ unregister_oom_notifier(&hws_oom_notifier);
+ goto allocate_error;
+ }
+ }
+ unregister_oom_notifier(&hws_oom_notifier);
+ if (oom_killer_was_active)
+ goto allocate_error;
+
+ hws_state = HWS_STOPPED;
+ rc = 0;
+
+allocate_exit:
+ mutex_unlock(&hws_sem);
+ return rc;
+
+allocate_error:
+ rc = -ENOMEM;
+ printk(KERN_ERR "hwsampler: CPUMF Memory allocation failed.\n");
+ goto allocate_exit;
+}
+
+/**
+ * hwsampler_deallocate() - deallocate hardware sampler memory
+ *
+ * Returns 0 on success, !0 on failure.
+ */
+int hwsampler_deallocate(void)
+{
+ int rc;
+
+ mutex_lock(&hws_sem);
+
+ rc = -EINVAL;
+ if (hws_state != HWS_STOPPED)
+ goto deallocate_exit;
+
+ smp_ctl_clear_bit(0, 5); /* set bit 58 CR0 off */
+ deallocate_sdbt();
+
+ hws_state = HWS_DEALLOCATED;
+ rc = 0;
+
+deallocate_exit:
+ mutex_unlock(&hws_sem);
+
+ return rc;
+}
+
+long hwsampler_query_min_interval(void)
+{
+ if (min_sampler_rate)
+ return min_sampler_rate;
+ else
+ return -EINVAL;
+}
+
+long hwsampler_query_max_interval(void)
+{
+ if (max_sampler_rate)
+ return max_sampler_rate;
+ else
+ return -EINVAL;
+}
+
+unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu)
+{
+ struct hws_cpu_buffer *cb;
+
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+
+ return cb->sample_overflow;
+}
+
+int hwsampler_setup(void)
+{
+ int rc;
+ int cpu;
+ struct hws_cpu_buffer *cb;
+
+ mutex_lock(&hws_sem);
+
+ rc = -EINVAL;
+ if (hws_state)
+ goto setup_exit;
+
+ hws_state = HWS_INIT;
+
+ init_all_cpu_buffers();
+
+ rc = check_hardware_prerequisites();
+ if (rc)
+ goto setup_exit;
+
+ rc = check_qsi_on_setup();
+ if (rc)
+ goto setup_exit;
+
+ rc = -EINVAL;
+ hws_wq = create_workqueue("hwsampler");
+ if (!hws_wq)
+ goto setup_exit;
+
+ register_cpu_notifier(&hws_cpu_notifier);
+
+ for_each_online_cpu(cpu) {
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+ INIT_WORK(&cb->worker, worker);
+ rc = smp_ctl_qsi(cpu);
+ WARN_ON(rc);
+ if (min_sampler_rate != cb->qsi.min_sampl_rate) {
+ if (min_sampler_rate) {
+ printk(KERN_WARNING
+ "hwsampler: different min sampler rate values.\n");
+ if (min_sampler_rate < cb->qsi.min_sampl_rate)
+ min_sampler_rate =
+ cb->qsi.min_sampl_rate;
+ } else
+ min_sampler_rate = cb->qsi.min_sampl_rate;
+ }
+ if (max_sampler_rate != cb->qsi.max_sampl_rate) {
+ if (max_sampler_rate) {
+ printk(KERN_WARNING
+ "hwsampler: different max sampler rate values.\n");
+ if (max_sampler_rate > cb->qsi.max_sampl_rate)
+ max_sampler_rate =
+ cb->qsi.max_sampl_rate;
+ } else
+ max_sampler_rate = cb->qsi.max_sampl_rate;
+ }
+ }
+ register_external_interrupt(0x1407, hws_ext_handler);
+
+ hws_state = HWS_DEALLOCATED;
+ rc = 0;
+
+setup_exit:
+ mutex_unlock(&hws_sem);
+ return rc;
+}
+
+int hwsampler_shutdown(void)
+{
+ int rc;
+
+ mutex_lock(&hws_sem);
+
+ rc = -EINVAL;
+ if (hws_state == HWS_DEALLOCATED || hws_state == HWS_STOPPED) {
+ mutex_unlock(&hws_sem);
+
+ if (hws_wq)
+ flush_workqueue(hws_wq);
+
+ mutex_lock(&hws_sem);
+
+ if (hws_state == HWS_STOPPED) {
+ smp_ctl_clear_bit(0, 5); /* set bit 58 CR0 off */
+ deallocate_sdbt();
+ }
+ if (hws_wq) {
+ destroy_workqueue(hws_wq);
+ hws_wq = NULL;
+ }
+
+ unregister_external_interrupt(0x1407, hws_ext_handler);
+ hws_state = HWS_INIT;
+ rc = 0;
+ }
+ mutex_unlock(&hws_sem);
+
+ unregister_cpu_notifier(&hws_cpu_notifier);
+
+ return rc;
+}
+
+/**
+ * hwsampler_start_all() - start hardware sampling on all online CPUs
+ * @rate: specifies the used interval when samples are taken
+ *
+ * Returns 0 on success, !0 on failure.
+ */
+int hwsampler_start_all(unsigned long rate)
+{
+ int rc, cpu;
+
+ mutex_lock(&hws_sem);
+
+ hws_oom = 0;
+
+ rc = -EINVAL;
+ if (hws_state != HWS_STOPPED)
+ goto start_all_exit;
+
+ interval = rate;
+
+ /* fail if rate is not valid */
+ if (interval < min_sampler_rate || interval > max_sampler_rate)
+ goto start_all_exit;
+
+ rc = check_qsi_on_start();
+ if (rc)
+ goto start_all_exit;
+
+ rc = prepare_cpu_buffers();
+ if (rc)
+ goto start_all_exit;
+
+ for_each_online_cpu(cpu) {
+ rc = start_sampling(cpu);
+ if (rc)
+ break;
+ }
+ if (rc) {
+ for_each_online_cpu(cpu) {
+ stop_sampling(cpu);
+ }
+ goto start_all_exit;
+ }
+ hws_state = HWS_STARTED;
+ rc = 0;
+
+start_all_exit:
+ mutex_unlock(&hws_sem);
+
+ if (rc)
+ return rc;
+
+ register_oom_notifier(&hws_oom_notifier);
+ hws_oom = 1;
+ hws_flush_all = 0;
+ /* now let them in, 1407 CPUMF external interrupts */
+ smp_ctl_set_bit(0, 5); /* set CR0 bit 58 */
+
+ return 0;
+}
+
+/**
+ * hwsampler_stop_all() - stop hardware sampling on all online CPUs
+ *
+ * Returns 0 on success, !0 on failure.
+ */
+int hwsampler_stop_all(void)
+{
+ int tmp_rc, rc, cpu;
+ struct hws_cpu_buffer *cb;
+
+ mutex_lock(&hws_sem);
+
+ rc = 0;
+ if (hws_state == HWS_INIT) {
+ mutex_unlock(&hws_sem);
+ return rc;
+ }
+ hws_state = HWS_STOPPING;
+ mutex_unlock(&hws_sem);
+
+ for_each_online_cpu(cpu) {
+ cb = &per_cpu(sampler_cpu_buffer, cpu);
+ cb->stop_mode = 1;
+ tmp_rc = stop_sampling(cpu);
+ if (tmp_rc)
+ rc = tmp_rc;
+ }
+
+ if (hws_wq)
+ flush_workqueue(hws_wq);
+
+ mutex_lock(&hws_sem);
+ if (hws_oom) {
+ unregister_oom_notifier(&hws_oom_notifier);
+ hws_oom = 0;
+ }
+ hws_state = HWS_STOPPED;
+ mutex_unlock(&hws_sem);
+
+ return rc;
+}
diff --git a/arch/s390/oprofile/hwsampler.h b/arch/s390/oprofile/hwsampler.h
new file mode 100644
index 000000000000..8c72b59316b5
--- /dev/null
+++ b/arch/s390/oprofile/hwsampler.h
@@ -0,0 +1,113 @@
+/*
+ * CPUMF HW sampler functions and internal structures
+ *
+ * Copyright IBM Corp. 2010
+ * Author(s): Heinz Graalfs <graalfs@de.ibm.com>
+ */
+
+#ifndef HWSAMPLER_H_
+#define HWSAMPLER_H_
+
+#include <linux/workqueue.h>
+
+struct hws_qsi_info_block /* QUERY SAMPLING information block */
+{ /* Bit(s) */
+ unsigned int b0_13:14; /* 0-13: zeros */
+ unsigned int as:1; /* 14: sampling authorisation control*/
+ unsigned int b15_21:7; /* 15-21: zeros */
+ unsigned int es:1; /* 22: sampling enable control */
+ unsigned int b23_29:7; /* 23-29: zeros */
+ unsigned int cs:1; /* 30: sampling activation control */
+ unsigned int:1; /* 31: reserved */
+ unsigned int bsdes:16; /* 4-5: size of sampling entry */
+ unsigned int:16; /* 6-7: reserved */
+ unsigned long min_sampl_rate; /* 8-15: minimum sampling interval */
+ unsigned long max_sampl_rate; /* 16-23: maximum sampling interval*/
+ unsigned long tear; /* 24-31: TEAR contents */
+ unsigned long dear; /* 32-39: DEAR contents */
+ unsigned int rsvrd0; /* 40-43: reserved */
+ unsigned int cpu_speed; /* 44-47: CPU speed */
+ unsigned long long rsvrd1; /* 48-55: reserved */
+ unsigned long long rsvrd2; /* 56-63: reserved */
+};
+
+struct hws_ssctl_request_block /* SET SAMPLING CONTROLS req block */
+{ /* bytes 0 - 7 Bit(s) */
+ unsigned int s:1; /* 0: maximum buffer indicator */
+ unsigned int h:1; /* 1: part. level reserved for VM use*/
+ unsigned long b2_53:52; /* 2-53: zeros */
+ unsigned int es:1; /* 54: sampling enable control */
+ unsigned int b55_61:7; /* 55-61: - zeros */
+ unsigned int cs:1; /* 62: sampling activation control */
+ unsigned int b63:1; /* 63: zero */
+ unsigned long interval; /* 8-15: sampling interval */
+ unsigned long tear; /* 16-23: TEAR contents */
+ unsigned long dear; /* 24-31: DEAR contents */
+ /* 32-63: */
+ unsigned long rsvrd1; /* reserved */
+ unsigned long rsvrd2; /* reserved */
+ unsigned long rsvrd3; /* reserved */
+ unsigned long rsvrd4; /* reserved */
+};
+
+struct hws_cpu_buffer {
+ unsigned long first_sdbt; /* @ of 1st SDB-Table for this CP*/
+ unsigned long worker_entry;
+ unsigned long sample_overflow; /* taken from SDB ... */
+ struct hws_qsi_info_block qsi;
+ struct hws_ssctl_request_block ssctl;
+ struct work_struct worker;
+ atomic_t ext_params;
+ unsigned long req_alert;
+ unsigned long loss_of_sample_data;
+ unsigned long invalid_entry_address;
+ unsigned long incorrect_sdbt_entry;
+ unsigned long sample_auth_change_alert;
+ unsigned int finish:1;
+ unsigned int oom:1;
+ unsigned int stop_mode:1;
+};
+
+struct hws_data_entry {
+ unsigned int def:16; /* 0-15 Data Entry Format */
+ unsigned int R:4; /* 16-19 reserved */
+ unsigned int U:4; /* 20-23 Number of unique instruct. */
+ unsigned int z:2; /* 24-25 zeros */
+ unsigned int T:1; /* 26 PSW DAT mode */
+ unsigned int W:1; /* 27 PSW wait state */
+ unsigned int P:1; /* 28 PSW Problem state */
+ unsigned int AS:2; /* 29-30 PSW address-space control */
+ unsigned int I:1; /* 31 entry valid or invalid */
+ unsigned int:16;
+ unsigned int prim_asn:16; /* primary ASN */
+ unsigned long long ia; /* Instruction Address */
+ unsigned long long lpp; /* Logical-Partition Program Param. */
+ unsigned long long vpp; /* Virtual-Machine Program Param. */
+};
+
+struct hws_trailer_entry {
+ unsigned int f:1; /* 0 - Block Full Indicator */
+ unsigned int a:1; /* 1 - Alert request control */
+ unsigned long:62; /* 2 - 63: Reserved */
+ unsigned long overflow; /* 64 - sample Overflow count */
+ unsigned long timestamp; /* 16 - time-stamp */
+ unsigned long timestamp1; /* */
+ unsigned long reserved1; /* 32 -Reserved */
+ unsigned long reserved2; /* */
+ unsigned long progusage1; /* 48 - reserved for programming use */
+ unsigned long progusage2; /* */
+};
+
+int hwsampler_setup(void);
+int hwsampler_shutdown(void);
+int hwsampler_allocate(unsigned long sdbt, unsigned long sdb);
+int hwsampler_deallocate(void);
+long hwsampler_query_min_interval(void);
+long hwsampler_query_max_interval(void);
+int hwsampler_start_all(unsigned long interval);
+int hwsampler_stop_all(void);
+int hwsampler_deactivate(unsigned int cpu);
+int hwsampler_activate(unsigned int cpu);
+unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu);
+
+#endif /*HWSAMPLER_H_*/
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index 7a995113b918..16c76def4a9d 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -4,23 +4,182 @@
* S390 Version
* Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Thomas Spatzier (tspat@de.ibm.com)
+ * Author(s): Mahesh Salgaonkar (mahesh@linux.vnet.ibm.com)
+ * Author(s): Heinz Graalfs (graalfs@linux.vnet.ibm.com)
*
- * @remark Copyright 2002 OProfile authors
+ * @remark Copyright 2002-2011 OProfile authors
*/
#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/errno.h>
+#include <linux/oprofile.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+
+#include "../../../drivers/oprofile/oprof.h"
+#include "hwsampler.h"
+
+#define DEFAULT_INTERVAL 4096
+
+#define DEFAULT_SDBT_BLOCKS 1
+#define DEFAULT_SDB_BLOCKS 511
+
+static unsigned long oprofile_hw_interval = DEFAULT_INTERVAL;
+static unsigned long oprofile_min_interval;
+static unsigned long oprofile_max_interval;
+
+static unsigned long oprofile_sdbt_blocks = DEFAULT_SDBT_BLOCKS;
+static unsigned long oprofile_sdb_blocks = DEFAULT_SDB_BLOCKS;
+static int hwsampler_file;
+static int hwsampler_running; /* start_mutex must be held to change */
+
+static struct oprofile_operations timer_ops;
extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth);
-int __init oprofile_arch_init(struct oprofile_operations* ops)
+static int oprofile_hwsampler_start(void)
+{
+ int retval;
+
+ hwsampler_running = hwsampler_file;
+
+ if (!hwsampler_running)
+ return timer_ops.start();
+
+ retval = hwsampler_allocate(oprofile_sdbt_blocks, oprofile_sdb_blocks);
+ if (retval)
+ return retval;
+
+ retval = hwsampler_start_all(oprofile_hw_interval);
+ if (retval)
+ hwsampler_deallocate();
+
+ return retval;
+}
+
+static void oprofile_hwsampler_stop(void)
+{
+ if (!hwsampler_running) {
+ timer_ops.stop();
+ return;
+ }
+
+ hwsampler_stop_all();
+ hwsampler_deallocate();
+ return;
+}
+
+static ssize_t hwsampler_read(struct file *file, char __user *buf,
+ size_t count, loff_t *offset)
+{
+ return oprofilefs_ulong_to_user(hwsampler_file, buf, count, offset);
+}
+
+static ssize_t hwsampler_write(struct file *file, char const __user *buf,
+ size_t count, loff_t *offset)
+{
+ unsigned long val;
+ int retval;
+
+ if (*offset)
+ return -EINVAL;
+
+ retval = oprofilefs_ulong_from_user(&val, buf, count);
+ if (retval)
+ return retval;
+
+ if (oprofile_started)
+ /*
+ * safe to do without locking as we set
+ * hwsampler_running in start() when start_mutex is
+ * held
+ */
+ return -EBUSY;
+
+ hwsampler_file = val;
+
+ return count;
+}
+
+static const struct file_operations hwsampler_fops = {
+ .read = hwsampler_read,
+ .write = hwsampler_write,
+};
+
+static int oprofile_create_hwsampling_files(struct super_block *sb,
+ struct dentry *root)
+{
+ struct dentry *hw_dir;
+
+ /* reinitialize default values */
+ hwsampler_file = 1;
+
+ hw_dir = oprofilefs_mkdir(sb, root, "hwsampling");
+ if (!hw_dir)
+ return -EINVAL;
+
+ oprofilefs_create_file(sb, hw_dir, "hwsampler", &hwsampler_fops);
+ oprofilefs_create_ulong(sb, hw_dir, "hw_interval",
+ &oprofile_hw_interval);
+ oprofilefs_create_ro_ulong(sb, hw_dir, "hw_min_interval",
+ &oprofile_min_interval);
+ oprofilefs_create_ro_ulong(sb, hw_dir, "hw_max_interval",
+ &oprofile_max_interval);
+ oprofilefs_create_ulong(sb, hw_dir, "hw_sdbt_blocks",
+ &oprofile_sdbt_blocks);
+
+ return 0;
+}
+
+static int oprofile_hwsampler_init(struct oprofile_operations *ops)
+{
+ if (hwsampler_setup())
+ return -ENODEV;
+
+ /*
+ * create hwsampler files only if hwsampler_setup() succeeds.
+ */
+ oprofile_min_interval = hwsampler_query_min_interval();
+ if (oprofile_min_interval < 0) {
+ oprofile_min_interval = 0;
+ return -ENODEV;
+ }
+ oprofile_max_interval = hwsampler_query_max_interval();
+ if (oprofile_max_interval < 0) {
+ oprofile_max_interval = 0;
+ return -ENODEV;
+ }
+
+ if (oprofile_timer_init(ops))
+ return -ENODEV;
+
+ printk(KERN_INFO "oprofile: using hardware sampling\n");
+
+ memcpy(&timer_ops, ops, sizeof(timer_ops));
+
+ ops->start = oprofile_hwsampler_start;
+ ops->stop = oprofile_hwsampler_stop;
+ ops->create_files = oprofile_create_hwsampling_files;
+
+ return 0;
+}
+
+static void oprofile_hwsampler_exit(void)
+{
+ oprofile_timer_exit();
+ hwsampler_shutdown();
+}
+
+int __init oprofile_arch_init(struct oprofile_operations *ops)
{
ops->backtrace = s390_backtrace;
- return -ENODEV;
+
+ return oprofile_hwsampler_init(ops);
}
void oprofile_arch_exit(void)
{
+ oprofile_hwsampler_exit();
}
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 3b71d2190de1..e44480ce2ea8 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -725,11 +725,7 @@ static struct platform_device camera_devices[] = {
/* FSI */
static struct sh_fsi_platform_info fsi_info = {
- .portb_flags = SH_FSI_BRS_INV |
- SH_FSI_OUT_SLAVE_MODE |
- SH_FSI_IN_SLAVE_MODE |
- SH_FSI_OFMT(I2S) |
- SH_FSI_IFMT(I2S),
+ .portb_flags = SH_FSI_BRS_INV,
};
static struct resource fsi_resources[] = {
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index 527679394a25..c8bcf6a19b55 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -286,11 +286,7 @@ static struct platform_device ceu1_device = {
/* FSI */
/* change J20, J21, J22 pin to 1-2 connection to use slave mode */
static struct sh_fsi_platform_info fsi_info = {
- .porta_flags = SH_FSI_BRS_INV |
- SH_FSI_OUT_SLAVE_MODE |
- SH_FSI_IN_SLAVE_MODE |
- SH_FSI_OFMT(PCM) |
- SH_FSI_IFMT(PCM),
+ .porta_flags = SH_FSI_BRS_INV,
};
static struct resource fsi_resources[] = {
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index ed47e6e1747f..d161e939df62 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -326,7 +326,7 @@ config X86_PPRO_FENCE
Old PentiumPro multiprocessor systems had errata that could cause
memory operations to violate the x86 ordering standard in rare cases.
Enabling this option will attempt to work around some (but not all)
- occurances of this problem, at the cost of much heavier spinlock and
+ occurrences of this problem, at the cost of much heavier spinlock and
memory barrier operations.
If unsure, say n here. Even distro kernels should think twice before
@@ -366,7 +366,7 @@ config X86_INTEL_USERCOPY
config X86_USE_PPRO_CHECKSUM
def_bool y
- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
config X86_USE_3DNOW
def_bool y
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 8fe2a4966b7a..adcf794b22e2 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1346,7 +1346,7 @@ _zero_cipher_left_decrypt:
and $15, %r13 # %r13 = arg4 (mod 16)
je _multiple_of_16_bytes_decrypt
- # Handle the last <16 byte block seperately
+ # Handle the last <16 byte block separately
paddd ONE(%rip), %xmm0 # increment CNT to get Yn
movdqa SHUF_MASK(%rip), %xmm10
@@ -1355,7 +1355,7 @@ _zero_cipher_left_decrypt:
ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Yn)
sub $16, %r11
add %r13, %r11
- movdqu (%arg3,%r11,1), %xmm1 # recieve the last <16 byte block
+ movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
lea SHIFT_MASK+16(%rip), %r12
sub %r13, %r12
# adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
@@ -1607,7 +1607,7 @@ _zero_cipher_left_encrypt:
and $15, %r13 # %r13 = arg4 (mod 16)
je _multiple_of_16_bytes_encrypt
- # Handle the last <16 Byte block seperately
+ # Handle the last <16 Byte block separately
paddd ONE(%rip), %xmm0 # INCR CNT to get Yn
movdqa SHUF_MASK(%rip), %xmm10
PSHUFB_XMM %xmm10, %xmm0
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 62f084478f7e..4e12668711e5 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -71,7 +71,7 @@ static inline void set_page_memtype(struct page *pg, unsigned long memtype) { }
* Read/Write : ReadOnly, ReadWrite
* Presence : NotPresent
*
- * Within a catagory, the attributes are mutually exclusive.
+ * Within a category, the attributes are mutually exclusive.
*
* The implementation of this API will take care of various aspects that
* are associated with changing such attributes, such as:
diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h
index 518bbbb9ee59..fe2cc6e105fa 100644
--- a/arch/x86/include/asm/kdebug.h
+++ b/arch/x86/include/asm/kdebug.h
@@ -26,7 +26,7 @@ extern void die(const char *, struct pt_regs *,long);
extern int __must_check __die(const char *, struct pt_regs *, long);
extern void show_registers(struct pt_regs *regs);
extern void show_trace(struct task_struct *t, struct pt_regs *regs,
- unsigned long *sp);
+ unsigned long *sp, unsigned long bp);
extern void __show_regs(struct pt_regs *regs, int all);
extern void show_regs(struct pt_regs *regs);
extern unsigned long oops_begin(void);
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index 07f46016d3ff..4886a68f267e 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -29,8 +29,8 @@ void arch_trigger_all_cpu_backtrace(void);
* external nmis, because the local ones are more frequent.
*
* Also setup some default high/normal/low settings for
- * subsystems to registers with. Using 4 bits to seperate
- * the priorities. This can go alot higher if needed be.
+ * subsystems to register with. Using 4 bits to separate
+ * the priorities. This can go a lot higher if need be.
*/
#define NMI_LOCAL_SHIFT 16 /* randomly picked */
diff --git a/arch/x86/include/asm/nops.h b/arch/x86/include/asm/nops.h
index 6d8723a766cc..af788496020b 100644
--- a/arch/x86/include/asm/nops.h
+++ b/arch/x86/include/asm/nops.h
@@ -38,7 +38,7 @@
#define K8_NOP8 K8_NOP4 K8_NOP4
/* K7 nops
- uses eax dependencies (arbitary choice)
+ uses eax dependencies (arbitrary choice)
1: nop
2: movl %eax,%eax
3: leal (,%eax,1),%eax
diff --git a/arch/x86/include/asm/olpc.h b/arch/x86/include/asm/olpc.h
index f482010350fb..5ca6801b75f3 100644
--- a/arch/x86/include/asm/olpc.h
+++ b/arch/x86/include/asm/olpc.h
@@ -20,7 +20,7 @@ extern struct olpc_platform_t olpc_platform_info;
/*
* OLPC board IDs contain the major build number within the mask 0x0ff0,
- * and the minor build number withing 0x000f. Pre-builds have a minor
+ * and the minor build number within 0x000f. Pre-builds have a minor
* number less than 8, and normal builds start at 8. For example, 0x0B10
* is a PreB1, and 0x0C18 is a C1.
*/
diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h
index cc29086e30cd..56fd9e3abbda 100644
--- a/arch/x86/include/asm/perf_event_p4.h
+++ b/arch/x86/include/asm/perf_event_p4.h
@@ -1,5 +1,5 @@
/*
- * Netburst Perfomance Events (P4, old Xeon)
+ * Netburst Performance Events (P4, old Xeon)
*/
#ifndef PERF_EVENT_P4_H
@@ -9,7 +9,7 @@
#include <linux/bitops.h>
/*
- * NetBurst has perfomance MSRs shared between
+ * NetBurst has performance MSRs shared between
* threads if HT is turned on, ie for both logical
* processors (mem: in turn in Atom with HT support
* perf-MSRs are not shared and every thread has its
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index 94b979d1b58d..effff47a3c82 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -69,8 +69,6 @@ static inline void native_pmd_clear(pmd_t *pmd)
static inline void pud_clear(pud_t *pudp)
{
- unsigned long pgd;
-
set_pud(pudp, __pud(0));
/*
@@ -79,13 +77,10 @@ static inline void pud_clear(pud_t *pudp)
* section 8.1: in PAE mode we explicitly have to flush the
* TLB via cr3 if the top-level pgd is changed...
*
- * Make sure the pud entry we're updating is within the
- * current pgd to avoid unnecessary TLB flushes.
+ * Currently all places where pud_clear() is called either have
+ * flush_tlb_mm() followed or don't need TLB flush (x86_64 code or
+ * pud_clear_bad()), so we don't need TLB flush here.
*/
- pgd = read_cr3();
- if (__pa(pudp) >= pgd && __pa(pudp) <
- (pgd + sizeof(pgd_t)*PTRS_PER_PGD))
- write_cr3(pgd);
}
#ifdef CONFIG_SMP
diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h
index 7a3e836eb2a9..a898a2b6e10c 100644
--- a/arch/x86/include/asm/processor-flags.h
+++ b/arch/x86/include/asm/processor-flags.h
@@ -7,7 +7,7 @@
*/
#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
-#define X86_EFLAGS_AF 0x00000010 /* Auxillary carry Flag */
+#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
diff --git a/arch/x86/include/asm/ptrace-abi.h b/arch/x86/include/asm/ptrace-abi.h
index 52b098a6eebb..7b0a55a88851 100644
--- a/arch/x86/include/asm/ptrace-abi.h
+++ b/arch/x86/include/asm/ptrace-abi.h
@@ -31,7 +31,7 @@
#define R12 24
#define RBP 32
#define RBX 40
-/* arguments: interrupts/non tracing syscalls only save upto here*/
+/* arguments: interrupts/non tracing syscalls only save up to here*/
#define R11 48
#define R10 56
#define R9 64
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 78cd1ea94500..1babf8adecdf 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -73,7 +73,7 @@ struct pt_regs {
unsigned long r12;
unsigned long rbp;
unsigned long rbx;
-/* arguments: non interrupts/non tracing syscalls only save upto here*/
+/* arguments: non interrupts/non tracing syscalls only save up to here*/
unsigned long r11;
unsigned long r10;
unsigned long r9;
@@ -103,7 +103,7 @@ struct pt_regs {
unsigned long r12;
unsigned long bp;
unsigned long bx;
-/* arguments: non interrupts/non tracing syscalls only save upto here*/
+/* arguments: non interrupts/non tracing syscalls only save up to here*/
unsigned long r11;
unsigned long r10;
unsigned long r9;
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index 52b5c7ed3608..d7e89c83645d 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -47,7 +47,7 @@ struct stacktrace_ops {
};
void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
- unsigned long *stack,
+ unsigned long *stack, unsigned long bp,
const struct stacktrace_ops *ops, void *data);
#ifdef CONFIG_X86_32
@@ -86,11 +86,11 @@ stack_frame(struct task_struct *task, struct pt_regs *regs)
extern void
show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
- unsigned long *stack, char *log_lvl);
+ unsigned long *stack, unsigned long bp, char *log_lvl);
extern void
show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
- unsigned long *sp, char *log_lvl);
+ unsigned long *sp, unsigned long bp, char *log_lvl);
extern unsigned int code_bytes;
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 1ca132fc0d03..83e2efd181e2 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -35,7 +35,7 @@ static inline cycles_t get_cycles(void)
static __always_inline cycles_t vget_cycles(void)
{
/*
- * We only do VDSOs on TSC capable CPUs, so this shouldnt
+ * We only do VDSOs on TSC capable CPUs, so this shouldn't
* access boot_cpu_data (which is not VDSO-safe):
*/
#ifndef CONFIG_X86_TSC
diff --git a/arch/x86/include/asm/xen/interface.h b/arch/x86/include/asm/xen/interface.h
index 1c10c88ee4e1..5d4922ad4b9b 100644
--- a/arch/x86/include/asm/xen/interface.h
+++ b/arch/x86/include/asm/xen/interface.h
@@ -86,7 +86,7 @@ DEFINE_GUEST_HANDLE(void);
* The privilege level specifies which modes may enter a trap via a software
* interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate
* privilege levels as follows:
- * Level == 0: Noone may enter
+ * Level == 0: No one may enter
* Level == 1: Kernel may enter
* Level == 2: Kernel may enter
* Level == 3: Everyone may enter
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 4db35544de73..4a234677e213 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -199,7 +199,7 @@ void *text_poke_early(void *addr, const void *opcode, size_t len);
/* Replace instructions with better alternatives for this CPU type.
This runs before SMP is initialized to avoid SMP problems with
- self modifying code. This implies that assymetric systems where
+ self modifying code. This implies that asymmetric systems where
APs have less capabilities than the boot processor are not handled.
Tough. Make sure you disable such features by hand. */
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 7b1e8e10b89c..86d1ad4962a7 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -73,7 +73,7 @@ static u32 __init allocate_aperture(void)
/*
* using 512M as goal, in case kexec will load kernel_big
* that will do the on position decompress, and could overlap with
- * that positon with gart that is used.
+ * that position with gart that is used.
* sequende:
* kernel_small
* ==> kexec (with kdump trigger path or previous doesn't shutdown gart)
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 4b5ebd26f565..180ca240e03c 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1886,7 +1886,7 @@ void disable_IO_APIC(void)
*
* With interrupt-remapping, for now we will use virtual wire A mode,
* as virtual wire B is little complex (need to configure both
- * IOAPIC RTE aswell as interrupt-remapping table entry).
+ * IOAPIC RTE as well as interrupt-remapping table entry).
* As this gets called during crash dump, keep this simple for now.
*/
if (ioapic_i8259.pin != -1 && !intr_remapping_enabled) {
@@ -2905,7 +2905,7 @@ void __init setup_IO_APIC(void)
}
/*
- * Called after all the initialization is done. If we didnt find any
+ * Called after all the initialization is done. If we didn't find any
* APIC bugs then we can allow the modify fast path
*/
@@ -3983,7 +3983,7 @@ int mp_find_ioapic_pin(int ioapic, u32 gsi)
static __init int bad_ioapic(unsigned long address)
{
if (nr_ioapics >= MAX_IO_APICS) {
- printk(KERN_WARNING "WARING: Max # of I/O APICs (%d) exceeded "
+ printk(KERN_WARNING "WARNING: Max # of I/O APICs (%d) exceeded "
"(found %d), skipping\n", MAX_IO_APICS, nr_ioapics);
return 1;
}
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 9079926a5b18..0b4be431c620 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -66,7 +66,7 @@
* 1.5: Fix segment register reloading (in case of bad segments saved
* across BIOS call).
* Stephen Rothwell
- * 1.6: Cope with complier/assembler differences.
+ * 1.6: Cope with compiler/assembler differences.
* Only try to turn off the first display device.
* Fix OOPS at power off with no APM BIOS by Jan Echternach
* <echter@informatik.uni-rostock.de>
diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c
index 03162dac6271..cf48cdd6907d 100644
--- a/arch/x86/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c
@@ -444,7 +444,7 @@ static int __cpuinit longhaul_get_ranges(void)
return -EINVAL;
}
/* Get max multiplier - as we always did.
- * Longhaul MSR is usefull only when voltage scaling is enabled.
+ * Longhaul MSR is useful only when voltage scaling is enabled.
* C3 is booting at max anyway. */
maxmult = mult;
/* Get min multiplier */
@@ -1011,7 +1011,7 @@ static void __exit longhaul_exit(void)
* trigger frequency transition in some cases. */
module_param(disable_acpi_c3, int, 0644);
MODULE_PARM_DESC(disable_acpi_c3, "Don't use ACPI C3 support");
-/* Change CPU voltage with frequency. Very usefull to save
+/* Change CPU voltage with frequency. Very useful to save
* power, but most VIA C3 processors aren't supporting it. */
module_param(scale_voltage, int, 0644);
MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor");
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 1ae4133e6bd6..2368e38327b3 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1275,7 +1275,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
if (powernow_k8_cpu_init_acpi(data)) {
/*
- * Use the PSB BIOS structure. This is only availabe on
+ * Use the PSB BIOS structure. This is only available on
* an UP version, and is deprecated by AMD.
*/
if (num_online_cpus() != 1) {
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c b/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c
index 8abd869baabf..91bc25b67bc1 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c
@@ -292,7 +292,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
result = speedstep_smi_ownership();
if (result) {
- dprintk("fails in aquiring ownership of a SMI interface.\n");
+ dprintk("fails in acquiring ownership of a SMI interface.\n");
return -EINVAL;
}
@@ -360,7 +360,7 @@ static int speedstep_resume(struct cpufreq_policy *policy)
int result = speedstep_smi_ownership();
if (result)
- dprintk("fails in re-aquiring ownership of a SMI interface.\n");
+ dprintk("fails in re-acquiring ownership of a SMI interface.\n");
return result;
}
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index a77971979564..0ed633c5048b 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -32,7 +32,7 @@ static void inject_mce(struct mce *m)
{
struct mce *i = &per_cpu(injectm, m->extcpu);
- /* Make sure noone reads partially written injectm */
+ /* Make sure no one reads partially written injectm */
i->finished = 0;
mb();
m->finished = 0;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index d916183b7f9c..ab1122998dba 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -881,7 +881,7 @@ reset:
* Check if the address reported by the CPU is in a format we can parse.
* It would be possible to add code for most other cases, but all would
* be somewhat complicated (e.g. segment offset would require an instruction
- * parser). So only support physical addresses upto page granuality for now.
+ * parser). So only support physical addresses up to page granularity for now.
*/
static int mce_usable_address(struct mce *m)
{
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 9f27228ceffd..a71efcdbb092 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -1,6 +1,6 @@
/*
* This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
- * because MTRRs can span upto 40 bits (36bits on most modern x86)
+ * because MTRRs can span up to 40 bits (36bits on most modern x86)
*/
#define DEBUG
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 26604188aa49..87eab4a27dfc 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -178,8 +178,6 @@ struct cpu_hw_events {
*/
#define INTEL_UEVENT_CONSTRAINT(c, n) \
EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
-#define PEBS_EVENT_CONSTRAINT(c, n) \
- INTEL_UEVENT_CONSTRAINT(c, n)
#define EVENT_CONSTRAINT_END \
EVENT_CONSTRAINT(0, 0, 0)
@@ -1111,7 +1109,7 @@ static int x86_pmu_add(struct perf_event *event, int flags)
/*
* If group events scheduling transaction was started,
- * skip the schedulability test here, it will be peformed
+ * skip the schedulability test here, it will be performed
* at commit time (->commit_txn) as a whole
*/
if (cpuc->group_flag & PERF_EVENT_TXN)
@@ -1792,7 +1790,7 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
perf_callchain_store(entry, regs->ip);
- dump_trace(NULL, regs, NULL, &backtrace_ops, entry);
+ dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
}
#ifdef CONFIG_COMPAT
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index b95c66ae4a2a..bab491b8ee25 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -362,87 +362,69 @@ static int intel_pmu_drain_bts_buffer(void)
* PEBS
*/
static struct event_constraint intel_core2_pebs_event_constraints[] = {
- PEBS_EVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
- PEBS_EVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
- PEBS_EVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
- PEBS_EVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */
- INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
+ INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
+ INTEL_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
+ INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
+ INTEL_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */
+ INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
EVENT_CONSTRAINT_END
};
static struct event_constraint intel_atom_pebs_event_constraints[] = {
- PEBS_EVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
- PEBS_EVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
- INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
+ INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
+ INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
+ INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
EVENT_CONSTRAINT_END
};
static struct event_constraint intel_nehalem_pebs_event_constraints[] = {
- INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
- PEBS_EVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
- INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INST_RETIRED.ANY */
- INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
- PEBS_EVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
- INTEL_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */
- PEBS_EVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
- INTEL_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
+ INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
+ INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
+ INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INST_RETIRED.ANY */
+ INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
+ INTEL_UEVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
+ INTEL_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */
+ INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
+ INTEL_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
EVENT_CONSTRAINT_END
};
static struct event_constraint intel_westmere_pebs_event_constraints[] = {
- INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
- PEBS_EVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
- INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INSTR_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */
-
- INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */
- PEBS_EVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
- INTEL_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
+ INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
+ INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
+ INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INSTR_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */
+ INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
+ INTEL_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
EVENT_CONSTRAINT_END
};
static struct event_constraint intel_snb_pebs_events[] = {
- PEBS_EVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
- PEBS_EVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
- PEBS_EVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
- PEBS_EVENT_CONSTRAINT(0x01c4, 0xf), /* BR_INST_RETIRED.CONDITIONAL */
- PEBS_EVENT_CONSTRAINT(0x02c4, 0xf), /* BR_INST_RETIRED.NEAR_CALL */
- PEBS_EVENT_CONSTRAINT(0x04c4, 0xf), /* BR_INST_RETIRED.ALL_BRANCHES */
- PEBS_EVENT_CONSTRAINT(0x08c4, 0xf), /* BR_INST_RETIRED.NEAR_RETURN */
- PEBS_EVENT_CONSTRAINT(0x10c4, 0xf), /* BR_INST_RETIRED.NOT_TAKEN */
- PEBS_EVENT_CONSTRAINT(0x20c4, 0xf), /* BR_INST_RETIRED.NEAR_TAKEN */
- PEBS_EVENT_CONSTRAINT(0x40c4, 0xf), /* BR_INST_RETIRED.FAR_BRANCH */
- PEBS_EVENT_CONSTRAINT(0x01c5, 0xf), /* BR_MISP_RETIRED.CONDITIONAL */
- PEBS_EVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
- PEBS_EVENT_CONSTRAINT(0x04c5, 0xf), /* BR_MISP_RETIRED.ALL_BRANCHES */
- PEBS_EVENT_CONSTRAINT(0x10c5, 0xf), /* BR_MISP_RETIRED.NOT_TAKEN */
- PEBS_EVENT_CONSTRAINT(0x20c5, 0xf), /* BR_MISP_RETIRED.TAKEN */
- PEBS_EVENT_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
- PEBS_EVENT_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORE */
- PEBS_EVENT_CONSTRAINT(0x11d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_LOADS */
- PEBS_EVENT_CONSTRAINT(0x12d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_STORES */
- PEBS_EVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOP_RETIRED.LOCK_LOADS */
- PEBS_EVENT_CONSTRAINT(0x22d0, 0xf), /* MEM_UOP_RETIRED.LOCK_STORES */
- PEBS_EVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_LOADS */
- PEBS_EVENT_CONSTRAINT(0x42d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_STORES */
- PEBS_EVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOP_RETIRED.ANY_LOADS */
- PEBS_EVENT_CONSTRAINT(0x82d0, 0xf), /* MEM_UOP_RETIRED.ANY_STORES */
- PEBS_EVENT_CONSTRAINT(0x01d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.L1_HIT */
- PEBS_EVENT_CONSTRAINT(0x02d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.L2_HIT */
- PEBS_EVENT_CONSTRAINT(0x04d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.LLC_HIT */
- PEBS_EVENT_CONSTRAINT(0x40d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.HIT_LFB */
- PEBS_EVENT_CONSTRAINT(0x01d2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS */
- PEBS_EVENT_CONSTRAINT(0x02d2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT */
- PEBS_EVENT_CONSTRAINT(0x04d2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM */
- PEBS_EVENT_CONSTRAINT(0x08d2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE */
- PEBS_EVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */
+ INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
+ INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
+ INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
+ INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.* */
+ INTEL_UEVENT_CONSTRAINT(0x11d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_LOADS */
+ INTEL_UEVENT_CONSTRAINT(0x12d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_STORES */
+ INTEL_UEVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOP_RETIRED.LOCK_LOADS */
+ INTEL_UEVENT_CONSTRAINT(0x22d0, 0xf), /* MEM_UOP_RETIRED.LOCK_STORES */
+ INTEL_UEVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_LOADS */
+ INTEL_UEVENT_CONSTRAINT(0x42d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_STORES */
+ INTEL_UEVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOP_RETIRED.ANY_LOADS */
+ INTEL_UEVENT_CONSTRAINT(0x82d0, 0xf), /* MEM_UOP_RETIRED.ANY_STORES */
+ INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
+ INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */
EVENT_CONSTRAINT_END
};
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 3769ac822f96..0811f5ebfba6 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -1,5 +1,5 @@
/*
- * Netburst Perfomance Events (P4, old Xeon)
+ * Netburst Performance Events (P4, old Xeon)
*
* Copyright (C) 2010 Parallels, Inc., Cyrill Gorcunov <gorcunov@openvz.org>
* Copyright (C) 2010 Intel Corporation, Lin Ming <ming.m.lin@intel.com>
@@ -679,7 +679,7 @@ static int p4_validate_raw_event(struct perf_event *event)
*/
/*
- * if an event is shared accross the logical threads
+ * if an event is shared across the logical threads
* the user needs special permissions to be able to use it
*/
if (p4_ht_active() && p4_event_bind_map[v].shared) {
@@ -790,13 +790,13 @@ static void p4_pmu_disable_pebs(void)
*
* It's still allowed that two threads setup same cache
* events so we can't simply clear metrics until we knew
- * noone is depending on us, so we need kind of counter
+ * no one is depending on us, so we need kind of counter
* for "ReplayEvent" users.
*
* What is more complex -- RAW events, if user (for some
* reason) will pass some cache event metric with improper
* event opcode -- it's fine from hardware point of view
- * but completely nonsence from "meaning" of such action.
+ * but completely nonsense from "meaning" of such action.
*
* So at moment let leave metrics turned on forever -- it's
* ok for now but need to be revisited!
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 227b0448960d..d22d0c4edcfd 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -86,7 +86,7 @@ static void __init vmware_platform_setup(void)
}
/*
- * While checking the dmi string infomation, just checking the product
+ * While checking the dmi string information, just checking the product
* serial key should be enough, as this will always have a VMware
* specific string when running under VMware hypervisor.
*/
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 220a1c11cfde..999e2793590b 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -175,21 +175,21 @@ static const struct stacktrace_ops print_trace_ops = {
void
show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
- unsigned long *stack, char *log_lvl)
+ unsigned long *stack, unsigned long bp, char *log_lvl)
{
printk("%sCall Trace:\n", log_lvl);
- dump_trace(task, regs, stack, &print_trace_ops, log_lvl);
+ dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
}
void show_trace(struct task_struct *task, struct pt_regs *regs,
- unsigned long *stack)
+ unsigned long *stack, unsigned long bp)
{
- show_trace_log_lvl(task, regs, stack, "");
+ show_trace_log_lvl(task, regs, stack, bp, "");
}
void show_stack(struct task_struct *task, unsigned long *sp)
{
- show_stack_log_lvl(task, NULL, sp, "");
+ show_stack_log_lvl(task, NULL, sp, 0, "");
}
/*
@@ -197,14 +197,16 @@ void show_stack(struct task_struct *task, unsigned long *sp)
*/
void dump_stack(void)
{
+ unsigned long bp;
unsigned long stack;
+ bp = stack_frame(current, NULL);
printk("Pid: %d, comm: %.20s %s %s %.*s\n",
current->pid, current->comm, print_tainted(),
init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
init_utsname()->version);
- show_trace(NULL, NULL, &stack);
+ show_trace(NULL, NULL, &stack, bp);
}
EXPORT_SYMBOL(dump_stack);
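
Note: the hunk above threads an explicit frame-pointer argument (bp) through show_trace()/show_trace_log_lvl() and dump_stack(). A sketch of a hypothetical caller using the extended prototypes shown in this diff; example_backtrace() is invented for illustration, and print_trace_ops is the ops table defined earlier in dumpstack.c.

static void example_backtrace(struct pt_regs *regs)
{
	/* capture the current frame pointer explicitly ... */
	unsigned long bp = stack_frame(current, NULL);

	show_trace(current, regs,
		   (unsigned long *)kernel_stack_pointer(regs), bp);

	/* ... or pass 0 and let dump_trace() call stack_frame() itself */
	dump_trace(current, regs, NULL, 0, &print_trace_ops, "");
}
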
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index 74cc1eda384b..3b97a80ce329 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -17,12 +17,11 @@
#include <asm/stacktrace.h>
-void dump_trace(struct task_struct *task,
- struct pt_regs *regs, unsigned long *stack,
+void dump_trace(struct task_struct *task, struct pt_regs *regs,
+ unsigned long *stack, unsigned long bp,
const struct stacktrace_ops *ops, void *data)
{
int graph = 0;
- unsigned long bp;
if (!task)
task = current;
@@ -35,7 +34,9 @@ void dump_trace(struct task_struct *task,
stack = (unsigned long *)task->thread.sp;
}
- bp = stack_frame(task, regs);
+ if (!bp)
+ bp = stack_frame(task, regs);
+
for (;;) {
struct thread_info *context;
@@ -55,7 +56,7 @@ EXPORT_SYMBOL(dump_trace);
void
show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
- unsigned long *sp, char *log_lvl)
+ unsigned long *sp, unsigned long bp, char *log_lvl)
{
unsigned long *stack;
int i;
@@ -77,7 +78,7 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
touch_nmi_watchdog();
}
printk(KERN_CONT "\n");
- show_trace_log_lvl(task, regs, sp, log_lvl);
+ show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}
@@ -102,7 +103,7 @@ void show_registers(struct pt_regs *regs)
u8 *ip;
printk(KERN_EMERG "Stack:\n");
- show_stack_log_lvl(NULL, regs, &regs->sp, KERN_EMERG);
+ show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
printk(KERN_EMERG "Code: ");
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index a6b6fcf7f0ae..e71c98d3c0d2 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -139,8 +139,8 @@ fixup_bp_irq_link(unsigned long bp, unsigned long *stack,
* severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
*/
-void dump_trace(struct task_struct *task,
- struct pt_regs *regs, unsigned long *stack,
+void dump_trace(struct task_struct *task, struct pt_regs *regs,
+ unsigned long *stack, unsigned long bp,
const struct stacktrace_ops *ops, void *data)
{
const unsigned cpu = get_cpu();
@@ -150,7 +150,6 @@ void dump_trace(struct task_struct *task,
struct thread_info *tinfo;
int graph = 0;
unsigned long dummy;
- unsigned long bp;
if (!task)
task = current;
@@ -161,7 +160,8 @@ void dump_trace(struct task_struct *task,
stack = (unsigned long *)task->thread.sp;
}
- bp = stack_frame(task, regs);
+ if (!bp)
+ bp = stack_frame(task, regs);
/*
* Print function call entries in all stacks, starting at the
* current stack address. If the stacks consist of nested
@@ -225,7 +225,7 @@ EXPORT_SYMBOL(dump_trace);
void
show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
- unsigned long *sp, char *log_lvl)
+ unsigned long *sp, unsigned long bp, char *log_lvl)
{
unsigned long *irq_stack_end;
unsigned long *irq_stack;
@@ -269,7 +269,7 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
preempt_enable();
printk(KERN_CONT "\n");
- show_trace_log_lvl(task, regs, sp, log_lvl);
+ show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}
void show_registers(struct pt_regs *regs)
@@ -298,7 +298,7 @@ void show_registers(struct pt_regs *regs)
printk(KERN_EMERG "Stack:\n");
show_stack_log_lvl(NULL, regs, (unsigned long *)sp,
- KERN_EMERG);
+ 0, KERN_EMERG);
printk(KERN_EMERG "Code: ");
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index b72b4a6466a9..8a445a0c989e 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -18,7 +18,7 @@
* A note on terminology:
* - top of stack: Architecture defined interrupt frame from SS to RIP
* at the top of the kernel process stack.
- * - partial stack frame: partially saved registers upto R11.
+ * - partial stack frame: partially saved registers up to R11.
* - full stack frame: Like partial stack frame, but all register saved.
*
* Some macro usage:
@@ -422,7 +422,7 @@ ENTRY(ret_from_fork)
END(ret_from_fork)
/*
- * System call entry. Upto 6 arguments in registers are supported.
+ * System call entry. Up to 6 arguments in registers are supported.
*
* SYSCALL does not save anything on the stack and does not change the
* stack pointer.
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index e60c38cc0eed..12aff2537682 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -145,7 +145,7 @@ EXPORT_SYMBOL_GPL(fpu_finit);
* The _current_ task is using the FPU for the first time
* so initialize it and set the mxcsr to its default
* value at reset if we support XMM instructions and then
- * remeber the current task has used the FPU.
+ * remember the current task has used the FPU.
*/
int init_fpu(struct task_struct *tsk)
{
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 9974d21048fd..72090705a656 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -172,7 +172,7 @@ asmlinkage void do_softirq(void)
call_on_stack(__do_softirq, isp);
/*
- * Shouldnt happen, we returned above if in_interrupt():
+ * Shouldn't happen, we returned above if in_interrupt():
*/
WARN_ON_ONCE(softirq_count());
}
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 7c64c420a9f6..dba0b36941a5 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -278,7 +278,7 @@ static int hw_break_release_slot(int breakno)
pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
if (dbg_release_bp_slot(*pevent))
/*
- * The debugger is responisble for handing the retry on
+ * The debugger is responsible for handing the retry on
* remove failure.
*/
return -1;
diff --git a/arch/x86/kernel/mca_32.c b/arch/x86/kernel/mca_32.c
index 63eaf6596233..177183cbb6ae 100644
--- a/arch/x86/kernel/mca_32.c
+++ b/arch/x86/kernel/mca_32.c
@@ -259,7 +259,7 @@ static int __init mca_init(void)
/*
* WARNING: Be careful when making changes here. Putting an adapter
* and the motherboard simultaneously into setup mode may result in
- * damage to chips (according to The Indispensible PC Hardware Book
+ * damage to chips (according to The Indispensable PC Hardware Book
* by Hans-Peter Messmer). Also, we disable system interrupts (so
* that we are not disturbed in the middle of this).
*/
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 01b0f6d06451..6f789a887c06 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -883,7 +883,7 @@ static int __init update_mp_table(void)
if (!mpc_new_phys) {
unsigned char old, new;
- /* check if we can change the postion */
+ /* check if we can change the position */
mpc->checksum = 0;
old = mpf_checksum((unsigned char *)mpc, mpc->length);
mpc->checksum = 0xff;
@@ -892,7 +892,7 @@ static int __init update_mp_table(void)
printk(KERN_INFO "mpc is readonly, please try alloc_mptable instead\n");
return 0;
}
- printk(KERN_INFO "use in-positon replacing\n");
+ printk(KERN_INFO "use in-position replacing\n");
} else {
mpf->physptr = mpc_new_phys;
mpc_new = phys_to_virt(mpc_new_phys);
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index f56a117cef68..e8c33a302006 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -1279,7 +1279,7 @@ static int __init calgary_bus_has_devices(int bus, unsigned short pci_dev)
if (pci_dev == PCI_DEVICE_ID_IBM_CALIOC2) {
/*
- * FIXME: properly scan for devices accross the
+ * FIXME: properly scan for devices across the
* PCI-to-PCI bridge on every CalIOC2 port.
*/
return 1;
@@ -1295,7 +1295,7 @@ static int __init calgary_bus_has_devices(int bus, unsigned short pci_dev)
/*
* calgary_init_bitmap_from_tce_table():
- * Funtion for kdump case. In the second/kdump kernel initialize
+ * Function for kdump case. In the second/kdump kernel initialize
* the bitmap based on the tce table entries obtained from first kernel
*/
static void calgary_init_bitmap_from_tce_table(struct iommu_table *tbl)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 99fa3adf0141..d46cbe46b7ab 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -87,7 +87,7 @@ void exit_thread(void)
void show_regs(struct pt_regs *regs)
{
show_registers(regs);
- show_trace(NULL, regs, (unsigned long *)kernel_stack_pointer(regs));
+ show_trace(NULL, regs, (unsigned long *)kernel_stack_pointer(regs), 0);
}
void show_regs_common(void)
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 938c8e10a19a..6515733a289d 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -73,7 +73,7 @@ static const struct stacktrace_ops save_stack_ops_nosched = {
*/
void save_stack_trace(struct stack_trace *trace)
{
- dump_trace(current, NULL, NULL, &save_stack_ops, trace);
+ dump_trace(current, NULL, NULL, 0, &save_stack_ops, trace);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
@@ -81,14 +81,14 @@ EXPORT_SYMBOL_GPL(save_stack_trace);
void save_stack_trace_regs(struct stack_trace *trace, struct pt_regs *regs)
{
- dump_trace(current, regs, NULL, &save_stack_ops, trace);
+ dump_trace(current, regs, NULL, 0, &save_stack_ops, trace);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
- dump_trace(tsk, NULL, NULL, &save_stack_ops_nosched, trace);
+ dump_trace(tsk, NULL, NULL, 0, &save_stack_ops_nosched, trace);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index 58de45ee08b6..7977f0cfe339 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -166,7 +166,7 @@ static void enable_step(struct task_struct *child, bool block)
* Make sure block stepping (BTF) is not enabled unless it should be.
* Note that we don't try to worry about any is_setting_trap_flag()
* instructions after the first when using block stepping.
- * So noone should try to use debugger block stepping in a program
+ * So no one should try to use debugger block stepping in a program
* that uses user-mode single stepping itself.
*/
if (enable_single_step(child) && block) {
diff --git a/arch/x86/kernel/topology.c b/arch/x86/kernel/topology.c
index 7e4515957a1c..8927486a4649 100644
--- a/arch/x86/kernel/topology.c
+++ b/arch/x86/kernel/topology.c
@@ -39,7 +39,7 @@ int __ref arch_register_cpu(int num)
/*
* CPU0 cannot be offlined due to several
* restrictions and assumptions in kernel. This basically
- * doesnt add a control file, one cannot attempt to offline
+ * doesn't add a control file, one cannot attempt to offline
* BSP.
*
* Also certain PCI quirks require not to enable hotplug control
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index ffe5755caa8b..9335bf7dd2e7 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -427,7 +427,7 @@ unsigned long native_calibrate_tsc(void)
* the delta to the previous read. We keep track of the min
* and max values of that delta. The delta is mostly defined
* by the IO time of the PIT access, so we can detect when a
- * SMI/SMM disturbance happend between the two reads. If the
+ * SMI/SMM disturbance happened between the two reads. If the
* maximum time is significantly larger than the minimum time,
* then we discard the result and have another try.
*
@@ -900,7 +900,7 @@ static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
* timer based, instead of loop based, we don't block the boot
* process while this longer calibration is done.
*
- * If there are any calibration anomolies (too many SMIs, etc),
+ * If there are any calibration anomalies (too many SMIs, etc),
* or the refined calibration is off by 1% of the fast early
* calibration, we throw out the new calibration and use the
* early calibration.
diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
index 0edefc19a113..b9242bacbe59 100644
--- a/arch/x86/kernel/verify_cpu.S
+++ b/arch/x86/kernel/verify_cpu.S
@@ -18,7 +18,7 @@
* This file is expected to run in 32bit code. Currently:
*
* arch/x86/boot/compressed/head_64.S: Boot cpu verification
- * arch/x86/kernel/trampoline_64.S: secondary processor verfication
+ * arch/x86/kernel/trampoline_64.S: secondary processor verification
* arch/x86/kernel/head_32.S: processor startup
*
* verify_cpu, returns the status of longmode and SSE in register %eax.
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 547128546cc3..a3911343976b 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -53,7 +53,7 @@ void __sanitize_i387_state(struct task_struct *tsk)
/*
* None of the feature bits are in init state. So nothing else
- * to do for us, as the memory layout is upto date.
+ * to do for us, as the memory layout is up to date.
*/
if ((xstate_bv & pcntxt_mask) == pcntxt_mask)
return;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 751405097d8c..c6397795d865 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -346,7 +346,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
return;
/*
- * we call mmu_set_spte() with host_writable = true beacuse that
+ * we call mmu_set_spte() with host_writable = true because that
* vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
*/
mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
diff --git a/arch/x86/kvm/timer.c b/arch/x86/kvm/timer.c
index fc7a101c4a35..abd86e865be3 100644
--- a/arch/x86/kvm/timer.c
+++ b/arch/x86/kvm/timer.c
@@ -25,7 +25,7 @@ static int __kvm_timer_fn(struct kvm_vcpu *vcpu, struct kvm_timer *ktimer)
/*
* There is a race window between reading and incrementing, but we do
- * not care about potentially loosing timer events in the !reinject
+ * not care about potentially losing timer events in the !reinject
* case anyway. Note: KVM_REQ_PENDING_TIMER is implicitly checked
* in vcpu_enter_guest.
*/
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f1e4025f1ae2..58f517b59645 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1031,7 +1031,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
/*
* Special case: close write to TSC within 5 seconds of
* another CPU is interpreted as an attempt to synchronize
- * The 5 seconds is to accomodate host load / swapping as
+ * The 5 seconds is to accommodate host load / swapping as
* well as any reset of TSC during the boot process.
*
* In that case, for a reliable TSC, we can match TSC offsets,
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index b9ec1c74943c..1cd608973ce5 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -397,7 +397,7 @@ static void lguest_load_tr_desc(void)
* instead we just use the real "cpuid" instruction. Then I pretty much turned
* off feature bits until the Guest booted. (Don't say that: you'll damage
* lguest sales!) Shut up, inner voice! (Hey, just pointing out that this is
- * hardly future proof.) Noone's listening! They don't like you anyway,
+ * hardly future proof.) No one's listening! They don't like you anyway,
* parenthetic weirdo!
*
* Replacing the cpuid so we can turn features off is great for the kernel, but
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index a460158b5ac5..99e482615195 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -117,7 +117,7 @@ ENDPROC(bad_from_user)
* rdx count
*
* Output:
- * eax uncopied bytes or 0 if successfull.
+ * eax uncopied bytes or 0 if successful.
*/
ENTRY(copy_user_generic_unrolled)
CFI_STARTPROC
diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
index f0dba36578ea..fb903b758da8 100644
--- a/arch/x86/lib/csum-copy_64.S
+++ b/arch/x86/lib/csum-copy_64.S
@@ -1,6 +1,6 @@
/*
- * Copyright 2002,2003 Andi Kleen, SuSE Labs.
- *
+ * Copyright 2002, 2003 Andi Kleen, SuSE Labs.
+ *
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details. No warranty for anything given at all.
@@ -11,82 +11,82 @@
/*
* Checksum copy with exception handling.
- * On exceptions src_err_ptr or dst_err_ptr is set to -EFAULT and the
+ * On exceptions src_err_ptr or dst_err_ptr is set to -EFAULT and the
* destination is zeroed.
- *
+ *
* Input
* rdi source
* rsi destination
* edx len (32bit)
- * ecx sum (32bit)
+ * ecx sum (32bit)
* r8 src_err_ptr (int)
* r9 dst_err_ptr (int)
*
* Output
* eax 64bit sum. undefined in case of exception.
- *
- * Wrappers need to take care of valid exception sum and zeroing.
+ *
+ * Wrappers need to take care of valid exception sum and zeroing.
* They also should align source or destination to 8 bytes.
*/
.macro source
10:
- .section __ex_table,"a"
+ .section __ex_table, "a"
.align 8
- .quad 10b,.Lbad_source
+ .quad 10b, .Lbad_source
.previous
.endm
-
+
.macro dest
20:
- .section __ex_table,"a"
+ .section __ex_table, "a"
.align 8
- .quad 20b,.Lbad_dest
+ .quad 20b, .Lbad_dest
.previous
.endm
-
+
.macro ignore L=.Lignore
30:
- .section __ex_table,"a"
+ .section __ex_table, "a"
.align 8
- .quad 30b,\L
+ .quad 30b, \L
.previous
.endm
-
-
+
+
ENTRY(csum_partial_copy_generic)
CFI_STARTPROC
- cmpl $3*64,%edx
- jle .Lignore
+ cmpl $3*64, %edx
+ jle .Lignore
-.Lignore:
- subq $7*8,%rsp
+.Lignore:
+ subq $7*8, %rsp
CFI_ADJUST_CFA_OFFSET 7*8
- movq %rbx,2*8(%rsp)
+ movq %rbx, 2*8(%rsp)
CFI_REL_OFFSET rbx, 2*8
- movq %r12,3*8(%rsp)
+ movq %r12, 3*8(%rsp)
CFI_REL_OFFSET r12, 3*8
- movq %r14,4*8(%rsp)
+ movq %r14, 4*8(%rsp)
CFI_REL_OFFSET r14, 4*8
- movq %r13,5*8(%rsp)
+ movq %r13, 5*8(%rsp)
CFI_REL_OFFSET r13, 5*8
- movq %rbp,6*8(%rsp)
+ movq %rbp, 6*8(%rsp)
CFI_REL_OFFSET rbp, 6*8
- movq %r8,(%rsp)
- movq %r9,1*8(%rsp)
-
- movl %ecx,%eax
- movl %edx,%ecx
+ movq %r8, (%rsp)
+ movq %r9, 1*8(%rsp)
- xorl %r9d,%r9d
- movq %rcx,%r12
+ movl %ecx, %eax
+ movl %edx, %ecx
- shrq $6,%r12
- jz .Lhandle_tail /* < 64 */
+ xorl %r9d, %r9d
+ movq %rcx, %r12
+
+ shrq $6, %r12
+ jz .Lhandle_tail /* < 64 */
clc
-
+
/* main loop. clear in 64 byte blocks */
/* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
/* r11: temp3, rdx: temp4, r12 loopcnt */
@@ -94,156 +94,156 @@ ENTRY(csum_partial_copy_generic)
.p2align 4
.Lloop:
source
- movq (%rdi),%rbx
+ movq (%rdi), %rbx
source
- movq 8(%rdi),%r8
+ movq 8(%rdi), %r8
source
- movq 16(%rdi),%r11
+ movq 16(%rdi), %r11
source
- movq 24(%rdi),%rdx
+ movq 24(%rdi), %rdx
source
- movq 32(%rdi),%r10
+ movq 32(%rdi), %r10
source
- movq 40(%rdi),%rbp
+ movq 40(%rdi), %rbp
source
- movq 48(%rdi),%r14
+ movq 48(%rdi), %r14
source
- movq 56(%rdi),%r13
-
+ movq 56(%rdi), %r13
+
ignore 2f
prefetcht0 5*64(%rdi)
-2:
- adcq %rbx,%rax
- adcq %r8,%rax
- adcq %r11,%rax
- adcq %rdx,%rax
- adcq %r10,%rax
- adcq %rbp,%rax
- adcq %r14,%rax
- adcq %r13,%rax
+2:
+ adcq %rbx, %rax
+ adcq %r8, %rax
+ adcq %r11, %rax
+ adcq %rdx, %rax
+ adcq %r10, %rax
+ adcq %rbp, %rax
+ adcq %r14, %rax
+ adcq %r13, %rax
decl %r12d
-
+
dest
- movq %rbx,(%rsi)
+ movq %rbx, (%rsi)
dest
- movq %r8,8(%rsi)
+ movq %r8, 8(%rsi)
dest
- movq %r11,16(%rsi)
+ movq %r11, 16(%rsi)
dest
- movq %rdx,24(%rsi)
+ movq %rdx, 24(%rsi)
dest
- movq %r10,32(%rsi)
+ movq %r10, 32(%rsi)
dest
- movq %rbp,40(%rsi)
+ movq %rbp, 40(%rsi)
dest
- movq %r14,48(%rsi)
+ movq %r14, 48(%rsi)
dest
- movq %r13,56(%rsi)
-
+ movq %r13, 56(%rsi)
+
3:
-
- leaq 64(%rdi),%rdi
- leaq 64(%rsi),%rsi
- jnz .Lloop
+ leaq 64(%rdi), %rdi
+ leaq 64(%rsi), %rsi
- adcq %r9,%rax
+ jnz .Lloop
- /* do last upto 56 bytes */
+ adcq %r9, %rax
+
+ /* do last up to 56 bytes */
.Lhandle_tail:
/* ecx: count */
- movl %ecx,%r10d
- andl $63,%ecx
- shrl $3,%ecx
- jz .Lfold
+ movl %ecx, %r10d
+ andl $63, %ecx
+ shrl $3, %ecx
+ jz .Lfold
clc
.p2align 4
-.Lloop_8:
+.Lloop_8:
source
- movq (%rdi),%rbx
- adcq %rbx,%rax
+ movq (%rdi), %rbx
+ adcq %rbx, %rax
decl %ecx
dest
- movq %rbx,(%rsi)
- leaq 8(%rsi),%rsi /* preserve carry */
- leaq 8(%rdi),%rdi
+ movq %rbx, (%rsi)
+ leaq 8(%rsi), %rsi /* preserve carry */
+ leaq 8(%rdi), %rdi
jnz .Lloop_8
- adcq %r9,%rax /* add in carry */
+ adcq %r9, %rax /* add in carry */
.Lfold:
/* reduce checksum to 32bits */
- movl %eax,%ebx
- shrq $32,%rax
- addl %ebx,%eax
- adcl %r9d,%eax
+ movl %eax, %ebx
+ shrq $32, %rax
+ addl %ebx, %eax
+ adcl %r9d, %eax
- /* do last upto 6 bytes */
+ /* do last up to 6 bytes */
.Lhandle_7:
- movl %r10d,%ecx
- andl $7,%ecx
- shrl $1,%ecx
+ movl %r10d, %ecx
+ andl $7, %ecx
+ shrl $1, %ecx
jz .Lhandle_1
- movl $2,%edx
- xorl %ebx,%ebx
- clc
+ movl $2, %edx
+ xorl %ebx, %ebx
+ clc
.p2align 4
-.Lloop_1:
+.Lloop_1:
source
- movw (%rdi),%bx
- adcl %ebx,%eax
+ movw (%rdi), %bx
+ adcl %ebx, %eax
decl %ecx
dest
- movw %bx,(%rsi)
- leaq 2(%rdi),%rdi
- leaq 2(%rsi),%rsi
+ movw %bx, (%rsi)
+ leaq 2(%rdi), %rdi
+ leaq 2(%rsi), %rsi
jnz .Lloop_1
- adcl %r9d,%eax /* add in carry */
-
+ adcl %r9d, %eax /* add in carry */
+
/* handle last odd byte */
.Lhandle_1:
- testl $1,%r10d
+ testl $1, %r10d
jz .Lende
- xorl %ebx,%ebx
+ xorl %ebx, %ebx
source
- movb (%rdi),%bl
+ movb (%rdi), %bl
dest
- movb %bl,(%rsi)
- addl %ebx,%eax
- adcl %r9d,%eax /* carry */
-
+ movb %bl, (%rsi)
+ addl %ebx, %eax
+ adcl %r9d, %eax /* carry */
+
CFI_REMEMBER_STATE
.Lende:
- movq 2*8(%rsp),%rbx
+ movq 2*8(%rsp), %rbx
CFI_RESTORE rbx
- movq 3*8(%rsp),%r12
+ movq 3*8(%rsp), %r12
CFI_RESTORE r12
- movq 4*8(%rsp),%r14
+ movq 4*8(%rsp), %r14
CFI_RESTORE r14
- movq 5*8(%rsp),%r13
+ movq 5*8(%rsp), %r13
CFI_RESTORE r13
- movq 6*8(%rsp),%rbp
+ movq 6*8(%rsp), %rbp
CFI_RESTORE rbp
- addq $7*8,%rsp
+ addq $7*8, %rsp
CFI_ADJUST_CFA_OFFSET -7*8
ret
CFI_RESTORE_STATE
/* Exception handlers. Very simple, zeroing is done in the wrappers */
.Lbad_source:
- movq (%rsp),%rax
- testq %rax,%rax
+ movq (%rsp), %rax
+ testq %rax, %rax
jz .Lende
- movl $-EFAULT,(%rax)
+ movl $-EFAULT, (%rax)
jmp .Lende
-
+
.Lbad_dest:
- movq 8(%rsp),%rax
- testq %rax,%rax
- jz .Lende
- movl $-EFAULT,(%rax)
+ movq 8(%rsp), %rax
+ testq %rax, %rax
+ jz .Lende
+ movl $-EFAULT, (%rax)
jmp .Lende
CFI_ENDPROC
ENDPROC(csum_partial_copy_generic)
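
Note: the .Lfold step above reduces the running 64-bit checksum to 32 bits by adding the high and low halves and folding the carry back in (r9 is zero, so adcl only adds the carry flag). The same step rendered in C, as a sketch for illustration only:

#include <stdint.h>

static inline uint32_t csum_fold_64(uint64_t sum)
{
	uint32_t lo  = (uint32_t)sum;
	uint32_t hi  = (uint32_t)(sum >> 32);
	uint32_t res = lo + hi;

	if (res < lo)		/* carry out of the 32-bit addition */
		res++;
	return res;
}
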
diff --git a/arch/x86/lib/csum-partial_64.c b/arch/x86/lib/csum-partial_64.c
index bf51144d97e1..9845371c5c36 100644
--- a/arch/x86/lib/csum-partial_64.c
+++ b/arch/x86/lib/csum-partial_64.c
@@ -84,7 +84,7 @@ static unsigned do_csum(const unsigned char *buff, unsigned len)
count64--;
}
- /* last upto 7 8byte blocks */
+ /* last up to 7 8byte blocks */
count %= 8;
while (count) {
asm("addq %1,%0\n\t"
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 069ce7c37c01..d4203988504a 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -326,7 +326,7 @@ try_again:
if (mm->free_area_cache < len)
goto fail;
- /* either no address requested or cant fit in requested address hole */
+ /* either no address requested or can't fit in requested address hole */
addr = (mm->free_area_cache - len) & huge_page_mask(h);
do {
/*
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 73ad7ebd6e9c..80088f994193 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -917,7 +917,7 @@ static void mark_nxdata_nx(void)
{
/*
* When this called, init has already been executed and released,
- * so everything past _etext sould be NX.
+ * so everything past _etext should be NX.
*/
unsigned long start = PFN_ALIGN(_etext);
/*
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 9ec0f209a6a4..e8c00cc72033 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -446,7 +446,7 @@ static int __init numa_alloc_distance(void)
* @distance: NUMA distance
*
* Set the distance from node @from to @to to @distance. If distance table
- * doesn't exist, one which is large enough to accomodate all the currently
+ * doesn't exist, one which is large enough to accommodate all the currently
* known nodes will be created.
*
* If such table cannot be allocated, a warning is printed and further
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 90825f2eb0f4..f9e526742fa1 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -310,7 +310,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
* these shared mappings are made of small page mappings.
* Thus this don't enforce !RW mapping for small page kernel
* text mapping logic will help Linux Xen parvirt guest boot
- * aswell.
+ * as well.
*/
if (lookup_address(address, &level) && (level != PG_LEVEL_4K))
pgprot_val(forbidden) |= _PAGE_RW;
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 0113d19c8aa6..8573b83a63d0 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -168,8 +168,7 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
* section 8.1: in PAE mode we explicitly have to flush the
* TLB via cr3 if the top-level pgd is changed...
*/
- if (mm == current->active_mm)
- write_cr3(read_cr3());
+ flush_tlb_mm(mm);
}
#else /* !CONFIG_X86_PAE */
diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
index 72cbec14d783..2d49d4e19a36 100644
--- a/arch/x86/oprofile/backtrace.c
+++ b/arch/x86/oprofile/backtrace.c
@@ -126,7 +126,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
if (!user_mode_vm(regs)) {
unsigned long stack = kernel_stack_pointer(regs);
if (depth)
- dump_trace(NULL, regs, (unsigned long *)stack,
+ dump_trace(NULL, regs, (unsigned long *)stack, 0,
&backtrace_ops, &depth);
return;
}
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
index 9fadec074142..98ab13058f89 100644
--- a/arch/x86/oprofile/op_model_p4.c
+++ b/arch/x86/oprofile/op_model_p4.c
@@ -50,7 +50,7 @@ static inline void setup_num_counters(void)
#endif
}
-static int inline addr_increment(void)
+static inline int addr_increment(void)
{
#ifdef CONFIG_SMP
return smp_num_siblings == 2 ? 2 : 1;
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index b1805b78842f..494f2e7ea2b4 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -241,7 +241,7 @@ void __init pcibios_resource_survey(void)
e820_reserve_resources_late();
/*
* Insert the IO APIC resources after PCI initialization has
- * occured to handle IO APICS that are mapped in on a BAR in
+ * occurred to handle IO APICS that are mapped in on a BAR in
* PCI space, but before trying to assign unassigned pci res.
*/
ioapic_insert_resources();
@@ -304,7 +304,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
/*
* ioremap() and ioremap_nocache() defaults to UC MINUS for now.
* To avoid attribute conflicts, request UC MINUS here
- * aswell.
+ * as well.
*/
prot |= _PAGE_CACHE_UC_MINUS;
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index 87e6c8323117..8201165bae28 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -597,21 +597,18 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route
return 1;
}
- if ((device >= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN) &&
- (device <= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX)) {
+ if ((device >= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN &&
+ device <= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX)
+ || (device >= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN &&
+ device <= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX)
+ || (device >= PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MIN &&
+ device <= PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MAX)) {
r->name = "PIIX/ICH";
r->get = pirq_piix_get;
r->set = pirq_piix_set;
return 1;
}
- if ((device >= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN) &&
- (device <= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX)) {
- r->name = "PIIX/ICH";
- r->get = pirq_piix_get;
- r->set = pirq_piix_set;
- return 1;
- }
return 0;
}
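
Note: the hunk above merges three identical PIIX/ICH branches into one test covering three device-ID ranges. An equivalent formulation using a small helper, as a sketch only; in_range() is hypothetical, while the PCI_DEVICE_ID_* range macros and router fields are the ones used in the hunk.

static bool in_range(u16 dev, u16 lo, u16 hi)
{
	return dev >= lo && dev <= hi;
}

	if (in_range(device, PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN,
			     PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX) ||
	    in_range(device, PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN,
			     PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX) ||
	    in_range(device, PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MIN,
			     PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MAX)) {
		r->name = "PIIX/ICH";
		r->get = pirq_piix_get;
		r->set = pirq_piix_set;
		return 1;
	}
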
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 5695fa66d565..39ee7182fd18 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1744,7 +1744,7 @@ static void convert_pfn_mfn(void *v)
}
/*
- * Set up the inital kernel pagetable.
+ * Set up the initial kernel pagetable.
*
* We can construct this by grafting the Xen provided pagetable into
* head_64.S's preconstructed pagetables. We copy the Xen L2's into
diff --git a/arch/xtensa/configs/s6105_defconfig b/arch/xtensa/configs/s6105_defconfig
index 095cd8084164..42b7feba71b7 100644
--- a/arch/xtensa/configs/s6105_defconfig
+++ b/arch/xtensa/configs/s6105_defconfig
@@ -598,7 +598,7 @@ CONFIG_DEBUG_NOMMU_REGIONS=y
# CONFIG_CONTEXT_SWITCH_TRACER is not set
# CONFIG_BOOT_TRACER is not set
# CONFIG_TRACE_BRANCH_PROFILING is not set
-# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
+# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_SAMPLES is not set
#