Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig | 11
-rw-r--r--  arch/powerpc/Kconfig.debug | 5
-rw-r--r--  arch/powerpc/boot/Makefile | 6
-rw-r--r--  arch/powerpc/boot/crt0.S | 116
-rw-r--r--  arch/powerpc/boot/dts/p1020rdb.dts | 344
-rw-r--r--  arch/powerpc/boot/dts/p1020rdb_camp_core0.dts | 213
-rw-r--r--  arch/powerpc/boot/dts/p1020rdb_camp_core1.dts | 148
-rw-r--r--  arch/powerpc/boot/dts/p1020si.dtsi | 377
-rw-r--r--  arch/powerpc/boot/dts/p1022ds.dts | 106
-rw-r--r--  arch/powerpc/boot/dts/p2020ds.dts | 374
-rw-r--r--  arch/powerpc/boot/dts/p2020rdb.dts | 390
-rw-r--r--  arch/powerpc/boot/dts/p2020rdb_camp_core0.dts | 249
-rw-r--r--  arch/powerpc/boot/dts/p2020rdb_camp_core1.dts | 160
-rw-r--r--  arch/powerpc/boot/dts/p2020si.dtsi | 382
-rw-r--r--  arch/powerpc/boot/epapr.c | 66
-rwxr-xr-x  arch/powerpc/boot/wrapper | 19
-rw-r--r--  arch/powerpc/boot/zImage.coff.lds.S | 6
-rw-r--r--  arch/powerpc/boot/zImage.lds.S | 57
-rw-r--r--  arch/powerpc/configs/44x/warp_defconfig | 1
-rw-r--r--  arch/powerpc/configs/52xx/motionpro_defconfig | 1
-rw-r--r--  arch/powerpc/configs/83xx/mpc8313_rdb_defconfig | 1
-rw-r--r--  arch/powerpc/configs/83xx/mpc8315_rdb_defconfig | 1
-rw-r--r--  arch/powerpc/configs/85xx/mpc8540_ads_defconfig | 1
-rw-r--r--  arch/powerpc/configs/85xx/mpc8560_ads_defconfig | 1
-rw-r--r--  arch/powerpc/configs/85xx/mpc85xx_cds_defconfig | 1
-rw-r--r--  arch/powerpc/configs/86xx/gef_ppc9a_defconfig | 1
-rw-r--r--  arch/powerpc/configs/86xx/gef_sbc310_defconfig | 1
-rw-r--r--  arch/powerpc/configs/86xx/gef_sbc610_defconfig | 1
-rw-r--r--  arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig | 2
-rw-r--r--  arch/powerpc/configs/c2k_defconfig | 4
-rw-r--r--  arch/powerpc/configs/e55xx_smp_defconfig | 40
-rw-r--r--  arch/powerpc/configs/linkstation_defconfig | 1
-rw-r--r--  arch/powerpc/configs/mpc512x_defconfig | 1
-rw-r--r--  arch/powerpc/configs/mpc5200_defconfig | 1
-rw-r--r--  arch/powerpc/configs/mpc85xx_defconfig | 2
-rw-r--r--  arch/powerpc/configs/mpc85xx_smp_defconfig | 2
-rw-r--r--  arch/powerpc/configs/mpc86xx_defconfig | 2
-rw-r--r--  arch/powerpc/configs/pasemi_defconfig | 1
-rw-r--r--  arch/powerpc/configs/pmac32_defconfig | 4
-rw-r--r--  arch/powerpc/configs/ppc6xx_defconfig | 5
-rw-r--r--  arch/powerpc/configs/ps3_defconfig | 4
-rw-r--r--  arch/powerpc/configs/pseries_defconfig | 11
-rw-r--r--  arch/powerpc/include/asm/8xx_immap.h | 4
-rw-r--r--  arch/powerpc/include/asm/bitops.h | 4
-rw-r--r--  arch/powerpc/include/asm/compat.h | 2
-rw-r--r--  arch/powerpc/include/asm/cpm.h | 2
-rw-r--r--  arch/powerpc/include/asm/cpm1.h | 2
-rw-r--r--  arch/powerpc/include/asm/cputable.h | 65
-rw-r--r--  arch/powerpc/include/asm/cputhreads.h | 12
-rw-r--r--  arch/powerpc/include/asm/dbell.h | 3
-rw-r--r--  arch/powerpc/include/asm/dma-mapping.h | 6
-rw-r--r--  arch/powerpc/include/asm/emulated_ops.h | 4
-rw-r--r--  arch/powerpc/include/asm/exception-64s.h | 113
-rw-r--r--  arch/powerpc/include/asm/feature-fixups.h | 15
-rw-r--r--  arch/powerpc/include/asm/firmware.h | 3
-rw-r--r--  arch/powerpc/include/asm/hvcall.h | 14
-rw-r--r--  arch/powerpc/include/asm/io-workarounds.h (renamed from arch/powerpc/platforms/cell/io-workarounds.h) | 1
-rw-r--r--  arch/powerpc/include/asm/io.h | 33
-rw-r--r--  arch/powerpc/include/asm/io_event_irq.h | 54
-rw-r--r--  arch/powerpc/include/asm/irq.h | 18
-rw-r--r--  arch/powerpc/include/asm/kexec.h | 2
-rw-r--r--  arch/powerpc/include/asm/kprobes.h | 2
-rw-r--r--  arch/powerpc/include/asm/kvm_asm.h | 1
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_asm.h | 1
-rw-r--r--  arch/powerpc/include/asm/lppaca.h | 4
-rw-r--r--  arch/powerpc/include/asm/machdep.h | 23
-rw-r--r--  arch/powerpc/include/asm/mmu-book3e.h | 20
-rw-r--r--  arch/powerpc/include/asm/mmu-hash64.h | 8
-rw-r--r--  arch/powerpc/include/asm/mmu.h | 52
-rw-r--r--  arch/powerpc/include/asm/mmu_context.h | 12
-rw-r--r--  arch/powerpc/include/asm/mpic.h | 8
-rw-r--r--  arch/powerpc/include/asm/pSeries_reconfig.h | 5
-rw-r--r--  arch/powerpc/include/asm/paca.h | 11
-rw-r--r--  arch/powerpc/include/asm/page.h | 2
-rw-r--r--  arch/powerpc/include/asm/page_64.h | 23
-rw-r--r--  arch/powerpc/include/asm/pasemi_dma.h | 2
-rw-r--r--  arch/powerpc/include/asm/pci-bridge.h | 2
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64.h | 13
-rw-r--r--  arch/powerpc/include/asm/pmac_feature.h | 4
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h | 35
-rw-r--r--  arch/powerpc/include/asm/ppc_asm.h | 1
-rw-r--r--  arch/powerpc/include/asm/processor.h | 4
-rw-r--r--  arch/powerpc/include/asm/pte-common.h | 6
-rw-r--r--  arch/powerpc/include/asm/qe_ic.h | 16
-rw-r--r--  arch/powerpc/include/asm/reg.h | 104
-rw-r--r--  arch/powerpc/include/asm/reg_a2.h | 165
-rw-r--r--  arch/powerpc/include/asm/reg_booke.h | 14
-rw-r--r--  arch/powerpc/include/asm/rtas.h | 45
-rw-r--r--  arch/powerpc/include/asm/scom.h | 156
-rw-r--r--  arch/powerpc/include/asm/smp.h | 43
-rw-r--r--  arch/powerpc/include/asm/spu_priv1.h | 2
-rw-r--r--  arch/powerpc/include/asm/systbl.h | 5
-rw-r--r--  arch/powerpc/include/asm/system.h | 2
-rw-r--r--  arch/powerpc/include/asm/tlbflush.h | 2
-rw-r--r--  arch/powerpc/include/asm/udbg.h | 1
-rw-r--r--  arch/powerpc/include/asm/uninorth.h | 2
-rw-r--r--  arch/powerpc/include/asm/unistd.h | 7
-rw-r--r--  arch/powerpc/include/asm/vdso_datapage.h | 2
-rw-r--r--  arch/powerpc/include/asm/wsp.h | 14
-rw-r--r--  arch/powerpc/include/asm/xics.h | 142
-rw-r--r--  arch/powerpc/kernel/Makefile | 6
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 1
-rw-r--r--  arch/powerpc/kernel/btext.c | 2
-rw-r--r--  arch/powerpc/kernel/cpu_setup_a2.S | 114
-rw-r--r--  arch/powerpc/kernel/cpu_setup_fsl_booke.S | 3
-rw-r--r--  arch/powerpc/kernel/cpu_setup_power7.S | 91
-rw-r--r--  arch/powerpc/kernel/cputable.c | 68
-rw-r--r--  arch/powerpc/kernel/crash.c | 97
-rw-r--r--  arch/powerpc/kernel/dbell.c | 65
-rw-r--r--  arch/powerpc/kernel/dma.c | 18
-rw-r--r--  arch/powerpc/kernel/entry_64.S | 27
-rw-r--r--  arch/powerpc/kernel/exceptions-64e.S | 204
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 246
-rw-r--r--  arch/powerpc/kernel/head_32.S | 13
-rw-r--r--  arch/powerpc/kernel/head_40x.S | 2
-rw-r--r--  arch/powerpc/kernel/head_44x.S | 2
-rw-r--r--  arch/powerpc/kernel/head_64.S | 58
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S | 2
-rw-r--r--  arch/powerpc/kernel/ibmebus.c | 6
-rw-r--r--  arch/powerpc/kernel/idle_power4.S | 21
-rw-r--r--  arch/powerpc/kernel/idle_power7.S | 97
-rw-r--r--  arch/powerpc/kernel/io-workarounds.c (renamed from arch/powerpc/platforms/cell/io-workarounds.c) | 31
-rw-r--r--  arch/powerpc/kernel/irq.c | 248
-rw-r--r--  arch/powerpc/kernel/kgdb.c | 2
-rw-r--r--  arch/powerpc/kernel/l2cr_6xx.S | 2
-rw-r--r--  arch/powerpc/kernel/legacy_serial.c | 8
-rw-r--r--  arch/powerpc/kernel/lparcfg.c | 55
-rw-r--r--  arch/powerpc/kernel/machine_kexec.c | 6
-rw-r--r--  arch/powerpc/kernel/misc_32.S | 11
-rw-r--r--  arch/powerpc/kernel/misc_64.S | 13
-rw-r--r--  arch/powerpc/kernel/paca.c | 32
-rw-r--r--  arch/powerpc/kernel/pci-common.c | 2
-rw-r--r--  arch/powerpc/kernel/pci_dn.c | 3
-rw-r--r--  arch/powerpc/kernel/perf_event.c | 39
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c | 5
-rw-r--r--  arch/powerpc/kernel/ppc_save_regs.S | 2
-rw-r--r--  arch/powerpc/kernel/process.c | 20
-rw-r--r--  arch/powerpc/kernel/prom.c | 68
-rw-r--r--  arch/powerpc/kernel/prom_init.c | 30
-rw-r--r--  arch/powerpc/kernel/ptrace.c | 14
-rw-r--r--  arch/powerpc/kernel/rtas.c | 4
-rw-r--r--  arch/powerpc/kernel/rtasd.c | 2
-rw-r--r--  arch/powerpc/kernel/setup-common.c | 23
-rw-r--r--  arch/powerpc/kernel/setup_32.c | 1
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 44
-rw-r--r--  arch/powerpc/kernel/signal_64.c | 4
-rw-r--r--  arch/powerpc/kernel/smp.c | 293
-rw-r--r--  arch/powerpc/kernel/swsusp_32.S | 2
-rw-r--r--  arch/powerpc/kernel/sysfs.c | 38
-rw-r--r--  arch/powerpc/kernel/time.c | 20
-rw-r--r--  arch/powerpc/kernel/traps.c | 31
-rw-r--r--  arch/powerpc/kernel/udbg.c | 2
-rw-r--r--  arch/powerpc/kernel/udbg_16550.c | 53
-rw-r--r--  arch/powerpc/kernel/vdso32/sigtramp.S | 2
-rw-r--r--  arch/powerpc/kernel/vdso64/sigtramp.S | 2
-rw-r--r--  arch/powerpc/kernel/vector.S | 2
-rw-r--r--  arch/powerpc/kvm/book3s.c | 2
-rw-r--r--  arch/powerpc/kvm/book3s_rmhandlers.S | 13
-rw-r--r--  arch/powerpc/kvm/book3s_segment.S | 12
-rw-r--r--  arch/powerpc/lib/alloc.c | 8
-rw-r--r--  arch/powerpc/lib/copypage_64.S | 7
-rw-r--r--  arch/powerpc/lib/devres.c | 6
-rw-r--r--  arch/powerpc/lib/sstep.c | 62
-rw-r--r--  arch/powerpc/mm/dma-noncoherent.c | 20
-rw-r--r--  arch/powerpc/mm/hash_low_64.S | 32
-rw-r--r--  arch/powerpc/mm/hash_native_64.c | 18
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c | 64
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c | 2
-rw-r--r--  arch/powerpc/mm/mem.c | 2
-rw-r--r--  arch/powerpc/mm/mmu_context_hash64.c | 214
-rw-r--r--  arch/powerpc/mm/mmu_context_nohash.c | 18
-rw-r--r--  arch/powerpc/mm/numa.c | 27
-rw-r--r--  arch/powerpc/mm/pgtable_32.c | 12
-rw-r--r--  arch/powerpc/mm/pgtable_64.c | 15
-rw-r--r--  arch/powerpc/mm/slb.c | 10
-rw-r--r--  arch/powerpc/mm/slb_low.S | 8
-rw-r--r--  arch/powerpc/mm/stab.c | 2
-rw-r--r--  arch/powerpc/mm/tlb_low_64e.S | 6
-rw-r--r--  arch/powerpc/oprofile/op_model_cell.c | 6
-rw-r--r--  arch/powerpc/oprofile/op_model_power4.c | 2
-rw-r--r--  arch/powerpc/platforms/44x/iss4xx.c | 6
-rw-r--r--  arch/powerpc/platforms/512x/mpc5121_ads_cpld.c | 16
-rw-r--r--  arch/powerpc/platforms/52xx/media5200.c | 23
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_gpt.c | 10
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c | 6
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_pic.c | 91
-rw-r--r--  arch/powerpc/platforms/82xx/pq2ads-pci-pic.c | 24
-rw-r--r--  arch/powerpc/platforms/83xx/suspend.c | 7
-rw-r--r--  arch/powerpc/platforms/85xx/ksi8560.c | 4
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_ads.c | 4
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_cds.c | 2
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_ds.c | 4
-rw-r--r--  arch/powerpc/platforms/85xx/sbc8560.c | 4
-rw-r--r--  arch/powerpc/platforms/85xx/smp.c | 12
-rw-r--r--  arch/powerpc/platforms/85xx/socrates_fpga_pic.c | 38
-rw-r--r--  arch/powerpc/platforms/85xx/stx_gp3.c | 4
-rw-r--r--  arch/powerpc/platforms/85xx/tqm85xx.c | 4
-rw-r--r--  arch/powerpc/platforms/86xx/gef_pic.c | 18
-rw-r--r--  arch/powerpc/platforms/86xx/mpc8610_hpcd.c | 99
-rw-r--r--  arch/powerpc/platforms/86xx/mpc86xx_smp.c | 6
-rw-r--r--  arch/powerpc/platforms/86xx/pic.c | 4
-rw-r--r--  arch/powerpc/platforms/8xx/m8xx_setup.c | 8
-rw-r--r--  arch/powerpc/platforms/Kconfig | 33
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype | 24
-rw-r--r--  arch/powerpc/platforms/Makefile | 1
-rw-r--r--  arch/powerpc/platforms/cell/Kconfig | 5
-rw-r--r--  arch/powerpc/platforms/cell/Makefile | 9
-rw-r--r--  arch/powerpc/platforms/cell/axon_msi.c | 17
-rw-r--r--  arch/powerpc/platforms/cell/beat_interrupt.c | 32
-rw-r--r--  arch/powerpc/platforms/cell/beat_interrupt.h | 3
-rw-r--r--  arch/powerpc/platforms/cell/beat_smp.c | 124
-rw-r--r--  arch/powerpc/platforms/cell/cbe_regs.c | 11
-rw-r--r--  arch/powerpc/platforms/cell/celleb_pci.c | 25
-rw-r--r--  arch/powerpc/platforms/cell/celleb_pci.h | 3
-rw-r--r--  arch/powerpc/platforms/cell/celleb_setup.c | 4
-rw-r--r--  arch/powerpc/platforms/cell/interrupt.c | 80
-rw-r--r--  arch/powerpc/platforms/cell/qpace_setup.c | 1
-rw-r--r--  arch/powerpc/platforms/cell/setup.c | 12
-rw-r--r--  arch/powerpc/platforms/cell/smp.c | 37
-rw-r--r--  arch/powerpc/platforms/cell/spider-pci.c | 3
-rw-r--r--  arch/powerpc/platforms/cell/spider-pic.c | 42
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c | 28
-rw-r--r--  arch/powerpc/platforms/cell/spufs/lscsa_alloc.c | 2
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c | 4
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spu_restore.c | 2
-rw-r--r--  arch/powerpc/platforms/chrp/setup.c | 4
-rw-r--r--  arch/powerpc/platforms/chrp/smp.c | 4
-rw-r--r--  arch/powerpc/platforms/embedded6xx/flipper-pic.c | 21
-rw-r--r--  arch/powerpc/platforms/embedded6xx/hlwd-pic.c | 31
-rw-r--r--  arch/powerpc/platforms/embedded6xx/holly.c | 4
-rw-r--r--  arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c | 4
-rw-r--r--  arch/powerpc/platforms/iseries/Kconfig | 4
-rw-r--r--  arch/powerpc/platforms/iseries/exception.S | 62
-rw-r--r--  arch/powerpc/platforms/iseries/irq.c | 17
-rw-r--r--  arch/powerpc/platforms/iseries/mf.c | 2
-rw-r--r--  arch/powerpc/platforms/iseries/setup.c | 9
-rw-r--r--  arch/powerpc/platforms/iseries/smp.c | 45
-rw-r--r--  arch/powerpc/platforms/iseries/smp.h | 6
-rw-r--r--  arch/powerpc/platforms/iseries/viopath.c | 8
-rw-r--r--  arch/powerpc/platforms/maple/pci.c | 2
-rw-r--r--  arch/powerpc/platforms/pasemi/dma_lib.c | 4
-rw-r--r--  arch/powerpc/platforms/pasemi/setup.c | 2
-rw-r--r--  arch/powerpc/platforms/powermac/Kconfig | 11
-rw-r--r--  arch/powerpc/platforms/powermac/Makefile | 2
-rw-r--r--  arch/powerpc/platforms/powermac/low_i2c.c | 2
-rw-r--r--  arch/powerpc/platforms/powermac/pci.c | 4
-rw-r--r--  arch/powerpc/platforms/powermac/pic.c | 84
-rw-r--r--  arch/powerpc/platforms/powermac/pic.h | 11
-rw-r--r--  arch/powerpc/platforms/powermac/pmac.h | 2
-rw-r--r--  arch/powerpc/platforms/powermac/setup.c | 56
-rw-r--r--  arch/powerpc/platforms/powermac/smp.c | 251
-rw-r--r--  arch/powerpc/platforms/ps3/interrupt.c | 20
-rw-r--r--  arch/powerpc/platforms/ps3/smp.c | 22
-rw-r--r--  arch/powerpc/platforms/ps3/spu.c | 4
-rw-r--r--  arch/powerpc/platforms/pseries/Kconfig | 23
-rw-r--r--  arch/powerpc/platforms/pseries/Makefile | 2
-rw-r--r--  arch/powerpc/platforms/pseries/dlpar.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/dtl.c | 20
-rw-r--r--  arch/powerpc/platforms/pseries/eeh.c | 84
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_driver.c | 22
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-cpu.c | 7
-rw-r--r--  arch/powerpc/platforms/pseries/io_event_irq.c | 231
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c | 123
-rw-r--r--  arch/powerpc/platforms/pseries/kexec.c | 5
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c | 48
-rw-r--r--  arch/powerpc/platforms/pseries/msi.c | 4
-rw-r--r--  arch/powerpc/platforms/pseries/nvram.c | 24
-rw-r--r--  arch/powerpc/platforms/pseries/offline_states.h | 2
-rw-r--r--  arch/powerpc/platforms/pseries/plpar_wrappers.h | 27
-rw-r--r--  arch/powerpc/platforms/pseries/ras.c | 6
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c | 54
-rw-r--r--  arch/powerpc/platforms/pseries/smp.c | 45
-rw-r--r--  arch/powerpc/platforms/pseries/xics.c | 948
-rw-r--r--  arch/powerpc/platforms/pseries/xics.h | 23
-rw-r--r--  arch/powerpc/platforms/wsp/Kconfig | 28
-rw-r--r--  arch/powerpc/platforms/wsp/Makefile | 6
-rw-r--r--  arch/powerpc/platforms/wsp/ics.c | 712
-rw-r--r--  arch/powerpc/platforms/wsp/ics.h | 20
-rw-r--r--  arch/powerpc/platforms/wsp/opb_pic.c | 332
-rw-r--r--  arch/powerpc/platforms/wsp/psr2.c | 95
-rw-r--r--  arch/powerpc/platforms/wsp/scom_smp.c | 427
-rw-r--r--  arch/powerpc/platforms/wsp/scom_wsp.c | 77
-rw-r--r--  arch/powerpc/platforms/wsp/setup.c | 36
-rw-r--r--  arch/powerpc/platforms/wsp/smp.c | 88
-rw-r--r--  arch/powerpc/platforms/wsp/wsp.h | 17
-rw-r--r--  arch/powerpc/sysdev/Kconfig | 10
-rw-r--r--  arch/powerpc/sysdev/Makefile | 6
-rw-r--r--  arch/powerpc/sysdev/axonram.c | 4
-rw-r--r--  arch/powerpc/sysdev/bestcomm/bestcomm.h | 2
-rw-r--r--  arch/powerpc/sysdev/bestcomm/bestcomm_priv.h | 2
-rw-r--r--  arch/powerpc/sysdev/cpm1.c | 14
-rw-r--r--  arch/powerpc/sysdev/cpm2_pic.c | 57
-rw-r--r--  arch/powerpc/sysdev/fsl_85xx_cache_sram.c | 4
-rw-r--r--  arch/powerpc/sysdev/fsl_msi.c | 54
-rw-r--r--  arch/powerpc/sysdev/fsl_pci.c | 5
-rw-r--r--  arch/powerpc/sysdev/fsl_rio.c | 4
-rw-r--r--  arch/powerpc/sysdev/i8259.c | 19
-rw-r--r--  arch/powerpc/sysdev/indirect_pci.c | 2
-rw-r--r--  arch/powerpc/sysdev/ipic.c | 74
-rw-r--r--  arch/powerpc/sysdev/mmio_nvram.c | 2
-rw-r--r--  arch/powerpc/sysdev/mpc8xx_pic.c | 21
-rw-r--r--  arch/powerpc/sysdev/mpc8xxx_gpio.c | 24
-rw-r--r--  arch/powerpc/sysdev/mpic.c | 300
-rw-r--r--  arch/powerpc/sysdev/mpic_pasemi_msi.c | 8
-rw-r--r--  arch/powerpc/sysdev/mpic_u3msi.c | 8
-rw-r--r--  arch/powerpc/sysdev/mv64x60_pic.c | 19
-rw-r--r--  arch/powerpc/sysdev/ppc4xx_pci.h | 2
-rw-r--r--  arch/powerpc/sysdev/qe_lib/qe_ic.c | 22
-rw-r--r--  arch/powerpc/sysdev/scom.c | 192
-rw-r--r--  arch/powerpc/sysdev/tsi108_pci.c | 6
-rw-r--r--  arch/powerpc/sysdev/uic.c | 53
-rw-r--r--  arch/powerpc/sysdev/xics/Kconfig | 13
-rw-r--r--  arch/powerpc/sysdev/xics/Makefile | 6
-rw-r--r--  arch/powerpc/sysdev/xics/icp-hv.c | 164
-rw-r--r--  arch/powerpc/sysdev/xics/icp-native.c | 293
-rw-r--r--  arch/powerpc/sysdev/xics/ics-rtas.c | 240
-rw-r--r--  arch/powerpc/sysdev/xics/xics-common.c | 443
-rw-r--r--  arch/powerpc/sysdev/xilinx_intc.c | 28
-rw-r--r--  arch/powerpc/xmon/xmon.c | 38
319 files changed, 9609 insertions, 4924 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 3584e4d4a4ad..a3128ca0fe11 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -138,7 +138,8 @@ config PPC
select HAVE_GENERIC_HARDIRQS
select HAVE_SPARSE_IRQ
select IRQ_PER_CPU
- select GENERIC_HARDIRQS_NO_DEPRECATED
+ select GENERIC_IRQ_SHOW
+ select GENERIC_IRQ_SHOW_LEVEL
config EARLY_PRINTK
bool
@@ -192,6 +193,12 @@ config SYS_SUPPORTS_APM_EMULATION
default y if PMAC_APM_EMU
bool
+config EPAPR_BOOT
+ bool
+ help
+ Used to allow a board to specify it wants an ePAPR compliant wrapper.
+ default n
+
config DEFAULT_UIMAGE
bool
help
@@ -208,7 +215,7 @@ config ARCH_HIBERNATION_POSSIBLE
config ARCH_SUSPEND_POSSIBLE
def_bool y
depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx || \
- PPC_85xx || PPC_86xx || PPC_PSERIES || 44x || 40x
+ (PPC_85xx && !SMP) || PPC_86xx || PPC_PSERIES || 44x || 40x
config PPC_DCR_NATIVE
bool
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 2d38a50e66ba..a597dd77b903 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -267,6 +267,11 @@ config PPC_EARLY_DEBUG_USBGECKO
Select this to enable early debugging for Nintendo GameCube/Wii
consoles via an external USB Gecko adapter.
+config PPC_EARLY_DEBUG_WSP
+ bool "Early debugging via WSP's internal UART"
+ depends on PPC_WSP
+ select PPC_UDBG_16550
+
endchoice
config PPC_EARLY_DEBUG_44x_PHYSLOW
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 89178164af5e..c26200b40a47 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -69,7 +69,8 @@ src-wlib := string.S crt0.S crtsavres.S stdio.c main.c \
cpm-serial.c stdlib.c mpc52xx-psc.c planetcore.c uartlite.c \
fsl-soc.c mpc8xx.c pq2.c ugecon.c
src-plat := of.c cuboot-52xx.c cuboot-824x.c cuboot-83xx.c cuboot-85xx.c holly.c \
- cuboot-ebony.c cuboot-hotfoot.c treeboot-ebony.c prpmc2800.c \
+ cuboot-ebony.c cuboot-hotfoot.c epapr.c treeboot-ebony.c \
+ prpmc2800.c \
ps3-head.S ps3-hvcall.S ps3.c treeboot-bamboo.c cuboot-8xx.c \
cuboot-pq2.c cuboot-sequoia.c treeboot-walnut.c \
cuboot-bamboo.c cuboot-mpc7448hpc2.c cuboot-taishan.c \
@@ -127,7 +128,7 @@ quiet_cmd_bootas = BOOTAS $@
cmd_bootas = $(CROSS32CC) -Wp,-MD,$(depfile) $(BOOTAFLAGS) -c -o $@ $<
quiet_cmd_bootar = BOOTAR $@
- cmd_bootar = $(CROSS32AR) -cr $@.$$$$ $(filter-out FORCE,$^); mv $@.$$$$ $@
+ cmd_bootar = $(CROSS32AR) -cr$(KBUILD_ARFLAGS) $@.$$$$ $(filter-out FORCE,$^); mv $@.$$$$ $@
$(obj-libfdt): $(obj)/%.o: $(srctree)/scripts/dtc/libfdt/%.c FORCE
$(call if_changed_dep,bootcc)
@@ -182,6 +183,7 @@ image-$(CONFIG_PPC_HOLLY) += dtbImage.holly
image-$(CONFIG_PPC_PRPMC2800) += dtbImage.prpmc2800
image-$(CONFIG_PPC_ISERIES) += zImage.iseries
image-$(CONFIG_DEFAULT_UIMAGE) += uImage
+image-$(CONFIG_EPAPR_BOOT) += zImage.epapr
#
# Targets which embed a device tree blob
diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S
index f1c4dfc635be..0f7428a37efb 100644
--- a/arch/powerpc/boot/crt0.S
+++ b/arch/powerpc/boot/crt0.S
@@ -6,16 +6,28 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * NOTE: this code runs in 32 bit mode and is packaged as ELF32.
+ * NOTE: this code runs in 32 bit mode, is position-independent,
+ * and is packaged as ELF32.
*/
#include "ppc_asm.h"
.text
- /* a procedure descriptor used when booting this as a COFF file */
+ /* A procedure descriptor used when booting this as a COFF file.
+ * When making COFF, this comes first in the link and we're
+ * linked at 0x500000.
+ */
.globl _zimage_start_opd
_zimage_start_opd:
- .long _zimage_start, 0, 0, 0
+ .long 0x500000, 0, 0, 0
+
+p_start: .long _start
+p_etext: .long _etext
+p_bss_start: .long __bss_start
+p_end: .long _end
+
+ .weak _platform_stack_top
+p_pstack: .long _platform_stack_top
.weak _zimage_start
.globl _zimage_start
@@ -24,37 +36,65 @@ _zimage_start:
_zimage_start_lib:
/* Work out the offset between the address we were linked at
and the address where we're running. */
- bl 1f
-1: mflr r0
- lis r9,1b@ha
- addi r9,r9,1b@l
- subf. r0,r9,r0
- beq 3f /* if running at same address as linked */
+ bl .+4
+p_base: mflr r10 /* r10 now points to runtime addr of p_base */
+ /* grab the link address of the dynamic section in r11 */
+ addis r11,r10,(_GLOBAL_OFFSET_TABLE_-p_base)@ha
+ lwz r11,(_GLOBAL_OFFSET_TABLE_-p_base)@l(r11)
+ cmpwi r11,0
+ beq 3f /* if not linked -pie */
+ /* get the runtime address of the dynamic section in r12 */
+ .weak __dynamic_start
+ addis r12,r10,(__dynamic_start-p_base)@ha
+ addi r12,r12,(__dynamic_start-p_base)@l
+ subf r11,r11,r12 /* runtime - linktime offset */
+
+ /* The dynamic section contains a series of tagged entries.
+ * We need the RELA and RELACOUNT entries. */
+RELA = 7
+RELACOUNT = 0x6ffffff9
+ li r9,0
+ li r0,0
+9: lwz r8,0(r12) /* get tag */
+ cmpwi r8,0
+ beq 10f /* end of list */
+ cmpwi r8,RELA
+ bne 11f
+ lwz r9,4(r12) /* get RELA pointer in r9 */
+ b 12f
+11: addis r8,r8,(-RELACOUNT)@ha
+ cmpwi r8,RELACOUNT@l
+ bne 12f
+ lwz r0,4(r12) /* get RELACOUNT value in r0 */
+12: addi r12,r12,8
+ b 9b
- /* The .got2 section contains a list of addresses, so add
- the address offset onto each entry. */
- lis r9,__got2_start@ha
- addi r9,r9,__got2_start@l
- lis r8,__got2_end@ha
- addi r8,r8,__got2_end@l
- subf. r8,r9,r8
+ /* The relocation section contains a list of relocations.
+ * We now do the R_PPC_RELATIVE ones, which point to words
+ * which need to be initialized with addend + offset.
+ * The R_PPC_RELATIVE ones come first and there are RELACOUNT
+ * of them. */
+10: /* skip relocation if we don't have both */
+ cmpwi r0,0
beq 3f
- srwi. r8,r8,2
- mtctr r8
- add r9,r0,r9
-2: lwz r8,0(r9)
- add r8,r8,r0
- stw r8,0(r9)
- addi r9,r9,4
+ cmpwi r9,0
+ beq 3f
+
+ add r9,r9,r11 /* Relocate RELA pointer */
+ mtctr r0
+2: lbz r0,4+3(r9) /* ELF32_R_INFO(reloc->r_info) */
+ cmpwi r0,22 /* R_PPC_RELATIVE */
+ bne 3f
+ lwz r12,0(r9) /* reloc->r_offset */
+ lwz r0,8(r9) /* reloc->r_addend */
+ add r0,r0,r11
+ stwx r0,r11,r12
+ addi r9,r9,12
bdnz 2b
/* Do a cache flush for our text, in case the loader didn't */
-3: lis r9,_start@ha
- addi r9,r9,_start@l
- add r9,r0,r9
- lis r8,_etext@ha
- addi r8,r8,_etext@l
- add r8,r0,r8
+3: lwz r9,p_start-p_base(r10) /* note: these are relocated now */
+ lwz r8,p_etext-p_base(r10)
4: dcbf r0,r9
icbi r0,r9
addi r9,r9,0x20
@@ -64,27 +104,19 @@ _zimage_start_lib:
isync
/* Clear the BSS */
- lis r9,__bss_start@ha
- addi r9,r9,__bss_start@l
- add r9,r0,r9
- lis r8,_end@ha
- addi r8,r8,_end@l
- add r8,r0,r8
- li r10,0
-5: stw r10,0(r9)
+ lwz r9,p_bss_start-p_base(r10)
+ lwz r8,p_end-p_base(r10)
+ li r0,0
+5: stw r0,0(r9)
addi r9,r9,4
cmplw cr0,r9,r8
blt 5b
/* Possibly set up a custom stack */
-.weak _platform_stack_top
- lis r8,_platform_stack_top@ha
- addi r8,r8,_platform_stack_top@l
+ lwz r8,p_pstack-p_base(r10)
cmpwi r8,0
beq 6f
- add r8,r0,r8
lwz r1,0(r8)
- add r1,r0,r1
li r0,0
stwu r0,-16(r1) /* establish a stack frame */
6:
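
The crt0.S changes above make the boot wrapper position-independent: instead of adjusting .got2 entries, the new code scans the ELF dynamic section for the RELA and RELACOUNT tags and then applies the R_PPC_RELATIVE relocations itself, storing addend plus load offset at each r_offset. Below is a minimal C sketch of that fixup loop, assuming the standard 12-byte Elf32_Rela layout; the function name is illustrative and is not part of this patch.

#include <stdint.h>

struct elf32_rela {
	uint32_t r_offset;	/* link-time address of the word to patch */
	uint32_t r_info;	/* relocation type lives in the low byte */
	uint32_t r_addend;	/* value the word should hold before relocation */
};

#define R_PPC_RELATIVE	22

/* Apply the leading run of R_PPC_RELATIVE entries, given the offset
 * between the address we were linked at and the address we are running
 * at.  This mirrors the "2:" lbz/lwz/stwx loop added to crt0.S above. */
static void apply_relative_relocs(struct elf32_rela *rela,
				  unsigned long relacount,
				  unsigned long run_minus_link)
{
	unsigned long i;

	for (i = 0; i < relacount; i++) {
		if ((rela[i].r_info & 0xff) != R_PPC_RELATIVE)
			break;		/* RELATIVE entries come first */
		*(uint32_t *)(rela[i].r_offset + run_minus_link) =
			rela[i].r_addend + run_minus_link;
	}
}
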
diff --git a/arch/powerpc/boot/dts/p1020rdb.dts b/arch/powerpc/boot/dts/p1020rdb.dts
index 22f64b62d7f6..d6a8ae458137 100644
--- a/arch/powerpc/boot/dts/p1020rdb.dts
+++ b/arch/powerpc/boot/dts/p1020rdb.dts
@@ -1,7 +1,7 @@
/*
* P1020 RDB Device Tree Source
*
- * Copyright 2009 Freescale Semiconductor Inc.
+ * Copyright 2009-2011 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -9,12 +9,11 @@
* option) any later version.
*/
-/dts-v1/;
+/include/ "p1020si.dtsi"
+
/ {
- model = "fsl,P1020";
+ model = "fsl,P1020RDB";
compatible = "fsl,P1020RDB";
- #address-cells = <2>;
- #size-cells = <2>;
aliases {
serial0 = &serial0;
@@ -26,34 +25,11 @@
pci1 = &pci1;
};
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- PowerPC,P1020@0 {
- device_type = "cpu";
- reg = <0x0>;
- next-level-cache = <&L2>;
- };
-
- PowerPC,P1020@1 {
- device_type = "cpu";
- reg = <0x1>;
- next-level-cache = <&L2>;
- };
- };
-
memory {
device_type = "memory";
};
localbus@ffe05000 {
- #address-cells = <2>;
- #size-cells = <1>;
- compatible = "fsl,p1020-elbc", "fsl,elbc", "simple-bus";
- reg = <0 0xffe05000 0 0x1000>;
- interrupts = <19 2>;
- interrupt-parent = <&mpic>;
/* NOR, NAND Flashes and Vitesse 5 port L2 switch */
ranges = <0x0 0x0 0x0 0xef000000 0x01000000
@@ -165,88 +141,14 @@
};
soc@ffe00000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "soc";
- compatible = "fsl,p1020-immr", "simple-bus";
- ranges = <0x0 0x0 0xffe00000 0x100000>;
- bus-frequency = <0>; // Filled out by uboot.
-
- ecm-law@0 {
- compatible = "fsl,ecm-law";
- reg = <0x0 0x1000>;
- fsl,num-laws = <12>;
- };
-
- ecm@1000 {
- compatible = "fsl,p1020-ecm", "fsl,ecm";
- reg = <0x1000 0x1000>;
- interrupts = <16 2>;
- interrupt-parent = <&mpic>;
- };
-
- memory-controller@2000 {
- compatible = "fsl,p1020-memory-controller";
- reg = <0x2000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
- };
-
i2c@3000 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <0>;
- compatible = "fsl-i2c";
- reg = <0x3000 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
rtc@68 {
compatible = "dallas,ds1339";
reg = <0x68>;
};
};
- i2c@3100 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <1>;
- compatible = "fsl-i2c";
- reg = <0x3100 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
- };
-
- serial0: serial@4500 {
- cell-index = <0>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4500 0x100>;
- clock-frequency = <0>;
- interrupts = <42 2>;
- interrupt-parent = <&mpic>;
- };
-
- serial1: serial@4600 {
- cell-index = <1>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4600 0x100>;
- clock-frequency = <0>;
- interrupts = <42 2>;
- interrupt-parent = <&mpic>;
- };
-
spi@7000 {
- cell-index = <0>;
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,espi";
- reg = <0x7000 0x1000>;
- interrupts = <59 0x2>;
- interrupt-parent = <&mpic>;
- mode = "cpu";
fsl_m25p80@0 {
#address-cells = <1>;
@@ -294,66 +196,7 @@
};
};
- gpio: gpio-controller@f000 {
- #gpio-cells = <2>;
- compatible = "fsl,mpc8572-gpio";
- reg = <0xf000 0x100>;
- interrupts = <47 0x2>;
- interrupt-parent = <&mpic>;
- gpio-controller;
- };
-
- L2: l2-cache-controller@20000 {
- compatible = "fsl,p1020-l2-cache-controller";
- reg = <0x20000 0x1000>;
- cache-line-size = <32>; // 32 bytes
- cache-size = <0x40000>; // L2,256K
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
- };
-
- dma@21300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,eloplus-dma";
- reg = <0x21300 0x4>;
- ranges = <0x0 0x21100 0x200>;
- cell-index = <0>;
- dma-channel@0 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <20 2>;
- };
- dma-channel@80 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupt-parent = <&mpic>;
- interrupts = <21 2>;
- };
- dma-channel@100 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupt-parent = <&mpic>;
- interrupts = <22 2>;
- };
- dma-channel@180 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupt-parent = <&mpic>;
- interrupts = <23 2>;
- };
- };
-
mdio@24000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,etsec2-mdio";
- reg = <0x24000 0x1000 0xb0030 0x4>;
phy0: ethernet-phy@0 {
interrupt-parent = <&mpic>;
@@ -369,10 +212,6 @@
};
mdio@25000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,etsec2-tbi";
- reg = <0x25000 0x1000 0xb1030 0x4>;
tbi0: tbi-phy@11 {
reg = <0x11>;
@@ -381,97 +220,25 @@
};
enet0: ethernet@b0000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "network";
- model = "eTSEC";
- compatible = "fsl,etsec2";
- fsl,num_rx_queues = <0x8>;
- fsl,num_tx_queues = <0x8>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupt-parent = <&mpic>;
fixed-link = <1 1 1000 0 0>;
phy-connection-type = "rgmii-id";
- queue-group@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0xb0000 0x1000>;
- interrupts = <29 2 30 2 34 2>;
- };
-
- queue-group@1 {
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0xb4000 0x1000>;
- interrupts = <17 2 18 2 24 2>;
- };
};
enet1: ethernet@b1000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "network";
- model = "eTSEC";
- compatible = "fsl,etsec2";
- fsl,num_rx_queues = <0x8>;
- fsl,num_tx_queues = <0x8>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupt-parent = <&mpic>;
phy-handle = <&phy0>;
tbi-handle = <&tbi0>;
phy-connection-type = "sgmii";
- queue-group@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0xb1000 0x1000>;
- interrupts = <35 2 36 2 40 2>;
- };
-
- queue-group@1 {
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0xb5000 0x1000>;
- interrupts = <51 2 52 2 67 2>;
- };
};
enet2: ethernet@b2000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "network";
- model = "eTSEC";
- compatible = "fsl,etsec2";
- fsl,num_rx_queues = <0x8>;
- fsl,num_tx_queues = <0x8>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupt-parent = <&mpic>;
phy-handle = <&phy1>;
phy-connection-type = "rgmii-id";
- queue-group@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0xb2000 0x1000>;
- interrupts = <31 2 32 2 33 2>;
- };
-
- queue-group@1 {
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0xb6000 0x1000>;
- interrupts = <25 2 26 2 27 2>;
- };
};
usb@22000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl-usb2-dr";
- reg = <0x22000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <28 0x2>;
phy_type = "ulpi";
};
@@ -481,82 +248,23 @@
it enables USB2. OTOH, U-Boot does create a new node
when there isn't any. So, just comment it out.
usb@23000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl-usb2-dr";
- reg = <0x23000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <46 0x2>;
phy_type = "ulpi";
};
*/
- sdhci@2e000 {
- compatible = "fsl,p1020-esdhc", "fsl,esdhc";
- reg = <0x2e000 0x1000>;
- interrupts = <72 0x2>;
- interrupt-parent = <&mpic>;
- /* Filled in by U-Boot */
- clock-frequency = <0>;
- };
-
- crypto@30000 {
- compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4",
- "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0";
- reg = <0x30000 0x10000>;
- interrupts = <45 2 58 2>;
- interrupt-parent = <&mpic>;
- fsl,num-channels = <4>;
- fsl,channel-fifo-len = <24>;
- fsl,exec-units-mask = <0xbfe>;
- fsl,descriptor-types-mask = <0x3ab0ebf>;
- };
-
- mpic: pic@40000 {
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <2>;
- reg = <0x40000 0x40000>;
- compatible = "chrp,open-pic";
- device_type = "open-pic";
- };
-
- msi@41600 {
- compatible = "fsl,p1020-msi", "fsl,mpic-msi";
- reg = <0x41600 0x80>;
- msi-available-ranges = <0 0x100>;
- interrupts = <
- 0xe0 0
- 0xe1 0
- 0xe2 0
- 0xe3 0
- 0xe4 0
- 0xe5 0
- 0xe6 0
- 0xe7 0>;
- interrupt-parent = <&mpic>;
- };
-
- global-utilities@e0000 { //global utilities block
- compatible = "fsl,p1020-guts";
- reg = <0xe0000 0x1000>;
- fsl,has-rstcr;
- };
};
pci0: pcie@ffe09000 {
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0 0xffe09000 0 0x1000>;
- bus-range = <0 255>;
ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
- 0x1000000 0x0 0x00000000 0 0xffc30000 0x0 0x10000>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
+ 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
+ interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x4 0x1
+ 0000 0x0 0x0 0x2 &mpic 0x5 0x1
+ 0000 0x0 0x0 0x3 &mpic 0x6 0x1
+ 0000 0x0 0x0 0x4 &mpic 0x7 0x1
+ >;
pcie@0 {
reg = <0x0 0x0 0x0 0x0 0x0>;
#size-cells = <2>;
@@ -573,25 +281,23 @@
};
pci1: pcie@ffe0a000 {
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0 0xffe0a000 0 0x1000>;
- bus-range = <0 255>;
- ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000
- 0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
+ ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
+ interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x0 0x1
+ 0000 0x0 0x0 0x2 &mpic 0x1 0x1
+ 0000 0x0 0x0 0x3 &mpic 0x2 0x1
+ 0000 0x0 0x0 0x4 &mpic 0x3 0x1
+ >;
pcie@0 {
reg = <0x0 0x0 0x0 0x0 0x0>;
#size-cells = <2>;
#address-cells = <3>;
device_type = "pci";
- ranges = <0x2000000 0x0 0xc0000000
- 0x2000000 0x0 0xc0000000
+ ranges = <0x2000000 0x0 0x80000000
+ 0x2000000 0x0 0x80000000
0x0 0x20000000
0x1000000 0x0 0x0
diff --git a/arch/powerpc/boot/dts/p1020rdb_camp_core0.dts b/arch/powerpc/boot/dts/p1020rdb_camp_core0.dts
new file mode 100644
index 000000000000..f0bf7f42f097
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1020rdb_camp_core0.dts
@@ -0,0 +1,213 @@
+/*
+ * P1020 RDB Core0 Device Tree Source in CAMP mode.
+ *
+ * In CAMP mode, each core needs to have its own dts. Only mpic and L2 cache
+ * can be shared, all the other devices must be assigned to one core only.
+ * This dts file allows core0 to have memory, l2, i2c, spi, gpio, tdm, dma, usb,
+ * eth1, eth2, sdhc, crypto, global-util, message, pci0, pci1, msi.
+ *
+ * Please note to add "-b 0" for core0's dts compiling.
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/include/ "p1020si.dtsi"
+
+/ {
+ model = "fsl,P1020RDB";
+ compatible = "fsl,P1020RDB", "fsl,MPC85XXRDB-CAMP";
+
+ aliases {
+ ethernet1 = &enet1;
+ ethernet2 = &enet2;
+ serial0 = &serial0;
+ pci0 = &pci0;
+ pci1 = &pci1;
+ };
+
+ cpus {
+ PowerPC,P1020@1 {
+ status = "disabled";
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ };
+
+ localbus@ffe05000 {
+ status = "disabled";
+ };
+
+ soc@ffe00000 {
+ i2c@3000 {
+ rtc@68 {
+ compatible = "dallas,ds1339";
+ reg = <0x68>;
+ };
+ };
+
+ serial1: serial@4600 {
+ status = "disabled";
+ };
+
+ spi@7000 {
+ fsl_m25p80@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,espi-flash";
+ reg = <0>;
+ linux,modalias = "fsl_m25p80";
+ spi-max-frequency = <40000000>;
+
+ partition@0 {
+ /* 512KB for u-boot Bootloader Image */
+ reg = <0x0 0x00080000>;
+ label = "SPI (RO) U-Boot Image";
+ read-only;
+ };
+
+ partition@80000 {
+ /* 512KB for DTB Image */
+ reg = <0x00080000 0x00080000>;
+ label = "SPI (RO) DTB Image";
+ read-only;
+ };
+
+ partition@100000 {
+ /* 4MB for Linux Kernel Image */
+ reg = <0x00100000 0x00400000>;
+ label = "SPI (RO) Linux Kernel Image";
+ read-only;
+ };
+
+ partition@500000 {
+ /* 4MB for Compressed RFS Image */
+ reg = <0x00500000 0x00400000>;
+ label = "SPI (RO) Compressed RFS Image";
+ read-only;
+ };
+
+ partition@900000 {
+ /* 7MB for JFFS2 based RFS */
+ reg = <0x00900000 0x00700000>;
+ label = "SPI (RW) JFFS2 RFS";
+ };
+ };
+ };
+
+ mdio@24000 {
+ phy0: ethernet-phy@0 {
+ interrupt-parent = <&mpic>;
+ interrupts = <3 1>;
+ reg = <0x0>;
+ };
+ phy1: ethernet-phy@1 {
+ interrupt-parent = <&mpic>;
+ interrupts = <2 1>;
+ reg = <0x1>;
+ };
+ };
+
+ mdio@25000 {
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ enet0: ethernet@b0000 {
+ status = "disabled";
+ };
+
+ enet1: ethernet@b1000 {
+ phy-handle = <&phy0>;
+ tbi-handle = <&tbi0>;
+ phy-connection-type = "sgmii";
+ };
+
+ enet2: ethernet@b2000 {
+ phy-handle = <&phy1>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ usb@22000 {
+ phy_type = "ulpi";
+ };
+
+ /* USB2 is shared with localbus, so it must be disabled
+ by default. We can't put 'status = "disabled";' here
+ since U-Boot doesn't clear the status property when
+ it enables USB2. OTOH, U-Boot does create a new node
+ when there isn't any. So, just comment it out.
+ usb@23000 {
+ phy_type = "ulpi";
+ };
+ */
+
+ mpic: pic@40000 {
+ protected-sources = <
+ 42 29 30 34 /* serial1, enet0-queue-group0 */
+ 17 18 24 45 /* enet0-queue-group1, crypto */
+ >;
+ };
+
+ };
+
+ pci0: pcie@ffe09000 {
+ ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
+ interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x4 0x1
+ 0000 0x0 0x0 0x2 &mpic 0x5 0x1
+ 0000 0x0 0x0 0x3 &mpic 0x6 0x1
+ 0000 0x0 0x0 0x4 &mpic 0x7 0x1
+ >;
+ pcie@0 {
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ ranges = <0x2000000 0x0 0xa0000000
+ 0x2000000 0x0 0xa0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+
+ pci1: pcie@ffe0a000 {
+ ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
+ interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x0 0x1
+ 0000 0x0 0x0 0x2 &mpic 0x1 0x1
+ 0000 0x0 0x0 0x3 &mpic 0x2 0x1
+ 0000 0x0 0x0 0x4 &mpic 0x3 0x1
+ >;
+ pcie@0 {
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ ranges = <0x2000000 0x0 0x80000000
+ 0x2000000 0x0 0x80000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/p1020rdb_camp_core1.dts b/arch/powerpc/boot/dts/p1020rdb_camp_core1.dts
new file mode 100644
index 000000000000..6ec02204a44e
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1020rdb_camp_core1.dts
@@ -0,0 +1,148 @@
+/*
+ * P1020 RDB Core1 Device Tree Source in CAMP mode.
+ *
+ * In CAMP mode, each core needs to have its own dts. Only mpic and L2 cache
+ * can be shared, all the other devices must be assigned to one core only.
+ * This dts allows core1 to have l2, eth0, crypto.
+ *
+ * Please note to add "-b 1" for core1's dts compiling.
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/include/ "p1020si.dtsi"
+
+/ {
+ model = "fsl,P1020RDB";
+ compatible = "fsl,P1020RDB", "fsl,MPC85XXRDB-CAMP";
+
+ aliases {
+ ethernet0 = &enet0;
+ serial0 = &serial1;
+ };
+
+ cpus {
+ PowerPC,P1020@0 {
+ status = "disabled";
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ };
+
+ localbus@ffe05000 {
+ status = "disabled";
+ };
+
+ soc@ffe00000 {
+ ecm-law@0 {
+ status = "disabled";
+ };
+
+ ecm@1000 {
+ status = "disabled";
+ };
+
+ memory-controller@2000 {
+ status = "disabled";
+ };
+
+ i2c@3000 {
+ status = "disabled";
+ };
+
+ i2c@3100 {
+ status = "disabled";
+ };
+
+ serial0: serial@4500 {
+ status = "disabled";
+ };
+
+ spi@7000 {
+ status = "disabled";
+ };
+
+ gpio: gpio-controller@f000 {
+ status = "disabled";
+ };
+
+ dma@21300 {
+ status = "disabled";
+ };
+
+ mdio@24000 {
+ status = "disabled";
+ };
+
+ mdio@25000 {
+ status = "disabled";
+ };
+
+ enet0: ethernet@b0000 {
+ fixed-link = <1 1 1000 0 0>;
+ phy-connection-type = "rgmii-id";
+
+ };
+
+ enet1: ethernet@b1000 {
+ status = "disabled";
+ };
+
+ enet2: ethernet@b2000 {
+ status = "disabled";
+ };
+
+ usb@22000 {
+ status = "disabled";
+ };
+
+ sdhci@2e000 {
+ status = "disabled";
+ };
+
+ mpic: pic@40000 {
+ protected-sources = <
+ 16 /* ecm, mem, L2, pci0, pci1 */
+ 43 42 59 /* i2c, serial0, spi */
+ 47 63 62 /* gpio, tdm */
+ 20 21 22 23 /* dma */
+ 03 02 /* mdio */
+ 35 36 40 /* enet1-queue-group0 */
+ 51 52 67 /* enet1-queue-group1 */
+ 31 32 33 /* enet2-queue-group0 */
+ 25 26 27 /* enet2-queue-group1 */
+ 28 72 58 /* usb, sdhci, crypto */
+ 0xb0 0xb1 0xb2 /* message */
+ 0xb3 0xb4 0xb5
+ 0xb6 0xb7
+ 0xe0 0xe1 0xe2 /* msi */
+ 0xe3 0xe4 0xe5
+ 0xe6 0xe7 /* sdhci, crypto , pci */
+ >;
+ };
+
+ msi@41600 {
+ status = "disabled";
+ };
+
+ global-utilities@e0000 { //global utilities block
+ status = "disabled";
+ };
+
+ };
+
+ pci0: pcie@ffe09000 {
+ status = "disabled";
+ };
+
+ pci1: pcie@ffe0a000 {
+ status = "disabled";
+ };
+};
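
In the two CAMP device trees above, the mpic node's protected-sources property lists the interrupt sources that belong to the other core, so each kernel instance has to leave them masked. A rough C sketch of how an interrupt-controller driver could consume such a property is shown below; the demo_* names are illustrative and this is not the actual arch/powerpc/sysdev/mpic.c code.

#include <linux/of.h>
#include <linux/bitops.h>
#include <linux/bitmap.h>

#define DEMO_MAX_SOURCES	256

static DECLARE_BITMAP(demo_protected, DEMO_MAX_SOURCES);

/* Record every source number listed in "protected-sources" so that
 * this kernel instance never configures or unmasks it. */
static void demo_mark_protected(struct device_node *np)
{
	const __be32 *prop;
	int len, i;

	prop = of_get_property(np, "protected-sources", &len);
	if (!prop)
		return;			/* nothing reserved for the other core */

	for (i = 0; i < len / 4; i++) {
		unsigned int src = be32_to_cpup(prop + i);

		if (src < DEMO_MAX_SOURCES)
			set_bit(src, demo_protected);
	}
}

/* Checked before a source is set up for use by this core. */
static bool demo_source_is_ours(unsigned int src)
{
	return !test_bit(src, demo_protected);
}
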
diff --git a/arch/powerpc/boot/dts/p1020si.dtsi b/arch/powerpc/boot/dts/p1020si.dtsi
new file mode 100644
index 000000000000..5c5acb66c3fc
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1020si.dtsi
@@ -0,0 +1,377 @@
+/*
+ * P1020si Device Tree Source
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/dts-v1/;
+/ {
+ compatible = "fsl,P1020";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ PowerPC,P1020@0 {
+ device_type = "cpu";
+ reg = <0x0>;
+ next-level-cache = <&L2>;
+ };
+
+ PowerPC,P1020@1 {
+ device_type = "cpu";
+ reg = <0x1>;
+ next-level-cache = <&L2>;
+ };
+ };
+
+ localbus@ffe05000 {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "fsl,p1020-elbc", "fsl,elbc", "simple-bus";
+ reg = <0 0xffe05000 0 0x1000>;
+ interrupts = <19 2>;
+ interrupt-parent = <&mpic>;
+ };
+
+ soc@ffe00000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "fsl,p1020-immr", "simple-bus";
+ ranges = <0x0 0x0 0xffe00000 0x100000>;
+ bus-frequency = <0>; // Filled out by uboot.
+
+ ecm-law@0 {
+ compatible = "fsl,ecm-law";
+ reg = <0x0 0x1000>;
+ fsl,num-laws = <12>;
+ };
+
+ ecm@1000 {
+ compatible = "fsl,p1020-ecm", "fsl,ecm";
+ reg = <0x1000 0x1000>;
+ interrupts = <16 2>;
+ interrupt-parent = <&mpic>;
+ };
+
+ memory-controller@2000 {
+ compatible = "fsl,p1020-memory-controller";
+ reg = <0x2000 0x1000>;
+ interrupt-parent = <&mpic>;
+ interrupts = <16 2>;
+ };
+
+ i2c@3000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <0>;
+ compatible = "fsl-i2c";
+ reg = <0x3000 0x100>;
+ interrupts = <43 2>;
+ interrupt-parent = <&mpic>;
+ dfsrr;
+ };
+
+ i2c@3100 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <1>;
+ compatible = "fsl-i2c";
+ reg = <0x3100 0x100>;
+ interrupts = <43 2>;
+ interrupt-parent = <&mpic>;
+ dfsrr;
+ };
+
+ serial0: serial@4500 {
+ cell-index = <0>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x4500 0x100>;
+ clock-frequency = <0>;
+ interrupts = <42 2>;
+ interrupt-parent = <&mpic>;
+ };
+
+ serial1: serial@4600 {
+ cell-index = <1>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x4600 0x100>;
+ clock-frequency = <0>;
+ interrupts = <42 2>;
+ interrupt-parent = <&mpic>;
+ };
+
+ spi@7000 {
+ cell-index = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,espi";
+ reg = <0x7000 0x1000>;
+ interrupts = <59 0x2>;
+ interrupt-parent = <&mpic>;
+ mode = "cpu";
+ };
+
+ gpio: gpio-controller@f000 {
+ #gpio-cells = <2>;
+ compatible = "fsl,mpc8572-gpio";
+ reg = <0xf000 0x100>;
+ interrupts = <47 0x2>;
+ interrupt-parent = <&mpic>;
+ gpio-controller;
+ };
+
+ L2: l2-cache-controller@20000 {
+ compatible = "fsl,p1020-l2-cache-controller";
+ reg = <0x20000 0x1000>;
+ cache-line-size = <32>; // 32 bytes
+ cache-size = <0x40000>; // L2,256K
+ interrupt-parent = <&mpic>;
+ interrupts = <16 2>;
+ };
+
+ dma@21300 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,eloplus-dma";
+ reg = <0x21300 0x4>;
+ ranges = <0x0 0x21100 0x200>;
+ cell-index = <0>;
+ dma-channel@0 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x0 0x80>;
+ cell-index = <0>;
+ interrupt-parent = <&mpic>;
+ interrupts = <20 2>;
+ };
+ dma-channel@80 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x80 0x80>;
+ cell-index = <1>;
+ interrupt-parent = <&mpic>;
+ interrupts = <21 2>;
+ };
+ dma-channel@100 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x100 0x80>;
+ cell-index = <2>;
+ interrupt-parent = <&mpic>;
+ interrupts = <22 2>;
+ };
+ dma-channel@180 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x180 0x80>;
+ cell-index = <3>;
+ interrupt-parent = <&mpic>;
+ interrupts = <23 2>;
+ };
+ };
+
+ mdio@24000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,etsec2-mdio";
+ reg = <0x24000 0x1000 0xb0030 0x4>;
+
+ };
+
+ mdio@25000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,etsec2-tbi";
+ reg = <0x25000 0x1000 0xb1030 0x4>;
+
+ };
+
+ enet0: ethernet@b0000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "network";
+ model = "eTSEC";
+ compatible = "fsl,etsec2";
+ fsl,num_rx_queues = <0x8>;
+ fsl,num_tx_queues = <0x8>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupt-parent = <&mpic>;
+
+ queue-group@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xb0000 0x1000>;
+ interrupts = <29 2 30 2 34 2>;
+ };
+
+ queue-group@1 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xb4000 0x1000>;
+ interrupts = <17 2 18 2 24 2>;
+ };
+ };
+
+ enet1: ethernet@b1000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "network";
+ model = "eTSEC";
+ compatible = "fsl,etsec2";
+ fsl,num_rx_queues = <0x8>;
+ fsl,num_tx_queues = <0x8>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupt-parent = <&mpic>;
+
+ queue-group@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xb1000 0x1000>;
+ interrupts = <35 2 36 2 40 2>;
+ };
+
+ queue-group@1 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xb5000 0x1000>;
+ interrupts = <51 2 52 2 67 2>;
+ };
+ };
+
+ enet2: ethernet@b2000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "network";
+ model = "eTSEC";
+ compatible = "fsl,etsec2";
+ fsl,num_rx_queues = <0x8>;
+ fsl,num_tx_queues = <0x8>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupt-parent = <&mpic>;
+
+ queue-group@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xb2000 0x1000>;
+ interrupts = <31 2 32 2 33 2>;
+ };
+
+ queue-group@1 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xb6000 0x1000>;
+ interrupts = <25 2 26 2 27 2>;
+ };
+ };
+
+ usb@22000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl-usb2-dr";
+ reg = <0x22000 0x1000>;
+ interrupt-parent = <&mpic>;
+ interrupts = <28 0x2>;
+ };
+
+ /* USB2 is shared with localbus, so it must be disabled
+ by default. We can't put 'status = "disabled";' here
+ since U-Boot doesn't clear the status property when
+ it enables USB2. OTOH, U-Boot does create a new node
+ when there isn't any. So, just comment it out.
+ usb@23000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl-usb2-dr";
+ reg = <0x23000 0x1000>;
+ interrupt-parent = <&mpic>;
+ interrupts = <46 0x2>;
+ phy_type = "ulpi";
+ };
+ */
+
+ sdhci@2e000 {
+ compatible = "fsl,p1020-esdhc", "fsl,esdhc";
+ reg = <0x2e000 0x1000>;
+ interrupts = <72 0x2>;
+ interrupt-parent = <&mpic>;
+ /* Filled in by U-Boot */
+ clock-frequency = <0>;
+ };
+
+ crypto@30000 {
+ compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4",
+ "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0";
+ reg = <0x30000 0x10000>;
+ interrupts = <45 2 58 2>;
+ interrupt-parent = <&mpic>;
+ fsl,num-channels = <4>;
+ fsl,channel-fifo-len = <24>;
+ fsl,exec-units-mask = <0xbfe>;
+ fsl,descriptor-types-mask = <0x3ab0ebf>;
+ };
+
+ mpic: pic@40000 {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ reg = <0x40000 0x40000>;
+ compatible = "chrp,open-pic";
+ device_type = "open-pic";
+ };
+
+ msi@41600 {
+ compatible = "fsl,p1020-msi", "fsl,mpic-msi";
+ reg = <0x41600 0x80>;
+ msi-available-ranges = <0 0x100>;
+ interrupts = <
+ 0xe0 0
+ 0xe1 0
+ 0xe2 0
+ 0xe3 0
+ 0xe4 0
+ 0xe5 0
+ 0xe6 0
+ 0xe7 0>;
+ interrupt-parent = <&mpic>;
+ };
+
+ global-utilities@e0000 { //global utilities block
+ compatible = "fsl,p1020-guts","fsl,p2020-guts";
+ reg = <0xe0000 0x1000>;
+ fsl,has-rstcr;
+ };
+ };
+
+ pci0: pcie@ffe09000 {
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <0 0xffe09000 0 0x1000>;
+ bus-range = <0 255>;
+ clock-frequency = <33333333>;
+ interrupt-parent = <&mpic>;
+ interrupts = <16 2>;
+ };
+
+ pci1: pcie@ffe0a000 {
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <0 0xffe0a000 0 0x1000>;
+ bus-range = <0 255>;
+ clock-frequency = <33333333>;
+ interrupt-parent = <&mpic>;
+ interrupts = <16 2>;
+ };
+};
diff --git a/arch/powerpc/boot/dts/p1022ds.dts b/arch/powerpc/boot/dts/p1022ds.dts
index 59ef405c1c91..4f685a779f4c 100644
--- a/arch/powerpc/boot/dts/p1022ds.dts
+++ b/arch/powerpc/boot/dts/p1022ds.dts
@@ -52,7 +52,7 @@
#size-cells = <1>;
compatible = "fsl,p1022-elbc", "fsl,elbc", "simple-bus";
reg = <0 0xffe05000 0 0x1000>;
- interrupts = <19 2>;
+ interrupts = <19 2 0 0>;
ranges = <0x0 0x0 0xf 0xe8000000 0x08000000
0x1 0x0 0xf 0xe0000000 0x08000000
@@ -157,7 +157,7 @@
* IRQ8 is generated if the "EVENT" switch is pressed
* and PX_CTL[EVESEL] is set to 00.
*/
- interrupts = <8 8>;
+ interrupts = <8 8 0 0>;
};
};
@@ -178,13 +178,13 @@
ecm@1000 {
compatible = "fsl,p1022-ecm", "fsl,ecm";
reg = <0x1000 0x1000>;
- interrupts = <16 2>;
+ interrupts = <16 2 0 0>;
};
memory-controller@2000 {
compatible = "fsl,p1022-memory-controller";
reg = <0x2000 0x1000>;
- interrupts = <16 2>;
+ interrupts = <16 2 0 0>;
};
i2c@3000 {
@@ -193,7 +193,7 @@
cell-index = <0>;
compatible = "fsl-i2c";
reg = <0x3000 0x100>;
- interrupts = <43 2>;
+ interrupts = <43 2 0 0>;
dfsrr;
};
@@ -203,7 +203,7 @@
cell-index = <1>;
compatible = "fsl-i2c";
reg = <0x3100 0x100>;
- interrupts = <43 2>;
+ interrupts = <43 2 0 0>;
dfsrr;
wm8776:codec@1a {
@@ -220,7 +220,7 @@
compatible = "ns16550";
reg = <0x4500 0x100>;
clock-frequency = <0>;
- interrupts = <42 2>;
+ interrupts = <42 2 0 0>;
};
serial1: serial@4600 {
@@ -229,7 +229,7 @@
compatible = "ns16550";
reg = <0x4600 0x100>;
clock-frequency = <0>;
- interrupts = <42 2>;
+ interrupts = <42 2 0 0>;
};
spi@7000 {
@@ -238,7 +238,7 @@
#size-cells = <0>;
compatible = "fsl,espi";
reg = <0x7000 0x1000>;
- interrupts = <59 0x2>;
+ interrupts = <59 0x2 0 0>;
espi,num-ss-bits = <4>;
mode = "cpu";
@@ -275,7 +275,7 @@
compatible = "fsl,mpc8610-ssi";
cell-index = <0>;
reg = <0x15000 0x100>;
- interrupts = <75 2>;
+ interrupts = <75 2 0 0>;
fsl,mode = "i2s-slave";
codec-handle = <&wm8776>;
fsl,playback-dma = <&dma00>;
@@ -294,25 +294,25 @@
compatible = "fsl,ssi-dma-channel";
reg = <0x0 0x80>;
cell-index = <0>;
- interrupts = <76 2>;
+ interrupts = <76 2 0 0>;
};
dma01: dma-channel@80 {
compatible = "fsl,ssi-dma-channel";
reg = <0x80 0x80>;
cell-index = <1>;
- interrupts = <77 2>;
+ interrupts = <77 2 0 0>;
};
dma-channel@100 {
compatible = "fsl,eloplus-dma-channel";
reg = <0x100 0x80>;
cell-index = <2>;
- interrupts = <78 2>;
+ interrupts = <78 2 0 0>;
};
dma-channel@180 {
compatible = "fsl,eloplus-dma-channel";
reg = <0x180 0x80>;
cell-index = <3>;
- interrupts = <79 2>;
+ interrupts = <79 2 0 0>;
};
};
@@ -320,7 +320,7 @@
#gpio-cells = <2>;
compatible = "fsl,mpc8572-gpio";
reg = <0xf000 0x100>;
- interrupts = <47 0x2>;
+ interrupts = <47 0x2 0 0>;
gpio-controller;
};
@@ -329,7 +329,7 @@
reg = <0x20000 0x1000>;
cache-line-size = <32>; // 32 bytes
cache-size = <0x40000>; // L2, 256K
- interrupts = <16 2>;
+ interrupts = <16 2 0 0>;
};
dma@21300 {
@@ -343,25 +343,25 @@
compatible = "fsl,eloplus-dma-channel";
reg = <0x0 0x80>;
cell-index = <0>;
- interrupts = <20 2>;
+ interrupts = <20 2 0 0>;
};
dma-channel@80 {
compatible = "fsl,eloplus-dma-channel";
reg = <0x80 0x80>;
cell-index = <1>;
- interrupts = <21 2>;
+ interrupts = <21 2 0 0>;
};
dma-channel@100 {
compatible = "fsl,eloplus-dma-channel";
reg = <0x100 0x80>;
cell-index = <2>;
- interrupts = <22 2>;
+ interrupts = <22 2 0 0>;
};
dma-channel@180 {
compatible = "fsl,eloplus-dma-channel";
reg = <0x180 0x80>;
cell-index = <3>;
- interrupts = <23 2>;
+ interrupts = <23 2 0 0>;
};
};
@@ -370,7 +370,7 @@
#size-cells = <0>;
compatible = "fsl-usb2-dr";
reg = <0x22000 0x1000>;
- interrupts = <28 0x2>;
+ interrupts = <28 0x2 0 0>;
phy_type = "ulpi";
};
@@ -381,11 +381,11 @@
reg = <0x24000 0x1000 0xb0030 0x4>;
phy0: ethernet-phy@0 {
- interrupts = <3 1>;
+ interrupts = <3 1 0 0>;
reg = <0x1>;
};
phy1: ethernet-phy@1 {
- interrupts = <9 1>;
+ interrupts = <9 1 0 0>;
reg = <0x2>;
};
};
@@ -416,13 +416,13 @@
#address-cells = <1>;
#size-cells = <1>;
reg = <0xB0000 0x1000>;
- interrupts = <29 2 30 2 34 2>;
+ interrupts = <29 2 0 0 30 2 0 0 34 2 0 0>;
};
queue-group@1{
#address-cells = <1>;
#size-cells = <1>;
reg = <0xB4000 0x1000>;
- interrupts = <17 2 18 2 24 2>;
+ interrupts = <17 2 0 0 18 2 0 0 24 2 0 0>;
};
};
@@ -443,20 +443,20 @@
#address-cells = <1>;
#size-cells = <1>;
reg = <0xB1000 0x1000>;
- interrupts = <35 2 36 2 40 2>;
+ interrupts = <35 2 0 0 36 2 0 0 40 2 0 0>;
};
queue-group@1{
#address-cells = <1>;
#size-cells = <1>;
reg = <0xB5000 0x1000>;
- interrupts = <51 2 52 2 67 2>;
+ interrupts = <51 2 0 0 52 2 0 0 67 2 0 0>;
};
};
sdhci@2e000 {
compatible = "fsl,p1022-esdhc", "fsl,esdhc";
reg = <0x2e000 0x1000>;
- interrupts = <72 0x2>;
+ interrupts = <72 0x2 0 0>;
fsl,sdhci-auto-cmd12;
/* Filled in by U-Boot */
clock-frequency = <0>;
@@ -467,7 +467,7 @@
"fsl,sec2.4", "fsl,sec2.2", "fsl,sec2.1",
"fsl,sec2.0";
reg = <0x30000 0x10000>;
- interrupts = <45 2 58 2>;
+ interrupts = <45 2 0 0 58 2 0 0>;
fsl,num-channels = <4>;
fsl,channel-fifo-len = <24>;
fsl,exec-units-mask = <0x97c>;
@@ -478,14 +478,14 @@
compatible = "fsl,p1022-sata", "fsl,pq-sata-v2";
reg = <0x18000 0x1000>;
cell-index = <1>;
- interrupts = <74 0x2>;
+ interrupts = <74 0x2 0 0>;
};
sata@19000 {
compatible = "fsl,p1022-sata", "fsl,pq-sata-v2";
reg = <0x19000 0x1000>;
cell-index = <2>;
- interrupts = <41 0x2>;
+ interrupts = <41 0x2 0 0>;
};
power@e0070{
@@ -496,21 +496,33 @@
display@10000 {
compatible = "fsl,diu", "fsl,p1022-diu";
reg = <0x10000 1000>;
- interrupts = <64 2>;
+ interrupts = <64 2 0 0>;
};
timer@41100 {
compatible = "fsl,mpic-global-timer";
- reg = <0x41100 0x204>;
- interrupts = <0xf7 0x2>;
+ reg = <0x41100 0x100 0x41300 4>;
+ interrupts = <0 0 3 0
+ 1 0 3 0
+ 2 0 3 0
+ 3 0 3 0>;
+ };
+
+ timer@42100 {
+ compatible = "fsl,mpic-global-timer";
+ reg = <0x42100 0x100 0x42300 4>;
+ interrupts = <4 0 3 0
+ 5 0 3 0
+ 6 0 3 0
+ 7 0 3 0>;
};
mpic: pic@40000 {
interrupt-controller;
#address-cells = <0>;
- #interrupt-cells = <2>;
+ #interrupt-cells = <4>;
reg = <0x40000 0x40000>;
- compatible = "chrp,open-pic";
+ compatible = "fsl,mpic";
device_type = "open-pic";
};
@@ -519,14 +531,14 @@
reg = <0x41600 0x80>;
msi-available-ranges = <0 0x100>;
interrupts = <
- 0xe0 0
- 0xe1 0
- 0xe2 0
- 0xe3 0
- 0xe4 0
- 0xe5 0
- 0xe6 0
- 0xe7 0>;
+ 0xe0 0 0 0
+ 0xe1 0 0 0
+ 0xe2 0 0 0
+ 0xe3 0 0 0
+ 0xe4 0 0 0
+ 0xe5 0 0 0
+ 0xe6 0 0 0
+ 0xe7 0 0 0>;
};
global-utilities@e0000 { //global utilities block
@@ -547,7 +559,7 @@
ranges = <0x2000000 0x0 0xa0000000 0xc 0x20000000 0x0 0x20000000
0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
clock-frequency = <33333333>;
- interrupts = <16 2>;
+ interrupts = <16 2 0 0>;
interrupt-map-mask = <0xf800 0 0 7>;
interrupt-map = <
/* IDSEL 0x0 */
@@ -582,7 +594,7 @@
ranges = <0x2000000 0x0 0xc0000000 0xc 0x40000000 0x0 0x20000000
0x1000000 0x0 0x00000000 0xf 0xffc20000 0x0 0x10000>;
clock-frequency = <33333333>;
- interrupts = <16 2>;
+ interrupts = <16 2 0 0>;
interrupt-map-mask = <0xf800 0 0 7>;
interrupt-map = <
/* IDSEL 0x0 */
@@ -618,7 +630,7 @@
ranges = <0x2000000 0x0 0x80000000 0xc 0x00000000 0x0 0x20000000
0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x10000>;
clock-frequency = <33333333>;
- interrupts = <16 2>;
+ interrupts = <16 2 0 0>;
interrupt-map-mask = <0xf800 0 0 7>;
interrupt-map = <
/* IDSEL 0x0 */
diff --git a/arch/powerpc/boot/dts/p2020ds.dts b/arch/powerpc/boot/dts/p2020ds.dts
index 11019142813c..2bcf3683d223 100644
--- a/arch/powerpc/boot/dts/p2020ds.dts
+++ b/arch/powerpc/boot/dts/p2020ds.dts
@@ -1,7 +1,7 @@
/*
* P2020 DS Device Tree Source
*
- * Copyright 2009 Freescale Semiconductor Inc.
+ * Copyright 2009-2011 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -9,12 +9,11 @@
* option) any later version.
*/
-/dts-v1/;
+/include/ "p2020si.dtsi"
+
/ {
- model = "fsl,P2020";
+ model = "fsl,P2020DS";
compatible = "fsl,P2020DS";
- #address-cells = <2>;
- #size-cells = <2>;
aliases {
ethernet0 = &enet0;
@@ -27,35 +26,13 @@
pci2 = &pci2;
};
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- PowerPC,P2020@0 {
- device_type = "cpu";
- reg = <0x0>;
- next-level-cache = <&L2>;
- };
-
- PowerPC,P2020@1 {
- device_type = "cpu";
- reg = <0x1>;
- next-level-cache = <&L2>;
- };
- };
memory {
device_type = "memory";
};
localbus@ffe05000 {
- #address-cells = <2>;
- #size-cells = <1>;
compatible = "fsl,elbc", "simple-bus";
- reg = <0 0xffe05000 0 0x1000>;
- interrupts = <19 2>;
- interrupt-parent = <&mpic>;
-
ranges = <0x0 0x0 0x0 0xe8000000 0x08000000
0x1 0x0 0x0 0xe0000000 0x08000000
0x2 0x0 0x0 0xffa00000 0x00040000
@@ -158,352 +135,77 @@
};
soc@ffe00000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "soc";
- compatible = "fsl,p2020-immr", "simple-bus";
- ranges = <0x0 0 0xffe00000 0x100000>;
- bus-frequency = <0>; // Filled out by uboot.
-
- ecm-law@0 {
- compatible = "fsl,ecm-law";
- reg = <0x0 0x1000>;
- fsl,num-laws = <12>;
- };
-
- ecm@1000 {
- compatible = "fsl,p2020-ecm", "fsl,ecm";
- reg = <0x1000 0x1000>;
- interrupts = <17 2>;
- interrupt-parent = <&mpic>;
- };
-
- memory-controller@2000 {
- compatible = "fsl,p2020-memory-controller";
- reg = <0x2000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <18 2>;
- };
-
- i2c@3000 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <0>;
- compatible = "fsl-i2c";
- reg = <0x3000 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
- };
-
- i2c@3100 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <1>;
- compatible = "fsl-i2c";
- reg = <0x3100 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
- };
- serial0: serial@4500 {
- cell-index = <0>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4500 0x100>;
- clock-frequency = <0>;
- interrupts = <42 2>;
- interrupt-parent = <&mpic>;
- };
-
- serial1: serial@4600 {
- cell-index = <1>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4600 0x100>;
- clock-frequency = <0>;
- interrupts = <42 2>;
- interrupt-parent = <&mpic>;
- };
-
- spi@7000 {
- compatible = "fsl,espi";
- reg = <0x7000 0x1000>;
- interrupts = <59 0x2>;
- interrupt-parent = <&mpic>;
+ usb@22000 {
+ phy_type = "ulpi";
};
- dma@c300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,eloplus-dma";
- reg = <0xc300 0x4>;
- ranges = <0x0 0xc100 0x200>;
- cell-index = <1>;
- dma-channel@0 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
+ mdio@24520 {
+ phy0: ethernet-phy@0 {
interrupt-parent = <&mpic>;
- interrupts = <76 2>;
+ interrupts = <3 1>;
+ reg = <0x0>;
};
- dma-channel@80 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
+ phy1: ethernet-phy@1 {
interrupt-parent = <&mpic>;
- interrupts = <77 2>;
+ interrupts = <3 1>;
+ reg = <0x1>;
};
- dma-channel@100 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
+ phy2: ethernet-phy@2 {
interrupt-parent = <&mpic>;
- interrupts = <78 2>;
+ interrupts = <3 1>;
+ reg = <0x2>;
};
- dma-channel@180 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupt-parent = <&mpic>;
- interrupts = <79 2>;
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
};
- };
- gpio: gpio-controller@f000 {
- #gpio-cells = <2>;
- compatible = "fsl,mpc8572-gpio";
- reg = <0xf000 0x100>;
- interrupts = <47 0x2>;
- interrupt-parent = <&mpic>;
- gpio-controller;
};
- L2: l2-cache-controller@20000 {
- compatible = "fsl,p2020-l2-cache-controller";
- reg = <0x20000 0x1000>;
- cache-line-size = <32>; // 32 bytes
- cache-size = <0x80000>; // L2, 512k
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
+ mdio@25520 {
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
- dma@21300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,eloplus-dma";
- reg = <0x21300 0x4>;
- ranges = <0x0 0x21100 0x200>;
- cell-index = <0>;
- dma-channel@0 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <20 2>;
- };
- dma-channel@80 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupt-parent = <&mpic>;
- interrupts = <21 2>;
+ mdio@26520 {
+ tbi2: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
};
- dma-channel@100 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupt-parent = <&mpic>;
- interrupts = <22 2>;
- };
- dma-channel@180 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupt-parent = <&mpic>;
- interrupts = <23 2>;
- };
- };
- usb@22000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl-usb2-dr";
- reg = <0x22000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <28 0x2>;
- phy_type = "ulpi";
};
enet0: ethernet@24000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <0>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x24000 0x1000>;
- ranges = <0x0 0x24000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <29 2 30 2 34 2>;
- interrupt-parent = <&mpic>;
tbi-handle = <&tbi0>;
phy-handle = <&phy0>;
phy-connection-type = "rgmii-id";
-
- mdio@520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-mdio";
- reg = <0x520 0x20>;
-
- phy0: ethernet-phy@0 {
- interrupt-parent = <&mpic>;
- interrupts = <3 1>;
- reg = <0x0>;
- };
- phy1: ethernet-phy@1 {
- interrupt-parent = <&mpic>;
- interrupts = <3 1>;
- reg = <0x1>;
- };
- phy2: ethernet-phy@2 {
- interrupt-parent = <&mpic>;
- interrupts = <3 1>;
- reg = <0x2>;
- };
- tbi0: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
};
enet1: ethernet@25000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <1>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x25000 0x1000>;
- ranges = <0x0 0x25000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <35 2 36 2 40 2>;
- interrupt-parent = <&mpic>;
tbi-handle = <&tbi1>;
phy-handle = <&phy1>;
phy-connection-type = "rgmii-id";
- mdio@520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-tbi";
- reg = <0x520 0x20>;
-
- tbi1: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
};
enet2: ethernet@26000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <2>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x26000 0x1000>;
- ranges = <0x0 0x26000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <31 2 32 2 33 2>;
- interrupt-parent = <&mpic>;
tbi-handle = <&tbi2>;
phy-handle = <&phy2>;
phy-connection-type = "rgmii-id";
-
- mdio@520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-tbi";
- reg = <0x520 0x20>;
-
- tbi2: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
- };
-
- sdhci@2e000 {
- compatible = "fsl,p2020-esdhc", "fsl,esdhc";
- reg = <0x2e000 0x1000>;
- interrupts = <72 0x2>;
- interrupt-parent = <&mpic>;
- /* Filled in by U-Boot */
- clock-frequency = <0>;
- };
-
- crypto@30000 {
- compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4",
- "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0";
- reg = <0x30000 0x10000>;
- interrupts = <45 2 58 2>;
- interrupt-parent = <&mpic>;
- fsl,num-channels = <4>;
- fsl,channel-fifo-len = <24>;
- fsl,exec-units-mask = <0xbfe>;
- fsl,descriptor-types-mask = <0x3ab0ebf>;
};
- mpic: pic@40000 {
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <2>;
- reg = <0x40000 0x40000>;
- compatible = "chrp,open-pic";
- device_type = "open-pic";
- };
msi@41600 {
compatible = "fsl,mpic-msi";
- reg = <0x41600 0x80>;
- msi-available-ranges = <0 0x100>;
- interrupts = <
- 0xe0 0
- 0xe1 0
- 0xe2 0
- 0xe3 0
- 0xe4 0
- 0xe5 0
- 0xe6 0
- 0xe7 0>;
- interrupt-parent = <&mpic>;
- };
-
- global-utilities@e0000 { //global utilities block
- compatible = "fsl,p2020-guts";
- reg = <0xe0000 0x1000>;
- fsl,has-rstcr;
};
};
pci0: pcie@ffe08000 {
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0 0xffe08000 0 0x1000>;
- bus-range = <0 255>;
ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <24 2>;
interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
interrupt-map = <
/* IDSEL 0x0 */
@@ -528,18 +230,8 @@
};
pci1: pcie@ffe09000 {
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0 0xffe09000 0 0x1000>;
- bus-range = <0 255>;
ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <25 2>;
interrupt-map-mask = <0xff00 0x0 0x0 0x7>;
interrupt-map = <
@@ -667,18 +359,8 @@
};
pci2: pcie@ffe0a000 {
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0 0xffe0a000 0 0x1000>;
- bus-range = <0 255>;
ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000
0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <26 2>;
interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
interrupt-map = <
/* IDSEL 0x0 */
diff --git a/arch/powerpc/boot/dts/p2020rdb.dts b/arch/powerpc/boot/dts/p2020rdb.dts
index da4cb0d8d215..3782a58f13be 100644
--- a/arch/powerpc/boot/dts/p2020rdb.dts
+++ b/arch/powerpc/boot/dts/p2020rdb.dts
@@ -1,7 +1,7 @@
/*
* P2020 RDB Device Tree Source
*
- * Copyright 2009 Freescale Semiconductor Inc.
+ * Copyright 2009-2011 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -9,12 +9,11 @@
* option) any later version.
*/
-/dts-v1/;
+/include/ "p2020si.dtsi"
+
/ {
- model = "fsl,P2020";
+ model = "fsl,P2020RDB";
compatible = "fsl,P2020RDB";
- #address-cells = <2>;
- #size-cells = <2>;
aliases {
ethernet0 = &enet0;
@@ -26,34 +25,11 @@
pci1 = &pci1;
};
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- PowerPC,P2020@0 {
- device_type = "cpu";
- reg = <0x0>;
- next-level-cache = <&L2>;
- };
-
- PowerPC,P2020@1 {
- device_type = "cpu";
- reg = <0x1>;
- next-level-cache = <&L2>;
- };
- };
-
memory {
device_type = "memory";
};
localbus@ffe05000 {
- #address-cells = <2>;
- #size-cells = <1>;
- compatible = "fsl,p2020-elbc", "fsl,elbc", "simple-bus";
- reg = <0 0xffe05000 0 0x1000>;
- interrupts = <19 2>;
- interrupt-parent = <&mpic>;
/* NOR and NAND Flashes */
ranges = <0x0 0x0 0x0 0xef000000 0x01000000
@@ -165,90 +141,16 @@
};
soc@ffe00000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "soc";
- compatible = "fsl,p2020-immr", "simple-bus";
- ranges = <0x0 0x0 0xffe00000 0x100000>;
- bus-frequency = <0>; // Filled out by uboot.
-
- ecm-law@0 {
- compatible = "fsl,ecm-law";
- reg = <0x0 0x1000>;
- fsl,num-laws = <12>;
- };
-
- ecm@1000 {
- compatible = "fsl,p2020-ecm", "fsl,ecm";
- reg = <0x1000 0x1000>;
- interrupts = <17 2>;
- interrupt-parent = <&mpic>;
- };
-
- memory-controller@2000 {
- compatible = "fsl,p2020-memory-controller";
- reg = <0x2000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <18 2>;
- };
-
i2c@3000 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <0>;
- compatible = "fsl-i2c";
- reg = <0x3000 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
rtc@68 {
compatible = "dallas,ds1339";
reg = <0x68>;
};
};
- i2c@3100 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <1>;
- compatible = "fsl-i2c";
- reg = <0x3100 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
- };
-
- serial0: serial@4500 {
- cell-index = <0>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4500 0x100>;
- clock-frequency = <0>;
- interrupts = <42 2>;
- interrupt-parent = <&mpic>;
- };
-
- serial1: serial@4600 {
- cell-index = <1>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4600 0x100>;
- clock-frequency = <0>;
- interrupts = <42 2>;
- interrupt-parent = <&mpic>;
- };
+ spi@7000 {
- spi@7000 {
- cell-index = <0>;
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,espi";
- reg = <0x7000 0x1000>;
- interrupts = <59 0x2>;
- interrupt-parent = <&mpic>;
- mode = "cpu";
-
- fsl_m25p80@0 {
+ fsl_m25p80@0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "fsl,espi-flash";
@@ -294,254 +196,68 @@
};
};
- dma@c300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,eloplus-dma";
- reg = <0xc300 0x4>;
- ranges = <0x0 0xc100 0x200>;
- cell-index = <1>;
- dma-channel@0 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <76 2>;
- };
- dma-channel@80 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupt-parent = <&mpic>;
- interrupts = <77 2>;
- };
- dma-channel@100 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupt-parent = <&mpic>;
- interrupts = <78 2>;
- };
- dma-channel@180 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupt-parent = <&mpic>;
- interrupts = <79 2>;
- };
- };
-
- gpio: gpio-controller@f000 {
- #gpio-cells = <2>;
- compatible = "fsl,mpc8572-gpio";
- reg = <0xf000 0x100>;
- interrupts = <47 0x2>;
- interrupt-parent = <&mpic>;
- gpio-controller;
- };
-
- L2: l2-cache-controller@20000 {
- compatible = "fsl,p2020-l2-cache-controller";
- reg = <0x20000 0x1000>;
- cache-line-size = <32>; // 32 bytes
- cache-size = <0x80000>; // L2,512K
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
+ usb@22000 {
+ phy_type = "ulpi";
};
- dma@21300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,eloplus-dma";
- reg = <0x21300 0x4>;
- ranges = <0x0 0x21100 0x200>;
- cell-index = <0>;
- dma-channel@0 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
+ mdio@24520 {
+ phy0: ethernet-phy@0 {
interrupt-parent = <&mpic>;
- interrupts = <20 2>;
- };
- dma-channel@80 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupt-parent = <&mpic>;
- interrupts = <21 2>;
- };
- dma-channel@100 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupt-parent = <&mpic>;
- interrupts = <22 2>;
- };
- dma-channel@180 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
+ interrupts = <3 1>;
+ reg = <0x0>;
+ };
+ phy1: ethernet-phy@1 {
interrupt-parent = <&mpic>;
- interrupts = <23 2>;
+ interrupts = <3 1>;
+ reg = <0x1>;
+ };
+ };
+
+ mdio@25520 {
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
};
};
- usb@22000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl-usb2-dr";
- reg = <0x22000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <28 0x2>;
- phy_type = "ulpi";
+ mdio@26520 {
+ status = "disabled";
};
enet0: ethernet@24000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <0>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x24000 0x1000>;
- ranges = <0x0 0x24000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <29 2 30 2 34 2>;
- interrupt-parent = <&mpic>;
fixed-link = <1 1 1000 0 0>;
phy-connection-type = "rgmii-id";
-
- mdio@520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-mdio";
- reg = <0x520 0x20>;
-
- phy0: ethernet-phy@0 {
- interrupt-parent = <&mpic>;
- interrupts = <3 1>;
- reg = <0x0>;
- };
- phy1: ethernet-phy@1 {
- interrupt-parent = <&mpic>;
- interrupts = <3 1>;
- reg = <0x1>;
- };
- };
};
enet1: ethernet@25000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <1>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x25000 0x1000>;
- ranges = <0x0 0x25000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <35 2 36 2 40 2>;
- interrupt-parent = <&mpic>;
tbi-handle = <&tbi0>;
phy-handle = <&phy0>;
phy-connection-type = "sgmii";
-
- mdio@520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-tbi";
- reg = <0x520 0x20>;
-
- tbi0: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
};
enet2: ethernet@26000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <2>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x26000 0x1000>;
- ranges = <0x0 0x26000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <31 2 32 2 33 2>;
- interrupt-parent = <&mpic>;
phy-handle = <&phy1>;
phy-connection-type = "rgmii-id";
};
- sdhci@2e000 {
- compatible = "fsl,p2020-esdhc", "fsl,esdhc";
- reg = <0x2e000 0x1000>;
- interrupts = <72 0x2>;
- interrupt-parent = <&mpic>;
- /* Filled in by U-Boot */
- clock-frequency = <0>;
- };
-
- crypto@30000 {
- compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4",
- "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0";
- reg = <0x30000 0x10000>;
- interrupts = <45 2 58 2>;
- interrupt-parent = <&mpic>;
- fsl,num-channels = <4>;
- fsl,channel-fifo-len = <24>;
- fsl,exec-units-mask = <0xbfe>;
- fsl,descriptor-types-mask = <0x3ab0ebf>;
- };
-
- mpic: pic@40000 {
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <2>;
- reg = <0x40000 0x40000>;
- compatible = "chrp,open-pic";
- device_type = "open-pic";
- };
-
- msi@41600 {
- compatible = "fsl,p2020-msi", "fsl,mpic-msi";
- reg = <0x41600 0x80>;
- msi-available-ranges = <0 0x100>;
- interrupts = <
- 0xe0 0
- 0xe1 0
- 0xe2 0
- 0xe3 0
- 0xe4 0
- 0xe5 0
- 0xe6 0
- 0xe7 0>;
- interrupt-parent = <&mpic>;
- };
+ };
- global-utilities@e0000 { //global utilities block
- compatible = "fsl,p2020-guts";
- reg = <0xe0000 0x1000>;
- fsl,has-rstcr;
- };
+ pci0: pcie@ffe08000 {
+ status = "disabled";
};
- pci0: pcie@ffe09000 {
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0 0xffe09000 0 0x1000>;
- bus-range = <0 255>;
+ pci1: pcie@ffe09000 {
ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
- 0x1000000 0x0 0x00000000 0 0xffc30000 0x0 0x10000>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <25 2>;
- pcie@0 {
+ 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
+ interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x4 0x1
+ 0000 0x0 0x0 0x2 &mpic 0x5 0x1
+ 0000 0x0 0x0 0x3 &mpic 0x6 0x1
+ 0000 0x0 0x0 0x4 &mpic 0x7 0x1
+ >;
+ pcie@0 {
reg = <0x0 0x0 0x0 0x0 0x0>;
#size-cells = <2>;
#address-cells = <3>;
@@ -556,26 +272,24 @@
};
};
- pci1: pcie@ffe0a000 {
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0 0xffe0a000 0 0x1000>;
- bus-range = <0 255>;
- ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000
- 0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <26 2>;
+ pci2: pcie@ffe0a000 {
+ ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
+ interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x0 0x1
+ 0000 0x0 0x0 0x2 &mpic 0x1 0x1
+ 0000 0x0 0x0 0x3 &mpic 0x2 0x1
+ 0000 0x0 0x0 0x4 &mpic 0x3 0x1
+ >;
pcie@0 {
reg = <0x0 0x0 0x0 0x0 0x0>;
#size-cells = <2>;
#address-cells = <3>;
device_type = "pci";
- ranges = <0x2000000 0x0 0xc0000000
- 0x2000000 0x0 0xc0000000
+ ranges = <0x2000000 0x0 0x80000000
+ 0x2000000 0x0 0x80000000
0x0 0x20000000
0x1000000 0x0 0x0
diff --git a/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts b/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts
index 0fe93d0c8b2e..fc8ddddfccb6 100644
--- a/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts
+++ b/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts
@@ -6,7 +6,7 @@
* This dts file allows core0 to have memory, l2, i2c, spi, gpio, dma1, usb,
* eth1, eth2, sdhc, crypto, global-util, pci0.
*
- * Copyright 2009 Freescale Semiconductor Inc.
+ * Copyright 2009-2011 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -14,12 +14,11 @@
* option) any later version.
*/
-/dts-v1/;
+/include/ "p2020si.dtsi"
+
/ {
- model = "fsl,P2020";
+ model = "fsl,P2020RDB";
compatible = "fsl,P2020RDB", "fsl,MPC85XXRDB-CAMP";
- #address-cells = <2>;
- #size-cells = <2>;
aliases {
ethernet1 = &enet1;
@@ -29,91 +28,33 @@
};
cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- PowerPC,P2020@0 {
- device_type = "cpu";
- reg = <0x0>;
- next-level-cache = <&L2>;
+ PowerPC,P2020@1 {
+ status = "disabled";
};
+
};
memory {
device_type = "memory";
};
- soc@ffe00000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "soc";
- compatible = "fsl,p2020-immr", "simple-bus";
- ranges = <0x0 0x0 0xffe00000 0x100000>;
- bus-frequency = <0>; // Filled out by uboot.
-
- ecm-law@0 {
- compatible = "fsl,ecm-law";
- reg = <0x0 0x1000>;
- fsl,num-laws = <12>;
- };
-
- ecm@1000 {
- compatible = "fsl,p2020-ecm", "fsl,ecm";
- reg = <0x1000 0x1000>;
- interrupts = <17 2>;
- interrupt-parent = <&mpic>;
- };
-
- memory-controller@2000 {
- compatible = "fsl,p2020-memory-controller";
- reg = <0x2000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <18 2>;
- };
+ localbus@ffe05000 {
+ status = "disabled";
+ };
+ soc@ffe00000 {
i2c@3000 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <0>;
- compatible = "fsl-i2c";
- reg = <0x3000 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
rtc@68 {
compatible = "dallas,ds1339";
reg = <0x68>;
};
};
- i2c@3100 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <1>;
- compatible = "fsl-i2c";
- reg = <0x3100 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
- };
-
- serial0: serial@4500 {
- cell-index = <0>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4500 0x100>;
- clock-frequency = <0>;
+ serial1: serial@4600 {
+ status = "disabled";
};
spi@7000 {
- cell-index = <0>;
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,espi";
- reg = <0x7000 0x1000>;
- interrupts = <59 0x2>;
- interrupt-parent = <&mpic>;
- mode = "cpu";
fsl_m25p80@0 {
#address-cells = <1>;
@@ -161,76 +102,15 @@
};
};
- gpio: gpio-controller@f000 {
- #gpio-cells = <2>;
- compatible = "fsl,mpc8572-gpio";
- reg = <0xf000 0x100>;
- interrupts = <47 0x2>;
- interrupt-parent = <&mpic>;
- gpio-controller;
- };
-
- L2: l2-cache-controller@20000 {
- compatible = "fsl,p2020-l2-cache-controller";
- reg = <0x20000 0x1000>;
- cache-line-size = <32>; // 32 bytes
- cache-size = <0x80000>; // L2,512K
- interrupt-parent = <&mpic>;
- interrupts = <16 2>;
- };
-
- dma@21300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,eloplus-dma";
- reg = <0x21300 0x4>;
- ranges = <0x0 0x21100 0x200>;
- cell-index = <0>;
- dma-channel@0 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <20 2>;
- };
- dma-channel@80 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupt-parent = <&mpic>;
- interrupts = <21 2>;
- };
- dma-channel@100 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupt-parent = <&mpic>;
- interrupts = <22 2>;
- };
- dma-channel@180 {
- compatible = "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupt-parent = <&mpic>;
- interrupts = <23 2>;
- };
+ dma@c300 {
+ status = "disabled";
};
usb@22000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl-usb2-dr";
- reg = <0x22000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <28 0x2>;
phy_type = "ulpi";
};
mdio@24520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-mdio";
- reg = <0x24520 0x20>;
phy0: ethernet-phy@0 {
interrupt-parent = <&mpic>;
@@ -245,29 +125,21 @@
};
mdio@25520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-tbi";
- reg = <0x26520 0x20>;
-
tbi0: tbi-phy@11 {
reg = <0x11>;
device_type = "tbi-phy";
};
};
+ mdio@26520 {
+ status = "disabled";
+ };
+
+ enet0: ethernet@24000 {
+ status = "disabled";
+ };
+
enet1: ethernet@25000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <1>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x25000 0x1000>;
- ranges = <0x0 0x25000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <35 2 36 2 40 2>;
- interrupt-parent = <&mpic>;
tbi-handle = <&tbi0>;
phy-handle = <&phy0>;
phy-connection-type = "sgmii";
@@ -275,49 +147,12 @@
};
enet2: ethernet@26000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <2>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x26000 0x1000>;
- ranges = <0x0 0x26000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <31 2 32 2 33 2>;
- interrupt-parent = <&mpic>;
phy-handle = <&phy1>;
phy-connection-type = "rgmii-id";
};
- sdhci@2e000 {
- compatible = "fsl,p2020-esdhc", "fsl,esdhc";
- reg = <0x2e000 0x1000>;
- interrupts = <72 0x2>;
- interrupt-parent = <&mpic>;
- /* Filled in by U-Boot */
- clock-frequency = <0>;
- };
-
- crypto@30000 {
- compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4",
- "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0";
- reg = <0x30000 0x10000>;
- interrupts = <45 2 58 2>;
- interrupt-parent = <&mpic>;
- fsl,num-channels = <4>;
- fsl,channel-fifo-len = <24>;
- fsl,exec-units-mask = <0xbfe>;
- fsl,descriptor-types-mask = <0x3ab0ebf>;
- };
mpic: pic@40000 {
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <2>;
- reg = <0x40000 0x40000>;
- compatible = "chrp,open-pic";
- device_type = "open-pic";
protected-sources = <
42 76 77 78 79 /* serial1 , dma2 */
29 30 34 26 /* enet0, pci1 */
@@ -326,26 +161,28 @@
>;
};
- global-utilities@e0000 {
- compatible = "fsl,p2020-guts";
- reg = <0xe0000 0x1000>;
- fsl,has-rstcr;
+ msi@41600 {
+ status = "disabled";
};
+
+
};
- pci0: pcie@ffe09000 {
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0 0xffe09000 0 0x1000>;
- bus-range = <0 255>;
+ pci0: pcie@ffe08000 {
+ status = "disabled";
+ };
+
+ pci1: pcie@ffe09000 {
ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
- 0x1000000 0x0 0x00000000 0 0xffc30000 0x0 0x10000>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <25 2>;
+ 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
+ interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x4 0x1
+ 0000 0x0 0x0 0x2 &mpic 0x5 0x1
+ 0000 0x0 0x0 0x3 &mpic 0x6 0x1
+ 0000 0x0 0x0 0x4 &mpic 0x7 0x1
+ >;
pcie@0 {
reg = <0x0 0x0 0x0 0x0 0x0>;
#size-cells = <2>;
@@ -360,4 +197,8 @@
0x0 0x100000>;
};
};
+
+ pci2: pcie@ffe0a000 {
+ status = "disabled";
+ };
};
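
The camp core0 file above and the core1 file that follows split the P2020 into an asymmetric-multiprocessing pair: each image disables the other core and the devices it does not own, and lists the other partition's interrupt sources as protected so its own MPIC leaves them untouched. A condensed sketch of the core0 side, drawn from the hunks above:

	cpus {
		PowerPC,P2020@1 {
			status = "disabled";	/* core1 runs the other image */
		};
	};

	mpic: pic@40000 {
		/* serial1, dma2, enet0 and pci1 belong to the core1 partition */
		protected-sources = <42 76 77 78 79 29 30 34 26>;
	};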
diff --git a/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts b/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts
index e95a51285328..261c34ba45ec 100644
--- a/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts
+++ b/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts
@@ -7,7 +7,7 @@
*
* Please note to add "-b 1" for core1's dts compiling.
*
- * Copyright 2009 Freescale Semiconductor Inc.
+ * Copyright 2009-2011 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -15,27 +15,21 @@
* option) any later version.
*/
-/dts-v1/;
+/include/ "p2020si.dtsi"
+
/ {
- model = "fsl,P2020";
+ model = "fsl,P2020RDB";
compatible = "fsl,P2020RDB", "fsl,MPC85XXRDB-CAMP";
- #address-cells = <2>;
- #size-cells = <2>;
aliases {
ethernet0 = &enet0;
- serial0 = &serial0;
+ serial0 = &serial1;
pci1 = &pci1;
};
cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- PowerPC,P2020@1 {
- device_type = "cpu";
- reg = <0x1>;
- next-level-cache = <&L2>;
+ PowerPC,P2020@0 {
+ status = "disabled";
};
};
@@ -43,20 +37,37 @@
device_type = "memory";
};
+ localbus@ffe05000 {
+ status = "disabled";
+ };
+
soc@ffe00000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "soc";
- compatible = "fsl,p2020-immr", "simple-bus";
- ranges = <0x0 0x0 0xffe00000 0x100000>;
- bus-frequency = <0>; // Filled out by uboot.
-
- serial0: serial@4600 {
- cell-index = <1>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4600 0x100>;
- clock-frequency = <0>;
+ ecm-law@0 {
+ status = "disabled";
+ };
+
+ ecm@1000 {
+ status = "disabled";
+ };
+
+ memory-controller@2000 {
+ status = "disabled";
+ };
+
+ i2c@3000 {
+ status = "disabled";
+ };
+
+ i2c@3100 {
+ status = "disabled";
+ };
+
+ serial0: serial@4500 {
+ status = "disabled";
+ };
+
+ spi@7000 {
+ status = "disabled";
};
dma@c300 {
@@ -96,6 +107,10 @@
};
};
+ gpio: gpio-controller@f000 {
+ status = "disabled";
+ };
+
L2: l2-cache-controller@20000 {
compatible = "fsl,p2020-l2-cache-controller";
reg = <0x20000 0x1000>;
@@ -104,31 +119,49 @@
interrupt-parent = <&mpic>;
};
+ dma@21300 {
+ status = "disabled";
+ };
+
+ usb@22000 {
+ status = "disabled";
+ };
+
+ mdio@24520 {
+ status = "disabled";
+ };
+
+ mdio@25520 {
+ status = "disabled";
+ };
+
+ mdio@26520 {
+ status = "disabled";
+ };
enet0: ethernet@24000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <0>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x24000 0x1000>;
- ranges = <0x0 0x24000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <29 2 30 2 34 2>;
- interrupt-parent = <&mpic>;
fixed-link = <1 1 1000 0 0>;
phy-connection-type = "rgmii-id";
};
+ enet1: ethernet@25000 {
+ status = "disabled";
+ };
+
+ enet2: ethernet@26000 {
+ status = "disabled";
+ };
+
+ sdhci@2e000 {
+ status = "disabled";
+ };
+
+ crypto@30000 {
+ status = "disabled";
+ };
+
mpic: pic@40000 {
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <2>;
- reg = <0x40000 0x40000>;
- compatible = "chrp,open-pic";
- device_type = "open-pic";
protected-sources = <
17 18 43 42 59 47 /*ecm, mem, i2c, serial0, spi,gpio */
16 20 21 22 23 28 /* L2, dma1, USB */
@@ -152,28 +185,39 @@
0xe7 0>;
interrupt-parent = <&mpic>;
};
+
+ global-utilities@e0000 { //global utilities block
+ status = "disabled";
+ };
+
};
- pci1: pcie@ffe0a000 {
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0 0xffe0a000 0 0x1000>;
- bus-range = <0 255>;
- ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000
- 0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>;
- clock-frequency = <33333333>;
- interrupt-parent = <&mpic>;
- interrupts = <26 2>;
+ pci0: pcie@ffe08000 {
+ status = "disabled";
+ };
+
+ pci1: pcie@ffe09000 {
+ status = "disabled";
+ };
+
+ pci2: pcie@ffe0a000 {
+ ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
+ interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x0 0x1
+ 0000 0x0 0x0 0x2 &mpic 0x1 0x1
+ 0000 0x0 0x0 0x3 &mpic 0x2 0x1
+ 0000 0x0 0x0 0x4 &mpic 0x3 0x1
+ >;
pcie@0 {
reg = <0x0 0x0 0x0 0x0 0x0>;
#size-cells = <2>;
#address-cells = <3>;
device_type = "pci";
- ranges = <0x2000000 0x0 0xc0000000
- 0x2000000 0x0 0xc0000000
+ ranges = <0x2000000 0x0 0x80000000
+ 0x2000000 0x0 0x80000000
0x0 0x20000000
0x1000000 0x0 0x0
diff --git a/arch/powerpc/boot/dts/p2020si.dtsi b/arch/powerpc/boot/dts/p2020si.dtsi
new file mode 100644
index 000000000000..6def17f265d3
--- /dev/null
+++ b/arch/powerpc/boot/dts/p2020si.dtsi
@@ -0,0 +1,382 @@
+/*
+ * P2020 Device Tree Source
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/dts-v1/;
+/ {
+ compatible = "fsl,P2020";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ PowerPC,P2020@0 {
+ device_type = "cpu";
+ reg = <0x0>;
+ next-level-cache = <&L2>;
+ };
+
+ PowerPC,P2020@1 {
+ device_type = "cpu";
+ reg = <0x1>;
+ next-level-cache = <&L2>;
+ };
+ };
+
+ localbus@ffe05000 {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "fsl,p2020-elbc", "fsl,elbc", "simple-bus";
+ reg = <0 0xffe05000 0 0x1000>;
+ interrupts = <19 2>;
+ interrupt-parent = <&mpic>;
+ };
+
+ soc@ffe00000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "fsl,p2020-immr", "simple-bus";
+ ranges = <0x0 0x0 0xffe00000 0x100000>;
+ bus-frequency = <0>; // Filled out by uboot.
+
+ ecm-law@0 {
+ compatible = "fsl,ecm-law";
+ reg = <0x0 0x1000>;
+ fsl,num-laws = <12>;
+ };
+
+ ecm@1000 {
+ compatible = "fsl,p2020-ecm", "fsl,ecm";
+ reg = <0x1000 0x1000>;
+ interrupts = <17 2>;
+ interrupt-parent = <&mpic>;
+ };
+
+ memory-controller@2000 {
+ compatible = "fsl,p2020-memory-controller";
+ reg = <0x2000 0x1000>;
+ interrupt-parent = <&mpic>;
+ interrupts = <18 2>;
+ };
+
+ i2c@3000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <0>;
+ compatible = "fsl-i2c";
+ reg = <0x3000 0x100>;
+ interrupts = <43 2>;
+ interrupt-parent = <&mpic>;
+ dfsrr;
+ };
+
+ i2c@3100 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <1>;
+ compatible = "fsl-i2c";
+ reg = <0x3100 0x100>;
+ interrupts = <43 2>;
+ interrupt-parent = <&mpic>;
+ dfsrr;
+ };
+
+ serial0: serial@4500 {
+ cell-index = <0>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x4500 0x100>;
+ clock-frequency = <0>;
+ interrupts = <42 2>;
+ interrupt-parent = <&mpic>;
+ };
+
+ serial1: serial@4600 {
+ cell-index = <1>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x4600 0x100>;
+ clock-frequency = <0>;
+ interrupts = <42 2>;
+ interrupt-parent = <&mpic>;
+ };
+
+ spi@7000 {
+ cell-index = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,espi";
+ reg = <0x7000 0x1000>;
+ interrupts = <59 0x2>;
+ interrupt-parent = <&mpic>;
+ mode = "cpu";
+ };
+
+ dma@c300 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,eloplus-dma";
+ reg = <0xc300 0x4>;
+ ranges = <0x0 0xc100 0x200>;
+ cell-index = <1>;
+ dma-channel@0 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x0 0x80>;
+ cell-index = <0>;
+ interrupt-parent = <&mpic>;
+ interrupts = <76 2>;
+ };
+ dma-channel@80 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x80 0x80>;
+ cell-index = <1>;
+ interrupt-parent = <&mpic>;
+ interrupts = <77 2>;
+ };
+ dma-channel@100 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x100 0x80>;
+ cell-index = <2>;
+ interrupt-parent = <&mpic>;
+ interrupts = <78 2>;
+ };
+ dma-channel@180 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x180 0x80>;
+ cell-index = <3>;
+ interrupt-parent = <&mpic>;
+ interrupts = <79 2>;
+ };
+ };
+
+ gpio: gpio-controller@f000 {
+ #gpio-cells = <2>;
+ compatible = "fsl,mpc8572-gpio";
+ reg = <0xf000 0x100>;
+ interrupts = <47 0x2>;
+ interrupt-parent = <&mpic>;
+ gpio-controller;
+ };
+
+ L2: l2-cache-controller@20000 {
+ compatible = "fsl,p2020-l2-cache-controller";
+ reg = <0x20000 0x1000>;
+ cache-line-size = <32>; // 32 bytes
+ cache-size = <0x80000>; // L2,512K
+ interrupt-parent = <&mpic>;
+ interrupts = <16 2>;
+ };
+
+ dma@21300 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,eloplus-dma";
+ reg = <0x21300 0x4>;
+ ranges = <0x0 0x21100 0x200>;
+ cell-index = <0>;
+ dma-channel@0 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x0 0x80>;
+ cell-index = <0>;
+ interrupt-parent = <&mpic>;
+ interrupts = <20 2>;
+ };
+ dma-channel@80 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x80 0x80>;
+ cell-index = <1>;
+ interrupt-parent = <&mpic>;
+ interrupts = <21 2>;
+ };
+ dma-channel@100 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x100 0x80>;
+ cell-index = <2>;
+ interrupt-parent = <&mpic>;
+ interrupts = <22 2>;
+ };
+ dma-channel@180 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x180 0x80>;
+ cell-index = <3>;
+ interrupt-parent = <&mpic>;
+ interrupts = <23 2>;
+ };
+ };
+
+ usb@22000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl-usb2-dr";
+ reg = <0x22000 0x1000>;
+ interrupt-parent = <&mpic>;
+ interrupts = <28 0x2>;
+ };
+
+ mdio@24520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-mdio";
+ reg = <0x24520 0x20>;
+ };
+
+ mdio@25520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x26520 0x20>;
+ };
+
+ mdio@26520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x520 0x20>;
+ };
+
+ enet0: ethernet@24000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ cell-index = <0>;
+ device_type = "network";
+ model = "eTSEC";
+ compatible = "gianfar";
+ reg = <0x24000 0x1000>;
+ ranges = <0x0 0x24000 0x1000>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupts = <29 2 30 2 34 2>;
+ interrupt-parent = <&mpic>;
+ };
+
+ enet1: ethernet@25000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ cell-index = <1>;
+ device_type = "network";
+ model = "eTSEC";
+ compatible = "gianfar";
+ reg = <0x25000 0x1000>;
+ ranges = <0x0 0x25000 0x1000>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupts = <35 2 36 2 40 2>;
+ interrupt-parent = <&mpic>;
+
+ };
+
+ enet2: ethernet@26000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ cell-index = <2>;
+ device_type = "network";
+ model = "eTSEC";
+ compatible = "gianfar";
+ reg = <0x26000 0x1000>;
+ ranges = <0x0 0x26000 0x1000>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupts = <31 2 32 2 33 2>;
+ interrupt-parent = <&mpic>;
+
+ };
+
+ sdhci@2e000 {
+ compatible = "fsl,p2020-esdhc", "fsl,esdhc";
+ reg = <0x2e000 0x1000>;
+ interrupts = <72 0x2>;
+ interrupt-parent = <&mpic>;
+ /* Filled in by U-Boot */
+ clock-frequency = <0>;
+ };
+
+ crypto@30000 {
+ compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4",
+ "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0";
+ reg = <0x30000 0x10000>;
+ interrupts = <45 2 58 2>;
+ interrupt-parent = <&mpic>;
+ fsl,num-channels = <4>;
+ fsl,channel-fifo-len = <24>;
+ fsl,exec-units-mask = <0xbfe>;
+ fsl,descriptor-types-mask = <0x3ab0ebf>;
+ };
+
+ mpic: pic@40000 {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ reg = <0x40000 0x40000>;
+ compatible = "chrp,open-pic";
+ device_type = "open-pic";
+ };
+
+ msi@41600 {
+ compatible = "fsl,p2020-msi", "fsl,mpic-msi";
+ reg = <0x41600 0x80>;
+ msi-available-ranges = <0 0x100>;
+ interrupts = <
+ 0xe0 0
+ 0xe1 0
+ 0xe2 0
+ 0xe3 0
+ 0xe4 0
+ 0xe5 0
+ 0xe6 0
+ 0xe7 0>;
+ interrupt-parent = <&mpic>;
+ };
+
+ global-utilities@e0000 { //global utilities block
+ compatible = "fsl,p2020-guts";
+ reg = <0xe0000 0x1000>;
+ fsl,has-rstcr;
+ };
+ };
+
+ pci0: pcie@ffe08000 {
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <0 0xffe08000 0 0x1000>;
+ bus-range = <0 255>;
+ clock-frequency = <33333333>;
+ interrupt-parent = <&mpic>;
+ interrupts = <24 2>;
+ };
+
+ pci1: pcie@ffe09000 {
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <0 0xffe09000 0 0x1000>;
+ bus-range = <0 255>;
+ clock-frequency = <33333333>;
+ interrupt-parent = <&mpic>;
+ interrupts = <25 2>;
+ };
+
+ pci2: pcie@ffe0a000 {
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <0 0xffe0a000 0 0x1000>;
+ bus-range = <0 255>;
+ clock-frequency = <33333333>;
+ interrupt-parent = <&mpic>;
+ interrupts = <26 2>;
+ };
+};
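
The new p2020si.dtsi above carries the full P2020 silicon description; the board files earlier in this diff shrink to an /include/ of it plus board-specific overrides addressed by node path. A short sketch of that pattern, using node names taken from the hunks above:

	/include/ "p2020si.dtsi"
	/ {
		model = "fsl,P2020RDB";

		soc@ffe00000 {
			usb@22000 {
				phy_type = "ulpi";	/* add board-level detail to a silicon node */
			};
			mdio@26520 {
				status = "disabled";	/* hide silicon blocks this board does not wire up */
			};
		};
	};

Properties set in the board file merge with, and take precedence over, those defined for the same node in the included .dtsi, which is what lets each board keep only its deltas.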
diff --git a/arch/powerpc/boot/epapr.c b/arch/powerpc/boot/epapr.c
new file mode 100644
index 000000000000..06c1961bd124
--- /dev/null
+++ b/arch/powerpc/boot/epapr.c
@@ -0,0 +1,66 @@
+/*
+ * Bootwrapper for ePAPR compliant firmwares
+ *
+ * Copyright 2010 David Gibson <david@gibson.dropbear.id.au>, IBM Corporation.
+ *
+ * Based on earlier bootwrappers by:
+ * (c) Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp,\
+ * and
+ * Scott Wood <scottwood@freescale.com>
+ * Copyright (c) 2007 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include "ops.h"
+#include "stdio.h"
+#include "io.h"
+#include <libfdt.h>
+
+BSS_STACK(4096);
+
+#define EPAPR_SMAGIC 0x65504150
+#define EPAPR_EMAGIC 0x45504150
+
+static unsigned epapr_magic;
+static unsigned long ima_size;
+static unsigned long fdt_addr;
+
+static void platform_fixups(void)
+{
+ if ((epapr_magic != EPAPR_EMAGIC)
+ && (epapr_magic != EPAPR_SMAGIC))
+ fatal("r6 contained 0x%08x instead of ePAPR magic number\n",
+ epapr_magic);
+
+ if (ima_size < (unsigned long)_end)
+ printf("WARNING: Image loaded outside IMA!"
+ " (_end=%p, ima_size=0x%lx)\n", _end, ima_size);
+ if (ima_size < fdt_addr)
+ printf("WARNING: Device tree address is outside IMA!"
+ "(fdt_addr=0x%lx, ima_size=0x%lx)\n", fdt_addr,
+ ima_size);
+ if (ima_size < fdt_addr + fdt_totalsize((void *)fdt_addr))
+ printf("WARNING: Device tree extends outside IMA!"
+ " (fdt_addr=0x%lx, size=0x%x, ima_size=0x%lx\n",
+ fdt_addr, fdt_totalsize((void *)fdt_addr), ima_size);
+}
+
+void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7)
+{
+ epapr_magic = r6;
+ ima_size = r7;
+ fdt_addr = r3;
+
+ /* FIXME: we should process reserve entries */
+
+ simple_alloc_init(_end, ima_size - (unsigned long)_end, 32, 64);
+
+ fdt_init((void *)fdt_addr);
+
+ serial_console_init();
+ platform_ops.fixups = platform_fixups;
+}
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index cb97e7511d7e..c74531af72c0 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -39,6 +39,7 @@ dts=
cacheit=
binary=
gzip=.gz
+pie=
# cross-compilation prefix
CROSS=
@@ -157,9 +158,10 @@ pmac|chrp)
platformo=$object/of.o
;;
coff)
- platformo=$object/of.o
+ platformo="$object/crt0.o $object/of.o"
lds=$object/zImage.coff.lds
link_address='0x500000'
+ pie=
;;
miboot|uboot)
# miboot and U-boot want just the bare bits, not an ELF binary
@@ -208,6 +210,7 @@ ps3)
ksection=.kernel:vmlinux.bin
isection=.kernel:initrd
link_address=''
+ pie=
;;
ep88xc|ep405|ep8248e)
platformo="$object/fixed-head.o $object/$platform.o"
@@ -244,6 +247,10 @@ gamecube|wii)
treeboot-iss4xx-mpic)
platformo="$object/treeboot-iss4xx.o"
;;
+epapr)
+ link_address='0x20000000'
+ pie=-pie
+ ;;
esac
vmz="$tmpdir/`basename \"$kernel\"`.$ext"
@@ -251,7 +258,7 @@ if [ -z "$cacheit" -o ! -f "$vmz$gzip" -o "$vmz$gzip" -ot "$kernel" ]; then
${CROSS}objcopy $objflags "$kernel" "$vmz.$$"
if [ -n "$gzip" ]; then
- gzip -f -9 "$vmz.$$"
+ gzip -n -f -9 "$vmz.$$"
fi
if [ -n "$cacheit" ]; then
@@ -310,9 +317,9 @@ fi
if [ "$platform" != "miboot" ]; then
if [ -n "$link_address" ] ; then
- text_start="-Ttext $link_address --defsym _start=$link_address"
+ text_start="-Ttext $link_address"
fi
- ${CROSS}ld -m elf32ppc -T $lds $text_start -o "$ofile" \
+ ${CROSS}ld -m elf32ppc -T $lds $text_start $pie -o "$ofile" \
$platformo $tmp $object/wrapper.a
rm $tmp
fi
@@ -336,7 +343,7 @@ coff)
$objbin/hack-coff "$ofile"
;;
cuboot*)
- gzip -f -9 "$ofile"
+ gzip -n -f -9 "$ofile"
${MKIMAGE} -A ppc -O linux -T kernel -C gzip -a "$base" -e "$entry" \
$uboot_version -d "$ofile".gz "$ofile"
;;
@@ -383,6 +390,6 @@ ps3)
odir="$(dirname "$ofile.bin")"
rm -f "$odir/otheros.bld"
- gzip --force -9 --stdout "$ofile.bin" > "$odir/otheros.bld"
+ gzip -n --force -9 --stdout "$ofile.bin" > "$odir/otheros.bld"
;;
esac
diff --git a/arch/powerpc/boot/zImage.coff.lds.S b/arch/powerpc/boot/zImage.coff.lds.S
index 856dc78b14ef..de4c9e3c9344 100644
--- a/arch/powerpc/boot/zImage.coff.lds.S
+++ b/arch/powerpc/boot/zImage.coff.lds.S
@@ -3,13 +3,13 @@ ENTRY(_zimage_start_opd)
EXTERN(_zimage_start_opd)
SECTIONS
{
- _start = .;
.text :
{
+ _start = .;
*(.text)
*(.fixup)
+ _etext = .;
}
- _etext = .;
. = ALIGN(4096);
.data :
{
@@ -17,9 +17,7 @@ SECTIONS
*(.data*)
*(__builtin_*)
*(.sdata*)
- __got2_start = .;
*(.got2)
- __got2_end = .;
_dtb_start = .;
*(.kernel:dtb)
diff --git a/arch/powerpc/boot/zImage.lds.S b/arch/powerpc/boot/zImage.lds.S
index 0962d62bdb50..2bd8731f1365 100644
--- a/arch/powerpc/boot/zImage.lds.S
+++ b/arch/powerpc/boot/zImage.lds.S
@@ -3,49 +3,64 @@ ENTRY(_zimage_start)
EXTERN(_zimage_start)
SECTIONS
{
- _start = .;
.text :
{
+ _start = .;
*(.text)
*(.fixup)
+ _etext = .;
}
- _etext = .;
. = ALIGN(4096);
.data :
{
*(.rodata*)
*(.data*)
*(.sdata*)
- __got2_start = .;
*(.got2)
- __got2_end = .;
}
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .dynamic :
+ {
+ __dynamic_start = .;
+ *(.dynamic)
+ }
+ .hash : { *(.hash) }
+ .interp : { *(.interp) }
+ .rela.dyn : { *(.rela*) }
. = ALIGN(8);
- _dtb_start = .;
- .kernel:dtb : { *(.kernel:dtb) }
- _dtb_end = .;
-
- . = ALIGN(4096);
- _vmlinux_start = .;
- .kernel:vmlinux.strip : { *(.kernel:vmlinux.strip) }
- _vmlinux_end = .;
+ .kernel:dtb :
+ {
+ _dtb_start = .;
+ *(.kernel:dtb)
+ _dtb_end = .;
+ }
. = ALIGN(4096);
- _initrd_start = .;
- .kernel:initrd : { *(.kernel:initrd) }
- _initrd_end = .;
+ .kernel:vmlinux.strip :
+ {
+ _vmlinux_start = .;
+ *(.kernel:vmlinux.strip)
+ _vmlinux_end = .;
+ }
. = ALIGN(4096);
- _edata = .;
+ .kernel:initrd :
+ {
+ _initrd_start = .;
+ *(.kernel:initrd)
+ _initrd_end = .;
+ }
. = ALIGN(4096);
- __bss_start = .;
.bss :
{
- *(.sbss)
- *(.bss)
+ _edata = .;
+ __bss_start = .;
+ *(.sbss)
+ *(.bss)
+ *(COMMON)
+ _end = . ;
}
- . = ALIGN(4096);
- _end = . ;
}
diff --git a/arch/powerpc/configs/44x/warp_defconfig b/arch/powerpc/configs/44x/warp_defconfig
index 6cf9d6614805..abf74dc1f79c 100644
--- a/arch/powerpc/configs/44x/warp_defconfig
+++ b/arch/powerpc/configs/44x/warp_defconfig
@@ -47,6 +47,7 @@ CONFIG_MTD_NAND_NDFC=y
CONFIG_MTD_UBI=y
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
+CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_AT24=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
diff --git a/arch/powerpc/configs/52xx/motionpro_defconfig b/arch/powerpc/configs/52xx/motionpro_defconfig
index 6828eda02bdc..0c7de9620ea6 100644
--- a/arch/powerpc/configs/52xx/motionpro_defconfig
+++ b/arch/powerpc/configs/52xx/motionpro_defconfig
@@ -43,6 +43,7 @@ CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
+CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
CONFIG_SCSI_TGT=y
CONFIG_BLK_DEV_SD=y
diff --git a/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig b/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
index c683bce4c26e..126ef1b08a01 100644
--- a/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
@@ -104,7 +104,6 @@ CONFIG_ROOT_NFS=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
-# CONFIG_DEBUG_BUGVERBOSE is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_CRYPTO_PCBC=m
diff --git a/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig b/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
index a721cd3d793f..abcf00ad939e 100644
--- a/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
@@ -101,7 +101,6 @@ CONFIG_ROOT_NFS=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
-# CONFIG_DEBUG_BUGVERBOSE is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_CRYPTO_PCBC=m
diff --git a/arch/powerpc/configs/85xx/mpc8540_ads_defconfig b/arch/powerpc/configs/85xx/mpc8540_ads_defconfig
index 55e0725500dc..11662c217ac0 100644
--- a/arch/powerpc/configs/85xx/mpc8540_ads_defconfig
+++ b/arch/powerpc/configs/85xx/mpc8540_ads_defconfig
@@ -58,7 +58,6 @@ CONFIG_PARTITION_ADVANCED=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_MUTEXES=y
-# CONFIG_DEBUG_BUGVERBOSE is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/85xx/mpc8560_ads_defconfig b/arch/powerpc/configs/85xx/mpc8560_ads_defconfig
index d724095530a6..ebe9b30b0721 100644
--- a/arch/powerpc/configs/85xx/mpc8560_ads_defconfig
+++ b/arch/powerpc/configs/85xx/mpc8560_ads_defconfig
@@ -59,7 +59,6 @@ CONFIG_PARTITION_ADVANCED=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_MUTEXES=y
-# CONFIG_DEBUG_BUGVERBOSE is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig b/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig
index 4b44beaa21ae..eb25229b387a 100644
--- a/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig
+++ b/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig
@@ -63,7 +63,6 @@ CONFIG_PARTITION_ADVANCED=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_MUTEXES=y
-# CONFIG_DEBUG_BUGVERBOSE is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/86xx/gef_ppc9a_defconfig b/arch/powerpc/configs/86xx/gef_ppc9a_defconfig
index 4b2441244eab..d41857a5152d 100644
--- a/arch/powerpc/configs/86xx/gef_ppc9a_defconfig
+++ b/arch/powerpc/configs/86xx/gef_ppc9a_defconfig
@@ -85,6 +85,7 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
+CONFIG_MISC_DEVICES=y
CONFIG_DS1682=y
CONFIG_IDE=y
CONFIG_BLK_DEV_IDECS=y
diff --git a/arch/powerpc/configs/86xx/gef_sbc310_defconfig b/arch/powerpc/configs/86xx/gef_sbc310_defconfig
index a360ba44b928..38303ec11bcd 100644
--- a/arch/powerpc/configs/86xx/gef_sbc310_defconfig
+++ b/arch/powerpc/configs/86xx/gef_sbc310_defconfig
@@ -85,6 +85,7 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
+CONFIG_MISC_DEVICES=y
CONFIG_DS1682=y
CONFIG_IDE=y
CONFIG_BLK_DEV_IDECS=y
diff --git a/arch/powerpc/configs/86xx/gef_sbc610_defconfig b/arch/powerpc/configs/86xx/gef_sbc610_defconfig
index be2829dd129f..98533973d20f 100644
--- a/arch/powerpc/configs/86xx/gef_sbc610_defconfig
+++ b/arch/powerpc/configs/86xx/gef_sbc610_defconfig
@@ -138,6 +138,7 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
+CONFIG_MISC_DEVICES=y
CONFIG_DS1682=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
diff --git a/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig b/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig
index 0c9c7ed7ec75..f51c7ebc181e 100644
--- a/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig
+++ b/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig
@@ -63,6 +63,7 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
+CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
@@ -167,7 +168,6 @@ CONFIG_MAC_PARTITION=y
CONFIG_CRC_T10DIF=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
-# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_DEBUG_INFO=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
diff --git a/arch/powerpc/configs/c2k_defconfig b/arch/powerpc/configs/c2k_defconfig
index f9e6a3ea5a64..2a84fd7f631c 100644
--- a/arch/powerpc/configs/c2k_defconfig
+++ b/arch/powerpc/configs/c2k_defconfig
@@ -132,8 +132,8 @@ CONFIG_NET_CLS_RSVP=m
CONFIG_NET_CLS_RSVP6=m
CONFIG_NET_CLS_IND=y
CONFIG_BT=m
-CONFIG_BT_L2CAP=m
-CONFIG_BT_SCO=m
+CONFIG_BT_L2CAP=y
+CONFIG_BT_SCO=y
CONFIG_BT_RFCOMM=m
CONFIG_BT_RFCOMM_TTY=y
CONFIG_BT_BNEP=m
diff --git a/arch/powerpc/configs/e55xx_smp_defconfig b/arch/powerpc/configs/e55xx_smp_defconfig
index 06f95492afc7..d32283555b53 100644
--- a/arch/powerpc/configs/e55xx_smp_defconfig
+++ b/arch/powerpc/configs/e55xx_smp_defconfig
@@ -6,10 +6,10 @@ CONFIG_NR_CPUS=2
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_SPARSE_IRQ=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
@@ -25,14 +25,42 @@ CONFIG_P5020_DS=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BINFMT_MISC=m
-CONFIG_SPARSE_IRQ=y
# CONFIG_PCI is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NET_IPIP=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_ARPD=y
+CONFIG_INET_ESP=y
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6=y
+CONFIG_IP_SCTP=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
+CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=y
+CONFIG_NET_ETHERNET=y
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
@@ -63,22 +91,14 @@ CONFIG_NLS=y
CONFIG_NLS_UTF8=m
CONFIG_CRC_T10DIF=y
CONFIG_CRC_ITU_T=m
-CONFIG_LIBCRC32C=m
CONFIG_FRAME_WARN=1024
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
-# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_DEBUG_INFO=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_VIRQ_DEBUG=y
-CONFIG_CRYPTO=y
-CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_SHA1=m
-CONFIG_CRYPTO_DES=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_DEV_TALITOS=y
diff --git a/arch/powerpc/configs/linkstation_defconfig b/arch/powerpc/configs/linkstation_defconfig
index f39d0cf876dd..8a874b999867 100644
--- a/arch/powerpc/configs/linkstation_defconfig
+++ b/arch/powerpc/configs/linkstation_defconfig
@@ -78,6 +78,7 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=2
CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=m
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
diff --git a/arch/powerpc/configs/mpc512x_defconfig b/arch/powerpc/configs/mpc512x_defconfig
index 62db8a3df162..c02bbb2fddf8 100644
--- a/arch/powerpc/configs/mpc512x_defconfig
+++ b/arch/powerpc/configs/mpc512x_defconfig
@@ -61,6 +61,7 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=1
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_BLK_DEV_XIP=y
+CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_AT24=y
CONFIG_SCSI=y
# CONFIG_SCSI_PROC_FS is not set
diff --git a/arch/powerpc/configs/mpc5200_defconfig b/arch/powerpc/configs/mpc5200_defconfig
index 7376e27b8ed4..e63f537b854a 100644
--- a/arch/powerpc/configs/mpc5200_defconfig
+++ b/arch/powerpc/configs/mpc5200_defconfig
@@ -52,6 +52,7 @@ CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
+CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_AT24=y
CONFIG_SCSI_TGT=y
CONFIG_BLK_DEV_SD=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index 99a19d1e9bf8..96b89df7752a 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -82,6 +82,7 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
+CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
@@ -203,7 +204,6 @@ CONFIG_CRC_T10DIF=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
-# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_DEBUG_INFO=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index c636f23f8c92..de65841aa04e 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -84,6 +84,7 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
+CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
@@ -205,7 +206,6 @@ CONFIG_CRC_T10DIF=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
-# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_DEBUG_INFO=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
diff --git a/arch/powerpc/configs/mpc86xx_defconfig b/arch/powerpc/configs/mpc86xx_defconfig
index 55b54318fef6..a1cc8179e9fd 100644
--- a/arch/powerpc/configs/mpc86xx_defconfig
+++ b/arch/powerpc/configs/mpc86xx_defconfig
@@ -66,6 +66,7 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
+CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
@@ -170,7 +171,6 @@ CONFIG_MAC_PARTITION=y
CONFIG_CRC_T10DIF=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
-# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_DEBUG_INFO=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
index edd2d54c8196..f4deb0b78cf0 100644
--- a/arch/powerpc/configs/pasemi_defconfig
+++ b/arch/powerpc/configs/pasemi_defconfig
@@ -59,6 +59,7 @@ CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
CONFIG_IDE=y
CONFIG_BLK_DEV_IDECD=y
diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig
index ac4fc41035f6..f8b394a76ac3 100644
--- a/arch/powerpc/configs/pmac32_defconfig
+++ b/arch/powerpc/configs/pmac32_defconfig
@@ -112,8 +112,8 @@ CONFIG_IRDA_CACHE_LAST_LSAP=y
CONFIG_IRDA_FAST_RR=y
CONFIG_IRTTY_SIR=m
CONFIG_BT=m
-CONFIG_BT_L2CAP=m
-CONFIG_BT_SCO=m
+CONFIG_BT_L2CAP=y
+CONFIG_BT_SCO=y
CONFIG_BT_RFCOMM=m
CONFIG_BT_RFCOMM_TTY=y
CONFIG_BT_BNEP=m
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index 9d64a6822d86..214208924a9c 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -351,8 +351,8 @@ CONFIG_VLSI_FIR=m
CONFIG_VIA_FIR=m
CONFIG_MCS_FIR=m
CONFIG_BT=m
-CONFIG_BT_L2CAP=m
-CONFIG_BT_SCO=m
+CONFIG_BT_L2CAP=y
+CONFIG_BT_SCO=y
CONFIG_BT_RFCOMM=m
CONFIG_BT_RFCOMM_TTY=y
CONFIG_BT_BNEP=m
@@ -398,6 +398,7 @@ CONFIG_BLK_DEV_RAM_SIZE=16384
CONFIG_CDROM_PKTCDVD=m
CONFIG_VIRTIO_BLK=m
CONFIG_BLK_DEV_HD=y
+CONFIG_MISC_DEVICES=y
CONFIG_ENCLOSURE_SERVICES=m
CONFIG_SENSORS_TSL2550=m
CONFIG_EEPROM_AT24=m
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index caba919f65d8..6472322bf13b 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -52,8 +52,8 @@ CONFIG_IP_PNP_DHCP=y
# CONFIG_INET_DIAG is not set
CONFIG_IPV6=y
CONFIG_BT=m
-CONFIG_BT_L2CAP=m
-CONFIG_BT_SCO=m
+CONFIG_BT_L2CAP=y
+CONFIG_BT_SCO=y
CONFIG_BT_RFCOMM=m
CONFIG_BT_RFCOMM_TTY=y
CONFIG_BT_BNEP=m
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 9c3f22c6cde1..7de13865508c 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -146,12 +146,18 @@ CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_SAS_ATTRS=m
+CONFIG_SCSI_CXGB3_ISCSI=m
+CONFIG_SCSI_CXGB4_ISCSI=m
+CONFIG_SCSI_BNX2_ISCSI=m
+CONFIG_SCSI_BNX2_ISCSI=m
+CONFIG_BE2ISCSI=m
CONFIG_SCSI_IBMVSCSI=y
CONFIG_SCSI_IBMVFC=m
CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
CONFIG_SCSI_IPR=y
CONFIG_SCSI_QLA_FC=m
+CONFIG_SCSI_QLA_ISCSI=m
CONFIG_SCSI_LPFC=m
CONFIG_ATA=y
# CONFIG_ATA_SFF is not set
@@ -189,6 +195,7 @@ CONFIG_TIGON3=y
CONFIG_BNX2=m
CONFIG_CHELSIO_T1=m
CONFIG_CHELSIO_T3=m
+CONFIG_CHELSIO_T4=m
CONFIG_EHEA=y
CONFIG_IXGBE=m
CONFIG_IXGB=m
@@ -196,6 +203,8 @@ CONFIG_S2IO=m
CONFIG_MYRI10GE=m
CONFIG_NETXEN_NIC=m
CONFIG_MLX4_EN=m
+CONFIG_QLGE=m
+CONFIG_BE2NET=m
CONFIG_PPP=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
@@ -255,6 +264,8 @@ CONFIG_INFINIBAND_USER_MAD=m
CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_INFINIBAND_MTHCA=m
CONFIG_INFINIBAND_EHCA=m
+CONFIG_INFINIBAND_CXGB3=m
+CONFIG_INFINIBAND_CXGB4=m
CONFIG_MLX4_INFINIBAND=m
CONFIG_INFINIBAND_IPOIB=m
CONFIG_INFINIBAND_IPOIB_CM=y
diff --git a/arch/powerpc/include/asm/8xx_immap.h b/arch/powerpc/include/asm/8xx_immap.h
index 6b6dc20b0beb..bdf0563ba423 100644
--- a/arch/powerpc/include/asm/8xx_immap.h
+++ b/arch/powerpc/include/asm/8xx_immap.h
@@ -393,8 +393,8 @@ typedef struct fec {
uint fec_addr_low; /* lower 32 bits of station address */
ushort fec_addr_high; /* upper 16 bits of station address */
ushort res1; /* reserved */
- uint fec_hash_table_high; /* upper 32-bits of hash table */
- uint fec_hash_table_low; /* lower 32-bits of hash table */
+ uint fec_grp_hash_table_high; /* upper 32-bits of hash table */
+ uint fec_grp_hash_table_low; /* lower 32-bits of hash table */
uint fec_r_des_start; /* beginning of Rx descriptor ring */
uint fec_x_des_start; /* beginning of Tx descriptor ring */
uint fec_r_buff_size; /* Rx buffer size */
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index 2e561876fc89..f18c6d9b9510 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -209,8 +209,8 @@ static __inline__ unsigned long ffz(unsigned long x)
return BITS_PER_LONG;
/*
- * Calculate the bit position of the least signficant '1' bit in x
- * (since x has been changed this will actually be the least signficant
+ * Calculate the bit position of the least significant '1' bit in x
+ * (since x has been changed this will actually be the least significant
* '0' bit in * the original x). Note: (x & -x) gives us a mask that
* is the least significant * (RIGHT-most) 1-bit of the value in x.
*/
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
index 2296112e247b..91010e8f8479 100644
--- a/arch/powerpc/include/asm/compat.h
+++ b/arch/powerpc/include/asm/compat.h
@@ -140,7 +140,7 @@ static inline void __user *arch_compat_alloc_user_space(long len)
unsigned long usp = regs->gpr[1];
/*
- * We cant access below the stack pointer in the 32bit ABI and
+ * We can't access below the stack pointer in the 32bit ABI and
* can access 288 bytes in the 64bit ABI
*/
if (!is_32bit_task())
diff --git a/arch/powerpc/include/asm/cpm.h b/arch/powerpc/include/asm/cpm.h
index e50323fe941f..4398a6cdcf53 100644
--- a/arch/powerpc/include/asm/cpm.h
+++ b/arch/powerpc/include/asm/cpm.h
@@ -98,7 +98,7 @@ typedef struct cpm_buf_desc {
#define BD_SC_INTRPT (0x1000) /* Interrupt on change */
#define BD_SC_LAST (0x0800) /* Last buffer in frame */
#define BD_SC_TC (0x0400) /* Transmit CRC */
-#define BD_SC_CM (0x0200) /* Continous mode */
+#define BD_SC_CM (0x0200) /* Continuous mode */
#define BD_SC_ID (0x0100) /* Rec'd too many idles */
#define BD_SC_P (0x0100) /* xmt preamble */
#define BD_SC_BR (0x0020) /* Break received */
diff --git a/arch/powerpc/include/asm/cpm1.h b/arch/powerpc/include/asm/cpm1.h
index bd07650dca56..8ee4211ca0c6 100644
--- a/arch/powerpc/include/asm/cpm1.h
+++ b/arch/powerpc/include/asm/cpm1.h
@@ -4,7 +4,7 @@
*
* This file contains structures and information for the communication
* processor channels. Some CPM control and status is available
- * throught the MPC8xx internal memory map. See immap.h for details.
+ * through the MPC8xx internal memory map. See immap.h for details.
* This file only contains what I need for the moment, not the total
* CPM capabilities. I (or someone else) will add definitions as they
* are needed. -- Dan
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index be3cdf9134ce..c0d842cfd012 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -157,6 +157,7 @@ extern const char *powerpc_base_platform;
#define CPU_FTR_476_DD2 ASM_CONST(0x0000000000010000)
#define CPU_FTR_NEED_COHERENT ASM_CONST(0x0000000000020000)
#define CPU_FTR_NO_BTIC ASM_CONST(0x0000000000040000)
+#define CPU_FTR_DEBUG_LVL_EXC ASM_CONST(0x0000000000080000)
#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000000000100000)
#define CPU_FTR_PPC_LE ASM_CONST(0x0000000000200000)
#define CPU_FTR_REAL_LE ASM_CONST(0x0000000000400000)
@@ -178,22 +179,18 @@ extern const char *powerpc_base_platform;
#define LONG_ASM_CONST(x) 0
#endif
-#define CPU_FTR_SLB LONG_ASM_CONST(0x0000000100000000)
-#define CPU_FTR_16M_PAGE LONG_ASM_CONST(0x0000000200000000)
-#define CPU_FTR_TLBIEL LONG_ASM_CONST(0x0000000400000000)
+
+#define CPU_FTR_HVMODE_206 LONG_ASM_CONST(0x0000000800000000)
+#define CPU_FTR_CFAR LONG_ASM_CONST(0x0000001000000000)
#define CPU_FTR_IABR LONG_ASM_CONST(0x0000002000000000)
#define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000004000000000)
#define CPU_FTR_CTRL LONG_ASM_CONST(0x0000008000000000)
#define CPU_FTR_SMT LONG_ASM_CONST(0x0000010000000000)
-#define CPU_FTR_LOCKLESS_TLBIE LONG_ASM_CONST(0x0000040000000000)
-#define CPU_FTR_CI_LARGE_PAGE LONG_ASM_CONST(0x0000100000000000)
#define CPU_FTR_PAUSE_ZERO LONG_ASM_CONST(0x0000200000000000)
#define CPU_FTR_PURR LONG_ASM_CONST(0x0000400000000000)
#define CPU_FTR_CELL_TB_BUG LONG_ASM_CONST(0x0000800000000000)
#define CPU_FTR_SPURR LONG_ASM_CONST(0x0001000000000000)
#define CPU_FTR_DSCR LONG_ASM_CONST(0x0002000000000000)
-#define CPU_FTR_1T_SEGMENT LONG_ASM_CONST(0x0004000000000000)
-#define CPU_FTR_NO_SLBIE_B LONG_ASM_CONST(0x0008000000000000)
#define CPU_FTR_VSX LONG_ASM_CONST(0x0010000000000000)
#define CPU_FTR_SAO LONG_ASM_CONST(0x0020000000000000)
#define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0040000000000000)
@@ -202,12 +199,14 @@ extern const char *powerpc_base_platform;
#define CPU_FTR_STCX_CHECKS_ADDRESS LONG_ASM_CONST(0x0200000000000000)
#define CPU_FTR_POPCNTB LONG_ASM_CONST(0x0400000000000000)
#define CPU_FTR_POPCNTD LONG_ASM_CONST(0x0800000000000000)
+#define CPU_FTR_ICSWX LONG_ASM_CONST(0x1000000000000000)
#ifndef __ASSEMBLY__
-#define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_SLB | \
- CPU_FTR_TLBIEL | CPU_FTR_NOEXECUTE | \
- CPU_FTR_NODSISRALIGN | CPU_FTR_16M_PAGE)
+#define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN)
+
+#define MMU_FTR_PPCAS_ARCH_V2 (MMU_FTR_SLB | MMU_FTR_TLBIEL | \
+ MMU_FTR_16M_PAGE)
/* We only set the altivec features if the kernel was compiled with altivec
* support
@@ -382,10 +381,13 @@ extern const char *powerpc_base_platform;
#define CPU_FTRS_E500_2 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \
CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
-#define CPU_FTRS_E500MC (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
- CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
+#define CPU_FTRS_E500MC (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
CPU_FTR_DBELL)
+#define CPU_FTRS_E5500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
+ CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
+ CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
+ CPU_FTR_DEBUG_LVL_EXC)
#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
/* 64-bit CPUs */
@@ -405,41 +407,46 @@ extern const char *powerpc_base_platform;
#define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
- CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
- CPU_FTR_PURR | CPU_FTR_STCX_CHECKS_ADDRESS | \
- CPU_FTR_POPCNTB)
+ CPU_FTR_COHERENT_ICACHE | CPU_FTR_PURR | \
+ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB)
#define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
- CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
+ CPU_FTR_COHERENT_ICACHE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \
- CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB)
+ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR)
#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
- CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_HVMODE_206 |\
CPU_FTR_MMCRA | CPU_FTR_SMT | \
- CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
+ CPU_FTR_COHERENT_ICACHE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \
- CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD)
+ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
+ CPU_FTR_ICSWX | CPU_FTR_CFAR)
#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
- CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE | \
- CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \
+ CPU_FTR_PAUSE_ZERO | CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \
CPU_FTR_UNALIGNED_LD_STD)
#define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
- CPU_FTR_PPCAS_ARCH_V2 | \
- CPU_FTR_ALTIVEC_COMP | CPU_FTR_CI_LARGE_PAGE | \
- CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_NO_SLBIE_B)
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | \
+ CPU_FTR_PURR | CPU_FTR_REAL_LE)
#define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2)
+#define CPU_FTRS_A2 (CPU_FTR_USE_TB | CPU_FTR_SMT | CPU_FTR_DBELL | \
+ CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN)
+
#ifdef __powerpc64__
+#ifdef CONFIG_PPC_BOOK3E
+#define CPU_FTRS_POSSIBLE (CPU_FTRS_E5500 | CPU_FTRS_A2)
+#else
#define CPU_FTRS_POSSIBLE \
(CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 | \
CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 | \
CPU_FTRS_POWER7 | CPU_FTRS_CELL | CPU_FTRS_PA6T | \
- CPU_FTR_1T_SEGMENT | CPU_FTR_VSX)
+ CPU_FTR_VSX)
+#endif
#else
enum {
CPU_FTRS_POSSIBLE =
@@ -473,16 +480,21 @@ enum {
#endif
#ifdef CONFIG_E500
CPU_FTRS_E500 | CPU_FTRS_E500_2 | CPU_FTRS_E500MC |
+ CPU_FTRS_E5500 |
#endif
0,
};
#endif /* __powerpc64__ */
#ifdef __powerpc64__
+#ifdef CONFIG_PPC_BOOK3E
+#define CPU_FTRS_ALWAYS (CPU_FTRS_E5500 & CPU_FTRS_A2)
+#else
#define CPU_FTRS_ALWAYS \
(CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 & \
CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_POWER6 & \
CPU_FTRS_POWER7 & CPU_FTRS_CELL & CPU_FTRS_PA6T & CPU_FTRS_POSSIBLE)
+#endif
#else
enum {
CPU_FTRS_ALWAYS =
@@ -513,6 +525,7 @@ enum {
#endif
#ifdef CONFIG_E500
CPU_FTRS_E500 & CPU_FTRS_E500_2 & CPU_FTRS_E500MC &
+ CPU_FTRS_E5500 &
#endif
CPU_FTRS_POSSIBLE,
};
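
A minimal sketch (not part of the patch) of how kernel code tests these CPU_FTR_* bits at run time; cpu_has_feature() and the feature names come from this header, the wrapper function is made up:

#include <linux/kernel.h>
#include <asm/cputable.h>

/* Illustrative only: check for the new CFAR feature bit defined above. */
static void example_report_cfar(void)
{
	if (cpu_has_feature(CPU_FTR_CFAR))
		pr_info("CPU saves a Come-From Address Register\n");
	else
		pr_info("CPU has no CFAR\n");
}
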
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index f71bb4c118b4..ce516e5eb0d3 100644
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -37,16 +37,16 @@ extern cpumask_t threads_core_mask;
* This can typically be used for things like IPI for tlb invalidations
* since those need to be done only once per core/TLB
*/
-static inline cpumask_t cpu_thread_mask_to_cores(cpumask_t threads)
+static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
{
cpumask_t tmp, res;
int i;
- res = CPU_MASK_NONE;
+ cpumask_clear(&res);
for (i = 0; i < NR_CPUS; i += threads_per_core) {
- cpus_shift_left(tmp, threads_core_mask, i);
- if (cpus_intersects(threads, tmp))
- cpu_set(i, res);
+ cpumask_shift_left(&tmp, &threads_core_mask, i);
+ if (cpumask_intersects(threads, &tmp))
+ cpumask_set_cpu(i, &res);
}
return res;
}
@@ -58,7 +58,7 @@ static inline int cpu_nr_cores(void)
static inline cpumask_t cpu_online_cores_map(void)
{
- return cpu_thread_mask_to_cores(cpu_online_map);
+ return cpu_thread_mask_to_cores(cpu_online_mask);
}
#ifdef CONFIG_SMP
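
A caller-side sketch of the cpumask conversion above (illustrative, not from the patch): with the new prototype a caller passes a struct cpumask pointer such as cpu_online_mask rather than a cpumask_t by value; cpumask_first() and cpu_online_mask are standard kernel symbols, the wrapper is made up:

#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <asm/cputhreads.h>

/* Illustrative only: find the first CPU of the first online core. */
static void example_print_first_online_core(void)
{
	cpumask_t cores;

	cores = cpu_thread_mask_to_cores(cpu_online_mask);
	pr_info("first online core starts at CPU %u\n", cpumask_first(&cores));
}
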
diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h
index 0893ab9343a6..9c70d0ca96d4 100644
--- a/arch/powerpc/include/asm/dbell.h
+++ b/arch/powerpc/include/asm/dbell.h
@@ -27,9 +27,8 @@ enum ppc_dbell {
PPC_G_DBELL_MC = 4, /* guest mcheck doorbell */
};
-extern void doorbell_message_pass(int target, int msg);
+extern void doorbell_cause_ipi(int cpu, unsigned long data);
extern void doorbell_exception(struct pt_regs *regs);
-extern void doorbell_check_self(void);
extern void doorbell_setup_this_cpu(void);
static inline void ppc_msgsnd(enum ppc_dbell type, u32 flags, u32 tag)
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 6d2416a85709..dd70fac57ec8 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -42,6 +42,7 @@ extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
size_t size, int direction);
+extern unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr);
#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
@@ -198,6 +199,11 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+extern int dma_mmap_coherent(struct device *, struct vm_area_struct *,
+ void *, dma_addr_t, size_t);
+#define ARCH_HAS_DMA_MMAP_COHERENT
+
+
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
diff --git a/arch/powerpc/include/asm/emulated_ops.h b/arch/powerpc/include/asm/emulated_ops.h
index f0fb4fc1f6e6..45921672b97a 100644
--- a/arch/powerpc/include/asm/emulated_ops.h
+++ b/arch/powerpc/include/asm/emulated_ops.h
@@ -52,6 +52,10 @@ extern struct ppc_emulated {
#ifdef CONFIG_VSX
struct ppc_emulated_entry vsx;
#endif
+#ifdef CONFIG_PPC64
+ struct ppc_emulated_entry mfdscr;
+ struct ppc_emulated_entry mtdscr;
+#endif
} ppc_emulated;
extern u32 ppc_warn_emulated;
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 7778d6f0c878..f5dfe3411f64 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -46,6 +46,7 @@
#define EX_CCR 60
#define EX_R3 64
#define EX_LR 72
+#define EX_CFAR 80
/*
* We're short on space and time in the exception prolog, so we can't
@@ -56,30 +57,40 @@
#define LOAD_HANDLER(reg, label) \
addi reg,reg,(label)-_stext; /* virt addr of handler ... */
-#define EXCEPTION_PROLOG_1(area) \
- mfspr r13,SPRN_SPRG_PACA; /* get paca address into r13 */ \
+/* Exception register prefixes */
+#define EXC_HV H
+#define EXC_STD
+
+#define EXCEPTION_PROLOG_1(area) \
+ GET_PACA(r13); \
std r9,area+EX_R9(r13); /* save r9 - r12 */ \
std r10,area+EX_R10(r13); \
std r11,area+EX_R11(r13); \
std r12,area+EX_R12(r13); \
- mfspr r9,SPRN_SPRG_SCRATCH0; \
+ BEGIN_FTR_SECTION_NESTED(66); \
+ mfspr r10,SPRN_CFAR; \
+ std r10,area+EX_CFAR(r13); \
+ END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \
+ GET_SCRATCH0(r9); \
std r9,area+EX_R13(r13); \
mfcr r9
-#define EXCEPTION_PROLOG_PSERIES_1(label) \
+#define __EXCEPTION_PROLOG_PSERIES_1(label, h) \
ld r12,PACAKBASE(r13); /* get high part of &label */ \
ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \
- mfspr r11,SPRN_SRR0; /* save SRR0 */ \
+ mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
LOAD_HANDLER(r12,label) \
- mtspr SPRN_SRR0,r12; \
- mfspr r12,SPRN_SRR1; /* and SRR1 */ \
- mtspr SPRN_SRR1,r10; \
- rfid; \
+ mtspr SPRN_##h##SRR0,r12; \
+ mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
+ mtspr SPRN_##h##SRR1,r10; \
+ h##rfid; \
b . /* prevent speculative execution */
+#define EXCEPTION_PROLOG_PSERIES_1(label, h) \
+ __EXCEPTION_PROLOG_PSERIES_1(label, h)
-#define EXCEPTION_PROLOG_PSERIES(area, label) \
+#define EXCEPTION_PROLOG_PSERIES(area, label, h) \
EXCEPTION_PROLOG_1(area); \
- EXCEPTION_PROLOG_PSERIES_1(label);
+ EXCEPTION_PROLOG_PSERIES_1(label, h);
/*
* The common exception prolog is used for all except a few exceptions
@@ -98,10 +109,11 @@
beq- 1f; \
ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \
- bge- cr1,2f; /* abort if it is */ \
- b 3f; \
-2: li r1,(n); /* will be reloaded later */ \
+ blt+ cr1,3f; /* abort if it is */ \
+ li r1,(n); /* will be reloaded later */ \
sth r1,PACA_TRAP_SAVE(r13); \
+ std r3,area+EX_R3(r13); \
+ addi r3,r13,area; /* r3 -> where regs are saved*/ \
b bad_stack; \
3: std r9,_CCR(r1); /* save CR in stackframe */ \
std r11,_NIP(r1); /* save SRR0 in stackframe */ \
@@ -123,6 +135,10 @@
std r9,GPR11(r1); \
std r10,GPR12(r1); \
std r11,GPR13(r1); \
+ BEGIN_FTR_SECTION_NESTED(66); \
+ ld r10,area+EX_CFAR(r13); \
+ std r10,ORIG_GPR3(r1); \
+ END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \
ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
mflr r9; /* save LR in stackframe */ \
std r9,_LINK(r1); \
@@ -143,57 +159,62 @@
/*
* Exception vectors.
*/
-#define STD_EXCEPTION_PSERIES(n, label) \
- . = n; \
+#define STD_EXCEPTION_PSERIES(loc, vec, label) \
+ . = loc; \
.globl label##_pSeries; \
label##_pSeries: \
HMT_MEDIUM; \
- DO_KVM n; \
- mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
+ DO_KVM vec; \
+ SET_SCRATCH0(r13); /* save r13 */ \
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, EXC_STD)
-#define HSTD_EXCEPTION_PSERIES(n, label) \
- . = n; \
- .globl label##_pSeries; \
-label##_pSeries: \
+#define STD_EXCEPTION_HV(loc, vec, label) \
+ . = loc; \
+ .globl label##_hv; \
+label##_hv: \
HMT_MEDIUM; \
- mtspr SPRN_SPRG_SCRATCH0,r20; /* save r20 */ \
- mfspr r20,SPRN_HSRR0; /* copy HSRR0 to SRR0 */ \
- mtspr SPRN_SRR0,r20; \
- mfspr r20,SPRN_HSRR1; /* copy HSRR0 to SRR0 */ \
- mtspr SPRN_SRR1,r20; \
- mfspr r20,SPRN_SPRG_SCRATCH0; /* restore r20 */ \
- mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
+ DO_KVM vec; \
+ SET_SCRATCH0(r13); /* save r13 */ \
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, EXC_HV)
-
-#define MASKABLE_EXCEPTION_PSERIES(n, label) \
- . = n; \
- .globl label##_pSeries; \
-label##_pSeries: \
+#define __MASKABLE_EXCEPTION_PSERIES(vec, label, h) \
HMT_MEDIUM; \
- DO_KVM n; \
- mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \
- mfspr r13,SPRN_SPRG_PACA; /* get paca address into r13 */ \
+ DO_KVM vec; \
+ SET_SCRATCH0(r13); /* save r13 */ \
+ GET_PACA(r13); \
std r9,PACA_EXGEN+EX_R9(r13); /* save r9, r10 */ \
std r10,PACA_EXGEN+EX_R10(r13); \
lbz r10,PACASOFTIRQEN(r13); \
mfcr r9; \
cmpwi r10,0; \
- beq masked_interrupt; \
- mfspr r10,SPRN_SPRG_SCRATCH0; \
+ beq masked_##h##interrupt; \
+ GET_SCRATCH0(r10); \
std r10,PACA_EXGEN+EX_R13(r13); \
std r11,PACA_EXGEN+EX_R11(r13); \
std r12,PACA_EXGEN+EX_R12(r13); \
ld r12,PACAKBASE(r13); /* get high part of &label */ \
ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \
- mfspr r11,SPRN_SRR0; /* save SRR0 */ \
+ mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
LOAD_HANDLER(r12,label##_common) \
- mtspr SPRN_SRR0,r12; \
- mfspr r12,SPRN_SRR1; /* and SRR1 */ \
- mtspr SPRN_SRR1,r10; \
- rfid; \
+ mtspr SPRN_##h##SRR0,r12; \
+ mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
+ mtspr SPRN_##h##SRR1,r10; \
+ h##rfid; \
b . /* prevent speculative execution */
+#define _MASKABLE_EXCEPTION_PSERIES(vec, label, h) \
+ __MASKABLE_EXCEPTION_PSERIES(vec, label, h)
+
+#define MASKABLE_EXCEPTION_PSERIES(loc, vec, label) \
+ . = loc; \
+ .globl label##_pSeries; \
+label##_pSeries: \
+ _MASKABLE_EXCEPTION_PSERIES(vec, label, EXC_STD)
+
+#define MASKABLE_EXCEPTION_HV(loc, vec, label) \
+ . = loc; \
+ .globl label##_hv; \
+label##_hv: \
+ _MASKABLE_EXCEPTION_PSERIES(vec, label, EXC_HV)
#ifdef CONFIG_PPC_ISERIES
#define DISABLE_INTS \
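
An illustrative expansion (not part of the patch), using a hypothetical handler label foo_common: with h = EXC_STD (empty) the prolog token-pastes into the standard SRR registers and returns with rfid, while h = EXC_HV (which expands to H) yields the hypervisor variants and hrfid:

    EXCEPTION_PROLOG_PSERIES_1(foo_common, EXC_STD)  ->  mfspr r11,SPRN_SRR0  ... rfid
    EXCEPTION_PROLOG_PSERIES_1(foo_common, EXC_HV)   ->  mfspr r11,SPRN_HSRR0 ... hrfid

The one-line EXCEPTION_PROLOG_PSERIES_1 wrapper exists so the EXC_* argument is macro-expanded before ## pastes it into the SPR names and the return mnemonic.
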
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index 921a8470e18a..9a67a38bf7b9 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -49,7 +49,7 @@ label##5: \
FTR_ENTRY_OFFSET label##2b-label##5b; \
FTR_ENTRY_OFFSET label##3b-label##5b; \
FTR_ENTRY_OFFSET label##4b-label##5b; \
- .ifgt (label##4b-label##3b)-(label##2b-label##1b); \
+ .ifgt (label##4b- label##3b)-(label##2b- label##1b); \
.error "Feature section else case larger than body"; \
.endif; \
.popsection;
@@ -146,6 +146,19 @@ label##5: \
#ifndef __ASSEMBLY__
+#define ASM_FTR_IF(section_if, section_else, msk, val) \
+ stringify_in_c(BEGIN_FTR_SECTION) \
+ section_if "; " \
+ stringify_in_c(FTR_SECTION_ELSE) \
+ section_else "; " \
+ stringify_in_c(ALT_FTR_SECTION_END((msk), (val)))
+
+#define ASM_FTR_IFSET(section_if, section_else, msk) \
+ ASM_FTR_IF(section_if, section_else, (msk), (msk))
+
+#define ASM_FTR_IFCLR(section_if, section_else, msk) \
+ ASM_FTR_IF(section_if, section_else, (msk), 0)
+
#define ASM_MMU_FTR_IF(section_if, section_else, msk, val) \
stringify_in_c(BEGIN_MMU_FTR_SECTION) \
section_if "; " \
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index 4ef662e4a31d..3a6c586c4e40 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -47,6 +47,7 @@
#define FW_FEATURE_BEAT ASM_CONST(0x0000000001000000)
#define FW_FEATURE_CMO ASM_CONST(0x0000000002000000)
#define FW_FEATURE_VPHN ASM_CONST(0x0000000004000000)
+#define FW_FEATURE_XCMO ASM_CONST(0x0000000008000000)
#ifndef __ASSEMBLY__
@@ -60,7 +61,7 @@ enum {
FW_FEATURE_VIO | FW_FEATURE_RDMA | FW_FEATURE_LLAN |
FW_FEATURE_BULK_REMOVE | FW_FEATURE_XDABR |
FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR |
- FW_FEATURE_CMO | FW_FEATURE_VPHN,
+ FW_FEATURE_CMO | FW_FEATURE_VPHN | FW_FEATURE_XCMO,
FW_FEATURE_PSERIES_ALWAYS = 0,
FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index ec089acfa56b..852b8c1c09db 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -102,6 +102,7 @@
#define H_ANDCOND (1UL<<(63-33))
#define H_ICACHE_INVALIDATE (1UL<<(63-40)) /* icbi, etc. (ignored for IO pages) */
#define H_ICACHE_SYNCHRONIZE (1UL<<(63-41)) /* dcbst, icbi, etc (ignored for IO pages */
+#define H_COALESCE_CAND (1UL<<(63-42)) /* page is a good candidate for coalescing */
#define H_ZERO_PAGE (1UL<<(63-48)) /* zero the page before mapping (ignored for IO pages) */
#define H_COPY_PAGE (1UL<<(63-49))
#define H_N (1UL<<(63-61))
@@ -122,7 +123,7 @@
#define H_DABRX_KERNEL (1UL<<(63-62))
#define H_DABRX_USER (1UL<<(63-63))
-/* Each control block has to be on a 4K bondary */
+/* Each control block has to be on a 4K boundary */
#define H_CB_ALIGNMENT 4096
/* pSeries hypervisor opcodes */
@@ -234,6 +235,7 @@
#define H_GET_MPP 0x2D4
#define H_HOME_NODE_ASSOCIATIVITY 0x2EC
#define H_BEST_ENERGY 0x2F4
+#define H_GET_MPP_X 0x314
#define MAX_HCALL_OPCODE H_BEST_ENERGY
#ifndef __ASSEMBLY__
@@ -312,6 +314,16 @@ struct hvcall_mpp_data {
int h_get_mpp(struct hvcall_mpp_data *);
+struct hvcall_mpp_x_data {
+ unsigned long coalesced_bytes;
+ unsigned long pool_coalesced_bytes;
+ unsigned long pool_purr_cycles;
+ unsigned long pool_spurr_cycles;
+ unsigned long reserved[3];
+};
+
+int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data);
+
#ifdef CONFIG_PPC_PSERIES
extern int CMO_PrPSP;
extern int CMO_SecPSP;
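
A minimal sketch (not from the patch) of a caller of the new h_get_mpp_x() accessor; it assumes the usual convention that a return value of 0 (H_SUCCESS) means success, and the reporting is purely illustrative:

#include <linux/kernel.h>
#include <asm/hvcall.h>

/* Illustrative only: query the extended MPP data and log one field. */
static void example_show_coalescing(void)
{
	struct hvcall_mpp_x_data mpp_x;

	if (h_get_mpp_x(&mpp_x) == 0)
		pr_info("coalesced bytes: %lu\n", mpp_x.coalesced_bytes);
}
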
diff --git a/arch/powerpc/platforms/cell/io-workarounds.h b/arch/powerpc/include/asm/io-workarounds.h
index 6efc7782ebf2..fbae49286926 100644
--- a/arch/powerpc/platforms/cell/io-workarounds.h
+++ b/arch/powerpc/include/asm/io-workarounds.h
@@ -31,7 +31,6 @@ struct iowa_bus {
void *private;
};
-void __devinit io_workaround_init(void);
void __devinit iowa_register_bus(struct pci_controller *, struct ppc_pci_io *,
int (*)(struct iowa_bus *, void *), void *);
struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR);
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 001f2f11c19b..45698d55cd6a 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -2,6 +2,8 @@
#define _ASM_POWERPC_IO_H
#ifdef __KERNEL__
+#define ARCH_HAS_IOREMAP_WC
+
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -481,10 +483,16 @@ __do_out_asm(_rec_outl, "stwbrx")
_memcpy_fromio(dst,PCI_FIX_ADDR(src),n)
#endif /* !CONFIG_EEH */
-#ifdef CONFIG_PPC_INDIRECT_IO
-#define DEF_PCI_HOOK(x) x
+#ifdef CONFIG_PPC_INDIRECT_PIO
+#define DEF_PCI_HOOK_pio(x) x
+#else
+#define DEF_PCI_HOOK_pio(x) NULL
+#endif
+
+#ifdef CONFIG_PPC_INDIRECT_MMIO
+#define DEF_PCI_HOOK_mem(x) x
#else
-#define DEF_PCI_HOOK(x) NULL
+#define DEF_PCI_HOOK_mem(x) NULL
#endif
/* Structure containing all the hooks */
@@ -504,7 +512,7 @@ extern struct ppc_pci_io {
#define DEF_PCI_AC_RET(name, ret, at, al, space, aa) \
static inline ret name at \
{ \
- if (DEF_PCI_HOOK(ppc_pci_io.name) != NULL) \
+ if (DEF_PCI_HOOK_##space(ppc_pci_io.name) != NULL) \
return ppc_pci_io.name al; \
return __do_##name al; \
}
@@ -512,7 +520,7 @@ static inline ret name at \
#define DEF_PCI_AC_NORET(name, at, al, space, aa) \
static inline void name at \
{ \
- if (DEF_PCI_HOOK(ppc_pci_io.name) != NULL) \
+ if (DEF_PCI_HOOK_##space(ppc_pci_io.name) != NULL) \
ppc_pci_io.name al; \
else \
__do_##name al; \
@@ -616,12 +624,13 @@ static inline void iosync(void)
* * ioremap is the standard one and provides non-cacheable guarded mappings
* and can be hooked by the platform via ppc_md
*
- * * ioremap_flags allows to specify the page flags as an argument and can
- * also be hooked by the platform via ppc_md. ioremap_prot is the exact
- * same thing as ioremap_flags.
+ * * ioremap_prot allows to specify the page flags as an argument and can
+ * also be hooked by the platform via ppc_md.
*
* * ioremap_nocache is identical to ioremap
*
+ * * ioremap_wc enables write combining
+ *
* * iounmap undoes such a mapping and can be hooked
*
* * __ioremap_at (and the pending __iounmap_at) are low level functions to
@@ -629,7 +638,7 @@ static inline void iosync(void)
* currently be hooked. Must be page aligned.
*
* * __ioremap is the low level implementation used by ioremap and
- * ioremap_flags and cannot be hooked (but can be used by a hook on one
+ * ioremap_prot and cannot be hooked (but can be used by a hook on one
* of the previous ones)
*
* * __ioremap_caller is the same as above but takes an explicit caller
@@ -640,10 +649,10 @@ static inline void iosync(void)
*
*/
extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
-extern void __iomem *ioremap_flags(phys_addr_t address, unsigned long size,
- unsigned long flags);
+extern void __iomem *ioremap_prot(phys_addr_t address, unsigned long size,
+ unsigned long flags);
+extern void __iomem *ioremap_wc(phys_addr_t address, unsigned long size);
#define ioremap_nocache(addr, size) ioremap((addr), (size))
-#define ioremap_prot(addr, size, prot) ioremap_flags((addr), (size), (prot))
extern void iounmap(volatile void __iomem *addr);
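
A minimal sketch (not part of the patch) of the new ioremap_wc() in use for a prefetchable, framebuffer-style region; ioremap_wc()/iounmap() are as declared above, the helper and its error handling are made up:

#include <linux/io.h>

/* Illustrative only: map a region write-combined; caller must iounmap(). */
static void __iomem *example_map_wc(phys_addr_t base, unsigned long size)
{
	void __iomem *regs = ioremap_wc(base, size);

	if (!regs)
		return NULL;
	/* ... stores through 'regs' may now be combined by the hardware ... */
	return regs;
}
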
diff --git a/arch/powerpc/include/asm/io_event_irq.h b/arch/powerpc/include/asm/io_event_irq.h
new file mode 100644
index 000000000000..b1a9a1be3c21
--- /dev/null
+++ b/arch/powerpc/include/asm/io_event_irq.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2010, 2011 Mark Nelson and Tseng-Hui (Frank) Lin, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_POWERPC_IO_EVENT_IRQ_H
+#define _ASM_POWERPC_IO_EVENT_IRQ_H
+
+#include <linux/types.h>
+#include <linux/notifier.h>
+
+#define PSERIES_IOEI_RPC_MAX_LEN 216
+
+#define PSERIES_IOEI_TYPE_ERR_DETECTED 0x01
+#define PSERIES_IOEI_TYPE_ERR_RECOVERED 0x02
+#define PSERIES_IOEI_TYPE_EVENT 0x03
+#define PSERIES_IOEI_TYPE_RPC_PASS_THRU 0x04
+
+#define PSERIES_IOEI_SUBTYPE_NOT_APP 0x00
+#define PSERIES_IOEI_SUBTYPE_REBALANCE_REQ 0x01
+#define PSERIES_IOEI_SUBTYPE_NODE_ONLINE 0x03
+#define PSERIES_IOEI_SUBTYPE_NODE_OFFLINE 0x04
+#define PSERIES_IOEI_SUBTYPE_DUMP_SIZE_CHANGE 0x05
+#define PSERIES_IOEI_SUBTYPE_TORRENT_IRV_UPDATE 0x06
+#define PSERIES_IOEI_SUBTYPE_TORRENT_HFI_CFGED 0x07
+
+#define PSERIES_IOEI_SCOPE_NOT_APP 0x00
+#define PSERIES_IOEI_SCOPE_RIO_HUB 0x36
+#define PSERIES_IOEI_SCOPE_RIO_BRIDGE 0x37
+#define PSERIES_IOEI_SCOPE_PHB 0x38
+#define PSERIES_IOEI_SCOPE_EADS_GLOBAL 0x39
+#define PSERIES_IOEI_SCOPE_EADS_SLOT 0x3A
+#define PSERIES_IOEI_SCOPE_TORRENT_HUB 0x3B
+#define PSERIES_IOEI_SCOPE_SERVICE_PROC 0x51
+
+/* Platform Event Log Format, Version 6, data portion of IO event section */
+struct pseries_io_event {
+ uint8_t event_type; /* 0x00 IO-Event Type */
+ uint8_t rpc_data_len; /* 0x01 RPC data length */
+ uint8_t scope; /* 0x02 Error/Event Scope */
+ uint8_t event_subtype; /* 0x03 I/O-Event Sub-Type */
+ uint32_t drc_index; /* 0x04 DRC Index */
+ uint8_t rpc_data[PSERIES_IOEI_RPC_MAX_LEN];
+ /* 0x08 RPC Data (0-216 bytes, */
+ /* padded to 4 bytes alignment) */
+};
+
+extern struct atomic_notifier_head pseries_ioei_notifier_list;
+
+#endif /* _ASM_POWERPC_IO_EVENT_IRQ_H */
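
A minimal sketch (not part of the patch) of a consumer of this new header: it registers on pseries_ioei_notifier_list with the generic atomic notifier API and assumes, as the structure layout suggests, that the platform code passes a struct pseries_io_event pointer as the notifier data; all names below are illustrative:

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <asm/io_event_irq.h>

/* Illustrative only: log the type and scope of each reported IO event. */
static int example_ioei_notify(struct notifier_block *nb,
			       unsigned long action, void *data)
{
	struct pseries_io_event *ev = data;

	pr_info("IO event: type %d, scope %d\n", ev->event_type, ev->scope);
	return NOTIFY_OK;
}

static struct notifier_block example_ioei_nb = {
	.notifier_call = example_ioei_notify,
};

static int __init example_ioei_init(void)
{
	return atomic_notifier_chain_register(&pseries_ioei_notifier_list,
					      &example_ioei_nb);
}
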
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index 67ab5fb7d153..1bff591f7f72 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -88,9 +88,6 @@ struct irq_host_ops {
/* Dispose of such a mapping */
void (*unmap)(struct irq_host *h, unsigned int virq);
- /* Update of such a mapping */
- void (*remap)(struct irq_host *h, unsigned int virq, irq_hw_number_t hw);
-
/* Translate device-tree interrupt specifier from raw format coming
* from the firmware to a irq_hw_number_t (interrupt line number) and
* type (sense) that can be passed to set_irq_type(). In the absence
@@ -128,19 +125,10 @@ struct irq_host {
struct device_node *of_node;
};
-/* The main irq map itself is an array of NR_IRQ entries containing the
- * associate host and irq number. An entry with a host of NULL is free.
- * An entry can be allocated if it's free, the allocator always then sets
- * hwirq first to the host's invalid irq number and then fills ops.
- */
-struct irq_map_entry {
- irq_hw_number_t hwirq;
- struct irq_host *host;
-};
-
-extern struct irq_map_entry irq_map[NR_IRQS];
-
+struct irq_data;
+extern irq_hw_number_t irqd_to_hwirq(struct irq_data *d);
extern irq_hw_number_t virq_to_hw(unsigned int virq);
+extern bool virq_is_host(unsigned int virq, struct irq_host *host);
/**
* irq_alloc_host - Allocate a new irq_host data structure
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index f54408d995b5..8a33698c61bd 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -76,7 +76,7 @@ extern void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *));
extern cpumask_t cpus_in_sr;
static inline int kexec_sr_activated(int cpu)
{
- return cpu_isset(cpu,cpus_in_sr);
+ return cpumask_test_cpu(cpu, &cpus_in_sr);
}
struct kimage;
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
index d0e7701fa1f6..be0171afdc0f 100644
--- a/arch/powerpc/include/asm/kprobes.h
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -50,7 +50,7 @@ typedef unsigned int kprobe_opcode_t;
* Handle cases where:
* - User passes a <.symbol> or <module:.symbol>
* - User passes a <symbol> or <module:symbol>
- * - User passes a non-existant symbol, kallsyms_lookup_name
+ * - User passes a non-existent symbol, kallsyms_lookup_name
* returns 0. Don't deref the NULL pointer in that case
*/
#define kprobe_lookup_name(name, addr) \
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 5b7504674397..0951b17f4eb5 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -59,6 +59,7 @@
#define BOOK3S_INTERRUPT_INST_SEGMENT 0x480
#define BOOK3S_INTERRUPT_EXTERNAL 0x500
#define BOOK3S_INTERRUPT_EXTERNAL_LEVEL 0x501
+#define BOOK3S_INTERRUPT_EXTERNAL_HV 0x502
#define BOOK3S_INTERRUPT_ALIGNMENT 0x600
#define BOOK3S_INTERRUPT_PROGRAM 0x700
#define BOOK3S_INTERRUPT_FP_UNAVAIL 0x800
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 36fdb3aff30b..d5a8a3861635 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -34,6 +34,7 @@
(\intno == BOOK3S_INTERRUPT_DATA_SEGMENT) || \
(\intno == BOOK3S_INTERRUPT_INST_SEGMENT) || \
(\intno == BOOK3S_INTERRUPT_EXTERNAL) || \
+ (\intno == BOOK3S_INTERRUPT_EXTERNAL_HV) || \
(\intno == BOOK3S_INTERRUPT_ALIGNMENT) || \
(\intno == BOOK3S_INTERRUPT_PROGRAM) || \
(\intno == BOOK3S_INTERRUPT_FP_UNAVAIL) || \
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 26b8c807f8f1..e0298d26ce5d 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -105,7 +105,7 @@ struct lppaca {
// processing of external interrupts. Note that PLIC will store the
// XIRR directly into the xXirrValue field so that another XIRR will
// not be presented until this one clears. The layout of the low
- // 4-bytes of this Dword is upto SLIC - PLIC just checks whether the
+ // 4-bytes of this Dword is up to SLIC - PLIC just checks whether the
// entire Dword is zero or not. A non-zero value in the low order
// 2-bytes will result in SLIC being granted the highest thread
// priority upon return. A 0 will return to SLIC as medium priority.
@@ -210,6 +210,8 @@ struct dtl_entry {
#define DISPATCH_LOG_BYTES 4096 /* bytes per cpu */
#define N_DISPATCH_LOG (DISPATCH_LOG_BYTES / sizeof(struct dtl_entry))
+extern struct kmem_cache *dtl_cache;
+
/*
* When CONFIG_VIRT_CPU_ACCOUNTING = y, the cpu accounting code controls
* reading from the dispatch trace log. If other code wants to consume
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index fe56a23e1ff0..47cacddb14cf 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -29,21 +29,6 @@ struct file;
struct pci_controller;
struct kimage;
-#ifdef CONFIG_SMP
-struct smp_ops_t {
- void (*message_pass)(int target, int msg);
- int (*probe)(void);
- void (*kick_cpu)(int nr);
- void (*setup_cpu)(int nr);
- void (*take_timebase)(void);
- void (*give_timebase)(void);
- int (*cpu_enable)(unsigned int nr);
- int (*cpu_disable)(void);
- void (*cpu_die)(unsigned int nr);
- int (*cpu_bootable)(unsigned int nr);
-};
-#endif
-
struct machdep_calls {
char *name;
#ifdef CONFIG_PPC64
@@ -267,7 +252,7 @@ struct machdep_calls {
extern void e500_idle(void);
extern void power4_idle(void);
-extern void power4_cpu_offline_powersave(void);
+extern void power7_idle(void);
extern void ppc6xx_idle(void);
extern void book3e_idle(void);
@@ -312,12 +297,6 @@ extern sys_ctrler_t sys_ctrler;
#endif /* CONFIG_PPC_PMAC */
-#ifdef CONFIG_SMP
-/* Poor default implementations */
-extern void __devinit smp_generic_give_timebase(void);
-extern void __devinit smp_generic_take_timebase(void);
-#endif /* CONFIG_SMP */
-
/* Functions to produce codes on the leds.
* The SRC code should be unique for the message category and should
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 17194fcd4040..3ea0f9a259d8 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -43,6 +43,7 @@
#define MAS0_TLBSEL(x) (((x) << 28) & 0x30000000)
#define MAS0_ESEL(x) (((x) << 16) & 0x0FFF0000)
#define MAS0_NV(x) ((x) & 0x00000FFF)
+#define MAS0_ESEL_MASK 0x0FFF0000
#define MAS0_HES 0x00004000
#define MAS0_WQ_ALLWAYS 0x00000000
#define MAS0_WQ_COND 0x00001000
@@ -137,6 +138,21 @@
#define MMUCSR0_TLB2PS 0x00078000 /* TLB2 Page Size */
#define MMUCSR0_TLB3PS 0x00780000 /* TLB3 Page Size */
+/* MMUCFG bits */
+#define MMUCFG_MAVN_NASK 0x00000003
+#define MMUCFG_MAVN_V1_0 0x00000000
+#define MMUCFG_MAVN_V2_0 0x00000001
+#define MMUCFG_NTLB_MASK 0x0000000c
+#define MMUCFG_NTLB_SHIFT 2
+#define MMUCFG_PIDSIZE_MASK 0x000007c0
+#define MMUCFG_PIDSIZE_SHIFT 6
+#define MMUCFG_TWC 0x00008000
+#define MMUCFG_LRAT 0x00010000
+#define MMUCFG_RASIZE_MASK 0x00fe0000
+#define MMUCFG_RASIZE_SHIFT 17
+#define MMUCFG_LPIDSIZE_MASK 0x0f000000
+#define MMUCFG_LPIDSIZE_SHIFT 24
+
/* TLBnCFG encoding */
#define TLBnCFG_N_ENTRY 0x00000fff /* number of entries */
#define TLBnCFG_HES 0x00002000 /* HW select supported */
@@ -229,6 +245,10 @@ extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
extern int mmu_linear_psize;
extern int mmu_vmemmap_psize;
+#ifdef CONFIG_PPC64
+extern unsigned long linear_map_top;
+#endif
+
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_POWERPC_MMU_BOOK3E_H_ */
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index acac35d5b382..d865bd909c7d 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -27,7 +27,7 @@
#define STE_VSID_SHIFT 12
/* Location of cpu0's segment table */
-#define STAB0_PAGE 0x6
+#define STAB0_PAGE 0x8
#define STAB0_OFFSET (STAB0_PAGE << 12)
#define STAB0_PHYS_ADDR (STAB0_OFFSET + PHYSICAL_START)
@@ -408,6 +408,7 @@ static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
#endif /* CONFIG_PPC_SUBPAGE_PROT */
typedef unsigned long mm_context_id_t;
+struct spinlock;
typedef struct {
mm_context_id_t id;
@@ -423,6 +424,11 @@ typedef struct {
#ifdef CONFIG_PPC_SUBPAGE_PROT
struct subpage_prot_table spt;
#endif /* CONFIG_PPC_SUBPAGE_PROT */
+#ifdef CONFIG_PPC_ICSWX
+ struct spinlock *cop_lockp; /* guard acop and cop_pid */
+ unsigned long acop; /* mask of enabled coprocessor types */
+ unsigned int cop_pid; /* pid value used with coprocessors */
+#endif /* CONFIG_PPC_ICSWX */
} mm_context_t;
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index bb40a06d3b77..4138b21ae80a 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -56,11 +56,6 @@
*/
#define MMU_FTR_NEED_DTLB_SW_LRU ASM_CONST(0x00200000)
-/* This indicates that the processor uses the ISA 2.06 server tlbie
- * mnemonics
- */
-#define MMU_FTR_TLBIE_206 ASM_CONST(0x00400000)
-
/* Enable use of TLB reservation. Processor should support tlbsrx.
* instruction and MAS0[WQ].
*/
@@ -70,6 +65,53 @@
*/
#define MMU_FTR_USE_PAIRED_MAS ASM_CONST(0x01000000)
+/* MMU is SLB-based
+ */
+#define MMU_FTR_SLB ASM_CONST(0x02000000)
+
+/* Support 16M large pages
+ */
+#define MMU_FTR_16M_PAGE ASM_CONST(0x04000000)
+
+/* Supports TLBIEL variant
+ */
+#define MMU_FTR_TLBIEL ASM_CONST(0x08000000)
+
+/* Supports tlbies w/o locking
+ */
+#define MMU_FTR_LOCKLESS_TLBIE ASM_CONST(0x10000000)
+
+/* Large pages can be marked CI
+ */
+#define MMU_FTR_CI_LARGE_PAGE ASM_CONST(0x20000000)
+
+/* 1T segments available
+ */
+#define MMU_FTR_1T_SEGMENT ASM_CONST(0x40000000)
+
+/* Doesn't support the B bit (1T segment) in SLBIE
+ */
+#define MMU_FTR_NO_SLBIE_B ASM_CONST(0x80000000)
+
+/* MMU feature bit sets for various CPUs */
+#define MMU_FTRS_DEFAULT_HPTE_ARCH_V2 \
+ MMU_FTR_HPTE_TABLE | MMU_FTR_PPCAS_ARCH_V2
+#define MMU_FTRS_POWER4 MMU_FTRS_DEFAULT_HPTE_ARCH_V2
+#define MMU_FTRS_PPC970 MMU_FTRS_POWER4
+#define MMU_FTRS_POWER5 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
+#define MMU_FTRS_POWER6 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
+#define MMU_FTRS_POWER7 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
+#define MMU_FTRS_CELL MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
+ MMU_FTR_CI_LARGE_PAGE
+#define MMU_FTRS_PA6T MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
+ MMU_FTR_CI_LARGE_PAGE | MMU_FTR_NO_SLBIE_B
+#define MMU_FTRS_A2 MMU_FTR_TYPE_3E | MMU_FTR_USE_TLBILX | \
+ MMU_FTR_USE_TLBIVAX_BCAST | \
+ MMU_FTR_LOCK_BCAST_INVAL | \
+ MMU_FTR_USE_TLBRSRV | \
+ MMU_FTR_USE_PAIRED_MAS | \
+ MMU_FTR_TLBIEL | \
+ MMU_FTR_16M_PAGE
#ifndef __ASSEMBLY__
#include <asm/cputable.h>
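
A minimal sketch (not part of the patch) of how the relocated bits are tested at run time with mmu_has_feature(), which this header already provides; the wrapper function is made up:

#include <linux/types.h>
#include <asm/mmu.h>

/* Illustrative only: 1T segments are now an MMU feature, not a CPU feature. */
static bool example_have_1t_segments(void)
{
	return mmu_has_feature(MMU_FTR_1T_SEGMENT);
}
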
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 81fb41289d6c..a73668a5f30d 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -32,6 +32,10 @@ extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
#endif
+extern void switch_cop(struct mm_struct *next);
+extern int use_cop(unsigned long acop, struct mm_struct *mm);
+extern void drop_cop(unsigned long acop, struct mm_struct *mm);
+
/*
* switch_mm is the entry point called from the architecture independent
* code in kernel/sched.c
@@ -55,6 +59,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
if (prev == next)
return;
+#ifdef CONFIG_PPC_ICSWX
+ /* Switch coprocessor context only if prev or next uses a coprocessor */
+ if (prev->context.acop || next->context.acop)
+ switch_cop(next);
+#endif /* CONFIG_PPC_ICSWX */
+
/* We must stop all altivec streams before changing the HW
* context
*/
@@ -67,7 +77,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* sub architectures.
*/
#ifdef CONFIG_PPC_STD_MMU_64
- if (cpu_has_feature(CPU_FTR_SLB))
+ if (mmu_has_feature(MMU_FTR_SLB))
switch_slb(tsk, next);
else
switch_stab(tsk, next);
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
index 7005ee0b074d..df18989e78d4 100644
--- a/arch/powerpc/include/asm/mpic.h
+++ b/arch/powerpc/include/asm/mpic.h
@@ -3,7 +3,6 @@
#ifdef __KERNEL__
#include <linux/irq.h>
-#include <linux/sysdev.h>
#include <asm/dcr.h>
#include <asm/msi_bitmap.h>
@@ -263,6 +262,7 @@ struct mpic
#ifdef CONFIG_SMP
struct irq_chip hc_ipi;
#endif
+ struct irq_chip hc_tm;
const char *name;
/* Flags */
unsigned int flags;
@@ -281,7 +281,7 @@ struct mpic
/* vector numbers used for internal sources (ipi/timers) */
unsigned int ipi_vecs[4];
- unsigned int timer_vecs[4];
+ unsigned int timer_vecs[8];
/* Spurious vector to program into unused sources */
unsigned int spurious_vec;
@@ -320,8 +320,6 @@ struct mpic
/* link */
struct mpic *next;
- struct sys_device sysdev;
-
#ifdef CONFIG_PM
struct mpic_irq_save *save_data;
#endif
@@ -371,6 +369,8 @@ struct mpic
* NOTE: This flag trumps MPIC_WANTS_RESET.
*/
#define MPIC_NO_RESET 0x00004000
+/* Freescale MPIC (compatible includes "fsl,mpic") */
+#define MPIC_FSL 0x00008000
/* MPIC HW modification ID */
#define MPIC_REGSET_MASK 0xf0000000
diff --git a/arch/powerpc/include/asm/pSeries_reconfig.h b/arch/powerpc/include/asm/pSeries_reconfig.h
index d4b4bfa26fb3..89d2f99c1bf4 100644
--- a/arch/powerpc/include/asm/pSeries_reconfig.h
+++ b/arch/powerpc/include/asm/pSeries_reconfig.h
@@ -18,13 +18,18 @@
extern int pSeries_reconfig_notifier_register(struct notifier_block *);
extern void pSeries_reconfig_notifier_unregister(struct notifier_block *);
extern struct blocking_notifier_head pSeries_reconfig_chain;
+/* Not the best place to put this, will be fixed when we move some
+ * of the rtas suspend-me stuff to pseries */
+extern void pSeries_coalesce_init(void);
#else /* !CONFIG_PPC_PSERIES */
static inline int pSeries_reconfig_notifier_register(struct notifier_block *nb)
{
return 0;
}
static inline void pSeries_reconfig_notifier_unregister(struct notifier_block *nb) { }
+static inline void pSeries_coalesce_init(void) { }
#endif /* CONFIG_PPC_PSERIES */
+
#endif /* __KERNEL__ */
#endif /* _PPC64_PSERIES_RECONFIG_H */
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index ec57540cd7af..74126765106a 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -92,9 +92,9 @@ struct paca_struct {
* Now, starting in cacheline 2, the exception save areas
*/
/* used for most interrupts/exceptions */
- u64 exgen[10] __attribute__((aligned(0x80)));
- u64 exmc[10]; /* used for machine checks */
- u64 exslb[10]; /* used for SLB/segment table misses
+ u64 exgen[11] __attribute__((aligned(0x80)));
+ u64 exmc[11]; /* used for machine checks */
+ u64 exslb[11]; /* used for SLB/segment table misses
* on the linear mapping */
/* SLB related definitions */
u16 vmalloc_sllp;
@@ -106,7 +106,8 @@ struct paca_struct {
pgd_t *pgd; /* Current PGD */
pgd_t *kernel_pgd; /* Kernel PGD */
u64 exgen[8] __attribute__((aligned(0x80)));
- u64 extlb[EX_TLB_SIZE*3] __attribute__((aligned(0x80)));
+ /* We can have up to 3 levels of reentrancy in the TLB miss handler */
+ u64 extlb[3][EX_TLB_SIZE / sizeof(u64)] __attribute__((aligned(0x80)));
u64 exmc[8]; /* used for machine checks */
u64 excrit[8]; /* used for crit interrupts */
u64 exdbg[8]; /* used for debug interrupts */
@@ -125,7 +126,7 @@ struct paca_struct {
struct task_struct *__current; /* Pointer to current */
u64 kstack; /* Saved Kernel stack addr */
u64 stab_rr; /* stab/slb round-robin counter */
- u64 saved_r1; /* r1 save for RTAS calls */
+ u64 saved_r1; /* r1 save for RTAS calls or PM */
u64 saved_msr; /* MSR saved here by enter_rtas */
u16 trap_save; /* Used when bad stack is encountered */
u8 soft_enabled; /* irq soft-enable flag */
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index da4b20008541..2cd664ef0a5e 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -100,7 +100,7 @@ extern phys_addr_t kernstart_addr;
#endif
#ifdef CONFIG_FLATMEM
-#define ARCH_PFN_OFFSET (MEMORY_START >> PAGE_SHIFT)
+#define ARCH_PFN_OFFSET ((unsigned long)(MEMORY_START >> PAGE_SHIFT))
#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < max_mapnr)
#endif
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index 932f88dcf6fa..9356262fd3cc 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -59,24 +59,7 @@ static __inline__ void clear_page(void *addr)
: "ctr", "memory");
}
-extern void copy_4K_page(void *to, void *from);
-
-#ifdef CONFIG_PPC_64K_PAGES
-static inline void copy_page(void *to, void *from)
-{
- unsigned int i;
- for (i=0; i < (1 << (PAGE_SHIFT - 12)); i++) {
- copy_4K_page(to, from);
- to += 4096;
- from += 4096;
- }
-}
-#else /* CONFIG_PPC_64K_PAGES */
-static inline void copy_page(void *to, void *from)
-{
- copy_4K_page(to, from);
-}
-#endif /* CONFIG_PPC_64K_PAGES */
+extern void copy_page(void *to, void *from);
/* Log 2 of page table size */
extern u64 ppc64_pft_size;
@@ -130,7 +113,7 @@ extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize);
extern void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
unsigned long len, unsigned int psize);
-#define slice_mm_new_context(mm) ((mm)->context.id == 0)
+#define slice_mm_new_context(mm) ((mm)->context.id == MMU_NO_CONTEXT)
#endif /* __ASSEMBLY__ */
#else
@@ -169,7 +152,7 @@ do { \
/*
* This is the default if a program doesn't have a PT_GNU_STACK
* program header entry. The PPC64 ELF ABI has a non executable stack
- * stack by default, so in the absense of a PT_GNU_STACK program header
+ * stack by default, so in the absence of a PT_GNU_STACK program header
* we turn execute permission off.
*/
#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
diff --git a/arch/powerpc/include/asm/pasemi_dma.h b/arch/powerpc/include/asm/pasemi_dma.h
index 19fd7933e2d9..eafa5a5f56de 100644
--- a/arch/powerpc/include/asm/pasemi_dma.h
+++ b/arch/powerpc/include/asm/pasemi_dma.h
@@ -522,7 +522,7 @@ extern void *pasemi_dma_alloc_buf(struct pasemi_dmachan *chan, int size,
extern void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size,
dma_addr_t *handle);
-/* Routines to allocate flags (events) for channel syncronization */
+/* Routines to allocate flags (events) for channel synchronization */
extern int pasemi_dma_alloc_flag(void);
extern void pasemi_dma_free_flag(int flag);
extern void pasemi_dma_set_flag(int flag);
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 5e156e034fe2..b90dbf8e5cd9 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -106,7 +106,7 @@ struct pci_controller {
* Used for variants of PCI indirect handling and possible quirks:
* SET_CFG_TYPE - used on 4xx or any PHB that does explicit type0/1
* EXT_REG - provides access to PCI-e extended registers
- * SURPRESS_PRIMARY_BUS - we surpress the setting of PCI_PRIMARY_BUS
+ * SURPRESS_PRIMARY_BUS - we suppress the setting of PCI_PRIMARY_BUS
* on Freescale PCI-e controllers since they used the PCI_PRIMARY_BUS
* to determine which bus number to match on when generating type0
* config cycles
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 2b09cd522d33..81576ee0cfb1 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -257,21 +257,20 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
- unsigned long old;
- if ((pte_val(*ptep) & _PAGE_RW) == 0)
- return;
- old = pte_update(mm, addr, ptep, _PAGE_RW, 0);
+ if ((pte_val(*ptep) & _PAGE_RW) == 0)
+ return;
+
+ pte_update(mm, addr, ptep, _PAGE_RW, 0);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- unsigned long old;
-
if ((pte_val(*ptep) & _PAGE_RW) == 0)
return;
- old = pte_update(mm, addr, ptep, _PAGE_RW, 1);
+
+ pte_update(mm, addr, ptep, _PAGE_RW, 1);
}
/*
diff --git a/arch/powerpc/include/asm/pmac_feature.h b/arch/powerpc/include/asm/pmac_feature.h
index 00eedc5a4e61..10902c9375d0 100644
--- a/arch/powerpc/include/asm/pmac_feature.h
+++ b/arch/powerpc/include/asm/pmac_feature.h
@@ -53,8 +53,8 @@
/* Here is the infamous serie of OHare based machines
*/
-#define PMAC_TYPE_COMET 0x20 /* Beleived to be PowerBook 2400 */
-#define PMAC_TYPE_HOOPER 0x21 /* Beleived to be PowerBook 3400 */
+#define PMAC_TYPE_COMET 0x20 /* Believed to be PowerBook 2400 */
+#define PMAC_TYPE_HOOPER 0x21 /* Believed to be PowerBook 3400 */
#define PMAC_TYPE_KANGA 0x22 /* PowerBook 3500 (first G3) */
#define PMAC_TYPE_ALCHEMY 0x23 /* Alchemy motherboard base */
#define PMAC_TYPE_GAZELLE 0x24 /* Spartacus, some 5xxx/6xxx */
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 1255569387b6..e472659d906c 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -41,6 +41,10 @@
#define PPC_INST_RFCI 0x4c000066
#define PPC_INST_RFDI 0x4c00004e
#define PPC_INST_RFMCI 0x4c00004c
+#define PPC_INST_MFSPR_DSCR 0x7c1102a6
+#define PPC_INST_MFSPR_DSCR_MASK 0xfc1fffff
+#define PPC_INST_MTSPR_DSCR 0x7c1103a6
+#define PPC_INST_MTSPR_DSCR_MASK 0xfc1fffff
#define PPC_INST_STRING 0x7c00042a
#define PPC_INST_STRING_MASK 0xfc0007fe
@@ -56,6 +60,17 @@
#define PPC_INST_TLBSRX_DOT 0x7c0006a5
#define PPC_INST_XXLOR 0xf0000510
+#define PPC_INST_NAP 0x4c000364
+#define PPC_INST_SLEEP 0x4c0003a4
+
+/* A2 specific instructions */
+#define PPC_INST_ERATWE 0x7c0001a6
+#define PPC_INST_ERATRE 0x7c000166
+#define PPC_INST_ERATILX 0x7c000066
+#define PPC_INST_ERATIVAX 0x7c000666
+#define PPC_INST_ERATSX 0x7c000126
+#define PPC_INST_ERATSX_DOT 0x7c000127
+
/* macros to insert fields into opcodes */
#define __PPC_RA(a) (((a) & 0x1f) << 16)
#define __PPC_RB(b) (((b) & 0x1f) << 11)
@@ -67,6 +82,8 @@
#define __PPC_XT(s) __PPC_XS(s)
#define __PPC_T_TLB(t) (((t) & 0x3) << 21)
#define __PPC_WC(w) (((w) & 0x3) << 21)
+#define __PPC_WS(w) (((w) & 0x1f) << 11)
+
/*
* Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a
* larx with EH set as an illegal instruction.
@@ -113,6 +130,21 @@
#define PPC_TLBIVAX(a,b) stringify_in_c(.long PPC_INST_TLBIVAX | \
__PPC_RA(a) | __PPC_RB(b))
+#define PPC_ERATWE(s, a, w) stringify_in_c(.long PPC_INST_ERATWE | \
+ __PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w))
+#define PPC_ERATRE(s, a, w) stringify_in_c(.long PPC_INST_ERATRE | \
+ __PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w))
+#define PPC_ERATILX(t, a, b) stringify_in_c(.long PPC_INST_ERATILX | \
+ __PPC_T_TLB(t) | __PPC_RA(a) | \
+ __PPC_RB(b))
+#define PPC_ERATIVAX(s, a, b) stringify_in_c(.long PPC_INST_ERATIVAX | \
+ __PPC_RS(s) | __PPC_RA(a) | __PPC_RB(b))
+#define PPC_ERATSX(t, a, b)	stringify_in_c(.long PPC_INST_ERATSX | \
+					__PPC_RS(t) | __PPC_RA(a) | __PPC_RB(b))
+#define PPC_ERATSX_DOT(t, a, b)	stringify_in_c(.long PPC_INST_ERATSX_DOT | \
+					__PPC_RS(t) | __PPC_RA(a) | __PPC_RB(b))
+
+
/*
* Define what the VSX XX1 form instructions will look like, then add
* the 128 bit load store instructions based on that.
@@ -126,4 +158,7 @@
#define XXLOR(t, a, b) stringify_in_c(.long PPC_INST_XXLOR | \
VSX_XX3((t), (a), (b)))
+#define PPC_NAP stringify_in_c(.long PPC_INST_NAP)
+#define PPC_SLEEP stringify_in_c(.long PPC_INST_SLEEP)
+
#endif /* _ASM_POWERPC_PPC_OPCODE_H */
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 98210067c1cc..1b422381fc16 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -170,6 +170,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#define HMT_MEDIUM or 2,2,2
#define HMT_MEDIUM_HIGH or 5,5,5 # medium high priority
#define HMT_HIGH or 3,3,3
+#define HMT_EXTRA_HIGH or 7,7,7 # power7 only
#ifdef __KERNEL__
#ifdef CONFIG_PPC64
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index de1967a1ff57..d50c2b6d9bc3 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -238,6 +238,10 @@ struct thread_struct {
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
void* kvm_shadow_vcpu; /* KVM internal data */
#endif /* CONFIG_KVM_BOOK3S_32_HANDLER */
+#ifdef CONFIG_PPC64
+ unsigned long dscr;
+ int dscr_inherit;
+#endif
};
#define ARCH_MIN_TASKALIGN 16
diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
index 76bb195e4f24..8d1569c29042 100644
--- a/arch/powerpc/include/asm/pte-common.h
+++ b/arch/powerpc/include/asm/pte-common.h
@@ -86,7 +86,7 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
#define PTE_RPN_MASK (~((1UL<<PTE_RPN_SHIFT)-1))
#endif
-/* _PAGE_CHG_MASK masks of bits that are to be preserved accross
+/* _PAGE_CHG_MASK masks of bits that are to be preserved across
* pgprot changes
*/
#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
@@ -162,7 +162,7 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
* on platforms where such control is possible.
*/
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
- defined(CONFIG_KPROBES)
+ defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX
@@ -174,7 +174,7 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
/*
* Don't just check for any non zero bits in __PAGE_USER, since for book3e
* and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in
- * _PAGE_USER. Need to explictly match _PAGE_BAP_UR bit in that case too.
+ * _PAGE_USER. Need to explicitly match _PAGE_BAP_UR bit in that case too.
*/
#define pte_user(val) ((val & _PAGE_USER) == _PAGE_USER)
diff --git a/arch/powerpc/include/asm/qe_ic.h b/arch/powerpc/include/asm/qe_ic.h
index 9e2cb2019161..f706164b0bd0 100644
--- a/arch/powerpc/include/asm/qe_ic.h
+++ b/arch/powerpc/include/asm/qe_ic.h
@@ -81,7 +81,7 @@ int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high);
static inline void qe_ic_cascade_low_ipic(unsigned int irq,
struct irq_desc *desc)
{
- struct qe_ic *qe_ic = get_irq_desc_data(desc);
+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
if (cascade_irq != NO_IRQ)
@@ -91,7 +91,7 @@ static inline void qe_ic_cascade_low_ipic(unsigned int irq,
static inline void qe_ic_cascade_high_ipic(unsigned int irq,
struct irq_desc *desc)
{
- struct qe_ic *qe_ic = get_irq_desc_data(desc);
+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
if (cascade_irq != NO_IRQ)
@@ -101,9 +101,9 @@ static inline void qe_ic_cascade_high_ipic(unsigned int irq,
static inline void qe_ic_cascade_low_mpic(unsigned int irq,
struct irq_desc *desc)
{
- struct qe_ic *qe_ic = get_irq_desc_data(desc);
+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
if (cascade_irq != NO_IRQ)
generic_handle_irq(cascade_irq);
@@ -114,9 +114,9 @@ static inline void qe_ic_cascade_low_mpic(unsigned int irq,
static inline void qe_ic_cascade_high_mpic(unsigned int irq,
struct irq_desc *desc)
{
- struct qe_ic *qe_ic = get_irq_desc_data(desc);
+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
if (cascade_irq != NO_IRQ)
generic_handle_irq(cascade_irq);
@@ -127,9 +127,9 @@ static inline void qe_ic_cascade_high_mpic(unsigned int irq,
static inline void qe_ic_cascade_muxed_mpic(unsigned int irq,
struct irq_desc *desc)
{
- struct qe_ic *qe_ic = get_irq_desc_data(desc);
+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
unsigned int cascade_irq;
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
cascade_irq = qe_ic_get_high_irq(qe_ic);
if (cascade_irq == NO_IRQ)
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 7e4abebe76c0..c5cae0dd176c 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -99,17 +99,23 @@
#define MSR_LE __MASK(MSR_LE_LG) /* Little Endian */
#if defined(CONFIG_PPC_BOOK3S_64)
+#define MSR_64BIT MSR_SF
+
/* Server variant */
#define MSR_ MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV
-#define MSR_KERNEL MSR_ | MSR_SF
+#define MSR_KERNEL MSR_ | MSR_64BIT
#define MSR_USER32 MSR_ | MSR_PR | MSR_EE
-#define MSR_USER64 MSR_USER32 | MSR_SF
+#define MSR_USER64 MSR_USER32 | MSR_64BIT
#elif defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_8xx)
/* Default MSR for kernel mode. */
#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR)
#define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE)
#endif
+#ifndef MSR_64BIT
+#define MSR_64BIT 0
+#endif
+
/* Floating Point Status and Control Register (FPSCR) Fields */
#define FPSCR_FX 0x80000000 /* FPU exception summary */
#define FPSCR_FEX 0x40000000 /* FPU enabled exception summary */
@@ -182,6 +188,8 @@
#define SPRN_CTR 0x009 /* Count Register */
#define SPRN_DSCR 0x11
+#define SPRN_CFAR 0x1c /* Come From Address Register */
+#define SPRN_ACOP 0x1F /* Available Coprocessor Register */
#define SPRN_CTRLF 0x088
#define SPRN_CTRLT 0x098
#define CTRL_CT 0xc0000000 /* current thread */
@@ -210,8 +218,43 @@
#define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */
#define SPRN_TBWU 0x11D /* Time Base Upper Register (super, R/W) */
#define SPRN_SPURR 0x134 /* Scaled PURR */
+#define SPRN_HSPRG0 0x130 /* Hypervisor Scratch 0 */
+#define SPRN_HSPRG1 0x131 /* Hypervisor Scratch 1 */
+#define SPRN_HDSISR 0x132
+#define SPRN_HDAR 0x133
+#define SPRN_HDEC 0x136 /* Hypervisor Decrementer */
#define SPRN_HIOR 0x137 /* 970 Hypervisor interrupt offset */
+#define SPRN_RMOR 0x138 /* Real mode offset register */
+#define SPRN_HRMOR 0x139 /* Real mode offset register */
+#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */
+#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
#define SPRN_LPCR 0x13E /* LPAR Control Register */
+#define LPCR_VPM0 (1ul << (63-0))
+#define LPCR_VPM1 (1ul << (63-1))
+#define LPCR_ISL (1ul << (63-2))
+#define LPCR_DPFD_SH (63-11)
+#define LPCR_VRMA_L (1ul << (63-12))
+#define LPCR_VRMA_LP0 (1ul << (63-15))
+#define LPCR_VRMA_LP1 (1ul << (63-16))
+#define LPCR_RMLS 0x1C000000 /* impl dependent rmo limit sel */
+#define LPCR_ILE 0x02000000 /* !HV irqs set MSR:LE */
+#define LPCR_PECE 0x00007000 /* powersave exit cause enable */
+#define LPCR_PECE0 0x00004000 /* ext. exceptions can cause exit */
+#define LPCR_PECE1 0x00002000 /* decrementer can cause exit */
+#define LPCR_PECE2 0x00001000 /* machine check etc can cause exit */
+#define LPCR_MER 0x00000800 /* Mediated External Exception */
+#define LPCR_LPES0 0x00000008 /* LPAR Env selector 0 */
+#define LPCR_LPES1 0x00000004 /* LPAR Env selector 1 */
+#define LPCR_RMI 0x00000002 /* real mode is cache inhibit */
+#define LPCR_HDICE 0x00000001 /* Hyp Decr enable (HV,PR,EE) */
+#define SPRN_LPID 0x13F /* Logical Partition Identifier */
+#define SPRN_HMER 0x150 /* Hardware m? error recovery */
+#define SPRN_HMEER 0x151 /* Hardware m? enable error recovery */
+#define SPRN_HEIR 0x153 /* Hypervisor Emulated Instruction Register */
+#define SPRN_TLBINDEXR 0x154 /* P7 TLB control register */
+#define SPRN_TLBVPNR 0x155 /* P7 TLB control register */
+#define SPRN_TLBRPNR 0x156 /* P7 TLB control register */
+#define SPRN_TLBLPIDR 0x157 /* P7 TLB control register */
#define SPRN_DBAT0L 0x219 /* Data BAT 0 Lower Register */
#define SPRN_DBAT0U 0x218 /* Data BAT 0 Upper Register */
#define SPRN_DBAT1L 0x21B /* Data BAT 1 Lower Register */
@@ -434,16 +477,23 @@
#define SPRN_SRR0 0x01A /* Save/Restore Register 0 */
#define SPRN_SRR1 0x01B /* Save/Restore Register 1 */
#define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */
-#define SRR1_WAKERESET 0x00380000 /* System reset */
#define SRR1_WAKESYSERR 0x00300000 /* System error */
#define SRR1_WAKEEE 0x00200000 /* External interrupt */
#define SRR1_WAKEMT 0x00280000 /* mtctrl */
+#define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */
#define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */
#define SRR1_WAKETHERM 0x00100000 /* Thermal management interrupt */
+#define SRR1_WAKERESET 0x00100000 /* System reset */
+#define SRR1_WAKESTATE 0x00030000 /* Powersave exit mask [46:47] */
+#define SRR1_WS_DEEPEST 0x00030000 /* Some resources not maintained,
+ * may not be recoverable */
+#define SRR1_WS_DEEPER 0x00020000 /* Some resources not maintained */
+#define SRR1_WS_DEEP 0x00010000 /* All resources maintained */
#define SRR1_PROGFPE 0x00100000 /* Floating Point Enabled */
#define SRR1_PROGPRIV 0x00040000 /* Privileged instruction */
#define SRR1_PROGTRAP 0x00020000 /* Trap */
#define SRR1_PROGADDR 0x00010000 /* SRR0 contains subsequent addr */
+
#define SPRN_HSRR0 0x13A /* Save/Restore Register 0 */
#define SPRN_HSRR1 0x13B /* Save/Restore Register 1 */
@@ -673,12 +723,15 @@
* SPRG usage:
*
* All 64-bit:
- * - SPRG1 stores PACA pointer
+ * - SPRG1 stores PACA pointer except 64-bit server in
+ * HV mode in which case it is HSPRG0
*
* 64-bit server:
* - SPRG0 unused (reserved for HV on Power4)
* - SPRG2 scratch for exception vectors
* - SPRG3 unused (user visible)
+ * - HSPRG0 stores PACA in HV mode
+ * - HSPRG1 scratch for "HV" exceptions
*
* 64-bit embedded
* - SPRG0 generic exception scratch
@@ -741,6 +794,41 @@
#ifdef CONFIG_PPC_BOOK3S_64
#define SPRN_SPRG_SCRATCH0 SPRN_SPRG2
+#define SPRN_SPRG_HPACA SPRN_HSPRG0
+#define SPRN_SPRG_HSCRATCH0 SPRN_HSPRG1
+
+#define GET_PACA(rX) \
+ BEGIN_FTR_SECTION_NESTED(66); \
+ mfspr rX,SPRN_SPRG_PACA; \
+ FTR_SECTION_ELSE_NESTED(66); \
+ mfspr rX,SPRN_SPRG_HPACA; \
+ ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66)
+
+#define SET_PACA(rX) \
+ BEGIN_FTR_SECTION_NESTED(66); \
+ mtspr SPRN_SPRG_PACA,rX; \
+ FTR_SECTION_ELSE_NESTED(66); \
+ mtspr SPRN_SPRG_HPACA,rX; \
+ ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66)
+
+#define GET_SCRATCH0(rX) \
+ BEGIN_FTR_SECTION_NESTED(66); \
+ mfspr rX,SPRN_SPRG_SCRATCH0; \
+ FTR_SECTION_ELSE_NESTED(66); \
+ mfspr rX,SPRN_SPRG_HSCRATCH0; \
+ ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66)
+
+#define SET_SCRATCH0(rX) \
+ BEGIN_FTR_SECTION_NESTED(66); \
+ mtspr SPRN_SPRG_SCRATCH0,rX; \
+ FTR_SECTION_ELSE_NESTED(66); \
+ mtspr SPRN_SPRG_HSCRATCH0,rX; \
+ ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66)
+
+#else /* CONFIG_PPC_BOOK3S_64 */
+#define GET_SCRATCH0(rX) mfspr rX,SPRN_SPRG_SCRATCH0
+#define SET_SCRATCH0(rX) mtspr SPRN_SPRG_SCRATCH0,rX
+
#endif
#ifdef CONFIG_PPC_BOOK3E_64
@@ -750,6 +838,10 @@
#define SPRN_SPRG_TLB_EXFRAME SPRN_SPRG2
#define SPRN_SPRG_TLB_SCRATCH SPRN_SPRG6
#define SPRN_SPRG_GEN_SCRATCH SPRN_SPRG0
+
+#define SET_PACA(rX) mtspr SPRN_SPRG_PACA,rX
+#define GET_PACA(rX) mfspr rX,SPRN_SPRG_PACA
+
#endif
#ifdef CONFIG_PPC_BOOK3S_32
@@ -800,6 +892,8 @@
#define SPRN_SPRG_SCRATCH1 SPRN_SPRG1
#endif
+
+
/*
* An mtfsf instruction with the L bit set. On CPUs that support this a
* full 64bits of FPSCR is restored and on other CPUs the L bit is ignored.
@@ -894,6 +988,8 @@
#define PV_POWER5p 0x003B
#define PV_POWER7 0x003F
#define PV_970FX 0x003C
+#define PV_POWER6 0x003E
+#define PV_POWER7 0x003F
#define PV_630 0x0040
#define PV_630p 0x0041
#define PV_970MP 0x0044
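Together with PPC_NAP above, the new SRR1 wake-reason and wake-state masks let the wakeup path tell why a thread left nap/sleep and how much state survived. A hedged sketch of the decode (the function is illustrative; only the SRR1_* names come from this file):

/* Sketch: classify a POWER7 wakeup from the SRR1 value observed on
 * exit from nap/sleep.  Illustrative only. */
static void decode_wakeup(unsigned long srr1)
{
	unsigned long reason = srr1 & SRR1_WAKEMASK;

	if (reason == SRR1_WAKEDEC)
		pr_debug("woken by decrementer\n");
	else if (reason == SRR1_WAKEEE)
		pr_debug("woken by external interrupt\n");
	else if (reason == SRR1_WAKERESET)
		pr_debug("woken by system reset\n");

	if ((srr1 & SRR1_WAKESTATE) == SRR1_WS_DEEPEST)
		pr_warn("deepest state: some state may not be recoverable\n");
}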
diff --git a/arch/powerpc/include/asm/reg_a2.h b/arch/powerpc/include/asm/reg_a2.h
new file mode 100644
index 000000000000..3d52a1132f3d
--- /dev/null
+++ b/arch/powerpc/include/asm/reg_a2.h
@@ -0,0 +1,165 @@
+/*
+ * Register definitions specific to the A2 core
+ *
+ * Copyright (C) 2008 Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __ASM_POWERPC_REG_A2_H__
+#define __ASM_POWERPC_REG_A2_H__
+
+#define SPRN_TENSR 0x1b5
+#define SPRN_TENS 0x1b6 /* Thread ENable Set */
+#define SPRN_TENC 0x1b7 /* Thread ENable Clear */
+
+#define SPRN_A2_CCR0 0x3f0 /* Core Configuration Register 0 */
+#define SPRN_A2_CCR1 0x3f1 /* Core Configuration Register 1 */
+#define SPRN_A2_CCR2 0x3f2 /* Core Configuration Register 2 */
+#define SPRN_MMUCR0 0x3fc /* MMU Control Register 0 */
+#define SPRN_MMUCR1 0x3fd /* MMU Control Register 1 */
+#define SPRN_MMUCR2 0x3fe /* MMU Control Register 2 */
+#define SPRN_MMUCR3 0x3ff /* MMU Control Register 3 */
+
+#define SPRN_IAR 0x372
+
+#define SPRN_IUCR0 0x3f3
+#define IUCR0_ICBI_ACK 0x1000
+
+#define SPRN_XUCR0 0x3f6 /* Execution Unit Config Register 0 */
+
+#define A2_IERAT_SIZE 16
+#define A2_DERAT_SIZE 32
+
+/* A2 MMUCR0 bits */
+#define MMUCR0_ECL 0x80000000 /* Extended Class for TLB fills */
+#define MMUCR0_TID_NZ 0x40000000 /* TID is non-zero */
+#define MMUCR0_TS 0x10000000 /* Translation space for TLB fills */
+#define MMUCR0_TGS 0x20000000 /* Guest space for TLB fills */
+#define MMUCR0_TLBSEL 0x0c000000 /* TLB or ERAT target for TLB fills */
+#define MMUCR0_TLBSEL_U 0x00000000 /* TLBSEL = UTLB */
+#define MMUCR0_TLBSEL_I 0x08000000 /* TLBSEL = I-ERAT */
+#define MMUCR0_TLBSEL_D 0x0c000000 /* TLBSEL = D-ERAT */
+#define MMUCR0_LOCKSRSH 0x02000000 /* Use TLB lock on tlbsx. */
+#define MMUCR0_TID_MASK 0x000000ff /* TID field */
+
+/* A2 MMUCR1 bits */
+#define MMUCR1_IRRE 0x80000000 /* I-ERAT round robin enable */
+#define MMUCR1_DRRE 0x40000000 /* D-ERAT round robin enable */
+#define MMUCR1_REE 0x20000000 /* Reference Exception Enable*/
+#define MMUCR1_CEE 0x10000000 /* Change exception enable */
+#define MMUCR1_CSINV_ALL 0x00000000 /* Inval ERAT on all CS evts */
+#define MMUCR1_CSINV_NISYNC 0x04000000 /* Inval ERAT on all ex isync*/
+#define MMUCR1_CSINV_NEVER 0x0c000000 /* Don't inval ERAT on CS */
+#define MMUCR1_ICTID 0x00080000 /* IERAT class field as TID */
+#define MMUCR1_ITTID 0x00040000 /* IERAT thdid field as TID */
+#define MMUCR1_DCTID 0x00020000 /* DERAT class field as TID */
+#define MMUCR1_DTTID 0x00010000 /* DERAT thdid field as TID */
+#define MMUCR1_DCCD 0x00008000 /* DERAT class ignore */
+#define MMUCR1_TLBWE_BINV 0x00004000 /* back invalidate on tlbwe */
+
+/* A2 MMUCR2 bits */
+#define MMUCR2_PSSEL_SHIFT 4
+
+/* A2 MMUCR3 bits */
+#define MMUCR3_THID 0x0000000f /* Thread ID */
+
+/* *** ERAT TLB bits definitions */
+#define TLB0_EPN_MASK ASM_CONST(0xfffffffffffff000)
+#define TLB0_CLASS_MASK ASM_CONST(0x0000000000000c00)
+#define TLB0_CLASS_00 ASM_CONST(0x0000000000000000)
+#define TLB0_CLASS_01 ASM_CONST(0x0000000000000400)
+#define TLB0_CLASS_10 ASM_CONST(0x0000000000000800)
+#define TLB0_CLASS_11 ASM_CONST(0x0000000000000c00)
+#define TLB0_V ASM_CONST(0x0000000000000200)
+#define TLB0_X ASM_CONST(0x0000000000000100)
+#define TLB0_SIZE_MASK ASM_CONST(0x00000000000000f0)
+#define TLB0_SIZE_4K ASM_CONST(0x0000000000000010)
+#define TLB0_SIZE_64K ASM_CONST(0x0000000000000030)
+#define TLB0_SIZE_1M ASM_CONST(0x0000000000000050)
+#define TLB0_SIZE_16M ASM_CONST(0x0000000000000070)
+#define TLB0_SIZE_1G ASM_CONST(0x00000000000000a0)
+#define TLB0_THDID_MASK ASM_CONST(0x000000000000000f)
+#define TLB0_THDID_0 ASM_CONST(0x0000000000000001)
+#define TLB0_THDID_1 ASM_CONST(0x0000000000000002)
+#define TLB0_THDID_2 ASM_CONST(0x0000000000000004)
+#define TLB0_THDID_3 ASM_CONST(0x0000000000000008)
+#define TLB0_THDID_ALL ASM_CONST(0x000000000000000f)
+
+#define TLB1_RESVATTR ASM_CONST(0x00f0000000000000)
+#define TLB1_U0 ASM_CONST(0x0008000000000000)
+#define TLB1_U1 ASM_CONST(0x0004000000000000)
+#define TLB1_U2 ASM_CONST(0x0002000000000000)
+#define TLB1_U3 ASM_CONST(0x0001000000000000)
+#define TLB1_R ASM_CONST(0x0000800000000000)
+#define TLB1_C ASM_CONST(0x0000400000000000)
+#define TLB1_RPN_MASK ASM_CONST(0x000003fffffff000)
+#define TLB1_W ASM_CONST(0x0000000000000800)
+#define TLB1_I ASM_CONST(0x0000000000000400)
+#define TLB1_M ASM_CONST(0x0000000000000200)
+#define TLB1_G ASM_CONST(0x0000000000000100)
+#define TLB1_E ASM_CONST(0x0000000000000080)
+#define TLB1_VF ASM_CONST(0x0000000000000040)
+#define TLB1_UX ASM_CONST(0x0000000000000020)
+#define TLB1_SX ASM_CONST(0x0000000000000010)
+#define TLB1_UW ASM_CONST(0x0000000000000008)
+#define TLB1_SW ASM_CONST(0x0000000000000004)
+#define TLB1_UR ASM_CONST(0x0000000000000002)
+#define TLB1_SR ASM_CONST(0x0000000000000001)
+
+#ifdef CONFIG_PPC_EARLY_DEBUG_WSP
+#define WSP_UART_PHYS 0xffc000c000
+/* This needs to be carefully chosen to hit a !0 congruence class
+ * in the TLB since we bolt it in way 3, which is already occupied
+ * by our linear mapping primary bolted entry in CC 0.
+ */
+#define WSP_UART_VIRT 0xf000000000001000
+#endif
+
+/* A2 erativax attributes definitions */
+#define ERATIVAX_RS_IS_ALL 0x000
+#define ERATIVAX_RS_IS_TID 0x040
+#define ERATIVAX_RS_IS_CLASS 0x080
+#define ERATIVAX_RS_IS_FULLMATCH 0x0c0
+#define ERATIVAX_CLASS_00 0x000
+#define ERATIVAX_CLASS_01 0x010
+#define ERATIVAX_CLASS_10 0x020
+#define ERATIVAX_CLASS_11 0x030
+#define ERATIVAX_PSIZE_4K (TLB_PSIZE_4K >> 1)
+#define ERATIVAX_PSIZE_64K (TLB_PSIZE_64K >> 1)
+#define ERATIVAX_PSIZE_1M (TLB_PSIZE_1M >> 1)
+#define ERATIVAX_PSIZE_16M (TLB_PSIZE_16M >> 1)
+#define ERATIVAX_PSIZE_1G (TLB_PSIZE_1G >> 1)
+
+/* A2 eratilx attributes definitions */
+#define ERATILX_T_ALL 0
+#define ERATILX_T_TID 1
+#define ERATILX_T_TGS 2
+#define ERATILX_T_FULLMATCH 3
+#define ERATILX_T_CLASS0 4
+#define ERATILX_T_CLASS1 5
+#define ERATILX_T_CLASS2 6
+#define ERATILX_T_CLASS3 7
+
+/* XUCR0 bits */
+#define XUCR0_TRACE_UM_T0 0x40000000 /* Thread 0 */
+#define XUCR0_TRACE_UM_T1 0x20000000 /* Thread 1 */
+#define XUCR0_TRACE_UM_T2 0x10000000 /* Thread 2 */
+#define XUCR0_TRACE_UM_T3 0x08000000 /* Thread 3 */
+
+/* A2 CCR0 register */
+#define A2_CCR0_PME_DISABLED 0x00000000
+#define A2_CCR0_PME_SLEEP 0x40000000
+#define A2_CCR0_PME_RVW 0x80000000
+#define A2_CCR0_PME_DISABLED2 0xc0000000
+
+/* A2 CCR2 register */
+#define A2_CCR2_ERAT_ONLY_MODE 0x00000001
+#define A2_CCR2_ENABLE_ICSWX 0x00000002
+#define A2_CCR2_ENABLE_PC 0x20000000
+#define A2_CCR2_ENABLE_TRACE 0x40000000
+
+#endif /* __ASM_POWERPC_REG_A2_H__ */
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 86ad8128963a..0f0ad9fa01c1 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -2,7 +2,7 @@
* Contains register definitions common to the Book E PowerPC
* specification. Notice that while the IBM-40x series of CPUs
* are not true Book E PowerPCs, they borrowed a number of features
- * before Book E was finalized, and are included here as well. Unfortunatly,
+ * before Book E was finalized, and are included here as well. Unfortunately,
* they sometimes used different locations than true Book E CPUs did.
*
* This program is free software; you can redistribute it and/or
@@ -27,10 +27,12 @@
#define MSR_CM (1<<31) /* Computation Mode (0=32-bit, 1=64-bit) */
#if defined(CONFIG_PPC_BOOK3E_64)
+#define MSR_64BIT MSR_CM
+
#define MSR_ MSR_ME | MSR_CE
-#define MSR_KERNEL MSR_ | MSR_CM
+#define MSR_KERNEL MSR_ | MSR_64BIT
#define MSR_USER32 MSR_ | MSR_PR | MSR_EE | MSR_DE
-#define MSR_USER64 MSR_USER32 | MSR_CM | MSR_DE
+#define MSR_USER64 MSR_USER32 | MSR_64BIT
#elif defined (CONFIG_40x)
#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR|MSR_CE)
#define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE)
@@ -81,6 +83,10 @@
#define SPRN_IVOR13 0x19D /* Interrupt Vector Offset Register 13 */
#define SPRN_IVOR14 0x19E /* Interrupt Vector Offset Register 14 */
#define SPRN_IVOR15 0x19F /* Interrupt Vector Offset Register 15 */
+#define SPRN_IVOR38 0x1B0 /* Interrupt Vector Offset Register 38 */
+#define SPRN_IVOR39 0x1B1 /* Interrupt Vector Offset Register 39 */
+#define SPRN_IVOR40 0x1B2 /* Interrupt Vector Offset Register 40 */
+#define SPRN_IVOR41 0x1B3 /* Interrupt Vector Offset Register 41 */
#define SPRN_SPEFSCR 0x200 /* SPE & Embedded FP Status & Control */
#define SPRN_BBEAR 0x201 /* Branch Buffer Entry Address Register */
#define SPRN_BBTAR 0x202 /* Branch Buffer Target Address Register */
@@ -110,7 +116,7 @@
#define SPRN_MAS2 0x272 /* MMU Assist Register 2 */
#define SPRN_MAS3 0x273 /* MMU Assist Register 3 */
#define SPRN_MAS4 0x274 /* MMU Assist Register 4 */
-#define SPRN_MAS5 0x275 /* MMU Assist Register 5 */
+#define SPRN_MAS5 0x153 /* MMU Assist Register 5 */
#define SPRN_MAS6 0x276 /* MMU Assist Register 6 */
#define SPRN_PID1 0x279 /* Process ID Register 1 */
#define SPRN_PID2 0x27A /* Process ID Register 2 */
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 9a1193e30f26..58625d1e7802 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -158,7 +158,50 @@ struct rtas_error_log {
unsigned long target:4; /* Target of failed operation */
unsigned long type:8; /* General event or error*/
unsigned long extended_log_length:32; /* length in bytes */
- unsigned char buffer[1];
+ unsigned char buffer[1]; /* Start of extended log */
+ /* Variable length. */
+};
+
+#define RTAS_V6EXT_LOG_FORMAT_EVENT_LOG 14
+
+#define RTAS_V6EXT_COMPANY_ID_IBM (('I' << 24) | ('B' << 16) | ('M' << 8))
+
+/* RTAS general extended event log, Version 6. The extended log starts
+ * from "buffer" field of struct rtas_error_log defined above.
+ */
+struct rtas_ext_event_log_v6 {
+ /* Byte 0 */
+ uint32_t log_valid:1; /* 1:Log valid */
+ uint32_t unrecoverable_error:1; /* 1:Unrecoverable error */
+ uint32_t recoverable_error:1; /* 1:recoverable (correctable */
+ /* or successfully retried) */
+ uint32_t degraded_operation:1; /* 1:Unrecoverable err, bypassed*/
+ /* - degraded operation (e.g. */
+ /* CPU or mem taken off-line) */
+ uint32_t predictive_error:1;
+ uint32_t new_log:1; /* 1:"New" log (Always 1 for */
+ /* data returned from RTAS */
+ uint32_t big_endian:1; /* 1: Big endian */
+ uint32_t :1; /* reserved */
+ /* Byte 1 */
+ uint32_t :8; /* reserved */
+ /* Byte 2 */
+ uint32_t powerpc_format:1; /* Set to 1 (indicating log is */
+ /* in PowerPC format */
+ uint32_t :3; /* reserved */
+ uint32_t log_format:4; /* Log format indicator. Define */
+ /* format used for byte 12-2047 */
+ /* Byte 3 */
+ uint32_t :8; /* reserved */
+ /* Byte 4-11 */
+ uint8_t reserved[8]; /* reserved */
+ /* Byte 12-15 */
+ uint32_t company_id; /* Company ID of the company */
+ /* that defines the format for */
+ /* the vendor specific log type */
+ /* Byte 16-end of log */
+ uint8_t vendor_log[1]; /* Start of vendor specific log */
+ /* Variable length. */
};
/*
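The new struct begins at the buffer field of struct rtas_error_log, so consumers first check the format and company-ID fields before trusting the vendor area. A hedged sketch of that check (the helper name is illustrative; the structs and constants are the ones added above):

/* Sketch: return the v6 extended log if present and in IBM event-log
 * format, NULL otherwise. */
static struct rtas_ext_event_log_v6 *get_v6_ext_log(struct rtas_error_log *elog)
{
	struct rtas_ext_event_log_v6 *ext;

	if (elog->extended_log_length == 0)
		return NULL;

	ext = (struct rtas_ext_event_log_v6 *)elog->buffer;

	if (!ext->log_valid || !ext->powerpc_format ||
	    ext->log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG ||
	    ext->company_id != RTAS_V6EXT_COMPANY_ID_IBM)
		return NULL;

	return ext;
}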
diff --git a/arch/powerpc/include/asm/scom.h b/arch/powerpc/include/asm/scom.h
new file mode 100644
index 000000000000..0cabfd7bc2d1
--- /dev/null
+++ b/arch/powerpc/include/asm/scom.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2010 Benjamin Herrenschmidt, IBM Corp
+ * <benh@kernel.crashing.org>
+ * and David Gibson, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_POWERPC_SCOM_H
+#define _ASM_POWERPC_SCOM_H
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_PPC_SCOM
+
+/*
+ * The SCOM bus is a sideband bus used for accessing various internal
+ * registers of the processor or the chipset. The implementation details
+ * differ between processors and platforms, and the access method as
+ * well.
+ *
+ * This API allows one to "map" ranges of SCOM register numbers associated
+ * with a given SCOM controller. The latter must be represented by a
+ * device node, though some implementations might support NULL if there
+ * is no possible ambiguity.
+ *
+ * Then, scom_read/scom_write can be used to access registers inside
+ * that range. The argument passed is a register number relative to
+ * the beginning of the range mapped.
+ */
+
+typedef void *scom_map_t;
+
+/* Value for an invalid SCOM map */
+#define SCOM_MAP_INVALID (NULL)
+
+/* The scom_controller data structure is what the platform passes
+ * to the core code in scom_init; it provides the actual implementation
+ * of all the SCOM functions.
+ */
+struct scom_controller {
+ scom_map_t (*map)(struct device_node *ctrl_dev, u64 reg, u64 count);
+ void (*unmap)(scom_map_t map);
+
+ u64 (*read)(scom_map_t map, u32 reg);
+ void (*write)(scom_map_t map, u32 reg, u64 value);
+};
+
+extern const struct scom_controller *scom_controller;
+
+/**
+ * scom_init - Initialize the SCOM backend, called by the platform
+ * @controller: The platform SCOM controller
+ */
+static inline void scom_init(const struct scom_controller *controller)
+{
+ scom_controller = controller;
+}
+
+/**
+ * scom_map_ok - Test if a SCOM mapping is successful
+ * @map: The result of scom_map to test
+ */
+static inline int scom_map_ok(scom_map_t map)
+{
+ return map != SCOM_MAP_INVALID;
+}
+
+/**
+ * scom_map - Map a block of SCOM registers
+ * @ctrl_dev: Device node of the SCOM controller
+ * some implementations allow NULL here
+ * @reg: first SCOM register to map
+ * @count: Number of SCOM registers to map
+ */
+
+static inline scom_map_t scom_map(struct device_node *ctrl_dev,
+ u64 reg, u64 count)
+{
+ return scom_controller->map(ctrl_dev, reg, count);
+}
+
+/**
+ * scom_find_parent - Find the SCOM controller for a device
+ * @dev: OF node of the device
+ *
+ * This is not meant for general usage, but in combination with
+ * scom_map() it allows mapping registers not represented by the
+ * device's own scom-reg property. Useful for applying HW workarounds
+ * on things not properly represented in the device-tree, for example.
+ */
+struct device_node *scom_find_parent(struct device_node *dev);
+
+
+/**
+ * scom_map_device - Map a device's block of SCOM registers
+ * @dev: OF node of the device
+ * @index: Register bank index (index in "scom-reg" property)
+ *
+ * This function uses the device-tree binding for SCOM: it follows
+ * "scom-parent" properties until it finds a node with a
+ * "scom-controller" property, which identifies the controller. It then
+ * uses the "scom-reg" property, which is made of reg/count pairs,
+ * each pair having a size defined by the controller's #scom-cells
+ * property.
+ */
+extern scom_map_t scom_map_device(struct device_node *dev, int index);
+
+
+/**
+ * scom_unmap - Unmap a block of SCOM registers
+ * @map: Result of scom_map to be unmapped
+ */
+static inline void scom_unmap(scom_map_t map)
+{
+ if (scom_map_ok(map))
+ scom_controller->unmap(map);
+}
+
+/**
+ * scom_read - Read a SCOM register
+ * @map: Result of scom_map
+ * @reg: Register index within that map
+ */
+static inline u64 scom_read(scom_map_t map, u32 reg)
+{
+ return scom_controller->read(map, reg);
+}
+
+/**
+ * scom_write - Write to a SCOM register
+ * @map: Result of scom_map
+ * @reg: Register index within that map
+ * @value: Value to write
+ */
+static inline void scom_write(scom_map_t map, u32 reg, u64 value)
+{
+ scom_controller->write(map, reg, value);
+}
+
+#endif /* CONFIG_PPC_SCOM */
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_SCOM_H */
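Once a platform has registered its backend with scom_init(), a driver maps a register bank and then reads and writes offsets relative to that mapping. A hedged usage sketch (the device node, bank index and register offset are made up):

/* Sketch: map bank 0 of a device's "scom-reg" property and toggle bit 0
 * of its first register.  Illustrative offsets only. */
static int example_scom_poke(struct device_node *dn)
{
	scom_map_t map;
	u64 val;

	map = scom_map_device(dn, 0);
	if (!scom_map_ok(map))
		return -ENODEV;

	val = scom_read(map, 0);	/* offset 0 within the mapped range */
	scom_write(map, 0, val | 1);

	scom_unmap(map);
	return 0;
}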
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 66e237bbe15f..880b8c1e6e53 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -20,6 +20,7 @@
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
+#include <linux/irqreturn.h>
#ifndef __ASSEMBLY__
@@ -29,22 +30,41 @@
#include <asm/percpu.h>
extern int boot_cpuid;
+extern int boot_cpu_count;
extern void cpu_die(void);
#ifdef CONFIG_SMP
-extern void smp_send_debugger_break(int cpu);
-extern void smp_message_recv(int);
+struct smp_ops_t {
+ void (*message_pass)(int cpu, int msg);
+#ifdef CONFIG_PPC_SMP_MUXED_IPI
+ void (*cause_ipi)(int cpu, unsigned long data);
+#endif
+ int (*probe)(void);
+ int (*kick_cpu)(int nr);
+ void (*setup_cpu)(int nr);
+ void (*bringup_done)(void);
+ void (*take_timebase)(void);
+ void (*give_timebase)(void);
+ int (*cpu_disable)(void);
+ void (*cpu_die)(unsigned int nr);
+ int (*cpu_bootable)(unsigned int nr);
+};
+
+extern void smp_send_debugger_break(void);
+extern void start_secondary_resume(void);
+extern void __devinit smp_generic_give_timebase(void);
+extern void __devinit smp_generic_take_timebase(void);
DECLARE_PER_CPU(unsigned int, cpu_pvr);
#ifdef CONFIG_HOTPLUG_CPU
-extern void fixup_irqs(const struct cpumask *map);
+extern void migrate_irqs(void);
int generic_cpu_disable(void);
-int generic_cpu_enable(unsigned int cpu);
void generic_cpu_die(unsigned int cpu);
void generic_mach_cpu_die(void);
+void generic_set_cpu_dead(unsigned int cpu);
#endif
#ifdef CONFIG_PPC64
@@ -92,13 +112,16 @@ extern int cpu_to_core_id(int cpu);
#define PPC_MSG_CALL_FUNC_SINGLE 2
#define PPC_MSG_DEBUGGER_BREAK 3
-/*
- * irq controllers that have dedicated ipis per message and don't
- * need additional code in the action handler may use this
- */
+/* for irq controllers that have dedicated ipis per message (4) */
extern int smp_request_message_ipi(int virq, int message);
extern const char *smp_ipi_name[];
+/* for irq controllers with only a single ipi */
+extern void smp_muxed_ipi_set_data(int cpu, unsigned long data);
+extern void smp_muxed_ipi_message_pass(int cpu, int msg);
+extern void smp_muxed_ipi_resend(void);
+extern irqreturn_t smp_ipi_demux(void);
+
void smp_init_iSeries(void);
void smp_init_pSeries(void);
void smp_init_cell(void);
@@ -148,7 +171,7 @@ extern int smt_enabled_at_boot;
extern int smp_mpic_probe(void);
extern void smp_mpic_setup_cpu(int cpu);
-extern void smp_generic_kick_cpu(int nr);
+extern int smp_generic_kick_cpu(int nr);
extern void smp_generic_give_timebase(void);
extern void smp_generic_take_timebase(void);
@@ -168,6 +191,8 @@ extern unsigned long __secondary_hold_spinloop;
extern unsigned long __secondary_hold_acknowledge;
extern char __secondary_hold;
+extern irqreturn_t debug_ipi_action(int irq, void *data);
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
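The smp_ops_t structure is now declared here, and platforms with a single hardware IPI can point message_pass at the generic mux helper and provide only a cause_ipi hook; smp_ipi_demux() then sorts the messages on the receiving side. A hedged sketch of such an instance (my_cause_ipi and my_probe are placeholders for platform code):

/* Sketch: a muxed-IPI smp_ops_t.  Only the helpers declared above are
 * real; the my_* functions are illustrative. */
static void my_cause_ipi(int cpu, unsigned long data)
{
	/* ring the single hardware IPI/doorbell targeting 'cpu' */
}

static int my_probe(void)
{
	return num_possible_cpus();
}

static struct smp_ops_t my_smp_ops = {
	.message_pass	= smp_muxed_ipi_message_pass,
	.cause_ipi	= my_cause_ipi,
	.probe		= my_probe,
	.kick_cpu	= smp_generic_kick_cpu,
};

The platform's IPI interrupt handler would then simply call smp_ipi_demux().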
diff --git a/arch/powerpc/include/asm/spu_priv1.h b/arch/powerpc/include/asm/spu_priv1.h
index 25020a34ce7f..d8f5c60f61c1 100644
--- a/arch/powerpc/include/asm/spu_priv1.h
+++ b/arch/powerpc/include/asm/spu_priv1.h
@@ -223,7 +223,7 @@ spu_disable_spu (struct spu_context *ctx)
}
/*
- * The declarations folowing are put here for convenience
+ * The declarations following are put here for convenience
* and only intended to be used by the platform setup code.
*/
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index aa0f1ebb4aaf..8489d372077f 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -348,3 +348,8 @@ COMPAT_SYS_SPU(sendmsg)
COMPAT_SYS_SPU(recvmsg)
COMPAT_SYS_SPU(recvmmsg)
SYSCALL_SPU(accept4)
+SYSCALL_SPU(name_to_handle_at)
+COMPAT_SYS_SPU(open_by_handle_at)
+COMPAT_SYS_SPU(clock_adjtime)
+SYSCALL_SPU(syncfs)
+COMPAT_SYS_SPU(sendmmsg)
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
index 5e474ddd2273..2dc595dda03b 100644
--- a/arch/powerpc/include/asm/system.h
+++ b/arch/powerpc/include/asm/system.h
@@ -219,8 +219,6 @@ extern int mem_init_done; /* set on boot once kmalloc can be called */
extern int init_bootmem_done; /* set once bootmem is available */
extern phys_addr_t memory_limit;
extern unsigned long klimit;
-
-extern void *alloc_maybe_bootmem(size_t size, gfp_t mask);
extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
extern int powersave_nap; /* set if nap mode can be used in idle loop */
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index d50a380b2b6f..81143fcbd113 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -79,6 +79,8 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
#elif defined(CONFIG_PPC_STD_MMU_64)
+#define MMU_NO_CONTEXT 0
+
/*
* TLB flushing for 64-bit hash-MMU CPUs
*/
diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h
index 11ae699135ba..58580e94a2bb 100644
--- a/arch/powerpc/include/asm/udbg.h
+++ b/arch/powerpc/include/asm/udbg.h
@@ -52,6 +52,7 @@ extern void __init udbg_init_44x_as1(void);
extern void __init udbg_init_40x_realmode(void);
extern void __init udbg_init_cpm(void);
extern void __init udbg_init_usbgecko(void);
+extern void __init udbg_init_wsp(void);
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_UDBG_H */
diff --git a/arch/powerpc/include/asm/uninorth.h b/arch/powerpc/include/asm/uninorth.h
index f737732c3861..d12b11d7641e 100644
--- a/arch/powerpc/include/asm/uninorth.h
+++ b/arch/powerpc/include/asm/uninorth.h
@@ -60,7 +60,7 @@
*
* Obviously, the GART is not cache coherent and so any change to it
* must be flushed to memory (or maybe just make the GART space non
- * cachable). AGP memory itself doens't seem to be cache coherent neither.
+ * cachable). AGP memory itself doesn't seem to be cache coherent neither.
*
* In order to invalidate the GART (which is probably necessary to inval
* the bridge internal TLBs), the following sequence has to be written,
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 6151937657f6..6d23c8193caa 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -367,10 +367,15 @@
#define __NR_recvmsg 342
#define __NR_recvmmsg 343
#define __NR_accept4 344
+#define __NR_name_to_handle_at 345
+#define __NR_open_by_handle_at 346
+#define __NR_clock_adjtime 347
+#define __NR_syncfs 348
+#define __NR_sendmmsg 349
#ifdef __KERNEL__
-#define __NR_syscalls 345
+#define __NR_syscalls 350
#define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls
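The five new numbers are part of the user ABI; until libc grows wrappers they can be invoked directly with syscall(2). A hedged userspace sketch for syncfs, with the fallback define mirroring the value added above:

/* Userspace sketch: call syncfs by number on powerpc. */
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>

#ifndef __NR_syncfs
#define __NR_syncfs 348
#endif

int main(void)
{
	int fd = open("/", O_RDONLY);

	if (fd < 0 || syscall(__NR_syncfs, fd) < 0)
		perror("syncfs");
	return 0;
}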
diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h
index 25e39220e89c..b73a8199f161 100644
--- a/arch/powerpc/include/asm/vdso_datapage.h
+++ b/arch/powerpc/include/asm/vdso_datapage.h
@@ -57,7 +57,7 @@ struct vdso_data {
} version;
/* Note about the platform flags: it now only contains the lpar
- * bit. The actual platform number is dead and burried
+ * bit. The actual platform number is dead and buried
*/
__u32 platform; /* Platform flags 0x18 */
__u32 processor; /* Processor type 0x1C */
diff --git a/arch/powerpc/include/asm/wsp.h b/arch/powerpc/include/asm/wsp.h
new file mode 100644
index 000000000000..c7dc83088a33
--- /dev/null
+++ b/arch/powerpc/include/asm/wsp.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright 2011 Michael Ellerman, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef __ASM_POWERPC_WSP_H
+#define __ASM_POWERPC_WSP_H
+
+extern int wsp_get_chip_id(struct device_node *dn);
+
+#endif /* __ASM_POWERPC_WSP_H */
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
new file mode 100644
index 000000000000..b183a4062011
--- /dev/null
+++ b/arch/powerpc/include/asm/xics.h
@@ -0,0 +1,142 @@
+/*
+ * Common definitions across all variants of ICP and ICS interrupt
+ * controllers.
+ */
+
+#ifndef _XICS_H
+#define _XICS_H
+
+#include <linux/interrupt.h>
+
+#define XICS_IPI 2
+#define XICS_IRQ_SPURIOUS 0
+
+/* Want a priority other than 0. Various HW issues require this. */
+#define DEFAULT_PRIORITY 5
+
+/*
+ * Mark IPIs as higher priority so we can take them inside interrupts that
+ * aren't marked IRQF_DISABLED
+ */
+#define IPI_PRIORITY 4
+
+/* The least favored priority */
+#define LOWEST_PRIORITY 0xFF
+
+/* The number of priorities defined above */
+#define MAX_NUM_PRIORITIES 3
+
+/* Native ICP */
+extern int icp_native_init(void);
+
+/* PAPR ICP */
+extern int icp_hv_init(void);
+
+/* ICP ops */
+struct icp_ops {
+ unsigned int (*get_irq)(void);
+ void (*eoi)(struct irq_data *d);
+ void (*set_priority)(unsigned char prio);
+ void (*teardown_cpu)(void);
+ void (*flush_ipi)(void);
+#ifdef CONFIG_SMP
+ void (*cause_ipi)(int cpu, unsigned long data);
+ irq_handler_t ipi_action;
+#endif
+};
+
+extern const struct icp_ops *icp_ops;
+
+/* Native ICS */
+extern int ics_native_init(void);
+
+/* RTAS ICS */
+extern int ics_rtas_init(void);
+
+/* ICS instance, hooked up to chip_data of an irq */
+struct ics {
+ struct list_head link;
+ int (*map)(struct ics *ics, unsigned int virq);
+ void (*mask_unknown)(struct ics *ics, unsigned long vec);
+ long (*get_server)(struct ics *ics, unsigned long vec);
+ int (*host_match)(struct ics *ics, struct device_node *node);
+ char data[];
+};
+
+/* Commons */
+extern unsigned int xics_default_server;
+extern unsigned int xics_default_distrib_server;
+extern unsigned int xics_interrupt_server_size;
+extern struct irq_host *xics_host;
+
+struct xics_cppr {
+ unsigned char stack[MAX_NUM_PRIORITIES];
+ int index;
+};
+
+DECLARE_PER_CPU(struct xics_cppr, xics_cppr);
+
+static inline void xics_push_cppr(unsigned int vec)
+{
+ struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+
+ if (WARN_ON(os_cppr->index >= MAX_NUM_PRIORITIES - 1))
+ return;
+
+ if (vec == XICS_IPI)
+ os_cppr->stack[++os_cppr->index] = IPI_PRIORITY;
+ else
+ os_cppr->stack[++os_cppr->index] = DEFAULT_PRIORITY;
+}
+
+static inline unsigned char xics_pop_cppr(void)
+{
+ struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+
+ if (WARN_ON(os_cppr->index < 1))
+ return LOWEST_PRIORITY;
+
+ return os_cppr->stack[--os_cppr->index];
+}
+
+static inline void xics_set_base_cppr(unsigned char cppr)
+{
+ struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+
+ /* we only really want to set the priority when there's
+ * just one cppr value on the stack
+ */
+ WARN_ON(os_cppr->index != 0);
+
+ os_cppr->stack[0] = cppr;
+}
+
+static inline unsigned char xics_cppr_top(void)
+{
+ struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+
+ return os_cppr->stack[os_cppr->index];
+}
+
+DECLARE_PER_CPU_SHARED_ALIGNED(unsigned long, xics_ipi_message);
+
+extern void xics_init(void);
+extern void xics_setup_cpu(void);
+extern void xics_update_irq_servers(void);
+extern void xics_set_cpu_giq(unsigned int gserver, unsigned int join);
+extern void xics_mask_unknown_vec(unsigned int vec);
+extern irqreturn_t xics_ipi_dispatch(int cpu);
+extern int xics_smp_probe(void);
+extern void xics_register_ics(struct ics *ics);
+extern void xics_teardown_cpu(void);
+extern void xics_kexec_teardown_cpu(int secondary);
+extern void xics_migrate_irqs_away(void);
+#ifdef CONFIG_SMP
+extern int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask,
+ unsigned int strict_check);
+#else
+#define xics_get_irq_server(virq, cpumask, strict_check) (xics_default_server)
+#endif
+
+
+#endif /* _XICS_H */
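The per-cpu CPPR stack mirrors the hardware current-processor-priority register across nested interrupts: the backend pushes the priority implied by the vector when it accepts an interrupt and pops it again at EOI time. A hedged sketch of that pairing (icp_read_xirr/icp_write_xirr stand in for the real MMIO or hcall accessors):

/* Sketch of how an ICP backend brackets an interrupt with the CPPR
 * helpers.  The icp_* accessors here are illustrative. */
static unsigned int example_get_irq(void)
{
	unsigned int xirr = icp_read_xirr();		/* CPPR | vector */
	unsigned int vec = xirr & 0x00ffffff;

	if (vec == XICS_IRQ_SPURIOUS)
		return 0;

	xics_push_cppr(vec);	/* IPI_PRIORITY or DEFAULT_PRIORITY now in force */
	return vec;
}

static void example_eoi(unsigned int vec)
{
	/* restore the previous priority and signal end-of-interrupt */
	icp_write_xirr((xics_pop_cppr() << 24) | vec);
}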
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 3bb2a3e6a337..9aab36312572 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -38,11 +38,14 @@ obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
paca.o nvram_64.o firmware.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o
+obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power7.o
obj64-$(CONFIG_RELOCATABLE) += reloc_64.o
obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o
+obj-$(CONFIG_PPC_A2) += cpu_setup_a2.o
obj-$(CONFIG_PPC64) += vdso64/
obj-$(CONFIG_ALTIVEC) += vecemu.o
obj-$(CONFIG_PPC_970_NAP) += idle_power4.o
+obj-$(CONFIG_PPC_P7_NAP) += idle_power7.o
obj-$(CONFIG_PPC_OF) += of_platform.o prom_parse.o
obj-$(CONFIG_PPC_CLOCK) += clock.o
procfs-y := proc_powerpc.o
@@ -75,7 +78,6 @@ obj-$(CONFIG_PPC_FSL_BOOK3E) += cpu_setup_fsl_booke.o dbell.o
obj-$(CONFIG_PPC_BOOK3E_64) += dbell.o
extra-y := head_$(CONFIG_WORD_SIZE).o
-extra-$(CONFIG_PPC_BOOK3E_32) := head_new_booke.o
extra-$(CONFIG_40x) := head_40x.o
extra-$(CONFIG_44x) := head_44x.o
extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o
@@ -103,6 +105,8 @@ obj-$(CONFIG_KEXEC) += machine_kexec.o crash.o \
obj-$(CONFIG_AUDIT) += audit.o
obj64-$(CONFIG_AUDIT) += compat_audit.o
+obj-$(CONFIG_PPC_IO_WORKAROUNDS) += io-workarounds.o
+
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 23e6a93145ab..6887661ac072 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -74,6 +74,7 @@ int main(void)
DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));
DEFINE(SIGSEGV, SIGSEGV);
DEFINE(NMI_MASK, NMI_MASK);
+ DEFINE(THREAD_DSCR, offsetof(struct thread_struct, dscr));
#else
DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index 625942ae5585..60b3e377b1e4 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -99,7 +99,7 @@ void __init btext_prepare_BAT(void)
/* This function can be used to enable the early boot text when doing
* OF booting or within bootx init. It must be followed by a btext_unmap()
- * call before the logical address becomes unuseable
+ * call before the logical address becomes unusable
*/
void __init btext_setup_display(int width, int height, int depth, int pitch,
unsigned long address)
diff --git a/arch/powerpc/kernel/cpu_setup_a2.S b/arch/powerpc/kernel/cpu_setup_a2.S
new file mode 100644
index 000000000000..7f818feaa7a5
--- /dev/null
+++ b/arch/powerpc/kernel/cpu_setup_a2.S
@@ -0,0 +1,114 @@
+/*
+ * A2 specific assembly support code
+ *
+ * Copyright 2009 Ben Herrenschmidt, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/asm-offsets.h>
+#include <asm/ppc_asm.h>
+#include <asm/ppc-opcode.h>
+#include <asm/processor.h>
+#include <asm/reg_a2.h>
+#include <asm/reg.h>
+#include <asm/thread_info.h>
+
+/*
+ * Disable thdid and class fields in ERATs to bump PID to full 14-bit capacity.
+ * This also prevents external LPID accesses but that isn't a problem when not a
+ * guest. Under PV, this setting will be ignored and MMUCR will return the right
+ * number of PID bits we can use.
+ */
+#define MMUCR1_EXTEND_PID \
+ (MMUCR1_ICTID | MMUCR1_ITTID | MMUCR1_DCTID | \
+ MMUCR1_DTTID | MMUCR1_DCCD)
+
+/*
+ * Use extended PIDs if enabled.
+ * Don't clear the ERATs on context sync events and enable I & D LRU.
+ * Enable ERAT back invalidate when tlbwe overwrites an entry.
+ */
+#define INITIAL_MMUCR1 \
+ (MMUCR1_EXTEND_PID | MMUCR1_CSINV_NEVER | MMUCR1_IRRE | \
+ MMUCR1_DRRE | MMUCR1_TLBWE_BINV)
+
+_GLOBAL(__setup_cpu_a2)
+ /* Some of these are actually thread local and some are
+ * core local but doing it always won't hurt
+ */
+
+#ifdef CONFIG_PPC_WSP_COPRO
+ /* Make sure ACOP starts out as zero */
+ li r3,0
+ mtspr SPRN_ACOP,r3
+
+ /* Enable icswx instruction */
+ mfspr r3,SPRN_A2_CCR2
+ ori r3,r3,A2_CCR2_ENABLE_ICSWX
+ mtspr SPRN_A2_CCR2,r3
+
+ /* Unmask all CTs in HACOP */
+ li r3,-1
+ mtspr SPRN_HACOP,r3
+#endif /* CONFIG_PPC_WSP_COPRO */
+
+ /* Enable doorbell */
+ mfspr r3,SPRN_A2_CCR2
+ oris r3,r3,A2_CCR2_ENABLE_PC@h
+ mtspr SPRN_A2_CCR2,r3
+ isync
+
+ /* Setup CCR0 to disable power saving for now as it's busted
+ * in the current implementations. Setup CCR1 to wake on
+ * interrupts normally (we write the default value but who
+ * knows what FW may have clobbered...)
+ */
+ li r3,0
+ mtspr SPRN_A2_CCR0, r3
+ LOAD_REG_IMMEDIATE(r3,0x0f0f0f0f)
+ mtspr SPRN_A2_CCR1, r3
+
+ /* Initialise MMUCR1 */
+ lis r3,INITIAL_MMUCR1@h
+ ori r3,r3,INITIAL_MMUCR1@l
+ mtspr SPRN_MMUCR1,r3
+
+ /* Set MMUCR2 to enable 4K, 64K, 1M, 16M and 1G pages */
+ LOAD_REG_IMMEDIATE(r3, 0x000a7531)
+ mtspr SPRN_MMUCR2,r3
+
+ /* Set MMUCR3 to write all thdid bits to the TLB */
+ LOAD_REG_IMMEDIATE(r3, 0x0000000f)
+ mtspr SPRN_MMUCR3,r3
+
+ /* Don't do ERAT stuff if running guest mode */
+ mfmsr r3
+ andis. r0,r3,MSR_GS@h
+ bne 1f
+
+ /* Now set the I-ERAT watermark to 15 */
+ lis r4,(MMUCR0_TLBSEL_I|MMUCR0_ECL)@h
+ mtspr SPRN_MMUCR0, r4
+ li r4,A2_IERAT_SIZE-1
+ PPC_ERATWE(r4,r4,3)
+
+ /* Now set the D-ERAT watermark to 31 */
+ lis r4,(MMUCR0_TLBSEL_D|MMUCR0_ECL)@h
+ mtspr SPRN_MMUCR0, r4
+ li r4,A2_DERAT_SIZE-1
+ PPC_ERATWE(r4,r4,3)
+
+ /* And invalidate the beast just in case. That won't get rid of
+ * a bolted entry though it will be in LRU and so will go away eventually
+ * but let's not bother for now
+ */
+ PPC_ERATILX(0,0,0)
+1:
+ blr
+
+_GLOBAL(__restore_cpu_a2)
+ b __setup_cpu_a2
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
index 913611105c1f..8053db02b85e 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -88,6 +88,9 @@ _GLOBAL(__setup_cpu_e5500)
bl __e500_dcache_setup
#ifdef CONFIG_PPC_BOOK3E_64
bl .__setup_base_ivors
+ bl .setup_perfmon_ivor
+ bl .setup_doorbell_ivors
+ bl .setup_ehv_ivors
#else
bl __setup_e500mc_ivors
#endif
diff --git a/arch/powerpc/kernel/cpu_setup_power7.S b/arch/powerpc/kernel/cpu_setup_power7.S
new file mode 100644
index 000000000000..4f9a93fcfe07
--- /dev/null
+++ b/arch/powerpc/kernel/cpu_setup_power7.S
@@ -0,0 +1,91 @@
+/*
+ * This file contains low level CPU setup functions.
+ * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/cputable.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/cache.h>
+
+/* Entry: r3 = crap, r4 = ptr to cputable entry
+ *
+ * Note that we can be called twice for pseudo-PVRs
+ */
+_GLOBAL(__setup_cpu_power7)
+ mflr r11
+ bl __init_hvmode_206
+ mtlr r11
+ beqlr
+ li r0,0
+ mtspr SPRN_LPID,r0
+ bl __init_LPCR
+ bl __init_TLB
+ mtlr r11
+ blr
+
+_GLOBAL(__restore_cpu_power7)
+ mflr r11
+ mfmsr r3
+ rldicl. r0,r3,4,63
+ beqlr
+ li r0,0
+ mtspr SPRN_LPID,r0
+ bl __init_LPCR
+ bl __init_TLB
+ mtlr r11
+ blr
+
+__init_hvmode_206:
+ /* Disable CPU_FTR_HVMODE_206 and exit if MSR:HV is not set */
+ mfmsr r3
+ rldicl. r0,r3,4,63
+ bnelr
+ ld r5,CPU_SPEC_FEATURES(r4)
+ LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE_206)
+ xor r5,r5,r6
+ std r5,CPU_SPEC_FEATURES(r4)
+ blr
+
+__init_LPCR:
+ /* Setup a sane LPCR:
+ *
+ * LPES = 0b01 (HSRR0/1 used for 0x500)
+ * PECE = 0b111
+ * DPFD = 4
+ *
+ * Other bits untouched for now
+ */
+ mfspr r3,SPRN_LPCR
+ ori r3,r3,(LPCR_LPES0|LPCR_LPES1)
+ xori r3,r3, LPCR_LPES0
+ ori r3,r3,(LPCR_PECE0|LPCR_PECE1|LPCR_PECE2)
+ li r5,7
+ sldi r5,r5,LPCR_DPFD_SH
+ andc r3,r3,r5
+ li r5,4
+ sldi r5,r5,LPCR_DPFD_SH
+ or r3,r3,r5
+ mtspr SPRN_LPCR,r3
+ isync
+ blr
+
+__init_TLB:
+ /* Clear the TLB */
+ li r6,128
+ mtctr r6
+ li r7,0xc00 /* IS field = 0b11 */
+ ptesync
+2: tlbiel r7
+ addi r7,r7,0x1000
+ bdnz 2b
+ ptesync
+1: blr
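The register dance in __init_LPCR is a plain read-modify-write: force LPES to 0b01, turn on the three PECE bits, and rewrite the 3-bit DPFD field with 4. The same update expressed as a hedged C sketch (mfspr/mtspr are the usual kernel accessors; only the LPCR_* names come from this series):

/* Sketch: C equivalent of the LPCR update performed by __init_LPCR. */
static void example_init_lpcr(void)
{
	unsigned long lpcr = mfspr(SPRN_LPCR);

	lpcr |= LPCR_LPES0 | LPCR_LPES1;
	lpcr ^= LPCR_LPES0;				/* LPES = 0b01 */
	lpcr |= LPCR_PECE0 | LPCR_PECE1 | LPCR_PECE2;	/* wake causes */
	lpcr &= ~(7ul << LPCR_DPFD_SH);			/* clear DPFD */
	lpcr |= 4ul << LPCR_DPFD_SH;			/* DPFD = 4 */

	mtspr(SPRN_LPCR, lpcr);
}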
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index c9b68d07ac4f..34d2722b9451 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -62,10 +62,12 @@ extern void __setup_cpu_745x(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_ppc970MP(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_pa6t(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_a2(unsigned long offset, struct cpu_spec* spec);
extern void __restore_cpu_pa6t(void);
extern void __restore_cpu_ppc970(void);
extern void __setup_cpu_power7(unsigned long offset, struct cpu_spec* spec);
extern void __restore_cpu_power7(void);
+extern void __restore_cpu_a2(void);
#endif /* CONFIG_PPC64 */
#if defined(CONFIG_E500)
extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec);
@@ -199,7 +201,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER4 (gp)",
.cpu_features = CPU_FTRS_POWER4,
.cpu_user_features = COMMON_USER_POWER4,
- .mmu_features = MMU_FTR_HPTE_TABLE,
+ .mmu_features = MMU_FTRS_POWER4,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -214,7 +216,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER4+ (gq)",
.cpu_features = CPU_FTRS_POWER4,
.cpu_user_features = COMMON_USER_POWER4,
- .mmu_features = MMU_FTR_HPTE_TABLE,
+ .mmu_features = MMU_FTRS_POWER4,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -230,7 +232,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_PPC970,
.cpu_user_features = COMMON_USER_POWER4 |
PPC_FEATURE_HAS_ALTIVEC_COMP,
- .mmu_features = MMU_FTR_HPTE_TABLE,
+ .mmu_features = MMU_FTRS_PPC970,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -248,7 +250,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_PPC970,
.cpu_user_features = COMMON_USER_POWER4 |
PPC_FEATURE_HAS_ALTIVEC_COMP,
- .mmu_features = MMU_FTR_HPTE_TABLE,
+ .mmu_features = MMU_FTRS_PPC970,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -284,7 +286,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_PPC970,
.cpu_user_features = COMMON_USER_POWER4 |
PPC_FEATURE_HAS_ALTIVEC_COMP,
- .mmu_features = MMU_FTR_HPTE_TABLE,
+ .mmu_features = MMU_FTRS_PPC970,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -302,7 +304,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_PPC970,
.cpu_user_features = COMMON_USER_POWER4 |
PPC_FEATURE_HAS_ALTIVEC_COMP,
- .mmu_features = MMU_FTR_HPTE_TABLE,
+ .mmu_features = MMU_FTRS_PPC970,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -318,7 +320,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER5 (gr)",
.cpu_features = CPU_FTRS_POWER5,
.cpu_user_features = COMMON_USER_POWER5,
- .mmu_features = MMU_FTR_HPTE_TABLE,
+ .mmu_features = MMU_FTRS_POWER5,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
@@ -338,7 +340,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER5+ (gs)",
.cpu_features = CPU_FTRS_POWER5,
.cpu_user_features = COMMON_USER_POWER5_PLUS,
- .mmu_features = MMU_FTR_HPTE_TABLE,
+ .mmu_features = MMU_FTRS_POWER5,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
@@ -354,7 +356,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER5+ (gs)",
.cpu_features = CPU_FTRS_POWER5,
.cpu_user_features = COMMON_USER_POWER5_PLUS,
- .mmu_features = MMU_FTR_HPTE_TABLE,
+ .mmu_features = MMU_FTRS_POWER5,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
@@ -371,7 +373,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER5+",
.cpu_features = CPU_FTRS_POWER5,
.cpu_user_features = COMMON_USER_POWER5_PLUS,
- .mmu_features = MMU_FTR_HPTE_TABLE,
+ .mmu_features = MMU_FTRS_POWER5,
.icache_bsize = 128,
.dcache_bsize = 128,
.oprofile_cpu_type = "ppc64/ibm-compat-v1",
@@ -385,7 +387,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_POWER6,
.cpu_user_features = COMMON_USER_POWER6 |
PPC_FEATURE_POWER6_EXT,
- .mmu_features = MMU_FTR_HPTE_TABLE,
+ .mmu_features = MMU_FTRS_POWER6,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
@@ -404,7 +406,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER6 (architected)",
.cpu_features = CPU_FTRS_POWER6,
.cpu_user_features = COMMON_USER_POWER6,
- .mmu_features = MMU_FTR_HPTE_TABLE,
+ .mmu_features = MMU_FTRS_POWER6,
.icache_bsize = 128,
.dcache_bsize = 128,
.oprofile_cpu_type = "ppc64/ibm-compat-v1",
@@ -417,12 +419,13 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER7 (architected)",
.cpu_features = CPU_FTRS_POWER7,
.cpu_user_features = COMMON_USER_POWER7,
- .mmu_features = MMU_FTR_HPTE_TABLE |
- MMU_FTR_TLBIE_206,
+ .mmu_features = MMU_FTRS_POWER7,
.icache_bsize = 128,
.dcache_bsize = 128,
.oprofile_type = PPC_OPROFILE_POWER4,
.oprofile_cpu_type = "ppc64/ibm-compat-v1",
+ .cpu_setup = __setup_cpu_power7,
+ .cpu_restore = __restore_cpu_power7,
.platform = "power7",
},
{ /* Power7 */
@@ -431,14 +434,15 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER7 (raw)",
.cpu_features = CPU_FTRS_POWER7,
.cpu_user_features = COMMON_USER_POWER7,
- .mmu_features = MMU_FTR_HPTE_TABLE |
- MMU_FTR_TLBIE_206,
+ .mmu_features = MMU_FTRS_POWER7,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power7",
.oprofile_type = PPC_OPROFILE_POWER4,
+ .cpu_setup = __setup_cpu_power7,
+ .cpu_restore = __restore_cpu_power7,
.platform = "power7",
},
{ /* Power7+ */
@@ -447,14 +451,15 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER7+ (raw)",
.cpu_features = CPU_FTRS_POWER7,
.cpu_user_features = COMMON_USER_POWER7,
- .mmu_features = MMU_FTR_HPTE_TABLE |
- MMU_FTR_TLBIE_206,
+ .mmu_features = MMU_FTRS_POWER7,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power7",
.oprofile_type = PPC_OPROFILE_POWER4,
+ .cpu_setup = __setup_cpu_power7,
+ .cpu_restore = __restore_cpu_power7,
.platform = "power7+",
},
{ /* Cell Broadband Engine */
@@ -465,7 +470,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_user_features = COMMON_USER_PPC64 |
PPC_FEATURE_CELL | PPC_FEATURE_HAS_ALTIVEC_COMP |
PPC_FEATURE_SMT,
- .mmu_features = MMU_FTR_HPTE_TABLE,
+ .mmu_features = MMU_FTRS_CELL,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 4,
@@ -480,7 +485,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "PA6T",
.cpu_features = CPU_FTRS_PA6T,
.cpu_user_features = COMMON_USER_PA6T,
- .mmu_features = MMU_FTR_HPTE_TABLE,
+ .mmu_features = MMU_FTRS_PA6T,
.icache_bsize = 64,
.dcache_bsize = 64,
.num_pmcs = 6,
@@ -497,7 +502,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER4 (compatible)",
.cpu_features = CPU_FTRS_COMPATIBLE,
.cpu_user_features = COMMON_USER_PPC64,
- .mmu_features = MMU_FTR_HPTE_TABLE,
+ .mmu_features = MMU_FTRS_DEFAULT_HPTE_ARCH_V2,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
@@ -1973,7 +1978,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pvr_mask = 0xffff0000,
.pvr_value = 0x80240000,
.cpu_name = "e5500",
- .cpu_features = CPU_FTRS_E500MC,
+ .cpu_features = CPU_FTRS_E5500,
.cpu_user_features = COMMON_USER_BOOKE,
.mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
MMU_FTR_USE_TLBILX,
@@ -2005,7 +2010,22 @@ static struct cpu_spec __initdata cpu_specs[] = {
#endif /* CONFIG_PPC32 */
#endif /* CONFIG_E500 */
-#ifdef CONFIG_PPC_BOOK3E_64
+#ifdef CONFIG_PPC_A2
+ { /* Standard A2 (>= DD2) + FPU core */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x00480000,
+ .cpu_name = "A2 (>= DD2)",
+ .cpu_features = CPU_FTRS_A2,
+ .cpu_user_features = COMMON_USER_PPC64,
+ .mmu_features = MMU_FTRS_A2,
+ .icache_bsize = 64,
+ .dcache_bsize = 64,
+ .num_pmcs = 0,
+ .cpu_setup = __setup_cpu_a2,
+ .cpu_restore = __restore_cpu_a2,
+ .machine_check = machine_check_generic,
+ .platform = "ppca2",
+ },
{ /* This is a default entry to get going, to be replaced by
* a real one at some stage
*/
@@ -2026,7 +2046,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_generic,
.platform = "power6",
},
-#endif
+#endif /* CONFIG_PPC_A2 */
};
static struct cpu_spec the_cpu_spec;
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 3d569e2aff18..4e6ee944495a 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -64,9 +64,9 @@ void crash_ipi_callback(struct pt_regs *regs)
return;
hard_irq_disable();
- if (!cpu_isset(cpu, cpus_in_crash))
+ if (!cpumask_test_cpu(cpu, &cpus_in_crash))
crash_save_cpu(regs, cpu);
- cpu_set(cpu, cpus_in_crash);
+ cpumask_set_cpu(cpu, &cpus_in_crash);
/*
* Entered via soft-reset - could be the kdump
@@ -77,8 +77,8 @@ void crash_ipi_callback(struct pt_regs *regs)
* Tell the kexec CPU that entered via soft-reset and ready
* to go down.
*/
- if (cpu_isset(cpu, cpus_in_sr)) {
- cpu_clear(cpu, cpus_in_sr);
+ if (cpumask_test_cpu(cpu, &cpus_in_sr)) {
+ cpumask_clear_cpu(cpu, &cpus_in_sr);
atomic_inc(&enter_on_soft_reset);
}
@@ -87,7 +87,7 @@ void crash_ipi_callback(struct pt_regs *regs)
* This barrier is needed to make sure that all CPUs are stopped.
* If not, soft-reset will be invoked to bring other CPUs.
*/
- while (!cpu_isset(crashing_cpu, cpus_in_crash))
+ while (!cpumask_test_cpu(crashing_cpu, &cpus_in_crash))
cpu_relax();
if (ppc_md.kexec_cpu_down)
@@ -109,7 +109,7 @@ static void crash_soft_reset_check(int cpu)
{
unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
- cpu_clear(cpu, cpus_in_sr);
+ cpumask_clear_cpu(cpu, &cpus_in_sr);
while (atomic_read(&enter_on_soft_reset) != ncpus)
cpu_relax();
}
@@ -132,7 +132,7 @@ static void crash_kexec_prepare_cpus(int cpu)
*/
printk(KERN_EMERG "Sending IPI to other cpus...\n");
msecs = 10000;
- while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) {
+ while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) {
cpu_relax();
mdelay(1);
}
@@ -144,52 +144,24 @@ static void crash_kexec_prepare_cpus(int cpu)
* user to do soft reset such that we get all.
* Soft-reset will be used until better mechanism is implemented.
*/
- if (cpus_weight(cpus_in_crash) < ncpus) {
+ if (cpumask_weight(&cpus_in_crash) < ncpus) {
printk(KERN_EMERG "done waiting: %d cpu(s) not responding\n",
- ncpus - cpus_weight(cpus_in_crash));
+ ncpus - cpumask_weight(&cpus_in_crash));
printk(KERN_EMERG "Activate soft-reset to stop other cpu(s)\n");
- cpus_in_sr = CPU_MASK_NONE;
+ cpumask_clear(&cpus_in_sr);
atomic_set(&enter_on_soft_reset, 0);
- while (cpus_weight(cpus_in_crash) < ncpus)
+ while (cpumask_weight(&cpus_in_crash) < ncpus)
cpu_relax();
}
/*
* Make sure all CPUs are entered via soft-reset if the kdump is
* invoked using soft-reset.
*/
- if (cpu_isset(cpu, cpus_in_sr))
+ if (cpumask_test_cpu(cpu, &cpus_in_sr))
crash_soft_reset_check(cpu);
/* Leave the IPI callback set */
}
-/* wait for all the CPUs to hit real mode but timeout if they don't come in */
-#ifdef CONFIG_PPC_STD_MMU_64
-static void crash_kexec_wait_realmode(int cpu)
-{
- unsigned int msecs;
- int i;
-
- msecs = 10000;
- for (i=0; i < NR_CPUS && msecs > 0; i++) {
- if (i == cpu)
- continue;
-
- while (paca[i].kexec_state < KEXEC_STATE_REAL_MODE) {
- barrier();
- if (!cpu_possible(i)) {
- break;
- }
- if (!cpu_online(i)) {
- break;
- }
- msecs--;
- mdelay(1);
- }
- }
- mb();
-}
-#endif
-
/*
* This function will be called by secondary cpus or by kexec cpu
* if soft-reset is activated to stop some CPUs.
@@ -210,7 +182,7 @@ void crash_kexec_secondary(struct pt_regs *regs)
* exited using 'x'(exit and recover) or
* kexec_should_crash() failed for all running tasks.
*/
- cpu_clear(cpu, cpus_in_sr);
+ cpumask_clear_cpu(cpu, &cpus_in_sr);
local_irq_restore(flags);
return;
}
@@ -224,7 +196,7 @@ void crash_kexec_secondary(struct pt_regs *regs)
* then start kexec boot.
*/
crash_soft_reset_check(cpu);
- cpu_set(crashing_cpu, cpus_in_crash);
+ cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(1, 0);
machine_kexec(kexec_crash_image);
@@ -233,7 +205,8 @@ void crash_kexec_secondary(struct pt_regs *regs)
crash_ipi_callback(regs);
}
-#else
+#else /* ! CONFIG_SMP */
+
static void crash_kexec_prepare_cpus(int cpu)
{
/*
@@ -251,9 +224,39 @@ static void crash_kexec_prepare_cpus(int cpu)
void crash_kexec_secondary(struct pt_regs *regs)
{
- cpus_in_sr = CPU_MASK_NONE;
+ cpumask_clear(&cpus_in_sr);
}
-#endif
+#endif /* CONFIG_SMP */
+
+/* wait for all the CPUs to hit real mode but timeout if they don't come in */
+#if defined(CONFIG_SMP) && defined(CONFIG_PPC_STD_MMU_64)
+static void crash_kexec_wait_realmode(int cpu)
+{
+ unsigned int msecs;
+ int i;
+
+ msecs = 10000;
+ for (i=0; i < nr_cpu_ids && msecs > 0; i++) {
+ if (i == cpu)
+ continue;
+
+ while (paca[i].kexec_state < KEXEC_STATE_REAL_MODE) {
+ barrier();
+ if (!cpu_possible(i)) {
+ break;
+ }
+ if (!cpu_online(i)) {
+ break;
+ }
+ msecs--;
+ mdelay(1);
+ }
+ }
+ mb();
+}
+#else
+static inline void crash_kexec_wait_realmode(int cpu) {}
+#endif /* CONFIG_SMP && CONFIG_PPC_STD_MMU_64 */
/*
* Register a function to be called on shutdown. Only use this if you
@@ -343,10 +346,8 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
crashing_cpu = smp_processor_id();
crash_save_cpu(regs, crashing_cpu);
crash_kexec_prepare_cpus(crashing_cpu);
- cpu_set(crashing_cpu, cpus_in_crash);
-#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP)
+ cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
crash_kexec_wait_realmode(crashing_cpu);
-#endif
machine_kexec_mask_interrupts();
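
For readers unfamiliar with the newer cpumask API this hunk converts to, here is a minimal illustrative sketch (not part of the patch; the function name example_mask_usage is made up) showing the accessors operating on an explicit struct cpumask, as the crash code above now does:

#include <linux/cpumask.h>
#include <linux/printk.h>

static struct cpumask cpus_in_crash;	/* same shape as the mask used above */

static void example_mask_usage(int cpu)
{
	cpumask_clear(&cpus_in_crash);			/* replaces: mask = CPU_MASK_NONE */
	cpumask_set_cpu(cpu, &cpus_in_crash);		/* replaces: cpu_set()            */
	if (cpumask_test_cpu(cpu, &cpus_in_crash))	/* replaces: cpu_isset()          */
		cpumask_clear_cpu(cpu, &cpus_in_crash);	/* replaces: cpu_clear()          */
	pr_info("%u cpus in crash\n", cpumask_weight(&cpus_in_crash));	/* replaces: cpus_weight() */
}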
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index 3307a52d797f..2cc451aaaca7 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -13,84 +13,35 @@
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/threads.h>
-#include <linux/percpu.h>
+#include <linux/hardirq.h>
#include <asm/dbell.h>
#include <asm/irq_regs.h>
#ifdef CONFIG_SMP
-struct doorbell_cpu_info {
- unsigned long messages; /* current messages bits */
- unsigned int tag; /* tag value */
-};
-
-static DEFINE_PER_CPU(struct doorbell_cpu_info, doorbell_cpu_info);
-
void doorbell_setup_this_cpu(void)
{
- struct doorbell_cpu_info *info = &__get_cpu_var(doorbell_cpu_info);
+ unsigned long tag = mfspr(SPRN_PIR) & 0x3fff;
- info->messages = 0;
- info->tag = mfspr(SPRN_PIR) & 0x3fff;
+ smp_muxed_ipi_set_data(smp_processor_id(), tag);
}
-void doorbell_message_pass(int target, int msg)
+void doorbell_cause_ipi(int cpu, unsigned long data)
{
- struct doorbell_cpu_info *info;
- int i;
-
- if (target < NR_CPUS) {
- info = &per_cpu(doorbell_cpu_info, target);
- set_bit(msg, &info->messages);
- ppc_msgsnd(PPC_DBELL, 0, info->tag);
- }
- else if (target == MSG_ALL_BUT_SELF) {
- for_each_online_cpu(i) {
- if (i == smp_processor_id())
- continue;
- info = &per_cpu(doorbell_cpu_info, i);
- set_bit(msg, &info->messages);
- ppc_msgsnd(PPC_DBELL, 0, info->tag);
- }
- }
- else { /* target == MSG_ALL */
- for_each_online_cpu(i) {
- info = &per_cpu(doorbell_cpu_info, i);
- set_bit(msg, &info->messages);
- }
- ppc_msgsnd(PPC_DBELL, PPC_DBELL_MSG_BRDCAST, 0);
- }
+ ppc_msgsnd(PPC_DBELL, 0, data);
}
void doorbell_exception(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
- struct doorbell_cpu_info *info = &__get_cpu_var(doorbell_cpu_info);
- int msg;
- /* Warning: regs can be NULL when called from irq enable */
+ irq_enter();
- if (!info->messages || (num_online_cpus() < 2))
- goto out;
+ smp_ipi_demux();
- for (msg = 0; msg < 4; msg++)
- if (test_and_clear_bit(msg, &info->messages))
- smp_message_recv(msg);
-
-out:
+ irq_exit();
set_irq_regs(old_regs);
}
-
-void doorbell_check_self(void)
-{
- struct doorbell_cpu_info *info = &__get_cpu_var(doorbell_cpu_info);
-
- if (!info->messages)
- return;
-
- ppc_msgsnd(PPC_DBELL, 0, info->tag);
-}
-
#else /* CONFIG_SMP */
void doorbell_exception(struct pt_regs *regs)
{
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index cf02cad62d9a..d238c082c3c5 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -179,3 +179,21 @@ static int __init dma_init(void)
return 0;
}
fs_initcall(dma_init);
+
+int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t handle, size_t size)
+{
+ unsigned long pfn;
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
+#else
+ pfn = page_to_pfn(virt_to_page(cpu_addr));
+#endif
+ return remap_pfn_range(vma, vma->vm_start,
+ pfn + vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+}
+EXPORT_SYMBOL_GPL(dma_mmap_coherent);
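
As a rough usage sketch (not part of the patch; mydev and its fields are hypothetical), a driver could back its mmap file operation with the newly exported helper, passing the buffer obtained from dma_alloc_coherent() together with its DMA handle and size:

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

struct mydev {
	struct device *dev;
	void *buf;		/* CPU address from dma_alloc_coherent() */
	dma_addr_t buf_dma;	/* matching DMA handle */
	size_t buf_size;
};

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mydev *md = file->private_data;

	if (vma->vm_end - vma->vm_start > md->buf_size)
		return -EINVAL;

	/* dma_mmap_coherent() applies pgprot_noncached() itself on
	 * non-coherent platforms, see CONFIG_NOT_COHERENT_CACHE above.
	 */
	return dma_mmap_coherent(md->dev, vma, md->buf, md->buf_dma,
				 md->buf_size);
}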
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index d82878c4daa6..d834425186ae 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -421,6 +421,12 @@ BEGIN_FTR_SECTION
std r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_PPC64
+BEGIN_FTR_SECTION
+ mfspr r25,SPRN_DSCR
+ std r25,THREAD_DSCR(r3)
+END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
+#endif
and. r0,r0,r22
beq+ 1f
andc r22,r22,r0
@@ -462,10 +468,10 @@ BEGIN_FTR_SECTION
FTR_SECTION_ELSE_NESTED(95)
clrrdi r6,r8,40 /* get its 1T ESID */
clrrdi r9,r1,40 /* get current sp 1T ESID */
- ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_1T_SEGMENT, 95)
+ ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(MMU_FTR_1T_SEGMENT, 95)
FTR_SECTION_ELSE
b 2f
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_SLB)
+ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_SLB)
clrldi. r0,r6,2 /* is new ESID c00000000? */
cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */
cror eq,4*cr1+eq,eq
@@ -479,7 +485,7 @@ BEGIN_FTR_SECTION
li r9,MMU_SEGSIZE_1T /* insert B field */
oris r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
rldimi r7,r9,SLB_VSID_SSIZE_SHIFT,0
-END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
/* Update the last bolted SLB. No write barriers are needed
* here, provided we only update the current CPU's SLB shadow
@@ -491,7 +497,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
std r7,SLBSHADOW_STACKVSID(r9) /* Save VSID */
std r0,SLBSHADOW_STACKESID(r9) /* Save ESID */
- /* No need to check for CPU_FTR_NO_SLBIE_B here, since when
+ /* No need to check for MMU_FTR_NO_SLBIE_B here, since when
* we have 1TB segments, the only CPUs known to have the errata
* only support less than 1TB of system memory and we'll never
* actually hit this code path.
@@ -522,6 +528,15 @@ BEGIN_FTR_SECTION
mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_PPC64
+BEGIN_FTR_SECTION
+ ld r0,THREAD_DSCR(r4)
+ cmpd r0,r25
+ beq 1f
+ mtspr SPRN_DSCR,r0
+1:
+END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
+#endif
/* r3-r13 are destroyed -- Cort */
REST_8GPRS(14, r1)
@@ -838,7 +853,7 @@ _GLOBAL(enter_rtas)
_STATIC(rtas_return_loc)
/* relocation is off at this point */
- mfspr r4,SPRN_SPRG_PACA /* Get PACA */
+ GET_PACA(r4)
clrldi r4,r4,2 /* convert to realmode address */
bcl 20,31,$+4
@@ -869,7 +884,7 @@ _STATIC(rtas_restore_regs)
REST_8GPRS(14, r1) /* Restore the non-volatiles */
REST_10GPRS(22, r1) /* ditto */
- mfspr r13,SPRN_SPRG_PACA
+ GET_PACA(r13)
ld r4,_CCR(r1)
mtcr r4
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 5c43063d2506..d24d4400cc79 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -17,6 +17,7 @@
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/thread_info.h>
+#include <asm/reg_a2.h>
#include <asm/exception-64e.h>
#include <asm/bug.h>
#include <asm/irqflags.h>
@@ -252,9 +253,6 @@ exception_marker:
.balign 0x1000
.globl interrupt_base_book3e
interrupt_base_book3e: /* fake trap */
- /* Note: If real debug exceptions are supported by the HW, the vector
- * below will have to be patched up to point to an appropriate handler
- */
EXCEPTION_STUB(0x000, machine_check) /* 0x0200 */
EXCEPTION_STUB(0x020, critical_input) /* 0x0580 */
EXCEPTION_STUB(0x040, debug_crit) /* 0x0d00 */
@@ -271,8 +269,13 @@ interrupt_base_book3e: /* fake trap */
EXCEPTION_STUB(0x1a0, watchdog) /* 0x09f0 */
EXCEPTION_STUB(0x1c0, data_tlb_miss)
EXCEPTION_STUB(0x1e0, instruction_tlb_miss)
+ EXCEPTION_STUB(0x260, perfmon)
EXCEPTION_STUB(0x280, doorbell)
EXCEPTION_STUB(0x2a0, doorbell_crit)
+ EXCEPTION_STUB(0x2c0, guest_doorbell)
+ EXCEPTION_STUB(0x2e0, guest_doorbell_crit)
+ EXCEPTION_STUB(0x300, hypercall)
+ EXCEPTION_STUB(0x320, ehpriv)
.globl interrupt_end_book3e
interrupt_end_book3e:
@@ -379,7 +382,7 @@ interrupt_end_book3e:
mfspr r13,SPRN_SPRG_PACA /* get our PACA */
b system_call_common
-/* Auxillary Processor Unavailable Interrupt */
+/* Auxiliary Processor Unavailable Interrupt */
START_EXCEPTION(ap_unavailable);
NORMAL_EXCEPTION_PROLOG(0xf20, PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_KEEP)
@@ -454,6 +457,70 @@ interrupt_end_book3e:
kernel_dbg_exc:
b . /* NYI */
+/* Debug exception as a debug interrupt */
+ START_EXCEPTION(debug_debug);
+ DBG_EXCEPTION_PROLOG(0xd00, PROLOG_ADDITION_2REGS)
+
+ /*
+ * If there is a single step or branch-taken exception in an
+ * exception entry sequence, it was probably meant to apply to
+ * the code where the exception occurred (since exception entry
+ * doesn't turn off DE automatically). We simulate the effect
+ * of turning off DE on entry to an exception handler by turning
+ * off DE in the DSRR1 value and clearing the debug status.
+ */
+
+ mfspr r14,SPRN_DBSR /* check single-step/branch taken */
+ andis. r15,r14,DBSR_IC@h
+ beq+ 1f
+
+ LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
+ LOAD_REG_IMMEDIATE(r15,interrupt_end_book3e)
+ cmpld cr0,r10,r14
+ cmpld cr1,r10,r15
+ blt+ cr0,1f
+ bge+ cr1,1f
+
+ /* here it looks like we got an inappropriate debug exception. */
+ lis r14,DBSR_IC@h /* clear the IC event */
+ rlwinm r11,r11,0,~MSR_DE /* clear DE in the DSRR1 value */
+ mtspr SPRN_DBSR,r14
+ mtspr SPRN_DSRR1,r11
+ lwz r10,PACA_EXDBG+EX_CR(r13) /* restore registers */
+ ld r1,PACA_EXDBG+EX_R1(r13)
+ ld r14,PACA_EXDBG+EX_R14(r13)
+ ld r15,PACA_EXDBG+EX_R15(r13)
+ mtcr r10
+ ld r10,PACA_EXDBG+EX_R10(r13) /* restore registers */
+ ld r11,PACA_EXDBG+EX_R11(r13)
+ mfspr r13,SPRN_SPRG_DBG_SCRATCH
+ rfdi
+
+ /* Normal debug exception */
+ /* XXX We only handle coming from userspace for now since we can't
+ * quite properly save an interrupted kernel state yet
+ */
+1: andi. r14,r11,MSR_PR; /* check for userspace again */
+ beq kernel_dbg_exc; /* if from kernel mode */
+
+ /* Now we mash up things to make it look like we are coming on a
+ * normal exception
+ */
+ mfspr r15,SPRN_SPRG_DBG_SCRATCH
+ mtspr SPRN_SPRG_GEN_SCRATCH,r15
+ mfspr r14,SPRN_DBSR
+ EXCEPTION_COMMON(0xd00, PACA_EXDBG, INTS_DISABLE_ALL)
+ std r14,_DSISR(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ mr r4,r14
+ ld r14,PACA_EXDBG+EX_R14(r13)
+ ld r15,PACA_EXDBG+EX_R15(r13)
+ bl .save_nvgprs
+ bl .DebugException
+ b .ret_from_except
+
+ MASKABLE_EXCEPTION(0x260, perfmon, .performance_monitor_exception, ACK_NONE)
+
/* Doorbell interrupt */
MASKABLE_EXCEPTION(0x2070, doorbell, .doorbell_exception, ACK_NONE)
@@ -468,6 +535,11 @@ kernel_dbg_exc:
// b ret_from_crit_except
b .
+ MASKABLE_EXCEPTION(0x2c0, guest_doorbell, .unknown_exception, ACK_NONE)
+ MASKABLE_EXCEPTION(0x2e0, guest_doorbell_crit, .unknown_exception, ACK_NONE)
+ MASKABLE_EXCEPTION(0x310, hypercall, .unknown_exception, ACK_NONE)
+ MASKABLE_EXCEPTION(0x320, ehpriv, .unknown_exception, ACK_NONE)
+
/*
* An interrupt came in while soft-disabled; clear EE in SRR1,
@@ -587,7 +659,12 @@ fast_exception_return:
BAD_STACK_TRAMPOLINE(0x000)
BAD_STACK_TRAMPOLINE(0x100)
BAD_STACK_TRAMPOLINE(0x200)
+BAD_STACK_TRAMPOLINE(0x260)
+BAD_STACK_TRAMPOLINE(0x2c0)
+BAD_STACK_TRAMPOLINE(0x2e0)
BAD_STACK_TRAMPOLINE(0x300)
+BAD_STACK_TRAMPOLINE(0x310)
+BAD_STACK_TRAMPOLINE(0x320)
BAD_STACK_TRAMPOLINE(0x400)
BAD_STACK_TRAMPOLINE(0x500)
BAD_STACK_TRAMPOLINE(0x600)
@@ -864,8 +941,23 @@ have_hes:
* that will have to be made dependent on whether we are running under
* a hypervisor I suppose.
*/
- ori r3,r3,MAS0_HES | MAS0_WQ_ALLWAYS
- mtspr SPRN_MAS0,r3
+
+ /* BEWARE, MAGIC
+ * This code is called as an ordinary function on the boot CPU. But to
+ * avoid duplication, this code is also used in SCOM bringup of
+ * secondary CPUs. We read the code between the a2_tlbinit_code_start
+ * and a2_tlbinit_code_end labels one instruction at a time and RAM it
+ * into the new core via SCOM. That doesn't process branches, so there
+ * must be none between those two labels. It also means if this code
+ * ever takes any parameters, the SCOM code must also be updated to
+ * provide them.
+ */
+ .globl a2_tlbinit_code_start
+a2_tlbinit_code_start:
+
+ ori r11,r3,MAS0_WQ_ALLWAYS
+ oris r11,r11,MAS0_ESEL(3)@h /* Use way 3: workaround A2 erratum 376 */
+ mtspr SPRN_MAS0,r11
lis r3,(MAS1_VALID | MAS1_IPROT)@h
ori r3,r3,BOOK3E_PAGESZ_1GB << MAS1_TSIZE_SHIFT
mtspr SPRN_MAS1,r3
@@ -879,18 +971,86 @@ have_hes:
/* Write the TLB entry */
tlbwe
+ .globl a2_tlbinit_after_linear_map
+a2_tlbinit_after_linear_map:
+
/* Now we branch the new virtual address mapped by this entry */
LOAD_REG_IMMEDIATE(r3,1f)
mtctr r3
bctr
1: /* We are now running at PAGE_OFFSET, clean the TLB of everything
- * else (XXX we should scan for bolted crap from the firmware too)
+ * else (including IPROTed things left by firmware)
+ * r4 = TLBnCFG
+ * r3 = current address (more or less)
*/
+
+ li r5,0
+ mtspr SPRN_MAS6,r5
+ tlbsx 0,r3
+
+ rlwinm r9,r4,0,TLBnCFG_N_ENTRY
+ rlwinm r10,r4,8,0xff
+ addi r10,r10,-1 /* Get inner loop mask */
+
+ li r3,1
+
+ mfspr r5,SPRN_MAS1
+ rlwinm r5,r5,0,(~(MAS1_VALID|MAS1_IPROT))
+
+ mfspr r6,SPRN_MAS2
+ rldicr r6,r6,0,51 /* Extract EPN */
+
+ mfspr r7,SPRN_MAS0
+ rlwinm r7,r7,0,0xffff0fff /* Clear HES and WQ */
+
+ rlwinm r8,r7,16,0xfff /* Extract ESEL */
+
+2: add r4,r3,r8
+ and r4,r4,r10
+
+ rlwimi r7,r4,16,MAS0_ESEL_MASK
+
+ mtspr SPRN_MAS0,r7
+ mtspr SPRN_MAS1,r5
+ mtspr SPRN_MAS2,r6
+ tlbwe
+
+ addi r3,r3,1
+ and. r4,r3,r10
+
+ bne 3f
+ addis r6,r6,(1<<30)@h
+3:
+ cmpw r3,r9
+ blt 2b
+
+ .globl a2_tlbinit_after_iprot_flush
+a2_tlbinit_after_iprot_flush:
+
+#ifdef CONFIG_PPC_EARLY_DEBUG_WSP
+ /* Now establish early debug mappings if applicable */
+ /* Restore the MAS0 we used for linear mapping load */
+ mtspr SPRN_MAS0,r11
+
+ lis r3,(MAS1_VALID | MAS1_IPROT)@h
+ ori r3,r3,(BOOK3E_PAGESZ_4K << MAS1_TSIZE_SHIFT)
+ mtspr SPRN_MAS1,r3
+ LOAD_REG_IMMEDIATE(r3, WSP_UART_VIRT | MAS2_I | MAS2_G)
+ mtspr SPRN_MAS2,r3
+ LOAD_REG_IMMEDIATE(r3, WSP_UART_PHYS | MAS3_SR | MAS3_SW)
+ mtspr SPRN_MAS7_MAS3,r3
+ /* re-use the MAS8 value from the linear mapping */
+ tlbwe
+#endif /* CONFIG_PPC_EARLY_DEBUG_WSP */
+
PPC_TLBILX(0,0,0)
sync
isync
+ .globl a2_tlbinit_code_end
+a2_tlbinit_code_end:
+
/* We translate LR and return */
mflr r3
tovirt(r3,r3)
@@ -1040,3 +1200,33 @@ _GLOBAL(__setup_base_ivors)
sync
blr
+
+_GLOBAL(setup_perfmon_ivor)
+ SET_IVOR(35, 0x260) /* Performance Monitor */
+ blr
+
+_GLOBAL(setup_doorbell_ivors)
+ SET_IVOR(36, 0x280) /* Processor Doorbell */
+ SET_IVOR(37, 0x2a0) /* Processor Doorbell Crit */
+
+ /* Check MMUCFG[LPIDSIZE] to determine if we have category E.HV */
+ mfspr r10,SPRN_MMUCFG
+ rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
+ beqlr
+
+ SET_IVOR(38, 0x2c0) /* Guest Processor Doorbell */
+ SET_IVOR(39, 0x2e0) /* Guest Processor Doorbell Crit/MC */
+ blr
+
+_GLOBAL(setup_ehv_ivors)
+ /*
+ * We may be running as a guest and lack E.HV even on a chip
+ * that normally has it.
+ */
+ mfspr r10,SPRN_MMUCFG
+ rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
+ beqlr
+
+ SET_IVOR(40, 0x300) /* Embedded Hypervisor System Call */
+ SET_IVOR(41, 0x320) /* Embedded Hypervisor Privilege */
+ blr
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 8a817995b4cd..a85f4874cba7 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -5,7 +5,7 @@
* handling and other fixed offset specific things.
*
* This file is meant to be #included from head_64.S due to
- * position dependant assembly.
+ * position dependent assembly.
*
* Most of this originates from head_64.S and thus has the same
* copyright history.
@@ -37,23 +37,51 @@
.globl __start_interrupts
__start_interrupts:
- STD_EXCEPTION_PSERIES(0x100, system_reset)
+ .globl system_reset_pSeries;
+system_reset_pSeries:
+ HMT_MEDIUM;
+ DO_KVM 0x100;
+ SET_SCRATCH0(r13)
+#ifdef CONFIG_PPC_P7_NAP
+BEGIN_FTR_SECTION
+ /* Running native on arch 2.06 or later, check if we are
+ * waking up from nap. We only handle no state loss and
+ * supervisor state loss. We do -not- handle hypervisor
+ * state loss at this time.
+ */
+ mfspr r13,SPRN_SRR1
+ rlwinm r13,r13,47-31,30,31
+ cmpwi cr0,r13,1
+ bne 1f
+ b .power7_wakeup_noloss
+1: cmpwi cr0,r13,2
+ bne 1f
+ b .power7_wakeup_loss
+ /* Total loss of HV state is fatal, we could try to use the
+ * PIR to locate a PACA, then use an emergency stack etc...
+ * but for now, let's just stay stuck here
+ */
+1: cmpwi cr0,r13,3
+ beq .
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE_206)
+#endif /* CONFIG_PPC_P7_NAP */
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD)
. = 0x200
_machine_check_pSeries:
HMT_MEDIUM
DO_KVM 0x200
- mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */
- EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common, EXC_STD)
. = 0x300
.globl data_access_pSeries
data_access_pSeries:
HMT_MEDIUM
DO_KVM 0x300
- mtspr SPRN_SPRG_SCRATCH0,r13
+ SET_SCRATCH0(r13)
BEGIN_FTR_SECTION
- mfspr r13,SPRN_SPRG_PACA
+ GET_PACA(r13)
std r9,PACA_EXSLB+EX_R9(r13)
std r10,PACA_EXSLB+EX_R10(r13)
mfspr r10,SPRN_DAR
@@ -67,22 +95,22 @@ BEGIN_FTR_SECTION
std r11,PACA_EXGEN+EX_R11(r13)
ld r11,PACA_EXSLB+EX_R9(r13)
std r12,PACA_EXGEN+EX_R12(r13)
- mfspr r12,SPRN_SPRG_SCRATCH0
+ GET_SCRATCH0(r12)
std r10,PACA_EXGEN+EX_R10(r13)
std r11,PACA_EXGEN+EX_R9(r13)
std r12,PACA_EXGEN+EX_R13(r13)
- EXCEPTION_PROLOG_PSERIES_1(data_access_common)
+ EXCEPTION_PROLOG_PSERIES_1(data_access_common, EXC_STD)
FTR_SECTION_ELSE
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
-ALT_FTR_SECTION_END_IFCLR(CPU_FTR_SLB)
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD)
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_SLB)
. = 0x380
.globl data_access_slb_pSeries
data_access_slb_pSeries:
HMT_MEDIUM
DO_KVM 0x380
- mtspr SPRN_SPRG_SCRATCH0,r13
- mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */
+ SET_SCRATCH0(r13)
+ GET_PACA(r13)
std r3,PACA_EXSLB+EX_R3(r13)
mfspr r3,SPRN_DAR
std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
@@ -95,7 +123,7 @@ data_access_slb_pSeries:
std r10,PACA_EXSLB+EX_R10(r13)
std r11,PACA_EXSLB+EX_R11(r13)
std r12,PACA_EXSLB+EX_R12(r13)
- mfspr r10,SPRN_SPRG_SCRATCH0
+ GET_SCRATCH0(r10)
std r10,PACA_EXSLB+EX_R13(r13)
mfspr r12,SPRN_SRR1 /* and SRR1 */
#ifndef CONFIG_RELOCATABLE
@@ -113,15 +141,15 @@ data_access_slb_pSeries:
bctr
#endif
- STD_EXCEPTION_PSERIES(0x400, instruction_access)
+ STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)
. = 0x480
.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
HMT_MEDIUM
DO_KVM 0x480
- mtspr SPRN_SPRG_SCRATCH0,r13
- mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */
+ SET_SCRATCH0(r13)
+ GET_PACA(r13)
std r3,PACA_EXSLB+EX_R3(r13)
mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
@@ -134,7 +162,7 @@ instruction_access_slb_pSeries:
std r10,PACA_EXSLB+EX_R10(r13)
std r11,PACA_EXSLB+EX_R11(r13)
std r12,PACA_EXSLB+EX_R12(r13)
- mfspr r10,SPRN_SPRG_SCRATCH0
+ GET_SCRATCH0(r10)
std r10,PACA_EXSLB+EX_R13(r13)
mfspr r12,SPRN_SRR1 /* and SRR1 */
#ifndef CONFIG_RELOCATABLE
@@ -147,13 +175,29 @@ instruction_access_slb_pSeries:
bctr
#endif
- MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
- STD_EXCEPTION_PSERIES(0x600, alignment)
- STD_EXCEPTION_PSERIES(0x700, program_check)
- STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
- MASKABLE_EXCEPTION_PSERIES(0x900, decrementer)
- STD_EXCEPTION_PSERIES(0xa00, trap_0a)
- STD_EXCEPTION_PSERIES(0xb00, trap_0b)
+ /* We open code these as we can't have a ". = x" (even with
+ * x = ".") within a feature section
+ */
+ . = 0x500;
+ .globl hardware_interrupt_pSeries;
+ .globl hardware_interrupt_hv;
+hardware_interrupt_pSeries:
+hardware_interrupt_hv:
+ BEGIN_FTR_SECTION
+ _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD)
+ FTR_SECTION_ELSE
+ _MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV)
+ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_HVMODE_206)
+
+ STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
+ STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
+ STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
+
+ MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
+ MASKABLE_EXCEPTION_HV(0x980, 0x980, decrementer)
+
+ STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
+ STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
. = 0xc00
.globl system_call_pSeries
@@ -165,13 +209,13 @@ BEGIN_FTR_SECTION
beq- 1f
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
mr r9,r13
- mfspr r13,SPRN_SPRG_PACA
+ GET_PACA(r13)
mfspr r11,SPRN_SRR0
- ld r12,PACAKBASE(r13)
- ld r10,PACAKMSR(r13)
- LOAD_HANDLER(r12, system_call_entry)
- mtspr SPRN_SRR0,r12
mfspr r12,SPRN_SRR1
+ ld r10,PACAKBASE(r13)
+ LOAD_HANDLER(r10, system_call_entry)
+ mtspr SPRN_SRR0,r10
+ ld r10,PACAKMSR(r13)
mtspr SPRN_SRR1,r10
rfid
b . /* prevent speculative execution */
@@ -183,8 +227,21 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
rfid /* return to userspace */
b .
- STD_EXCEPTION_PSERIES(0xd00, single_step)
- STD_EXCEPTION_PSERIES(0xe00, trap_0e)
+ STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
+
+ /* At 0xe??? we have a bunch of hypervisor exceptions, we branch
+ * out of line to handle them
+ */
+ . = 0xe00
+ b h_data_storage_hv
+ . = 0xe20
+ b h_instr_storage_hv
+ . = 0xe40
+ b emulation_assist_hv
+ . = 0xe50
+ b hmi_exception_hv
+ . = 0xe60
+ b hmi_exception_hv
/* We need to deal with the Altivec unavailable exception
* here which is at 0xf20, thus in the middle of the
@@ -193,39 +250,42 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
*/
performance_monitor_pSeries_1:
. = 0xf00
- DO_KVM 0xf00
b performance_monitor_pSeries
altivec_unavailable_pSeries_1:
. = 0xf20
- DO_KVM 0xf20
b altivec_unavailable_pSeries
vsx_unavailable_pSeries_1:
. = 0xf40
- DO_KVM 0xf40
b vsx_unavailable_pSeries
#ifdef CONFIG_CBE_RAS
- HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
+ STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
#endif /* CONFIG_CBE_RAS */
- STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
+ STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
#ifdef CONFIG_CBE_RAS
- HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance)
+ STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
#endif /* CONFIG_CBE_RAS */
- STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
+ STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
#ifdef CONFIG_CBE_RAS
- HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal)
+ STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
#endif /* CONFIG_CBE_RAS */
. = 0x3000
-/*** pSeries interrupt support ***/
+/*** Out of line interrupts support ***/
+
+ /* moved from 0xe00 */
+ STD_EXCEPTION_HV(., 0xe00, h_data_storage)
+ STD_EXCEPTION_HV(., 0xe20, h_instr_storage)
+ STD_EXCEPTION_HV(., 0xe40, emulation_assist)
+ STD_EXCEPTION_HV(., 0xe60, hmi_exception) /* need to flush cache ? */
/* moved from 0xf00 */
- STD_EXCEPTION_PSERIES(., performance_monitor)
- STD_EXCEPTION_PSERIES(., altivec_unavailable)
- STD_EXCEPTION_PSERIES(., vsx_unavailable)
+ STD_EXCEPTION_PSERIES(., 0xf00, performance_monitor)
+ STD_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable)
+ STD_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable)
/*
* An interrupt came in while soft-disabled; clear EE in SRR1,
@@ -240,17 +300,30 @@ masked_interrupt:
rotldi r10,r10,16
mtspr SPRN_SRR1,r10
ld r10,PACA_EXGEN+EX_R10(r13)
- mfspr r13,SPRN_SPRG_SCRATCH0
+ GET_SCRATCH0(r13)
rfid
b .
+masked_Hinterrupt:
+ stb r10,PACAHARDIRQEN(r13)
+ mtcrf 0x80,r9
+ ld r9,PACA_EXGEN+EX_R9(r13)
+ mfspr r10,SPRN_HSRR1
+ rldicl r10,r10,48,1 /* clear MSR_EE */
+ rotldi r10,r10,16
+ mtspr SPRN_HSRR1,r10
+ ld r10,PACA_EXGEN+EX_R10(r13)
+ GET_SCRATCH0(r13)
+ hrfid
+ b .
+
.align 7
do_stab_bolted_pSeries:
std r11,PACA_EXSLB+EX_R11(r13)
std r12,PACA_EXSLB+EX_R12(r13)
- mfspr r10,SPRN_SPRG_SCRATCH0
+ GET_SCRATCH0(r10)
std r10,PACA_EXSLB+EX_R13(r13)
- EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted)
+ EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
#ifdef CONFIG_PPC_PSERIES
/*
@@ -260,15 +333,15 @@ do_stab_bolted_pSeries:
.align 7
system_reset_fwnmi:
HMT_MEDIUM
- mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
+ SET_SCRATCH0(r13) /* save r13 */
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD)
.globl machine_check_fwnmi
.align 7
machine_check_fwnmi:
HMT_MEDIUM
- mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */
- EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
+ SET_SCRATCH0(r13) /* save r13 */
+ EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common, EXC_STD)
#endif /* CONFIG_PPC_PSERIES */
@@ -282,7 +355,7 @@ slb_miss_user_pseries:
std r10,PACA_EXGEN+EX_R10(r13)
std r11,PACA_EXGEN+EX_R11(r13)
std r12,PACA_EXGEN+EX_R12(r13)
- mfspr r10,SPRG_SCRATCH0
+ GET_SCRATCH0(r10)
ld r11,PACA_EXSLB+EX_R9(r13)
ld r12,PACA_EXSLB+EX_R3(r13)
std r10,PACA_EXGEN+EX_R13(r13)
@@ -342,6 +415,8 @@ machine_check_common:
STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
+ STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
+ STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
#ifdef CONFIG_ALTIVEC
@@ -386,9 +461,24 @@ bad_stack:
std r12,_XER(r1)
SAVE_GPR(0,r1)
SAVE_GPR(2,r1)
- SAVE_4GPRS(3,r1)
- SAVE_2GPRS(7,r1)
- SAVE_10GPRS(12,r1)
+ ld r10,EX_R3(r3)
+ std r10,GPR3(r1)
+ SAVE_GPR(4,r1)
+ SAVE_4GPRS(5,r1)
+ ld r9,EX_R9(r3)
+ ld r10,EX_R10(r3)
+ SAVE_2GPRS(9,r1)
+ ld r9,EX_R11(r3)
+ ld r10,EX_R12(r3)
+ ld r11,EX_R13(r3)
+ std r9,GPR11(r1)
+ std r10,GPR12(r1)
+ std r11,GPR13(r1)
+BEGIN_FTR_SECTION
+ ld r10,EX_CFAR(r3)
+ std r10,ORIG_GPR3(r1)
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+ SAVE_8GPRS(14,r1)
SAVE_10GPRS(22,r1)
lhz r12,PACA_TRAP_SAVE(r13)
std r12,_TRAP(r1)
@@ -397,6 +487,9 @@ bad_stack:
li r12,0
std r12,0(r11)
ld r2,PACATOC(r13)
+ ld r11,exception_marker@toc(r2)
+ std r12,RESULT(r1)
+ std r11,STACK_FRAME_OVERHEAD-16(r1)
1: addi r3,r1,STACK_FRAME_OVERHEAD
bl .kernel_bad_stack
b 1b
@@ -419,6 +512,19 @@ data_access_common:
li r5,0x300
b .do_hash_page /* Try to handle as hpte fault */
+ .align 7
+ .globl h_data_storage_common
+h_data_storage_common:
+ mfspr r10,SPRN_HDAR
+ std r10,PACA_EXGEN+EX_DAR(r13)
+ mfspr r10,SPRN_HDSISR
+ stw r10,PACA_EXGEN+EX_DSISR(r13)
+ EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
+ bl .save_nvgprs
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .unknown_exception
+ b .ret_from_except
+
.align 7
.globl instruction_access_common
instruction_access_common:
@@ -428,6 +534,8 @@ instruction_access_common:
li r5,0x400
b .do_hash_page /* Try to handle as hpte fault */
+ STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception)
+
/*
* Here is the common SLB miss user that is used when going to virtual
* mode for SLB misses, that is currently not used
@@ -750,7 +858,7 @@ _STATIC(do_hash_page)
BEGIN_FTR_SECTION
andis. r0,r4,0x0020 /* Is it a segment table fault? */
bne- do_ste_alloc /* If so handle it */
-END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
clrrdi r11,r1,THREAD_SHIFT
lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
@@ -977,20 +1085,6 @@ _GLOBAL(do_stab_bolted)
rfid
b . /* prevent speculative execution */
-/*
- * Space for CPU0's segment table.
- *
- * On iSeries, the hypervisor must fill in at least one entry before
- * we get control (with relocate on). The address is given to the hv
- * as a page number (see xLparMap below), so this must be at a
- * fixed address (the linker can't compute (u64)&initial_stab >>
- * PAGE_SHIFT).
- */
- . = STAB0_OFFSET /* 0x6000 */
- .globl initial_stab
-initial_stab:
- .space 4096
-
#ifdef CONFIG_PPC_PSERIES
/*
* Data area reserved for FWNMI option.
@@ -1027,3 +1121,17 @@ xLparMap:
#ifdef CONFIG_PPC_PSERIES
. = 0x8000
#endif /* CONFIG_PPC_PSERIES */
+
+/*
+ * Space for CPU0's segment table.
+ *
+ * On iSeries, the hypervisor must fill in at least one entry before
+ * we get control (with relocate on). The address is given to the hv
+ * as a page number (see xLparMap above), so this must be at a
+ * fixed address (the linker can't compute (u64)&initial_stab >>
+ * PAGE_SHIFT).
+ */
+ . = STAB0_OFFSET /* 0x8000 */
+ .globl initial_stab
+initial_stab:
+ .space 4096
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 98c4b29a56f4..ba250d505e07 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -805,19 +805,6 @@ _ENTRY(copy_and_flush)
blr
#ifdef CONFIG_SMP
-#ifdef CONFIG_GEMINI
- .globl __secondary_start_gemini
-__secondary_start_gemini:
- mfspr r4,SPRN_HID0
- ori r4,r4,HID0_ICFI
- li r3,0
- ori r3,r3,HID0_ICE
- andc r4,r4,r3
- mtspr SPRN_HID0,r4
- sync
- b __secondary_start
-#endif /* CONFIG_GEMINI */
-
.globl __secondary_start_mpc86xx
__secondary_start_mpc86xx:
mfspr r3, SPRN_PIR
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index 9dd21a8c4d52..a91626d87fc9 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -766,7 +766,7 @@ DataAccess:
* miss get to this point to load the TLB.
* r10 - TLB_TAG value
* r11 - Linux PTE
- * r12, r9 - avilable to use
+ * r12, r9 - available to use
* PID - loaded with proper value when we get here
* Upon exit, we reload everything and RFI.
* Actually, it will fit now, but oh well.....a common place
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index cbb3436b592d..5e12b741ba5f 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -178,7 +178,7 @@ interrupt_base:
NORMAL_EXCEPTION_PROLOG
EXC_XFER_EE_LITE(0x0c00, DoSyscall)
- /* Auxillary Processor Unavailable Interrupt */
+ /* Auxiliary Processor Unavailable Interrupt */
EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
/* Decrementer Interrupt */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 782f23df7c85..ba504099844a 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -40,7 +40,7 @@
#include <asm/kvm_book3s_asm.h>
#include <asm/ptrace.h>
-/* The physical memory is layed out such that the secondary processor
+/* The physical memory is laid out such that the secondary processor
* spin code sits at 0x0000...0x00ff. On server, the vectors follow
* using the layout described in exceptions-64s.S
*/
@@ -147,6 +147,8 @@ __secondary_hold:
mtctr r4
mr r3,r24
li r4,0
+ /* Make sure that patched code is visible */
+ isync
bctr
#else
BUG_OPCODE
@@ -216,19 +218,25 @@ generic_secondary_common_init:
*/
LOAD_REG_ADDR(r13, paca) /* Load paca pointer */
ld r13,0(r13) /* Get base vaddr of paca array */
+#ifndef CONFIG_SMP
+ addi r13,r13,PACA_SIZE /* give r13 a known value if used accidentally */
+ b .kexec_wait /* wait for next kernel if !SMP */
+#else
+ LOAD_REG_ADDR(r7, nr_cpu_ids) /* Load nr_cpu_ids address */
+ lwz r7,0(r7) /* also the max paca allocated */
li r5,0 /* logical cpu id */
1: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */
cmpw r6,r24 /* Compare to our id */
beq 2f
addi r13,r13,PACA_SIZE /* Loop to next PACA on miss */
addi r5,r5,1
- cmpwi r5,NR_CPUS
+ cmpw r5,r7 /* Check if more pacas exist */
blt 1b
mr r3,r24 /* not found, copy phys to r3 */
b .kexec_wait /* next kernel might do better */
-2: mtspr SPRN_SPRG_PACA,r13 /* Save vaddr of paca in an SPRG */
+2: SET_PACA(r13)
#ifdef CONFIG_PPC_BOOK3E
addi r12,r13,PACA_EXTLB /* and TLB exc frame in another */
mtspr SPRN_SPRG_TLB_EXFRAME,r12
@@ -236,34 +244,39 @@ generic_secondary_common_init:
/* From now on, r24 is expected to be logical cpuid */
mr r24,r5
-3: HMT_LOW
- lbz r23,PACAPROCSTART(r13) /* Test if this processor should */
- /* start. */
-
-#ifndef CONFIG_SMP
- b 3b /* Never go on non-SMP */
-#else
- cmpwi 0,r23,0
- beq 3b /* Loop until told to go */
-
- sync /* order paca.run and cur_cpu_spec */
/* See if we need to call a cpu state restore handler */
LOAD_REG_ADDR(r23, cur_cpu_spec)
ld r23,0(r23)
ld r23,CPU_SPEC_RESTORE(r23)
cmpdi 0,r23,0
- beq 4f
+ beq 3f
ld r23,0(r23)
mtctr r23
bctrl
-4: /* Create a temp kernel stack for use before relocation is on. */
+3: LOAD_REG_ADDR(r3, boot_cpu_count) /* Decrement boot_cpu_count */
+ lwarx r4,0,r3
+ subi r4,r4,1
+ stwcx. r4,0,r3
+ bne 3b
+ isync
+
+4: HMT_LOW
+ lbz r23,PACAPROCSTART(r13) /* Test if this processor should */
+ /* start. */
+ cmpwi 0,r23,0
+ beq 4b /* Loop until told to go */
+
+ sync /* order paca.run and cur_cpu_spec */
+ isync /* In case code patching happened */
+
+ /* Create a temp kernel stack for use before relocation is on. */
ld r1,PACAEMERGSP(r13)
subi r1,r1,STACK_FRAME_OVERHEAD
b __secondary_start
-#endif
+#endif /* SMP */
/*
* Turn the MMU off.
@@ -534,7 +547,14 @@ _GLOBAL(pmac_secondary_start)
ld r4,0(r4) /* Get base vaddr of paca array */
mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */
add r13,r13,r4 /* for this processor. */
- mtspr SPRN_SPRG_PACA,r13 /* Save vaddr of paca in an SPRG*/
+ SET_PACA(r13) /* Save vaddr of paca in an SPRG*/
+
+ /* Mark interrupts soft and hard disabled (they might be enabled
+ * in the PACA when doing hotplug)
+ */
+ li r0,0
+ stb r0,PACASOFTIRQEN(r13)
+ stb r0,PACAHARDIRQEN(r13)
/* Create a temp kernel stack for use before relocation is on. */
ld r1,PACAEMERGSP(r13)
@@ -638,7 +658,7 @@ _GLOBAL(enable_64b_mode)
oris r11,r11,0x8000 /* CM bit set, we'll set ICM later */
mtmsr r11
#else /* CONFIG_PPC_BOOK3E */
- li r12,(MSR_SF | MSR_ISF)@highest
+ li r12,(MSR_64BIT | MSR_ISF)@highest
sldi r12,r12,48
or r11,r11,r12
mtmsrd r11
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 3e02710d9562..5ecf54cfa7d4 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -326,7 +326,7 @@ interrupt_base:
NORMAL_EXCEPTION_PROLOG
EXC_XFER_EE_LITE(0x0c00, DoSyscall)
- /* Auxillary Processor Unavailable Interrupt */
+ /* Auxiliary Processor Unavailable Interrupt */
EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
/* Decrementer Interrupt */
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index c00d4ca1ee15..28581f1ad2c0 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -527,7 +527,7 @@ static int ibmebus_bus_pm_resume_noirq(struct device *dev)
#endif /* !CONFIG_SUSPEND */
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
static int ibmebus_bus_pm_freeze(struct device *dev)
{
@@ -665,7 +665,7 @@ static int ibmebus_bus_pm_restore_noirq(struct device *dev)
return ret;
}
-#else /* !CONFIG_HIBERNATION */
+#else /* !CONFIG_HIBERNATE_CALLBACKS */
#define ibmebus_bus_pm_freeze NULL
#define ibmebus_bus_pm_thaw NULL
@@ -676,7 +676,7 @@ static int ibmebus_bus_pm_restore_noirq(struct device *dev)
#define ibmebus_bus_pm_poweroff_noirq NULL
#define ibmebus_bus_pm_restore_noirq NULL
-#endif /* !CONFIG_HIBERNATION */
+#endif /* !CONFIG_HIBERNATE_CALLBACKS */
static struct dev_pm_ops ibmebus_bus_dev_pm_ops = {
.prepare = ibmebus_bus_pm_prepare,
diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S
index 5328709eeedc..ba3195478600 100644
--- a/arch/powerpc/kernel/idle_power4.S
+++ b/arch/powerpc/kernel/idle_power4.S
@@ -53,24 +53,3 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
isync
b 1b
-_GLOBAL(power4_cpu_offline_powersave)
- /* Go to NAP now */
- mfmsr r7
- rldicl r0,r7,48,1
- rotldi r0,r0,16
- mtmsrd r0,1 /* hard-disable interrupts */
- li r0,1
- li r6,0
- stb r0,PACAHARDIRQEN(r13) /* we'll hard-enable shortly */
- stb r6,PACASOFTIRQEN(r13) /* soft-disable irqs */
-BEGIN_FTR_SECTION
- DSSALL
- sync
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
- ori r7,r7,MSR_EE
- oris r7,r7,MSR_POW@h
- sync
- isync
- mtmsrd r7
- isync
- blr
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
new file mode 100644
index 000000000000..f8f0bc7f1d4f
--- /dev/null
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -0,0 +1,97 @@
+/*
+ * This file contains the power_save function for Power7-family CPUs.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/threads.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/cputable.h>
+#include <asm/thread_info.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/ppc-opcode.h>
+
+#undef DEBUG
+
+ .text
+
+_GLOBAL(power7_idle)
+ /* Now check if user or arch enabled NAP mode */
+ LOAD_REG_ADDRBASE(r3,powersave_nap)
+ lwz r4,ADDROFF(powersave_nap)(r3)
+ cmpwi 0,r4,0
+ beqlr
+
+ /* NAP is a state loss, we create a regs frame on the
+ * stack, fill it up with the state we care about and
+ * stick a pointer to it in PACAR1. We really only
+ * need to save PC, some CR bits and the NV GPRs,
+ * but for now an interrupt frame will do.
+ */
+ mflr r0
+ std r0,16(r1)
+ stdu r1,-INT_FRAME_SIZE(r1)
+ std r0,_LINK(r1)
+ std r0,_NIP(r1)
+
+#ifndef CONFIG_SMP
+ /* Make sure FPU, VSX etc... are flushed as we may lose
+ * state when going to nap mode
+ */
+ bl .discard_lazy_cpu_state
+#endif /* CONFIG_SMP */
+
+ /* Hard disable interrupts */
+ mfmsr r9
+ rldicl r9,r9,48,1
+ rotldi r9,r9,16
+ mtmsrd r9,1 /* hard-disable interrupts */
+ li r0,0
+ stb r0,PACASOFTIRQEN(r13) /* we'll hard-enable shortly */
+ stb r0,PACAHARDIRQEN(r13)
+
+ /* Continue saving state */
+ SAVE_GPR(2, r1)
+ SAVE_NVGPRS(r1)
+ mfcr r3
+ std r3,_CCR(r1)
+ std r9,_MSR(r1)
+ std r1,PACAR1(r13)
+
+ /* Magic NAP mode enter sequence */
+ std r0,0(r1)
+ ptesync
+ ld r0,0(r1)
+1: cmp cr0,r0,r0
+ bne 1b
+ PPC_NAP
+ b .
+
+_GLOBAL(power7_wakeup_loss)
+ GET_PACA(r13)
+ ld r1,PACAR1(r13)
+ REST_NVGPRS(r1)
+ REST_GPR(2, r1)
+ ld r3,_CCR(r1)
+ ld r4,_MSR(r1)
+ ld r5,_NIP(r1)
+ addi r1,r1,INT_FRAME_SIZE
+ mtcr r3
+ mtspr SPRN_SRR1,r4
+ mtspr SPRN_SRR0,r5
+ rfid
+
+_GLOBAL(power7_wakeup_noloss)
+ GET_PACA(r13)
+ ld r1,PACAR1(r13)
+ ld r4,_MSR(r1)
+ ld r5,_NIP(r1)
+ addi r1,r1,INT_FRAME_SIZE
+ mtspr SPRN_SRR1,r4
+ mtspr SPRN_SRR0,r5
+ rfid
diff --git a/arch/powerpc/platforms/cell/io-workarounds.c b/arch/powerpc/kernel/io-workarounds.c
index 5c1118e31940..ffafaea3d261 100644
--- a/arch/powerpc/platforms/cell/io-workarounds.c
+++ b/arch/powerpc/kernel/io-workarounds.c
@@ -17,8 +17,7 @@
#include <asm/machdep.h>
#include <asm/pgtable.h>
#include <asm/ppc-pci.h>
-
-#include "io-workarounds.h"
+#include <asm/io-workarounds.h>
#define IOWA_MAX_BUS 8
@@ -145,7 +144,19 @@ static void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size,
return res;
}
-/* Regist new bus to support workaround */
+/* Enable IO workaround */
+static void __devinit io_workaround_init(void)
+{
+ static int io_workaround_inited;
+
+ if (io_workaround_inited)
+ return;
+ ppc_pci_io = iowa_pci_io;
+ ppc_md.ioremap = iowa_ioremap;
+ io_workaround_inited = 1;
+}
+
+/* Register new bus to support workaround */
void __devinit iowa_register_bus(struct pci_controller *phb,
struct ppc_pci_io *ops,
int (*initfunc)(struct iowa_bus *, void *), void *data)
@@ -153,6 +164,8 @@ void __devinit iowa_register_bus(struct pci_controller *phb,
struct iowa_bus *bus;
struct device_node *np = phb->dn;
+ io_workaround_init();
+
if (iowa_bus_count >= IOWA_MAX_BUS) {
pr_err("IOWA:Too many pci bridges, "
"workarounds disabled for %s\n", np->full_name);
@@ -162,6 +175,7 @@ void __devinit iowa_register_bus(struct pci_controller *phb,
bus = &iowa_busses[iowa_bus_count];
bus->phb = phb;
bus->ops = ops;
+ bus->private = data;
if (initfunc)
if ((*initfunc)(bus, data))
@@ -172,14 +186,3 @@ void __devinit iowa_register_bus(struct pci_controller *phb,
pr_debug("IOWA:[%d]Add bus, %s.\n", iowa_bus_count-1, np->full_name);
}
-/* enable IO workaround */
-void __devinit io_workaround_init(void)
-{
- static int io_workaround_inited;
-
- if (io_workaround_inited)
- return;
- ppc_pci_io = iowa_pci_io;
- ppc_md.ioremap = iowa_ioremap;
- io_workaround_inited = 1;
-}
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 0a5570338b96..a24d37d4cf51 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -66,7 +66,6 @@
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
-#include <asm/dbell.h>
#include <asm/smp.h>
#ifdef CONFIG_PPC64
@@ -160,7 +159,8 @@ notrace void arch_local_irq_restore(unsigned long en)
#if defined(CONFIG_BOOKE) && defined(CONFIG_SMP)
/* Check for pending doorbell interrupts and resend to ourself */
- doorbell_check_self();
+ if (cpu_has_feature(CPU_FTR_DBELL))
+ smp_muxed_ipi_resend();
#endif
/*
@@ -195,7 +195,7 @@ notrace void arch_local_irq_restore(unsigned long en)
EXPORT_SYMBOL(arch_local_irq_restore);
#endif /* CONFIG_PPC64 */
-static int show_other_interrupts(struct seq_file *p, int prec)
+int arch_show_interrupts(struct seq_file *p, int prec)
{
int j;
@@ -231,65 +231,6 @@ static int show_other_interrupts(struct seq_file *p, int prec)
return 0;
}
-int show_interrupts(struct seq_file *p, void *v)
-{
- unsigned long flags, any_count = 0;
- int i = *(loff_t *) v, j, prec;
- struct irqaction *action;
- struct irq_desc *desc;
- struct irq_chip *chip;
-
- if (i > nr_irqs)
- return 0;
-
- for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
- j *= 10;
-
- if (i == nr_irqs)
- return show_other_interrupts(p, prec);
-
- /* print header */
- if (i == 0) {
- seq_printf(p, "%*s", prec + 8, "");
- for_each_online_cpu(j)
- seq_printf(p, "CPU%-8d", j);
- seq_putc(p, '\n');
- }
-
- desc = irq_to_desc(i);
- if (!desc)
- return 0;
-
- raw_spin_lock_irqsave(&desc->lock, flags);
- for_each_online_cpu(j)
- any_count |= kstat_irqs_cpu(i, j);
- action = desc->action;
- if (!action && !any_count)
- goto out;
-
- seq_printf(p, "%*d: ", prec, i);
- for_each_online_cpu(j)
- seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
-
- chip = get_irq_desc_chip(desc);
- if (chip)
- seq_printf(p, " %-16s", chip->name);
- else
- seq_printf(p, " %-16s", "None");
- seq_printf(p, " %-8s", (desc->status & IRQ_LEVEL) ? "Level" : "Edge");
-
- if (action) {
- seq_printf(p, " %s", action->name);
- while ((action = action->next) != NULL)
- seq_printf(p, ", %s", action->name);
- }
-
- seq_putc(p, '\n');
-out:
- raw_spin_unlock_irqrestore(&desc->lock, flags);
- return 0;
-}
-
/*
* /proc/stat helpers
*/
@@ -305,34 +246,37 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
}
#ifdef CONFIG_HOTPLUG_CPU
-void fixup_irqs(const struct cpumask *map)
+void migrate_irqs(void)
{
struct irq_desc *desc;
unsigned int irq;
static int warned;
cpumask_var_t mask;
+ const struct cpumask *map = cpu_online_mask;
alloc_cpumask_var(&mask, GFP_KERNEL);
for_each_irq(irq) {
+ struct irq_data *data;
struct irq_chip *chip;
desc = irq_to_desc(irq);
if (!desc)
continue;
- if (desc->status & IRQ_PER_CPU)
+ data = irq_desc_get_irq_data(desc);
+ if (irqd_is_per_cpu(data))
continue;
- chip = get_irq_desc_chip(desc);
+ chip = irq_data_get_irq_chip(data);
- cpumask_and(mask, desc->irq_data.affinity, map);
+ cpumask_and(mask, data->affinity, map);
if (cpumask_any(mask) >= nr_cpu_ids) {
printk("Breaking affinity for irq %i\n", irq);
cpumask_copy(mask, map);
}
if (chip->irq_set_affinity)
- chip->irq_set_affinity(&desc->irq_data, mask, true);
+ chip->irq_set_affinity(data, mask, true);
else if (desc->action && !(warned++))
printk("Cannot set affinity for irq %i\n", irq);
}
@@ -453,24 +397,28 @@ struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;
void exc_lvl_ctx_init(void)
{
struct thread_info *tp;
- int i, hw_cpu;
+ int i, cpu_nr;
for_each_possible_cpu(i) {
- hw_cpu = get_hard_smp_processor_id(i);
- memset((void *)critirq_ctx[hw_cpu], 0, THREAD_SIZE);
- tp = critirq_ctx[hw_cpu];
- tp->cpu = i;
+#ifdef CONFIG_PPC64
+ cpu_nr = i;
+#else
+ cpu_nr = get_hard_smp_processor_id(i);
+#endif
+ memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
+ tp = critirq_ctx[cpu_nr];
+ tp->cpu = cpu_nr;
tp->preempt_count = 0;
#ifdef CONFIG_BOOKE
- memset((void *)dbgirq_ctx[hw_cpu], 0, THREAD_SIZE);
- tp = dbgirq_ctx[hw_cpu];
- tp->cpu = i;
+ memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE);
+ tp = dbgirq_ctx[cpu_nr];
+ tp->cpu = cpu_nr;
tp->preempt_count = 0;
- memset((void *)mcheckirq_ctx[hw_cpu], 0, THREAD_SIZE);
- tp = mcheckirq_ctx[hw_cpu];
- tp->cpu = i;
+ memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE);
+ tp = mcheckirq_ctx[cpu_nr];
+ tp->cpu = cpu_nr;
tp->preempt_count = HARDIRQ_OFFSET;
#endif
}
@@ -533,20 +481,41 @@ void do_softirq(void)
* IRQ controller and virtual interrupts
*/
+/* The main irq map itself is an array of NR_IRQ entries containing the
+ * associated host and irq number. An entry with a host of NULL is free.
+ * An entry can be allocated if it's free, the allocator always then sets
+ * hwirq first to the host's invalid irq number and then fills ops.
+ */
+struct irq_map_entry {
+ irq_hw_number_t hwirq;
+ struct irq_host *host;
+};
+
static LIST_HEAD(irq_hosts);
static DEFINE_RAW_SPINLOCK(irq_big_lock);
-static unsigned int revmap_trees_allocated;
static DEFINE_MUTEX(revmap_trees_mutex);
-struct irq_map_entry irq_map[NR_IRQS];
+static struct irq_map_entry irq_map[NR_IRQS];
static unsigned int irq_virq_count = NR_IRQS;
static struct irq_host *irq_default_host;
+irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
+{
+ return irq_map[d->irq].hwirq;
+}
+EXPORT_SYMBOL_GPL(irqd_to_hwirq);
+
irq_hw_number_t virq_to_hw(unsigned int virq)
{
return irq_map[virq].hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);
+bool virq_is_host(unsigned int virq, struct irq_host *host)
+{
+ return irq_map[virq].host == host;
+}
+EXPORT_SYMBOL_GPL(virq_is_host);
+
static int default_irq_host_match(struct irq_host *h, struct device_node *np)
{
return h->of_node != NULL && h->of_node == np;
@@ -567,7 +536,7 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
/* Allocate structure and revmap table if using linear mapping */
if (revmap_type == IRQ_HOST_MAP_LINEAR)
size += revmap_arg * sizeof(unsigned int);
- host = zalloc_maybe_bootmem(size, GFP_KERNEL);
+ host = kzalloc(size, GFP_KERNEL);
if (host == NULL)
return NULL;
@@ -617,14 +586,14 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
irq_map[i].host = host;
smp_wmb();
- /* Clear norequest flags */
- irq_to_desc(i)->status &= ~IRQ_NOREQUEST;
-
/* Legacy flags are left to default at this point,
* one can then use irq_create_mapping() to
* explicitly change them
*/
ops->map(host, i, i);
+
+ /* Clear norequest flags */
+ irq_clear_status_flags(i, IRQ_NOREQUEST);
}
break;
case IRQ_HOST_MAP_LINEAR:
@@ -635,6 +604,9 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
smp_wmb();
host->revmap_data.linear.revmap = rmap;
break;
+ case IRQ_HOST_MAP_TREE:
+ INIT_RADIX_TREE(&host->revmap_data.tree, GFP_KERNEL);
+ break;
default:
break;
}
@@ -692,8 +664,6 @@ static int irq_setup_virq(struct irq_host *host, unsigned int virq,
goto error;
}
- irq_clear_status_flags(virq, IRQ_NOREQUEST);
-
/* map it */
smp_wmb();
irq_map[virq].hwirq = hwirq;
@@ -704,6 +674,8 @@ static int irq_setup_virq(struct irq_host *host, unsigned int virq,
goto errdesc;
}
+ irq_clear_status_flags(virq, IRQ_NOREQUEST);
+
return 0;
errdesc:
@@ -760,8 +732,6 @@ unsigned int irq_create_mapping(struct irq_host *host,
*/
virq = irq_find_mapping(host, hwirq);
if (virq != NO_IRQ) {
- if (host->ops->remap)
- host->ops->remap(host, virq, hwirq);
pr_debug("irq: -> existing mapping on virq %d\n", virq);
return virq;
}
@@ -827,8 +797,8 @@ unsigned int irq_create_of_mapping(struct device_node *controller,
/* Set type if specified and different than the current one */
if (type != IRQ_TYPE_NONE &&
- type != (irq_to_desc(virq)->status & IRQF_TRIGGER_MASK))
- set_irq_type(virq, type);
+ type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
+ irq_set_irq_type(virq, type);
return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);
@@ -842,16 +812,17 @@ void irq_dispose_mapping(unsigned int virq)
return;
host = irq_map[virq].host;
- WARN_ON (host == NULL);
- if (host == NULL)
+ if (WARN_ON(host == NULL))
return;
/* Never unmap legacy interrupts */
if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
return;
+ irq_set_status_flags(virq, IRQ_NOREQUEST);
+
/* remove chip and handler */
- set_irq_chip_and_handler(virq, NULL, NULL);
+ irq_set_chip_and_handler(virq, NULL, NULL);
/* Make sure it's completed */
synchronize_irq(virq);
@@ -869,13 +840,6 @@ void irq_dispose_mapping(unsigned int virq)
host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
break;
case IRQ_HOST_MAP_TREE:
- /*
- * Check if radix tree allocated yet, if not then nothing to
- * remove.
- */
- smp_rmb();
- if (revmap_trees_allocated < 1)
- break;
mutex_lock(&revmap_trees_mutex);
radix_tree_delete(&host->revmap_data.tree, hwirq);
mutex_unlock(&revmap_trees_mutex);
@@ -886,8 +850,6 @@ void irq_dispose_mapping(unsigned int virq)
smp_mb();
irq_map[virq].hwirq = host->inval_irq;
- irq_set_status_flags(virq, IRQ_NOREQUEST);
-
irq_free_descs(virq, 1);
/* Free it */
irq_free_virt(virq, 1);
@@ -933,16 +895,9 @@ unsigned int irq_radix_revmap_lookup(struct irq_host *host,
struct irq_map_entry *ptr;
unsigned int virq;
- WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
-
- /*
- * Check if the radix tree exists and has bee initialized.
- * If not, we fallback to slow mode
- */
- if (revmap_trees_allocated < 2)
+ if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_TREE))
return irq_find_mapping(host, hwirq);
- /* Now try to resolve */
/*
* No rcu_read_lock(ing) needed, the ptr returned can't go under us
* as it's referencing an entry in the static irq_map table.
@@ -965,16 +920,7 @@ unsigned int irq_radix_revmap_lookup(struct irq_host *host,
void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
irq_hw_number_t hwirq)
{
-
- WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
-
- /*
- * Check if the radix tree exists yet.
- * If not, then the irq will be inserted into the tree when it gets
- * initialized.
- */
- smp_rmb();
- if (revmap_trees_allocated < 1)
+ if (WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE))
return;
if (virq != NO_IRQ) {
@@ -990,7 +936,8 @@ unsigned int irq_linear_revmap(struct irq_host *host,
{
unsigned int *revmap;
- WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR);
+ if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_LINEAR))
+ return irq_find_mapping(host, hwirq);
/* Check revmap bounds */
if (unlikely(hwirq >= host->revmap_data.linear.size))
@@ -1084,53 +1031,6 @@ int arch_early_irq_init(void)
return 0;
}
-/* We need to create the radix trees late */
-static int irq_late_init(void)
-{
- struct irq_host *h;
- unsigned int i;
-
- /*
- * No mutual exclusion with respect to accessors of the tree is needed
- * here as the synchronization is done via the state variable
- * revmap_trees_allocated.
- */
- list_for_each_entry(h, &irq_hosts, link) {
- if (h->revmap_type == IRQ_HOST_MAP_TREE)
- INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL);
- }
-
- /*
- * Make sure the radix trees inits are visible before setting
- * the flag
- */
- smp_wmb();
- revmap_trees_allocated = 1;
-
- /*
- * Insert the reverse mapping for those interrupts already present
- * in irq_map[].
- */
- mutex_lock(&revmap_trees_mutex);
- for (i = 0; i < irq_virq_count; i++) {
- if (irq_map[i].host &&
- (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE))
- radix_tree_insert(&irq_map[i].host->revmap_data.tree,
- irq_map[i].hwirq, &irq_map[i]);
- }
- mutex_unlock(&revmap_trees_mutex);
-
- /*
- * Make sure the radix trees insertions are visible before setting
- * the flag
- */
- smp_wmb();
- revmap_trees_allocated = 2;
-
- return 0;
-}
-arch_initcall(irq_late_init);
-
#ifdef CONFIG_VIRQ_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
{
@@ -1138,10 +1038,11 @@ static int virq_debug_show(struct seq_file *m, void *private)
struct irq_desc *desc;
const char *p;
static const char none[] = "none";
+ void *data;
int i;
- seq_printf(m, "%-5s %-7s %-15s %s\n", "virq", "hwirq",
- "chip name", "host name");
+ seq_printf(m, "%-5s %-7s %-15s %-18s %s\n", "virq", "hwirq",
+ "chip name", "chip data", "host name");
for (i = 1; i < nr_irqs; i++) {
desc = irq_to_desc(i);
@@ -1154,15 +1055,18 @@ static int virq_debug_show(struct seq_file *m, void *private)
struct irq_chip *chip;
seq_printf(m, "%5d ", i);
- seq_printf(m, "0x%05lx ", virq_to_hw(i));
+ seq_printf(m, "0x%05lx ", irq_map[i].hwirq);
- chip = get_irq_desc_chip(desc);
+ chip = irq_desc_get_chip(desc);
if (chip && chip->name)
p = chip->name;
else
p = none;
seq_printf(m, "%-15s ", p);
+ data = irq_desc_get_chip_data(desc);
+ seq_printf(m, "0x%16p ", data);
+
if (irq_map[i].host && irq_map[i].host->of_node)
p = irq_map[i].host->of_node->full_name;
else
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 42850ee00ada..bd9d35f59cf4 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -109,7 +109,7 @@ static int kgdb_call_nmi_hook(struct pt_regs *regs)
#ifdef CONFIG_SMP
void kgdb_roundup_cpus(unsigned long flags)
{
- smp_send_debugger_break(MSG_ALL_BUT_SELF);
+ smp_send_debugger_break();
}
#endif
diff --git a/arch/powerpc/kernel/l2cr_6xx.S b/arch/powerpc/kernel/l2cr_6xx.S
index 2a2f3c3f6d80..97ec8557f974 100644
--- a/arch/powerpc/kernel/l2cr_6xx.S
+++ b/arch/powerpc/kernel/l2cr_6xx.S
@@ -151,7 +151,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
/**** Might be a good idea to set L2DO here - to prevent instructions
from getting into the cache. But since we invalidate
the next time we enable the cache it doesn't really matter.
- Don't do this unless you accomodate all processor variations.
+ Don't do this unless you accommodate all processor variations.
The bit moved on the 7450.....
****/
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index c834757bebc0..2b97b80d6d7d 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -330,9 +330,11 @@ void __init find_legacy_serial_ports(void)
if (!parent)
continue;
if (of_match_node(legacy_serial_parents, parent) != NULL) {
- index = add_legacy_soc_port(np, np);
- if (index >= 0 && np == stdout)
- legacy_serial_console = index;
+ if (of_device_is_available(np)) {
+ index = add_legacy_soc_port(np, np);
+ if (index >= 0 && np == stdout)
+ legacy_serial_console = index;
+ }
}
of_node_put(parent);
}
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 16468362ad57..84daabe2fcba 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -132,34 +132,6 @@ static int iseries_lparcfg_data(struct seq_file *m, void *v)
/*
* Methods used to fetch LPAR data when running on a pSeries platform.
*/
-/**
- * h_get_mpp
- * H_GET_MPP hcall returns info in 7 parms
- */
-int h_get_mpp(struct hvcall_mpp_data *mpp_data)
-{
- int rc;
- unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
-
- rc = plpar_hcall9(H_GET_MPP, retbuf);
-
- mpp_data->entitled_mem = retbuf[0];
- mpp_data->mapped_mem = retbuf[1];
-
- mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
- mpp_data->pool_num = retbuf[2] & 0xffff;
-
- mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
- mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
- mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffff;
-
- mpp_data->pool_size = retbuf[4];
- mpp_data->loan_request = retbuf[5];
- mpp_data->backing_mem = retbuf[6];
-
- return rc;
-}
-EXPORT_SYMBOL(h_get_mpp);
struct hvcall_ppp_data {
u64 entitlement;
@@ -262,7 +234,7 @@ static void parse_ppp_data(struct seq_file *m)
seq_printf(m, "system_active_processors=%d\n",
ppp_data.active_system_procs);
- /* pool related entries are apropriate for shared configs */
+ /* pool related entries are appropriate for shared configs */
if (lppaca_of(0).shared_proc) {
unsigned long pool_idle_time, pool_procs;
@@ -345,6 +317,30 @@ static void parse_mpp_data(struct seq_file *m)
seq_printf(m, "backing_memory=%ld bytes\n", mpp_data.backing_mem);
}
+/**
+ * parse_mpp_x_data
+ * Parse out data returned from h_get_mpp_x
+ */
+static void parse_mpp_x_data(struct seq_file *m)
+{
+ struct hvcall_mpp_x_data mpp_x_data;
+
+ if (!firmware_has_feature(FW_FEATURE_XCMO))
+ return;
+ if (h_get_mpp_x(&mpp_x_data))
+ return;
+
+ seq_printf(m, "coalesced_bytes=%ld\n", mpp_x_data.coalesced_bytes);
+
+ if (mpp_x_data.pool_coalesced_bytes)
+ seq_printf(m, "pool_coalesced_bytes=%ld\n",
+ mpp_x_data.pool_coalesced_bytes);
+ if (mpp_x_data.pool_purr_cycles)
+ seq_printf(m, "coalesce_pool_purr=%ld\n", mpp_x_data.pool_purr_cycles);
+ if (mpp_x_data.pool_spurr_cycles)
+ seq_printf(m, "coalesce_pool_spurr=%ld\n", mpp_x_data.pool_spurr_cycles);
+}
+
#define SPLPAR_CHARACTERISTICS_TOKEN 20
#define SPLPAR_MAXLENGTH 1026*(sizeof(char))
@@ -520,6 +516,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
parse_system_parameter_string(m);
parse_ppp_data(m);
parse_mpp_data(m);
+ parse_mpp_x_data(m);
pseries_cmo_data(m);
splpar_dispatch_data(m);
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index bd1e1ff17b2d..7ee50f0547cb 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -31,17 +31,17 @@ void machine_kexec_mask_interrupts(void) {
if (!desc)
continue;
- chip = get_irq_desc_chip(desc);
+ chip = irq_desc_get_chip(desc);
if (!chip)
continue;
- if (chip->irq_eoi && desc->status & IRQ_INPROGRESS)
+ if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
chip->irq_eoi(&desc->irq_data);
if (chip->irq_mask)
chip->irq_mask(&desc->irq_data);
- if (chip->irq_disable && !(desc->status & IRQ_DISABLED))
+ if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
chip->irq_disable(&desc->irq_data);
}
}
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 094bd9821ad4..998a10028608 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -694,6 +694,17 @@ _GLOBAL(kernel_thread)
addi r1,r1,16
blr
+#ifdef CONFIG_SMP
+_GLOBAL(start_secondary_resume)
+ /* Reset stack */
+ rlwinm r1,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
+ addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
+ li r3,0
+ stw r3,0(r1) /* Zero the stack frame pointer */
+ bl start_secondary
+ b .
+#endif /* CONFIG_SMP */
+
/*
* This routine is just here to keep GCC happy - sigh...
*/
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 206a321a71d3..e89df59cdc5a 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -462,7 +462,8 @@ _GLOBAL(disable_kernel_fp)
* wait for the flag to change, indicating this kernel is going away but
* the slave code for the next one is at addresses 0 to 100.
*
- * This is used by all slaves.
+ * This is used by all slaves, even those that did not find a matching
+ * paca in the secondary startup code.
*
* Physical (hardware) cpu id should be in r3.
*/
@@ -471,10 +472,6 @@ _GLOBAL(kexec_wait)
1: mflr r5
addi r5,r5,kexec_flag-1b
- li r4,KEXEC_STATE_REAL_MODE
- stb r4,PACAKEXECSTATE(r13)
- SYNC
-
99: HMT_LOW
#ifdef CONFIG_KEXEC /* use no memory without kexec */
lwz r4,0(r5)
@@ -499,11 +496,17 @@ kexec_flag:
*
* get phys id from paca
* switch to real mode
+ * mark the paca as no longer used
* join other cpus in kexec_wait(phys_id)
*/
_GLOBAL(kexec_smp_wait)
lhz r3,PACAHWCPUID(r13)
bl real_mode
+
+ li r4,KEXEC_STATE_REAL_MODE
+ stb r4,PACAKEXECSTATE(r13)
+ SYNC
+
b .kexec_wait
/*
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index f4adf89d7614..efeb88184182 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -7,7 +7,7 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/threads.h>
+#include <linux/smp.h>
#include <linux/module.h>
#include <linux/memblock.h>
@@ -156,18 +156,29 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
/* Put the paca pointer into r13 and SPRG_PACA */
void setup_paca(struct paca_struct *new_paca)
{
+ /* Setup r13 */
local_paca = new_paca;
- mtspr(SPRN_SPRG_PACA, local_paca);
+
#ifdef CONFIG_PPC_BOOK3E
+ /* On Book3E, initialize the TLB miss exception frames */
mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb);
+#else
+ /* In HV mode, we setup both HPACA and PACA to avoid problems
+ * if we do a GET_PACA() before the feature fixups have been
+ * applied
+ */
+ if (cpu_has_feature(CPU_FTR_HVMODE_206))
+ mtspr(SPRN_SPRG_HPACA, local_paca);
#endif
+ mtspr(SPRN_SPRG_PACA, local_paca);
+
}
static int __initdata paca_size;
void __init allocate_pacas(void)
{
- int nr_cpus, cpu, limit;
+ int cpu, limit;
/*
* We can't take SLB misses on the paca, and we want to access them
@@ -179,23 +190,18 @@ void __init allocate_pacas(void)
if (firmware_has_feature(FW_FEATURE_ISERIES))
limit = min(limit, HvPagesToMap * HVPAGESIZE);
- nr_cpus = NR_CPUS;
- /* On iSeries we know we can never have more than 64 cpus */
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- nr_cpus = min(64, nr_cpus);
-
- paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpus);
+ paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids);
paca = __va(memblock_alloc_base(paca_size, PAGE_SIZE, limit));
memset(paca, 0, paca_size);
printk(KERN_DEBUG "Allocated %u bytes for %d pacas at %p\n",
- paca_size, nr_cpus, paca);
+ paca_size, nr_cpu_ids, paca);
- allocate_lppacas(nr_cpus, limit);
+ allocate_lppacas(nr_cpu_ids, limit);
/* Can't use for_each_*_cpu, as they aren't functional yet */
- for (cpu = 0; cpu < nr_cpus; cpu++)
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++)
initialise_paca(&paca[cpu], cpu);
}
@@ -203,7 +209,7 @@ void __init free_unused_pacas(void)
{
int new_size;
- new_size = PAGE_ALIGN(sizeof(struct paca_struct) * num_possible_cpus());
+ new_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids);
if (new_size >= paca_size)
return;
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 3cd85faa8ac6..893af2a9cd03 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -261,7 +261,7 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
virq = irq_create_mapping(NULL, line);
if (virq != NO_IRQ)
- set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
} else {
pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
oirq.size, oirq.specifier[0], oirq.specifier[1],
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index d225d99fe39d..6baabc13306a 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -43,10 +43,9 @@ void * __devinit update_dn_pci_info(struct device_node *dn, void *data)
const u32 *regs;
struct pci_dn *pdn;
- pdn = alloc_maybe_bootmem(sizeof(*pdn), GFP_KERNEL);
+ pdn = zalloc_maybe_bootmem(sizeof(*pdn), GFP_KERNEL);
if (pdn == NULL)
return NULL;
- memset(pdn, 0, sizeof(*pdn));
dn->data = pdn;
pdn->node = dn;
pdn->phb = phb;
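The pci_dn.c hunk replaces an alloc-plus-memset pair with zalloc_maybe_bootmem(), which hands back already-zeroed memory. A user-space approximation of that helper is sketched below; plain malloc() stands in for the kmalloc/bootmem split, which only exists inside the kernel.

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* Rough stand-in for zalloc_maybe_bootmem(): allocate and zero in one step.
 * The real helper picks kmalloc or the boot-time allocator depending on how
 * far init has progressed; here malloc stands in for both. */
static void *zalloc(size_t size)
{
        void *p = malloc(size);

        if (p)
                memset(p, 0, size);
        return p;
}

struct pci_dn_like {
        int busno;
        int devfn;
        void *node;
};

int main(void)
{
        struct pci_dn_like *pdn = zalloc(sizeof(*pdn));

        if (!pdn)
                return 1;
        /* All fields start out zeroed; no separate memset is needed. */
        printf("busno=%d devfn=%d node=%p\n", pdn->busno, pdn->devfn, pdn->node);
        free(pdn);
        return 0;
}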
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 97e0ae414940..822f63008ae1 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -398,6 +398,25 @@ static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
return 0;
}
+static u64 check_and_compute_delta(u64 prev, u64 val)
+{
+ u64 delta = (val - prev) & 0xfffffffful;
+
+ /*
+ * POWER7 can roll back counter values; if the new value is smaller
+ * than the previous value it will cause the delta and the counter to
+ * have bogus values unless we rolled a counter over. If a counter is
+ * rolled back, it will be smaller, but within 256, which is the maximum
+ * number of events to roll back at once. If we detect a rollback,
+ * return 0. This can lead to a small lack of precision in the
+ * counters.
+ */
+ if (prev > val && (prev - val) < 256)
+ delta = 0;
+
+ return delta;
+}
+
static void power_pmu_read(struct perf_event *event)
{
s64 val, delta, prev;
@@ -416,10 +435,11 @@ static void power_pmu_read(struct perf_event *event)
prev = local64_read(&event->hw.prev_count);
barrier();
val = read_pmc(event->hw.idx);
+ delta = check_and_compute_delta(prev, val);
+ if (!delta)
+ return;
} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
- /* The counters are only 32 bits wide */
- delta = (val - prev) & 0xfffffffful;
local64_add(delta, &event->count);
local64_sub(delta, &event->hw.period_left);
}
@@ -449,8 +469,9 @@ static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
val = (event->hw.idx == 5) ? pmc5 : pmc6;
prev = local64_read(&event->hw.prev_count);
event->hw.idx = 0;
- delta = (val - prev) & 0xfffffffful;
- local64_add(delta, &event->count);
+ delta = check_and_compute_delta(prev, val);
+ if (delta)
+ local64_add(delta, &event->count);
}
}
@@ -458,14 +479,16 @@ static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
unsigned long pmc5, unsigned long pmc6)
{
struct perf_event *event;
- u64 val;
+ u64 val, prev;
int i;
for (i = 0; i < cpuhw->n_limited; ++i) {
event = cpuhw->limited_counter[i];
event->hw.idx = cpuhw->limited_hwidx[i];
val = (event->hw.idx == 5) ? pmc5 : pmc6;
- local64_set(&event->hw.prev_count, val);
+ prev = local64_read(&event->hw.prev_count);
+ if (check_and_compute_delta(prev, val))
+ local64_set(&event->hw.prev_count, val);
perf_event_update_userpage(event);
}
}
@@ -759,7 +782,7 @@ static int power_pmu_add(struct perf_event *event, int ef_flags)
/*
* If group events scheduling transaction was started,
- * skip the schedulability test here, it will be peformed
+ * skip the schedulability test here, it will be performed
* at commit time(->commit_txn) as a whole
*/
if (cpuhw->group_flag & PERF_EVENT_TXN)
@@ -1197,7 +1220,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
/* we don't have to worry about interrupts here */
prev = local64_read(&event->hw.prev_count);
- delta = (val - prev) & 0xfffffffful;
+ delta = check_and_compute_delta(prev, val);
local64_add(delta, &event->count);
/*
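The perf_event.c changes funnel every 32-bit PMC delta through check_and_compute_delta(), which also ignores the small POWER7 rollbacks described in the comment above. The helper is self-contained enough to exercise in user space; this sketch copies its logic and contrasts a normal counter wrap with a rollback:

#include <stdio.h>
#include <stdint.h>

/* Same logic as check_and_compute_delta(): the PMCs are 32 bits wide, so
 * the delta is computed modulo 2^32. A value that went backwards by less
 * than 256 is treated as a hardware rollback and contributes nothing. */
static uint64_t check_and_compute_delta(uint64_t prev, uint64_t val)
{
        uint64_t delta = (val - prev) & 0xfffffffful;

        if (prev > val && (prev - val) < 256)
                delta = 0;
        return delta;
}

int main(void)
{
        /* Counter wrapped from near 2^32 back to a small value: real delta. */
        printf("wrap:     %llu\n",
               (unsigned long long)check_and_compute_delta(0xfffffff0ul, 0x10));
        /* Counter rolled back by 8: treated as no progress at all. */
        printf("rollback: %llu\n",
               (unsigned long long)check_and_compute_delta(1000, 992));
        return 0;
}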
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index ef3ef566235e..7d28f540200c 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -54,7 +54,6 @@ extern void single_step_exception(struct pt_regs *regs);
extern int sys_sigreturn(struct pt_regs *regs);
EXPORT_SYMBOL(clear_pages);
-EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
EXPORT_SYMBOL(DMA_MODE_READ);
EXPORT_SYMBOL(DMA_MODE_WRITE);
@@ -88,9 +87,7 @@ EXPORT_SYMBOL(__copy_tofrom_user);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(__strnlen_user);
-#ifdef CONFIG_PPC64
-EXPORT_SYMBOL(copy_4K_page);
-#endif
+EXPORT_SYMBOL(copy_page);
#if defined(CONFIG_PCI) && defined(CONFIG_PPC32)
EXPORT_SYMBOL(isa_io_base);
diff --git a/arch/powerpc/kernel/ppc_save_regs.S b/arch/powerpc/kernel/ppc_save_regs.S
index e83ba3f078e4..1b1787d52896 100644
--- a/arch/powerpc/kernel/ppc_save_regs.S
+++ b/arch/powerpc/kernel/ppc_save_regs.S
@@ -15,7 +15,7 @@
/*
* Grab the register values as they are now.
- * This won't do a particularily good job because we really
+ * This won't do a particularly good job because we really
* want our caller's caller's registers, and our caller has
* already executed its prologue.
* ToDo: We could reach back into the caller's save area to do
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index f74f355a9617..095043d79946 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -702,6 +702,8 @@ void prepare_to_copy(struct task_struct *tsk)
/*
* Copy a thread..
*/
+extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */
+
int copy_thread(unsigned long clone_flags, unsigned long usp,
unsigned long unused, struct task_struct *p,
struct pt_regs *regs)
@@ -755,11 +757,11 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
_ALIGN_UP(sizeof(struct thread_info), 16);
#ifdef CONFIG_PPC_STD_MMU_64
- if (cpu_has_feature(CPU_FTR_SLB)) {
+ if (mmu_has_feature(MMU_FTR_SLB)) {
unsigned long sp_vsid;
unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
- if (cpu_has_feature(CPU_FTR_1T_SEGMENT))
+ if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
<< SLB_VSID_SHIFT_1T;
else
@@ -769,6 +771,20 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
p->thread.ksp_vsid = sp_vsid;
}
#endif /* CONFIG_PPC_STD_MMU_64 */
+#ifdef CONFIG_PPC64
+ if (cpu_has_feature(CPU_FTR_DSCR)) {
+ if (current->thread.dscr_inherit) {
+ p->thread.dscr_inherit = 1;
+ p->thread.dscr = current->thread.dscr;
+ } else if (0 != dscr_default) {
+ p->thread.dscr_inherit = 1;
+ p->thread.dscr = dscr_default;
+ } else {
+ p->thread.dscr_inherit = 0;
+ p->thread.dscr = 0;
+ }
+ }
+#endif
/*
* The PPC64 ABI makes use of a TOC to contain function
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 05b7139d6a27..48aeb55faae9 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -68,6 +68,7 @@ int __initdata iommu_force_on;
unsigned long tce_alloc_start, tce_alloc_end;
u64 ppc64_rma_size;
#endif
+static phys_addr_t first_memblock_size;
static int __init early_parse_mem(char *p)
{
@@ -123,18 +124,19 @@ static void __init move_device_tree(void)
*/
static struct ibm_pa_feature {
unsigned long cpu_features; /* CPU_FTR_xxx bit */
+ unsigned long mmu_features; /* MMU_FTR_xxx bit */
unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */
unsigned char pabyte; /* byte number in ibm,pa-features */
unsigned char pabit; /* bit number (big-endian) */
unsigned char invert; /* if 1, pa bit set => clear feature */
} ibm_pa_features[] __initdata = {
- {0, PPC_FEATURE_HAS_MMU, 0, 0, 0},
- {0, PPC_FEATURE_HAS_FPU, 0, 1, 0},
- {CPU_FTR_SLB, 0, 0, 2, 0},
- {CPU_FTR_CTRL, 0, 0, 3, 0},
- {CPU_FTR_NOEXECUTE, 0, 0, 6, 0},
- {CPU_FTR_NODSISRALIGN, 0, 1, 1, 1},
- {CPU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0},
+ {0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0},
+ {0, 0, PPC_FEATURE_HAS_FPU, 0, 1, 0},
+ {0, MMU_FTR_SLB, 0, 0, 2, 0},
+ {CPU_FTR_CTRL, 0, 0, 0, 3, 0},
+ {CPU_FTR_NOEXECUTE, 0, 0, 0, 6, 0},
+ {CPU_FTR_NODSISRALIGN, 0, 0, 1, 1, 1},
+ {0, MMU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0},
{CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
};
@@ -166,9 +168,11 @@ static void __init scan_features(unsigned long node, unsigned char *ftrs,
if (bit ^ fp->invert) {
cur_cpu_spec->cpu_features |= fp->cpu_features;
cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
+ cur_cpu_spec->mmu_features |= fp->mmu_features;
} else {
cur_cpu_spec->cpu_features &= ~fp->cpu_features;
cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
+ cur_cpu_spec->mmu_features &= ~fp->mmu_features;
}
}
}
@@ -268,13 +272,13 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
const char *uname, int depth,
void *data)
{
- static int logical_cpuid = 0;
char *type = of_get_flat_dt_prop(node, "device_type", NULL);
const u32 *prop;
const u32 *intserv;
int i, nthreads;
unsigned long len;
- int found = 0;
+ int found = -1;
+ int found_thread = 0;
/* We are scanning "cpu" nodes only */
if (type == NULL || strcmp(type, "cpu") != 0)
@@ -298,11 +302,10 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
* version 2 of the kexec param format adds the phys cpuid of
* booted proc.
*/
- if (initial_boot_params && initial_boot_params->version >= 2) {
- if (intserv[i] ==
- initial_boot_params->boot_cpuid_phys) {
- found = 1;
- break;
+ if (initial_boot_params->version >= 2) {
+ if (intserv[i] == initial_boot_params->boot_cpuid_phys) {
+ found = boot_cpu_count;
+ found_thread = i;
}
} else {
/*
@@ -311,23 +314,20 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
* off secondary threads.
*/
if (of_get_flat_dt_prop(node,
- "linux,boot-cpu", NULL) != NULL) {
- found = 1;
- break;
- }
+ "linux,boot-cpu", NULL) != NULL)
+ found = boot_cpu_count;
}
-
#ifdef CONFIG_SMP
/* logical cpu id is always 0 on UP kernels */
- logical_cpuid++;
+ boot_cpu_count++;
#endif
}
- if (found) {
- DBG("boot cpu: logical %d physical %d\n", logical_cpuid,
- intserv[i]);
- boot_cpuid = logical_cpuid;
- set_hard_smp_processor_id(boot_cpuid, intserv[i]);
+ if (found >= 0) {
+ DBG("boot cpu: logical %d physical %d\n", found,
+ intserv[found_thread]);
+ boot_cpuid = found;
+ set_hard_smp_processor_id(found, intserv[found_thread]);
/*
* PAPR defines "logical" PVR values for cpus that
@@ -509,11 +509,14 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
size = 0x80000000ul - base;
}
#endif
-
- /* First MEMBLOCK added, do some special initializations */
- if (memstart_addr == ~(phys_addr_t)0)
- setup_initial_memory_limit(base, size);
- memstart_addr = min((u64)memstart_addr, base);
+ /* Keep track of the beginning of memory -and- the size of
+ * the very first block in the device-tree as it represents
+ * the RMA on ppc64 server
+ */
+ if (base < memstart_addr) {
+ memstart_addr = base;
+ first_memblock_size = size;
+ }
/* Add the chunk to the MEMBLOCK list */
memblock_add(base, size);
@@ -683,7 +686,7 @@ void __init early_init_devtree(void *params)
#endif
#ifdef CONFIG_PHYP_DUMP
- /* scan tree to see if dump occured during last boot */
+ /* scan tree to see if dump occurred during last boot */
of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL);
#endif
@@ -698,6 +701,7 @@ void __init early_init_devtree(void *params)
of_scan_flat_dt(early_init_dt_scan_root, NULL);
of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);
+ setup_initial_memory_limit(memstart_addr, first_memblock_size);
/* Save command line for /proc/cmdline and then parse parameters */
strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
@@ -739,7 +743,7 @@ void __init early_init_devtree(void *params)
DBG("Scanning CPUs ...\n");
- /* Retreive CPU related informations from the flat tree
+ /* Retrieve CPU related information from the flat tree
* (altivec support, boot CPU ID, ...)
*/
of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
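scan_features() now drives MMU feature bits as well as CPU ones from the ibm,pa-features table. The subtle part is the bit test: bits are numbered big-endian within each byte, and an entry may be inverted. A standalone sketch of that test follows, using a made-up two-entry table and demo feature bits rather than the real CPU_FTR_/MMU_FTR_ values:

#include <stdio.h>
#include <stdint.h>

struct pa_feature {
        uint32_t cpu_features;   /* bits to set/clear in the CPU mask */
        uint32_t mmu_features;   /* bits to set/clear in the MMU mask */
        uint8_t  pabyte;         /* byte index in ibm,pa-features */
        uint8_t  pabit;          /* bit number, big-endian within the byte */
        uint8_t  invert;         /* 1: property bit set => clear feature */
};

/* Hypothetical feature bits, just for the demo. */
#define DEMO_CPU_CTRL  0x01
#define DEMO_MMU_SLB   0x02

static const struct pa_feature table[] = {
        { 0,             DEMO_MMU_SLB, 0, 2, 0 },
        { DEMO_CPU_CTRL, 0,            0, 3, 0 },
};

static void scan(const uint8_t *ftrs, size_t len,
                 uint32_t *cpu_mask, uint32_t *mmu_mask)
{
        size_t i;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
                const struct pa_feature *fp = &table[i];
                int bit;

                if (fp->pabyte >= len)
                        continue;
                /* Big-endian bit numbering: bit 0 is the MSB of the byte. */
                bit = (ftrs[fp->pabyte] >> (7 - fp->pabit)) & 1;
                if (bit ^ fp->invert) {
                        *cpu_mask |= fp->cpu_features;
                        *mmu_mask |= fp->mmu_features;
                } else {
                        *cpu_mask &= ~fp->cpu_features;
                        *mmu_mask &= ~fp->mmu_features;
                }
        }
}

int main(void)
{
        uint8_t ftrs[] = { 0x20 };  /* only big-endian bit 2 is set */
        uint32_t cpu = 0, mmu = 0;

        scan(ftrs, sizeof(ftrs), &cpu, &mmu);
        printf("cpu=0x%x mmu=0x%x\n", cpu, mmu);  /* expect cpu=0x0 mmu=0x2 */
        return 0;
}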
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 941ff4dbc567..c016033ba78d 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -335,6 +335,7 @@ static void __init prom_printf(const char *format, ...)
const char *p, *q, *s;
va_list args;
unsigned long v;
+ long vs;
struct prom_t *_prom = &RELOC(prom);
va_start(args, format);
@@ -368,12 +369,35 @@ static void __init prom_printf(const char *format, ...)
v = va_arg(args, unsigned long);
prom_print_hex(v);
break;
+ case 'd':
+ ++q;
+ vs = va_arg(args, int);
+ if (vs < 0) {
+ prom_print(RELOC("-"));
+ vs = -vs;
+ }
+ prom_print_dec(vs);
+ break;
case 'l':
++q;
- if (*q == 'u') { /* '%lu' */
+ if (*q == 0)
+ break;
+ else if (*q == 'x') {
+ ++q;
+ v = va_arg(args, unsigned long);
+ prom_print_hex(v);
+ } else if (*q == 'u') { /* '%lu' */
++q;
v = va_arg(args, unsigned long);
prom_print_dec(v);
+ } else if (*q == 'd') { /* %ld */
+ ++q;
+ vs = va_arg(args, long);
+ if (vs < 0) {
+ prom_print(RELOC("-"));
+ vs = -vs;
+ }
+ prom_print_dec(vs);
}
break;
}
@@ -676,8 +700,10 @@ static void __init early_cmdline_parse(void)
#endif /* CONFIG_PCI_MSI */
#ifdef CONFIG_PPC_SMLPAR
#define OV5_CMO 0x80 /* Cooperative Memory Overcommitment */
+#define OV5_XCMO 0x40 /* Page Coalescing */
#else
#define OV5_CMO 0x00
+#define OV5_XCMO 0x00
#endif
#define OV5_TYPE1_AFFINITY 0x80 /* Type 1 NUMA affinity */
@@ -732,7 +758,7 @@ static unsigned char ibm_architecture_vec[] = {
OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY |
OV5_DONATE_DEDICATE_CPU | OV5_MSI,
0,
- OV5_CMO,
+ OV5_CMO | OV5_XCMO,
OV5_TYPE1_AFFINITY,
0,
0,
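prom_printf() grows %d, %lx and %ld above; the only new logic is printing a sign before handing the magnitude to the decimal routine. A freestanding sketch of that path, with putchar() standing in for the Open Firmware console and a local decimal printer standing in for prom_print_dec(), is:

#include <stdio.h>

/* Stand-in for prom_print_dec(): print an unsigned value in decimal. */
static void print_dec(unsigned long v)
{
        char buf[24];
        int i = 0;

        do {
                buf[i++] = '0' + (v % 10);
                v /= 10;
        } while (v);
        while (i--)
                putchar(buf[i]);
}

/* The %d / %ld path added above: emit '-' first, then the magnitude. */
static void print_signed(long vs)
{
        if (vs < 0) {
                putchar('-');
                vs = -vs;
        }
        print_dec((unsigned long)vs);
}

int main(void)
{
        print_signed(-42);  putchar('\n');
        print_signed(0);    putchar('\n');
        print_signed(1234); putchar('\n');
        return 0;
}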
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 895b082f1e48..a6ae1cfad86c 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -463,7 +463,7 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset,
#ifdef CONFIG_VSX
/*
* Currently to set and and get all the vsx state, you need to call
- * the fp and VMX calls aswell. This only get/sets the lower 32
+ * the fp and VMX calls as well. This only get/sets the lower 32
* 128bit VSX registers.
*/
@@ -933,12 +933,16 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
if (data && !(data & DABR_TRANSLATION))
return -EIO;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ if (ptrace_get_breakpoints(task) < 0)
+ return -ESRCH;
+
bp = thread->ptrace_bps[0];
if ((!data) || !(data & (DABR_DATA_WRITE | DABR_DATA_READ))) {
if (bp) {
unregister_hw_breakpoint(bp);
thread->ptrace_bps[0] = NULL;
}
+ ptrace_put_breakpoints(task);
return 0;
}
if (bp) {
@@ -948,9 +952,12 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
(DABR_DATA_WRITE | DABR_DATA_READ),
&attr.bp_type);
ret = modify_user_hw_breakpoint(bp, &attr);
- if (ret)
+ if (ret) {
+ ptrace_put_breakpoints(task);
return ret;
+ }
thread->ptrace_bps[0] = bp;
+ ptrace_put_breakpoints(task);
thread->dabr = data;
return 0;
}
@@ -965,9 +972,12 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
ptrace_triggered, task);
if (IS_ERR(bp)) {
thread->ptrace_bps[0] = NULL;
+ ptrace_put_breakpoints(task);
return PTR_ERR(bp);
}
+ ptrace_put_breakpoints(task);
+
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
/* Move contents to the DABR register */
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 2097f2b3cba8..271ff6318eda 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -42,6 +42,7 @@
#include <asm/time.h>
#include <asm/mmu.h>
#include <asm/topology.h>
+#include <asm/pSeries_reconfig.h>
struct rtas_t rtas = {
.lock = __ARCH_SPIN_LOCK_UNLOCKED
@@ -494,7 +495,7 @@ unsigned int rtas_busy_delay(int status)
might_sleep();
ms = rtas_busy_delay_time(status);
- if (ms)
+ if (ms && need_resched())
msleep(ms);
return ms;
@@ -731,6 +732,7 @@ static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_w
atomic_set(&data->error, rc);
start_topology_update();
+ pSeries_coalesce_init();
if (wake_when_done) {
atomic_set(&data->done, 1);
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index 7980ec0e1e1a..67f6c3b51357 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -465,7 +465,7 @@ static void start_event_scan(void)
pr_debug("rtasd: will sleep for %d milliseconds\n",
(30000 / rtas_event_scan_rate));
- /* Retreive errors from nvram if any */
+ /* Retrieve errors from nvram if any */
retreive_nvram_error_log();
schedule_delayed_work_on(cpumask_first(cpu_online_mask),
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 9d4882a46647..79fca2651b65 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -381,7 +381,7 @@ static void __init cpu_init_thread_core_maps(int tpc)
int i;
threads_per_core = tpc;
- threads_core_mask = CPU_MASK_NONE;
+ cpumask_clear(&threads_core_mask);
/* This implementation only supports power of 2 number of threads
* for simplicity and performance
@@ -390,7 +390,7 @@ static void __init cpu_init_thread_core_maps(int tpc)
BUG_ON(tpc != (1 << threads_shift));
for (i = 0; i < tpc; i++)
- cpu_set(i, threads_core_mask);
+ cpumask_set_cpu(i, &threads_core_mask);
printk(KERN_INFO "CPU maps initialized for %d thread%s per core\n",
tpc, tpc > 1 ? "s" : "");
@@ -404,7 +404,7 @@ static void __init cpu_init_thread_core_maps(int tpc)
* cpu_present_mask
*
* Having the possible map set up early allows us to restrict allocations
- * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
+ * of things like irqstacks to nr_cpu_ids rather than NR_CPUS.
*
* We do not initialize the online map here; cpus set their own bits in
* cpu_online_mask as they come up.
@@ -424,7 +424,7 @@ void __init smp_setup_cpu_maps(void)
DBG("smp_setup_cpu_maps()\n");
- while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
+ while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < nr_cpu_ids) {
const int *intserv;
int j, len;
@@ -443,7 +443,7 @@ void __init smp_setup_cpu_maps(void)
intserv = &cpu; /* assume logical == phys */
}
- for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
+ for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
DBG(" thread %d -> cpu %d (hard id %d)\n",
j, cpu, intserv[j]);
set_cpu_present(cpu, true);
@@ -483,12 +483,12 @@ void __init smp_setup_cpu_maps(void)
if (cpu_has_feature(CPU_FTR_SMT))
maxcpus *= nthreads;
- if (maxcpus > NR_CPUS) {
+ if (maxcpus > nr_cpu_ids) {
printk(KERN_WARNING
"Partition configured for %d cpus, "
"operating system maximum is %d.\n",
- maxcpus, NR_CPUS);
- maxcpus = NR_CPUS;
+ maxcpus, nr_cpu_ids);
+ maxcpus = nr_cpu_ids;
} else
printk(KERN_INFO "Partition configured for %d cpus.\n",
maxcpus);
@@ -509,6 +509,9 @@ void __init smp_setup_cpu_maps(void)
*/
cpu_init_thread_core_maps(nthreads);
+ /* Now that possible cpus are set, set nr_cpu_ids for later use */
+ setup_nr_cpu_ids();
+
free_unused_pacas();
}
#endif /* CONFIG_SMP */
@@ -599,6 +602,10 @@ int check_legacy_ioport(unsigned long base_port)
* name instead */
if (!np)
np = of_find_node_by_name(NULL, "8042");
+ if (np) {
+ of_i8042_kbd_irq = 1;
+ of_i8042_aux_irq = 12;
+ }
break;
case FDC_BASE: /* FDC1 */
np = of_find_node_by_type(NULL, "fdc");
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 1d2fbc905303..620d792b52e4 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -48,6 +48,7 @@ extern void bootx_init(unsigned long r4, unsigned long phys);
int boot_cpuid = -1;
EXPORT_SYMBOL_GPL(boot_cpuid);
+int __initdata boot_cpu_count;
int boot_cpuid_phys;
int smp_hw_index[NR_CPUS];
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 5a0401fcaebd..a88bf2713d41 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -62,6 +62,7 @@
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/mmu_context.h>
+#include <asm/code-patching.h>
#include "setup.h"
@@ -72,6 +73,7 @@
#endif
int boot_cpuid = 0;
+int __initdata boot_cpu_count;
u64 ppc64_pft_size;
/* Pick defaults since we might want to patch instructions
@@ -233,6 +235,7 @@ void early_setup_secondary(void)
void smp_release_cpus(void)
{
unsigned long *ptr;
+ int i;
DBG(" -> smp_release_cpus()\n");
@@ -245,7 +248,16 @@ void smp_release_cpus(void)
ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
- PHYSICAL_START);
*ptr = __pa(generic_secondary_smp_init);
- mb();
+
+ /* And wait a bit for them to catch up */
+ for (i = 0; i < 100000; i++) {
+ mb();
+ HMT_low();
+ if (boot_cpu_count == 0)
+ break;
+ udelay(1);
+ }
+ DBG("boot_cpu_count = %d\n", boot_cpu_count);
DBG(" <- smp_release_cpus()\n");
}
@@ -423,17 +435,30 @@ void __init setup_system(void)
DBG(" <- setup_system()\n");
}
-static u64 slb0_limit(void)
+/* This returns the limit below which memory accesses to the linear
+ * mapping are guaranteed not to cause a TLB or SLB miss. This is
+ * used to allocate interrupt or emergency stacks for which our
+ * exception entry path doesn't deal with being interrupted.
+ */
+static u64 safe_stack_limit(void)
{
- if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) {
+#ifdef CONFIG_PPC_BOOK3E
+ /* Freescale BookE bolts the entire linear mapping */
+ if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
+ return linear_map_top;
+ /* Other BookE, we assume the first GB is bolted */
+ return 1ul << 30;
+#else
+ /* BookS, the first segment is bolted */
+ if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
return 1UL << SID_SHIFT_1T;
- }
return 1UL << SID_SHIFT;
+#endif
}
static void __init irqstack_early_init(void)
{
- u64 limit = slb0_limit();
+ u64 limit = safe_stack_limit();
unsigned int i;
/*
@@ -453,6 +478,9 @@ static void __init irqstack_early_init(void)
#ifdef CONFIG_PPC_BOOK3E
static void __init exc_lvl_early_init(void)
{
+ extern unsigned int interrupt_base_book3e;
+ extern unsigned int exc_debug_debug_book3e;
+
unsigned int i;
for_each_possible_cpu(i) {
@@ -463,6 +491,10 @@ static void __init exc_lvl_early_init(void)
mcheckirq_ctx[i] = (struct thread_info *)
__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
}
+
+ if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
+ patch_branch(&interrupt_base_book3e + (0x040 / 4) + 1,
+ (unsigned long)&exc_debug_debug_book3e, 0);
}
#else
#define exc_lvl_early_init()
@@ -486,7 +518,7 @@ static void __init emergency_stack_init(void)
* bringup, we need to get at them in real mode. This means they
* must also be within the RMO region.
*/
- limit = min(slb0_limit(), ppc64_rma_size);
+ limit = min(safe_stack_limit(), ppc64_rma_size);
for_each_possible_cpu(i) {
unsigned long sp;
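smp_release_cpus() now gives the secondaries a short grace period, polling boot_cpu_count in a bounded loop instead of issuing a single barrier. The shape of that wait (poll a shared counter, relax a little, give up after a fixed budget) is shown below with plain pthreads and usleep() standing in for the paca counter, HMT_low() and udelay(); build with -pthread.

#include <stdio.h>
#include <unistd.h>
#include <pthread.h>

/* Number of secondaries we are still waiting for; each one decrements it. */
static volatile int waiting = 3;

static void *secondary(void *arg)
{
        usleep(1000 * ((long)arg + 1));     /* pretend per-cpu startup work */
        __sync_fetch_and_sub(&waiting, 1);
        return NULL;
}

int main(void)
{
        pthread_t t[3];
        long i;

        for (i = 0; i < 3; i++)
                pthread_create(&t[i], NULL, secondary, (void *)i);

        /* Bounded wait, as in smp_release_cpus(): poll, relax a little,
         * and give up after a fixed budget instead of hanging forever. */
        for (i = 0; i < 100000; i++) {
                if (waiting == 0)
                        break;
                usleep(1);
        }
        printf("cpus still missing after the grace period: %d\n", waiting);

        for (i = 0; i < 3; i++)
                pthread_join(t[i], NULL);
        return 0;
}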
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 27c4a4584f80..da989fff19cc 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -381,7 +381,7 @@ badframe:
regs, uc, &uc->uc_mcontext);
#endif
if (show_unhandled_signals && printk_ratelimit())
- printk(regs->msr & MSR_SF ? fmt64 : fmt32,
+ printk(regs->msr & MSR_64BIT ? fmt64 : fmt32,
current->comm, current->pid, "rt_sigreturn",
(long)uc, regs->nip, regs->link);
@@ -469,7 +469,7 @@ badframe:
regs, frame, newsp);
#endif
if (show_unhandled_signals && printk_ratelimit())
- printk(regs->msr & MSR_SF ? fmt64 : fmt32,
+ printk(regs->msr & MSR_64BIT ? fmt64 : fmt32,
current->comm, current->pid, "setup_rt_frame",
(long)frame, regs->nip, regs->link);
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 981360509172..4a6f2ec7e761 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -57,6 +57,25 @@
#define DBG(fmt...)
#endif
+
+/* Store all idle threads; these can be reused instead of creating
+ * a new thread. This also avoids complicated thread destroy functionality
+ * for idle threads.
+ */
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
+ * removed after init for !CONFIG_HOTPLUG_CPU.
+ */
+static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
+#define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x))
+#define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p))
+#else
+static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
+#define get_idle_for_cpu(x) (idle_thread_array[(x)])
+#define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p))
+#endif
+
struct thread_info *secondary_ti;
DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
@@ -76,7 +95,7 @@ int smt_enabled_at_boot = 1;
static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;
#ifdef CONFIG_PPC64
-void __devinit smp_generic_kick_cpu(int nr)
+int __devinit smp_generic_kick_cpu(int nr)
{
BUG_ON(nr < 0 || nr >= NR_CPUS);
@@ -87,37 +106,10 @@ void __devinit smp_generic_kick_cpu(int nr)
*/
paca[nr].cpu_start = 1;
smp_mb();
-}
-#endif
-void smp_message_recv(int msg)
-{
- switch(msg) {
- case PPC_MSG_CALL_FUNCTION:
- generic_smp_call_function_interrupt();
- break;
- case PPC_MSG_RESCHEDULE:
- /* we notice need_resched on exit */
- break;
- case PPC_MSG_CALL_FUNC_SINGLE:
- generic_smp_call_function_single_interrupt();
- break;
- case PPC_MSG_DEBUGGER_BREAK:
- if (crash_ipi_function_ptr) {
- crash_ipi_function_ptr(get_irq_regs());
- break;
- }
-#ifdef CONFIG_DEBUGGER
- debugger_ipi(get_irq_regs());
- break;
-#endif /* CONFIG_DEBUGGER */
- /* FALLTHROUGH */
- default:
- printk("SMP %d: smp_message_recv(): unknown msg %d\n",
- smp_processor_id(), msg);
- break;
- }
+ return 0;
}
+#endif
static irqreturn_t call_function_action(int irq, void *data)
{
@@ -127,7 +119,7 @@ static irqreturn_t call_function_action(int irq, void *data)
static irqreturn_t reschedule_action(int irq, void *data)
{
- /* we just need the return path side effect of checking need_resched */
+ scheduler_ipi();
return IRQ_HANDLED;
}
@@ -137,9 +129,17 @@ static irqreturn_t call_function_single_action(int irq, void *data)
return IRQ_HANDLED;
}
-static irqreturn_t debug_ipi_action(int irq, void *data)
+irqreturn_t debug_ipi_action(int irq, void *data)
{
- smp_message_recv(PPC_MSG_DEBUGGER_BREAK);
+ if (crash_ipi_function_ptr) {
+ crash_ipi_function_ptr(get_irq_regs());
+ return IRQ_HANDLED;
+ }
+
+#ifdef CONFIG_DEBUGGER
+ debugger_ipi(get_irq_regs());
+#endif /* CONFIG_DEBUGGER */
+
return IRQ_HANDLED;
}
@@ -178,6 +178,66 @@ int smp_request_message_ipi(int virq, int msg)
return err;
}
+#ifdef CONFIG_PPC_SMP_MUXED_IPI
+struct cpu_messages {
+ int messages; /* current messages */
+ unsigned long data; /* data for cause ipi */
+};
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
+
+void smp_muxed_ipi_set_data(int cpu, unsigned long data)
+{
+ struct cpu_messages *info = &per_cpu(ipi_message, cpu);
+
+ info->data = data;
+}
+
+void smp_muxed_ipi_message_pass(int cpu, int msg)
+{
+ struct cpu_messages *info = &per_cpu(ipi_message, cpu);
+ char *message = (char *)&info->messages;
+
+ message[msg] = 1;
+ mb();
+ smp_ops->cause_ipi(cpu, info->data);
+}
+
+void smp_muxed_ipi_resend(void)
+{
+ struct cpu_messages *info = &__get_cpu_var(ipi_message);
+
+ if (info->messages)
+ smp_ops->cause_ipi(smp_processor_id(), info->data);
+}
+
+irqreturn_t smp_ipi_demux(void)
+{
+ struct cpu_messages *info = &__get_cpu_var(ipi_message);
+ unsigned int all;
+
+ mb(); /* order any irq clear */
+
+ do {
+ all = xchg_local(&info->messages, 0);
+
+#ifdef __BIG_ENDIAN
+ if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION)))
+ generic_smp_call_function_interrupt();
+ if (all & (1 << (24 - 8 * PPC_MSG_RESCHEDULE)))
+ scheduler_ipi();
+ if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNC_SINGLE)))
+ generic_smp_call_function_single_interrupt();
+ if (all & (1 << (24 - 8 * PPC_MSG_DEBUGGER_BREAK)))
+ debug_ipi_action(0, NULL);
+#else
+#error Unsupported ENDIAN
+#endif
+ } while (info->messages);
+
+ return IRQ_HANDLED;
+}
+#endif /* CONFIG_PPC_SMP_MUXED_IPI */
+
void smp_send_reschedule(int cpu)
{
if (likely(smp_ops))
@@ -197,11 +257,18 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}
-#ifdef CONFIG_DEBUGGER
-void smp_send_debugger_break(int cpu)
+#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
+void smp_send_debugger_break(void)
{
- if (likely(smp_ops))
- smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
+ int cpu;
+ int me = raw_smp_processor_id();
+
+ if (unlikely(!smp_ops))
+ return;
+
+ for_each_online_cpu(cpu)
+ if (cpu != me)
+ smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif
@@ -209,9 +276,9 @@ void smp_send_debugger_break(int cpu)
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
crash_ipi_function_ptr = crash_ipi_callback;
- if (crash_ipi_callback && smp_ops) {
+ if (crash_ipi_callback) {
mb();
- smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_DEBUGGER_BREAK);
+ smp_send_debugger_break();
}
}
#endif
@@ -238,23 +305,6 @@ static void __devinit smp_store_cpu_info(int id)
per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
}
-static void __init smp_create_idle(unsigned int cpu)
-{
- struct task_struct *p;
-
- /* create a process for the processor */
- p = fork_idle(cpu);
- if (IS_ERR(p))
- panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
-#ifdef CONFIG_PPC64
- paca[cpu].__current = p;
- paca[cpu].kstack = (unsigned long) task_thread_info(p)
- + THREAD_SIZE - STACK_FRAME_OVERHEAD;
-#endif
- current_set[cpu] = task_thread_info(p);
- task_thread_info(p)->cpu = cpu;
-}
-
void __init smp_prepare_cpus(unsigned int max_cpus)
{
unsigned int cpu;
@@ -288,10 +338,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
max_cpus = NR_CPUS;
else
max_cpus = 1;
-
- for_each_possible_cpu(cpu)
- if (cpu != boot_cpuid)
- smp_create_idle(cpu);
}
void __devinit smp_prepare_boot_cpu(void)
@@ -305,7 +351,7 @@ void __devinit smp_prepare_boot_cpu(void)
#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
-DEFINE_PER_CPU(int, cpu_state) = { 0 };
+static DEFINE_PER_CPU(int, cpu_state) = { 0 };
int generic_cpu_disable(void)
{
@@ -317,30 +363,8 @@ int generic_cpu_disable(void)
set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
vdso_data->processorCount--;
- fixup_irqs(cpu_online_mask);
-#endif
- return 0;
-}
-
-int generic_cpu_enable(unsigned int cpu)
-{
- /* Do the normal bootup if we haven't
- * already bootstrapped. */
- if (system_state != SYSTEM_RUNNING)
- return -ENOSYS;
-
- /* get the target out of it's holding state */
- per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
- smp_wmb();
-
- while (!cpu_online(cpu))
- cpu_relax();
-
-#ifdef CONFIG_PPC64
- fixup_irqs(cpu_online_mask);
- /* counter the irq disable in fixup_irqs */
- local_irq_enable();
#endif
+ migrate_irqs();
return 0;
}
@@ -362,37 +386,89 @@ void generic_mach_cpu_die(void)
unsigned int cpu;
local_irq_disable();
+ idle_task_exit();
cpu = smp_processor_id();
printk(KERN_DEBUG "CPU%d offline\n", cpu);
__get_cpu_var(cpu_state) = CPU_DEAD;
smp_wmb();
while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
cpu_relax();
- set_cpu_online(cpu, true);
- local_irq_enable();
+}
+
+void generic_set_cpu_dead(unsigned int cpu)
+{
+ per_cpu(cpu_state, cpu) = CPU_DEAD;
}
#endif
-static int __devinit cpu_enable(unsigned int cpu)
+struct create_idle {
+ struct work_struct work;
+ struct task_struct *idle;
+ struct completion done;
+ int cpu;
+};
+
+static void __cpuinit do_fork_idle(struct work_struct *work)
{
- if (smp_ops && smp_ops->cpu_enable)
- return smp_ops->cpu_enable(cpu);
+ struct create_idle *c_idle =
+ container_of(work, struct create_idle, work);
- return -ENOSYS;
+ c_idle->idle = fork_idle(c_idle->cpu);
+ complete(&c_idle->done);
}
-int __cpuinit __cpu_up(unsigned int cpu)
+static int __cpuinit create_idle(unsigned int cpu)
{
- int c;
+ struct thread_info *ti;
+ struct create_idle c_idle = {
+ .cpu = cpu,
+ .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
+ };
+ INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
- secondary_ti = current_set[cpu];
- if (!cpu_enable(cpu))
- return 0;
+ c_idle.idle = get_idle_for_cpu(cpu);
+
+ /* We can't use kernel_thread since we must avoid
+ * rescheduling the child. We use a workqueue because
+ * we want to fork from a kernel thread, not whatever
+ * userspace process happens to be trying to online us.
+ */
+ if (!c_idle.idle) {
+ schedule_work(&c_idle.work);
+ wait_for_completion(&c_idle.done);
+ } else
+ init_idle(c_idle.idle, cpu);
+ if (IS_ERR(c_idle.idle)) {
+ pr_err("Failed fork for CPU %u: %li", cpu, PTR_ERR(c_idle.idle));
+ return PTR_ERR(c_idle.idle);
+ }
+ ti = task_thread_info(c_idle.idle);
+
+#ifdef CONFIG_PPC64
+ paca[cpu].__current = c_idle.idle;
+ paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
+#endif
+ ti->cpu = cpu;
+ current_set[cpu] = ti;
+
+ return 0;
+}
+
+int __cpuinit __cpu_up(unsigned int cpu)
+{
+ int rc, c;
if (smp_ops == NULL ||
(smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
return -EINVAL;
+ /* Make sure we have an idle thread */
+ rc = create_idle(cpu);
+ if (rc)
+ return rc;
+
+ secondary_ti = current_set[cpu];
+
/* Make sure callin-map entry is 0 (can be leftover a CPU
* hotplug
*/
@@ -406,7 +482,11 @@ int __cpuinit __cpu_up(unsigned int cpu)
/* wake up cpus */
DBG("smp: kicking cpu %d\n", cpu);
- smp_ops->kick_cpu(cpu);
+ rc = smp_ops->kick_cpu(cpu);
+ if (rc) {
+ pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
+ return rc;
+ }
/*
* wait to see if the cpu made a callin (is actually up).
@@ -479,7 +559,7 @@ int cpu_first_thread_of_core(int core)
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
-/* Must be called when no change can occur to cpu_present_map,
+/* Must be called when no change can occur to cpu_present_mask,
* i.e. during cpu online or offline.
*/
static struct device_node *cpu_to_l2cache(int cpu)
@@ -502,7 +582,7 @@ static struct device_node *cpu_to_l2cache(int cpu)
}
/* Activate a secondary processor. */
-int __devinit start_secondary(void *unused)
+void __devinit start_secondary(void *unused)
{
unsigned int cpu = smp_processor_id();
struct device_node *l2_cache;
@@ -523,6 +603,10 @@ int __devinit start_secondary(void *unused)
secondary_cpu_time_init();
+#ifdef CONFIG_PPC64
+ if (system_state == SYSTEM_RUNNING)
+ vdso_data->processorCount++;
+#endif
ipi_call_lock();
notify_cpu_starting(cpu);
set_cpu_online(cpu, true);
@@ -558,7 +642,8 @@ int __devinit start_secondary(void *unused)
local_irq_enable();
cpu_idle();
- return 0;
+
+ BUG();
}
int setup_profiling_timer(unsigned int multiplier)
@@ -575,7 +660,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
* so we pin ourselves to CPU 0 for a short while
*/
alloc_cpumask_var(&old_mask, GFP_NOWAIT);
- cpumask_copy(old_mask, &current->cpus_allowed);
+ cpumask_copy(old_mask, tsk_cpus_allowed(current));
set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));
if (smp_ops && smp_ops->setup_cpu)
@@ -585,7 +670,11 @@ void __init smp_cpus_done(unsigned int max_cpus)
free_cpumask_var(old_mask);
+ if (smp_ops && smp_ops->bringup_done)
+ smp_ops->bringup_done();
+
dump_numa_cpu_topology();
+
}
int arch_sd_sibling_asym_packing(void)
@@ -660,5 +749,9 @@ void cpu_die(void)
{
if (ppc_md.cpu_die)
ppc_md.cpu_die();
+
+ /* If we return, we re-enter start_secondary */
+ start_secondary_resume();
}
+
#endif
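The new CONFIG_PPC_SMP_MUXED_IPI code above multiplexes the four IPI messages onto one hardware interrupt by giving each message its own byte of a per-cpu word and clearing the whole word in the demux path. The sketch below shows the same set-a-byte / read-and-dispatch pattern in user space; it indexes the bytes directly instead of using the big-endian shifts from smp_ipi_demux(), and a plain read-and-clear replaces xchg_local() since there is only one thread here.

#include <stdio.h>
#include <stdint.h>

enum { MSG_CALL_FUNCTION, MSG_RESCHEDULE, MSG_CALL_FUNC_SINGLE,
       MSG_DEBUGGER_BREAK, MSG_COUNT };

static const char *msg_name[MSG_COUNT] = {
        "call_function", "reschedule", "call_func_single", "debugger_break",
};

/* One word per CPU; each message type owns one byte of it, so different
 * senders can flag different messages without any locking. */
static uint32_t messages;

static void muxed_ipi_message_pass(int msg)
{
        ((unsigned char *)&messages)[msg] = 1;
        /* the real code would now call smp_ops->cause_ipi(cpu, data) */
}

static void ipi_demux(void)
{
        uint32_t all;
        int msg;

        do {
                /* The kernel uses xchg_local(); a plain read-and-clear is
                 * enough for this single-threaded demo. */
                all = messages;
                messages = 0;

                for (msg = 0; msg < MSG_COUNT; msg++)
                        if (((unsigned char *)&all)[msg])
                                printf("handling %s\n", msg_name[msg]);
        } while (messages);
}

int main(void)
{
        muxed_ipi_message_pass(MSG_RESCHEDULE);
        muxed_ipi_message_pass(MSG_DEBUGGER_BREAK);
        ipi_demux();
        return 0;
}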
diff --git a/arch/powerpc/kernel/swsusp_32.S b/arch/powerpc/kernel/swsusp_32.S
index b0754e237438..ba4dee3d233f 100644
--- a/arch/powerpc/kernel/swsusp_32.S
+++ b/arch/powerpc/kernel/swsusp_32.S
@@ -143,7 +143,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
/* Disable MSR:DR to make sure we don't take a TLB or
* hash miss during the copy, as our hash table will
- * for a while be unuseable. For .text, we assume we are
+ * for a while be unusable. For .text, we assume we are
* covered by a BAT. This works only for non-G5 at this
* point. G5 will need a better approach, possibly using
* a small temporary hash table filled with large mappings,
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index c0d8c2006bf4..f0f2199e64e1 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -182,6 +182,41 @@ static SYSDEV_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
static SYSDEV_ATTR(spurr, 0600, show_spurr, NULL);
static SYSDEV_ATTR(dscr, 0600, show_dscr, store_dscr);
static SYSDEV_ATTR(purr, 0600, show_purr, store_purr);
+
+unsigned long dscr_default = 0;
+EXPORT_SYMBOL(dscr_default);
+
+static ssize_t show_dscr_default(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lx\n", dscr_default);
+}
+
+static ssize_t __used store_dscr_default(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr, const char *buf,
+ size_t count)
+{
+ unsigned long val;
+ int ret = 0;
+
+ ret = sscanf(buf, "%lx", &val);
+ if (ret != 1)
+ return -EINVAL;
+ dscr_default = val;
+
+ return count;
+}
+
+static SYSDEV_CLASS_ATTR(dscr_default, 0600,
+ show_dscr_default, store_dscr_default);
+
+static void sysfs_create_dscr_default(void)
+{
+ int err = 0;
+ if (cpu_has_feature(CPU_FTR_DSCR))
+ err = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
+ &attr_dscr_default.attr);
+}
#endif /* CONFIG_PPC64 */
#ifdef HAS_PPC_PMC_PA6T
@@ -617,6 +652,9 @@ static int __init topology_init(void)
if (cpu_online(cpu))
register_cpu_online(cpu);
}
+#ifdef CONFIG_PPC64
+ sysfs_create_dscr_default();
+#endif /* CONFIG_PPC64 */
return 0;
}
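Taken together, the sysfs.c and process.c hunks give a new thread a DSCR that is either inherited from the parent or seeded from the system-wide dscr_default. The decision is a small pure function; the sketch below lifts it out so the three cases are visible (the field names mirror the kernel's, but nothing here touches a real SPR):

#include <stdio.h>

struct thread_dscr {
        int dscr_inherit;          /* has this thread set its own DSCR? */
        unsigned long dscr;
};

static unsigned long dscr_default;   /* system-wide default, from sysfs */

/* The same decision copy_thread() now makes for the child's DSCR. */
static void child_dscr(const struct thread_dscr *parent,
                       struct thread_dscr *child)
{
        if (parent->dscr_inherit) {
                child->dscr_inherit = 1;
                child->dscr = parent->dscr;
        } else if (dscr_default != 0) {
                child->dscr_inherit = 1;
                child->dscr = dscr_default;
        } else {
                child->dscr_inherit = 0;
                child->dscr = 0;
        }
}

int main(void)
{
        struct thread_dscr parent = { 0, 0 }, child;

        dscr_default = 0x10;          /* admin wrote 10 to dscr_default */
        child_dscr(&parent, &child);
        printf("child dscr=0x%lx inherit=%d\n", child.dscr, child.dscr_inherit);

        parent.dscr_inherit = 1;      /* parent set its own DSCR */
        parent.dscr = 0x7;
        child_dscr(&parent, &child);
        printf("child dscr=0x%lx inherit=%d\n", child.dscr, child.dscr_inherit);
        return 0;
}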
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 09d31dbf43f9..f33acfd872ad 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -229,6 +229,9 @@ static u64 scan_dispatch_log(u64 stop_tb)
u64 stolen = 0;
u64 dtb;
+ if (!dtl)
+ return 0;
+
if (i == vpa->dtl_idx)
return 0;
while (i < vpa->dtl_idx) {
@@ -356,7 +359,7 @@ void account_system_vtime(struct task_struct *tsk)
}
get_paca()->user_time_scaled += user_scaled;
- if (in_irq() || idle_task(smp_processor_id()) != tsk) {
+ if (in_interrupt() || idle_task(smp_processor_id()) != tsk) {
account_system_time(tsk, 0, delta, sys_scaled);
if (stolen)
account_steal_time(stolen);
@@ -577,14 +580,21 @@ void timer_interrupt(struct pt_regs * regs)
struct clock_event_device *evt = &decrementer->event;
u64 now;
+ /* Ensure a positive value is written to the decrementer, or else
+ * some CPUs will continue to take decrementer exceptions.
+ */
+ set_dec(DECREMENTER_MAX);
+
+ /* Some implementations of hotplug will get timer interrupts while
+ * offline; just ignore these.
+ */
+ if (!cpu_online(smp_processor_id()))
+ return;
+
trace_timer_interrupt_entry(regs);
__get_cpu_var(irq_stat).timer_irqs++;
- /* Ensure a positive value is written to the decrementer, or else
- * some CPUs will continuue to take decrementer exceptions */
- set_dec(DECREMENTER_MAX);
-
#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
if (atomic_read(&ppc_n_lost_interrupts) != 0)
do_IRQ(regs);
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index bd74fac169be..b13306b0d925 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -143,7 +143,6 @@ int die(const char *str, struct pt_regs *regs, long err)
#endif
printk("%s\n", ppc_md.name ? ppc_md.name : "");
- sysfs_printk_last_file();
if (notify_die(DIE_OOPS, str, regs, err, 255,
SIGSEGV) == NOTIFY_STOP)
return 1;
@@ -199,7 +198,7 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
} else if (show_unhandled_signals &&
unhandled_signal(current, signr) &&
printk_ratelimit()) {
- printk(regs->msr & MSR_SF ? fmt64 : fmt32,
+ printk(regs->msr & MSR_64BIT ? fmt64 : fmt32,
current->comm, current->pid, signr,
addr, regs->nip, regs->link, code);
}
@@ -221,7 +220,7 @@ void system_reset_exception(struct pt_regs *regs)
}
#ifdef CONFIG_KEXEC
- cpu_set(smp_processor_id(), cpus_in_sr);
+ cpumask_set_cpu(smp_processor_id(), &cpus_in_sr);
#endif
die("System Reset", regs, SIGABRT);
@@ -909,6 +908,26 @@ static int emulate_instruction(struct pt_regs *regs)
return emulate_isel(regs, instword);
}
+#ifdef CONFIG_PPC64
+ /* Emulate the mfspr rD, DSCR. */
+ if (((instword & PPC_INST_MFSPR_DSCR_MASK) == PPC_INST_MFSPR_DSCR) &&
+ cpu_has_feature(CPU_FTR_DSCR)) {
+ PPC_WARN_EMULATED(mfdscr, regs);
+ rd = (instword >> 21) & 0x1f;
+ regs->gpr[rd] = mfspr(SPRN_DSCR);
+ return 0;
+ }
+ /* Emulate the mtspr DSCR, rD. */
+ if (((instword & PPC_INST_MTSPR_DSCR_MASK) == PPC_INST_MTSPR_DSCR) &&
+ cpu_has_feature(CPU_FTR_DSCR)) {
+ PPC_WARN_EMULATED(mtdscr, regs);
+ rd = (instword >> 21) & 0x1f;
+ mtspr(SPRN_DSCR, regs->gpr[rd]);
+ current->thread.dscr_inherit = 1;
+ return 0;
+ }
+#endif
+
return -EINVAL;
}
@@ -959,7 +978,7 @@ void __kprobes program_check_exception(struct pt_regs *regs)
* ESR_DST (!?) or 0. In the process of chasing this with the
* hardware people - not sure if it can happen on any illegal
* instruction or only on FP instructions, whether there is a
- * pattern to occurences etc. -dgibson 31/Mar/2003 */
+ * pattern to occurrences etc. -dgibson 31/Mar/2003 */
switch (do_mathemu(regs)) {
case 0:
emulate_single_step(regs);
@@ -1506,6 +1525,10 @@ struct ppc_emulated ppc_emulated = {
#ifdef CONFIG_VSX
WARN_EMULATED_SETUP(vsx),
#endif
+#ifdef CONFIG_PPC64
+ WARN_EMULATED_SETUP(mfdscr),
+ WARN_EMULATED_SETUP(mtdscr),
+#endif
};
u32 ppc_warn_emulated;
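The DSCR emulation added to traps.c only has to pull the rD field out of the faulting mfspr/mtspr word: bits 21-25 counted from the least-significant end, as the shift implies. A tiny decode sketch follows; the instruction word is a placeholder with rD set to 9, not a real encoding, and the PPC_INST_* mask comparison is assumed to have matched already:

#include <stdio.h>
#include <stdint.h>

/* Given a 32-bit PowerPC instruction word that already matched the
 * mfspr/mtspr-DSCR mask, extract the GPR field the same way traps.c
 * does: rd = (instword >> 21) & 0x1f. */
static unsigned int decode_rd(uint32_t instword)
{
        return (instword >> 21) & 0x1f;
}

int main(void)
{
        uint32_t instword = 9u << 21;   /* pretend instruction with rD = 9 */

        printf("rD = %u\n", decode_rd(instword));
        return 0;
}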
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index e39cad83c884..23d65abbedce 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -62,6 +62,8 @@ void __init udbg_early_init(void)
udbg_init_cpm();
#elif defined(CONFIG_PPC_EARLY_DEBUG_USBGECKO)
udbg_init_usbgecko();
+#elif defined(CONFIG_PPC_EARLY_DEBUG_WSP)
+ udbg_init_wsp();
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
index b4b167b33643..6837f839ab78 100644
--- a/arch/powerpc/kernel/udbg_16550.c
+++ b/arch/powerpc/kernel/udbg_16550.c
@@ -1,5 +1,5 @@
/*
- * udbg for NS16550 compatable serial ports
+ * udbg for NS16550 compatible serial ports
*
* Copyright (C) 2001-2005 PPC 64 Team, IBM Corp
*
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <asm/udbg.h>
#include <asm/io.h>
+#include <asm/reg_a2.h>
extern u8 real_readb(volatile u8 __iomem *addr);
extern void real_writeb(u8 data, volatile u8 __iomem *addr);
@@ -298,3 +299,53 @@ void __init udbg_init_40x_realmode(void)
udbg_getc_poll = NULL;
}
#endif /* CONFIG_PPC_EARLY_DEBUG_40x */
+
+#ifdef CONFIG_PPC_EARLY_DEBUG_WSP
+static void udbg_wsp_flush(void)
+{
+ if (udbg_comport) {
+ while ((readb(&udbg_comport->lsr) & LSR_THRE) == 0)
+ /* wait for idle */;
+ }
+}
+
+static void udbg_wsp_putc(char c)
+{
+ if (udbg_comport) {
+ if (c == '\n')
+ udbg_wsp_putc('\r');
+ udbg_wsp_flush();
+ writeb(c, &udbg_comport->thr); eieio();
+ }
+}
+
+static int udbg_wsp_getc(void)
+{
+ if (udbg_comport) {
+ while ((readb(&udbg_comport->lsr) & LSR_DR) == 0)
+ ; /* wait for char */
+ return readb(&udbg_comport->rbr);
+ }
+ return -1;
+}
+
+static int udbg_wsp_getc_poll(void)
+{
+ if (udbg_comport)
+ if (readb(&udbg_comport->lsr) & LSR_DR)
+ return readb(&udbg_comport->rbr);
+ return -1;
+}
+
+void __init udbg_init_wsp(void)
+{
+ udbg_comport = (struct NS16550 __iomem *)WSP_UART_VIRT;
+
+ udbg_init_uart(udbg_comport, 57600, 50000000);
+
+ udbg_putc = udbg_wsp_putc;
+ udbg_flush = udbg_wsp_flush;
+ udbg_getc = udbg_wsp_getc;
+ udbg_getc_poll = udbg_wsp_getc_poll;
+}
+#endif /* CONFIG_PPC_EARLY_DEBUG_WSP */
diff --git a/arch/powerpc/kernel/vdso32/sigtramp.S b/arch/powerpc/kernel/vdso32/sigtramp.S
index 68d49dd71dcc..cf0c9c9c24f9 100644
--- a/arch/powerpc/kernel/vdso32/sigtramp.S
+++ b/arch/powerpc/kernel/vdso32/sigtramp.S
@@ -19,7 +19,7 @@
/* The nop here is a hack. The dwarf2 unwind routines subtract 1 from
the return address to get an address in the middle of the presumed
- call instruction. Since we don't have a call here, we artifically
+ call instruction. Since we don't have a call here, we artificially
extend the range covered by the unwind info by adding a nop before
the real start. */
nop
diff --git a/arch/powerpc/kernel/vdso64/sigtramp.S b/arch/powerpc/kernel/vdso64/sigtramp.S
index 59eb59bb4082..45ea281e9a21 100644
--- a/arch/powerpc/kernel/vdso64/sigtramp.S
+++ b/arch/powerpc/kernel/vdso64/sigtramp.S
@@ -20,7 +20,7 @@
/* The nop here is a hack. The dwarf2 unwind routines subtract 1 from
the return address to get an address in the middle of the presumed
- call instruction. Since we don't have a call here, we artifically
+ call instruction. Since we don't have a call here, we artificially
extend the range covered by the unwind info by padding before the
real start. */
nop
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 9de6f396cf85..4d5a3edff49e 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -102,7 +102,7 @@ _GLOBAL(giveup_altivec)
MTMSRD(r5) /* enable use of VMX now */
isync
PPC_LCMPI 0,r3,0
- beqlr- /* if no previous owner, done */
+ beqlr /* if no previous owner, done */
addi r3,r3,THREAD /* want THREAD of task */
PPC_LL r5,PT_REGS(r3)
PPC_LCMPI 0,r5,0
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index c961de40c676..0f95b5cce033 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -236,7 +236,7 @@ void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
- return test_bit(BOOK3S_INTERRUPT_DECREMENTER >> 7, &vcpu->arch.pending_exceptions);
+ return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 2b9c9088d00e..1a1b34487e71 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -35,9 +35,7 @@
#if defined(CONFIG_PPC_BOOK3S_64)
-#define LOAD_SHADOW_VCPU(reg) \
- mfspr reg, SPRN_SPRG_PACA
-
+#define LOAD_SHADOW_VCPU(reg) GET_PACA(reg)
#define SHADOW_VCPU_OFF PACA_KVM_SVCPU
#define MSR_NOIRQ MSR_KERNEL & ~(MSR_IR | MSR_DR)
#define FUNC(name) GLUE(.,name)
@@ -72,7 +70,7 @@
.global kvmppc_trampoline_\intno
kvmppc_trampoline_\intno:
- mtspr SPRN_SPRG_SCRATCH0, r13 /* Save r13 */
+ SET_SCRATCH0(r13) /* Save r13 */
/*
* First thing to do is to find out if we're coming
@@ -91,7 +89,7 @@ kvmppc_trampoline_\intno:
lwz r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
mtcr r12
PPC_LL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
- mfspr r13, SPRN_SPRG_SCRATCH0 /* r13 = original r13 */
+ GET_SCRATCH0(r13) /* r13 = original r13 */
b kvmppc_resume_\intno /* Get back original handler */
/* Now we know we're handling a KVM guest */
@@ -114,6 +112,9 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_MACHINE_CHECK
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_STORAGE
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_STORAGE
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_EXTERNAL
+#ifdef CONFIG_PPC_BOOK3S_64
+INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_EXTERNAL_HV
+#endif
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALIGNMENT
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PROGRAM
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_FP_UNAVAIL
@@ -158,7 +159,7 @@ kvmppc_handler_skip_ins:
lwz r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
mtcr r12
PPC_LL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
- mfspr r13, SPRN_SPRG_SCRATCH0
+ GET_SCRATCH0(r13)
/* And get back into the code */
RFI
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 7c52ed0b7051..451264274b8c 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -155,14 +155,20 @@ kvmppc_handler_trampoline_exit:
PPC_LL r2, (SHADOW_VCPU_OFF + SVCPU_HOST_R2)(r13)
/* Save guest PC and MSR */
- mfsrr0 r3
+ andi. r0,r12,0x2
+ beq 1f
+ mfspr r3,SPRN_HSRR0
+ mfspr r4,SPRN_HSRR1
+ andi. r12,r12,0x3ffd
+ b 2f
+1: mfsrr0 r3
mfsrr1 r4
-
+2:
PPC_STL r3, (SHADOW_VCPU_OFF + SVCPU_PC)(r13)
PPC_STL r4, (SHADOW_VCPU_OFF + SVCPU_SHADOW_SRR1)(r13)
/* Get scratch'ed off registers */
- mfspr r9, SPRN_SPRG_SCRATCH0
+ GET_SCRATCH0(r9)
PPC_LL r8, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
lwz r7, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
diff --git a/arch/powerpc/lib/alloc.c b/arch/powerpc/lib/alloc.c
index f53e09c7dac7..13b676c20d12 100644
--- a/arch/powerpc/lib/alloc.c
+++ b/arch/powerpc/lib/alloc.c
@@ -6,14 +6,6 @@
#include <asm/system.h>
-void * __init_refok alloc_maybe_bootmem(size_t size, gfp_t mask)
-{
- if (mem_init_done)
- return kmalloc(size, mask);
- else
- return alloc_bootmem(size);
-}
-
void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask)
{
void *p;
diff --git a/arch/powerpc/lib/copypage_64.S b/arch/powerpc/lib/copypage_64.S
index 4d4eeb900486..53dcb6b1b708 100644
--- a/arch/powerpc/lib/copypage_64.S
+++ b/arch/powerpc/lib/copypage_64.S
@@ -6,6 +6,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
+#include <asm/page.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
@@ -15,9 +16,9 @@ PPC64_CACHES:
.tc ppc64_caches[TC],ppc64_caches
.section ".text"
-
-_GLOBAL(copy_4K_page)
- li r5,4096 /* 4K page size */
+_GLOBAL(copy_page)
+ lis r5,PAGE_SIZE@h
+ ori r5,r5,PAGE_SIZE@l
BEGIN_FTR_SECTION
ld r10,PPC64_CACHES@toc(r2)
lwz r11,DCACHEL1LOGLINESIZE(r10) /* log2 of cache line size */
diff --git a/arch/powerpc/lib/devres.c b/arch/powerpc/lib/devres.c
index deac4d30daf4..e91615abae66 100644
--- a/arch/powerpc/lib/devres.c
+++ b/arch/powerpc/lib/devres.c
@@ -9,11 +9,11 @@
#include <linux/device.h> /* devres_*(), devm_ioremap_release() */
#include <linux/gfp.h>
-#include <linux/io.h> /* ioremap_flags() */
+#include <linux/io.h> /* ioremap_prot() */
#include <linux/module.h> /* EXPORT_SYMBOL() */
/**
- * devm_ioremap_prot - Managed ioremap_flags()
+ * devm_ioremap_prot - Managed ioremap_prot()
* @dev: Generic device to remap IO address for
* @offset: BUS offset to map
* @size: Size of map
@@ -31,7 +31,7 @@ void __iomem *devm_ioremap_prot(struct device *dev, resource_size_t offset,
if (!ptr)
return NULL;
- addr = ioremap_flags(offset, size, flags);
+ addr = ioremap_prot(offset, size, flags);
if (addr) {
*ptr = addr;
devres_add(dev, ptr);
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index ae5189ab0049..9a52349874ee 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
+#include <linux/prefetch.h>
#include <asm/sstep.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
@@ -45,6 +46,18 @@ extern int do_stxvd2x(int rn, unsigned long ea);
#endif
/*
+ * Emulate the truncation of 64-bit values in 32-bit mode.
+ */
+static unsigned long truncate_if_32bit(unsigned long msr, unsigned long val)
+{
+#ifdef __powerpc64__
+ if ((msr & MSR_64BIT) == 0)
+ val &= 0xffffffffUL;
+#endif
+ return val;
+}
+
+/*
* Determine whether a conditional branch instruction would branch.
*/
static int __kprobes branch_taken(unsigned int instr, struct pt_regs *regs)
@@ -90,11 +103,8 @@ static unsigned long __kprobes dform_ea(unsigned int instr, struct pt_regs *regs
if (instr & 0x04000000) /* update forms */
regs->gpr[ra] = ea;
}
-#ifdef __powerpc64__
- if (!(regs->msr & MSR_SF))
- ea &= 0xffffffffUL;
-#endif
- return ea;
+
+ return truncate_if_32bit(regs->msr, ea);
}
#ifdef __powerpc64__
@@ -113,9 +123,8 @@ static unsigned long __kprobes dsform_ea(unsigned int instr, struct pt_regs *reg
if ((instr & 3) == 1) /* update forms */
regs->gpr[ra] = ea;
}
- if (!(regs->msr & MSR_SF))
- ea &= 0xffffffffUL;
- return ea;
+
+ return truncate_if_32bit(regs->msr, ea);
}
#endif /* __powerpc64 */
@@ -136,11 +145,8 @@ static unsigned long __kprobes xform_ea(unsigned int instr, struct pt_regs *regs
if (do_update) /* update forms */
regs->gpr[ra] = ea;
}
-#ifdef __powerpc64__
- if (!(regs->msr & MSR_SF))
- ea &= 0xffffffffUL;
-#endif
- return ea;
+
+ return truncate_if_32bit(regs->msr, ea);
}
/*
@@ -466,7 +472,7 @@ static void __kprobes set_cr0(struct pt_regs *regs, int rd)
regs->ccr = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
#ifdef __powerpc64__
- if (!(regs->msr & MSR_SF))
+ if (!(regs->msr & MSR_64BIT))
val = (int) val;
#endif
if (val < 0)
@@ -487,7 +493,7 @@ static void __kprobes add_with_carry(struct pt_regs *regs, int rd,
++val;
regs->gpr[rd] = val;
#ifdef __powerpc64__
- if (!(regs->msr & MSR_SF)) {
+ if (!(regs->msr & MSR_64BIT)) {
val = (unsigned int) val;
val1 = (unsigned int) val1;
}
@@ -570,8 +576,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
if ((instr & 2) == 0)
imm += regs->nip;
regs->nip += 4;
- if ((regs->msr & MSR_SF) == 0)
- regs->nip &= 0xffffffffUL;
+ regs->nip = truncate_if_32bit(regs->msr, regs->nip);
if (instr & 1)
regs->link = regs->nip;
if (branch_taken(instr, regs))
@@ -604,13 +609,9 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
imm -= 0x04000000;
if ((instr & 2) == 0)
imm += regs->nip;
- if (instr & 1) {
- regs->link = regs->nip + 4;
- if ((regs->msr & MSR_SF) == 0)
- regs->link &= 0xffffffffUL;
- }
- if ((regs->msr & MSR_SF) == 0)
- imm &= 0xffffffffUL;
+ if (instr & 1)
+ regs->link = truncate_if_32bit(regs->msr, regs->nip + 4);
+ imm = truncate_if_32bit(regs->msr, imm);
regs->nip = imm;
return 1;
case 19:
@@ -618,11 +619,8 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
case 16: /* bclr */
case 528: /* bcctr */
imm = (instr & 0x400)? regs->ctr: regs->link;
- regs->nip += 4;
- if ((regs->msr & MSR_SF) == 0) {
- regs->nip &= 0xffffffffUL;
- imm &= 0xffffffffUL;
- }
+ regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
+ imm = truncate_if_32bit(regs->msr, imm);
if (instr & 1)
regs->link = regs->nip;
if (branch_taken(instr, regs))
@@ -1616,11 +1614,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
return 0; /* invoke DSI if -EFAULT? */
}
instr_done:
- regs->nip += 4;
-#ifdef __powerpc64__
- if ((regs->msr & MSR_SF) == 0)
- regs->nip &= 0xffffffffUL;
-#endif
+ regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
return 1;
logical_done:
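For readers following the sstep.c changes above: a minimal standalone sketch (made-up values, not part of the patch) of what the new truncate_if_32bit() helper does to a computed effective address when MSR_64BIT is clear.

/* Illustration only, values made up: the truncate_if_32bit() helper added
 * above masks a 64-bit computed effective address down to its low 32 bits
 * whenever MSR_64BIT is clear, replacing the open-coded MSR_SF checks.
 */
static void example_truncate(void)
{
	unsigned long ea  = 0x123456789abcUL;	/* computed EA */
	unsigned long msr = 0;			/* MSR_64BIT clear: 32-bit task */

	ea = truncate_if_32bit(msr, ea);	/* ea is now 0x56789abcUL */
}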
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
index 757c0bed9a91..b42f76c4948d 100644
--- a/arch/powerpc/mm/dma-noncoherent.c
+++ b/arch/powerpc/mm/dma-noncoherent.c
@@ -399,3 +399,23 @@ void __dma_sync_page(struct page *page, unsigned long offset,
#endif
}
EXPORT_SYMBOL(__dma_sync_page);
+
+/*
+ * Return the PFN for a given cpu virtual address returned by
+ * __dma_alloc_coherent. This is used by dma_mmap_coherent()
+ */
+unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr)
+{
+ /* This should always be populated, so we don't test every
+ * level. If that fails, we'll have a nice crash which
+ * will be as good as a BUG_ON()
+ */
+ pgd_t *pgd = pgd_offset_k(cpu_addr);
+ pud_t *pud = pud_offset(pgd, cpu_addr);
+ pmd_t *pmd = pmd_offset(pud, cpu_addr);
+ pte_t *ptep = pte_offset_kernel(pmd, cpu_addr);
+
+ if (pte_none(*ptep) || !pte_present(*ptep))
+ return 0;
+ return pte_pfn(*ptep);
+}
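As context for the new __dma_get_coherent_pfn() helper, here is a hedged sketch of how a dma_mmap_coherent()-style consumer might use the returned PFN. The function name example_mmap_coherent and the exact protection handling are illustrative assumptions, not part of this patch; it assumes the usual <linux/mm.h> declarations.

/* Hypothetical illustration only: feed the PFN returned by
 * __dma_get_coherent_pfn() to remap_pfn_range() when mapping a
 * coherent buffer into user space.  Offsets and error handling
 * are simplified.
 */
static int example_mmap_coherent(struct vm_area_struct *vma, void *cpu_addr)
{
	unsigned long pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);

	if (!pfn)
		return -ENXIO;		/* address not mapped */

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
}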
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 3079f6b44cf5..a242b5d7cbe4 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -118,7 +118,7 @@ _GLOBAL(__hash_page_4K)
BEGIN_FTR_SECTION
cmpdi r9,0 /* check segment size */
bne 3f
-END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
/* Calc va and put it in r29 */
rldicr r29,r5,28,63-28
rldicl r3,r3,0,36
@@ -192,8 +192,8 @@ htab_insert_pte:
rldicr r3,r0,3,63-3 /* r3 = (hash & mask) << 3 */
/* Call ppc_md.hpte_insert */
- ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */
- mr r4,r29 /* Retreive va */
+ ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */
+ mr r4,r29 /* Retrieve va */
li r7,0 /* !bolted, !secondary */
li r8,MMU_PAGE_4K /* page size */
ld r9,STK_PARM(r9)(r1) /* segment size */
@@ -215,8 +215,8 @@ _GLOBAL(htab_call_hpte_insert1)
rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */
/* Call ppc_md.hpte_insert */
- ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */
- mr r4,r29 /* Retreive va */
+ ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */
+ mr r4,r29 /* Retrieve va */
li r7,HPTE_V_SECONDARY /* !bolted, secondary */
li r8,MMU_PAGE_4K /* page size */
ld r9,STK_PARM(r9)(r1) /* segment size */
@@ -401,7 +401,7 @@ _GLOBAL(__hash_page_4K)
BEGIN_FTR_SECTION
cmpdi r9,0 /* check segment size */
bne 3f
-END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
/* Calc va and put it in r29 */
rldicr r29,r5,28,63-28 /* r29 = (vsid << 28) */
rldicl r3,r3,0,36 /* r3 = (ea & 0x0fffffff) */
@@ -495,8 +495,8 @@ htab_special_pfn:
rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */
/* Call ppc_md.hpte_insert */
- ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */
- mr r4,r29 /* Retreive va */
+ ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */
+ mr r4,r29 /* Retrieve va */
li r7,0 /* !bolted, !secondary */
li r8,MMU_PAGE_4K /* page size */
ld r9,STK_PARM(r9)(r1) /* segment size */
@@ -522,8 +522,8 @@ _GLOBAL(htab_call_hpte_insert1)
rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */
/* Call ppc_md.hpte_insert */
- ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */
- mr r4,r29 /* Retreive va */
+ ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */
+ mr r4,r29 /* Retrieve va */
li r7,HPTE_V_SECONDARY /* !bolted, secondary */
li r8,MMU_PAGE_4K /* page size */
ld r9,STK_PARM(r9)(r1) /* segment size */
@@ -715,7 +715,7 @@ BEGIN_FTR_SECTION
andi. r0,r31,_PAGE_NO_CACHE
/* If so, bail out and refault as a 4k page */
bne- ht64_bail_ok
-END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_CI_LARGE_PAGE)
/* Prepare new PTE value (turn access RW into DIRTY, then
* add BUSY and ACCESSED)
*/
@@ -736,7 +736,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
BEGIN_FTR_SECTION
cmpdi r9,0 /* check segment size */
bne 3f
-END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
/* Calc va and put it in r29 */
rldicr r29,r5,28,63-28
rldicl r3,r3,0,36
@@ -813,8 +813,8 @@ ht64_insert_pte:
rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */
/* Call ppc_md.hpte_insert */
- ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */
- mr r4,r29 /* Retreive va */
+ ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */
+ mr r4,r29 /* Retrieve va */
li r7,0 /* !bolted, !secondary */
li r8,MMU_PAGE_64K
ld r9,STK_PARM(r9)(r1) /* segment size */
@@ -836,8 +836,8 @@ _GLOBAL(ht64_call_hpte_insert1)
rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */
/* Call ppc_md.hpte_insert */
- ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */
- mr r4,r29 /* Retreive va */
+ ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */
+ mr r4,r29 /* Retrieve va */
li r7,HPTE_V_SECONDARY /* !bolted, secondary */
li r8,MMU_PAGE_64K
ld r9,STK_PARM(r9)(r1) /* segment size */
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 784a400e0781..dfd764896db0 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -50,9 +50,8 @@ static inline void __tlbie(unsigned long va, int psize, int ssize)
case MMU_PAGE_4K:
va &= ~0xffful;
va |= ssize << 8;
- asm volatile(ASM_MMU_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0),
- %2)
- : : "r" (va), "r"(0), "i" (MMU_FTR_TLBIE_206)
+ asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
+ : : "r" (va), "r"(0), "i" (CPU_FTR_HVMODE_206)
: "memory");
break;
default:
@@ -61,9 +60,8 @@ static inline void __tlbie(unsigned long va, int psize, int ssize)
va |= penc << 12;
va |= ssize << 8;
va |= 1; /* L */
- asm volatile(ASM_MMU_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0),
- %2)
- : : "r" (va), "r"(0), "i" (MMU_FTR_TLBIE_206)
+ asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
+ : : "r" (va), "r"(0), "i" (CPU_FTR_HVMODE_206)
: "memory");
break;
}
@@ -98,8 +96,8 @@ static inline void __tlbiel(unsigned long va, int psize, int ssize)
static inline void tlbie(unsigned long va, int psize, int ssize, int local)
{
- unsigned int use_local = local && cpu_has_feature(CPU_FTR_TLBIEL);
- int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
+ unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
+ int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
if (use_local)
use_local = mmu_psize_defs[psize].tlbiel;
@@ -503,7 +501,7 @@ static void native_flush_hash_range(unsigned long number, int local)
} pte_iterate_hashed_end();
}
- if (cpu_has_feature(CPU_FTR_TLBIEL) &&
+ if (mmu_has_feature(MMU_FTR_TLBIEL) &&
mmu_psize_defs[psize].tlbiel && local) {
asm volatile("ptesync":::"memory");
for (i = 0; i < number; i++) {
@@ -517,7 +515,7 @@ static void native_flush_hash_range(unsigned long number, int local)
}
asm volatile("ptesync":::"memory");
} else {
- int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
+ int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
if (lock_tlbie)
raw_spin_lock(&native_tlbie_lock);
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index a5991facddce..26b2872b3d00 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -53,6 +53,7 @@
#include <asm/sections.h>
#include <asm/spu.h>
#include <asm/udbg.h>
+#include <asm/code-patching.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
@@ -258,11 +259,11 @@ static int __init htab_dt_scan_seg_sizes(unsigned long node,
for (; size >= 4; size -= 4, ++prop) {
if (prop[0] == 40) {
DBG("1T segment support detected\n");
- cur_cpu_spec->cpu_features |= CPU_FTR_1T_SEGMENT;
+ cur_cpu_spec->mmu_features |= MMU_FTR_1T_SEGMENT;
return 1;
}
}
- cur_cpu_spec->cpu_features &= ~CPU_FTR_NO_SLBIE_B;
+ cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
return 0;
}
@@ -288,7 +289,7 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
if (prop != NULL) {
DBG("Page sizes from device-tree:\n");
size /= 4;
- cur_cpu_spec->cpu_features &= ~(CPU_FTR_16M_PAGE);
+ cur_cpu_spec->mmu_features &= ~(MMU_FTR_16M_PAGE);
while(size > 0) {
unsigned int shift = prop[0];
unsigned int slbenc = prop[1];
@@ -316,7 +317,7 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
break;
case 0x18:
idx = MMU_PAGE_16M;
- cur_cpu_spec->cpu_features |= CPU_FTR_16M_PAGE;
+ cur_cpu_spec->mmu_features |= MMU_FTR_16M_PAGE;
break;
case 0x22:
idx = MMU_PAGE_16G;
@@ -411,7 +412,7 @@ static void __init htab_init_page_sizes(void)
* Not in the device-tree, let's fallback on known size
* list for 16M capable GP & GR
*/
- if (cpu_has_feature(CPU_FTR_16M_PAGE))
+ if (mmu_has_feature(MMU_FTR_16M_PAGE))
memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
sizeof(mmu_psize_defaults_gp));
found:
@@ -441,7 +442,7 @@ static void __init htab_init_page_sizes(void)
mmu_vmalloc_psize = MMU_PAGE_64K;
if (mmu_linear_psize == MMU_PAGE_4K)
mmu_linear_psize = MMU_PAGE_64K;
- if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE)) {
+ if (mmu_has_feature(MMU_FTR_CI_LARGE_PAGE)) {
/*
* Don't use 64k pages for ioremap on pSeries, since
* that would stop us accessing the HEA ethernet.
@@ -547,15 +548,7 @@ int remove_section_mapping(unsigned long start, unsigned long end)
}
#endif /* CONFIG_MEMORY_HOTPLUG */
-static inline void make_bl(unsigned int *insn_addr, void *func)
-{
- unsigned long funcp = *((unsigned long *)func);
- int offset = funcp - (unsigned long)insn_addr;
-
- *insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc));
- flush_icache_range((unsigned long)insn_addr, 4+
- (unsigned long)insn_addr);
-}
+#define FUNCTION_TEXT(A) ((*(unsigned long *)(A)))
static void __init htab_finish_init(void)
{
@@ -570,16 +563,33 @@ static void __init htab_finish_init(void)
extern unsigned int *ht64_call_hpte_remove;
extern unsigned int *ht64_call_hpte_updatepp;
- make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert);
- make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert);
- make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove);
- make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp);
+ patch_branch(ht64_call_hpte_insert1,
+ FUNCTION_TEXT(ppc_md.hpte_insert),
+ BRANCH_SET_LINK);
+ patch_branch(ht64_call_hpte_insert2,
+ FUNCTION_TEXT(ppc_md.hpte_insert),
+ BRANCH_SET_LINK);
+ patch_branch(ht64_call_hpte_remove,
+ FUNCTION_TEXT(ppc_md.hpte_remove),
+ BRANCH_SET_LINK);
+ patch_branch(ht64_call_hpte_updatepp,
+ FUNCTION_TEXT(ppc_md.hpte_updatepp),
+ BRANCH_SET_LINK);
+
#endif /* CONFIG_PPC_HAS_HASH_64K */
- make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
- make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
- make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
- make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
+ patch_branch(htab_call_hpte_insert1,
+ FUNCTION_TEXT(ppc_md.hpte_insert),
+ BRANCH_SET_LINK);
+ patch_branch(htab_call_hpte_insert2,
+ FUNCTION_TEXT(ppc_md.hpte_insert),
+ BRANCH_SET_LINK);
+ patch_branch(htab_call_hpte_remove,
+ FUNCTION_TEXT(ppc_md.hpte_remove),
+ BRANCH_SET_LINK);
+ patch_branch(htab_call_hpte_updatepp,
+ FUNCTION_TEXT(ppc_md.hpte_updatepp),
+ BRANCH_SET_LINK);
}
static void __init htab_initialize(void)
@@ -598,7 +608,7 @@ static void __init htab_initialize(void)
/* Initialize page sizes */
htab_init_page_sizes();
- if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) {
+ if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
mmu_kernel_ssize = MMU_SEGSIZE_1T;
mmu_highuser_ssize = MMU_SEGSIZE_1T;
printk(KERN_INFO "Using 1TB segments\n");
@@ -739,7 +749,7 @@ void __init early_init_mmu(void)
/* Initialize stab / SLB management except on iSeries
*/
- if (cpu_has_feature(CPU_FTR_SLB))
+ if (mmu_has_feature(MMU_FTR_SLB))
slb_initialize();
else if (!firmware_has_feature(FW_FEATURE_ISERIES))
stab_initialize(get_paca()->stab_real);
@@ -753,10 +763,10 @@ void __cpuinit early_init_mmu_secondary(void)
mtspr(SPRN_SDR1, _SDR1);
/* Initialize STAB/SLB. We use a virtual address as it works
- * in real mode on pSeries and we want a virutal address on
+ * in real mode on pSeries and we want a virtual address on
* iSeries anyway
*/
- if (cpu_has_feature(CPU_FTR_SLB))
+ if (mmu_has_feature(MMU_FTR_SLB))
slb_initialize();
else
stab_initialize(get_paca()->stab_addr);
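For reference, the branch encoding that the removed make_bl() helper performed (and which patch_branch(..., BRANCH_SET_LINK) is assumed to perform, together with patch_instruction()'s icache maintenance) looks like this as a standalone sketch.

/* Illustration only: encode a PowerPC "bl" (branch with link) to a
 * target address, as the removed make_bl() did.  The kernel now calls
 * patch_branch(addr, target, BRANCH_SET_LINK), assumed to do the
 * equivalent encoding plus the icache flush.
 */
static unsigned int encode_bl(unsigned long insn_addr, unsigned long target)
{
	long offset = target - insn_addr;	/* relative displacement */

	/* opcode 18 (0x48000000), AA=0, LK=1; offset fills the LI field */
	return 0x48000001 | (offset & 0x03fffffc);
}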
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 9bb249c3046e..0b9a5c1901b9 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -529,7 +529,7 @@ static int __init hugetlbpage_init(void)
{
int psize;
- if (!cpu_has_feature(CPU_FTR_16M_PAGE))
+ if (!mmu_has_feature(MMU_FTR_16M_PAGE))
return -ENODEV;
for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index a66499650909..57e545b84bf1 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -424,7 +424,7 @@ void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
clear_page(page);
/*
- * We shouldnt have to do this, but some versions of glibc
+ * We shouldn't have to do this, but some versions of glibc
* require it (ld.so assumes zero filled pages are icache clean)
* - Anton
*/
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
index 2535828aa84b..3bafc3deca6d 100644
--- a/arch/powerpc/mm/mmu_context_hash64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -20,9 +20,205 @@
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/gfp.h>
+#include <linux/slab.h>
#include <asm/mmu_context.h>
+#ifdef CONFIG_PPC_ICSWX
+/*
+ * The processor and its L2 cache cause the icswx instruction to
+ * generate a COP_REQ transaction on PowerBus. The transaction has
+ * no address, and the processor does not perform an MMU access
+ * to authenticate the transaction. The command portion of the
+ * PowerBus COP_REQ transaction includes the LPAR_ID (LPID) and
+ * the coprocessor Process ID (PID), which the coprocessor compares
+ * to the authorized LPID and PID held in the coprocessor, to determine
+ * if the process is authorized to generate the transaction.
+ * The data of the COP_REQ transaction is 128 bytes or less and is
+ * placed in cacheable memory on a 128-byte cache line boundary.
+ *
+ * A task that wants to use a coprocessor should call use_cop() to
+ * allocate a coprocessor PID before executing the icswx instruction.
+ * use_cop() also enables coprocessor context switching. drop_cop() is
+ * used to free the coprocessor PID.
+ *
+ * Example:
+ * Host Fabric Interface (HFI) is a PowerPC network coprocessor.
+ * Each HFI has multiple windows. Each HFI window serves as a
+ * network device sending to and receiving from the HFI network.
+ * The HFI immediate send function uses the icswx instruction. The
+ * immediate send function allows small (single cache-line) packets
+ * to be sent without using the regular HFI send FIFO and doorbell,
+ * which are much slower than immediate send.
+ *
+ * For each task intending to use HFI immediate send, the HFI driver
+ * calls use_cop() to obtain a coprocessor PID for the task.
+ * The HFI driver then allocates a free HFI window and saves the
+ * coprocessor PID to the HFI window to allow the task to use the
+ * HFI window.
+ *
+ * The HFI driver repeatedly creates immediate send packets and
+ * issues the icswx instruction to send data through the HFI window.
+ * The HFI compares the coprocessor PID in the CPU PID register
+ * to the PID held in the HFI window to determine if the transaction
+ * is allowed.
+ *
+ * When the task wants to release the HFI window, the HFI driver calls
+ * drop_cop() to release the coprocessor PID.
+ */
+
+#define COP_PID_NONE 0
+#define COP_PID_MIN (COP_PID_NONE + 1)
+#define COP_PID_MAX (0xFFFF)
+
+static DEFINE_SPINLOCK(mmu_context_acop_lock);
+static DEFINE_IDA(cop_ida);
+
+void switch_cop(struct mm_struct *next)
+{
+ mtspr(SPRN_PID, next->context.cop_pid);
+ mtspr(SPRN_ACOP, next->context.acop);
+}
+
+static int new_cop_pid(struct ida *ida, int min_id, int max_id,
+ spinlock_t *lock)
+{
+ int index;
+ int err;
+
+again:
+ if (!ida_pre_get(ida, GFP_KERNEL))
+ return -ENOMEM;
+
+ spin_lock(lock);
+ err = ida_get_new_above(ida, min_id, &index);
+ spin_unlock(lock);
+
+ if (err == -EAGAIN)
+ goto again;
+ else if (err)
+ return err;
+
+ if (index > max_id) {
+ spin_lock(lock);
+ ida_remove(ida, index);
+ spin_unlock(lock);
+ return -ENOMEM;
+ }
+
+ return index;
+}
+
+static void sync_cop(void *arg)
+{
+ struct mm_struct *mm = arg;
+
+ if (mm == current->active_mm)
+ switch_cop(current->active_mm);
+}
+
+/**
+ * Start using a coprocessor.
+ * @acop: mask of coprocessor to be used.
+ * @mm: The mm to associate the coprocessor with. Most likely the current mm.
+ *
+ * Return a positive PID if successful. Negative errno otherwise.
+ * The returned PID will be fed to the coprocessor to determine if an
+ * icswx transaction is authenticated.
+ */
+int use_cop(unsigned long acop, struct mm_struct *mm)
+{
+ int ret;
+
+ if (!cpu_has_feature(CPU_FTR_ICSWX))
+ return -ENODEV;
+
+ if (!mm || !acop)
+ return -EINVAL;
+
+ /* We need to make sure mm_users doesn't change */
+ down_read(&mm->mmap_sem);
+ spin_lock(mm->context.cop_lockp);
+
+ if (mm->context.cop_pid == COP_PID_NONE) {
+ ret = new_cop_pid(&cop_ida, COP_PID_MIN, COP_PID_MAX,
+ &mmu_context_acop_lock);
+ if (ret < 0)
+ goto out;
+
+ mm->context.cop_pid = ret;
+ }
+ mm->context.acop |= acop;
+
+ sync_cop(mm);
+
+ /*
+ * If this is a threaded process then there might be other threads
+ * running. We need to send an IPI to force them to pick up any
+ * change in PID and ACOP.
+ */
+ if (atomic_read(&mm->mm_users) > 1)
+ smp_call_function(sync_cop, mm, 1);
+
+ ret = mm->context.cop_pid;
+
+out:
+ spin_unlock(mm->context.cop_lockp);
+ up_read(&mm->mmap_sem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(use_cop);
+
+/**
+ * Stop using a coprocessor.
+ * @acop: mask of coprocessor to be stopped.
+ * @mm: The mm the coprocessor is associated with.
+ */
+void drop_cop(unsigned long acop, struct mm_struct *mm)
+{
+ int free_pid = COP_PID_NONE;
+
+ if (!cpu_has_feature(CPU_FTR_ICSWX))
+ return;
+
+ if (WARN_ON_ONCE(!mm))
+ return;
+
+ /* We need to make sure mm_users doesn't change */
+ down_read(&mm->mmap_sem);
+ spin_lock(mm->context.cop_lockp);
+
+ mm->context.acop &= ~acop;
+
+ if ((!mm->context.acop) && (mm->context.cop_pid != COP_PID_NONE)) {
+ free_pid = mm->context.cop_pid;
+ mm->context.cop_pid = COP_PID_NONE;
+ }
+
+ sync_cop(mm);
+
+ /*
+ * If this is a threaded process then there might be other threads
+ * running. We need to send an IPI to force them to pick up any
+ * change in PID and ACOP.
+ */
+ if (atomic_read(&mm->mm_users) > 1)
+ smp_call_function(sync_cop, mm, 1);
+
+ if (free_pid != COP_PID_NONE) {
+ spin_lock(&mmu_context_acop_lock);
+ ida_remove(&cop_ida, free_pid);
+ spin_unlock(&mmu_context_acop_lock);
+ }
+
+ spin_unlock(mm->context.cop_lockp);
+ up_read(&mm->mmap_sem);
+}
+EXPORT_SYMBOL_GPL(drop_cop);
+
+#endif /* CONFIG_PPC_ICSWX */
+
static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDA(mmu_context_ida);
@@ -31,7 +227,6 @@ static DEFINE_IDA(mmu_context_ida);
* Each segment contains 2^28 bytes. Each context maps 2^44 bytes,
* so we can support 2^19-1 contexts (19 == 35 + 28 - 44).
*/
-#define NO_CONTEXT 0
#define MAX_CONTEXT ((1UL << 19) - 1)
int __init_new_context(void)
@@ -79,6 +274,16 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
slice_set_user_psize(mm, mmu_virtual_psize);
subpage_prot_init_new_context(mm);
mm->context.id = index;
+#ifdef CONFIG_PPC_ICSWX
+ mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
+ if (!mm->context.cop_lockp) {
+ __destroy_context(index);
+ subpage_prot_free(mm);
+ mm->context.id = MMU_NO_CONTEXT;
+ return -ENOMEM;
+ }
+ spin_lock_init(mm->context.cop_lockp);
+#endif /* CONFIG_PPC_ICSWX */
return 0;
}
@@ -93,7 +298,12 @@ EXPORT_SYMBOL_GPL(__destroy_context);
void destroy_context(struct mm_struct *mm)
{
+#ifdef CONFIG_PPC_ICSWX
+ drop_cop(mm->context.acop, mm);
+ kfree(mm->context.cop_lockp);
+ mm->context.cop_lockp = NULL;
+#endif /* CONFIG_PPC_ICSWX */
__destroy_context(mm->context.id);
subpage_prot_free(mm);
- mm->context.id = NO_CONTEXT;
+ mm->context.id = MMU_NO_CONTEXT;
}
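To make the use_cop()/drop_cop() flow described in the comment block above concrete, a hypothetical driver-side sketch follows. struct my_window, the field names, and the acop_mask handling are illustrative assumptions only; the API calls themselves are the ones added by this patch, and current->mm is assumed to be the mm of the task opening the window.

/* Hypothetical usage of use_cop()/drop_cop(): obtain a coprocessor PID
 * for the current mm, program it into a device window so the hardware
 * can authenticate icswx transactions, and release it when done.
 */
struct my_window {
	unsigned int pid;	/* coprocessor PID programmed into the window */
};

static int example_open_window(struct my_window *win, unsigned long acop_mask)
{
	int cop_pid = use_cop(acop_mask, current->mm);

	if (cop_pid < 0)
		return cop_pid;		/* negative errno on failure */

	win->pid = cop_pid;		/* compared against CPU PID on icswx */
	return 0;
}

static void example_close_window(struct my_window *win, unsigned long acop_mask)
{
	drop_cop(acop_mask, current->mm);
	win->pid = 0;
}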
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index c0aab52da3a5..336807de550e 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -338,12 +338,14 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
return NOTIFY_OK;
switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
break;
#ifdef CONFIG_HOTPLUG_CPU
+ case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
case CPU_DEAD:
case CPU_DEAD_FROZEN:
pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
@@ -407,7 +409,17 @@ void __init mmu_context_init(void)
} else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
first_context = 1;
last_context = 65535;
- } else {
+ } else
+#ifdef CONFIG_PPC_BOOK3E_MMU
+ if (mmu_has_feature(MMU_FTR_TYPE_3E)) {
+ u32 mmucfg = mfspr(SPRN_MMUCFG);
+ u32 pid_bits = (mmucfg & MMUCFG_PIDSIZE_MASK)
+ >> MMUCFG_PIDSIZE_SHIFT;
+ first_context = 1;
+ last_context = (1UL << (pid_bits + 1)) - 1;
+ } else
+#endif
+ {
first_context = 1;
last_context = 255;
}
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 0dc95c0aa3be..2164006fe170 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -311,14 +311,13 @@ EXPORT_SYMBOL_GPL(of_node_to_nid);
static int __init find_min_common_depth(void)
{
int depth;
- struct device_node *rtas_root;
struct device_node *chosen;
+ struct device_node *root;
const char *vec5;
- rtas_root = of_find_node_by_path("/rtas");
-
- if (!rtas_root)
- return -1;
+ root = of_find_node_by_path("/rtas");
+ if (!root)
+ root = of_find_node_by_path("/");
/*
* This property is a set of 32-bit integers, each representing
@@ -332,7 +331,7 @@ static int __init find_min_common_depth(void)
* NUMA boundary and the following are progressively less significant
* boundaries. There can be more than one level of NUMA.
*/
- distance_ref_points = of_get_property(rtas_root,
+ distance_ref_points = of_get_property(root,
"ibm,associativity-reference-points",
&distance_ref_points_depth);
@@ -376,11 +375,11 @@ static int __init find_min_common_depth(void)
distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
}
- of_node_put(rtas_root);
+ of_node_put(root);
return depth;
err:
- of_node_put(rtas_root);
+ of_node_put(root);
return -1;
}
@@ -440,11 +439,11 @@ static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
}
/*
- * Retreive and validate the ibm,dynamic-memory property of the device tree.
+ * Retrieve and validate the ibm,dynamic-memory property of the device tree.
*
* The layout of the ibm,dynamic-memory property is a number N of memblock
* list entries followed by N memblock list entries. Each memblock list entry
- * contains information as layed out in the of_drconf_cell struct above.
+ * contains information as laid out in the of_drconf_cell struct above.
*/
static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
{
@@ -468,7 +467,7 @@ static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
}
/*
- * Retreive and validate the ibm,lmb-size property for drconf memory
+ * Retrieve and validate the ibm,lmb-size property for drconf memory
* from the device tree.
*/
static u64 of_get_lmb_size(struct device_node *memory)
@@ -490,7 +489,7 @@ struct assoc_arrays {
};
/*
- * Retreive and validate the list of associativity arrays for drconf
+ * Retrieve and validate the list of associativity arrays for drconf
* memory from the ibm,associativity-lookup-arrays property of the
* device tree..
*
@@ -604,7 +603,7 @@ static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
* Returns the size the region should have to enforce the memory limit.
* This will either be the original value of size, a truncated value,
* or zero. If the returned value of size is 0 the region should be
- * discarded as it lies wholy above the memory limit.
+ * discarded as it lies wholly above the memory limit.
*/
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
unsigned long size)
@@ -1453,7 +1452,7 @@ int arch_update_cpu_topology(void)
unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
struct sys_device *sysdev;
- for_each_cpu_mask(cpu, cpu_associativity_changes_mask) {
+ for_each_cpu(cpu, &cpu_associativity_changes_mask) {
vphn_get_associativity(cpu, associativity);
nid = associativity_to_nid(associativity);
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 8dc41c0157fe..51f87956f8f8 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -133,7 +133,15 @@ ioremap(phys_addr_t addr, unsigned long size)
EXPORT_SYMBOL(ioremap);
void __iomem *
-ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags)
+ioremap_wc(phys_addr_t addr, unsigned long size)
+{
+ return __ioremap_caller(addr, size, _PAGE_NO_CACHE,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_wc);
+
+void __iomem *
+ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
{
/* writeable implies dirty for kernel addresses */
if (flags & _PAGE_RW)
@@ -152,7 +160,7 @@ ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags)
return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}
-EXPORT_SYMBOL(ioremap_flags);
+EXPORT_SYMBOL(ioremap_prot);
void __iomem *
__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 88927a05cdc2..6e595f6496d4 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -255,7 +255,17 @@ void __iomem * ioremap(phys_addr_t addr, unsigned long size)
return __ioremap_caller(addr, size, flags, caller);
}
-void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size,
+void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
+{
+ unsigned long flags = _PAGE_NO_CACHE;
+ void *caller = __builtin_return_address(0);
+
+ if (ppc_md.ioremap)
+ return ppc_md.ioremap(addr, size, flags, caller);
+ return __ioremap_caller(addr, size, flags, caller);
+}
+
+void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
unsigned long flags)
{
void *caller = __builtin_return_address(0);
@@ -311,7 +321,8 @@ void iounmap(volatile void __iomem *token)
}
EXPORT_SYMBOL(ioremap);
-EXPORT_SYMBOL(ioremap_flags);
+EXPORT_SYMBOL(ioremap_wc);
+EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 1d98ecc8eecd..e22276cb67a4 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -24,6 +24,7 @@
#include <asm/firmware.h>
#include <linux/compiler.h>
#include <asm/udbg.h>
+#include <asm/code-patching.h>
extern void slb_allocate_realmode(unsigned long ea);
@@ -166,7 +167,7 @@ static inline int esids_match(unsigned long addr1, unsigned long addr2)
int esid_1t_count;
/* System is not 1T segment size capable. */
- if (!cpu_has_feature(CPU_FTR_1T_SEGMENT))
+ if (!mmu_has_feature(MMU_FTR_1T_SEGMENT))
return (GET_ESID(addr1) == GET_ESID(addr2));
esid_1t_count = (((addr1 >> SID_SHIFT_1T) != 0) +
@@ -201,7 +202,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
*/
hard_irq_disable();
offset = get_paca()->slb_cache_ptr;
- if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) &&
+ if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
offset <= SLB_CACHE_ENTRIES) {
int i;
asm volatile("isync" : : : "memory");
@@ -249,9 +250,8 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
static inline void patch_slb_encoding(unsigned int *insn_addr,
unsigned int immed)
{
- *insn_addr = (*insn_addr & 0xffff0000) | immed;
- flush_icache_range((unsigned long)insn_addr, 4+
- (unsigned long)insn_addr);
+ int insn = (*insn_addr & 0xffff0000) | immed;
+ patch_instruction(insn_addr, insn);
}
void slb_set_size(u16 size)
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 95ce35581696..ef653dc95b65 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -58,7 +58,7 @@ _GLOBAL(slb_miss_kernel_load_linear)
li r11,0
BEGIN_FTR_SECTION
b slb_finish_load
-END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
b slb_finish_load_1T
1:
@@ -87,7 +87,7 @@ _GLOBAL(slb_miss_kernel_load_vmemmap)
6:
BEGIN_FTR_SECTION
b slb_finish_load
-END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
b slb_finish_load_1T
0: /* user address: proto-VSID = context << 15 | ESID. First check
@@ -138,11 +138,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
ld r9,PACACONTEXTID(r13)
BEGIN_FTR_SECTION
cmpldi r10,0x1000
-END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
rldimi r10,r9,USER_ESID_BITS,0
BEGIN_FTR_SECTION
bge slb_finish_load_1T
-END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
b slb_finish_load
8: /* invalid EA */
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 446a01842a73..41e31642a86a 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -243,7 +243,7 @@ void __init stabs_alloc(void)
{
int cpu;
- if (cpu_has_feature(CPU_FTR_SLB))
+ if (mmu_has_feature(MMU_FTR_SLB))
return;
for_each_possible_cpu(cpu) {
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index 8526bd9d2aa3..af0892209417 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -192,7 +192,7 @@ normal_tlb_miss:
or r10,r15,r14
BEGIN_MMU_FTR_SECTION
- /* Set the TLB reservation and seach for existing entry. Then load
+ /* Set the TLB reservation and search for existing entry. Then load
* the entry.
*/
PPC_TLBSRX_DOT(0,r16)
@@ -425,13 +425,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV)
virt_page_table_tlb_miss_fault:
/* If we fault here, things are a little bit tricky. We need to call
- * either data or instruction store fault, and we need to retreive
+ * either data or instruction store fault, and we need to retrieve
* the original fault address and ESR (for data).
*
* The thing is, we know that in normal circumstances, this is
* always called as a second level tlb miss for SW load or as a first
* level TLB miss for HW load, so we should be able to peek at the
- * relevant informations in the first exception frame in the PACA.
+ * relevant information in the first exception frame in the PACA.
*
* However, we do need to double check that, because we may just hit
* a stray kernel pointer or a userland attack trying to hit those
diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c
index c4d2b7167568..cb515cff745c 100644
--- a/arch/powerpc/oprofile/op_model_cell.c
+++ b/arch/powerpc/oprofile/op_model_cell.c
@@ -67,7 +67,7 @@
#define MAX_SPU_COUNT 0xFFFFFF /* maximum 24 bit LFSR value */
-/* Minumum HW interval timer setting to send value to trace buffer is 10 cycle.
+/* Minimum HW interval timer setting to send value to trace buffer is 10 cycle.
* To configure counter to send value every N cycles set counter to
* 2^32 - 1 - N.
*/
@@ -1470,7 +1470,7 @@ static int cell_global_start(struct op_counter_config *ctr)
* trace buffer at the maximum rate possible. The trace buffer is configured
* to store the PCs, wrapping when it is full. The performance counter is
* initialized to the max hardware count minus the number of events, N, between
- * samples. Once the N events have occured, a HW counter overflow occurs
+ * samples. Once the N events have occurred, a HW counter overflow occurs
* causing the generation of a HW counter interrupt which also stops the
* writing of the SPU PC values to the trace buffer. Hence the last PC
* written to the trace buffer is the SPU PC that we want. Unfortunately,
@@ -1656,7 +1656,7 @@ static void cell_handle_interrupt_ppu(struct pt_regs *regs,
* The counters were frozen by the interrupt.
* Reenable the interrupt and restart the counters.
* If there was a race between the interrupt handler and
- * the virtual counter routine. The virutal counter
+ * the virtual counter routine. The virtual counter
* routine may have cleared the interrupts. Hence must
* use the virt_cntr_inter_mask to re-enable the interrupts.
*/
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
index 80774092db77..8ee51a252cf1 100644
--- a/arch/powerpc/oprofile/op_model_power4.c
+++ b/arch/powerpc/oprofile/op_model_power4.c
@@ -207,7 +207,7 @@ static unsigned long get_pc(struct pt_regs *regs)
unsigned long mmcra;
unsigned long slot;
- /* Cant do much about it */
+ /* Can't do much about it */
if (!cur_cpu_spec->oprofile_mmcra_sihv)
return pc;
diff --git a/arch/powerpc/platforms/44x/iss4xx.c b/arch/powerpc/platforms/44x/iss4xx.c
index aa46e9d1e771..19395f18b1db 100644
--- a/arch/powerpc/platforms/44x/iss4xx.c
+++ b/arch/powerpc/platforms/44x/iss4xx.c
@@ -87,7 +87,7 @@ static void __cpuinit smp_iss4xx_setup_cpu(int cpu)
mpic_setup_this_cpu();
}
-static void __cpuinit smp_iss4xx_kick_cpu(int cpu)
+static int __cpuinit smp_iss4xx_kick_cpu(int cpu)
{
struct device_node *cpunode = of_get_cpu_node(cpu, NULL);
const u64 *spin_table_addr_prop;
@@ -104,7 +104,7 @@ static void __cpuinit smp_iss4xx_kick_cpu(int cpu)
NULL);
if (spin_table_addr_prop == NULL) {
pr_err("CPU%d: Can't start, missing cpu-release-addr !\n", cpu);
- return;
+ return -ENOENT;
}
/* Assume it's mapped as part of the linear mapping. This is a bit
@@ -117,6 +117,8 @@ static void __cpuinit smp_iss4xx_kick_cpu(int cpu)
smp_wmb();
spin_table[1] = __pa(start_secondary_47x);
mb();
+
+ return 0;
}
static struct smp_ops_t iss_smp_ops = {
diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
index fde0ea50c97d..9f09319352c0 100644
--- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
+++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
@@ -61,7 +61,7 @@ irq_to_pic_bit(unsigned int irq)
static void
cpld_mask_irq(struct irq_data *d)
{
- unsigned int cpld_irq = (unsigned int)irq_map[d->irq].hwirq;
+ unsigned int cpld_irq = (unsigned int)irqd_to_hwirq(d);
void __iomem *pic_mask = irq_to_pic_mask(cpld_irq);
out_8(pic_mask,
@@ -71,7 +71,7 @@ cpld_mask_irq(struct irq_data *d)
static void
cpld_unmask_irq(struct irq_data *d)
{
- unsigned int cpld_irq = (unsigned int)irq_map[d->irq].hwirq;
+ unsigned int cpld_irq = (unsigned int)irqd_to_hwirq(d);
void __iomem *pic_mask = irq_to_pic_mask(cpld_irq);
out_8(pic_mask,
@@ -97,7 +97,7 @@ cpld_pic_get_irq(int offset, u8 ignore, u8 __iomem *statusp,
status |= (ignore | mask);
if (status == 0xff)
- return NO_IRQ_IGNORE;
+ return NO_IRQ;
cpld_irq = ffz(status) + offset;
@@ -109,14 +109,14 @@ cpld_pic_cascade(unsigned int irq, struct irq_desc *desc)
{
irq = cpld_pic_get_irq(0, PCI_IGNORE, &cpld_regs->pci_status,
&cpld_regs->pci_mask);
- if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) {
+ if (irq != NO_IRQ) {
generic_handle_irq(irq);
return;
}
irq = cpld_pic_get_irq(8, MISC_IGNORE, &cpld_regs->misc_status,
&cpld_regs->misc_mask);
- if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) {
+ if (irq != NO_IRQ) {
generic_handle_irq(irq);
return;
}
@@ -132,8 +132,8 @@ static int
cpld_pic_host_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hw)
{
- irq_to_desc(virq)->status |= IRQ_LEVEL;
- set_irq_chip_and_handler(virq, &cpld_pic, handle_level_irq);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_and_handler(virq, &cpld_pic, handle_level_irq);
return 0;
}
@@ -198,7 +198,7 @@ mpc5121_ads_cpld_pic_init(void)
goto end;
}
- set_irq_chained_handler(cascade_irq, cpld_pic_cascade);
+ irq_set_chained_handler(cascade_irq, cpld_pic_cascade);
end:
of_node_put(np);
}
diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c
index 2bd1e6cf1f58..96f85e5e0cd3 100644
--- a/arch/powerpc/platforms/52xx/media5200.c
+++ b/arch/powerpc/platforms/52xx/media5200.c
@@ -56,7 +56,7 @@ static void media5200_irq_unmask(struct irq_data *d)
spin_lock_irqsave(&media5200_irq.lock, flags);
val = in_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE);
- val |= 1 << (MEDIA5200_IRQ_SHIFT + irq_map[d->irq].hwirq);
+ val |= 1 << (MEDIA5200_IRQ_SHIFT + irqd_to_hwirq(d));
out_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE, val);
spin_unlock_irqrestore(&media5200_irq.lock, flags);
}
@@ -68,7 +68,7 @@ static void media5200_irq_mask(struct irq_data *d)
spin_lock_irqsave(&media5200_irq.lock, flags);
val = in_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE);
- val &= ~(1 << (MEDIA5200_IRQ_SHIFT + irq_map[d->irq].hwirq));
+ val &= ~(1 << (MEDIA5200_IRQ_SHIFT + irqd_to_hwirq(d)));
out_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE, val);
spin_unlock_irqrestore(&media5200_irq.lock, flags);
}
@@ -82,7 +82,7 @@ static struct irq_chip media5200_irq_chip = {
void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
int sub_virq, val;
u32 status, enable;
@@ -107,7 +107,7 @@ void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc)
/* Processing done; can reenable the cascade now */
raw_spin_lock(&desc->lock);
chip->irq_ack(&desc->irq_data);
- if (!(desc->status & IRQ_DISABLED))
+ if (!irqd_irq_disabled(&desc->irq_data))
chip->irq_unmask(&desc->irq_data);
raw_spin_unlock(&desc->lock);
}
@@ -115,15 +115,10 @@ void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc)
static int media5200_irq_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hw)
{
- struct irq_desc *desc = irq_to_desc(virq);
-
pr_debug("%s: h=%p, virq=%i, hwirq=%i\n", __func__, h, virq, (int)hw);
- set_irq_chip_data(virq, &media5200_irq);
- set_irq_chip_and_handler(virq, &media5200_irq_chip, handle_level_irq);
- set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
- desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
- desc->status |= IRQ_TYPE_LEVEL_LOW | IRQ_LEVEL;
-
+ irq_set_chip_data(virq, &media5200_irq);
+ irq_set_chip_and_handler(virq, &media5200_irq_chip, handle_level_irq);
+ irq_set_status_flags(virq, IRQ_LEVEL);
return 0;
}
@@ -187,8 +182,8 @@ static void __init media5200_init_irq(void)
media5200_irq.irqhost->host_data = &media5200_irq;
- set_irq_data(cascade_virq, &media5200_irq);
- set_irq_chained_handler(cascade_virq, media5200_irq_cascade);
+ irq_set_handler_data(cascade_virq, &media5200_irq);
+ irq_set_chained_handler(cascade_virq, media5200_irq_cascade);
return;
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
index 6da44f0f2934..6c39b9cc2fa3 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
@@ -192,7 +192,7 @@ static struct irq_chip mpc52xx_gpt_irq_chip = {
void mpc52xx_gpt_irq_cascade(unsigned int virq, struct irq_desc *desc)
{
- struct mpc52xx_gpt_priv *gpt = get_irq_data(virq);
+ struct mpc52xx_gpt_priv *gpt = irq_get_handler_data(virq);
int sub_virq;
u32 status;
@@ -209,8 +209,8 @@ static int mpc52xx_gpt_irq_map(struct irq_host *h, unsigned int virq,
struct mpc52xx_gpt_priv *gpt = h->host_data;
dev_dbg(gpt->dev, "%s: h=%p, virq=%i\n", __func__, h, virq);
- set_irq_chip_data(virq, gpt);
- set_irq_chip_and_handler(virq, &mpc52xx_gpt_irq_chip, handle_edge_irq);
+ irq_set_chip_data(virq, gpt);
+ irq_set_chip_and_handler(virq, &mpc52xx_gpt_irq_chip, handle_edge_irq);
return 0;
}
@@ -259,8 +259,8 @@ mpc52xx_gpt_irq_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node)
}
gpt->irqhost->host_data = gpt;
- set_irq_data(cascade_virq, gpt);
- set_irq_chained_handler(cascade_virq, mpc52xx_gpt_irq_cascade);
+ irq_set_handler_data(cascade_virq, gpt);
+ irq_set_chained_handler(cascade_virq, mpc52xx_gpt_irq_cascade);
/* If the GPT is currently disabled, then change it to be in Input
* Capture mode. If the mode is non-zero, then the pin could be
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
index 6385d883cb8d..9940ce8a2d4e 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
@@ -57,7 +57,7 @@ struct mpc52xx_lpbfifo {
static struct mpc52xx_lpbfifo lpbfifo;
/**
- * mpc52xx_lpbfifo_kick - Trigger the next block of data to be transfered
+ * mpc52xx_lpbfifo_kick - Trigger the next block of data to be transferred
*/
static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req)
{
@@ -179,7 +179,7 @@ static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req)
*
* On transmit, the dma completion irq triggers before the fifo completion
* triggers. Handle the dma completion here instead of the LPB FIFO Bestcomm
- * task completion irq becuase everyting is not really done until the LPB FIFO
+ * task completion irq because everything is not really done until the LPB FIFO
* completion irq triggers.
*
* In other words:
@@ -195,7 +195,7 @@ static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req)
* Exit conditions:
* 1) Transfer aborted
* 2) FIFO complete without DMA; more data to do
- * 3) FIFO complete without DMA; all data transfered
+ * 3) FIFO complete without DMA; all data transferred
* 4) FIFO complete using DMA
*
* Condition 1 can occur regardless of whether or not DMA is used.
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
index 9f3ed582d082..1a9a49570579 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
@@ -157,48 +157,30 @@ static inline void io_be_clrbit(u32 __iomem *addr, int bitno)
*/
static void mpc52xx_extirq_mask(struct irq_data *d)
{
- int irq;
- int l2irq;
-
- irq = irq_map[d->irq].hwirq;
- l2irq = irq & MPC52xx_IRQ_L2_MASK;
-
+ int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
io_be_clrbit(&intr->ctrl, 11 - l2irq);
}
static void mpc52xx_extirq_unmask(struct irq_data *d)
{
- int irq;
- int l2irq;
-
- irq = irq_map[d->irq].hwirq;
- l2irq = irq & MPC52xx_IRQ_L2_MASK;
-
+ int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
io_be_setbit(&intr->ctrl, 11 - l2irq);
}
static void mpc52xx_extirq_ack(struct irq_data *d)
{
- int irq;
- int l2irq;
-
- irq = irq_map[d->irq].hwirq;
- l2irq = irq & MPC52xx_IRQ_L2_MASK;
-
+ int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
io_be_setbit(&intr->ctrl, 27-l2irq);
}
static int mpc52xx_extirq_set_type(struct irq_data *d, unsigned int flow_type)
{
u32 ctrl_reg, type;
- int irq;
- int l2irq;
+ int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
void *handler = handle_level_irq;
- irq = irq_map[d->irq].hwirq;
- l2irq = irq & MPC52xx_IRQ_L2_MASK;
-
- pr_debug("%s: irq=%x. l2=%d flow_type=%d\n", __func__, irq, l2irq, flow_type);
+ pr_debug("%s: irq=%x. l2=%d flow_type=%d\n", __func__,
+ (int) irqd_to_hwirq(d), l2irq, flow_type);
switch (flow_type) {
case IRQF_TRIGGER_HIGH: type = 0; break;
@@ -214,7 +196,7 @@ static int mpc52xx_extirq_set_type(struct irq_data *d, unsigned int flow_type)
ctrl_reg |= (type << (22 - (l2irq * 2)));
out_be32(&intr->ctrl, ctrl_reg);
- __set_irq_handler_unlocked(d->irq, handler);
+ __irq_set_handler_locked(d->irq, handler);
return 0;
}
@@ -237,23 +219,13 @@ static int mpc52xx_null_set_type(struct irq_data *d, unsigned int flow_type)
static void mpc52xx_main_mask(struct irq_data *d)
{
- int irq;
- int l2irq;
-
- irq = irq_map[d->irq].hwirq;
- l2irq = irq & MPC52xx_IRQ_L2_MASK;
-
+ int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
io_be_setbit(&intr->main_mask, 16 - l2irq);
}
static void mpc52xx_main_unmask(struct irq_data *d)
{
- int irq;
- int l2irq;
-
- irq = irq_map[d->irq].hwirq;
- l2irq = irq & MPC52xx_IRQ_L2_MASK;
-
+ int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
io_be_clrbit(&intr->main_mask, 16 - l2irq);
}
@@ -270,23 +242,13 @@ static struct irq_chip mpc52xx_main_irqchip = {
*/
static void mpc52xx_periph_mask(struct irq_data *d)
{
- int irq;
- int l2irq;
-
- irq = irq_map[d->irq].hwirq;
- l2irq = irq & MPC52xx_IRQ_L2_MASK;
-
+ int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
io_be_setbit(&intr->per_mask, 31 - l2irq);
}
static void mpc52xx_periph_unmask(struct irq_data *d)
{
- int irq;
- int l2irq;
-
- irq = irq_map[d->irq].hwirq;
- l2irq = irq & MPC52xx_IRQ_L2_MASK;
-
+ int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
io_be_clrbit(&intr->per_mask, 31 - l2irq);
}
@@ -303,34 +265,19 @@ static struct irq_chip mpc52xx_periph_irqchip = {
*/
static void mpc52xx_sdma_mask(struct irq_data *d)
{
- int irq;
- int l2irq;
-
- irq = irq_map[d->irq].hwirq;
- l2irq = irq & MPC52xx_IRQ_L2_MASK;
-
+ int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
io_be_setbit(&sdma->IntMask, l2irq);
}
static void mpc52xx_sdma_unmask(struct irq_data *d)
{
- int irq;
- int l2irq;
-
- irq = irq_map[d->irq].hwirq;
- l2irq = irq & MPC52xx_IRQ_L2_MASK;
-
+ int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
io_be_clrbit(&sdma->IntMask, l2irq);
}
static void mpc52xx_sdma_ack(struct irq_data *d)
{
- int irq;
- int l2irq;
-
- irq = irq_map[d->irq].hwirq;
- l2irq = irq & MPC52xx_IRQ_L2_MASK;
-
+ int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
out_be32(&sdma->IntPend, 1 << l2irq);
}
@@ -414,7 +361,7 @@ static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq,
else
hndlr = handle_level_irq;
- set_irq_chip_and_handler(virq, &mpc52xx_extirq_irqchip, hndlr);
+ irq_set_chip_and_handler(virq, &mpc52xx_extirq_irqchip, hndlr);
pr_debug("%s: External IRQ%i virq=%x, hw=%x. type=%x\n",
__func__, l2irq, virq, (int)irq, type);
return 0;
@@ -431,7 +378,7 @@ static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq,
return -EINVAL;
}
- set_irq_chip_and_handler(virq, irqchip, handle_level_irq);
+ irq_set_chip_and_handler(virq, irqchip, handle_level_irq);
pr_debug("%s: virq=%x, l1=%i, l2=%i\n", __func__, virq, l1irq, l2irq);
return 0;
@@ -512,7 +459,7 @@ void __init mpc52xx_init_irq(void)
/**
* mpc52xx_get_irq - Get pending interrupt number hook function
*
- * Called by the interupt handler to determine what IRQ handler needs to be
+ * Called by the interrupt handler to determine what IRQ handler needs to be
* executed.
*
* Status of pending interrupts is determined by reading the encoded status
@@ -539,7 +486,7 @@ void __init mpc52xx_init_irq(void)
unsigned int mpc52xx_get_irq(void)
{
u32 status;
- int irq = NO_IRQ_IGNORE;
+ int irq;
status = in_be32(&intr->enc_status);
if (status & 0x00000400) { /* critical */
@@ -562,6 +509,8 @@ unsigned int mpc52xx_get_irq(void)
} else {
irq |= (MPC52xx_IRQ_L1_PERP << MPC52xx_IRQ_L1_OFFSET);
}
+ } else {
+ return NO_IRQ;
}
return irq_linear_revmap(mpc52xx_irqhost, irq);
diff --git a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
index 926dfdaaf57a..8ccf9ed62fe2 100644
--- a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
+++ b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
@@ -42,7 +42,7 @@ struct pq2ads_pci_pic {
static void pq2ads_pci_mask_irq(struct irq_data *d)
{
struct pq2ads_pci_pic *priv = irq_data_get_irq_chip_data(d);
- int irq = NUM_IRQS - virq_to_hw(d->irq) - 1;
+ int irq = NUM_IRQS - irqd_to_hwirq(d) - 1;
if (irq != -1) {
unsigned long flags;
@@ -58,7 +58,7 @@ static void pq2ads_pci_mask_irq(struct irq_data *d)
static void pq2ads_pci_unmask_irq(struct irq_data *d)
{
struct pq2ads_pci_pic *priv = irq_data_get_irq_chip_data(d);
- int irq = NUM_IRQS - virq_to_hw(d->irq) - 1;
+ int irq = NUM_IRQS - irqd_to_hwirq(d) - 1;
if (irq != -1) {
unsigned long flags;
@@ -81,7 +81,7 @@ static struct irq_chip pq2ads_pci_ic = {
static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc)
{
- struct pq2ads_pci_pic *priv = get_irq_desc_data(desc);
+ struct pq2ads_pci_pic *priv = irq_desc_get_handler_data(desc);
u32 stat, mask, pend;
int bit;
@@ -106,22 +106,14 @@ static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc)
static int pci_pic_host_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hw)
{
- irq_to_desc(virq)->status |= IRQ_LEVEL;
- set_irq_chip_data(virq, h->host_data);
- set_irq_chip_and_handler(virq, &pq2ads_pci_ic, handle_level_irq);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_data(virq, h->host_data);
+ irq_set_chip_and_handler(virq, &pq2ads_pci_ic, handle_level_irq);
return 0;
}
-static void pci_host_unmap(struct irq_host *h, unsigned int virq)
-{
- /* remove chip and handler */
- set_irq_chip_data(virq, NULL);
- set_irq_chip(virq, NULL);
-}
-
static struct irq_host_ops pci_pic_host_ops = {
.map = pci_pic_host_map,
- .unmap = pci_host_unmap,
};
int __init pq2ads_pci_init_irq(void)
@@ -175,8 +167,8 @@ int __init pq2ads_pci_init_irq(void)
priv->host = host;
host->host_data = priv;
- set_irq_data(irq, priv);
- set_irq_chained_handler(irq, pq2ads_pci_irq_demux);
+ irq_set_handler_data(irq, priv);
+ irq_set_chained_handler(irq, pq2ads_pci_irq_demux);
of_node_put(np);
return 0;
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
index 188272934cfb..104faa8aa23c 100644
--- a/arch/powerpc/platforms/83xx/suspend.c
+++ b/arch/powerpc/platforms/83xx/suspend.c
@@ -318,17 +318,20 @@ static const struct platform_suspend_ops mpc83xx_suspend_ops = {
.end = mpc83xx_suspend_end,
};
+static struct of_device_id pmc_match[];
static int pmc_probe(struct platform_device *ofdev)
{
+ const struct of_device_id *match;
struct device_node *np = ofdev->dev.of_node;
struct resource res;
struct pmc_type *type;
int ret = 0;
- if (!ofdev->dev.of_match)
+ match = of_match_device(pmc_match, &ofdev->dev);
+ if (!match)
return -EINVAL;
- type = ofdev->dev.of_match->data;
+ type = match->data;
if (!of_device_is_available(np))
return -ENODEV;
diff --git a/arch/powerpc/platforms/85xx/ksi8560.c b/arch/powerpc/platforms/85xx/ksi8560.c
index 64447e48f3d5..c46f9359be15 100644
--- a/arch/powerpc/platforms/85xx/ksi8560.c
+++ b/arch/powerpc/platforms/85xx/ksi8560.c
@@ -56,7 +56,7 @@ static void machine_restart(char *cmd)
static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
int cascade_irq;
while ((cascade_irq = cpm2_get_irq()) >= 0)
@@ -106,7 +106,7 @@ static void __init ksi8560_pic_init(void)
cpm2_pic_init(np);
of_node_put(np);
- set_irq_chained_handler(irq, cpm2_cascade);
+ irq_set_chained_handler(irq, cpm2_cascade);
#endif
}
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
index 1352d1107bfd..3b2c9bb66199 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
@@ -50,7 +50,7 @@ static int mpc85xx_exclude_device(struct pci_controller *hose,
static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
int cascade_irq;
while ((cascade_irq = cpm2_get_irq()) >= 0)
@@ -101,7 +101,7 @@ static void __init mpc85xx_ads_pic_init(void)
cpm2_pic_init(np);
of_node_put(np);
- set_irq_chained_handler(irq, cpm2_cascade);
+ irq_set_chained_handler(irq, cpm2_cascade);
#endif
}
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
index 458d91fba91d..6299a2a51ae8 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
@@ -255,7 +255,7 @@ static int mpc85xx_cds_8259_attach(void)
}
/* Success. Connect our low-level cascade handler. */
- set_irq_handler(cascade_irq, mpc85xx_8259_cascade_handler);
+ irq_set_handler(cascade_irq, mpc85xx_8259_cascade_handler);
return 0;
}
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
index 793ead7993ab..c7b97f70312e 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
@@ -47,7 +47,7 @@
#ifdef CONFIG_PPC_I8259
static void mpc85xx_8259_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = i8259_irq();
if (cascade_irq != NO_IRQ) {
@@ -122,7 +122,7 @@ void __init mpc85xx_ds_pic_init(void)
i8259_init(cascade_node, 0);
of_node_put(cascade_node);
- set_irq_chained_handler(cascade_irq, mpc85xx_8259_cascade);
+ irq_set_chained_handler(cascade_irq, mpc85xx_8259_cascade);
#endif /* CONFIG_PPC_I8259 */
}
diff --git a/arch/powerpc/platforms/85xx/sbc8560.c b/arch/powerpc/platforms/85xx/sbc8560.c
index d7e28ec3e072..d2dfd465fbf6 100644
--- a/arch/powerpc/platforms/85xx/sbc8560.c
+++ b/arch/powerpc/platforms/85xx/sbc8560.c
@@ -41,7 +41,7 @@
static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
int cascade_irq;
while ((cascade_irq = cpm2_get_irq()) >= 0)
@@ -92,7 +92,7 @@ static void __init sbc8560_pic_init(void)
cpm2_pic_init(np);
of_node_put(np);
- set_irq_chained_handler(irq, cpm2_cascade);
+ irq_set_chained_handler(irq, cpm2_cascade);
#endif
}
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 0d00ff9d05a0..d6a93a10c0f5 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -41,7 +41,7 @@ extern void __early_start(void);
#define NUM_BOOT_ENTRY 8
#define SIZE_BOOT_ENTRY (NUM_BOOT_ENTRY * sizeof(u32))
-static void __init
+static int __init
smp_85xx_kick_cpu(int nr)
{
unsigned long flags;
@@ -60,7 +60,7 @@ smp_85xx_kick_cpu(int nr)
if (cpu_rel_addr == NULL) {
printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr);
- return;
+ return -ENOENT;
}
/*
@@ -107,6 +107,8 @@ smp_85xx_kick_cpu(int nr)
iounmap(bptr_vaddr);
pr_debug("waited %d msecs for CPU #%d.\n", n, nr);
+
+ return 0;
}
static void __init
@@ -233,8 +235,10 @@ void __init mpc85xx_smp_init(void)
smp_85xx_ops.message_pass = smp_mpic_message_pass;
}
- if (cpu_has_feature(CPU_FTR_DBELL))
- smp_85xx_ops.message_pass = doorbell_message_pass;
+ if (cpu_has_feature(CPU_FTR_DBELL)) {
+ smp_85xx_ops.message_pass = smp_muxed_ipi_message_pass;
+ smp_85xx_ops.cause_ipi = doorbell_cause_ipi;
+ }
BUG_ON(!smp_85xx_ops.message_pass);
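With this change smp_85xx_kick_cpu() (like the other kick_cpu hooks converted in this patch) reports failure instead of returning silently. A hedged, illustrative sketch of how a caller can use the new return value; the real generic code lives in arch/powerpc/kernel/smp.c:

/* Illustrative only: propagating the int return from a kick_cpu hook. */
#include <asm/smp.h>

static int example_cpu_up(unsigned int cpu)
{
	int rc;

	if (smp_ops->kick_cpu) {
		rc = smp_ops->kick_cpu(cpu);
		if (rc)
			return rc;	/* e.g. -ENOENT: no cpu-release-addr */
	}

	/* ...wait for the secondary to check in... */
	return 0;
}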
diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
index 79d85aca4767..12cb9bb2cc68 100644
--- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
+++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
@@ -48,8 +48,6 @@ static struct socrates_fpga_irq_info fpga_irqs[SOCRATES_FPGA_NUM_IRQS] = {
[8] = {0, IRQ_TYPE_LEVEL_HIGH},
};
-#define socrates_fpga_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq)
-
static DEFINE_RAW_SPINLOCK(socrates_fpga_pic_lock);
static void __iomem *socrates_fpga_pic_iobase;
@@ -93,7 +91,7 @@ static inline unsigned int socrates_fpga_pic_get_irq(unsigned int irq)
void socrates_fpga_pic_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq;
/*
@@ -110,11 +108,9 @@ void socrates_fpga_pic_cascade(unsigned int irq, struct irq_desc *desc)
static void socrates_fpga_pic_ack(struct irq_data *d)
{
unsigned long flags;
- unsigned int hwirq, irq_line;
+ unsigned int irq_line, hwirq = irqd_to_hwirq(d);
uint32_t mask;
- hwirq = socrates_fpga_irq_to_hw(d->irq);
-
irq_line = fpga_irqs[hwirq].irq_line;
raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line))
@@ -127,12 +123,10 @@ static void socrates_fpga_pic_ack(struct irq_data *d)
static void socrates_fpga_pic_mask(struct irq_data *d)
{
unsigned long flags;
- unsigned int hwirq;
+ unsigned int hwirq = irqd_to_hwirq(d);
int irq_line;
u32 mask;
- hwirq = socrates_fpga_irq_to_hw(d->irq);
-
irq_line = fpga_irqs[hwirq].irq_line;
raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line))
@@ -145,12 +139,10 @@ static void socrates_fpga_pic_mask(struct irq_data *d)
static void socrates_fpga_pic_mask_ack(struct irq_data *d)
{
unsigned long flags;
- unsigned int hwirq;
+ unsigned int hwirq = irqd_to_hwirq(d);
int irq_line;
u32 mask;
- hwirq = socrates_fpga_irq_to_hw(d->irq);
-
irq_line = fpga_irqs[hwirq].irq_line;
raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line))
@@ -164,12 +156,10 @@ static void socrates_fpga_pic_mask_ack(struct irq_data *d)
static void socrates_fpga_pic_unmask(struct irq_data *d)
{
unsigned long flags;
- unsigned int hwirq;
+ unsigned int hwirq = irqd_to_hwirq(d);
int irq_line;
u32 mask;
- hwirq = socrates_fpga_irq_to_hw(d->irq);
-
irq_line = fpga_irqs[hwirq].irq_line;
raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line))
@@ -182,12 +172,10 @@ static void socrates_fpga_pic_unmask(struct irq_data *d)
static void socrates_fpga_pic_eoi(struct irq_data *d)
{
unsigned long flags;
- unsigned int hwirq;
+ unsigned int hwirq = irqd_to_hwirq(d);
int irq_line;
u32 mask;
- hwirq = socrates_fpga_irq_to_hw(d->irq);
-
irq_line = fpga_irqs[hwirq].irq_line;
raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line))
@@ -201,12 +189,10 @@ static int socrates_fpga_pic_set_type(struct irq_data *d,
unsigned int flow_type)
{
unsigned long flags;
- unsigned int hwirq;
+ unsigned int hwirq = irqd_to_hwirq(d);
int polarity;
u32 mask;
- hwirq = socrates_fpga_irq_to_hw(d->irq);
-
if (fpga_irqs[hwirq].type != IRQ_TYPE_NONE)
return -EINVAL;
@@ -245,9 +231,9 @@ static int socrates_fpga_pic_host_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hwirq)
{
/* All interrupts are LEVEL sensitive */
- irq_to_desc(virq)->status |= IRQ_LEVEL;
- set_irq_chip_and_handler(virq, &socrates_fpga_pic_chip,
- handle_fasteoi_irq);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_and_handler(virq, &socrates_fpga_pic_chip,
+ handle_fasteoi_irq);
return 0;
}
@@ -308,8 +294,8 @@ void socrates_fpga_pic_init(struct device_node *pic)
pr_warning("FPGA PIC: can't get irq%d.\n", i);
continue;
}
- set_irq_chained_handler(socrates_fpga_irqs[i],
- socrates_fpga_pic_cascade);
+ irq_set_chained_handler(socrates_fpga_irqs[i],
+ socrates_fpga_pic_cascade);
}
socrates_fpga_pic_iobase = of_iomap(pic, 0);
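The socrates callbacks above (and the gef, spider and flipper ones later in the patch) no longer reach into the global irq_map[]; everything a chip callback needs comes from the struct irq_data it is handed. A small sketch of that style:

/* Sketch of an irq_chip callback after the conversion: the hardware number
 * and per-PIC data are taken from the irq_data, not from irq_map[].
 */
#include <linux/irq.h>
#include <asm/io.h>

static void example_pic_mask(struct irq_data *d)
{
	unsigned int hwirq = irqd_to_hwirq(d);			/* hw number of this mapping */
	void __iomem *regs = irq_data_get_irq_chip_data(d);	/* set in the host .map hook */
	u32 mask;

	mask = in_be32(regs);
	out_be32(regs, mask & ~(1u << hwirq));
}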
diff --git a/arch/powerpc/platforms/85xx/stx_gp3.c b/arch/powerpc/platforms/85xx/stx_gp3.c
index 2b62b064eac7..5387e9f06bdb 100644
--- a/arch/powerpc/platforms/85xx/stx_gp3.c
+++ b/arch/powerpc/platforms/85xx/stx_gp3.c
@@ -46,7 +46,7 @@
static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
int cascade_irq;
while ((cascade_irq = cpm2_get_irq()) >= 0)
@@ -102,7 +102,7 @@ static void __init stx_gp3_pic_init(void)
cpm2_pic_init(np);
of_node_put(np);
- set_irq_chained_handler(irq, cpm2_cascade);
+ irq_set_chained_handler(irq, cpm2_cascade);
#endif
}
diff --git a/arch/powerpc/platforms/85xx/tqm85xx.c b/arch/powerpc/platforms/85xx/tqm85xx.c
index 2265b68e3279..325de772725a 100644
--- a/arch/powerpc/platforms/85xx/tqm85xx.c
+++ b/arch/powerpc/platforms/85xx/tqm85xx.c
@@ -44,7 +44,7 @@
static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
int cascade_irq;
while ((cascade_irq = cpm2_get_irq()) >= 0)
@@ -100,7 +100,7 @@ static void __init tqm85xx_pic_init(void)
cpm2_pic_init(np);
of_node_put(np);
- set_irq_chained_handler(irq, cpm2_cascade);
+ irq_set_chained_handler(irq, cpm2_cascade);
#endif
}
diff --git a/arch/powerpc/platforms/86xx/gef_pic.c b/arch/powerpc/platforms/86xx/gef_pic.c
index 0adfe3b740cd..94594e58594c 100644
--- a/arch/powerpc/platforms/86xx/gef_pic.c
+++ b/arch/powerpc/platforms/86xx/gef_pic.c
@@ -46,8 +46,6 @@
#define GEF_PIC_CPU0_MCP_MASK GEF_PIC_MCP_MASK(0)
#define GEF_PIC_CPU1_MCP_MASK GEF_PIC_MCP_MASK(1)
-#define gef_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq)
-
static DEFINE_RAW_SPINLOCK(gef_pic_lock);
@@ -95,7 +93,7 @@ static int gef_pic_cascade_irq;
void gef_pic_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq;
/*
@@ -113,11 +111,9 @@ void gef_pic_cascade(unsigned int irq, struct irq_desc *desc)
static void gef_pic_mask(struct irq_data *d)
{
unsigned long flags;
- unsigned int hwirq;
+ unsigned int hwirq = irqd_to_hwirq(d);
u32 mask;
- hwirq = gef_irq_to_hw(d->irq);
-
raw_spin_lock_irqsave(&gef_pic_lock, flags);
mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0));
mask &= ~(1 << hwirq);
@@ -136,11 +132,9 @@ static void gef_pic_mask_ack(struct irq_data *d)
static void gef_pic_unmask(struct irq_data *d)
{
unsigned long flags;
- unsigned int hwirq;
+ unsigned int hwirq = irqd_to_hwirq(d);
u32 mask;
- hwirq = gef_irq_to_hw(d->irq);
-
raw_spin_lock_irqsave(&gef_pic_lock, flags);
mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0));
mask |= (1 << hwirq);
@@ -163,8 +157,8 @@ static int gef_pic_host_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hwirq)
{
/* All interrupts are LEVEL sensitive */
- irq_to_desc(virq)->status |= IRQ_LEVEL;
- set_irq_chip_and_handler(virq, &gef_pic_chip, handle_level_irq);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_and_handler(virq, &gef_pic_chip, handle_level_irq);
return 0;
}
@@ -225,7 +219,7 @@ void __init gef_pic_init(struct device_node *np)
return;
/* Chain with parent controller */
- set_irq_chained_handler(gef_pic_cascade_irq, gef_pic_cascade);
+ irq_set_chained_handler(gef_pic_cascade_irq, gef_pic_cascade);
}
/*
diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
index 018cc67be426..a896511690c2 100644
--- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
+++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
@@ -66,7 +66,7 @@ static void __init mpc8610_suspend_init(void)
return;
}
- ret = request_irq(irq, mpc8610_sw9_irq, 0, "sw9/wakeup", NULL);
+ ret = request_irq(irq, mpc8610_sw9_irq, 0, "sw9:wakeup", NULL);
if (ret) {
pr_err("%s: can't request pixis event IRQ: %d\n",
__func__, ret);
@@ -105,45 +105,77 @@ machine_device_initcall(mpc86xx_hpcd, mpc8610_declare_of_platform_devices);
#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
-static u32 get_busfreq(void)
-{
- struct device_node *node;
-
- u32 fs_busfreq = 0;
- node = of_find_node_by_type(NULL, "cpu");
- if (node) {
- unsigned int size;
- const unsigned int *prop =
- of_get_property(node, "bus-frequency", &size);
- if (prop)
- fs_busfreq = *prop;
- of_node_put(node);
- };
- return fs_busfreq;
-}
+/*
+ * DIU Area Descriptor
+ *
+ * The MPC8610 reference manual shows the bits of the AD register in
+ * little-endian order, which causes the BLUE_C field to be split into two
+ * parts. To simplify the definition of the MAKE_AD() macro, we define the
+ * fields in big-endian order and byte-swap the result.
+ *
+ * So even though the registers don't look like they're in the
+ * same bit positions as they are on the P1022, the same value is written to
+ * the AD register on the MPC8610 and on the P1022.
+ */
+#define AD_BYTE_F 0x10000000
+#define AD_ALPHA_C_MASK 0x0E000000
+#define AD_ALPHA_C_SHIFT 25
+#define AD_BLUE_C_MASK 0x01800000
+#define AD_BLUE_C_SHIFT 23
+#define AD_GREEN_C_MASK 0x00600000
+#define AD_GREEN_C_SHIFT 21
+#define AD_RED_C_MASK 0x00180000
+#define AD_RED_C_SHIFT 19
+#define AD_PALETTE 0x00040000
+#define AD_PIXEL_S_MASK 0x00030000
+#define AD_PIXEL_S_SHIFT 16
+#define AD_COMP_3_MASK 0x0000F000
+#define AD_COMP_3_SHIFT 12
+#define AD_COMP_2_MASK 0x00000F00
+#define AD_COMP_2_SHIFT 8
+#define AD_COMP_1_MASK 0x000000F0
+#define AD_COMP_1_SHIFT 4
+#define AD_COMP_0_MASK 0x0000000F
+#define AD_COMP_0_SHIFT 0
+
+#define MAKE_AD(alpha, red, blue, green, size, c0, c1, c2, c3) \
+ cpu_to_le32(AD_BYTE_F | (alpha << AD_ALPHA_C_SHIFT) | \
+ (blue << AD_BLUE_C_SHIFT) | (green << AD_GREEN_C_SHIFT) | \
+ (red << AD_RED_C_SHIFT) | (c3 << AD_COMP_3_SHIFT) | \
+ (c2 << AD_COMP_2_SHIFT) | (c1 << AD_COMP_1_SHIFT) | \
+ (c0 << AD_COMP_0_SHIFT) | (size << AD_PIXEL_S_SHIFT))
unsigned int mpc8610hpcd_get_pixel_format(unsigned int bits_per_pixel,
int monitor_port)
{
static const unsigned long pixelformat[][3] = {
- {0x88882317, 0x88083218, 0x65052119},
- {0x88883316, 0x88082219, 0x65053118},
+ {
+ MAKE_AD(3, 0, 2, 1, 3, 8, 8, 8, 8),
+ MAKE_AD(4, 2, 0, 1, 2, 8, 8, 8, 0),
+ MAKE_AD(4, 0, 2, 1, 1, 5, 6, 5, 0)
+ },
+ {
+ MAKE_AD(3, 2, 0, 1, 3, 8, 8, 8, 8),
+ MAKE_AD(4, 0, 2, 1, 2, 8, 8, 8, 0),
+ MAKE_AD(4, 2, 0, 1, 1, 5, 6, 5, 0)
+ },
};
- unsigned int pix_fmt, arch_monitor;
+ unsigned int arch_monitor;
+ /* The DVI port is mis-wired on revision 1 of this board. */
arch_monitor = ((*pixis_arch == 0x01) && (monitor_port == 0))? 0 : 1;
- /* DVI port for board version 0x01 */
-
- if (bits_per_pixel == 32)
- pix_fmt = pixelformat[arch_monitor][0];
- else if (bits_per_pixel == 24)
- pix_fmt = pixelformat[arch_monitor][1];
- else if (bits_per_pixel == 16)
- pix_fmt = pixelformat[arch_monitor][2];
- else
- pix_fmt = pixelformat[1][0];
-
- return pix_fmt;
+
+ switch (bits_per_pixel) {
+ case 32:
+ return pixelformat[arch_monitor][0];
+ case 24:
+ return pixelformat[arch_monitor][1];
+ case 16:
+ return pixelformat[arch_monitor][2];
+ default:
+ pr_err("fsl-diu: unsupported pixel depth %u\n", bits_per_pixel);
+ return 0;
+ }
}
void mpc8610hpcd_set_gamma_table(int monitor_port, char *gamma_table_base)
@@ -190,8 +222,7 @@ void mpc8610hpcd_set_pixel_clock(unsigned int pixclock)
}
/* Pixel Clock configuration */
- pr_debug("DIU: Bus Frequency = %d\n", get_busfreq());
- speed_ccb = get_busfreq();
+ speed_ccb = fsl_get_sys_freq();
/* Calculate the pixel clock with the smallest error */
/* calculate the following in steps to avoid overflow */
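The MAKE_AD() macro above encodes the DIU Area Descriptor fields in big-endian bit order and byte-swaps the result; on the big-endian MPC8610, cpu_to_le32() is a byte swap, so the 32bpp entry should reproduce the old hard-coded 0x88882317 that the replaced table used. A small stand-alone sketch to check that, with __builtin_bswap32() standing in for cpu_to_le32():

/* Stand-alone sanity check of the MAKE_AD() encoding shown above. */
#include <stdint.h>
#include <stdio.h>

#define AD_BYTE_F		0x10000000
#define AD_ALPHA_C_SHIFT	25
#define AD_BLUE_C_SHIFT		23
#define AD_GREEN_C_SHIFT	21
#define AD_RED_C_SHIFT		19
#define AD_PIXEL_S_SHIFT	16
#define AD_COMP_3_SHIFT		12
#define AD_COMP_2_SHIFT		8
#define AD_COMP_1_SHIFT		4
#define AD_COMP_0_SHIFT		0

#define MAKE_AD(alpha, red, blue, green, size, c0, c1, c2, c3)	\
	__builtin_bswap32(AD_BYTE_F | ((alpha) << AD_ALPHA_C_SHIFT) | \
	((blue) << AD_BLUE_C_SHIFT) | ((green) << AD_GREEN_C_SHIFT) | \
	((red) << AD_RED_C_SHIFT) | ((c3) << AD_COMP_3_SHIFT) | \
	((c2) << AD_COMP_2_SHIFT) | ((c1) << AD_COMP_1_SHIFT) | \
	((c0) << AD_COMP_0_SHIFT) | ((size) << AD_PIXEL_S_SHIFT))

int main(void)
{
	/* 32bpp entry for the primary monitor port */
	uint32_t ad = MAKE_AD(3, 0, 2, 1, 3, 8, 8, 8, 8);

	printf("AD = 0x%08x (expected 0x88882317)\n", (unsigned int)ad);
	return ad == 0x88882317u ? 0 : 1;
}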
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_smp.c b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
index eacea0e3fcc8..af09baee22cb 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_smp.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
@@ -56,7 +56,7 @@ smp_86xx_release_core(int nr)
}
-static void __init
+static int __init
smp_86xx_kick_cpu(int nr)
{
unsigned int save_vector;
@@ -65,7 +65,7 @@ smp_86xx_kick_cpu(int nr)
unsigned int *vector = (unsigned int *)(KERNELBASE + 0x100);
if (nr < 0 || nr >= NR_CPUS)
- return;
+ return -ENOENT;
pr_debug("smp_86xx_kick_cpu: kick CPU #%d\n", nr);
@@ -92,6 +92,8 @@ smp_86xx_kick_cpu(int nr)
local_irq_restore(flags);
pr_debug("wait CPU #%d for %d msecs.\n", nr, n);
+
+ return 0;
}
diff --git a/arch/powerpc/platforms/86xx/pic.c b/arch/powerpc/platforms/86xx/pic.c
index cbe33639b478..8ef8960abda6 100644
--- a/arch/powerpc/platforms/86xx/pic.c
+++ b/arch/powerpc/platforms/86xx/pic.c
@@ -19,7 +19,7 @@
#ifdef CONFIG_PPC_I8259
static void mpc86xx_8259_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = i8259_irq();
if (cascade_irq != NO_IRQ)
@@ -77,6 +77,6 @@ void __init mpc86xx_init_irq(void)
i8259_init(cascade_node, 0);
of_node_put(cascade_node);
- set_irq_chained_handler(cascade_irq, mpc86xx_8259_cascade);
+ irq_set_chained_handler(cascade_irq, mpc86xx_8259_cascade);
#endif
}
diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c
index fabb108e8744..1e121088826f 100644
--- a/arch/powerpc/platforms/8xx/m8xx_setup.c
+++ b/arch/powerpc/platforms/8xx/m8xx_setup.c
@@ -150,7 +150,7 @@ void __init mpc8xx_calibrate_decr(void)
*/
cpu = of_find_node_by_type(NULL, "cpu");
virq= irq_of_parse_and_map(cpu, 0);
- irq = irq_map[virq].hwirq;
+ irq = virq_to_hw(virq);
sys_tmr2 = immr_map(im_sit);
out_be16(&sys_tmr2->sit_tbscr, ((1 << (7 - (irq/2))) << 8) |
@@ -226,11 +226,11 @@ static void cpm_cascade(unsigned int irq, struct irq_desc *desc)
generic_handle_irq(cascade_irq);
- chip = get_irq_desc_chip(cdesc);
+ chip = irq_desc_get_chip(cdesc);
chip->irq_eoi(&cdesc->irq_data);
}
- chip = get_irq_desc_chip(desc);
+ chip = irq_desc_get_chip(desc);
chip->irq_eoi(&desc->irq_data);
}
@@ -251,5 +251,5 @@ void __init mpc8xx_pics_init(void)
irq = cpm_pic_init();
if (irq != NO_IRQ)
- set_irq_chained_handler(irq, cpm_cascade);
+ irq_set_chained_handler(irq, cpm_cascade);
}
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 20576829eca5..f970ca2b180c 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -20,6 +20,7 @@ source "arch/powerpc/platforms/embedded6xx/Kconfig"
source "arch/powerpc/platforms/44x/Kconfig"
source "arch/powerpc/platforms/40x/Kconfig"
source "arch/powerpc/platforms/amigaone/Kconfig"
+source "arch/powerpc/platforms/wsp/Kconfig"
config KVM_GUEST
bool "KVM Guest support"
@@ -46,7 +47,7 @@ config PPC_OF_BOOT_TRAMPOLINE
help
Support from booting from Open Firmware or yaboot using an
Open Firmware client interface. This enables the kernel to
- communicate with open firmware to retrieve system informations
+ communicate with open firmware to retrieve system information
such as the device tree.
In case of doubt, say Y
@@ -56,16 +57,19 @@ config UDBG_RTAS_CONSOLE
depends on PPC_RTAS
default n
+config PPC_SMP_MUXED_IPI
+ bool
+ help
+ Select this option if your platform supports SMP and your
+ interrupt controller provides fewer than 4 interrupts to each
+ CPU. This will enable the generic code to multiplex the 4
+ messages onto one IPI.
+
config PPC_UDBG_BEAT
bool "BEAT based debug console"
depends on PPC_CELLEB
default n
-config XICS
- depends on PPC_PSERIES
- bool
- default y
-
config IPIC
bool
default n
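The new PPC_SMP_MUXED_IPI option describes multiplexing the four kernel IPI messages onto the single interrupt that a doorbell-style controller provides. A hedged sketch of the idea follows; the names (ipi_pending, hw_cause_ipi, deliver_message) are hypothetical, and the real implementation is the smp_muxed_ipi_* code referenced elsewhere in this patch, which uses per-cpu data rather than a flat array.

/* Hedged sketch: multiplexing 4 IPI messages onto one hardware IPI. */
#include <linux/bitops.h>
#include <linux/smp.h>

static unsigned long ipi_pending[NR_CPUS];	/* pending-message mask per cpu */

static void hw_cause_ipi(int cpu);		/* ring the single doorbell/interrupt */
static void deliver_message(int msg);		/* reschedule, call-function, ... */

static void muxed_ipi_message_pass(int cpu, int msg)
{
	set_bit(msg, &ipi_pending[cpu]);	/* publish which message is pending */
	smp_mb();				/* make the bit visible before the IPI */
	hw_cause_ipi(cpu);			/* one interrupt, regardless of msg */
}

static void muxed_ipi_demux(void)		/* runs in the hardware IPI handler */
{
	unsigned long *pending = &ipi_pending[smp_processor_id()];
	int msg;

	while (*pending)
		for (msg = 0; msg < 4; msg++)
			if (test_and_clear_bit(msg, pending))
				deliver_message(msg);
}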
@@ -147,14 +151,27 @@ config PPC_970_NAP
bool
default n
+config PPC_P7_NAP
+ bool
+ default n
+
config PPC_INDIRECT_IO
bool
select GENERIC_IOMAP
- default n
+
+config PPC_INDIRECT_PIO
+ bool
+ select PPC_INDIRECT_IO
+
+config PPC_INDIRECT_MMIO
+ bool
+ select PPC_INDIRECT_IO
+
+config PPC_IO_WORKAROUNDS
+ bool
config GENERIC_IOMAP
bool
- default n
source "drivers/cpufreq/Kconfig"
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 111138c55f9c..2165b65876f9 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -73,6 +73,7 @@ config PPC_BOOK3S_64
config PPC_BOOK3E_64
bool "Embedded processors"
select PPC_FPU # Make it a choice ?
+ select PPC_SMP_MUXED_IPI
endchoice
@@ -107,6 +108,10 @@ config POWER4
depends on PPC64 && PPC_BOOK3S
def_bool y
+config PPC_A2
+ bool
+ depends on PPC_BOOK3E_64
+
config TUNE_CELL
bool "Optimize for Cell Broadband Engine"
depends on PPC64 && PPC_BOOK3S
@@ -174,6 +179,7 @@ config FSL_BOOKE
config PPC_FSL_BOOK3E
bool
select FSL_EMB_PERFMON
+ select PPC_SMP_MUXED_IPI
default y if FSL_BOOKE
config PTE_64BIT
@@ -226,6 +232,24 @@ config VSX
If in doubt, say Y here.
+config PPC_ICSWX
+ bool "Support for PowerPC icswx coprocessor instruction"
+ depends on POWER4
+ default n
+ ---help---
+
+ This option enables kernel support for the PowerPC Initiate
+ Coprocessor Store Word (icswx) coprocessor instruction on POWER7
+ or newer processors.
+
+ This option is only useful if you have a processor that supports
+ the icswx coprocessor instruction. It does not have any effect
+ on processors without the icswx coprocessor instruction.
+
+ This option slightly increases kernel memory usage.
+
+ If in doubt, say N here.
+
config SPE
bool "SPE Support"
depends on E200 || (E500 && !PPC_E500MC)
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile
index fdb9f0b0d7a8..73e2116cfeed 100644
--- a/arch/powerpc/platforms/Makefile
+++ b/arch/powerpc/platforms/Makefile
@@ -22,3 +22,4 @@ obj-$(CONFIG_PPC_CELL) += cell/
obj-$(CONFIG_PPC_PS3) += ps3/
obj-$(CONFIG_EMBEDDED6xx) += embedded6xx/
obj-$(CONFIG_AMIGAONE) += amigaone/
+obj-$(CONFIG_PPC_WSP) += wsp/
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 48cd7d2e1b75..67d5009b4e86 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -6,14 +6,17 @@ config PPC_CELL_COMMON
bool
select PPC_CELL
select PPC_DCR_MMIO
- select PPC_INDIRECT_IO
+ select PPC_INDIRECT_PIO
+ select PPC_INDIRECT_MMIO
select PPC_NATIVE
select PPC_RTAS
+ select IRQ_EDGE_EOI_HANDLER
config PPC_CELL_NATIVE
bool
select PPC_CELL_COMMON
select MPIC
+ select PPC_IO_WORKAROUNDS
select IBM_NEW_EMAC_EMAC4
select IBM_NEW_EMAC_RGMII
select IBM_NEW_EMAC_ZMII #test only
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index 83fafe922641..a4a89350bcfc 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -1,7 +1,7 @@
obj-$(CONFIG_PPC_CELL_COMMON) += cbe_regs.o interrupt.o pervasive.o
obj-$(CONFIG_PPC_CELL_NATIVE) += iommu.o setup.o spider-pic.o \
- pmu.o io-workarounds.o spider-pci.o
+ pmu.o spider-pci.o
obj-$(CONFIG_CBE_RAS) += ras.o
obj-$(CONFIG_CBE_THERM) += cbe_thermal.o
@@ -39,11 +39,10 @@ obj-y += celleb_setup.o \
celleb_pci.o celleb_scc_epci.o \
celleb_scc_pciex.o \
celleb_scc_uhc.o \
- io-workarounds.o spider-pci.o \
- beat.o beat_htab.o beat_hvCall.o \
- beat_interrupt.o beat_iommu.o
+ spider-pci.o beat.o beat_htab.o \
+ beat_hvCall.o beat_interrupt.o \
+ beat_iommu.o
-obj-$(CONFIG_SMP) += beat_smp.o
obj-$(CONFIG_PPC_UDBG_BEAT) += beat_udbg.o
obj-$(CONFIG_SERIAL_TXX9) += celleb_scc_sio.o
obj-$(CONFIG_SPU_BASE) += beat_spu_priv1.o
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index c48b66a67e42..ac06903e136a 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -93,8 +93,8 @@ static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val)
static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
- struct axon_msic *msic = get_irq_data(irq);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct axon_msic *msic = irq_get_handler_data(irq);
u32 write_offset, msi;
int idx;
int retry = 0;
@@ -113,7 +113,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
pr_devel("axon_msi: woff %x roff %x msi %x\n",
write_offset, msic->read_offset, msi);
- if (msi < NR_IRQS && irq_map[msi].host == msic->irq_host) {
+ if (msi < NR_IRQS && irq_get_chip_data(msi) == msic) {
generic_handle_irq(msi);
msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
} else {
@@ -287,7 +287,7 @@ static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
}
dev_dbg(&dev->dev, "axon_msi: allocated virq 0x%x\n", virq);
- set_irq_msi(virq, entry);
+ irq_set_msi_desc(virq, entry);
msg.data = virq;
write_msi_msg(virq, &msg);
}
@@ -305,7 +305,7 @@ static void axon_msi_teardown_msi_irqs(struct pci_dev *dev)
if (entry->irq == NO_IRQ)
continue;
- set_irq_msi(entry->irq, NULL);
+ irq_set_msi_desc(entry->irq, NULL);
irq_dispose_mapping(entry->irq);
}
}
@@ -320,7 +320,8 @@ static struct irq_chip msic_irq_chip = {
static int msic_host_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hw)
{
- set_irq_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq);
+ irq_set_chip_data(virq, h->host_data);
+ irq_set_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq);
return 0;
}
@@ -400,8 +401,8 @@ static int axon_msi_probe(struct platform_device *device)
msic->irq_host->host_data = msic;
- set_irq_data(virq, msic);
- set_irq_chained_handler(virq, axon_msi_cascade);
+ irq_set_handler_data(virq, msic);
+ irq_set_chained_handler(virq, axon_msi_cascade);
pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq);
/* Enable the MSIC hardware */
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c
index 0b8f7d7135c5..55015e1f6939 100644
--- a/arch/powerpc/platforms/cell/beat_interrupt.c
+++ b/arch/powerpc/platforms/cell/beat_interrupt.c
@@ -136,29 +136,18 @@ static void beatic_pic_host_unmap(struct irq_host *h, unsigned int virq)
static int beatic_pic_host_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hw)
{
- struct irq_desc *desc = irq_to_desc(virq);
int64_t err;
err = beat_construct_and_connect_irq_plug(virq, hw);
if (err < 0)
return -EIO;
- desc->status |= IRQ_LEVEL;
- set_irq_chip_and_handler(virq, &beatic_pic, handle_fasteoi_irq);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_and_handler(virq, &beatic_pic, handle_fasteoi_irq);
return 0;
}
/*
- * Update binding hardware IRQ number (hw) and Virtuql
- * IRQ number (virq). This is called only once for a given mapping.
- */
-static void beatic_pic_host_remap(struct irq_host *h, unsigned int virq,
- irq_hw_number_t hw)
-{
- beat_construct_and_connect_irq_plug(virq, hw);
-}
-
-/*
* Translate device-tree interrupt spec to irq_hw_number_t style (ulong),
* to pass away to irq_create_mapping().
*
@@ -185,7 +174,6 @@ static int beatic_pic_host_match(struct irq_host *h, struct device_node *np)
static struct irq_host_ops beatic_pic_host_ops = {
.map = beatic_pic_host_map,
- .remap = beatic_pic_host_remap,
.unmap = beatic_pic_host_unmap,
.xlate = beatic_pic_host_xlate,
.match = beatic_pic_host_match,
@@ -258,22 +246,6 @@ void __init beatic_init_IRQ(void)
irq_set_default_host(beatic_host);
}
-#ifdef CONFIG_SMP
-
-/* Nullified to compile with SMP mode */
-void beatic_setup_cpu(int cpu)
-{
-}
-
-void beatic_cause_IPI(int cpu, int mesg)
-{
-}
-
-void beatic_request_IPIs(void)
-{
-}
-#endif /* CONFIG_SMP */
-
void beatic_deinit_IRQ(void)
{
int i;
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.h b/arch/powerpc/platforms/cell/beat_interrupt.h
index b470fd0051f1..a7e52f91a078 100644
--- a/arch/powerpc/platforms/cell/beat_interrupt.h
+++ b/arch/powerpc/platforms/cell/beat_interrupt.h
@@ -24,9 +24,6 @@
extern void beatic_init_IRQ(void);
extern unsigned int beatic_get_irq(void);
-extern void beatic_cause_IPI(int cpu, int mesg);
-extern void beatic_request_IPIs(void);
-extern void beatic_setup_cpu(int);
extern void beatic_deinit_IRQ(void);
#endif
diff --git a/arch/powerpc/platforms/cell/beat_smp.c b/arch/powerpc/platforms/cell/beat_smp.c
deleted file mode 100644
index 26efc204c47f..000000000000
--- a/arch/powerpc/platforms/cell/beat_smp.c
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * SMP support for Celleb platform. (Incomplete)
- *
- * (C) Copyright 2006 TOSHIBA CORPORATION
- *
- * This code is based on arch/powerpc/platforms/cell/smp.c:
- * Dave Engebretsen, Peter Bergner, and
- * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
- * Plus various changes from other IBM teams...
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#undef DEBUG
-
-#include <linux/kernel.h>
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/threads.h>
-#include <linux/cpu.h>
-
-#include <asm/irq.h>
-#include <asm/smp.h>
-#include <asm/machdep.h>
-#include <asm/udbg.h>
-
-#include "beat_interrupt.h"
-
-#ifdef DEBUG
-#define DBG(fmt...) udbg_printf(fmt)
-#else
-#define DBG(fmt...)
-#endif
-
-/*
- * The primary thread of each non-boot processor is recorded here before
- * smp init.
- */
-/* static cpumask_t of_spin_map; */
-
-/**
- * smp_startup_cpu() - start the given cpu
- *
- * At boot time, there is nothing to do for primary threads which were
- * started from Open Firmware. For anything else, call RTAS with the
- * appropriate start location.
- *
- * Returns:
- * 0 - failure
- * 1 - success
- */
-static inline int __devinit smp_startup_cpu(unsigned int lcpu)
-{
- return 0;
-}
-
-static void smp_beatic_message_pass(int target, int msg)
-{
- unsigned int i;
-
- if (target < NR_CPUS) {
- beatic_cause_IPI(target, msg);
- } else {
- for_each_online_cpu(i) {
- if (target == MSG_ALL_BUT_SELF
- && i == smp_processor_id())
- continue;
- beatic_cause_IPI(i, msg);
- }
- }
-}
-
-static int __init smp_beatic_probe(void)
-{
- return cpus_weight(cpu_possible_map);
-}
-
-static void __devinit smp_beatic_setup_cpu(int cpu)
-{
- beatic_setup_cpu(cpu);
-}
-
-static void __devinit smp_celleb_kick_cpu(int nr)
-{
- BUG_ON(nr < 0 || nr >= NR_CPUS);
-
- if (!smp_startup_cpu(nr))
- return;
-}
-
-static int smp_celleb_cpu_bootable(unsigned int nr)
-{
- return 1;
-}
-static struct smp_ops_t bpa_beatic_smp_ops = {
- .message_pass = smp_beatic_message_pass,
- .probe = smp_beatic_probe,
- .kick_cpu = smp_celleb_kick_cpu,
- .setup_cpu = smp_beatic_setup_cpu,
- .cpu_bootable = smp_celleb_cpu_bootable,
-};
-
-/* This is called very early */
-void __init smp_init_celleb(void)
-{
- DBG(" -> smp_init_celleb()\n");
-
- smp_ops = &bpa_beatic_smp_ops;
-
- DBG(" <- smp_init_celleb()\n");
-}
diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c
index dbc338f187a2..f3917e7a5b44 100644
--- a/arch/powerpc/platforms/cell/cbe_regs.c
+++ b/arch/powerpc/platforms/cell/cbe_regs.c
@@ -45,8 +45,8 @@ static struct cbe_thread_map
unsigned int cbe_id;
} cbe_thread_map[NR_CPUS];
-static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = CPU_MASK_NONE };
-static cpumask_t cbe_first_online_cpu = CPU_MASK_NONE;
+static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = {CPU_BITS_NONE} };
+static cpumask_t cbe_first_online_cpu = { CPU_BITS_NONE };
static struct cbe_regs_map *cbe_find_map(struct device_node *np)
{
@@ -159,7 +159,8 @@ EXPORT_SYMBOL_GPL(cbe_cpu_to_node);
u32 cbe_node_to_cpu(int node)
{
- return find_first_bit( (unsigned long *) &cbe_local_mask[node], sizeof(cpumask_t));
+ return cpumask_first(&cbe_local_mask[node]);
+
}
EXPORT_SYMBOL_GPL(cbe_node_to_cpu);
@@ -268,9 +269,9 @@ void __init cbe_regs_init(void)
thread->regs = map;
thread->cbe_id = cbe_id;
map->be_node = thread->be_node;
- cpu_set(i, cbe_local_mask[cbe_id]);
+ cpumask_set_cpu(i, &cbe_local_mask[cbe_id]);
if(thread->thread_id == 0)
- cpu_set(i, cbe_first_online_cpu);
+ cpumask_set_cpu(i, &cbe_first_online_cpu);
}
}
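cbe_regs.c above moves from the old cpu_set()/CPU_MASK_NONE style to the cpumask_* accessors; the same conversion appears in cell/smp.c further down. A minimal sketch of the replacement API:

/* Sketch of the cpumask_* accessors used in the conversions above. */
#include <linux/cpumask.h>
#include <linux/kernel.h>

static cpumask_t example_mask = { CPU_BITS_NONE };

static void example_cpumask_usage(int cpu)
{
	cpumask_set_cpu(cpu, &example_mask);		/* was: cpu_set(cpu, mask) */

	if (cpumask_test_cpu(cpu, &example_mask))	/* was: cpu_isset() */
		pr_info("first cpu in mask: %d (weight %d)\n",
			cpumask_first(&example_mask),	/* was: find_first_bit() */
			cpumask_weight(&example_mask));	/* was: cpus_weight() */
}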
diff --git a/arch/powerpc/platforms/cell/celleb_pci.c b/arch/powerpc/platforms/cell/celleb_pci.c
index 404d1fc04d59..5822141aa63f 100644
--- a/arch/powerpc/platforms/cell/celleb_pci.c
+++ b/arch/powerpc/platforms/cell/celleb_pci.c
@@ -41,7 +41,6 @@
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
-#include "io-workarounds.h"
#include "celleb_pci.h"
#define MAX_PCI_DEVICES 32
@@ -320,7 +319,7 @@ static int __init celleb_setup_fake_pci_device(struct device_node *node,
size = 256;
config = &private->fake_config[devno][fn];
- *config = alloc_maybe_bootmem(size, GFP_KERNEL);
+ *config = zalloc_maybe_bootmem(size, GFP_KERNEL);
if (*config == NULL) {
printk(KERN_ERR "PCI: "
"not enough memory for fake configuration space\n");
@@ -331,7 +330,7 @@ static int __init celleb_setup_fake_pci_device(struct device_node *node,
size = sizeof(struct celleb_pci_resource);
res = &private->res[devno][fn];
- *res = alloc_maybe_bootmem(size, GFP_KERNEL);
+ *res = zalloc_maybe_bootmem(size, GFP_KERNEL);
if (*res == NULL) {
printk(KERN_ERR
"PCI: not enough memory for resource data space\n");
@@ -432,7 +431,7 @@ static int __init phb_set_bus_ranges(struct device_node *dev,
static void __init celleb_alloc_private_mem(struct pci_controller *hose)
{
hose->private_data =
- alloc_maybe_bootmem(sizeof(struct celleb_pci_private),
+ zalloc_maybe_bootmem(sizeof(struct celleb_pci_private),
GFP_KERNEL);
}
@@ -469,18 +468,6 @@ static struct of_device_id celleb_phb_match[] __initdata = {
},
};
-static int __init celleb_io_workaround_init(struct pci_controller *phb,
- struct celleb_phb_spec *phb_spec)
-{
- if (phb_spec->ops) {
- iowa_register_bus(phb, phb_spec->ops, phb_spec->iowa_init,
- phb_spec->iowa_data);
- io_workaround_init();
- }
-
- return 0;
-}
-
int __init celleb_setup_phb(struct pci_controller *phb)
{
struct device_node *dev = phb->dn;
@@ -500,7 +487,11 @@ int __init celleb_setup_phb(struct pci_controller *phb)
if (rc)
return 1;
- return celleb_io_workaround_init(phb, phb_spec);
+ if (phb_spec->ops)
+ iowa_register_bus(phb, phb_spec->ops,
+ phb_spec->iowa_init,
+ phb_spec->iowa_data);
+ return 0;
}
int celleb_pci_probe_mode(struct pci_bus *bus)
diff --git a/arch/powerpc/platforms/cell/celleb_pci.h b/arch/powerpc/platforms/cell/celleb_pci.h
index 4cba1523ec50..a801fcc5f389 100644
--- a/arch/powerpc/platforms/cell/celleb_pci.h
+++ b/arch/powerpc/platforms/cell/celleb_pci.h
@@ -26,8 +26,9 @@
#include <asm/pci-bridge.h>
#include <asm/prom.h>
#include <asm/ppc-pci.h>
+#include <asm/io-workarounds.h>
-#include "io-workarounds.h"
+struct iowa_bus;
struct celleb_phb_spec {
int (*setup)(struct device_node *, struct pci_controller *);
diff --git a/arch/powerpc/platforms/cell/celleb_setup.c b/arch/powerpc/platforms/cell/celleb_setup.c
index e53845579770..d58d9bae4b9b 100644
--- a/arch/powerpc/platforms/cell/celleb_setup.c
+++ b/arch/powerpc/platforms/cell/celleb_setup.c
@@ -128,10 +128,6 @@ static void __init celleb_setup_arch_beat(void)
spu_management_ops = &spu_management_of_ops;
#endif
-#ifdef CONFIG_SMP
- smp_init_celleb();
-#endif
-
celleb_setup_arch_common();
}
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 624d26e72f1d..449c08c15862 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -101,9 +101,9 @@ static void iic_ioexc_eoi(struct irq_data *d)
static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
struct cbe_iic_regs __iomem *node_iic =
- (void __iomem *)get_irq_desc_data(desc);
+ (void __iomem *)irq_desc_get_handler_data(desc);
unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC;
unsigned long bits, ack;
int cascade;
@@ -196,8 +196,20 @@ static irqreturn_t iic_ipi_action(int irq, void *dev_id)
{
int ipi = (int)(long)dev_id;
- smp_message_recv(ipi);
-
+ switch(ipi) {
+ case PPC_MSG_CALL_FUNCTION:
+ generic_smp_call_function_interrupt();
+ break;
+ case PPC_MSG_RESCHEDULE:
+ scheduler_ipi();
+ break;
+ case PPC_MSG_CALL_FUNC_SINGLE:
+ generic_smp_call_function_single_interrupt();
+ break;
+ case PPC_MSG_DEBUGGER_BREAK:
+ debug_ipi_action(0, NULL);
+ break;
+ }
return IRQ_HANDLED;
}
static void iic_request_ipi(int ipi, const char *name)
@@ -235,67 +247,19 @@ static int iic_host_match(struct irq_host *h, struct device_node *node)
"IBM,CBEA-Internal-Interrupt-Controller");
}
-extern int noirqdebug;
-
-static void handle_iic_irq(unsigned int irq, struct irq_desc *desc)
-{
- struct irq_chip *chip = get_irq_desc_chip(desc);
-
- raw_spin_lock(&desc->lock);
-
- desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
-
- /*
- * If we're currently running this IRQ, or its disabled,
- * we shouldn't process the IRQ. Mark it pending, handle
- * the necessary masking and go out
- */
- if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
- !desc->action)) {
- desc->status |= IRQ_PENDING;
- goto out_eoi;
- }
-
- kstat_incr_irqs_this_cpu(irq, desc);
-
- /* Mark the IRQ currently in progress.*/
- desc->status |= IRQ_INPROGRESS;
-
- do {
- struct irqaction *action = desc->action;
- irqreturn_t action_ret;
-
- if (unlikely(!action))
- goto out_eoi;
-
- desc->status &= ~IRQ_PENDING;
- raw_spin_unlock(&desc->lock);
- action_ret = handle_IRQ_event(irq, action);
- if (!noirqdebug)
- note_interrupt(irq, desc, action_ret);
- raw_spin_lock(&desc->lock);
-
- } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
-
- desc->status &= ~IRQ_INPROGRESS;
-out_eoi:
- chip->irq_eoi(&desc->irq_data);
- raw_spin_unlock(&desc->lock);
-}
-
static int iic_host_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hw)
{
switch (hw & IIC_IRQ_TYPE_MASK) {
case IIC_IRQ_TYPE_IPI:
- set_irq_chip_and_handler(virq, &iic_chip, handle_percpu_irq);
+ irq_set_chip_and_handler(virq, &iic_chip, handle_percpu_irq);
break;
case IIC_IRQ_TYPE_IOEXC:
- set_irq_chip_and_handler(virq, &iic_ioexc_chip,
- handle_iic_irq);
+ irq_set_chip_and_handler(virq, &iic_ioexc_chip,
+ handle_edge_eoi_irq);
break;
default:
- set_irq_chip_and_handler(virq, &iic_chip, handle_iic_irq);
+ irq_set_chip_and_handler(virq, &iic_chip, handle_edge_eoi_irq);
}
return 0;
}
@@ -412,8 +376,8 @@ static int __init setup_iic(void)
* irq_data is a generic pointer that gets passed back
* to us later, so the forced cast is fine.
*/
- set_irq_data(cascade, (void __force *)node_iic);
- set_irq_chained_handler(cascade , iic_ioexc_cascade);
+ irq_set_handler_data(cascade, (void __force *)node_iic);
+ irq_set_chained_handler(cascade, iic_ioexc_cascade);
out_be64(&node_iic->iic_ir,
(1 << 12) /* priority */ |
(node << 4) /* dest node */ |
diff --git a/arch/powerpc/platforms/cell/qpace_setup.c b/arch/powerpc/platforms/cell/qpace_setup.c
index d31c594cfdf3..51e290126bc1 100644
--- a/arch/powerpc/platforms/cell/qpace_setup.c
+++ b/arch/powerpc/platforms/cell/qpace_setup.c
@@ -42,7 +42,6 @@
#include "interrupt.h"
#include "pervasive.h"
#include "ras.h"
-#include "io-workarounds.h"
static void qpace_show_cpuinfo(struct seq_file *m)
{
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index 6a28d027d959..c73cf4c43fc2 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -51,11 +51,11 @@
#include <asm/udbg.h>
#include <asm/mpic.h>
#include <asm/cell-regs.h>
+#include <asm/io-workarounds.h>
#include "interrupt.h"
#include "pervasive.h"
#include "ras.h"
-#include "io-workarounds.h"
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
@@ -136,8 +136,6 @@ static int __devinit cell_setup_phb(struct pci_controller *phb)
iowa_register_bus(phb, &spiderpci_ops, &spiderpci_iowa_init,
(void *)SPIDER_PCI_REG_BASE);
- io_workaround_init();
-
return 0;
}
@@ -187,8 +185,8 @@ machine_subsys_initcall(cell, cell_publish_devices);
static void cell_mpic_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
- struct mpic *mpic = get_irq_desc_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct mpic *mpic = irq_desc_get_handler_data(desc);
unsigned int virq;
virq = mpic_get_one_irq(mpic);
@@ -223,8 +221,8 @@ static void __init mpic_init_IRQ(void)
printk(KERN_INFO "%s : hooking up to IRQ %d\n",
dn->full_name, virq);
- set_irq_data(virq, mpic);
- set_irq_chained_handler(virq, cell_mpic_cascade);
+ irq_set_handler_data(virq, mpic);
+ irq_set_chained_handler(virq, cell_mpic_cascade);
}
}
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index f774530075b7..d176e6148e3f 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -77,7 +77,7 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
unsigned int pcpu;
int start_cpu;
- if (cpu_isset(lcpu, of_spin_map))
+ if (cpumask_test_cpu(lcpu, &of_spin_map))
/* Already started by OF and sitting in spin loop */
return 1;
@@ -103,27 +103,11 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
return 1;
}
-static void smp_iic_message_pass(int target, int msg)
-{
- unsigned int i;
-
- if (target < NR_CPUS) {
- iic_cause_IPI(target, msg);
- } else {
- for_each_online_cpu(i) {
- if (target == MSG_ALL_BUT_SELF
- && i == smp_processor_id())
- continue;
- iic_cause_IPI(i, msg);
- }
- }
-}
-
static int __init smp_iic_probe(void)
{
iic_request_IPIs();
- return cpus_weight(cpu_possible_map);
+ return cpumask_weight(cpu_possible_mask);
}
static void __devinit smp_cell_setup_cpu(int cpu)
@@ -137,12 +121,12 @@ static void __devinit smp_cell_setup_cpu(int cpu)
mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER);
}
-static void __devinit smp_cell_kick_cpu(int nr)
+static int __devinit smp_cell_kick_cpu(int nr)
{
BUG_ON(nr < 0 || nr >= NR_CPUS);
if (!smp_startup_cpu(nr))
- return;
+ return -ENOENT;
/*
* The processor is currently spinning, waiting for the
@@ -150,6 +134,8 @@ static void __devinit smp_cell_kick_cpu(int nr)
* the processor will continue on to secondary_start
*/
paca[nr].cpu_start = 1;
+
+ return 0;
}
static int smp_cell_cpu_bootable(unsigned int nr)
@@ -166,7 +152,7 @@ static int smp_cell_cpu_bootable(unsigned int nr)
return 1;
}
static struct smp_ops_t bpa_iic_smp_ops = {
- .message_pass = smp_iic_message_pass,
+ .message_pass = iic_cause_IPI,
.probe = smp_iic_probe,
.kick_cpu = smp_cell_kick_cpu,
.setup_cpu = smp_cell_setup_cpu,
@@ -186,13 +172,12 @@ void __init smp_init_cell(void)
if (cpu_has_feature(CPU_FTR_SMT)) {
for_each_present_cpu(i) {
if (cpu_thread_in_core(i) == 0)
- cpu_set(i, of_spin_map);
+ cpumask_set_cpu(i, &of_spin_map);
}
- } else {
- of_spin_map = cpu_present_map;
- }
+ } else
+ cpumask_copy(&of_spin_map, cpu_present_mask);
- cpu_clear(boot_cpuid, of_spin_map);
+ cpumask_clear_cpu(boot_cpuid, &of_spin_map);
/* Non-lpar has additional take/give timebase */
if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
diff --git a/arch/powerpc/platforms/cell/spider-pci.c b/arch/powerpc/platforms/cell/spider-pci.c
index ca7731c0b595..f1f7878893f3 100644
--- a/arch/powerpc/platforms/cell/spider-pci.c
+++ b/arch/powerpc/platforms/cell/spider-pci.c
@@ -27,8 +27,7 @@
#include <asm/ppc-pci.h>
#include <asm/pci-bridge.h>
-
-#include "io-workarounds.h"
+#include <asm/io-workarounds.h>
#define SPIDER_PCI_DISABLE_PREFETCH
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index b38cdfc1deb8..442c28c00f88 100644
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -68,9 +68,9 @@ struct spider_pic {
};
static struct spider_pic spider_pics[SPIDER_CHIP_COUNT];
-static struct spider_pic *spider_virq_to_pic(unsigned int virq)
+static struct spider_pic *spider_irq_data_to_pic(struct irq_data *d)
{
- return irq_map[virq].host->host_data;
+ return irq_data_get_irq_chip_data(d);
}
static void __iomem *spider_get_irq_config(struct spider_pic *pic,
@@ -81,28 +81,28 @@ static void __iomem *spider_get_irq_config(struct spider_pic *pic,
static void spider_unmask_irq(struct irq_data *d)
{
- struct spider_pic *pic = spider_virq_to_pic(d->irq);
- void __iomem *cfg = spider_get_irq_config(pic, irq_map[d->irq].hwirq);
+ struct spider_pic *pic = spider_irq_data_to_pic(d);
+ void __iomem *cfg = spider_get_irq_config(pic, irqd_to_hwirq(d));
out_be32(cfg, in_be32(cfg) | 0x30000000u);
}
static void spider_mask_irq(struct irq_data *d)
{
- struct spider_pic *pic = spider_virq_to_pic(d->irq);
- void __iomem *cfg = spider_get_irq_config(pic, irq_map[d->irq].hwirq);
+ struct spider_pic *pic = spider_irq_data_to_pic(d);
+ void __iomem *cfg = spider_get_irq_config(pic, irqd_to_hwirq(d));
out_be32(cfg, in_be32(cfg) & ~0x30000000u);
}
static void spider_ack_irq(struct irq_data *d)
{
- struct spider_pic *pic = spider_virq_to_pic(d->irq);
- unsigned int src = irq_map[d->irq].hwirq;
+ struct spider_pic *pic = spider_irq_data_to_pic(d);
+ unsigned int src = irqd_to_hwirq(d);
/* Reset edge detection logic if necessary
*/
- if (irq_to_desc(d->irq)->status & IRQ_LEVEL)
+ if (irqd_is_level_type(d))
return;
/* Only interrupts 47 to 50 can be set to edge */
@@ -116,10 +116,9 @@ static void spider_ack_irq(struct irq_data *d)
static int spider_set_irq_type(struct irq_data *d, unsigned int type)
{
unsigned int sense = type & IRQ_TYPE_SENSE_MASK;
- struct spider_pic *pic = spider_virq_to_pic(d->irq);
- unsigned int hw = irq_map[d->irq].hwirq;
+ struct spider_pic *pic = spider_irq_data_to_pic(d);
+ unsigned int hw = irqd_to_hwirq(d);
void __iomem *cfg = spider_get_irq_config(pic, hw);
- struct irq_desc *desc = irq_to_desc(d->irq);
u32 old_mask;
u32 ic;
@@ -147,12 +146,6 @@ static int spider_set_irq_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
- /* Update irq_desc */
- desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
- desc->status |= type & IRQ_TYPE_SENSE_MASK;
- if (type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
- desc->status |= IRQ_LEVEL;
-
/* Configure the source. One gross hack that was there before and
* that I've kept around is the priority to the BE which I set to
* be the same as the interrupt source number. I don't know wether
@@ -178,10 +171,11 @@ static struct irq_chip spider_pic = {
static int spider_host_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hw)
{
- set_irq_chip_and_handler(virq, &spider_pic, handle_level_irq);
+ irq_set_chip_data(virq, h->host_data);
+ irq_set_chip_and_handler(virq, &spider_pic, handle_level_irq);
/* Set default irq type */
- set_irq_type(virq, IRQ_TYPE_NONE);
+ irq_set_irq_type(virq, IRQ_TYPE_NONE);
return 0;
}
@@ -207,8 +201,8 @@ static struct irq_host_ops spider_host_ops = {
static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
- struct spider_pic *pic = get_irq_desc_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct spider_pic *pic = irq_desc_get_handler_data(desc);
unsigned int cs, virq;
cs = in_be32(pic->regs + TIR_CS) >> 24;
@@ -328,8 +322,8 @@ static void __init spider_init_one(struct device_node *of_node, int chip,
virq = spider_find_cascade_and_node(pic);
if (virq == NO_IRQ)
return;
- set_irq_data(virq, pic);
- set_irq_chained_handler(virq, spider_irq_cascade);
+ irq_set_handler_data(virq, pic);
+ irq_set_chained_handler(virq, spider_irq_cascade);
printk(KERN_INFO "spider_pic: node %d, addr: 0x%lx %s\n",
pic->node_id, addr, of_node->full_name);
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index acfaccea5f4f..3675da73623f 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -32,6 +32,7 @@
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/linux_logo.h>
+#include <linux/syscore_ops.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
@@ -521,18 +522,8 @@ void spu_init_channels(struct spu *spu)
}
EXPORT_SYMBOL_GPL(spu_init_channels);
-static int spu_shutdown(struct sys_device *sysdev)
-{
- struct spu *spu = container_of(sysdev, struct spu, sysdev);
-
- spu_free_irqs(spu);
- spu_destroy_spu(spu);
- return 0;
-}
-
static struct sysdev_class spu_sysdev_class = {
.name = "spu",
- .shutdown = spu_shutdown,
};
int spu_add_sysdev_attr(struct sysdev_attribute *attr)
@@ -797,6 +788,22 @@ static inline void crash_register_spus(struct list_head *list)
}
#endif
+static void spu_shutdown(void)
+{
+ struct spu *spu;
+
+ mutex_lock(&spu_full_list_mutex);
+ list_for_each_entry(spu, &spu_full_list, full_list) {
+ spu_free_irqs(spu);
+ spu_destroy_spu(spu);
+ }
+ mutex_unlock(&spu_full_list_mutex);
+}
+
+static struct syscore_ops spu_syscore_ops = {
+ .shutdown = spu_shutdown,
+};
+
static int __init init_spu_base(void)
{
int i, ret = 0;
@@ -830,6 +837,7 @@ static int __init init_spu_base(void)
crash_register_spus(&spu_full_list);
mutex_unlock(&spu_full_list_mutex);
spu_add_sysdev_attr(&attr_stat);
+ register_syscore_ops(&spu_syscore_ops);
spu_init_affinity();
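spu_base.c above replaces the per-device sysdev .shutdown hook with a single syscore_ops shutdown callback that walks the SPU list itself. The registration pattern, as a small sketch with illustrative names:

/* Sketch of the syscore_ops shutdown pattern used by spu_base.c above. */
#include <linux/init.h>
#include <linux/syscore_ops.h>

static void example_shutdown(void)
{
	/* Runs once late in the shutdown/reboot path instead of once per
	 * registered sysdev; walk your own device list here. */
}

static struct syscore_ops example_syscore_ops = {
	.shutdown = example_shutdown,
};

static int __init example_init(void)
{
	register_syscore_ops(&example_syscore_ops);
	return 0;
}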
diff --git a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
index 3b894f585280..147069938cfe 100644
--- a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
+++ b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
@@ -90,7 +90,7 @@ int spu_alloc_lscsa(struct spu_state *csa)
*/
for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++) {
/* XXX This is likely to fail, we should use a special pool
- * similiar to what hugetlbfs does.
+ * similar to what hugetlbfs does.
*/
csa->lscsa_pages[i] = alloc_pages(GFP_KERNEL,
SPU_64K_PAGE_ORDER);
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 0b0466284932..32cb4e66d2cd 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -141,7 +141,7 @@ void __spu_update_sched_info(struct spu_context *ctx)
* runqueue. The context will be rescheduled on the proper node
* if it is timesliced or preempted.
*/
- ctx->cpus_allowed = current->cpus_allowed;
+ cpumask_copy(&ctx->cpus_allowed, tsk_cpus_allowed(current));
/* Save the current cpu id for spu interrupt routing. */
ctx->last_ran = raw_smp_processor_id();
@@ -846,7 +846,7 @@ static struct spu_context *grab_runnable_context(int prio, int node)
struct list_head *rq = &spu_prio->runq[best];
list_for_each_entry(ctx, rq, rq) {
- /* XXX(hch): check for affinity here aswell */
+ /* XXX(hch): check for affinity here as well */
if (__node_allowed(ctx, node)) {
__spu_del_from_rq(ctx);
goto found;
diff --git a/arch/powerpc/platforms/cell/spufs/spu_restore.c b/arch/powerpc/platforms/cell/spufs/spu_restore.c
index 21a9c952d88b..72c905f1ee7a 100644
--- a/arch/powerpc/platforms/cell/spufs/spu_restore.c
+++ b/arch/powerpc/platforms/cell/spufs/spu_restore.c
@@ -284,7 +284,7 @@ static inline void restore_complete(void)
exit_instrs[3] = BR_INSTR;
break;
default:
- /* SPU_Status[R]=1. No additonal instructions. */
+ /* SPU_Status[R]=1. No additional instructions. */
break;
}
spu_sync();
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index 4c1288451a21..122786498419 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -365,7 +365,7 @@ void __init chrp_setup_arch(void)
static void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = i8259_irq();
if (cascade_irq != NO_IRQ)
@@ -517,7 +517,7 @@ static void __init chrp_find_8259(void)
if (cascade_irq == NO_IRQ)
printk(KERN_ERR "i8259: failed to map cascade irq\n");
else
- set_irq_chained_handler(cascade_irq,
+ irq_set_chained_handler(cascade_irq,
chrp_8259_cascade);
}
}
diff --git a/arch/powerpc/platforms/chrp/smp.c b/arch/powerpc/platforms/chrp/smp.c
index 02cafecc90e3..a800122e4dda 100644
--- a/arch/powerpc/platforms/chrp/smp.c
+++ b/arch/powerpc/platforms/chrp/smp.c
@@ -30,10 +30,12 @@
#include <asm/mpic.h>
#include <asm/rtas.h>
-static void __devinit smp_chrp_kick_cpu(int nr)
+static int __devinit smp_chrp_kick_cpu(int nr)
{
*(unsigned long *)KERNELBASE = nr;
asm volatile("dcbf 0,%0"::"r"(KERNELBASE):"memory");
+
+ return 0;
}
static void __devinit smp_chrp_setup_cpu(int cpu_nr)
diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
index 0aca0e28a8e5..f61a2dd96b99 100644
--- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
@@ -48,7 +48,7 @@
static void flipper_pic_mask_and_ack(struct irq_data *d)
{
- int irq = virq_to_hw(d->irq);
+ int irq = irqd_to_hwirq(d);
void __iomem *io_base = irq_data_get_irq_chip_data(d);
u32 mask = 1 << irq;
@@ -59,7 +59,7 @@ static void flipper_pic_mask_and_ack(struct irq_data *d)
static void flipper_pic_ack(struct irq_data *d)
{
- int irq = virq_to_hw(d->irq);
+ int irq = irqd_to_hwirq(d);
void __iomem *io_base = irq_data_get_irq_chip_data(d);
/* this is at least needed for RSW */
@@ -68,7 +68,7 @@ static void flipper_pic_ack(struct irq_data *d)
static void flipper_pic_mask(struct irq_data *d)
{
- int irq = virq_to_hw(d->irq);
+ int irq = irqd_to_hwirq(d);
void __iomem *io_base = irq_data_get_irq_chip_data(d);
clrbits32(io_base + FLIPPER_IMR, 1 << irq);
@@ -76,7 +76,7 @@ static void flipper_pic_mask(struct irq_data *d)
static void flipper_pic_unmask(struct irq_data *d)
{
- int irq = virq_to_hw(d->irq);
+ int irq = irqd_to_hwirq(d);
void __iomem *io_base = irq_data_get_irq_chip_data(d);
setbits32(io_base + FLIPPER_IMR, 1 << irq);
@@ -101,18 +101,12 @@ static struct irq_host *flipper_irq_host;
static int flipper_pic_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hwirq)
{
- set_irq_chip_data(virq, h->host_data);
- irq_to_desc(virq)->status |= IRQ_LEVEL;
- set_irq_chip_and_handler(virq, &flipper_pic, handle_level_irq);
+ irq_set_chip_data(virq, h->host_data);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_and_handler(virq, &flipper_pic, handle_level_irq);
return 0;
}
-static void flipper_pic_unmap(struct irq_host *h, unsigned int irq)
-{
- set_irq_chip_data(irq, NULL);
- set_irq_chip(irq, NULL);
-}
-
static int flipper_pic_match(struct irq_host *h, struct device_node *np)
{
return 1;
@@ -121,7 +115,6 @@ static int flipper_pic_match(struct irq_host *h, struct device_node *np)
static struct irq_host_ops flipper_irq_host_ops = {
.map = flipper_pic_map,
- .unmap = flipper_pic_unmap,
.match = flipper_pic_match,
};
diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
index 35e448bd8479..e4919170c6bc 100644
--- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
@@ -43,7 +43,7 @@
static void hlwd_pic_mask_and_ack(struct irq_data *d)
{
- int irq = virq_to_hw(d->irq);
+ int irq = irqd_to_hwirq(d);
void __iomem *io_base = irq_data_get_irq_chip_data(d);
u32 mask = 1 << irq;
@@ -53,7 +53,7 @@ static void hlwd_pic_mask_and_ack(struct irq_data *d)
static void hlwd_pic_ack(struct irq_data *d)
{
- int irq = virq_to_hw(d->irq);
+ int irq = irqd_to_hwirq(d);
void __iomem *io_base = irq_data_get_irq_chip_data(d);
out_be32(io_base + HW_BROADWAY_ICR, 1 << irq);
@@ -61,7 +61,7 @@ static void hlwd_pic_ack(struct irq_data *d)
static void hlwd_pic_mask(struct irq_data *d)
{
- int irq = virq_to_hw(d->irq);
+ int irq = irqd_to_hwirq(d);
void __iomem *io_base = irq_data_get_irq_chip_data(d);
clrbits32(io_base + HW_BROADWAY_IMR, 1 << irq);
@@ -69,7 +69,7 @@ static void hlwd_pic_mask(struct irq_data *d)
static void hlwd_pic_unmask(struct irq_data *d)
{
- int irq = virq_to_hw(d->irq);
+ int irq = irqd_to_hwirq(d);
void __iomem *io_base = irq_data_get_irq_chip_data(d);
setbits32(io_base + HW_BROADWAY_IMR, 1 << irq);
@@ -94,21 +94,14 @@ static struct irq_host *hlwd_irq_host;
static int hlwd_pic_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hwirq)
{
- set_irq_chip_data(virq, h->host_data);
- irq_to_desc(virq)->status |= IRQ_LEVEL;
- set_irq_chip_and_handler(virq, &hlwd_pic, handle_level_irq);
+ irq_set_chip_data(virq, h->host_data);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_and_handler(virq, &hlwd_pic, handle_level_irq);
return 0;
}
-static void hlwd_pic_unmap(struct irq_host *h, unsigned int irq)
-{
- set_irq_chip_data(irq, NULL);
- set_irq_chip(irq, NULL);
-}
-
static struct irq_host_ops hlwd_irq_host_ops = {
.map = hlwd_pic_map,
- .unmap = hlwd_pic_unmap,
};
static unsigned int __hlwd_pic_get_irq(struct irq_host *h)
@@ -129,8 +122,8 @@ static unsigned int __hlwd_pic_get_irq(struct irq_host *h)
static void hlwd_pic_irq_cascade(unsigned int cascade_virq,
struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
- struct irq_host *irq_host = get_irq_data(cascade_virq);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct irq_host *irq_host = irq_get_handler_data(cascade_virq);
unsigned int virq;
raw_spin_lock(&desc->lock);
@@ -145,7 +138,7 @@ static void hlwd_pic_irq_cascade(unsigned int cascade_virq,
raw_spin_lock(&desc->lock);
chip->irq_ack(&desc->irq_data); /* IRQ_LEVEL */
- if (!(desc->status & IRQ_DISABLED) && chip->irq_unmask)
+ if (!irqd_irq_disabled(&desc->irq_data) && chip->irq_unmask)
chip->irq_unmask(&desc->irq_data);
raw_spin_unlock(&desc->lock);
}
@@ -218,8 +211,8 @@ void hlwd_pic_probe(void)
host = hlwd_pic_init(np);
BUG_ON(!host);
cascade_virq = irq_of_parse_and_map(np, 0);
- set_irq_data(cascade_virq, host);
- set_irq_chained_handler(cascade_virq,
+ irq_set_handler_data(cascade_virq, host);
+ irq_set_chained_handler(cascade_virq,
hlwd_pic_irq_cascade);
hlwd_irq_host = host;
break;
diff --git a/arch/powerpc/platforms/embedded6xx/holly.c b/arch/powerpc/platforms/embedded6xx/holly.c
index b21fde589ca7..487bda0d18d8 100644
--- a/arch/powerpc/platforms/embedded6xx/holly.c
+++ b/arch/powerpc/platforms/embedded6xx/holly.c
@@ -198,8 +198,8 @@ static void __init holly_init_IRQ(void)
cascade_pci_irq = irq_of_parse_and_map(tsi_pci, 0);
pr_debug("%s: tsi108 cascade_pci_irq = 0x%x\n", __func__, (u32) cascade_pci_irq);
tsi108_pci_int_init(cascade_node);
- set_irq_data(cascade_pci_irq, mpic);
- set_irq_chained_handler(cascade_pci_irq, tsi108_irq_cascade);
+ irq_set_handler_data(cascade_pci_irq, mpic);
+ irq_set_chained_handler(cascade_pci_irq, tsi108_irq_cascade);
#endif
/* Configure MPIC outputs to CPU0 */
tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0);
diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
index 7a2ba39d7811..1cb907c94359 100644
--- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
+++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
@@ -153,8 +153,8 @@ static void __init mpc7448_hpc2_init_IRQ(void)
DBG("%s: tsi108 cascade_pci_irq = 0x%x\n", __func__,
(u32) cascade_pci_irq);
tsi108_pci_int_init(cascade_node);
- set_irq_data(cascade_pci_irq, mpic);
- set_irq_chained_handler(cascade_pci_irq, tsi108_irq_cascade);
+ irq_set_handler_data(cascade_pci_irq, mpic);
+ irq_set_chained_handler(cascade_pci_irq, tsi108_irq_cascade);
#endif
/* Configure MPIC outputs to CPU0 */
tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0);
diff --git a/arch/powerpc/platforms/iseries/Kconfig b/arch/powerpc/platforms/iseries/Kconfig
index e5bc9f75d474..b57cda3a0817 100644
--- a/arch/powerpc/platforms/iseries/Kconfig
+++ b/arch/powerpc/platforms/iseries/Kconfig
@@ -1,7 +1,9 @@
config PPC_ISERIES
bool "IBM Legacy iSeries"
depends on PPC64 && PPC_BOOK3S
- select PPC_INDIRECT_IO
+ select PPC_SMP_MUXED_IPI
+ select PPC_INDIRECT_PIO
+ select PPC_INDIRECT_MMIO
select PPC_PCI_CHOICE if EXPERT
menu "iSeries device drivers"
diff --git a/arch/powerpc/platforms/iseries/exception.S b/arch/powerpc/platforms/iseries/exception.S
index 32a56c6dfa72..29c02f36b32f 100644
--- a/arch/powerpc/platforms/iseries/exception.S
+++ b/arch/powerpc/platforms/iseries/exception.S
@@ -31,6 +31,7 @@
#include <asm/thread_info.h>
#include <asm/ptrace.h>
#include <asm/cputable.h>
+#include <asm/mmu.h>
#include "exception.h"
@@ -60,29 +61,31 @@ system_reset_iSeries:
/* Spin on __secondary_hold_spinloop until it is updated by the boot cpu. */
/* In the UP case we'll yield() later, and we will not access the paca anyway */
#ifdef CONFIG_SMP
-1:
+iSeries_secondary_wait_paca:
HMT_LOW
LOAD_REG_ADDR(r23, __secondary_hold_spinloop)
ld r23,0(r23)
- sync
- LOAD_REG_ADDR(r3,current_set)
- sldi r28,r24,3 /* get current_set[cpu#] */
- ldx r3,r3,r28
- addi r1,r3,THREAD_SIZE
- subi r1,r1,STACK_FRAME_OVERHEAD
- cmpwi 0,r23,0 /* Keep poking the Hypervisor until */
- bne 2f /* we're released */
- /* Let the Hypervisor know we are alive */
+ cmpdi 0,r23,0
+ bne 2f /* go on when the master is ready */
+
+ /* Keep poking the Hypervisor until we're released */
/* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
lis r3,0x8002
rldicr r3,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */
li r0,-1 /* r0=-1 indicates a Hypervisor call */
sc /* Invoke the hypervisor via a system call */
- b 1b
-#endif
+ b iSeries_secondary_wait_paca
2:
+ HMT_MEDIUM
+ sync
+
+ LOAD_REG_ADDR(r3, nr_cpu_ids) /* get number of pacas allocated */
+ lwz r3,0(r3) /* nr_cpus= or NR_CPUS can limit */
+ cmpld 0,r24,r3 /* is our cpu number allocated? */
+ bge iSeries_secondary_yield /* no, yield forever */
+
/* Load our paca now that it's been allocated */
LOAD_REG_ADDR(r13, paca)
ld r13,0(r13)
@@ -93,10 +96,24 @@ system_reset_iSeries:
ori r23,r23,MSR_RI
mtmsrd r23 /* RI on */
- HMT_LOW
-#ifdef CONFIG_SMP
+iSeries_secondary_smp_loop:
lbz r23,PACAPROCSTART(r13) /* Test if this processor
* should start */
+ cmpwi 0,r23,0
+ bne 3f /* go on when we are told */
+
+ HMT_LOW
+ /* Let the Hypervisor know we are alive */
+ /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
+ lis r3,0x8002
+ rldicr r3,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */
+ li r0,-1 /* r0=-1 indicates a Hypervisor call */
+ sc /* Invoke the hypervisor via a system call */
+ mfspr r13,SPRN_SPRG_PACA /* Put r13 back ???? */
+ b iSeries_secondary_smp_loop /* wait for signal to start */
+
+3:
+ HMT_MEDIUM
sync
LOAD_REG_ADDR(r3,current_set)
sldi r28,r24,3 /* get current_set[cpu#] */
@@ -104,27 +121,22 @@ system_reset_iSeries:
addi r1,r3,THREAD_SIZE
subi r1,r1,STACK_FRAME_OVERHEAD
- cmpwi 0,r23,0
- beq iSeries_secondary_smp_loop /* Loop until told to go */
b __secondary_start /* Loop until told to go */
-iSeries_secondary_smp_loop:
- /* Let the Hypervisor know we are alive */
- /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
- lis r3,0x8002
- rldicr r3,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */
-#else /* CONFIG_SMP */
+#endif /* CONFIG_SMP */
+
+iSeries_secondary_yield:
/* Yield the processor. This is required for non-SMP kernels
which are running on multi-threaded machines. */
+ HMT_LOW
lis r3,0x8000
rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */
li r4,0 /* "yield timed" */
li r5,-1 /* "yield forever" */
-#endif /* CONFIG_SMP */
li r0,-1 /* r0=-1 indicates a Hypervisor call */
sc /* Invoke the hypervisor via a system call */
mfspr r13,SPRN_SPRG_PACA /* Put r13 back ???? */
- b 2b /* If SMP not configured, secondaries
+ b iSeries_secondary_yield /* If SMP not configured, secondaries
* loop forever */
/*** ISeries-LPAR interrupt handlers ***/
@@ -157,7 +169,7 @@ BEGIN_FTR_SECTION
FTR_SECTION_ELSE
EXCEPTION_PROLOG_1(PACA_EXGEN)
EXCEPTION_PROLOG_ISERIES_1
-ALT_FTR_SECTION_END_IFCLR(CPU_FTR_SLB)
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_SLB)
b data_access_common
.do_stab_bolted_iSeries:
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c
index 4fb96f0b2df6..b2103453eb01 100644
--- a/arch/powerpc/platforms/iseries/irq.c
+++ b/arch/powerpc/platforms/iseries/irq.c
@@ -42,7 +42,6 @@
#include "irq.h"
#include "pci.h"
#include "call_pci.h"
-#include "smp.h"
#ifdef CONFIG_PCI
@@ -171,7 +170,7 @@ static void iseries_enable_IRQ(struct irq_data *d)
{
u32 bus, dev_id, function, mask;
const u32 sub_bus = 0;
- unsigned int rirq = (unsigned int)irq_map[d->irq].hwirq;
+ unsigned int rirq = (unsigned int)irqd_to_hwirq(d);
/* The IRQ has already been locked by the caller */
bus = REAL_IRQ_TO_BUS(rirq);
@@ -188,7 +187,7 @@ static unsigned int iseries_startup_IRQ(struct irq_data *d)
{
u32 bus, dev_id, function, mask;
const u32 sub_bus = 0;
- unsigned int rirq = (unsigned int)irq_map[d->irq].hwirq;
+ unsigned int rirq = (unsigned int)irqd_to_hwirq(d);
bus = REAL_IRQ_TO_BUS(rirq);
function = REAL_IRQ_TO_FUNC(rirq);
@@ -220,7 +219,7 @@ void __init iSeries_activate_IRQs()
if (!desc)
continue;
- chip = get_irq_desc_chip(desc);
+ chip = irq_desc_get_chip(desc);
if (chip && chip->irq_startup) {
raw_spin_lock_irqsave(&desc->lock, flags);
chip->irq_startup(&desc->irq_data);
@@ -234,7 +233,7 @@ static void iseries_shutdown_IRQ(struct irq_data *d)
{
u32 bus, dev_id, function, mask;
const u32 sub_bus = 0;
- unsigned int rirq = (unsigned int)irq_map[d->irq].hwirq;
+ unsigned int rirq = (unsigned int)irqd_to_hwirq(d);
/* irq should be locked by the caller */
bus = REAL_IRQ_TO_BUS(rirq);
@@ -257,7 +256,7 @@ static void iseries_disable_IRQ(struct irq_data *d)
{
u32 bus, dev_id, function, mask;
const u32 sub_bus = 0;
- unsigned int rirq = (unsigned int)irq_map[d->irq].hwirq;
+ unsigned int rirq = (unsigned int)irqd_to_hwirq(d);
/* The IRQ has already been locked by the caller */
bus = REAL_IRQ_TO_BUS(rirq);
@@ -271,7 +270,7 @@ static void iseries_disable_IRQ(struct irq_data *d)
static void iseries_end_IRQ(struct irq_data *d)
{
- unsigned int rirq = (unsigned int)irq_map[d->irq].hwirq;
+ unsigned int rirq = (unsigned int)irqd_to_hwirq(d);
HvCallPci_eoi(REAL_IRQ_TO_BUS(rirq), REAL_IRQ_TO_SUBBUS(rirq),
(REAL_IRQ_TO_IDSEL(rirq) << 4) + REAL_IRQ_TO_FUNC(rirq));
@@ -316,7 +315,7 @@ unsigned int iSeries_get_irq(void)
#ifdef CONFIG_SMP
if (get_lppaca()->int_dword.fields.ipi_cnt) {
get_lppaca()->int_dword.fields.ipi_cnt = 0;
- iSeries_smp_message_recv();
+ smp_ipi_demux();
}
#endif /* CONFIG_SMP */
if (hvlpevent_is_pending())
@@ -346,7 +345,7 @@ unsigned int iSeries_get_irq(void)
static int iseries_irq_host_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hw)
{
- set_irq_chip_and_handler(virq, &iseries_pic, handle_fasteoi_irq);
+ irq_set_chip_and_handler(virq, &iseries_pic, handle_fasteoi_irq);
return 0;
}
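The repeated change in this file (and in the PIC drivers above) is mechanical: irq_chip callbacks no longer dig the hardware number out of irq_map[d->irq].hwirq but call the new irqd_to_hwirq() helper. A hedged sketch of the resulting callback shape, with a made-up register layout for illustration:

	#include <linux/irq.h>
	#include <asm/io.h>

	static void example_mask_irq(struct irq_data *d)
	{
		unsigned int hwirq = irqd_to_hwirq(d);		/* helper added by this series */
		void __iomem *regs = irq_data_get_irq_chip_data(d);

		/* clear the corresponding bit in a hypothetical mask register at offset 4 */
		clrbits32(regs + 4, 1 << hwirq);
	}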
diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c
index b5e026bdca21..62dabe3c2bfa 100644
--- a/arch/powerpc/platforms/iseries/mf.c
+++ b/arch/powerpc/platforms/iseries/mf.c
@@ -51,7 +51,7 @@
static int mf_initialized;
/*
- * This is the structure layout for the Machine Facilites LPAR event
+ * This is the structure layout for the Machine Facilities LPAR event
* flows.
*/
struct vsp_cmd_data {
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index 2946ae10fbfd..c25a0815c26b 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -249,7 +249,7 @@ static unsigned long iSeries_process_mainstore_vpd(struct MemoryBlock *mb_array,
unsigned long i;
unsigned long mem_blocks = 0;
- if (cpu_has_feature(CPU_FTR_SLB))
+ if (mmu_has_feature(MMU_FTR_SLB))
mem_blocks = iSeries_process_Regatta_mainstore_vpd(mb_array,
max_entries);
else
@@ -634,7 +634,7 @@ static int __init iseries_probe(void)
hpte_init_iSeries();
/* iSeries does not support 16M pages */
- cur_cpu_spec->cpu_features &= ~CPU_FTR_16M_PAGE;
+ cur_cpu_spec->mmu_features &= ~MMU_FTR_16M_PAGE;
return 1;
}
@@ -685,6 +685,11 @@ void * __init iSeries_early_setup(void)
powerpc_firmware_features |= FW_FEATURE_ISERIES;
powerpc_firmware_features |= FW_FEATURE_LPAR;
+#ifdef CONFIG_SMP
+ /* On iSeries we know we can never have more than 64 cpus */
+ nr_cpu_ids = max(nr_cpu_ids, 64);
+#endif
+
iSeries_fixup_klimit();
/*
diff --git a/arch/powerpc/platforms/iseries/smp.c b/arch/powerpc/platforms/iseries/smp.c
index 6c6029914dbc..e3265adde5d3 100644
--- a/arch/powerpc/platforms/iseries/smp.c
+++ b/arch/powerpc/platforms/iseries/smp.c
@@ -42,57 +42,23 @@
#include <asm/cputable.h>
#include <asm/system.h>
-#include "smp.h"
-
-static unsigned long iSeries_smp_message[NR_CPUS];
-
-void iSeries_smp_message_recv(void)
-{
- int cpu = smp_processor_id();
- int msg;
-
- if (num_online_cpus() < 2)
- return;
-
- for (msg = 0; msg < 4; msg++)
- if (test_and_clear_bit(msg, &iSeries_smp_message[cpu]))
- smp_message_recv(msg);
-}
-
-static inline void smp_iSeries_do_message(int cpu, int msg)
+static void smp_iSeries_cause_ipi(int cpu, unsigned long data)
{
- set_bit(msg, &iSeries_smp_message[cpu]);
HvCall_sendIPI(&(paca[cpu]));
}
-static void smp_iSeries_message_pass(int target, int msg)
-{
- int i;
-
- if (target < NR_CPUS)
- smp_iSeries_do_message(target, msg);
- else {
- for_each_online_cpu(i) {
- if ((target == MSG_ALL_BUT_SELF) &&
- (i == smp_processor_id()))
- continue;
- smp_iSeries_do_message(i, msg);
- }
- }
-}
-
static int smp_iSeries_probe(void)
{
return cpumask_weight(cpu_possible_mask);
}
-static void smp_iSeries_kick_cpu(int nr)
+static int smp_iSeries_kick_cpu(int nr)
{
BUG_ON((nr < 0) || (nr >= NR_CPUS));
/* Verify that our partition has a processor nr */
if (lppaca_of(nr).dyn_proc_status >= 2)
- return;
+ return -ENOENT;
/* The processor is currently spinning, waiting
* for the cpu_start field to become non-zero
@@ -100,6 +66,8 @@ static void smp_iSeries_kick_cpu(int nr)
* continue on to secondary_start in iSeries_head.S
*/
paca[nr].cpu_start = 1;
+
+ return 0;
}
static void __devinit smp_iSeries_setup_cpu(int nr)
@@ -107,7 +75,8 @@ static void __devinit smp_iSeries_setup_cpu(int nr)
}
static struct smp_ops_t iSeries_smp_ops = {
- .message_pass = smp_iSeries_message_pass,
+ .message_pass = smp_muxed_ipi_message_pass,
+ .cause_ipi = smp_iSeries_cause_ipi,
.probe = smp_iSeries_probe,
.kick_cpu = smp_iSeries_kick_cpu,
.setup_cpu = smp_iSeries_setup_cpu,
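With the per-message bitmask bookkeeping gone, a platform only has to know how to ring the other CPU's doorbell; the generic code multiplexes the four message types and demuxes them on the receiving side. A minimal sketch of the pattern, assuming a platform-specific example_hw_send_ipi() exists:

	#include <linux/interrupt.h>
	#include <asm/smp.h>

	extern void example_hw_send_ipi(int cpu);	/* hypothetical doorbell primitive */

	static void example_cause_ipi(int cpu, unsigned long data)
	{
		example_hw_send_ipi(cpu);		/* poke the target cpu's interrupt hardware */
	}

	static irqreturn_t example_ipi_action(int irq, void *dev_id)
	{
		smp_ipi_demux();			/* run all pending muxed messages on this cpu */
		return IRQ_HANDLED;
	}

	static struct smp_ops_t example_smp_ops = {
		.message_pass	= smp_muxed_ipi_message_pass,	/* generic multiplexer */
		.cause_ipi	= example_cause_ipi,
		/* .probe, .kick_cpu, .setup_cpu as for any other platform */
	};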
diff --git a/arch/powerpc/platforms/iseries/smp.h b/arch/powerpc/platforms/iseries/smp.h
deleted file mode 100644
index d501f7de01e7..000000000000
--- a/arch/powerpc/platforms/iseries/smp.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _PLATFORMS_ISERIES_SMP_H
-#define _PLATFORMS_ISERIES_SMP_H
-
-extern void iSeries_smp_message_recv(void);
-
-#endif /* _PLATFORMS_ISERIES_SMP_H */
diff --git a/arch/powerpc/platforms/iseries/viopath.c b/arch/powerpc/platforms/iseries/viopath.c
index b5f05d943a90..2376069cdc14 100644
--- a/arch/powerpc/platforms/iseries/viopath.c
+++ b/arch/powerpc/platforms/iseries/viopath.c
@@ -396,7 +396,7 @@ static void vio_handleEvent(struct HvLpEvent *event)
viopathStatus[remoteLp].mTargetInst)) {
printk(VIOPATH_KERN_WARN
"message from invalid partition. "
- "int msg rcvd, source inst (%d) doesnt match (%d)\n",
+ "int msg rcvd, source inst (%d) doesn't match (%d)\n",
viopathStatus[remoteLp].mTargetInst,
event->xSourceInstanceId);
return;
@@ -407,7 +407,7 @@ static void vio_handleEvent(struct HvLpEvent *event)
viopathStatus[remoteLp].mSourceInst)) {
printk(VIOPATH_KERN_WARN
"message from invalid partition. "
- "int msg rcvd, target inst (%d) doesnt match (%d)\n",
+ "int msg rcvd, target inst (%d) doesn't match (%d)\n",
viopathStatus[remoteLp].mSourceInst,
event->xTargetInstanceId);
return;
@@ -418,7 +418,7 @@ static void vio_handleEvent(struct HvLpEvent *event)
viopathStatus[remoteLp].mSourceInst) {
printk(VIOPATH_KERN_WARN
"message from invalid partition. "
- "ack msg rcvd, source inst (%d) doesnt match (%d)\n",
+ "ack msg rcvd, source inst (%d) doesn't match (%d)\n",
viopathStatus[remoteLp].mSourceInst,
event->xSourceInstanceId);
return;
@@ -428,7 +428,7 @@ static void vio_handleEvent(struct HvLpEvent *event)
viopathStatus[remoteLp].mTargetInst) {
printk(VIOPATH_KERN_WARN
"message from invalid partition. "
- "viopath: ack msg rcvd, target inst (%d) doesnt match (%d)\n",
+ "viopath: ack msg rcvd, target inst (%d) doesn't match (%d)\n",
viopathStatus[remoteLp].mTargetInst,
event->xTargetInstanceId);
return;
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index 04296ffff8bf..dd2e48b28508 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -498,7 +498,7 @@ void __devinit maple_pci_irq_fixup(struct pci_dev *dev)
printk(KERN_DEBUG "Fixup U4 PCIe IRQ\n");
dev->irq = irq_create_mapping(NULL, 1);
if (dev->irq != NO_IRQ)
- set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW);
}
/* Hide AMD8111 IDE interrupt when in legacy mode so
diff --git a/arch/powerpc/platforms/pasemi/dma_lib.c b/arch/powerpc/platforms/pasemi/dma_lib.c
index 09695ae50f91..321a9b3a2d00 100644
--- a/arch/powerpc/platforms/pasemi/dma_lib.c
+++ b/arch/powerpc/platforms/pasemi/dma_lib.c
@@ -379,9 +379,9 @@ void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size,
}
EXPORT_SYMBOL(pasemi_dma_free_buf);
-/* pasemi_dma_alloc_flag - Allocate a flag (event) for channel syncronization
+/* pasemi_dma_alloc_flag - Allocate a flag (event) for channel synchronization
*
- * Allocates a flag for use with channel syncronization (event descriptors).
+ * Allocates a flag for use with channel synchronization (event descriptors).
* Returns allocated flag (0-63), < 0 on error.
*/
int pasemi_dma_alloc_flag(void)
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index a6067b38d2ca..7c858e6f843c 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -239,7 +239,7 @@ static __init void pas_init_IRQ(void)
if (nmiprop) {
nmi_virq = irq_create_mapping(NULL, *nmiprop);
mpic_irq_set_priority(nmi_virq, 15);
- set_irq_type(nmi_virq, IRQ_TYPE_EDGE_RISING);
+ irq_set_irq_type(nmi_virq, IRQ_TYPE_EDGE_RISING);
mpic_unmask_irq(irq_get_irq_data(nmi_virq));
}
diff --git a/arch/powerpc/platforms/powermac/Kconfig b/arch/powerpc/platforms/powermac/Kconfig
index 1e1a0873e1dd..1afd10f67858 100644
--- a/arch/powerpc/platforms/powermac/Kconfig
+++ b/arch/powerpc/platforms/powermac/Kconfig
@@ -18,4 +18,13 @@ config PPC_PMAC64
select PPC_970_NAP
default y
-
+config PPC_PMAC32_PSURGE
+ bool "Support for powersurge upgrade cards" if EXPERT
+ depends on SMP && PPC32 && PPC_PMAC
+ select PPC_SMP_MUXED_IPI
+ default y
+ help
+ The powersurge cpu boards can be used in the generation
+ of powermacs that have a socket for an upgradeable cpu card,
+ including the 7500, 8500, 9500, 9600. Support exists for
+ both dual and quad socket upgrade cards.
diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile
index 50f169392551..ea47df66fee5 100644
--- a/arch/powerpc/platforms/powermac/Makefile
+++ b/arch/powerpc/platforms/powermac/Makefile
@@ -11,7 +11,7 @@ obj-y += pic.o setup.o time.o feature.o pci.o \
obj-$(CONFIG_PMAC_BACKLIGHT) += backlight.o
obj-$(CONFIG_CPU_FREQ_PMAC) += cpufreq_32.o
obj-$(CONFIG_CPU_FREQ_PMAC64) += cpufreq_64.o
-# CONFIG_NVRAM is an arch. independant tristate symbol, for pmac32 we really
+# CONFIG_NVRAM is an arch. independent tristate symbol, for pmac32 we really
# need this to be a bool. Cheat here and pretend CONFIG_NVRAM=m is really
# CONFIG_NVRAM=y
obj-$(CONFIG_NVRAM:m=y) += nvram.o
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index 480567e5fa9a..e9c8a607268e 100644
--- a/arch/powerpc/platforms/powermac/low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
@@ -904,7 +904,7 @@ static void __init smu_i2c_probe(void)
printk(KERN_INFO "SMU i2c %s\n", controller->full_name);
/* Look for childs, note that they might not be of the right
- * type as older device trees mix i2c busses and other thigns
+ * type as older device trees mix i2c busses and other things
* at the same level
*/
for (busnode = NULL;
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index 3bc075c788ef..f33e08d573ce 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -299,7 +299,7 @@ static void __init setup_chaos(struct pci_controller *hose,
* This function deals with some "special cases" devices.
*
* 0 -> No special case
- * 1 -> Skip the device but act as if the access was successfull
+ * 1 -> Skip the device but act as if the access was successful
* (return 0xff's on reads, eventually, cache config space
* accesses in a later version)
* -1 -> Hide the device (unsuccessful access)
@@ -988,7 +988,7 @@ void __devinit pmac_pci_irq_fixup(struct pci_dev *dev)
dev->vendor == PCI_VENDOR_ID_DEC &&
dev->device == PCI_DEVICE_ID_DEC_TULIP_PLUS) {
dev->irq = irq_create_mapping(NULL, 60);
- set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW);
}
#endif /* CONFIG_PPC32 */
}
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index c55812bb6a51..9089b0421191 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -21,7 +21,7 @@
#include <linux/signal.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
-#include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
#include <linux/adb.h>
#include <linux/pmu.h>
#include <linux/module.h>
@@ -84,7 +84,7 @@ static void __pmac_retrigger(unsigned int irq_nr)
static void pmac_mask_and_ack_irq(struct irq_data *d)
{
- unsigned int src = irq_map[d->irq].hwirq;
+ unsigned int src = irqd_to_hwirq(d);
unsigned long bit = 1UL << (src & 0x1f);
int i = src >> 5;
unsigned long flags;
@@ -106,7 +106,7 @@ static void pmac_mask_and_ack_irq(struct irq_data *d)
static void pmac_ack_irq(struct irq_data *d)
{
- unsigned int src = irq_map[d->irq].hwirq;
+ unsigned int src = irqd_to_hwirq(d);
unsigned long bit = 1UL << (src & 0x1f);
int i = src >> 5;
unsigned long flags;
@@ -152,12 +152,12 @@ static void __pmac_set_irq_mask(unsigned int irq_nr, int nokicklost)
static unsigned int pmac_startup_irq(struct irq_data *d)
{
unsigned long flags;
- unsigned int src = irq_map[d->irq].hwirq;
+ unsigned int src = irqd_to_hwirq(d);
unsigned long bit = 1UL << (src & 0x1f);
int i = src >> 5;
raw_spin_lock_irqsave(&pmac_pic_lock, flags);
- if ((irq_to_desc(d->irq)->status & IRQ_LEVEL) == 0)
+ if (!irqd_is_level_type(d))
out_le32(&pmac_irq_hw[i]->ack, bit);
__set_bit(src, ppc_cached_irq_mask);
__pmac_set_irq_mask(src, 0);
@@ -169,7 +169,7 @@ static unsigned int pmac_startup_irq(struct irq_data *d)
static void pmac_mask_irq(struct irq_data *d)
{
unsigned long flags;
- unsigned int src = irq_map[d->irq].hwirq;
+ unsigned int src = irqd_to_hwirq(d);
raw_spin_lock_irqsave(&pmac_pic_lock, flags);
__clear_bit(src, ppc_cached_irq_mask);
@@ -180,7 +180,7 @@ static void pmac_mask_irq(struct irq_data *d)
static void pmac_unmask_irq(struct irq_data *d)
{
unsigned long flags;
- unsigned int src = irq_map[d->irq].hwirq;
+ unsigned int src = irqd_to_hwirq(d);
raw_spin_lock_irqsave(&pmac_pic_lock, flags);
__set_bit(src, ppc_cached_irq_mask);
@@ -193,7 +193,7 @@ static int pmac_retrigger(struct irq_data *d)
unsigned long flags;
raw_spin_lock_irqsave(&pmac_pic_lock, flags);
- __pmac_retrigger(irq_map[d->irq].hwirq);
+ __pmac_retrigger(irqd_to_hwirq(d));
raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
return 1;
}
@@ -239,15 +239,12 @@ static unsigned int pmac_pic_get_irq(void)
unsigned long bits = 0;
unsigned long flags;
-#ifdef CONFIG_SMP
- void psurge_smp_message_recv(void);
-
- /* IPI's are a hack on the powersurge -- Cort */
- if ( smp_processor_id() != 0 ) {
- psurge_smp_message_recv();
- return NO_IRQ_IGNORE; /* ignore, already handled */
+#ifdef CONFIG_PPC_PMAC32_PSURGE
+ /* IPI's are a hack on the powersurge -- Cort */
+ if (smp_processor_id() != 0) {
+ return psurge_secondary_virq;
}
-#endif /* CONFIG_SMP */
+#endif /* CONFIG_PPC_PMAC32_PSURGE */
raw_spin_lock_irqsave(&pmac_pic_lock, flags);
for (irq = max_real_irqs; (irq -= 32) >= 0; ) {
int i = irq >> 5;
@@ -289,7 +286,6 @@ static int pmac_pic_host_match(struct irq_host *h, struct device_node *node)
static int pmac_pic_host_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hw)
{
- struct irq_desc *desc = irq_to_desc(virq);
int level;
if (hw >= max_irqs)
@@ -300,9 +296,9 @@ static int pmac_pic_host_map(struct irq_host *h, unsigned int virq,
*/
level = !!(level_mask[hw >> 5] & (1UL << (hw & 0x1f)));
if (level)
- desc->status |= IRQ_LEVEL;
- set_irq_chip_and_handler(virq, &pmac_pic, level ?
- handle_level_irq : handle_edge_irq);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_and_handler(virq, &pmac_pic,
+ level ? handle_level_irq : handle_edge_irq);
return 0;
}
@@ -472,8 +468,8 @@ int of_irq_map_oldworld(struct device_node *device, int index,
static void pmac_u3_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
- struct mpic *mpic = get_irq_desc_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct mpic *mpic = irq_desc_get_handler_data(desc);
unsigned int cascade_irq = mpic_get_one_irq(mpic);
if (cascade_irq != NO_IRQ)
@@ -591,8 +587,8 @@ static int __init pmac_pic_probe_mpic(void)
of_node_put(slave);
return 0;
}
- set_irq_data(cascade, mpic2);
- set_irq_chained_handler(cascade, pmac_u3_cascade);
+ irq_set_handler_data(cascade, mpic2);
+ irq_set_chained_handler(cascade, pmac_u3_cascade);
of_node_put(slave);
return 0;
@@ -678,7 +674,7 @@ not_found:
return viaint;
}
-static int pmacpic_suspend(struct sys_device *sysdev, pm_message_t state)
+static int pmacpic_suspend(void)
{
int viaint = pmacpic_find_viaint();
@@ -699,7 +695,7 @@ static int pmacpic_suspend(struct sys_device *sysdev, pm_message_t state)
return 0;
}
-static int pmacpic_resume(struct sys_device *sysdev)
+static void pmacpic_resume(void)
{
int i;
@@ -710,39 +706,19 @@ static int pmacpic_resume(struct sys_device *sysdev)
for (i = 0; i < max_real_irqs; ++i)
if (test_bit(i, sleep_save_mask))
pmac_unmask_irq(irq_get_irq_data(i));
-
- return 0;
}
-#endif /* CONFIG_PM && CONFIG_PPC32 */
-
-static struct sysdev_class pmacpic_sysclass = {
- .name = "pmac_pic",
-};
-
-static struct sys_device device_pmacpic = {
- .id = 0,
- .cls = &pmacpic_sysclass,
-};
-
-static struct sysdev_driver driver_pmacpic = {
-#if defined(CONFIG_PM) && defined(CONFIG_PPC32)
- .suspend = &pmacpic_suspend,
- .resume = &pmacpic_resume,
-#endif /* CONFIG_PM && CONFIG_PPC32 */
+static struct syscore_ops pmacpic_syscore_ops = {
+ .suspend = pmacpic_suspend,
+ .resume = pmacpic_resume,
};
-static int __init init_pmacpic_sysfs(void)
+static int __init init_pmacpic_syscore(void)
{
-#ifdef CONFIG_PPC32
- if (max_irqs == 0)
- return -ENODEV;
-#endif
- printk(KERN_DEBUG "Registering pmac pic with sysfs...\n");
- sysdev_class_register(&pmacpic_sysclass);
- sysdev_register(&device_pmacpic);
- sysdev_driver_register(&pmacpic_sysclass, &driver_pmacpic);
+ register_syscore_ops(&pmacpic_syscore_ops);
return 0;
}
-machine_subsys_initcall(powermac, init_pmacpic_sysfs);
+machine_subsys_initcall(powermac, init_pmacpic_syscore);
+
+#endif /* CONFIG_PM && CONFIG_PPC32 */
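The sysdev boilerplate removed above collapses into the syscore interface: no class, no device, no driver registration, just one ops structure whose suspend callback takes no arguments and whose resume returns void. A sketch of the pattern (the example_* helpers are placeholders, not part of the patch):

	#include <linux/init.h>
	#include <linux/syscore_ops.h>

	extern void example_pic_save_state(void);	/* hypothetical: stash mask/level registers */
	extern void example_pic_restore_state(void);	/* hypothetical: rewrite them on wakeup */

	static int example_pic_suspend(void)
	{
		example_pic_save_state();
		return 0;
	}

	static void example_pic_resume(void)
	{
		example_pic_restore_state();
	}

	static struct syscore_ops example_pic_syscore_ops = {
		.suspend = example_pic_suspend,
		.resume  = example_pic_resume,
	};

	static int __init example_pic_init_syscore(void)
	{
		register_syscore_ops(&example_pic_syscore_ops);
		return 0;
	}
	subsys_initcall(example_pic_init_syscore);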
diff --git a/arch/powerpc/platforms/powermac/pic.h b/arch/powerpc/platforms/powermac/pic.h
deleted file mode 100644
index d622a8345aaa..000000000000
--- a/arch/powerpc/platforms/powermac/pic.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef __PPC_PLATFORMS_PMAC_PIC_H
-#define __PPC_PLATFORMS_PMAC_PIC_H
-
-#include <linux/irq.h>
-
-extern struct irq_chip pmac_pic;
-
-extern void pmac_pic_init(void);
-extern int pmac_get_irq(void);
-
-#endif /* __PPC_PLATFORMS_PMAC_PIC_H */
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h
index f0bc08f6c1f0..8327cce2bdb0 100644
--- a/arch/powerpc/platforms/powermac/pmac.h
+++ b/arch/powerpc/platforms/powermac/pmac.h
@@ -33,7 +33,7 @@ extern void pmac_setup_pci_dma(void);
extern void pmac_check_ht_link(void);
extern void pmac_setup_smp(void);
-extern void pmac32_cpu_die(void);
+extern int psurge_secondary_virq;
extern void low_cpu_die(void) __attribute__((noreturn));
extern int pmac_nvram_init(void);
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index d5aceb7fb125..aa45281bd296 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -650,51 +650,6 @@ static int pmac_pci_probe_mode(struct pci_bus *bus)
return PCI_PROBE_NORMAL;
return PCI_PROBE_DEVTREE;
}
-
-#ifdef CONFIG_HOTPLUG_CPU
-/* access per cpu vars from generic smp.c */
-DECLARE_PER_CPU(int, cpu_state);
-
-static void pmac64_cpu_die(void)
-{
- /*
- * turn off as much as possible, we'll be
- * kicked out as this will only be invoked
- * on core99 platforms for now ...
- */
-
- printk(KERN_INFO "CPU#%d offline\n", smp_processor_id());
- __get_cpu_var(cpu_state) = CPU_DEAD;
- smp_wmb();
-
- /*
- * during the path that leads here preemption is disabled,
- * reenable it now so that when coming up preempt count is
- * zero correctly
- */
- preempt_enable();
-
- /*
- * hard-disable interrupts for the non-NAP case, the NAP code
- * needs to re-enable interrupts (but soft-disables them)
- */
- hard_irq_disable();
-
- while (1) {
- /* let's not take timer interrupts too often ... */
- set_dec(0x7fffffff);
-
- /* should always be true at this point */
- if (cpu_has_feature(CPU_FTR_CAN_NAP))
- power4_cpu_offline_powersave();
- else {
- HMT_low();
- HMT_very_low();
- }
- }
-}
-#endif /* CONFIG_HOTPLUG_CPU */
-
#endif /* CONFIG_PPC64 */
define_machine(powermac) {
@@ -726,15 +681,4 @@ define_machine(powermac) {
.pcibios_after_init = pmac_pcibios_after_init,
.phys_mem_access_prot = pci_phys_mem_access_prot,
#endif
-#ifdef CONFIG_HOTPLUG_CPU
-#ifdef CONFIG_PPC64
- .cpu_die = pmac64_cpu_die,
-#endif
-#ifdef CONFIG_PPC32
- .cpu_die = pmac32_cpu_die,
-#endif
-#endif
-#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32)
- .cpu_die = generic_mach_cpu_die,
-#endif
};
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index c95215f4f8b6..db092d7c4c5b 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -70,7 +70,7 @@ static void (*pmac_tb_freeze)(int freeze);
static u64 timebase;
static int tb_req;
-#ifdef CONFIG_PPC32
+#ifdef CONFIG_PPC_PMAC32_PSURGE
/*
* Powersurge (old powermac SMP) support.
@@ -124,6 +124,10 @@ static volatile u32 __iomem *psurge_start;
/* what sort of powersurge board we have */
static int psurge_type = PSURGE_NONE;
+/* irq for secondary cpus to report */
+static struct irq_host *psurge_host;
+int psurge_secondary_virq;
+
/*
* Set and clear IPIs for powersurge.
*/
@@ -156,51 +160,52 @@ static inline void psurge_clr_ipi(int cpu)
/*
* On powersurge (old SMP powermac architecture) we don't have
* separate IPIs for separate messages like openpic does. Instead
- * we have a bitmap for each processor, where a 1 bit means that
- * the corresponding message is pending for that processor.
- * Ideally each cpu's entry would be in a different cache line.
+ * use the generic demux helpers
* -- paulus.
*/
-static unsigned long psurge_smp_message[NR_CPUS];
-
-void psurge_smp_message_recv(void)
+static irqreturn_t psurge_ipi_intr(int irq, void *d)
{
- int cpu = smp_processor_id();
- int msg;
-
- /* clear interrupt */
- psurge_clr_ipi(cpu);
+ psurge_clr_ipi(smp_processor_id());
+ smp_ipi_demux();
- if (num_online_cpus() < 2)
- return;
+ return IRQ_HANDLED;
+}
- /* make sure there is a message there */
- for (msg = 0; msg < 4; msg++)
- if (test_and_clear_bit(msg, &psurge_smp_message[cpu]))
- smp_message_recv(msg);
+static void smp_psurge_cause_ipi(int cpu, unsigned long data)
+{
+ psurge_set_ipi(cpu);
}
-irqreturn_t psurge_primary_intr(int irq, void *d)
+static int psurge_host_map(struct irq_host *h, unsigned int virq,
+ irq_hw_number_t hw)
{
- psurge_smp_message_recv();
- return IRQ_HANDLED;
+ irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_percpu_irq);
+
+ return 0;
}
-static void smp_psurge_message_pass(int target, int msg)
+struct irq_host_ops psurge_host_ops = {
+ .map = psurge_host_map,
+};
+
+static int psurge_secondary_ipi_init(void)
{
- int i;
+ int rc = -ENOMEM;
- if (num_online_cpus() < 2)
- return;
+ psurge_host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0,
+ &psurge_host_ops, 0);
- for_each_online_cpu(i) {
- if (target == MSG_ALL
- || (target == MSG_ALL_BUT_SELF && i != smp_processor_id())
- || target == i) {
- set_bit(msg, &psurge_smp_message[i]);
- psurge_set_ipi(i);
- }
- }
+ if (psurge_host)
+ psurge_secondary_virq = irq_create_direct_mapping(psurge_host);
+
+ if (psurge_secondary_virq)
+ rc = request_irq(psurge_secondary_virq, psurge_ipi_intr,
+ IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL);
+
+ if (rc)
+ pr_err("Failed to setup secondary cpu IPI\n");
+
+ return rc;
}
/*
@@ -311,6 +316,9 @@ static int __init smp_psurge_probe(void)
ncpus = 2;
}
+ if (psurge_secondary_ipi_init())
+ return 1;
+
psurge_start = ioremap(PSURGE_START, 4);
psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);
@@ -329,7 +337,7 @@ static int __init smp_psurge_probe(void)
return ncpus;
}
-static void __init smp_psurge_kick_cpu(int nr)
+static int __init smp_psurge_kick_cpu(int nr)
{
unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8;
unsigned long a, flags;
@@ -394,11 +402,13 @@ static void __init smp_psurge_kick_cpu(int nr)
psurge_set_ipi(1);
if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354);
+
+ return 0;
}
static struct irqaction psurge_irqaction = {
- .handler = psurge_primary_intr,
- .flags = IRQF_DISABLED,
+ .handler = psurge_ipi_intr,
+ .flags = IRQF_DISABLED|IRQF_PERCPU,
.name = "primary IPI",
};
@@ -437,14 +447,15 @@ void __init smp_psurge_give_timebase(void)
/* PowerSurge-style Macs */
struct smp_ops_t psurge_smp_ops = {
- .message_pass = smp_psurge_message_pass,
+ .message_pass = smp_muxed_ipi_message_pass,
+ .cause_ipi = smp_psurge_cause_ipi,
.probe = smp_psurge_probe,
.kick_cpu = smp_psurge_kick_cpu,
.setup_cpu = smp_psurge_setup_cpu,
.give_timebase = smp_psurge_give_timebase,
.take_timebase = smp_psurge_take_timebase,
};
-#endif /* CONFIG_PPC32 - actually powersurge support */
+#endif /* CONFIG_PPC_PMAC32_PSURGE */
/*
* Core 99 and later support
@@ -791,14 +802,14 @@ static int __init smp_core99_probe(void)
return ncpus;
}
-static void __devinit smp_core99_kick_cpu(int nr)
+static int __devinit smp_core99_kick_cpu(int nr)
{
unsigned int save_vector;
unsigned long target, flags;
unsigned int *vector = (unsigned int *)(PAGE_OFFSET+0x100);
if (nr < 0 || nr > 3)
- return;
+ return -ENOENT;
if (ppc_md.progress)
ppc_md.progress("smp_core99_kick_cpu", 0x346);
@@ -830,6 +841,8 @@ static void __devinit smp_core99_kick_cpu(int nr)
local_irq_restore(flags);
if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);
+
+ return 0;
}
static void __devinit smp_core99_setup_cpu(int cpu_nr)
@@ -840,92 +853,151 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr)
/* Setup openpic */
mpic_setup_this_cpu();
+}
- if (cpu_nr == 0) {
#ifdef CONFIG_PPC64
- extern void g5_phy_disable_cpu1(void);
+#ifdef CONFIG_HOTPLUG_CPU
+static int smp_core99_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ int rc;
- /* Close i2c bus if it was used for tb sync */
+ switch(action) {
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+ /* Open i2c bus if it was used for tb sync */
if (pmac_tb_clock_chip_host) {
- pmac_i2c_close(pmac_tb_clock_chip_host);
- pmac_tb_clock_chip_host = NULL;
+ rc = pmac_i2c_open(pmac_tb_clock_chip_host, 1);
+ if (rc) {
+ pr_err("Failed to open i2c bus for time sync\n");
+ return notifier_from_errno(rc);
+ }
}
+ break;
+ case CPU_ONLINE:
+ case CPU_UP_CANCELED:
+ /* Close i2c bus if it was used for tb sync */
+ if (pmac_tb_clock_chip_host)
+ pmac_i2c_close(pmac_tb_clock_chip_host);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
- /* If we didn't start the second CPU, we must take
- * it off the bus
- */
- if (of_machine_is_compatible("MacRISC4") &&
- num_online_cpus() < 2)
- g5_phy_disable_cpu1();
-#endif /* CONFIG_PPC64 */
+static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
+ .notifier_call = smp_core99_cpu_notify,
+};
+#endif /* CONFIG_HOTPLUG_CPU */
+
+static void __init smp_core99_bringup_done(void)
+{
+ extern void g5_phy_disable_cpu1(void);
- if (ppc_md.progress)
- ppc_md.progress("core99_setup_cpu 0 done", 0x349);
+ /* Close i2c bus if it was used for tb sync */
+ if (pmac_tb_clock_chip_host)
+ pmac_i2c_close(pmac_tb_clock_chip_host);
+
+ /* If we didn't start the second CPU, we must take
+ * it off the bus.
+ */
+ if (of_machine_is_compatible("MacRISC4") &&
+ num_online_cpus() < 2) {
+ set_cpu_present(1, false);
+ g5_phy_disable_cpu1();
}
-}
+#ifdef CONFIG_HOTPLUG_CPU
+ register_cpu_notifier(&smp_core99_cpu_nb);
+#endif
+ if (ppc_md.progress)
+ ppc_md.progress("smp_core99_bringup_done", 0x349);
+}
+#endif /* CONFIG_PPC64 */
-#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32)
+#ifdef CONFIG_HOTPLUG_CPU
-int smp_core99_cpu_disable(void)
+static int smp_core99_cpu_disable(void)
{
- set_cpu_online(smp_processor_id(), false);
+ int rc = generic_cpu_disable();
+ if (rc)
+ return rc;
- /* XXX reset cpu affinity here */
mpic_cpu_set_priority(0xf);
- asm volatile("mtdec %0" : : "r" (0x7fffffff));
- mb();
- udelay(20);
- asm volatile("mtdec %0" : : "r" (0x7fffffff));
+
return 0;
}
-static int cpu_dead[NR_CPUS];
+#ifdef CONFIG_PPC32
-void pmac32_cpu_die(void)
+static void pmac_cpu_die(void)
{
+ int cpu = smp_processor_id();
+
local_irq_disable();
- cpu_dead[smp_processor_id()] = 1;
+ idle_task_exit();
+ pr_debug("CPU%d offline\n", cpu);
+ generic_set_cpu_dead(cpu);
+ smp_wmb();
mb();
low_cpu_die();
}
-void smp_core99_cpu_die(unsigned int cpu)
+#else /* CONFIG_PPC32 */
+
+static void pmac_cpu_die(void)
{
- int timeout;
+ int cpu = smp_processor_id();
- timeout = 1000;
- while (!cpu_dead[cpu]) {
- if (--timeout == 0) {
- printk("CPU %u refused to die!\n", cpu);
- break;
- }
- msleep(1);
+ local_irq_disable();
+ idle_task_exit();
+
+ /*
+ * turn off as much as possible, we'll be
+ * kicked out as this will only be invoked
+ * on core99 platforms for now ...
+ */
+
+ printk(KERN_INFO "CPU#%d offline\n", cpu);
+ generic_set_cpu_dead(cpu);
+ smp_wmb();
+
+ /*
+ * Re-enable interrupts. The NAP code needs to enable them
+ * anyways, do it now so we deal with the case where one already
+ * happened while soft-disabled.
+ * We shouldn't get any external interrupts, only decrementer, and the
+ * decrementer handler is safe for use on offline CPUs
+ */
+ local_irq_enable();
+
+ while (1) {
+ /* let's not take timer interrupts too often ... */
+ set_dec(0x7fffffff);
+
+ /* Enter NAP mode */
+ power4_idle();
}
- cpu_dead[cpu] = 0;
}
-#endif /* CONFIG_HOTPLUG_CPU && CONFIG_PP32 */
+#endif /* else CONFIG_PPC32 */
+#endif /* CONFIG_HOTPLUG_CPU */
/* Core99 Macs (dual G4s and G5s) */
struct smp_ops_t core99_smp_ops = {
.message_pass = smp_mpic_message_pass,
.probe = smp_core99_probe,
+#ifdef CONFIG_PPC64
+ .bringup_done = smp_core99_bringup_done,
+#endif
.kick_cpu = smp_core99_kick_cpu,
.setup_cpu = smp_core99_setup_cpu,
.give_timebase = smp_core99_give_timebase,
.take_timebase = smp_core99_take_timebase,
#if defined(CONFIG_HOTPLUG_CPU)
-# if defined(CONFIG_PPC32)
.cpu_disable = smp_core99_cpu_disable,
- .cpu_die = smp_core99_cpu_die,
-# endif
-# if defined(CONFIG_PPC64)
- .cpu_disable = generic_cpu_disable,
.cpu_die = generic_cpu_die,
- /* intentionally do *NOT* assign cpu_enable,
- * the generic code will use kick_cpu then! */
-# endif
#endif
};
@@ -943,7 +1015,7 @@ void __init pmac_setup_smp(void)
of_node_put(np);
smp_ops = &core99_smp_ops;
}
-#ifdef CONFIG_PPC32
+#ifdef CONFIG_PPC_PMAC32_PSURGE
else {
/* We have to set bits in cpu_possible_mask here since the
* secondary CPU(s) aren't in the device tree. Various
@@ -956,6 +1028,11 @@ void __init pmac_setup_smp(void)
set_cpu_possible(cpu, true);
smp_ops = &psurge_smp_ops;
}
-#endif /* CONFIG_PPC32 */
+#endif /* CONFIG_PPC_PMAC32_PSURGE */
+
+#ifdef CONFIG_HOTPLUG_CPU
+ ppc_md.cpu_die = pmac_cpu_die;
+#endif
}
+
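Pulling the powersurge pieces of this patch together: the secondary-CPU IPI now has a real virq. A NOMAP irq_host supplies a direct mapping, the mapped interrupt is marked per-cpu with the dummy chip, and its action simply demuxes the muxed IPI messages. A condensed sketch under those assumptions (example_* names are illustrative):

	#include <linux/irq.h>
	#include <linux/interrupt.h>
	#include <asm/smp.h>

	static struct irq_host *example_ipi_host;
	static unsigned int example_ipi_virq;

	static irqreturn_t example_ipi_intr(int irq, void *dev_id)
	{
		smp_ipi_demux();
		return IRQ_HANDLED;
	}

	static int example_ipi_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hw)
	{
		irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_percpu_irq);
		return 0;
	}

	static struct irq_host_ops example_ipi_host_ops = {
		.map = example_ipi_map,
	};

	static int __init example_ipi_init(void)
	{
		example_ipi_host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0,
						  &example_ipi_host_ops, 0);
		if (!example_ipi_host)
			return -ENOMEM;

		example_ipi_virq = irq_create_direct_mapping(example_ipi_host);
		if (!example_ipi_virq)
			return -ENOMEM;

		return request_irq(example_ipi_virq, example_ipi_intr,
				   IRQF_DISABLED | IRQF_PERCPU, "IPI", NULL);
	}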
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index 3988c86682a5..600ed2c0ed59 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -194,10 +194,10 @@ static int ps3_virq_setup(enum ps3_cpu_binding cpu, unsigned long outlet,
pr_debug("%s:%d: outlet %lu => cpu %u, virq %u\n", __func__, __LINE__,
outlet, cpu, *virq);
- result = set_irq_chip_data(*virq, pd);
+ result = irq_set_chip_data(*virq, pd);
if (result) {
- pr_debug("%s:%d: set_irq_chip_data failed\n",
+ pr_debug("%s:%d: irq_set_chip_data failed\n",
__func__, __LINE__);
goto fail_set;
}
@@ -221,12 +221,12 @@ fail_create:
static int ps3_virq_destroy(unsigned int virq)
{
- const struct ps3_private *pd = get_irq_chip_data(virq);
+ const struct ps3_private *pd = irq_get_chip_data(virq);
pr_debug("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__,
__LINE__, pd->ppe_id, pd->thread_id, virq);
- set_irq_chip_data(virq, NULL);
+ irq_set_chip_data(virq, NULL);
irq_dispose_mapping(virq);
pr_debug("%s:%d <-\n", __func__, __LINE__);
@@ -256,7 +256,7 @@ int ps3_irq_plug_setup(enum ps3_cpu_binding cpu, unsigned long outlet,
goto fail_setup;
}
- pd = get_irq_chip_data(*virq);
+ pd = irq_get_chip_data(*virq);
/* Binds outlet to cpu + virq. */
@@ -291,7 +291,7 @@ EXPORT_SYMBOL_GPL(ps3_irq_plug_setup);
int ps3_irq_plug_destroy(unsigned int virq)
{
int result;
- const struct ps3_private *pd = get_irq_chip_data(virq);
+ const struct ps3_private *pd = irq_get_chip_data(virq);
pr_debug("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__,
__LINE__, pd->ppe_id, pd->thread_id, virq);
@@ -659,18 +659,13 @@ static void __maybe_unused _dump_mask(struct ps3_private *pd,
static void dump_bmp(struct ps3_private* pd) {};
#endif /* defined(DEBUG) */
-static void ps3_host_unmap(struct irq_host *h, unsigned int virq)
-{
- set_irq_chip_data(virq, NULL);
-}
-
static int ps3_host_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hwirq)
{
pr_debug("%s:%d: hwirq %lu, virq %u\n", __func__, __LINE__, hwirq,
virq);
- set_irq_chip_and_handler(virq, &ps3_irq_chip, handle_fasteoi_irq);
+ irq_set_chip_and_handler(virq, &ps3_irq_chip, handle_fasteoi_irq);
return 0;
}
@@ -683,7 +678,6 @@ static int ps3_host_match(struct irq_host *h, struct device_node *np)
static struct irq_host_ops ps3_host_ops = {
.map = ps3_host_map,
- .unmap = ps3_host_unmap,
.match = ps3_host_match,
};
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c
index 51ffde40af2b..4c44794faac0 100644
--- a/arch/powerpc/platforms/ps3/smp.c
+++ b/arch/powerpc/platforms/ps3/smp.c
@@ -39,7 +39,7 @@
#define MSG_COUNT 4
static DEFINE_PER_CPU(unsigned int [MSG_COUNT], ps3_ipi_virqs);
-static void do_message_pass(int target, int msg)
+static void ps3_smp_message_pass(int cpu, int msg)
{
int result;
unsigned int virq;
@@ -49,28 +49,12 @@ static void do_message_pass(int target, int msg)
return;
}
- virq = per_cpu(ps3_ipi_virqs, target)[msg];
+ virq = per_cpu(ps3_ipi_virqs, cpu)[msg];
result = ps3_send_event_locally(virq);
if (result)
DBG("%s:%d: ps3_send_event_locally(%d, %d) failed"
- " (%d)\n", __func__, __LINE__, target, msg, result);
-}
-
-static void ps3_smp_message_pass(int target, int msg)
-{
- int cpu;
-
- if (target < NR_CPUS)
- do_message_pass(target, msg);
- else if (target == MSG_ALL_BUT_SELF) {
- for_each_online_cpu(cpu)
- if (cpu != smp_processor_id())
- do_message_pass(cpu, msg);
- } else {
- for_each_online_cpu(cpu)
- do_message_pass(cpu, msg);
- }
+ " (%d)\n", __func__, __LINE__, cpu, msg, result);
}
static int ps3_smp_probe(void)
diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c
index 39a472e9e80f..375a9f92158d 100644
--- a/arch/powerpc/platforms/ps3/spu.c
+++ b/arch/powerpc/platforms/ps3/spu.c
@@ -197,7 +197,7 @@ static void spu_unmap(struct spu *spu)
* The current HV requires the spu shadow regs to be mapped with the
* PTE page protection bits set as read-only (PP=3). This implementation
* uses the low level __ioremap() to bypass the page protection settings
- * inforced by ioremap_flags() to get the needed PTE bits set for the
+ * inforced by ioremap_prot() to get the needed PTE bits set for the
* shadow regs.
*/
@@ -214,7 +214,7 @@ static int __init setup_areas(struct spu *spu)
goto fail_ioremap;
}
- spu->local_store = (__force void *)ioremap_flags(spu->local_store_phys,
+ spu->local_store = (__force void *)ioremap_prot(spu->local_store_phys,
LS_SIZE, _PAGE_NO_CACHE);
if (!spu->local_store) {
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 5b3da4b4ea79..71af4c5d6c05 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -3,7 +3,10 @@ config PPC_PSERIES
bool "IBM pSeries & new (POWER5-based) iSeries"
select MPIC
select PCI_MSI
- select XICS
+ select PPC_XICS
+ select PPC_ICP_NATIVE
+ select PPC_ICP_HV
+ select PPC_ICS_RTAS
select PPC_I8259
select PPC_RTAS
select PPC_RTAS_DAEMON
@@ -47,6 +50,24 @@ config SCANLOG
tristate "Scanlog dump interface"
depends on RTAS_PROC && PPC_PSERIES
+config IO_EVENT_IRQ
+ bool "IO Event Interrupt support"
+ depends on PPC_PSERIES
+ default y
+ help
+ Select this option if you want to enable support for IO Event
+ interrupts. IO event interrupt is a mechanism provided by RTAS
+ to return information about hardware error and non-error events
+ which may need OS attention. RTAS returns events for multiple
+ event types and scopes. Device drivers can register their handlers
+ to receive events.
+
+ This option will only enable the IO event platform code. You
+ will still need to enable or compile the actual drivers
+ that use this infrastructure to handle IO event interrupts.
+
+ Say Y if you are unsure.
+
config LPARCFG
bool "LPAR Configuration Data"
depends on PPC_PSERIES || PPC_ISERIES
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index fc5237810ece..3556e402cbf5 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -5,7 +5,6 @@ obj-y := lpar.o hvCall.o nvram.o reconfig.o \
setup.o iommu.o event_sources.o ras.o \
firmware.o power.o dlpar.o mobility.o
obj-$(CONFIG_SMP) += smp.o
-obj-$(CONFIG_XICS) += xics.o
obj-$(CONFIG_SCANLOG) += scanlog.o
obj-$(CONFIG_EEH) += eeh.o eeh_cache.o eeh_driver.o eeh_event.o eeh_sysfs.o
obj-$(CONFIG_KEXEC) += kexec.o
@@ -22,6 +21,7 @@ obj-$(CONFIG_HCALL_STATS) += hvCall_inst.o
obj-$(CONFIG_PHYP_DUMP) += phyp_dump.o
obj-$(CONFIG_CMM) += cmm.o
obj-$(CONFIG_DTL) += dtl.o
+obj-$(CONFIG_IO_EVENT_IRQ) += io_event_irq.o
ifeq ($(CONFIG_PPC_PSERIES),y)
obj-$(CONFIG_SUSPEND) += suspend.o
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index b74a9230edc9..57ceb92b2288 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -74,7 +74,7 @@ static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa)
return NULL;
/* The configure connector reported name does not contain a
- * preceeding '/', so we allocate a buffer large enough to
+ * preceding '/', so we allocate a buffer large enough to
* prepend this to the full_name.
*/
name = (char *)ccwa + ccwa->name_offset;
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index c371bc06434b..e9190073bb97 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -52,10 +52,10 @@ static u8 dtl_event_mask = 0x7;
/*
- * Size of per-cpu log buffers. Default is just under 16 pages worth.
+ * Size of per-cpu log buffers. Firmware requires that the buffer does
+ * not cross a 4k boundary.
*/
-static int dtl_buf_entries = (16 * 85);
-
+static int dtl_buf_entries = N_DISPATCH_LOG;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
struct dtl_ring {
@@ -151,7 +151,7 @@ static int dtl_start(struct dtl *dtl)
/* Register our dtl buffer with the hypervisor. The HV expects the
* buffer size to be passed in the second word of the buffer */
- ((u32 *)dtl->buf)[1] = dtl->buf_entries * sizeof(struct dtl_entry);
+ ((u32 *)dtl->buf)[1] = DISPATCH_LOG_BYTES;
hwcpu = get_hard_smp_processor_id(dtl->cpu);
addr = __pa(dtl->buf);
@@ -196,13 +196,15 @@ static int dtl_enable(struct dtl *dtl)
long int rc;
struct dtl_entry *buf = NULL;
+ if (!dtl_cache)
+ return -ENOMEM;
+
/* only allow one reader */
if (dtl->buf)
return -EBUSY;
n_entries = dtl_buf_entries;
- buf = kmalloc_node(n_entries * sizeof(struct dtl_entry),
- GFP_KERNEL, cpu_to_node(dtl->cpu));
+ buf = kmem_cache_alloc_node(dtl_cache, GFP_KERNEL, cpu_to_node(dtl->cpu));
if (!buf) {
printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n",
__func__, dtl->cpu);
@@ -223,7 +225,7 @@ static int dtl_enable(struct dtl *dtl)
spin_unlock(&dtl->lock);
if (rc)
- kfree(buf);
+ kmem_cache_free(dtl_cache, buf);
return rc;
}
@@ -231,7 +233,7 @@ static void dtl_disable(struct dtl *dtl)
{
spin_lock(&dtl->lock);
dtl_stop(dtl);
- kfree(dtl->buf);
+ kmem_cache_free(dtl_cache, dtl->buf);
dtl->buf = NULL;
dtl->buf_entries = 0;
spin_unlock(&dtl->lock);
@@ -365,7 +367,7 @@ static int dtl_init(void)
event_mask_file = debugfs_create_x8("dtl_event_mask", 0600,
dtl_dir, &dtl_event_mask);
- buf_entries_file = debugfs_create_u32("dtl_buf_entries", 0600,
+ buf_entries_file = debugfs_create_u32("dtl_buf_entries", 0400,
dtl_dir, &dtl_buf_entries);
if (!event_mask_file || !buf_entries_file) {
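The switch from kmalloc_node() to a dedicated slab cache is what satisfies the 4k-boundary rule noted above: when the cache's object size equals its alignment, every buffer occupies one naturally aligned block and cannot straddle a 4k boundary. A sketch of how such a cache would be created elsewhere in the series (the constant and name are illustrative; the real dtl_cache and DISPATCH_LOG_BYTES come from the pseries setup code and asm/lppaca.h):

	#include <linux/slab.h>

	#define EXAMPLE_DISPATCH_LOG_BYTES 4096		/* illustrative value */

	static struct kmem_cache *example_dtl_cache;

	static int __init example_dtl_cache_init(void)
	{
		/* size == align => each object is naturally aligned and never
		 * crosses a 4k boundary, as the hypervisor requires */
		example_dtl_cache = kmem_cache_create("dtl", EXAMPLE_DISPATCH_LOG_BYTES,
						      EXAMPLE_DISPATCH_LOG_BYTES, 0, NULL);
		return example_dtl_cache ? 0 : -ENOMEM;
	}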
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 3cc4d102b1f1..46b55cf563e3 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -65,7 +65,7 @@
* with EEH.
*
* Ideally, a PCI device driver, when suspecting that an isolation
- * event has occured (e.g. by reading 0xff's), will then ask EEH
+ * event has occurred (e.g. by reading 0xff's), will then ask EEH
* whether this is the case, and then take appropriate steps to
* reset the PCI slot, the PCI device, and then resume operations.
* However, until that day, the checking is done here, with the
@@ -93,6 +93,7 @@ static int ibm_slot_error_detail;
static int ibm_get_config_addr_info;
static int ibm_get_config_addr_info2;
static int ibm_configure_bridge;
+static int ibm_configure_pe;
int eeh_subsystem_enabled;
EXPORT_SYMBOL(eeh_subsystem_enabled);
@@ -261,6 +262,8 @@ void eeh_slot_error_detail(struct pci_dn *pdn, int severity)
pci_regs_buf[0] = 0;
rtas_pci_enable(pdn, EEH_THAW_MMIO);
+ rtas_configure_bridge(pdn);
+ eeh_restore_bars(pdn);
loglen = gather_pci_data(pdn, pci_regs_buf, EEH_PCI_REGS_LOG_LEN);
rtas_slot_error_detail(pdn, severity, pci_regs_buf, loglen);
@@ -448,6 +451,39 @@ void eeh_clear_slot (struct device_node *dn, int mode_flag)
raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
}
+void __eeh_set_pe_freset(struct device_node *parent, unsigned int *freset)
+{
+ struct device_node *dn;
+
+ for_each_child_of_node(parent, dn) {
+ if (PCI_DN(dn)) {
+
+ struct pci_dev *dev = PCI_DN(dn)->pcidev;
+
+ if (dev && dev->driver)
+ *freset |= dev->needs_freset;
+
+ __eeh_set_pe_freset(dn, freset);
+ }
+ }
+}
+
+void eeh_set_pe_freset(struct device_node *dn, unsigned int *freset)
+{
+ struct pci_dev *dev;
+ dn = find_device_pe(dn);
+
+ /* Back up one, since config addrs might be shared */
+ if (!pcibios_find_pci_bus(dn) && PCI_DN(dn->parent))
+ dn = dn->parent;
+
+ dev = PCI_DN(dn)->pcidev;
+ if (dev)
+ *freset |= dev->needs_freset;
+
+ __eeh_set_pe_freset(dn, freset);
+}
+
/**
* eeh_dn_check_failure - check if all 1's data is due to EEH slot freeze
* @dn device node
@@ -692,15 +728,24 @@ rtas_pci_slot_reset(struct pci_dn *pdn, int state)
if (pdn->eeh_pe_config_addr)
config_addr = pdn->eeh_pe_config_addr;
- rc = rtas_call(ibm_set_slot_reset,4,1, NULL,
+ rc = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
config_addr,
BUID_HI(pdn->phb->buid),
BUID_LO(pdn->phb->buid),
state);
- if (rc)
- printk (KERN_WARNING "EEH: Unable to reset the failed slot,"
- " (%d) #RST=%d dn=%s\n",
- rc, state, pdn->node->full_name);
+
+ /* Fundamental-reset not supported on this PE, try hot-reset */
+ if (rc == -8 && state == 3) {
+ rc = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
+ config_addr,
+ BUID_HI(pdn->phb->buid),
+ BUID_LO(pdn->phb->buid), 1);
+ if (rc)
+ printk(KERN_WARNING
+ "EEH: Unable to reset the failed slot,"
+ " #RST=%d dn=%s\n",
+ rc, pdn->node->full_name);
+ }
}
/**
@@ -736,18 +781,21 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat
/**
* rtas_set_slot_reset -- assert the pci #RST line for 1/4 second
* @pdn: pci device node to be reset.
- *
- * Return 0 if success, else a non-zero value.
*/
static void __rtas_set_slot_reset(struct pci_dn *pdn)
{
- struct pci_dev *dev = pdn->pcidev;
+ unsigned int freset = 0;
- /* Determine type of EEH reset required by device,
- * default hot reset or fundamental reset
- */
- if (dev && dev->needs_freset)
+ /* Determine type of EEH reset required for
+ * Partitionable Endpoint, a hot-reset (1)
+ * or a fundamental reset (3).
+ * A fundamental reset required by any device under
+ * Partitionable Endpoint trumps hot-reset.
+ */
+ eeh_set_pe_freset(pdn->node, &freset);
+
+ if (freset)
rtas_pci_slot_reset(pdn, 3);
else
rtas_pci_slot_reset(pdn, 1);
@@ -895,13 +943,20 @@ rtas_configure_bridge(struct pci_dn *pdn)
{
int config_addr;
int rc;
+ int token;
/* Use PE configuration address, if present */
config_addr = pdn->eeh_config_addr;
if (pdn->eeh_pe_config_addr)
config_addr = pdn->eeh_pe_config_addr;
- rc = rtas_call(ibm_configure_bridge,3,1, NULL,
+ /* Use new configure-pe function, if supported */
+ if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE)
+ token = ibm_configure_pe;
+ else
+ token = ibm_configure_bridge;
+
+ rc = rtas_call(token, 3, 1, NULL,
config_addr,
BUID_HI(pdn->phb->buid),
BUID_LO(pdn->phb->buid));
@@ -1077,6 +1132,7 @@ void __init eeh_init(void)
ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info");
ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2");
ibm_configure_bridge = rtas_token ("ibm,configure-bridge");
+ ibm_configure_pe = rtas_token("ibm,configure-pe");
if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE)
return;
diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
index b8d70f5d9aa9..1b6cb10589e0 100644
--- a/arch/powerpc/platforms/pseries/eeh_driver.c
+++ b/arch/powerpc/platforms/pseries/eeh_driver.c
@@ -328,7 +328,7 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
struct pci_bus *frozen_bus;
int rc = 0;
enum pci_ers_result result = PCI_ERS_RESULT_NONE;
- const char *location, *pci_str, *drv_str;
+ const char *location, *pci_str, *drv_str, *bus_pci_str, *bus_drv_str;
frozen_dn = find_device_pe(event->dn);
if (!frozen_dn) {
@@ -364,13 +364,8 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
frozen_pdn = PCI_DN(frozen_dn);
frozen_pdn->eeh_freeze_count++;
- if (frozen_pdn->pcidev) {
- pci_str = pci_name (frozen_pdn->pcidev);
- drv_str = pcid_name (frozen_pdn->pcidev);
- } else {
- pci_str = eeh_pci_name(event->dev);
- drv_str = pcid_name (event->dev);
- }
+ pci_str = eeh_pci_name(event->dev);
+ drv_str = pcid_name(event->dev);
if (frozen_pdn->eeh_freeze_count > EEH_MAX_ALLOWED_FREEZES)
goto excess_failures;
@@ -378,8 +373,17 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
printk(KERN_WARNING
"EEH: This PCI device has failed %d times in the last hour:\n",
frozen_pdn->eeh_freeze_count);
+
+ if (frozen_pdn->pcidev) {
+ bus_pci_str = pci_name(frozen_pdn->pcidev);
+ bus_drv_str = pcid_name(frozen_pdn->pcidev);
+ printk(KERN_WARNING
+ "EEH: Bus location=%s driver=%s pci addr=%s\n",
+ location, bus_drv_str, bus_pci_str);
+ }
+
printk(KERN_WARNING
- "EEH: location=%s driver=%s pci addr=%s\n",
+ "EEH: Device location=%s driver=%s pci addr=%s\n",
location, drv_str, pci_str);
/* Walk the various device drivers attached to this slot through
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index fd50ccd4bac1..46f13a3c5d09 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -19,6 +19,7 @@
*/
#include <linux/kernel.h>
+#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <asm/system.h>
@@ -28,7 +29,7 @@
#include <asm/machdep.h>
#include <asm/vdso_datapage.h>
#include <asm/pSeries_reconfig.h>
-#include "xics.h"
+#include <asm/xics.h>
#include "plpar_wrappers.h"
#include "offline_states.h"
@@ -216,7 +217,7 @@ static void pseries_cpu_die(unsigned int cpu)
cpu, pcpu, cpu_status);
}
- /* Isolation and deallocation are definatly done by
+ /* Isolation and deallocation are definitely done by
* drslot_chrp_cpu. If they were not they would be
* done here. Change isolate state to Isolate and
* change allocation-state to Unusable.
@@ -280,7 +281,7 @@ static int pseries_add_processor(struct device_node *np)
}
for_each_cpu(cpu, tmp) {
- BUG_ON(cpumask_test_cpu(cpu, cpu_present_mask));
+ BUG_ON(cpu_present(cpu));
set_cpu_present(cpu, true);
set_hard_smp_processor_id(cpu, *intserv++);
}
diff --git a/arch/powerpc/platforms/pseries/io_event_irq.c b/arch/powerpc/platforms/pseries/io_event_irq.c
new file mode 100644
index 000000000000..c829e6067d54
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/io_event_irq.c
@@ -0,0 +1,231 @@
+/*
+ * Copyright 2010 2011 Mark Nelson and Tseng-Hui (Frank) Lin, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/list.h>
+#include <linux/notifier.h>
+
+#include <asm/machdep.h>
+#include <asm/rtas.h>
+#include <asm/irq.h>
+#include <asm/io_event_irq.h>
+
+#include "pseries.h"
+
+/*
+ * IO event interrupt is a mechanism provided by RTAS to return
+ * information about hardware error and non-error events. Device
+ * drivers can register their event handlers to receive events.
+ * Device drivers are expected to use atomic_notifier_chain_register()
+ * and atomic_notifier_chain_unregister() to register and unregister
+ * their event handlers. Since multiple IO event types and scopes
+ * share an IO event interrupt, the event handlers are called one
+ * by one until the IO event is claimed by one of the handlers.
+ * The event handlers are expected to return NOTIFY_OK if the
+ * event is handled by the event handler or NOTIFY_DONE if the
+ * event does not belong to the handler.
+ *
+ * Usage:
+ *
+ * Notifier function:
+ * #include <asm/io_event_irq.h>
+ * int event_handler(struct notifier_block *nb, unsigned long val, void *data) {
+ * p = (struct pseries_io_event_sect_data *) data;
+ * if (! is_my_event(p->scope, p->event_type)) return NOTIFY_DONE;
+ * :
+ * :
+ * return NOTIFY_OK;
+ * }
+ * struct notifier_block event_nb = {
+ * .notifier_call = event_handler,
+ * }
+ *
+ * Registration:
+ * atomic_notifier_chain_register(&pseries_ioei_notifier_list, &event_nb);
+ *
+ * Unregistration:
+ * atomic_notifier_chain_unregister(&pseries_ioei_notifier_list, &event_nb);
+ */
+
+ATOMIC_NOTIFIER_HEAD(pseries_ioei_notifier_list);
+EXPORT_SYMBOL_GPL(pseries_ioei_notifier_list);
+
+static int ioei_check_exception_token;
+
+/* pSeries event log format */
+
+/* Two bytes ASCII section IDs */
+#define PSERIES_ELOG_SECT_ID_PRIV_HDR (('P' << 8) | 'H')
+#define PSERIES_ELOG_SECT_ID_USER_HDR (('U' << 8) | 'H')
+#define PSERIES_ELOG_SECT_ID_PRIMARY_SRC (('P' << 8) | 'S')
+#define PSERIES_ELOG_SECT_ID_EXTENDED_UH (('E' << 8) | 'H')
+#define PSERIES_ELOG_SECT_ID_FAILING_MTMS (('M' << 8) | 'T')
+#define PSERIES_ELOG_SECT_ID_SECONDARY_SRC (('S' << 8) | 'S')
+#define PSERIES_ELOG_SECT_ID_DUMP_LOCATOR (('D' << 8) | 'H')
+#define PSERIES_ELOG_SECT_ID_FW_ERROR (('S' << 8) | 'W')
+#define PSERIES_ELOG_SECT_ID_IMPACT_PART_ID (('L' << 8) | 'P')
+#define PSERIES_ELOG_SECT_ID_LOGIC_RESOURCE_ID (('L' << 8) | 'R')
+#define PSERIES_ELOG_SECT_ID_HMC_ID (('H' << 8) | 'M')
+#define PSERIES_ELOG_SECT_ID_EPOW (('E' << 8) | 'P')
+#define PSERIES_ELOG_SECT_ID_IO_EVENT (('I' << 8) | 'E')
+#define PSERIES_ELOG_SECT_ID_MANUFACT_INFO (('M' << 8) | 'I')
+#define PSERIES_ELOG_SECT_ID_CALL_HOME (('C' << 8) | 'H')
+#define PSERIES_ELOG_SECT_ID_USER_DEF (('U' << 8) | 'D')
+
+/* Vendor specific Platform Event Log Format, Version 6, section header */
+struct pseries_elog_section {
+ uint16_t id; /* 0x00 2-byte ASCII section ID */
+ uint16_t length; /* 0x02 Section length in bytes */
+ uint8_t version; /* 0x04 Section version */
+ uint8_t subtype; /* 0x05 Section subtype */
+ uint16_t creator_component; /* 0x06 Creator component ID */
+ uint8_t data[]; /* 0x08 Start of section data */
+};
+
+static char ioei_rtas_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned;
+
+/**
+ * Find the data portion of a specific section in the RTAS extended event log.
+ * @elog: RTAS error/event log.
+ * @sect_id: section ID.
+ *
+ * Return:
+ * pointer to the section data of the specified section
+ * NULL if not found
+ */
+static struct pseries_elog_section *find_xelog_section(struct rtas_error_log *elog,
+ uint16_t sect_id)
+{
+ struct rtas_ext_event_log_v6 *xelog =
+ (struct rtas_ext_event_log_v6 *) elog->buffer;
+ struct pseries_elog_section *sect;
+ unsigned char *p, *log_end;
+
+ /* Check that we understand the format */
+ if (elog->extended_log_length < sizeof(struct rtas_ext_event_log_v6) ||
+ xelog->log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG ||
+ xelog->company_id != RTAS_V6EXT_COMPANY_ID_IBM)
+ return NULL;
+
+ log_end = elog->buffer + elog->extended_log_length;
+ p = xelog->vendor_log;
+ while (p < log_end) {
+ sect = (struct pseries_elog_section *)p;
+ if (sect->id == sect_id)
+ return sect;
+ p += sect->length;
+ }
+ return NULL;
+}
+
+/**
+ * Find the data portion of an IO Event section from the event log.
+ * @elog: RTAS error/event log.
+ *
+ * Return:
+ * pointer to a valid IO event section data. NULL if not found.
+ */
+static struct pseries_io_event * ioei_find_event(struct rtas_error_log *elog)
+{
+ struct pseries_elog_section *sect;
+
+ /* We should only ever get called for io-event interrupts, but if
+ * we do get called for another type then something went wrong so
+ * make some noise about it.
+ * RTAS_TYPE_IO only exists in extended event log version 6 or later.
+ * No need to check event log version.
+ */
+ if (unlikely(elog->type != RTAS_TYPE_IO)) {
+ printk_once(KERN_WARNING "io_event_irq: Unexpected event type %d",
+ elog->type);
+ return NULL;
+ }
+
+ sect = find_xelog_section(elog, PSERIES_ELOG_SECT_ID_IO_EVENT);
+ if (unlikely(!sect)) {
+ printk_once(KERN_WARNING "io_event_irq: RTAS extended event "
+ "log does not contain an IO Event section. "
+ "Could be a bug in system firmware!\n");
+ return NULL;
+ }
+ return (struct pseries_io_event *) &sect->data;
+}
+
+/*
+ * PAPR:
+ * - check-exception returns the first found error or event and clear that
+ * error or event so it is reported once.
+ * - Each interrupt returns one event. If a platform chooses to report
+ * multiple events through a single interrupt, it must ensure that the
+ * interrupt remains asserted until check-exception has been used to
+ * process all outstanding events for that interrupt.
+ *
+ * Implementation notes:
+ * - Events must be processed in the order they are returned. Hence,
+ * sequential in nature.
+ * - The owner of an event is determined by combinations of scope,
+ * event type, and sub-type. There is no easy way to pre-sort clients
+ * by scope or event type alone. For example, Torrent ISR route change
+ * event is reported with scope 0x00 (Not Applicable) rather than
+ * 0x3B (Torrent-hub). It is better to let the clients identify
+ * who owns the event.
+ */
+
+static irqreturn_t ioei_interrupt(int irq, void *dev_id)
+{
+ struct pseries_io_event *event;
+ int rtas_rc;
+
+ for (;;) {
+ rtas_rc = rtas_call(ioei_check_exception_token, 6, 1, NULL,
+ RTAS_VECTOR_EXTERNAL_INTERRUPT,
+ virq_to_hw(irq),
+ RTAS_IO_EVENTS, 1 /* Time Critical */,
+ __pa(ioei_rtas_buf),
+ RTAS_DATA_BUF_SIZE);
+ if (rtas_rc != 0)
+ break;
+
+ event = ioei_find_event((struct rtas_error_log *)ioei_rtas_buf);
+ if (!event)
+ continue;
+
+ atomic_notifier_call_chain(&pseries_ioei_notifier_list,
+ 0, event);
+ }
+ return IRQ_HANDLED;
+}
+
+static int __init ioei_init(void)
+{
+ struct device_node *np;
+
+ ioei_check_exception_token = rtas_token("check-exception");
+ if (ioei_check_exception_token == RTAS_UNKNOWN_SERVICE) {
+ pr_warning("IO Event IRQ not supported on this system !\n");
+ return -ENODEV;
+ }
+ np = of_find_node_by_path("/event-sources/ibm,io-events");
+ if (np) {
+ request_event_sources_irqs(np, ioei_interrupt, "IO_EVENT");
+ of_node_put(np);
+ } else {
+ pr_err("io_event_irq: No ibm,io-events on system! "
+ "IO Event interrupt disabled.\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+machine_subsys_initcall(pseries, ioei_init);
+
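A minimal sketch of a client driver using the new IO event interface added
above. The handler name, the MY_* placeholder values and the pr_info() line
are hypothetical; the notifier list, the struct pseries_io_event pointer
passed as data, its scope/event_type fields and the NOTIFY_* return codes
follow the usage comment in io_event_irq.c.

    #include <linux/kernel.h>
    #include <linux/notifier.h>
    #include <asm/io_event_irq.h>

    /* Hypothetical values: a real driver would use the scope and event
     * type codes it actually owns. */
    #define MY_EVENT_SCOPE	0x3B	/* e.g. Torrent-hub, per the note above */
    #define MY_EVENT_TYPE	0x01	/* hypothetical type code */

    static int example_io_event_handler(struct notifier_block *nb,
                                        unsigned long val, void *data)
    {
            struct pseries_io_event *p = data;

            /* Claim only the events this driver owns; let the other
             * handlers on the chain look at everything else. */
            if (p->scope != MY_EVENT_SCOPE || p->event_type != MY_EVENT_TYPE)
                    return NOTIFY_DONE;

            pr_info("example: IO event claimed (scope=0x%x type=0x%x)\n",
                    p->scope, p->event_type);
            return NOTIFY_OK;	/* event claimed and handled */
    }

    static struct notifier_block example_io_event_nb = {
            .notifier_call = example_io_event_handler,
    };

    /* In the driver's init path: */
    /* atomic_notifier_chain_register(&pseries_ioei_notifier_list,
     *                                &example_io_event_nb); */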
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 154c464cdca5..01faab9456ca 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -272,7 +272,7 @@ static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum)
return tce_ret;
}
-/* this is compatable with cells for the device tree property */
+/* this is compatible with cells for the device tree property */
struct dynamic_dma_window_prop {
__be32 liobn; /* tce table number */
__be64 dma_base; /* address hi,lo */
@@ -659,15 +659,18 @@ static void remove_ddw(struct device_node *np)
{
struct dynamic_dma_window_prop *dwp;
struct property *win64;
- const u32 *ddr_avail;
+ const u32 *ddw_avail;
u64 liobn;
int len, ret;
- ddr_avail = of_get_property(np, "ibm,ddw-applicable", &len);
+ ddw_avail = of_get_property(np, "ibm,ddw-applicable", &len);
win64 = of_find_property(np, DIRECT64_PROPNAME, NULL);
- if (!win64 || !ddr_avail || len < 3 * sizeof(u32))
+ if (!win64)
return;
+ if (!ddw_avail || len < 3 * sizeof(u32) || win64->length < sizeof(*dwp))
+ goto delprop;
+
dwp = win64->value;
liobn = (u64)be32_to_cpu(dwp->liobn);
@@ -681,28 +684,29 @@ static void remove_ddw(struct device_node *np)
pr_debug("%s successfully cleared tces in window.\n",
np->full_name);
- ret = rtas_call(ddr_avail[2], 1, 1, NULL, liobn);
+ ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn);
if (ret)
pr_warning("%s: failed to remove direct window: rtas returned "
"%d to ibm,remove-pe-dma-window(%x) %llx\n",
- np->full_name, ret, ddr_avail[2], liobn);
+ np->full_name, ret, ddw_avail[2], liobn);
else
pr_debug("%s: successfully removed direct window: rtas returned "
"%d to ibm,remove-pe-dma-window(%x) %llx\n",
- np->full_name, ret, ddr_avail[2], liobn);
-}
+ np->full_name, ret, ddw_avail[2], liobn);
+delprop:
+ ret = prom_remove_property(np, win64);
+ if (ret)
+ pr_warning("%s: failed to remove direct window property: %d\n",
+ np->full_name, ret);
+}
-static int dupe_ddw_if_already_created(struct pci_dev *dev, struct device_node *pdn)
+static u64 find_existing_ddw(struct device_node *pdn)
{
- struct device_node *dn;
- struct pci_dn *pcidn;
struct direct_window *window;
const struct dynamic_dma_window_prop *direct64;
u64 dma_addr = 0;
- dn = pci_device_to_OF_node(dev);
- pcidn = PCI_DN(dn);
spin_lock(&direct_window_list_lock);
/* check if we already created a window and dupe that config if so */
list_for_each_entry(window, &direct_window_list, list) {
@@ -717,36 +721,40 @@ static int dupe_ddw_if_already_created(struct pci_dev *dev, struct device_node *
return dma_addr;
}
-static u64 dupe_ddw_if_kexec(struct pci_dev *dev, struct device_node *pdn)
+static int find_existing_ddw_windows(void)
{
- struct device_node *dn;
- struct pci_dn *pcidn;
int len;
+ struct device_node *pdn;
struct direct_window *window;
const struct dynamic_dma_window_prop *direct64;
- u64 dma_addr = 0;
- dn = pci_device_to_OF_node(dev);
- pcidn = PCI_DN(dn);
- direct64 = of_get_property(pdn, DIRECT64_PROPNAME, &len);
- if (direct64) {
+ if (!firmware_has_feature(FW_FEATURE_LPAR))
+ return 0;
+
+ for_each_node_with_property(pdn, DIRECT64_PROPNAME) {
+ direct64 = of_get_property(pdn, DIRECT64_PROPNAME, &len);
+ if (!direct64)
+ continue;
+
window = kzalloc(sizeof(*window), GFP_KERNEL);
- if (!window) {
+ if (!window || len < sizeof(struct dynamic_dma_window_prop)) {
+ kfree(window);
remove_ddw(pdn);
- } else {
- window->device = pdn;
- window->prop = direct64;
- spin_lock(&direct_window_list_lock);
- list_add(&window->list, &direct_window_list);
- spin_unlock(&direct_window_list_lock);
- dma_addr = direct64->dma_base;
+ continue;
}
+
+ window->device = pdn;
+ window->prop = direct64;
+ spin_lock(&direct_window_list_lock);
+ list_add(&window->list, &direct_window_list);
+ spin_unlock(&direct_window_list_lock);
}
- return dma_addr;
+ return 0;
}
+machine_arch_initcall(pseries, find_existing_ddw_windows);
-static int query_ddw(struct pci_dev *dev, const u32 *ddr_avail,
+static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
struct ddw_query_response *query)
{
struct device_node *dn;
@@ -767,15 +775,15 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddr_avail,
if (pcidn->eeh_pe_config_addr)
cfg_addr = pcidn->eeh_pe_config_addr;
buid = pcidn->phb->buid;
- ret = rtas_call(ddr_avail[0], 3, 5, (u32 *)query,
+ ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
cfg_addr, BUID_HI(buid), BUID_LO(buid));
dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x"
- " returned %d\n", ddr_avail[0], cfg_addr, BUID_HI(buid),
+ " returned %d\n", ddw_avail[0], cfg_addr, BUID_HI(buid),
BUID_LO(buid), ret);
return ret;
}
-static int create_ddw(struct pci_dev *dev, const u32 *ddr_avail,
+static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
struct ddw_create_response *create, int page_shift,
int window_shift)
{
@@ -800,12 +808,12 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddr_avail,
do {
/* extra outputs are LIOBN and dma-addr (hi, lo) */
- ret = rtas_call(ddr_avail[1], 5, 4, (u32 *)create, cfg_addr,
+ ret = rtas_call(ddw_avail[1], 5, 4, (u32 *)create, cfg_addr,
BUID_HI(buid), BUID_LO(buid), page_shift, window_shift);
} while (rtas_busy_delay(ret));
dev_info(&dev->dev,
"ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d "
- "(liobn = 0x%x starting addr = %x %x)\n", ddr_avail[1],
+ "(liobn = 0x%x starting addr = %x %x)\n", ddw_avail[1],
cfg_addr, BUID_HI(buid), BUID_LO(buid), page_shift,
window_shift, ret, create->liobn, create->addr_hi, create->addr_lo);
@@ -831,18 +839,14 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
int page_shift;
u64 dma_addr, max_addr;
struct device_node *dn;
- const u32 *uninitialized_var(ddr_avail);
+ const u32 *uninitialized_var(ddw_avail);
struct direct_window *window;
- struct property *uninitialized_var(win64);
+ struct property *win64;
struct dynamic_dma_window_prop *ddwprop;
mutex_lock(&direct_window_init_mutex);
- dma_addr = dupe_ddw_if_already_created(dev, pdn);
- if (dma_addr != 0)
- goto out_unlock;
-
- dma_addr = dupe_ddw_if_kexec(dev, pdn);
+ dma_addr = find_existing_ddw(pdn);
if (dma_addr != 0)
goto out_unlock;
@@ -854,8 +858,8 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
* for the given node in that order.
* the property is actually in the parent, not the PE
*/
- ddr_avail = of_get_property(pdn, "ibm,ddw-applicable", &len);
- if (!ddr_avail || len < 3 * sizeof(u32))
+ ddw_avail = of_get_property(pdn, "ibm,ddw-applicable", &len);
+ if (!ddw_avail || len < 3 * sizeof(u32))
goto out_unlock;
/*
@@ -865,7 +869,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
* of page sizes: supported and supported for migrate-dma.
*/
dn = pci_device_to_OF_node(dev);
- ret = query_ddw(dev, ddr_avail, &query);
+ ret = query_ddw(dev, ddw_avail, &query);
if (ret != 0)
goto out_unlock;
@@ -907,13 +911,14 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
}
win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL);
win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL);
+ win64->length = sizeof(*ddwprop);
if (!win64->name || !win64->value) {
dev_info(&dev->dev,
"couldn't allocate property name and value\n");
goto out_free_prop;
}
- ret = create_ddw(dev, ddr_avail, &create, page_shift, len);
+ ret = create_ddw(dev, ddw_avail, &create, page_shift, len);
if (ret != 0)
goto out_free_prop;
@@ -976,7 +981,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));
/* dev setup for LPAR is a little tricky, since the device tree might
- * contain the dma-window properties per-device and not neccesarily
+ * contain the dma-window properties per-device and not necessarily
* for the bus. So we need to search upwards in the tree until we
* either hit a dma-window property, OR find a parent with a table
* already allocated.
@@ -1021,19 +1026,22 @@ static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
const void *dma_window = NULL;
u64 dma_offset;
- if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+ if (!dev->dma_mask)
return -EIO;
+ if (!dev_is_pci(dev))
+ goto check_mask;
+
+ pdev = to_pci_dev(dev);
+
/* only attempt to use a new window if 64-bit DMA is requested */
if (!disable_ddw && dma_mask == DMA_BIT_MASK(64)) {
- pdev = to_pci_dev(dev);
-
dn = pci_device_to_OF_node(pdev);
dev_dbg(dev, "node is %s\n", dn->full_name);
/*
* the device tree might contain the dma-window properties
- * per-device and not neccesarily for the bus. So we need to
+ * per-device and not necessarily for the bus. So we need to
* search upwards in the tree until we either hit a dma-window
* property, OR find a parent with a table already allocated.
*/
@@ -1054,12 +1062,17 @@ static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
}
}
- /* fall-through to iommu ops */
- if (!ddw_enabled) {
- dev_info(dev, "Using 32-bit DMA via iommu\n");
+ /* fall back on iommu ops, restore table pointer with ops */
+ if (!ddw_enabled && get_dma_ops(dev) != &dma_iommu_ops) {
+ dev_info(dev, "Restoring 32-bit DMA via iommu\n");
set_dma_ops(dev, &dma_iommu_ops);
+ pci_dma_dev_setup_pSeriesLP(pdev);
}
+check_mask:
+ if (!dma_supported(dev, dma_mask))
+ return -EIO;
+
*dev->dma_mask = dma_mask;
return 0;
}
diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c
index 77d38a5e2ff9..54cf3a4aa16b 100644
--- a/arch/powerpc/platforms/pseries/kexec.c
+++ b/arch/powerpc/platforms/pseries/kexec.c
@@ -7,15 +7,18 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+
#include <asm/machdep.h>
#include <asm/page.h>
#include <asm/firmware.h>
#include <asm/kexec.h>
#include <asm/mpic.h>
+#include <asm/xics.h>
#include <asm/smp.h>
#include "pseries.h"
-#include "xics.h"
#include "plpar_wrappers.h"
static void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index ca5d5898d320..39e6e0a7b2fa 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -329,6 +329,8 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
/* Make pHyp happy */
if ((rflags & _PAGE_NO_CACHE) & !(rflags & _PAGE_WRITETHRU))
hpte_r &= ~_PAGE_COHERENT;
+ if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
+ flags |= H_COALESCE_CAND;
lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
if (unlikely(lpar_rc == H_PTEG_FULL)) {
@@ -573,7 +575,7 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
unsigned long i, pix, rc;
unsigned long flags = 0;
struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
- int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
+ int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
unsigned long param[9];
unsigned long va;
unsigned long hash, index, shift, hidx, slot;
@@ -771,3 +773,47 @@ out:
local_irq_restore(flags);
}
#endif
+
+/**
+ * h_get_mpp
+ * H_GET_MPP hcall returns info in 7 parms
+ */
+int h_get_mpp(struct hvcall_mpp_data *mpp_data)
+{
+ int rc;
+ unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+
+ rc = plpar_hcall9(H_GET_MPP, retbuf);
+
+ mpp_data->entitled_mem = retbuf[0];
+ mpp_data->mapped_mem = retbuf[1];
+
+ mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
+ mpp_data->pool_num = retbuf[2] & 0xffff;
+
+ mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
+ mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
+ mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffff;
+
+ mpp_data->pool_size = retbuf[4];
+ mpp_data->loan_request = retbuf[5];
+ mpp_data->backing_mem = retbuf[6];
+
+ return rc;
+}
+EXPORT_SYMBOL(h_get_mpp);
+
+int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
+{
+ int rc;
+ unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };
+
+ rc = plpar_hcall9(H_GET_MPP_X, retbuf);
+
+ mpp_x_data->coalesced_bytes = retbuf[0];
+ mpp_x_data->pool_coalesced_bytes = retbuf[1];
+ mpp_x_data->pool_purr_cycles = retbuf[2];
+ mpp_x_data->pool_spurr_cycles = retbuf[3];
+
+ return rc;
+}
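A hedged usage sketch for the two hcall wrappers added above. The function
name and the log message are illustrative; h_get_mpp() and the
entitled_mem/mapped_mem/pool_size fields are the ones unpacked from the
plpar_hcall9() return buffer in this hunk, assuming struct hvcall_mpp_data
as declared in asm/hvcall.h.

    #include <linux/kernel.h>
    #include <asm/hvcall.h>

    /* Illustrative only: query the Memory Partition Parameters and log a
     * few of the fields that h_get_mpp() fills in. */
    static void example_report_mpp(void)
    {
            struct hvcall_mpp_data mpp;

            if (h_get_mpp(&mpp) != 0)	/* non-zero rc: hcall failed */
                    return;

            pr_info("MPP: entitled_mem=%lu mapped_mem=%lu pool_size=%lu\n",
                    mpp.entitled_mem, mpp.mapped_mem, mpp.pool_size);
    }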
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index 18ac801f8e90..38d24e7e7bb1 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -137,7 +137,7 @@ static void rtas_teardown_msi_irqs(struct pci_dev *pdev)
if (entry->irq == NO_IRQ)
continue;
- set_irq_msi(entry->irq, NULL);
+ irq_set_msi_desc(entry->irq, NULL);
irq_dispose_mapping(entry->irq);
}
@@ -437,7 +437,7 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
}
dev_dbg(&pdev->dev, "rtas_msi: allocated virq %d\n", virq);
- set_irq_msi(virq, entry);
+ irq_set_msi_desc(virq, entry);
/* Read config space back so we can restore after reset */
read_msi_msg(virq, &msg);
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 419707b07248..00cc3a094885 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -480,8 +480,32 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
const char *new_msgs, unsigned long new_len)
{
static unsigned int oops_count = 0;
+ static bool panicking = false;
size_t text_len;
+ switch (reason) {
+ case KMSG_DUMP_RESTART:
+ case KMSG_DUMP_HALT:
+ case KMSG_DUMP_POWEROFF:
+ /* These are almost always orderly shutdowns. */
+ return;
+ case KMSG_DUMP_OOPS:
+ case KMSG_DUMP_KEXEC:
+ break;
+ case KMSG_DUMP_PANIC:
+ panicking = true;
+ break;
+ case KMSG_DUMP_EMERG:
+ if (panicking)
+ /* Panic report already captured. */
+ return;
+ break;
+ default:
+ pr_err("%s: ignoring unrecognized KMSG_DUMP_* reason %d\n",
+ __FUNCTION__, (int) reason);
+ return;
+ }
+
if (clobbering_unread_rtas_event())
return;
diff --git a/arch/powerpc/platforms/pseries/offline_states.h b/arch/powerpc/platforms/pseries/offline_states.h
index 75a6f480d931..08672d9136ab 100644
--- a/arch/powerpc/platforms/pseries/offline_states.h
+++ b/arch/powerpc/platforms/pseries/offline_states.h
@@ -34,6 +34,4 @@ static inline void set_default_offline_state(int cpu)
#endif
extern enum cpu_state_vals get_preferred_offline_state(int cpu);
-extern int start_secondary(void);
-extern void start_secondary_resume(void);
#endif
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h
index d9801117124b..4bf21207d7d3 100644
--- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
+++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h
@@ -270,31 +270,4 @@ static inline long plpar_put_term_char(unsigned long termno, unsigned long len,
lbuf[1]);
}
-static inline long plpar_eoi(unsigned long xirr)
-{
- return plpar_hcall_norets(H_EOI, xirr);
-}
-
-static inline long plpar_cppr(unsigned long cppr)
-{
- return plpar_hcall_norets(H_CPPR, cppr);
-}
-
-static inline long plpar_ipi(unsigned long servernum, unsigned long mfrr)
-{
- return plpar_hcall_norets(H_IPI, servernum, mfrr);
-}
-
-static inline long plpar_xirr(unsigned long *xirr_ret, unsigned char cppr)
-{
- long rc;
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
- rc = plpar_hcall(H_XIRR, retbuf, cppr);
-
- *xirr_ret = retbuf[0];
-
- return rc;
-}
-
#endif /* _PSERIES_PLPAR_WRAPPERS_H */
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index c55d7ad9c648..086d2ae4e06a 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -122,7 +122,7 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id)
status = rtas_call(ras_check_exception_token, 6, 1, NULL,
RTAS_VECTOR_EXTERNAL_INTERRUPT,
- irq_map[irq].hwirq,
+ virq_to_hw(irq),
RTAS_EPOW_WARNING | RTAS_POWERMGM_EVENTS,
critical, __pa(&ras_log_buf),
rtas_get_error_log_max());
@@ -157,7 +157,7 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id)
status = rtas_call(ras_check_exception_token, 6, 1, NULL,
RTAS_VECTOR_EXTERNAL_INTERRUPT,
- irq_map[irq].hwirq,
+ virq_to_hw(irq),
RTAS_INTERNAL_ERROR, 1 /*Time Critical */,
__pa(&ras_log_buf),
rtas_get_error_log_max());
@@ -227,7 +227,7 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
struct rtas_error_log *h, *errhdr = NULL;
if (!VALID_FWNMI_BUFFER(regs->gpr[3])) {
- printk(KERN_ERR "FWNMI: corrupt r3\n");
+ printk(KERN_ERR "FWNMI: corrupt r3 0x%016lx\n", regs->gpr[3]);
return NULL;
}
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 2a0089a2c829..593acceeff96 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -53,9 +53,9 @@
#include <asm/irq.h>
#include <asm/time.h>
#include <asm/nvram.h>
-#include "xics.h"
#include <asm/pmc.h>
#include <asm/mpic.h>
+#include <asm/xics.h>
#include <asm/ppc-pci.h>
#include <asm/i8259.h>
#include <asm/udbg.h>
@@ -114,7 +114,7 @@ static void __init fwnmi_init(void)
static void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = i8259_irq();
if (cascade_irq != NO_IRQ)
@@ -169,7 +169,7 @@ static void __init pseries_setup_i8259_cascade(void)
printk(KERN_DEBUG "pic: PCI 8259 intack at 0x%016lx\n", intack);
i8259_init(found, intack);
of_node_put(found);
- set_irq_chained_handler(cascade, pseries_8259_cascade);
+ irq_set_chained_handler(cascade, pseries_8259_cascade);
}
static void __init pseries_mpic_init_IRQ(void)
@@ -205,6 +205,9 @@ static void __init pseries_mpic_init_IRQ(void)
mpic_assign_isu(mpic, n, isuaddr);
}
+ /* Setup top-level get_irq */
+ ppc_md.get_irq = mpic_get_irq;
+
/* All ISUs are setup, complete initialization */
mpic_init(mpic);
@@ -214,7 +217,7 @@ static void __init pseries_mpic_init_IRQ(void)
static void __init pseries_xics_init_IRQ(void)
{
- xics_init_IRQ();
+ xics_init();
pseries_setup_i8259_cascade();
}
@@ -238,7 +241,6 @@ static void __init pseries_discover_pic(void)
if (strstr(typep, "open-pic")) {
pSeries_mpic_node = of_node_get(np);
ppc_md.init_IRQ = pseries_mpic_init_IRQ;
- ppc_md.get_irq = mpic_get_irq;
setup_kexec_cpu_down_mpic();
smp_init_pseries_mpic();
return;
@@ -276,6 +278,8 @@ static struct notifier_block pci_dn_reconfig_nb = {
.notifier_call = pci_dn_reconfig_notifier,
};
+struct kmem_cache *dtl_cache;
+
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
* Allocate space for the dispatch trace log for all possible cpus
@@ -291,10 +295,12 @@ static int alloc_dispatch_logs(void)
if (!firmware_has_feature(FW_FEATURE_SPLPAR))
return 0;
+ if (!dtl_cache)
+ return 0;
+
for_each_possible_cpu(cpu) {
pp = &paca[cpu];
- dtl = kmalloc_node(DISPATCH_LOG_BYTES, GFP_KERNEL,
- cpu_to_node(cpu));
+ dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL);
if (!dtl) {
pr_warn("Failed to allocate dispatch trace log for cpu %d\n",
cpu);
@@ -324,10 +330,27 @@ static int alloc_dispatch_logs(void)
return 0;
}
-
-early_initcall(alloc_dispatch_logs);
+#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
+static inline int alloc_dispatch_logs(void)
+{
+ return 0;
+}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+static int alloc_dispatch_log_kmem_cache(void)
+{
+ dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
+ DISPATCH_LOG_BYTES, 0, NULL);
+ if (!dtl_cache) {
+ pr_warn("Failed to create dispatch trace log buffer cache\n");
+ pr_warn("Stolen time statistics will be unreliable\n");
+ return 0;
+ }
+
+ return alloc_dispatch_logs();
+}
+early_initcall(alloc_dispatch_log_kmem_cache);
+
static void __init pSeries_setup_arch(void)
{
/* Discover PIC type and setup ppc_md accordingly */
@@ -378,7 +401,7 @@ static int __init pSeries_init_panel(void)
return 0;
}
-arch_initcall(pSeries_init_panel);
+machine_arch_initcall(pseries, pSeries_init_panel);
static int pseries_set_dabr(unsigned long dabr)
{
@@ -395,6 +418,16 @@ static int pseries_set_xdabr(unsigned long dabr)
#define CMO_CHARACTERISTICS_TOKEN 44
#define CMO_MAXLENGTH 1026
+void pSeries_coalesce_init(void)
+{
+ struct hvcall_mpp_x_data mpp_x_data;
+
+ if (firmware_has_feature(FW_FEATURE_CMO) && !h_get_mpp_x(&mpp_x_data))
+ powerpc_firmware_features |= FW_FEATURE_XCMO;
+ else
+ powerpc_firmware_features &= ~FW_FEATURE_XCMO;
+}
+
/**
* fw_cmo_feature_init - FW_FEATURE_CMO is not stored in ibm,hypertas-functions,
* handle that here. (Stolen from parse_system_parameter_string)
@@ -464,6 +497,7 @@ void pSeries_cmo_feature_init(void)
pr_debug("CMO enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP,
CMO_SecPSP);
powerpc_firmware_features |= FW_FEATURE_CMO;
+ pSeries_coalesce_init();
} else
pr_debug("CMO not enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP,
CMO_SecPSP);
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 0317cce877c6..fbffd7e47ab8 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -44,10 +44,11 @@
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#include <asm/cputhreads.h>
+#include <asm/mpic.h>
+#include <asm/xics.h>
#include "plpar_wrappers.h"
#include "pseries.h"
-#include "xics.h"
#include "offline_states.h"
@@ -64,8 +65,8 @@ int smp_query_cpu_stopped(unsigned int pcpu)
int qcss_tok = rtas_token("query-cpu-stopped-state");
if (qcss_tok == RTAS_UNKNOWN_SERVICE) {
- printk(KERN_INFO "Firmware doesn't support "
- "query-cpu-stopped-state\n");
+ printk_once(KERN_INFO
+ "Firmware doesn't support query-cpu-stopped-state\n");
return QCSS_HARDWARE_ERROR;
}
@@ -112,10 +113,10 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
/* Fixup atomic count: it exited inside IRQ handler. */
task_thread_info(paca[lcpu].__current)->preempt_count = 0;
-
+#ifdef CONFIG_HOTPLUG_CPU
if (get_cpu_current_state(lcpu) == CPU_STATE_INACTIVE)
goto out;
-
+#endif
/*
* If the RTAS start-cpu token does not exist then presume the
* cpu is already spinning.
@@ -130,11 +131,12 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
return 0;
}
+#ifdef CONFIG_HOTPLUG_CPU
out:
+#endif
return 1;
}
-#ifdef CONFIG_XICS
static void __devinit smp_xics_setup_cpu(int cpu)
{
if (cpu != boot_cpuid)
@@ -144,20 +146,18 @@ static void __devinit smp_xics_setup_cpu(int cpu)
vpa_init(cpu);
cpumask_clear_cpu(cpu, of_spin_mask);
+#ifdef CONFIG_HOTPLUG_CPU
set_cpu_current_state(cpu, CPU_STATE_ONLINE);
set_default_offline_state(cpu);
-
+#endif
}
-#endif /* CONFIG_XICS */
-static void __devinit smp_pSeries_kick_cpu(int nr)
+static int __devinit smp_pSeries_kick_cpu(int nr)
{
- long rc;
- unsigned long hcpuid;
BUG_ON(nr < 0 || nr >= NR_CPUS);
if (!smp_startup_cpu(nr))
- return;
+ return -ENOENT;
/*
* The processor is currently spinning, waiting for the
@@ -165,16 +165,22 @@ static void __devinit smp_pSeries_kick_cpu(int nr)
* the processor will continue on to secondary_start
*/
paca[nr].cpu_start = 1;
-
+#ifdef CONFIG_HOTPLUG_CPU
set_preferred_offline_state(nr, CPU_STATE_ONLINE);
if (get_cpu_current_state(nr) == CPU_STATE_INACTIVE) {
+ long rc;
+ unsigned long hcpuid;
+
hcpuid = get_hard_smp_processor_id(nr);
rc = plpar_hcall_norets(H_PROD, hcpuid);
if (rc != H_SUCCESS)
printk(KERN_ERR "Error: Prod to wake up processor %d "
"Ret= %ld\n", nr, rc);
}
+#endif
+
+ return 0;
}
static int smp_pSeries_cpu_bootable(unsigned int nr)
@@ -192,23 +198,22 @@ static int smp_pSeries_cpu_bootable(unsigned int nr)
return 1;
}
-#ifdef CONFIG_MPIC
+
static struct smp_ops_t pSeries_mpic_smp_ops = {
.message_pass = smp_mpic_message_pass,
.probe = smp_mpic_probe,
.kick_cpu = smp_pSeries_kick_cpu,
.setup_cpu = smp_mpic_setup_cpu,
};
-#endif
-#ifdef CONFIG_XICS
+
static struct smp_ops_t pSeries_xics_smp_ops = {
- .message_pass = smp_xics_message_pass,
- .probe = smp_xics_probe,
+ .message_pass = smp_muxed_ipi_message_pass,
+ .cause_ipi = NULL, /* Filled at runtime by xics_smp_probe() */
+ .probe = xics_smp_probe,
.kick_cpu = smp_pSeries_kick_cpu,
.setup_cpu = smp_xics_setup_cpu,
.cpu_bootable = smp_pSeries_cpu_bootable,
};
-#endif
/* This is called very early */
static void __init smp_init_pseries(void)
@@ -240,14 +245,12 @@ static void __init smp_init_pseries(void)
pr_debug(" <- smp_init_pSeries()\n");
}
-#ifdef CONFIG_MPIC
void __init smp_init_pseries_mpic(void)
{
smp_ops = &pSeries_mpic_smp_ops;
smp_init_pseries();
}
-#endif
void __init smp_init_pseries_xics(void)
{
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
deleted file mode 100644
index 01fea46c0335..000000000000
--- a/arch/powerpc/platforms/pseries/xics.c
+++ /dev/null
@@ -1,948 +0,0 @@
-/*
- * arch/powerpc/platforms/pseries/xics.c
- *
- * Copyright 2000 IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/types.h>
-#include <linux/threads.h>
-#include <linux/kernel.h>
-#include <linux/irq.h>
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/radix-tree.h>
-#include <linux/cpu.h>
-#include <linux/msi.h>
-#include <linux/of.h>
-#include <linux/percpu.h>
-
-#include <asm/firmware.h>
-#include <asm/io.h>
-#include <asm/pgtable.h>
-#include <asm/smp.h>
-#include <asm/rtas.h>
-#include <asm/hvcall.h>
-#include <asm/machdep.h>
-
-#include "xics.h"
-#include "plpar_wrappers.h"
-
-static struct irq_host *xics_host;
-
-#define XICS_IPI 2
-#define XICS_IRQ_SPURIOUS 0
-
-/* Want a priority other than 0. Various HW issues require this. */
-#define DEFAULT_PRIORITY 5
-
-/*
- * Mark IPIs as higher priority so we can take them inside interrupts that
- * arent marked IRQF_DISABLED
- */
-#define IPI_PRIORITY 4
-
-/* The least favored priority */
-#define LOWEST_PRIORITY 0xFF
-
-/* The number of priorities defined above */
-#define MAX_NUM_PRIORITIES 3
-
-static unsigned int default_server = 0xFF;
-static unsigned int default_distrib_server = 0;
-static unsigned int interrupt_server_size = 8;
-
-/* RTAS service tokens */
-static int ibm_get_xive;
-static int ibm_set_xive;
-static int ibm_int_on;
-static int ibm_int_off;
-
-struct xics_cppr {
- unsigned char stack[MAX_NUM_PRIORITIES];
- int index;
-};
-
-static DEFINE_PER_CPU(struct xics_cppr, xics_cppr);
-
-/* Direct hardware low level accessors */
-
-/* The part of the interrupt presentation layer that we care about */
-struct xics_ipl {
- union {
- u32 word;
- u8 bytes[4];
- } xirr_poll;
- union {
- u32 word;
- u8 bytes[4];
- } xirr;
- u32 dummy;
- union {
- u32 word;
- u8 bytes[4];
- } qirr;
-};
-
-static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS];
-
-static inline unsigned int direct_xirr_info_get(void)
-{
- int cpu = smp_processor_id();
-
- return in_be32(&xics_per_cpu[cpu]->xirr.word);
-}
-
-static inline void direct_xirr_info_set(unsigned int value)
-{
- int cpu = smp_processor_id();
-
- out_be32(&xics_per_cpu[cpu]->xirr.word, value);
-}
-
-static inline void direct_cppr_info(u8 value)
-{
- int cpu = smp_processor_id();
-
- out_8(&xics_per_cpu[cpu]->xirr.bytes[0], value);
-}
-
-static inline void direct_qirr_info(int n_cpu, u8 value)
-{
- out_8(&xics_per_cpu[n_cpu]->qirr.bytes[0], value);
-}
-
-
-/* LPAR low level accessors */
-
-static inline unsigned int lpar_xirr_info_get(unsigned char cppr)
-{
- unsigned long lpar_rc;
- unsigned long return_value;
-
- lpar_rc = plpar_xirr(&return_value, cppr);
- if (lpar_rc != H_SUCCESS)
- panic(" bad return code xirr - rc = %lx\n", lpar_rc);
- return (unsigned int)return_value;
-}
-
-static inline void lpar_xirr_info_set(unsigned int value)
-{
- unsigned long lpar_rc;
-
- lpar_rc = plpar_eoi(value);
- if (lpar_rc != H_SUCCESS)
- panic("bad return code EOI - rc = %ld, value=%x\n", lpar_rc,
- value);
-}
-
-static inline void lpar_cppr_info(u8 value)
-{
- unsigned long lpar_rc;
-
- lpar_rc = plpar_cppr(value);
- if (lpar_rc != H_SUCCESS)
- panic("bad return code cppr - rc = %lx\n", lpar_rc);
-}
-
-static inline void lpar_qirr_info(int n_cpu , u8 value)
-{
- unsigned long lpar_rc;
-
- lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value);
- if (lpar_rc != H_SUCCESS)
- panic("bad return code qirr - rc = %lx\n", lpar_rc);
-}
-
-
-/* Interface to generic irq subsystem */
-
-#ifdef CONFIG_SMP
-/*
- * For the moment we only implement delivery to all cpus or one cpu.
- *
- * If the requested affinity is cpu_all_mask, we set global affinity.
- * If not we set it to the first cpu in the mask, even if multiple cpus
- * are set. This is so things like irqbalance (which set core and package
- * wide affinities) do the right thing.
- */
-static int get_irq_server(unsigned int virq, const struct cpumask *cpumask,
- unsigned int strict_check)
-{
-
- if (!distribute_irqs)
- return default_server;
-
- if (!cpumask_subset(cpu_possible_mask, cpumask)) {
- int server = cpumask_first_and(cpu_online_mask, cpumask);
-
- if (server < nr_cpu_ids)
- return get_hard_smp_processor_id(server);
-
- if (strict_check)
- return -1;
- }
-
- /*
- * Workaround issue with some versions of JS20 firmware that
- * deliver interrupts to cpus which haven't been started. This
- * happens when using the maxcpus= boot option.
- */
- if (cpumask_equal(cpu_online_mask, cpu_present_mask))
- return default_distrib_server;
-
- return default_server;
-}
-#else
-#define get_irq_server(virq, cpumask, strict_check) (default_server)
-#endif
-
-static void xics_unmask_irq(struct irq_data *d)
-{
- unsigned int irq;
- int call_status;
- int server;
-
- pr_devel("xics: unmask virq %d\n", d->irq);
-
- irq = (unsigned int)irq_map[d->irq].hwirq;
- pr_devel(" -> map to hwirq 0x%x\n", irq);
- if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
- return;
-
- server = get_irq_server(d->irq, d->affinity, 0);
-
- call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
- DEFAULT_PRIORITY);
- if (call_status != 0) {
- printk(KERN_ERR
- "%s: ibm_set_xive irq %u server %x returned %d\n",
- __func__, irq, server, call_status);
- return;
- }
-
- /* Now unmask the interrupt (often a no-op) */
- call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq);
- if (call_status != 0) {
- printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n",
- __func__, irq, call_status);
- return;
- }
-}
-
-static unsigned int xics_startup(struct irq_data *d)
-{
- /*
- * The generic MSI code returns with the interrupt disabled on the
- * card, using the MSI mask bits. Firmware doesn't appear to unmask
- * at that level, so we do it here by hand.
- */
- if (d->msi_desc)
- unmask_msi_irq(d);
-
- /* unmask it */
- xics_unmask_irq(d);
- return 0;
-}
-
-static void xics_mask_real_irq(struct irq_data *d)
-{
- int call_status;
-
- if (d->irq == XICS_IPI)
- return;
-
- call_status = rtas_call(ibm_int_off, 1, 1, NULL, d->irq);
- if (call_status != 0) {
- printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n",
- __func__, d->irq, call_status);
- return;
- }
-
- /* Have to set XIVE to 0xff to be able to remove a slot */
- call_status = rtas_call(ibm_set_xive, 3, 1, NULL, d->irq,
- default_server, 0xff);
- if (call_status != 0) {
- printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n",
- __func__, d->irq, call_status);
- return;
- }
-}
-
-static void xics_mask_irq(struct irq_data *d)
-{
- unsigned int irq;
-
- pr_devel("xics: mask virq %d\n", d->irq);
-
- irq = (unsigned int)irq_map[d->irq].hwirq;
- if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
- return;
- xics_mask_real_irq(d);
-}
-
-static void xics_mask_unknown_vec(unsigned int vec)
-{
- printk(KERN_ERR "Interrupt %u (real) is invalid, disabling it.\n", vec);
- xics_mask_real_irq(irq_get_irq_data(vec));
-}
-
-static inline unsigned int xics_xirr_vector(unsigned int xirr)
-{
- /*
- * The top byte is the old cppr, to be restored on EOI.
- * The remaining 24 bits are the vector.
- */
- return xirr & 0x00ffffff;
-}
-
-static void push_cppr(unsigned int vec)
-{
- struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
-
- if (WARN_ON(os_cppr->index >= MAX_NUM_PRIORITIES - 1))
- return;
-
- if (vec == XICS_IPI)
- os_cppr->stack[++os_cppr->index] = IPI_PRIORITY;
- else
- os_cppr->stack[++os_cppr->index] = DEFAULT_PRIORITY;
-}
-
-static unsigned int xics_get_irq_direct(void)
-{
- unsigned int xirr = direct_xirr_info_get();
- unsigned int vec = xics_xirr_vector(xirr);
- unsigned int irq;
-
- if (vec == XICS_IRQ_SPURIOUS)
- return NO_IRQ;
-
- irq = irq_radix_revmap_lookup(xics_host, vec);
- if (likely(irq != NO_IRQ)) {
- push_cppr(vec);
- return irq;
- }
-
- /* We don't have a linux mapping, so have rtas mask it. */
- xics_mask_unknown_vec(vec);
-
- /* We might learn about it later, so EOI it */
- direct_xirr_info_set(xirr);
- return NO_IRQ;
-}
-
-static unsigned int xics_get_irq_lpar(void)
-{
- struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
- unsigned int xirr = lpar_xirr_info_get(os_cppr->stack[os_cppr->index]);
- unsigned int vec = xics_xirr_vector(xirr);
- unsigned int irq;
-
- if (vec == XICS_IRQ_SPURIOUS)
- return NO_IRQ;
-
- irq = irq_radix_revmap_lookup(xics_host, vec);
- if (likely(irq != NO_IRQ)) {
- push_cppr(vec);
- return irq;
- }
-
- /* We don't have a linux mapping, so have RTAS mask it. */
- xics_mask_unknown_vec(vec);
-
- /* We might learn about it later, so EOI it */
- lpar_xirr_info_set(xirr);
- return NO_IRQ;
-}
-
-static unsigned char pop_cppr(void)
-{
- struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
-
- if (WARN_ON(os_cppr->index < 1))
- return LOWEST_PRIORITY;
-
- return os_cppr->stack[--os_cppr->index];
-}
-
-static void xics_eoi_direct(struct irq_data *d)
-{
- unsigned int irq = (unsigned int)irq_map[d->irq].hwirq;
-
- iosync();
- direct_xirr_info_set((pop_cppr() << 24) | irq);
-}
-
-static void xics_eoi_lpar(struct irq_data *d)
-{
- unsigned int irq = (unsigned int)irq_map[d->irq].hwirq;
-
- iosync();
- lpar_xirr_info_set((pop_cppr() << 24) | irq);
-}
-
-static int
-xics_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force)
-{
- unsigned int irq;
- int status;
- int xics_status[2];
- int irq_server;
-
- irq = (unsigned int)irq_map[d->irq].hwirq;
- if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
- return -1;
-
- status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
-
- if (status) {
- printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n",
- __func__, irq, status);
- return -1;
- }
-
- irq_server = get_irq_server(d->irq, cpumask, 1);
- if (irq_server == -1) {
- char cpulist[128];
- cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
- printk(KERN_WARNING
- "%s: No online cpus in the mask %s for irq %d\n",
- __func__, cpulist, d->irq);
- return -1;
- }
-
- status = rtas_call(ibm_set_xive, 3, 1, NULL,
- irq, irq_server, xics_status[1]);
-
- if (status) {
- printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n",
- __func__, irq, status);
- return -1;
- }
-
- return 0;
-}
-
-static struct irq_chip xics_pic_direct = {
- .name = "XICS",
- .irq_startup = xics_startup,
- .irq_mask = xics_mask_irq,
- .irq_unmask = xics_unmask_irq,
- .irq_eoi = xics_eoi_direct,
- .irq_set_affinity = xics_set_affinity
-};
-
-static struct irq_chip xics_pic_lpar = {
- .name = "XICS",
- .irq_startup = xics_startup,
- .irq_mask = xics_mask_irq,
- .irq_unmask = xics_unmask_irq,
- .irq_eoi = xics_eoi_lpar,
- .irq_set_affinity = xics_set_affinity
-};
-
-
-/* Interface to arch irq controller subsystem layer */
-
-/* Points to the irq_chip we're actually using */
-static struct irq_chip *xics_irq_chip;
-
-static int xics_host_match(struct irq_host *h, struct device_node *node)
-{
- /* IBM machines have interrupt parents of various funky types for things
- * like vdevices, events, etc... The trick we use here is to match
- * everything here except the legacy 8259 which is compatible "chrp,iic"
- */
- return !of_device_is_compatible(node, "chrp,iic");
-}
-
-static int xics_host_map(struct irq_host *h, unsigned int virq,
- irq_hw_number_t hw)
-{
- pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw);
-
- /* Insert the interrupt mapping into the radix tree for fast lookup */
- irq_radix_revmap_insert(xics_host, virq, hw);
-
- irq_to_desc(virq)->status |= IRQ_LEVEL;
- set_irq_chip_and_handler(virq, xics_irq_chip, handle_fasteoi_irq);
- return 0;
-}
-
-static int xics_host_xlate(struct irq_host *h, struct device_node *ct,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq, unsigned int *out_flags)
-
-{
- /* Current xics implementation translates everything
- * to level. It is not technically right for MSIs but this
- * is irrelevant at this point. We might get smarter in the future
- */
- *out_hwirq = intspec[0];
- *out_flags = IRQ_TYPE_LEVEL_LOW;
-
- return 0;
-}
-
-static struct irq_host_ops xics_host_ops = {
- .match = xics_host_match,
- .map = xics_host_map,
- .xlate = xics_host_xlate,
-};
-
-static void __init xics_init_host(void)
-{
- if (firmware_has_feature(FW_FEATURE_LPAR))
- xics_irq_chip = &xics_pic_lpar;
- else
- xics_irq_chip = &xics_pic_direct;
-
- xics_host = irq_alloc_host(NULL, IRQ_HOST_MAP_TREE, 0, &xics_host_ops,
- XICS_IRQ_SPURIOUS);
- BUG_ON(xics_host == NULL);
- irq_set_default_host(xics_host);
-}
-
-
-/* Inter-processor interrupt support */
-
-#ifdef CONFIG_SMP
-/*
- * XICS only has a single IPI, so encode the messages per CPU
- */
-static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, xics_ipi_message);
-
-static inline void smp_xics_do_message(int cpu, int msg)
-{
- unsigned long *tgt = &per_cpu(xics_ipi_message, cpu);
-
- set_bit(msg, tgt);
- mb();
- if (firmware_has_feature(FW_FEATURE_LPAR))
- lpar_qirr_info(cpu, IPI_PRIORITY);
- else
- direct_qirr_info(cpu, IPI_PRIORITY);
-}
-
-void smp_xics_message_pass(int target, int msg)
-{
- unsigned int i;
-
- if (target < NR_CPUS) {
- smp_xics_do_message(target, msg);
- } else {
- for_each_online_cpu(i) {
- if (target == MSG_ALL_BUT_SELF
- && i == smp_processor_id())
- continue;
- smp_xics_do_message(i, msg);
- }
- }
-}
-
-static irqreturn_t xics_ipi_dispatch(int cpu)
-{
- unsigned long *tgt = &per_cpu(xics_ipi_message, cpu);
-
- mb(); /* order mmio clearing qirr */
- while (*tgt) {
- if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION, tgt)) {
- smp_message_recv(PPC_MSG_CALL_FUNCTION);
- }
- if (test_and_clear_bit(PPC_MSG_RESCHEDULE, tgt)) {
- smp_message_recv(PPC_MSG_RESCHEDULE);
- }
- if (test_and_clear_bit(PPC_MSG_CALL_FUNC_SINGLE, tgt)) {
- smp_message_recv(PPC_MSG_CALL_FUNC_SINGLE);
- }
-#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
- if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK, tgt)) {
- smp_message_recv(PPC_MSG_DEBUGGER_BREAK);
- }
-#endif
- }
- return IRQ_HANDLED;
-}
-
-static irqreturn_t xics_ipi_action_direct(int irq, void *dev_id)
-{
- int cpu = smp_processor_id();
-
- direct_qirr_info(cpu, 0xff);
-
- return xics_ipi_dispatch(cpu);
-}
-
-static irqreturn_t xics_ipi_action_lpar(int irq, void *dev_id)
-{
- int cpu = smp_processor_id();
-
- lpar_qirr_info(cpu, 0xff);
-
- return xics_ipi_dispatch(cpu);
-}
-
-static void xics_request_ipi(void)
-{
- unsigned int ipi;
- int rc;
-
- ipi = irq_create_mapping(xics_host, XICS_IPI);
- BUG_ON(ipi == NO_IRQ);
-
- /*
- * IPIs are marked IRQF_DISABLED as they must run with irqs
- * disabled
- */
- set_irq_handler(ipi, handle_percpu_irq);
- if (firmware_has_feature(FW_FEATURE_LPAR))
- rc = request_irq(ipi, xics_ipi_action_lpar,
- IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL);
- else
- rc = request_irq(ipi, xics_ipi_action_direct,
- IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL);
- BUG_ON(rc);
-}
-
-int __init smp_xics_probe(void)
-{
- xics_request_ipi();
-
- return cpumask_weight(cpu_possible_mask);
-}
-
-#endif /* CONFIG_SMP */
-
-
-/* Initialization */
-
-static void xics_update_irq_servers(void)
-{
- int i, j;
- struct device_node *np;
- u32 ilen;
- const u32 *ireg;
- u32 hcpuid;
-
- /* Find the server numbers for the boot cpu. */
- np = of_get_cpu_node(boot_cpuid, NULL);
- BUG_ON(!np);
-
- ireg = of_get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen);
- if (!ireg) {
- of_node_put(np);
- return;
- }
-
- i = ilen / sizeof(int);
- hcpuid = get_hard_smp_processor_id(boot_cpuid);
-
- /* Global interrupt distribution server is specified in the last
- * entry of "ibm,ppc-interrupt-gserver#s" property. Get the last
- * entry fom this property for current boot cpu id and use it as
- * default distribution server
- */
- for (j = 0; j < i; j += 2) {
- if (ireg[j] == hcpuid) {
- default_server = hcpuid;
- default_distrib_server = ireg[j+1];
- }
- }
-
- of_node_put(np);
-}
-
-static void __init xics_map_one_cpu(int hw_id, unsigned long addr,
- unsigned long size)
-{
- int i;
-
- /* This may look gross but it's good enough for now, we don't quite
- * have a hard -> linux processor id matching.
- */
- for_each_possible_cpu(i) {
- if (!cpu_present(i))
- continue;
- if (hw_id == get_hard_smp_processor_id(i)) {
- xics_per_cpu[i] = ioremap(addr, size);
- return;
- }
- }
-}
-
-static void __init xics_init_one_node(struct device_node *np,
- unsigned int *indx)
-{
- unsigned int ilen;
- const u32 *ireg;
-
- /* This code does the theorically broken assumption that the interrupt
- * server numbers are the same as the hard CPU numbers.
- * This happens to be the case so far but we are playing with fire...
- * should be fixed one of these days. -BenH.
- */
- ireg = of_get_property(np, "ibm,interrupt-server-ranges", NULL);
-
- /* Do that ever happen ? we'll know soon enough... but even good'old
- * f80 does have that property ..
- */
- WARN_ON(ireg == NULL);
- if (ireg) {
- /*
- * set node starting index for this node
- */
- *indx = *ireg;
- }
- ireg = of_get_property(np, "reg", &ilen);
- if (!ireg)
- panic("xics_init_IRQ: can't find interrupt reg property");
-
- while (ilen >= (4 * sizeof(u32))) {
- unsigned long addr, size;
-
- /* XXX Use proper OF parsing code here !!! */
- addr = (unsigned long)*ireg++ << 32;
- ilen -= sizeof(u32);
- addr |= *ireg++;
- ilen -= sizeof(u32);
- size = (unsigned long)*ireg++ << 32;
- ilen -= sizeof(u32);
- size |= *ireg++;
- ilen -= sizeof(u32);
- xics_map_one_cpu(*indx, addr, size);
- (*indx)++;
- }
-}
-
-void __init xics_init_IRQ(void)
-{
- struct device_node *np;
- u32 indx = 0;
- int found = 0;
- const u32 *isize;
-
- ppc64_boot_msg(0x20, "XICS Init");
-
- ibm_get_xive = rtas_token("ibm,get-xive");
- ibm_set_xive = rtas_token("ibm,set-xive");
- ibm_int_on = rtas_token("ibm,int-on");
- ibm_int_off = rtas_token("ibm,int-off");
-
- for_each_node_by_type(np, "PowerPC-External-Interrupt-Presentation") {
- found = 1;
- if (firmware_has_feature(FW_FEATURE_LPAR)) {
- of_node_put(np);
- break;
- }
- xics_init_one_node(np, &indx);
- }
- if (found == 0)
- return;
-
- /* get the bit size of server numbers */
- found = 0;
-
- for_each_compatible_node(np, NULL, "ibm,ppc-xics") {
- isize = of_get_property(np, "ibm,interrupt-server#-size", NULL);
-
- if (!isize)
- continue;
-
- if (!found) {
- interrupt_server_size = *isize;
- found = 1;
- } else if (*isize != interrupt_server_size) {
- printk(KERN_WARNING "XICS: "
- "mismatched ibm,interrupt-server#-size\n");
- interrupt_server_size = max(*isize,
- interrupt_server_size);
- }
- }
-
- xics_update_irq_servers();
- xics_init_host();
-
- if (firmware_has_feature(FW_FEATURE_LPAR))
- ppc_md.get_irq = xics_get_irq_lpar;
- else
- ppc_md.get_irq = xics_get_irq_direct;
-
- xics_setup_cpu();
-
- ppc64_boot_msg(0x21, "XICS Done");
-}
-
-/* Cpu startup, shutdown, and hotplug */
-
-static void xics_set_cpu_priority(unsigned char cppr)
-{
- struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
-
- /*
- * we only really want to set the priority when there's
- * just one cppr value on the stack
- */
- WARN_ON(os_cppr->index != 0);
-
- os_cppr->stack[0] = cppr;
-
- if (firmware_has_feature(FW_FEATURE_LPAR))
- lpar_cppr_info(cppr);
- else
- direct_cppr_info(cppr);
- iosync();
-}
-
-/* Have the calling processor join or leave the specified global queue */
-static void xics_set_cpu_giq(unsigned int gserver, unsigned int join)
-{
- int index;
- int status;
-
- if (!rtas_indicator_present(GLOBAL_INTERRUPT_QUEUE, NULL))
- return;
-
- index = (1UL << interrupt_server_size) - 1 - gserver;
-
- status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE, index, join);
-
- WARN(status < 0, "set-indicator(%d, %d, %u) returned %d\n",
- GLOBAL_INTERRUPT_QUEUE, index, join, status);
-}
-
-void xics_setup_cpu(void)
-{
- xics_set_cpu_priority(LOWEST_PRIORITY);
-
- xics_set_cpu_giq(default_distrib_server, 1);
-}
-
-void xics_teardown_cpu(void)
-{
- struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
- int cpu = smp_processor_id();
-
- /*
- * we have to reset the cppr index to 0 because we're
- * not going to return from the IPI
- */
- os_cppr->index = 0;
- xics_set_cpu_priority(0);
-
- /* Clear any pending IPI request */
- if (firmware_has_feature(FW_FEATURE_LPAR))
- lpar_qirr_info(cpu, 0xff);
- else
- direct_qirr_info(cpu, 0xff);
-}
-
-void xics_kexec_teardown_cpu(int secondary)
-{
- xics_teardown_cpu();
-
- /*
- * we take the ipi irq but and never return so we
- * need to EOI the IPI, but want to leave our priority 0
- *
- * should we check all the other interrupts too?
- * should we be flagging idle loop instead?
- * or creating some task to be scheduled?
- */
-
- if (firmware_has_feature(FW_FEATURE_LPAR))
- lpar_xirr_info_set((0x00 << 24) | XICS_IPI);
- else
- direct_xirr_info_set((0x00 << 24) | XICS_IPI);
-
- /*
- * Some machines need to have at least one cpu in the GIQ,
- * so leave the master cpu in the group.
- */
- if (secondary)
- xics_set_cpu_giq(default_distrib_server, 0);
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-
-/* Interrupts are disabled. */
-void xics_migrate_irqs_away(void)
-{
- int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
- unsigned int irq, virq;
-
- /* If we used to be the default server, move to the new "boot_cpuid" */
- if (hw_cpu == default_server)
- xics_update_irq_servers();
-
- /* Reject any interrupt that was queued to us... */
- xics_set_cpu_priority(0);
-
- /* Remove ourselves from the global interrupt queue */
- xics_set_cpu_giq(default_distrib_server, 0);
-
- /* Allow IPIs again... */
- xics_set_cpu_priority(DEFAULT_PRIORITY);
-
- for_each_irq(virq) {
- struct irq_desc *desc;
- struct irq_chip *chip;
- int xics_status[2];
- int status;
- unsigned long flags;
-
- /* We cant set affinity on ISA interrupts */
- if (virq < NUM_ISA_INTERRUPTS)
- continue;
- if (irq_map[virq].host != xics_host)
- continue;
- irq = (unsigned int)irq_map[virq].hwirq;
- /* We need to get IPIs still. */
- if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
- continue;
-
- desc = irq_to_desc(virq);
-
- /* We only need to migrate enabled IRQS */
- if (desc == NULL || desc->action == NULL)
- continue;
-
- chip = get_irq_desc_chip(desc);
- if (chip == NULL || chip->irq_set_affinity == NULL)
- continue;
-
- raw_spin_lock_irqsave(&desc->lock, flags);
-
- status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
- if (status) {
- printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n",
- __func__, irq, status);
- goto unlock;
- }
-
- /*
- * We only support delivery to all cpus or to one cpu.
- * The irq has to be migrated only in the single cpu
- * case.
- */
- if (xics_status[0] != hw_cpu)
- goto unlock;
-
- /* This is expected during cpu offline. */
- if (cpu_online(cpu))
- printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n",
- virq, cpu);
-
- /* Reset affinity to all cpus */
- cpumask_setall(desc->irq_data.affinity);
- chip->irq_set_affinity(&desc->irq_data, cpu_all_mask, true);
-unlock:
- raw_spin_unlock_irqrestore(&desc->lock, flags);
- }
-}
-#endif
diff --git a/arch/powerpc/platforms/pseries/xics.h b/arch/powerpc/platforms/pseries/xics.h
deleted file mode 100644
index d1d5a83039ae..000000000000
--- a/arch/powerpc/platforms/pseries/xics.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * arch/powerpc/platforms/pseries/xics.h
- *
- * Copyright 2000 IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _POWERPC_KERNEL_XICS_H
-#define _POWERPC_KERNEL_XICS_H
-
-extern void xics_init_IRQ(void);
-extern void xics_setup_cpu(void);
-extern void xics_teardown_cpu(void);
-extern void xics_kexec_teardown_cpu(int secondary);
-extern void xics_migrate_irqs_away(void);
-extern int smp_xics_probe(void);
-extern void smp_xics_message_pass(int target, int msg);
-
-#endif /* _POWERPC_KERNEL_XICS_H */
diff --git a/arch/powerpc/platforms/wsp/Kconfig b/arch/powerpc/platforms/wsp/Kconfig
new file mode 100644
index 000000000000..c3c48eb62cc1
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/Kconfig
@@ -0,0 +1,28 @@
+config PPC_WSP
+ bool
+ default n
+
+menu "WSP platform selection"
+ depends on PPC_BOOK3E_64
+
+config PPC_PSR2
+ bool "PSR-2 platform"
+ select PPC_A2
+ select GENERIC_TBSYNC
+ select PPC_SCOM
+ select EPAPR_BOOT
+ select PPC_WSP
+ select PPC_XICS
+ select PPC_ICP_NATIVE
+ default y
+
+endmenu
+
+config PPC_A2_DD2
+ bool "Support for DD2 based A2/WSP systems"
+ depends on PPC_A2
+
+config WORKAROUND_ERRATUM_463
+ depends on PPC_A2_DD2
+ bool "Workaround erratum 463"
+ default y
diff --git a/arch/powerpc/platforms/wsp/Makefile b/arch/powerpc/platforms/wsp/Makefile
new file mode 100644
index 000000000000..095be73d6cd4
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/Makefile
@@ -0,0 +1,6 @@
+ccflags-y += -mno-minimal-toc
+
+obj-y += setup.o ics.o
+obj-$(CONFIG_PPC_PSR2) += psr2.o opb_pic.o
+obj-$(CONFIG_PPC_WSP) += scom_wsp.o
+obj-$(CONFIG_SMP) += smp.o scom_smp.o
diff --git a/arch/powerpc/platforms/wsp/ics.c b/arch/powerpc/platforms/wsp/ics.c
new file mode 100644
index 000000000000..e53bd9e7b125
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/ics.c
@@ -0,0 +1,712 @@
+/*
+ * Copyright 2008-2011 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/cpu.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/xics.h>
+
+#include "wsp.h"
+#include "ics.h"
+
+
+/* WSP ICS */
+
+struct wsp_ics {
+ struct ics ics;
+ struct device_node *dn;
+ void __iomem *regs;
+ spinlock_t lock;
+ unsigned long *bitmap;
+ u32 chip_id;
+ u32 lsi_base;
+ u32 lsi_count;
+ u64 hwirq_start;
+ u64 count;
+#ifdef CONFIG_SMP
+ int *hwirq_cpu_map;
+#endif
+};
+
+#define to_wsp_ics(ics) container_of(ics, struct wsp_ics, ics)
+
+#define INT_SRC_LAYER_BUID_REG(base) ((base) + 0x00)
+#define IODA_TBL_ADDR_REG(base) ((base) + 0x18)
+#define IODA_TBL_DATA_REG(base) ((base) + 0x20)
+#define XIVE_UPDATE_REG(base) ((base) + 0x28)
+#define ICS_INT_CAPS_REG(base) ((base) + 0x30)
+
+#define TBL_AUTO_INCREMENT ((1UL << 63) | (1UL << 15))
+#define TBL_SELECT_XIST (1UL << 48)
+#define TBL_SELECT_XIVT (1UL << 49)
+
+#define IODA_IRQ(irq) ((irq) & (0x7FFULL)) /* HRM 5.1.3.4 */
+
+#define XIST_REQUIRED 0x8
+#define XIST_REJECTED 0x4
+#define XIST_PRESENTED 0x2
+#define XIST_PENDING 0x1
+
+#define XIVE_SERVER_SHIFT 42
+#define XIVE_SERVER_MASK 0xFFFFULL
+#define XIVE_PRIORITY_MASK 0xFFULL
+#define XIVE_PRIORITY_SHIFT 32
+#define XIVE_WRITE_ENABLE (1ULL << 63)
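+
+/*
+ * XIVE word layout implied by the masks above: bit 63 is the write
+ * enable used with XIVE_UPDATE_REG, bits 57:42 hold the server,
+ * bits 39:32 the priority, and the low bits carry the irq number
+ * (XIVE_ADDR_MASK) when an update is pushed.
+ */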
+
+/*
+ * The docs refer to a 6 bit field called ChipID, which consists of a
+ * 3 bit NodeID and a 3 bit ChipID. On WSP the ChipID is always zero
+ * so we ignore it, and everywhere we use "chip id" in this code we
+ * mean the NodeID.
+ */
+#define WSP_ICS_CHIP_SHIFT 17
+
+
+static struct wsp_ics *ics_list;
+static int num_ics;
+
+/* ICS Source controller accessors */
+
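+/*
+ * XIVEs live in an IODA table that is accessed indirectly: write the
+ * entry selector (table select + irq number) to the address register,
+ * then read the data register. Writes go through the dedicated
+ * XIVE_UPDATE register instead, which is what wsp_ics_set_xive() uses.
+ */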
+static u64 wsp_ics_get_xive(struct wsp_ics *ics, unsigned int irq)
+{
+ unsigned long flags;
+ u64 xive;
+
+ spin_lock_irqsave(&ics->lock, flags);
+ out_be64(IODA_TBL_ADDR_REG(ics->regs), TBL_SELECT_XIVT | IODA_IRQ(irq));
+ xive = in_be64(IODA_TBL_DATA_REG(ics->regs));
+ spin_unlock_irqrestore(&ics->lock, flags);
+
+ return xive;
+}
+
+static void wsp_ics_set_xive(struct wsp_ics *ics, unsigned int irq, u64 xive)
+{
+ xive &= ~XIVE_ADDR_MASK;
+ xive |= (irq & XIVE_ADDR_MASK);
+ xive |= XIVE_WRITE_ENABLE;
+
+ out_be64(XIVE_UPDATE_REG(ics->regs), xive);
+}
+
+static u64 xive_set_server(u64 xive, unsigned int server)
+{
+ u64 mask = ~(XIVE_SERVER_MASK << XIVE_SERVER_SHIFT);
+
+ xive &= mask;
+ xive |= (server & XIVE_SERVER_MASK) << XIVE_SERVER_SHIFT;
+
+ return xive;
+}
+
+static u64 xive_set_priority(u64 xive, unsigned int priority)
+{
+ u64 mask = ~(XIVE_PRIORITY_MASK << XIVE_PRIORITY_SHIFT);
+
+ xive &= mask;
+ xive |= (priority & XIVE_PRIORITY_MASK) << XIVE_PRIORITY_SHIFT;
+
+ return xive;
+}
+
+
+#ifdef CONFIG_SMP
+/* Find logical CPUs within mask on a given chip and store result in ret */
+void cpus_on_chip(int chip_id, cpumask_t *mask, cpumask_t *ret)
+{
+ int cpu, chip;
+ struct device_node *cpu_dn, *dn;
+ const u32 *prop;
+
+ cpumask_clear(ret);
+ for_each_cpu(cpu, mask) {
+ cpu_dn = of_get_cpu_node(cpu, NULL);
+ if (!cpu_dn)
+ continue;
+
+ prop = of_get_property(cpu_dn, "at-node", NULL);
+ if (!prop) {
+ of_node_put(cpu_dn);
+ continue;
+ }
+
+ dn = of_find_node_by_phandle(*prop);
+ of_node_put(cpu_dn);
+
+ chip = wsp_get_chip_id(dn);
+ if (chip == chip_id)
+ cpumask_set_cpu(cpu, ret);
+
+ of_node_put(dn);
+ }
+}
+
+/* Store a suitable CPU to handle a hwirq in the ics->hwirq_cpu_map cache */
+static int cache_hwirq_map(struct wsp_ics *ics, unsigned int hwirq,
+ const cpumask_t *affinity)
+{
+ cpumask_var_t avail, newmask;
+ int ret = -ENOMEM, cpu, cpu_rover = 0, target;
+ int index = hwirq - ics->hwirq_start;
+ unsigned int nodeid;
+
+ BUG_ON(index < 0 || index >= ics->count);
+
+ if (!ics->hwirq_cpu_map)
+ return -ENOMEM;
+
+ if (!distribute_irqs) {
+ ics->hwirq_cpu_map[hwirq - ics->hwirq_start] = xics_default_server;
+ return 0;
+ }
+
+ /* Allocate needed CPU masks */
+ if (!alloc_cpumask_var(&avail, GFP_KERNEL))
+ goto ret;
+ if (!alloc_cpumask_var(&newmask, GFP_KERNEL))
+ goto freeavail;
+
+ /* Find PBus attached to the source of this IRQ */
+ nodeid = (hwirq >> WSP_ICS_CHIP_SHIFT) & 0x3; /* 12:14 */
+
+ /* Find CPUs that could handle this IRQ */
+ if (affinity)
+ cpumask_and(avail, cpu_online_mask, affinity);
+ else
+ cpumask_copy(avail, cpu_online_mask);
+
+ /* Narrow selection down to logical CPUs on the same chip */
+ cpus_on_chip(nodeid, avail, newmask);
+
+ /* Ensure we haven't narrowed it down to 0 */
+ if (unlikely(cpumask_empty(newmask))) {
+ if (unlikely(cpumask_empty(avail))) {
+ ret = -1;
+ goto out;
+ }
+ cpumask_copy(newmask, avail);
+ }
+
+ /* Choose a CPU out of those we narrowed it down to in round robin */
+ target = hwirq % cpumask_weight(newmask);
+ for_each_cpu(cpu, newmask) {
+ if (cpu_rover++ >= target) {
+ ics->hwirq_cpu_map[index] = get_hard_smp_processor_id(cpu);
+ ret = 0;
+ goto out;
+ }
+ }
+
+ /* Shouldn't happen */
+ WARN_ON(1);
+
+out:
+ free_cpumask_var(newmask);
+freeavail:
+ free_cpumask_var(avail);
+ret:
+ if (ret < 0) {
+ ics->hwirq_cpu_map[index] = cpumask_first(cpu_online_mask);
+ pr_warning("Error, falling hwirq 0x%x routing back to CPU %i\n",
+ hwirq, ics->hwirq_cpu_map[index]);
+ }
+ return ret;
+}
+
+static void alloc_irq_map(struct wsp_ics *ics)
+{
+ int i;
+
+ ics->hwirq_cpu_map = kmalloc(sizeof(int) * ics->count, GFP_KERNEL);
+ if (!ics->hwirq_cpu_map) {
+ pr_warning("Allocate hwirq_cpu_map failed, "
+ "IRQ balancing disabled\n");
+ return;
+ }
+
+ for (i = 0; i < ics->count; i++)
+ ics->hwirq_cpu_map[i] = xics_default_server;
+}
+
+static int get_irq_server(struct wsp_ics *ics, unsigned int hwirq)
+{
+ int index = hwirq - ics->hwirq_start;
+
+ BUG_ON(index < 0 || index >= ics->count);
+
+ if (!ics->hwirq_cpu_map)
+ return xics_default_server;
+
+ return ics->hwirq_cpu_map[index];
+}
+#else /* !CONFIG_SMP */
+static int cache_hwirq_map(struct wsp_ics *ics, unsigned int hwirq,
+ const cpumask_t *affinity)
+{
+ return 0;
+}
+
+static int get_irq_server(struct wsp_ics *ics, unsigned int hwirq)
+{
+ return xics_default_server;
+}
+
+static void alloc_irq_map(struct wsp_ics *ics) { }
+#endif
+
+static void wsp_chip_unmask_irq(struct irq_data *d)
+{
+ unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
+ struct wsp_ics *ics;
+ int server;
+ u64 xive;
+
+ if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
+ return;
+
+ ics = d->chip_data;
+ if (WARN_ON(!ics))
+ return;
+
+ server = get_irq_server(ics, hw_irq);
+
+ xive = wsp_ics_get_xive(ics, hw_irq);
+ xive = xive_set_server(xive, server);
+ xive = xive_set_priority(xive, DEFAULT_PRIORITY);
+ wsp_ics_set_xive(ics, hw_irq, xive);
+}
+
+static unsigned int wsp_chip_startup(struct irq_data *d)
+{
+ /* unmask it */
+ wsp_chip_unmask_irq(d);
+ return 0;
+}
+
+static void wsp_mask_real_irq(unsigned int hw_irq, struct wsp_ics *ics)
+{
+ u64 xive;
+
+ if (hw_irq == XICS_IPI)
+ return;
+
+ if (WARN_ON(!ics))
+ return;
+ xive = wsp_ics_get_xive(ics, hw_irq);
+ xive = xive_set_server(xive, xics_default_server);
+ xive = xive_set_priority(xive, LOWEST_PRIORITY);
+ wsp_ics_set_xive(ics, hw_irq, xive);
+}
+
+static void wsp_chip_mask_irq(struct irq_data *d)
+{
+ unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
+ struct wsp_ics *ics = d->chip_data;
+
+ if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
+ return;
+
+ wsp_mask_real_irq(hw_irq, ics);
+}
+
+static int wsp_chip_set_affinity(struct irq_data *d,
+ const struct cpumask *cpumask, bool force)
+{
+ unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
+ struct wsp_ics *ics;
+ int ret;
+ u64 xive;
+
+ if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
+ return -1;
+
+ ics = d->chip_data;
+ if (WARN_ON(!ics))
+ return -1;
+ xive = wsp_ics_get_xive(ics, hw_irq);
+
+ /*
+ * For the moment only implement delivery to all cpus or one cpu.
+ * Get current irq_server for the given irq
+ */
+ ret = cache_hwirq_map(ics, d->irq, cpumask);
+ if (ret == -1) {
+ char cpulist[128];
+ cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
+ pr_warning("%s: No online cpus in the mask %s for irq %d\n",
+ __func__, cpulist, d->irq);
+ return -1;
+ } else if (ret == -ENOMEM) {
+ pr_warning("%s: Out of memory\n", __func__);
+ return -1;
+ }
+
+ xive = xive_set_server(xive, get_irq_server(ics, hw_irq));
+ wsp_ics_set_xive(ics, hw_irq, xive);
+
+ return 0;
+}
+
+static struct irq_chip wsp_irq_chip = {
+ .name = "WSP ICS",
+ .irq_startup = wsp_chip_startup,
+ .irq_mask = wsp_chip_mask_irq,
+ .irq_unmask = wsp_chip_unmask_irq,
+ .irq_set_affinity = wsp_chip_set_affinity
+};
+
+static int wsp_ics_host_match(struct ics *ics, struct device_node *dn)
+{
+ /* All ICSs in the system implement a global irq number space,
+ * so match against them all. */
+ return of_device_is_compatible(dn, "ibm,ppc-xics");
+}
+
+static int wsp_ics_match_hwirq(struct wsp_ics *wsp_ics, unsigned int hwirq)
+{
+ if (hwirq >= wsp_ics->hwirq_start &&
+ hwirq < wsp_ics->hwirq_start + wsp_ics->count)
+ return 1;
+
+ return 0;
+}
+
+static int wsp_ics_map(struct ics *ics, unsigned int virq)
+{
+ struct wsp_ics *wsp_ics = to_wsp_ics(ics);
+ unsigned int hw_irq = virq_to_hw(virq);
+ unsigned long flags;
+
+ if (!wsp_ics_match_hwirq(wsp_ics, hw_irq))
+ return -ENOENT;
+
+ irq_set_chip_and_handler(virq, &wsp_irq_chip, handle_fasteoi_irq);
+
+ irq_set_chip_data(virq, wsp_ics);
+
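+ /* Reserve this hwirq in the allocation bitmap so wsp_ics_alloc_irq()
+ * will not hand it out again. */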
+ spin_lock_irqsave(&wsp_ics->lock, flags);
+ bitmap_allocate_region(wsp_ics->bitmap, hw_irq - wsp_ics->hwirq_start, 0);
+ spin_unlock_irqrestore(&wsp_ics->lock, flags);
+
+ return 0;
+}
+
+static void wsp_ics_mask_unknown(struct ics *ics, unsigned long hw_irq)
+{
+ struct wsp_ics *wsp_ics = to_wsp_ics(ics);
+
+ if (!wsp_ics_match_hwirq(wsp_ics, hw_irq))
+ return;
+
+ pr_err("%s: IRQ %lu (real) is invalid, disabling it.\n", __func__, hw_irq);
+ wsp_mask_real_irq(hw_irq, wsp_ics);
+}
+
+static long wsp_ics_get_server(struct ics *ics, unsigned long hw_irq)
+{
+ struct wsp_ics *wsp_ics = to_wsp_ics(ics);
+
+ if (!wsp_ics_match_hwirq(wsp_ics, hw_irq))
+ return -ENOENT;
+
+ return get_irq_server(wsp_ics, hw_irq);
+}
+
+/* HW Number allocation API */
+
+static struct wsp_ics *wsp_ics_find_dn_ics(struct device_node *dn)
+{
+ struct device_node *iparent;
+ int i;
+
+ iparent = of_irq_find_parent(dn);
+ if (!iparent) {
+ pr_err("wsp_ics: Failed to find interrupt parent!\n");
+ return NULL;
+ }
+
+ for (i = 0; i < num_ics; i++) {
+ if (ics_list[i].dn == iparent)
+ break;
+ }
+
+ if (i >= num_ics) {
+ pr_err("wsp_ics: Unable to find parent bitmap!\n");
+ return NULL;
+ }
+
+ return &ics_list[i];
+}
+
+int wsp_ics_alloc_irq(struct device_node *dn, int num)
+{
+ struct wsp_ics *ics;
+ int order, offset;
+
+ ics = wsp_ics_find_dn_ics(dn);
+ if (!ics)
+ return -ENODEV;
+
+ /* Fast, but overly strict if num isn't a power of two */
+ order = get_count_order(num);
+
+ spin_lock_irq(&ics->lock);
+ offset = bitmap_find_free_region(ics->bitmap, ics->count, order);
+ spin_unlock_irq(&ics->lock);
+
+ if (offset < 0)
+ return offset;
+
+ return offset + ics->hwirq_start;
+}
+
+void wsp_ics_free_irq(struct device_node *dn, unsigned int irq)
+{
+ struct wsp_ics *ics;
+
+ ics = wsp_ics_find_dn_ics(dn);
+ if (WARN_ON(!ics))
+ return;
+
+ spin_lock_irq(&ics->lock);
+ bitmap_release_region(ics->bitmap, irq, 0);
+ spin_unlock_irq(&ics->lock);
+}
+
+/* Initialisation */
+
+static int __init wsp_ics_bitmap_setup(struct wsp_ics *ics,
+ struct device_node *dn)
+{
+ int len, i, j, size;
+ u32 start, count;
+ const u32 *p;
+
+ size = BITS_TO_LONGS(ics->count) * sizeof(long);
+ ics->bitmap = kzalloc(size, GFP_KERNEL);
+ if (!ics->bitmap) {
+ pr_err("wsp_ics: ENOMEM allocating IRQ bitmap!\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&ics->lock);
+
+ p = of_get_property(dn, "available-ranges", &len);
+ if (!p || !len) {
+ /* FIXME this should be a WARN() once mambo is updated */
+ pr_err("wsp_ics: No available-ranges defined for %s\n",
+ dn->full_name);
+ return 0;
+ }
+
+ if (len % (2 * sizeof(u32)) != 0) {
+ /* FIXME this should be a WARN() once mambo is updated */
+ pr_err("wsp_ics: Invalid available-ranges for %s\n",
+ dn->full_name);
+ return 0;
+ }
+
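+ /* Start with every hwirq marked as allocated, then release the
+ * ranges the device tree advertises as available. */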
+ bitmap_fill(ics->bitmap, ics->count);
+
+ for (i = 0; i < len / sizeof(u32); i += 2) {
+ start = of_read_number(p + i, 1);
+ count = of_read_number(p + i + 1, 1);
+
+ pr_devel("%s: start: %d count: %d\n", __func__, start, count);
+
+ if ((start + count) > (ics->hwirq_start + ics->count) ||
+ start < ics->hwirq_start) {
+ pr_err("wsp_ics: Invalid range! -> %d to %d\n",
+ start, start + count);
+ break;
+ }
+
+ for (j = 0; j < count; j++)
+ bitmap_release_region(ics->bitmap,
+ (start + j) - ics->hwirq_start, 0);
+ }
+
+ /* Ensure LSIs are not available for allocation */
+ bitmap_allocate_region(ics->bitmap, ics->lsi_base,
+ get_count_order(ics->lsi_count));
+
+ return 0;
+}
+
+static int __init wsp_ics_setup(struct wsp_ics *ics, struct device_node *dn)
+{
+ u32 lsi_buid, msi_buid, msi_base, msi_count;
+ void __iomem *regs;
+ const u32 *p;
+ int rc, len, i;
+ u64 caps, buid;
+
+ p = of_get_property(dn, "interrupt-ranges", &len);
+ if (!p || len < (2 * sizeof(u32))) {
+ pr_err("wsp_ics: No/bad interrupt-ranges found on %s\n",
+ dn->full_name);
+ return -ENOENT;
+ }
+
+ if (len > (2 * sizeof(u32))) {
+ pr_err("wsp_ics: Multiple ics ranges not supported.\n");
+ return -EINVAL;
+ }
+
+ regs = of_iomap(dn, 0);
+ if (!regs) {
+ pr_err("wsp_ics: of_iomap(%s) failed\n", dn->full_name);
+ return -ENXIO;
+ }
+
+ ics->hwirq_start = of_read_number(p, 1);
+ ics->count = of_read_number(p + 1, 1);
+ ics->regs = regs;
+
+ ics->chip_id = wsp_get_chip_id(dn);
+ if (WARN_ON(ics->chip_id < 0))
+ ics->chip_id = 0;
+
+ /* Get some information about the critter */
+ caps = in_be64(ICS_INT_CAPS_REG(ics->regs));
+ buid = in_be64(INT_SRC_LAYER_BUID_REG(ics->regs));
+ ics->lsi_count = caps >> 56;
+ msi_count = (caps >> 44) & 0x7ff;
+
+ /* Note: LSI BUID is 9 bits, but really only 3 are BUID and the
+ * rest is mixed in the interrupt number. We store the whole
+ * thing though
+ */
+ lsi_buid = (buid >> 48) & 0x1ff;
+ ics->lsi_base = (ics->chip_id << WSP_ICS_CHIP_SHIFT) | lsi_buid << 5;
+ msi_buid = (buid >> 37) & 0x7;
+ msi_base = (ics->chip_id << WSP_ICS_CHIP_SHIFT) | msi_buid << 11;
+
+ pr_info("wsp_ics: Found %s\n", dn->full_name);
+ pr_info("wsp_ics: irq range : 0x%06llx..0x%06llx\n",
+ ics->hwirq_start, ics->hwirq_start + ics->count - 1);
+ pr_info("wsp_ics: %4d LSIs : 0x%06x..0x%06x\n",
+ ics->lsi_count, ics->lsi_base,
+ ics->lsi_base + ics->lsi_count - 1);
+ pr_info("wsp_ics: %4d MSIs : 0x%06x..0x%06x\n",
+ msi_count, msi_base,
+ msi_base + msi_count - 1);
+
+ /* Let's check the HW config is sane */
+ if (ics->lsi_base < ics->hwirq_start ||
+ (ics->lsi_base + ics->lsi_count) > (ics->hwirq_start + ics->count))
+ pr_warning("wsp_ics: WARNING ! LSIs out of interrupt-ranges !\n");
+ if (msi_base < ics->hwirq_start ||
+ (msi_base + msi_count) > (ics->hwirq_start + ics->count))
+ pr_warning("wsp_ics: WARNING ! MSIs out of interrupt-ranges !\n");
+
+ /* We don't check for overlap between LSI and MSI, which will happen
+ * if we use the same BUID, I'm not sure yet how legit that is.
+ */
+
+ rc = wsp_ics_bitmap_setup(ics, dn);
+ if (rc) {
+ iounmap(regs);
+ return rc;
+ }
+
+ ics->dn = of_node_get(dn);
+ alloc_irq_map(ics);
+
+ for (i = 0; i < ics->count; i++)
+ wsp_mask_real_irq(ics->hwirq_start + i, ics);
+
+ ics->ics.map = wsp_ics_map;
+ ics->ics.mask_unknown = wsp_ics_mask_unknown;
+ ics->ics.get_server = wsp_ics_get_server;
+ ics->ics.host_match = wsp_ics_host_match;
+
+ xics_register_ics(&ics->ics);
+
+ return 0;
+}
+
+static void __init wsp_ics_set_default_server(void)
+{
+ struct device_node *np;
+ u32 hwid;
+
+ /* Find the server number for the boot cpu. */
+ np = of_get_cpu_node(boot_cpuid, NULL);
+ BUG_ON(!np);
+
+ hwid = get_hard_smp_processor_id(boot_cpuid);
+
+ pr_info("wsp_ics: default server is %#x, CPU %s\n", hwid, np->full_name);
+ xics_default_server = hwid;
+
+ of_node_put(np);
+}
+
+static int __init wsp_ics_init(void)
+{
+ struct device_node *dn;
+ struct wsp_ics *ics;
+ int rc, found;
+
+ wsp_ics_set_default_server();
+
+ found = 0;
+ for_each_compatible_node(dn, NULL, "ibm,ppc-xics")
+ found++;
+
+ if (found == 0) {
+ pr_err("wsp_ics: No ICS's found!\n");
+ return -ENODEV;
+ }
+
+ ics_list = kmalloc(sizeof(*ics) * found, GFP_KERNEL);
+ if (!ics_list) {
+ pr_err("wsp_ics: No memory for structs.\n");
+ return -ENOMEM;
+ }
+
+ num_ics = 0;
+ ics = ics_list;
+ for_each_compatible_node(dn, NULL, "ibm,wsp-xics") {
+ rc = wsp_ics_setup(ics, dn);
+ if (rc == 0) {
+ ics++;
+ num_ics++;
+ }
+ }
+
+ if (found != num_ics) {
+ pr_err("wsp_ics: Failed setting up %d ICS's\n",
+ found - num_ics);
+ return -1;
+ }
+
+ return 0;
+}
+
+void __init wsp_init_irq(void)
+{
+ wsp_ics_init();
+ xics_init();
+
+ /* We need to patch our irq chip's EOI to point to the right ICP */
+ wsp_irq_chip.irq_eoi = icp_ops->eoi;
+}
diff --git a/arch/powerpc/platforms/wsp/ics.h b/arch/powerpc/platforms/wsp/ics.h
new file mode 100644
index 000000000000..e34d53102640
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/ics.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2009 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __ICS_H
+#define __ICS_H
+
+#define XIVE_ADDR_MASK 0x7FFULL
+
+extern void wsp_init_irq(void);
+
+extern int wsp_ics_alloc_irq(struct device_node *dn, int num);
+extern void wsp_ics_free_irq(struct device_node *dn, unsigned int irq);
+
+#endif /* __ICS_H */
diff --git a/arch/powerpc/platforms/wsp/opb_pic.c b/arch/powerpc/platforms/wsp/opb_pic.c
new file mode 100644
index 000000000000..be05631a3c1c
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/opb_pic.c
@@ -0,0 +1,332 @@
+/*
+ * IBM Onboard Peripheral Bus Interrupt Controller
+ *
+ * Copyright 2010 Jack Miller, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+
+#include <asm/reg_a2.h>
+#include <asm/irq.h>
+
+#define OPB_NR_IRQS 32
+
+#define OPB_MLSASIER 0x04 /* MLS Accumulated Status IER */
+#define OPB_MLSIR 0x50 /* MLS Interrupt Register */
+#define OPB_MLSIER 0x54 /* MLS Interrupt Enable Register */
+#define OPB_MLSIPR 0x58 /* MLS Interrupt Polarity Register */
+#define OPB_MLSIIR 0x5c /* MLS Interrupt Inputs Register */
+
+static int opb_index = 0;
+
+struct opb_pic {
+ struct irq_host *host;
+ void *regs;
+ int index;
+ spinlock_t lock;
+};
+
+static u32 opb_in(struct opb_pic *opb, int offset)
+{
+ return in_be32(opb->regs + offset);
+}
+
+static void opb_out(struct opb_pic *opb, int offset, u32 val)
+{
+ out_be32(opb->regs + offset, val);
+}
+
+static void opb_unmask_irq(struct irq_data *d)
+{
+ struct opb_pic *opb;
+ unsigned long flags;
+ u32 ier, bitset;
+
+ opb = d->chip_data;
+ bitset = (1 << (31 - irqd_to_hwirq(d)));
+
+ spin_lock_irqsave(&opb->lock, flags);
+
+ ier = opb_in(opb, OPB_MLSIER);
+ opb_out(opb, OPB_MLSIER, ier | bitset);
+ ier = opb_in(opb, OPB_MLSIER);
+
+ spin_unlock_irqrestore(&opb->lock, flags);
+}
+
+static void opb_mask_irq(struct irq_data *d)
+{
+ struct opb_pic *opb;
+ unsigned long flags;
+ u32 ier, mask;
+
+ opb = d->chip_data;
+ mask = ~(1 << (31 - irqd_to_hwirq(d)));
+
+ spin_lock_irqsave(&opb->lock, flags);
+
+ ier = opb_in(opb, OPB_MLSIER);
+ opb_out(opb, OPB_MLSIER, ier & mask);
+ ier = opb_in(opb, OPB_MLSIER); // Flush posted writes
+
+ spin_unlock_irqrestore(&opb->lock, flags);
+}
+
+static void opb_ack_irq(struct irq_data *d)
+{
+ struct opb_pic *opb;
+ unsigned long flags;
+ u32 bitset;
+
+ opb = d->chip_data;
+ bitset = (1 << (31 - irqd_to_hwirq(d)));
+
+ spin_lock_irqsave(&opb->lock, flags);
+
+ opb_out(opb, OPB_MLSIR, bitset);
+ opb_in(opb, OPB_MLSIR); // Flush posted writes
+
+ spin_unlock_irqrestore(&opb->lock, flags);
+}
+
+static void opb_mask_ack_irq(struct irq_data *d)
+{
+ struct opb_pic *opb;
+ unsigned long flags;
+ u32 bitset;
+ u32 ier, ir;
+
+ opb = d->chip_data;
+ bitset = (1 << (31 - irqd_to_hwirq(d)));
+
+ spin_lock_irqsave(&opb->lock, flags);
+
+ ier = opb_in(opb, OPB_MLSIER);
+ opb_out(opb, OPB_MLSIER, ier & ~bitset);
+ ier = opb_in(opb, OPB_MLSIER); // Flush posted writes
+
+ opb_out(opb, OPB_MLSIR, bitset);
+ ir = opb_in(opb, OPB_MLSIR); // Flush posted writes
+
+ spin_unlock_irqrestore(&opb->lock, flags);
+}
+
+static int opb_set_irq_type(struct irq_data *d, unsigned int flow)
+{
+ struct opb_pic *opb;
+ unsigned long flags;
+ int invert, ipr, mask, bit;
+
+ opb = d->chip_data;
+
+ /* The only information we're interested in from the type is whether
+ * it's a high or low trigger. For high triggered interrupts, the polarity
+ * set for it in the MLS Interrupt Polarity Register is 0, for low
+ * interrupts it's 1 so that the proper input in the MLS Interrupt Inputs
+ * Register is interpreted as asserting the interrupt. */
+
+ switch (flow) {
+ case IRQ_TYPE_NONE:
+ opb_mask_irq(d);
+ return 0;
+
+ case IRQ_TYPE_LEVEL_HIGH:
+ invert = 0;
+ break;
+
+ case IRQ_TYPE_LEVEL_LOW:
+ invert = 1;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ bit = (1 << (31 - irqd_to_hwirq(d)));
+ mask = ~bit;
+
+ spin_lock_irqsave(&opb->lock, flags);
+
+ ipr = opb_in(opb, OPB_MLSIPR);
+ ipr = (ipr & mask) | (invert ? bit : 0);
+ opb_out(opb, OPB_MLSIPR, ipr);
+ ipr = opb_in(opb, OPB_MLSIPR); // Flush posted writes
+
+ spin_unlock_irqrestore(&opb->lock, flags);
+
+ /* Record the type in the interrupt descriptor */
+ irqd_set_trigger_type(d, flow);
+
+ return 0;
+}
+
+static struct irq_chip opb_irq_chip = {
+ .name = "OPB",
+ .irq_mask = opb_mask_irq,
+ .irq_unmask = opb_unmask_irq,
+ .irq_mask_ack = opb_mask_ack_irq,
+ .irq_ack = opb_ack_irq,
+ .irq_set_type = opb_set_irq_type
+};
+
+static int opb_host_map(struct irq_host *host, unsigned int virq,
+ irq_hw_number_t hwirq)
+{
+ struct opb_pic *opb;
+
+ opb = host->host_data;
+
+ /* Most of the important stuff is handled by the generic host code, like
+ * the lookup, so just attach some info to the virtual irq */
+
+ irq_set_chip_data(virq, opb);
+ irq_set_chip_and_handler(virq, &opb_irq_chip, handle_level_irq);
+ irq_set_irq_type(virq, IRQ_TYPE_NONE);
+
+ return 0;
+}
+
+static int opb_host_xlate(struct irq_host *host, struct device_node *dn,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type)
+{
+ /* Interrupt size must == 2 */
+ BUG_ON(intsize != 2);
+ *out_hwirq = intspec[0];
+ *out_type = intspec[1];
+ return 0;
+}
+
+static struct irq_host_ops opb_host_ops = {
+ .map = opb_host_map,
+ .xlate = opb_host_xlate,
+};
+
+irqreturn_t opb_irq_handler(int irq, void *private)
+{
+ struct opb_pic *opb;
+ u32 ir, src, subvirq;
+
+ opb = (struct opb_pic *) private;
+
+ /* Read the OPB MLS Interrupt Register for
+ * asserted interrupts */
+ ir = opb_in(opb, OPB_MLSIR);
+ if (!ir)
+ return IRQ_NONE;
+
+ do {
+ /* Convert the ffs() bit position (1-32) into a source number (0-31), *NOT* a bit mask */
+ src = 32 - ffs(ir);
+
+ /* Translate from the OPB's conception of interrupt number to
+ * Linux's virtual IRQ */
+
+ subvirq = irq_linear_revmap(opb->host, src);
+
+ generic_handle_irq(subvirq);
+ } while ((ir = opb_in(opb, OPB_MLSIR)));
+
+ return IRQ_HANDLED;
+}
+
+struct opb_pic *opb_pic_init_one(struct device_node *dn)
+{
+ struct opb_pic *opb;
+ struct resource res;
+
+ if (of_address_to_resource(dn, 0, &res)) {
+ printk(KERN_ERR "opb: Couldn't translate resource\n");
+ return NULL;
+ }
+
+ opb = kzalloc(sizeof(struct opb_pic), GFP_KERNEL);
+ if (!opb) {
+ printk(KERN_ERR "opb: Failed to allocate opb struct!\n");
+ return NULL;
+ }
+
+ /* Get access to the OPB MMIO registers */
+ opb->regs = ioremap(res.start + 0x10000, 0x1000);
+ if (!opb->regs) {
+ printk(KERN_ERR "opb: Failed to allocate register space!\n");
+ goto free_opb;
+ }
+
+ /* Allocate an irq host so that Linux knows that despite only
+ * having one interrupt to issue, we're the controller for multiple
+ * hardware IRQs, so later we can lookup their virtual IRQs. */
+
+ opb->host = irq_alloc_host(dn, IRQ_HOST_MAP_LINEAR,
+ OPB_NR_IRQS, &opb_host_ops, -1);
+
+ if (!opb->host) {
+ printk(KERN_ERR "opb: Failed to allocate IRQ host!\n");
+ goto free_regs;
+ }
+
+ opb->index = opb_index++;
+ spin_lock_init(&opb->lock);
+ opb->host->host_data = opb;
+
+ /* Disable all interrupts by default */
+ opb_out(opb, OPB_MLSASIER, 0);
+ opb_out(opb, OPB_MLSIER, 0);
+
+ /* ACK any interrupts left by FW */
+ opb_out(opb, OPB_MLSIR, 0xFFFFFFFF);
+
+ return opb;
+
+free_regs:
+ iounmap(opb->regs);
+free_opb:
+ kfree(opb);
+ return NULL;
+}
+
+void __init opb_pic_init(void)
+{
+ struct device_node *dn;
+ struct opb_pic *opb;
+ int virq;
+ int rc;
+
+ /* Call init_one for each OPB device */
+ for_each_compatible_node(dn, NULL, "ibm,opb") {
+
+ /* Fill in an OPB struct */
+ opb = opb_pic_init_one(dn);
+ if (!opb) {
+ printk(KERN_WARNING "opb: Failed to init node, skipped!\n");
+ continue;
+ }
+
+ /* Map / get opb's hardware virtual irq */
+ virq = irq_of_parse_and_map(dn, 0);
+ if (virq <= 0) {
+ printk("opb: irq_op_parse_and_map failed!\n");
+ continue;
+ }
+
+ /* Attach opb interrupt handler to new virtual IRQ */
+ rc = request_irq(virq, opb_irq_handler, 0, "OPB LS Cascade", opb);
+ if (rc) {
+ printk("opb: request_irq failed: %d\n", rc);
+ continue;
+ }
+
+ printk("OPB%d init with %d IRQs at %p\n", opb->index,
+ OPB_NR_IRQS, opb->regs);
+ }
+}
diff --git a/arch/powerpc/platforms/wsp/psr2.c b/arch/powerpc/platforms/wsp/psr2.c
new file mode 100644
index 000000000000..40f28916ff6c
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/psr2.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2008-2011, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/of.h>
+#include <linux/smp.h>
+
+#include <asm/machdep.h>
+#include <asm/system.h>
+#include <asm/time.h>
+#include <asm/udbg.h>
+
+#include "ics.h"
+#include "wsp.h"
+
+
+static void psr2_spin(void)
+{
+ hard_irq_disable();
+ for (;;) ;
+}
+
+static void psr2_restart(char *cmd)
+{
+ psr2_spin();
+}
+
+static int psr2_probe_devices(void)
+{
+ struct device_node *np;
+
+ /* Our RTC is a ds1500. It seems to be programmatically compatible
+ * with the ds1511 for which we have a driver so let's use that
+ */
+ np = of_find_compatible_node(NULL, NULL, "dallas,ds1500");
+ if (np != NULL) {
+ struct resource res;
+ if (of_address_to_resource(np, 0, &res) == 0)
+ platform_device_register_simple("ds1511", 0, &res, 1);
+ }
+ return 0;
+}
+machine_arch_initcall(psr2_md, psr2_probe_devices);
+
+static void __init psr2_setup_arch(void)
+{
+ /* init to some ~sane value until calibrate_delay() runs */
+ loops_per_jiffy = 50000000;
+
+ scom_init_wsp();
+
+ /* Setup SMP callback */
+#ifdef CONFIG_SMP
+ a2_setup_smp();
+#endif
+}
+
+static int __init psr2_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ if (!of_flat_dt_is_compatible(root, "ibm,psr2"))
+ return 0;
+
+ return 1;
+}
+
+static void __init psr2_init_irq(void)
+{
+ wsp_init_irq();
+ opb_pic_init();
+}
+
+define_machine(psr2_md) {
+ .name = "PSR2 A2",
+ .probe = psr2_probe,
+ .setup_arch = psr2_setup_arch,
+ .restart = psr2_restart,
+ .power_off = psr2_spin,
+ .halt = psr2_spin,
+ .calibrate_decr = generic_calibrate_decr,
+ .init_IRQ = psr2_init_irq,
+ .progress = udbg_progress,
+ .power_save = book3e_idle,
+};
diff --git a/arch/powerpc/platforms/wsp/scom_smp.c b/arch/powerpc/platforms/wsp/scom_smp.c
new file mode 100644
index 000000000000..141e78032097
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/scom_smp.c
@@ -0,0 +1,427 @@
+/*
+ * SCOM support for A2 platforms
+ *
+ * Copyright 2007-2011 Benjamin Herrenschmidt, David Gibson,
+ * Michael Ellerman, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/cpumask.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <asm/cputhreads.h>
+#include <asm/reg_a2.h>
+#include <asm/scom.h>
+#include <asm/udbg.h>
+
+#include "wsp.h"
+
+#define SCOM_RAMC 0x2a /* Ram Command */
+#define SCOM_RAMC_TGT1_EXT 0x80000000
+#define SCOM_RAMC_SRC1_EXT 0x40000000
+#define SCOM_RAMC_SRC2_EXT 0x20000000
+#define SCOM_RAMC_SRC3_EXT 0x10000000
+#define SCOM_RAMC_ENABLE 0x00080000
+#define SCOM_RAMC_THREADSEL 0x00060000
+#define SCOM_RAMC_EXECUTE 0x00010000
+#define SCOM_RAMC_MSR_OVERRIDE 0x00008000
+#define SCOM_RAMC_MSR_PR 0x00004000
+#define SCOM_RAMC_MSR_GS 0x00002000
+#define SCOM_RAMC_FORCE 0x00001000
+#define SCOM_RAMC_FLUSH 0x00000800
+#define SCOM_RAMC_INTERRUPT 0x00000004
+#define SCOM_RAMC_ERROR 0x00000002
+#define SCOM_RAMC_DONE 0x00000001
+#define SCOM_RAMI 0x29 /* Ram Instruction */
+#define SCOM_RAMIC 0x28 /* Ram Instruction and Command */
+#define SCOM_RAMIC_INSN 0xffffffff00000000
+#define SCOM_RAMD 0x2d /* Ram Data */
+#define SCOM_RAMDH 0x2e /* Ram Data High */
+#define SCOM_RAMDL 0x2f /* Ram Data Low */
+#define SCOM_PCCR0 0x33 /* PC Configuration Register 0 */
+#define SCOM_PCCR0_ENABLE_DEBUG 0x80000000
+#define SCOM_PCCR0_ENABLE_RAM 0x40000000
+#define SCOM_THRCTL 0x30 /* Thread Control and Status */
+#define SCOM_THRCTL_T0_STOP 0x80000000
+#define SCOM_THRCTL_T1_STOP 0x40000000
+#define SCOM_THRCTL_T2_STOP 0x20000000
+#define SCOM_THRCTL_T3_STOP 0x10000000
+#define SCOM_THRCTL_T0_STEP 0x08000000
+#define SCOM_THRCTL_T1_STEP 0x04000000
+#define SCOM_THRCTL_T2_STEP 0x02000000
+#define SCOM_THRCTL_T3_STEP 0x01000000
+#define SCOM_THRCTL_T0_RUN 0x00800000
+#define SCOM_THRCTL_T1_RUN 0x00400000
+#define SCOM_THRCTL_T2_RUN 0x00200000
+#define SCOM_THRCTL_T3_RUN 0x00100000
+#define SCOM_THRCTL_T0_PM 0x00080000
+#define SCOM_THRCTL_T1_PM 0x00040000
+#define SCOM_THRCTL_T2_PM 0x00020000
+#define SCOM_THRCTL_T3_PM 0x00010000
+#define SCOM_THRCTL_T0_UDE 0x00008000
+#define SCOM_THRCTL_T1_UDE 0x00004000
+#define SCOM_THRCTL_T2_UDE 0x00002000
+#define SCOM_THRCTL_T3_UDE 0x00001000
+#define SCOM_THRCTL_ASYNC_DIS 0x00000800
+#define SCOM_THRCTL_TB_DIS 0x00000400
+#define SCOM_THRCTL_DEC_DIS 0x00000200
+#define SCOM_THRCTL_AND 0x31 /* Thread Control and Status */
+#define SCOM_THRCTL_OR 0x32 /* Thread Control and Status */
+
+
+static DEFINE_PER_CPU(scom_map_t, scom_ptrs);
+
+static scom_map_t get_scom(int cpu, struct device_node *np, int *first_thread)
+{
+ scom_map_t scom = per_cpu(scom_ptrs, cpu);
+ int tcpu;
+
+ if (scom_map_ok(scom)) {
+ *first_thread = 0;
+ return scom;
+ }
+
+ *first_thread = 1;
+
+ scom = scom_map_device(np, 0);
+
+ for (tcpu = cpu_first_thread_sibling(cpu);
+ tcpu <= cpu_last_thread_sibling(cpu); tcpu++)
+ per_cpu(scom_ptrs, tcpu) = scom;
+
+ /* Hack: for the boot core, this will actually get called on
+ * the second thread up, not the first, so our test above will
+ * set first_thread incorrectly. */
+ if (cpu_first_thread_sibling(cpu) == 0)
+ *first_thread = 0;
+
+ return scom;
+}
+
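+/*
+ * "RAM" a single instruction into a (stopped) hardware thread: write the
+ * opcode and command bits to SCOM_RAMIC, then poll SCOM_RAMC until the
+ * DONE, INTERRUPT or ERROR status bit comes back.
+ */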
+static int a2_scom_ram(scom_map_t scom, int thread, u32 insn, int extmask)
+{
+ u64 cmd, mask, val;
+ int n = 0;
+
+ cmd = ((u64)insn << 32) | (((u64)extmask & 0xf) << 28)
+ | ((u64)thread << 17) | SCOM_RAMC_ENABLE | SCOM_RAMC_EXECUTE;
+ mask = SCOM_RAMC_DONE | SCOM_RAMC_INTERRUPT | SCOM_RAMC_ERROR;
+
+ scom_write(scom, SCOM_RAMIC, cmd);
+
+ while (!((val = scom_read(scom, SCOM_RAMC)) & mask)) {
+ pr_devel("Waiting on RAMC = 0x%llx\n", val);
+ if (++n == 3) {
+ pr_err("RAMC timeout on instruction 0x%08x, thread %d\n",
+ insn, thread);
+ return -1;
+ }
+ }
+
+ if (val & SCOM_RAMC_INTERRUPT) {
+ pr_err("RAMC interrupt on instruction 0x%08x, thread %d\n",
+ insn, thread);
+ return -SCOM_RAMC_INTERRUPT;
+ }
+
+ if (val & SCOM_RAMC_ERROR) {
+ pr_err("RAMC error on instruction 0x%08x, thread %d\n",
+ insn, thread);
+ return -SCOM_RAMC_ERROR;
+ }
+
+ return 0;
+}
+
+static int a2_scom_getgpr(scom_map_t scom, int thread, int gpr, int alt,
+ u64 *out_gpr)
+{
+ int rc;
+
+ /* or rN, rN, rN */
+ u32 insn = 0x7c000378 | (gpr << 21) | (gpr << 16) | (gpr << 11);
+ rc = a2_scom_ram(scom, thread, insn, alt ? 0xf : 0x0);
+ if (rc)
+ return rc;
+
+ *out_gpr = scom_read(scom, SCOM_RAMD);
+
+ return 0;
+}
+
+static int a2_scom_getspr(scom_map_t scom, int thread, int spr, u64 *out_spr)
+{
+ int rc, sprhi, sprlo;
+ u32 insn;
+
+ sprhi = spr >> 5;
+ sprlo = spr & 0x1f;
+ insn = 0x7c2002a6 | (sprlo << 16) | (sprhi << 11); /* mfspr r1,spr */
+
+ if (spr == 0x0ff0)
+ insn = 0x7c2000a6; /* mfmsr r1 */
+
+ rc = a2_scom_ram(scom, thread, insn, 0xf);
+ if (rc)
+ return rc;
+ return a2_scom_getgpr(scom, thread, 1, 1, out_spr);
+}
+
+static int a2_scom_setgpr(scom_map_t scom, int thread, int gpr,
+ int alt, u64 val)
+{
+ u32 lis = 0x3c000000 | (gpr << 21);
+ u32 li = 0x38000000 | (gpr << 21);
+ u32 oris = 0x64000000 | (gpr << 21) | (gpr << 16);
+ u32 ori = 0x60000000 | (gpr << 21) | (gpr << 16);
+ u32 rldicr32 = 0x780007c6 | (gpr << 21) | (gpr << 16);
+ u32 highest = val >> 48;
+ u32 higher = (val >> 32) & 0xffff;
+ u32 high = (val >> 16) & 0xffff;
+ u32 low = val & 0xffff;
+ int lext = alt ? 0x8 : 0x0;
+ int oext = alt ? 0xf : 0x0;
+ int rc = 0;
+
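+ /* Build the 64-bit value in the GPR by RAMing an immediate-load
+ * sequence, e.g. for val = 0x0123456789abcdef this emits
+ * lis, oris, rldicr 32, oris, ori; zero halfwords are skipped
+ * where possible. */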
+ if (highest)
+ rc |= a2_scom_ram(scom, thread, lis | highest, lext);
+
+ if (higher) {
+ if (highest)
+ rc |= a2_scom_ram(scom, thread, oris | higher, oext);
+ else
+ rc |= a2_scom_ram(scom, thread, li | higher, lext);
+ }
+
+ if (highest || higher)
+ rc |= a2_scom_ram(scom, thread, rldicr32, oext);
+
+ if (high) {
+ if (highest || higher)
+ rc |= a2_scom_ram(scom, thread, oris | high, oext);
+ else
+ rc |= a2_scom_ram(scom, thread, lis | high, lext);
+ }
+
+ if (highest || higher || high)
+ rc |= a2_scom_ram(scom, thread, ori | low, oext);
+ else
+ rc |= a2_scom_ram(scom, thread, li | low, lext);
+
+ return rc;
+}
+
+static int a2_scom_setspr(scom_map_t scom, int thread, int spr, u64 val)
+{
+ int sprhi = spr >> 5;
+ int sprlo = spr & 0x1f;
+ /* mtspr spr, r1 */
+ u32 insn = 0x7c2003a6 | (sprlo << 16) | (sprhi << 11);
+
+ if (spr == 0x0ff0)
+ insn = 0x7c200124; /* mtmsr r1 */
+
+ if (a2_scom_setgpr(scom, thread, 1, 1, val))
+ return -1;
+
+ return a2_scom_ram(scom, thread, insn, 0xf);
+}
+
+static int a2_scom_initial_tlb(scom_map_t scom, int thread)
+{
+ extern u32 a2_tlbinit_code_start[], a2_tlbinit_code_end[];
+ extern u32 a2_tlbinit_after_iprot_flush[];
+ extern u32 a2_tlbinit_after_linear_map[];
+ u32 assoc, entries, i;
+ u64 epn, tlbcfg;
+ u32 *p;
+ int rc;
+
+ /* Invalidate all entries (including iprot) */
+
+ rc = a2_scom_getspr(scom, thread, SPRN_TLB0CFG, &tlbcfg);
+ if (rc)
+ goto scom_fail;
+ entries = tlbcfg & TLBnCFG_N_ENTRY;
+ assoc = (tlbcfg & TLBnCFG_ASSOC) >> 24;
+ epn = 0;
+
+ /* Set MMUCR2 to enable 4K, 64K, 1M, 16M and 1G pages */
+ a2_scom_setspr(scom, thread, SPRN_MMUCR2, 0x000a7531);
+ /* Set MMUCR3 to write all thid bits to the TLB */
+ a2_scom_setspr(scom, thread, SPRN_MMUCR3, 0x0000000f);
+
+ /* Set MAS1 for 1G page size, and MAS2 to our initial EPN */
+ a2_scom_setspr(scom, thread, SPRN_MAS1, MAS1_TSIZE(BOOK3E_PAGESZ_1GB));
+ a2_scom_setspr(scom, thread, SPRN_MAS2, epn);
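+
+ /* Each tlbwe below rewrites one (set, way): ESEL cycles through the
+ * ways and the EPN advances by 1GB once a set fills (every 'assoc'
+ * entries), so the loop walks the entire TLB. */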
+ for (i = 0; i < entries; i++) {
+
+ a2_scom_setspr(scom, thread, SPRN_MAS0, MAS0_ESEL(i % assoc));
+
+ /* tlbwe */
+ rc = a2_scom_ram(scom, thread, 0x7c0007a4, 0);
+ if (rc)
+ goto scom_fail;
+
+ /* Next entry is new address? */
+ if ((i + 1) % assoc == 0) {
+ epn += (1 << 30);
+ a2_scom_setspr(scom, thread, SPRN_MAS2, epn);
+ }
+ }
+
+ /* Setup args for linear mapping */
+ rc = a2_scom_setgpr(scom, thread, 3, 0, MAS0_TLBSEL(0));
+ if (rc)
+ goto scom_fail;
+
+ /* Linear mapping */
+ for (p = a2_tlbinit_code_start; p < a2_tlbinit_after_linear_map; p++) {
+ rc = a2_scom_ram(scom, thread, *p, 0);
+ if (rc)
+ goto scom_fail;
+ }
+
+ /*
+ * For the boot thread, between the linear mapping and the debug
+ * mappings there is a loop to flush iprot mappings. Ramming doesn't do
+ * branches, but the secondary threads don't need to be nearly as smart
+ * (i.e. we don't need to worry about invalidating the mapping we're
+ * standing on).
+ */
+
+ /* Debug mappings. Expects r11 = MAS0 from linear map (set above) */
+ for (p = a2_tlbinit_after_iprot_flush; p < a2_tlbinit_code_end; p++) {
+ rc = a2_scom_ram(scom, thread, *p, 0);
+ if (rc)
+ goto scom_fail;
+ }
+
+scom_fail:
+ if (rc)
+ pr_err("Setting up initial TLB failed, err %d\n", rc);
+
+ if (rc == -SCOM_RAMC_INTERRUPT) {
+ /* Interrupt, dump some status */
+ int rc[10];
+ u64 iar, srr0, srr1, esr, mas0, mas1, mas2, mas7_3, mas8, ccr2;
+ rc[0] = a2_scom_getspr(scom, thread, SPRN_IAR, &iar);
+ rc[1] = a2_scom_getspr(scom, thread, SPRN_SRR0, &srr0);
+ rc[2] = a2_scom_getspr(scom, thread, SPRN_SRR1, &srr1);
+ rc[3] = a2_scom_getspr(scom, thread, SPRN_ESR, &esr);
+ rc[4] = a2_scom_getspr(scom, thread, SPRN_MAS0, &mas0);
+ rc[5] = a2_scom_getspr(scom, thread, SPRN_MAS1, &mas1);
+ rc[6] = a2_scom_getspr(scom, thread, SPRN_MAS2, &mas2);
+ rc[7] = a2_scom_getspr(scom, thread, SPRN_MAS7_MAS3, &mas7_3);
+ rc[8] = a2_scom_getspr(scom, thread, SPRN_MAS8, &mas8);
+ rc[9] = a2_scom_getspr(scom, thread, SPRN_A2_CCR2, &ccr2);
+ pr_err(" -> retreived IAR =0x%llx (err %d)\n", iar, rc[0]);
+ pr_err(" retreived SRR0=0x%llx (err %d)\n", srr0, rc[1]);
+ pr_err(" retreived SRR1=0x%llx (err %d)\n", srr1, rc[2]);
+ pr_err(" retreived ESR =0x%llx (err %d)\n", esr, rc[3]);
+ pr_err(" retreived MAS0=0x%llx (err %d)\n", mas0, rc[4]);
+ pr_err(" retreived MAS1=0x%llx (err %d)\n", mas1, rc[5]);
+ pr_err(" retreived MAS2=0x%llx (err %d)\n", mas2, rc[6]);
+ pr_err(" retreived MS73=0x%llx (err %d)\n", mas7_3, rc[7]);
+ pr_err(" retreived MAS8=0x%llx (err %d)\n", mas8, rc[8]);
+ pr_err(" retreived CCR2=0x%llx (err %d)\n", ccr2, rc[9]);
+ }
+
+ return rc;
+}
+
+int __devinit a2_scom_startup_cpu(unsigned int lcpu, int thr_idx,
+ struct device_node *np)
+{
+ u64 init_iar, init_msr, init_ccr2;
+ unsigned long start_here;
+ int rc, core_setup;
+ scom_map_t scom;
+ u64 pccr0;
+
+ scom = get_scom(lcpu, np, &core_setup);
+ if (!scom) {
+ printk(KERN_ERR "Couldn't map SCOM for CPU%d\n", lcpu);
+ return -1;
+ }
+
+ pr_devel("Bringing up CPU%d using SCOM...\n", lcpu);
+
+ pccr0 = scom_read(scom, SCOM_PCCR0);
+ scom_write(scom, SCOM_PCCR0, pccr0 | SCOM_PCCR0_ENABLE_DEBUG |
+ SCOM_PCCR0_ENABLE_RAM);
+
+ /* Stop the thread with THRCTL. If we are setting up the TLB we stop all
+ * threads. We also disable asynchronous interrupts while RAMing.
+ */
+ if (core_setup)
+ scom_write(scom, SCOM_THRCTL_OR,
+ SCOM_THRCTL_T0_STOP |
+ SCOM_THRCTL_T1_STOP |
+ SCOM_THRCTL_T2_STOP |
+ SCOM_THRCTL_T3_STOP |
+ SCOM_THRCTL_ASYNC_DIS);
+ else
+ scom_write(scom, SCOM_THRCTL_OR, SCOM_THRCTL_T0_STOP >> thr_idx);
+
+ /* Flush its pipeline just in case */
+ scom_write(scom, SCOM_RAMC, ((u64)thr_idx << 17) |
+ SCOM_RAMC_FLUSH | SCOM_RAMC_ENABLE);
+
+ a2_scom_getspr(scom, thr_idx, SPRN_IAR, &init_iar);
+ a2_scom_getspr(scom, thr_idx, 0x0ff0, &init_msr);
+ a2_scom_getspr(scom, thr_idx, SPRN_A2_CCR2, &init_ccr2);
+
+ /* Set MSR to MSR_CM (0x0ff0 is magic value for MSR_CM) */
+ rc = a2_scom_setspr(scom, thr_idx, 0x0ff0, MSR_CM);
+ if (rc) {
+ pr_err("Failed to set MSR ! err %d\n", rc);
+ return rc;
+ }
+
+ /* RAM in a sync/isync for the sake of it */
+ a2_scom_ram(scom, thr_idx, 0x7c0004ac, 0);
+ a2_scom_ram(scom, thr_idx, 0x4c00012c, 0);
+
+ if (core_setup) {
+ pr_devel("CPU%d is first thread in core, initializing TLB...\n",
+ lcpu);
+ rc = a2_scom_initial_tlb(scom, thr_idx);
+ if (rc)
+ goto fail;
+ }
+
+ start_here = *(unsigned long *)(core_setup ? generic_secondary_smp_init
+ : generic_secondary_thread_init);
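+ /* On ppc64 a function pointer refers to a function descriptor;
+ * dereferencing it yields the raw entry address to load into the
+ * thread's IAR. */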
+ pr_devel("CPU%d entry point at 0x%lx...\n", lcpu, start_here);
+
+ rc |= a2_scom_setspr(scom, thr_idx, SPRN_IAR, start_here);
+ rc |= a2_scom_setgpr(scom, thr_idx, 3, 0,
+ get_hard_smp_processor_id(lcpu));
+ /*
+ * Tell book3e_secondary_core_init not to set up the TLB, we've
+ * already done that.
+ */
+ rc |= a2_scom_setgpr(scom, thr_idx, 4, 0, 1);
+
+ rc |= a2_scom_setspr(scom, thr_idx, SPRN_TENS, 0x1 << thr_idx);
+
+ scom_write(scom, SCOM_RAMC, 0);
+ scom_write(scom, SCOM_THRCTL_AND, ~(SCOM_THRCTL_T0_STOP >> thr_idx));
+ scom_write(scom, SCOM_PCCR0, pccr0);
+fail:
+ pr_devel(" SCOM initialization %s\n", rc ? "failed" : "succeeded");
+ if (rc) {
+ pr_err("Old IAR=0x%08llx MSR=0x%08llx CCR2=0x%08llx\n",
+ init_iar, init_msr, init_ccr2);
+ }
+
+ return rc;
+}
diff --git a/arch/powerpc/platforms/wsp/scom_wsp.c b/arch/powerpc/platforms/wsp/scom_wsp.c
new file mode 100644
index 000000000000..4052e2259f30
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/scom_wsp.c
@@ -0,0 +1,77 @@
+/*
+ * SCOM backend for WSP
+ *
+ * Copyright 2010 Benjamin Herrenschmidt, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/cpumask.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <asm/cputhreads.h>
+#include <asm/reg_a2.h>
+#include <asm/scom.h>
+#include <asm/udbg.h>
+
+#include "wsp.h"
+
+
+static scom_map_t wsp_scom_map(struct device_node *dev, u64 reg, u64 count)
+{
+ struct resource r;
+ u64 xscom_addr;
+
+ if (!of_get_property(dev, "scom-controller", NULL)) {
+ pr_err("%s: device %s is not a SCOM controller\n",
+ __func__, dev->full_name);
+ return SCOM_MAP_INVALID;
+ }
+
+ if (of_address_to_resource(dev, 0, &r)) {
+ pr_debug("Failed to find SCOM controller address\n");
+ return 0;
+ }
+
+ /* Transform the SCOM address into an XSCOM offset */
+ xscom_addr = ((reg & 0x7f000000) >> 1) | ((reg & 0xfffff) << 3);
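+ /* (bits 24-30 of the SCOM address shift down by one and the low
+ * 20 bits become a doubleword offset into the XSCOM MMIO range) */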
+
+ return (scom_map_t)ioremap(r.start + xscom_addr, count << 3);
+}
+
+static void wsp_scom_unmap(scom_map_t map)
+{
+ iounmap((void *)map);
+}
+
+static u64 wsp_scom_read(scom_map_t map, u32 reg)
+{
+ u64 __iomem *addr = (u64 __iomem *)map;
+
+ return in_be64(addr + reg);
+}
+
+static void wsp_scom_write(scom_map_t map, u32 reg, u64 value)
+{
+ u64 __iomem *addr = (u64 __iomem *)map;
+
+ out_be64(addr + reg, value);
+}
+
+static const struct scom_controller wsp_scom_controller = {
+ .map = wsp_scom_map,
+ .unmap = wsp_scom_unmap,
+ .read = wsp_scom_read,
+ .write = wsp_scom_write
+};
+
+void scom_init_wsp(void)
+{
+ scom_init(&wsp_scom_controller);
+}
diff --git a/arch/powerpc/platforms/wsp/setup.c b/arch/powerpc/platforms/wsp/setup.c
new file mode 100644
index 000000000000..11ac2f05e01c
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/setup.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2010 Michael Ellerman, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/of_platform.h>
+
+#include "wsp.h"
+
+/*
+ * Find chip-id by walking up device tree looking for ibm,wsp-chip-id property.
+ * Won't work for nodes that are not a descendant of a wsp node.
+ */
+int wsp_get_chip_id(struct device_node *dn)
+{
+ const u32 *p;
+ int rc;
+
+ /* Start looking at the specified node, not its parent */
+ dn = of_node_get(dn);
+ while (dn && !(p = of_get_property(dn, "ibm,wsp-chip-id", NULL)))
+ dn = of_get_next_parent(dn);
+
+ if (!dn)
+ return -1;
+
+ rc = *p;
+ of_node_put(dn);
+
+ return rc;
+}
diff --git a/arch/powerpc/platforms/wsp/smp.c b/arch/powerpc/platforms/wsp/smp.c
new file mode 100644
index 000000000000..9d20fa9d3710
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/smp.c
@@ -0,0 +1,88 @@
+/*
+ * SMP Support for A2 platforms
+ *
+ * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/smp.h>
+
+#include <asm/dbell.h>
+#include <asm/machdep.h>
+#include <asm/xics.h>
+
+#include "ics.h"
+#include "wsp.h"
+
+static void __devinit smp_a2_setup_cpu(int cpu)
+{
+ doorbell_setup_this_cpu();
+
+ if (cpu != boot_cpuid)
+ xics_setup_cpu();
+}
+
+int __devinit smp_a2_kick_cpu(int nr)
+{
+ const char *enable_method;
+ struct device_node *np;
+ int thr_idx;
+
+ if (nr < 0 || nr >= NR_CPUS)
+ return -ENOENT;
+
+ np = of_get_cpu_node(nr, &thr_idx);
+ if (!np)
+ return -ENODEV;
+
+ enable_method = of_get_property(np, "enable-method", NULL);
+ pr_devel("CPU%d has enable-method: \"%s\"\n", nr, enable_method);
+
+ if (!enable_method) {
+ printk(KERN_ERR "CPU%d has no enable-method\n", nr);
+ return -ENOENT;
+ } else if (strcmp(enable_method, "ibm,a2-scom") == 0) {
+ if (a2_scom_startup_cpu(nr, thr_idx, np))
+ return -1;
+ } else {
+ printk(KERN_ERR "CPU%d: Don't understand enable-method \"%s\"\n",
+ nr, enable_method);
+ return -EINVAL;
+ }
+
+ /*
+ * The processor is currently spinning, waiting for the
+ * cpu_start field to become non-zero. After we set cpu_start,
+ * the processor will continue on to secondary_start
+ */
+ paca[nr].cpu_start = 1;
+
+ return 0;
+}
+
+static int __init smp_a2_probe(void)
+{
+ return cpus_weight(cpu_possible_map);
+}
+
+static struct smp_ops_t a2_smp_ops = {
+ .message_pass = smp_muxed_ipi_message_pass,
+ .cause_ipi = doorbell_cause_ipi,
+ .probe = smp_a2_probe,
+ .kick_cpu = smp_a2_kick_cpu,
+ .setup_cpu = smp_a2_setup_cpu,
+};
+
+void __init a2_setup_smp(void)
+{
+ smp_ops = &a2_smp_ops;
+}
diff --git a/arch/powerpc/platforms/wsp/wsp.h b/arch/powerpc/platforms/wsp/wsp.h
new file mode 100644
index 000000000000..7c3e087fd2f2
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/wsp.h
@@ -0,0 +1,17 @@
+#ifndef __WSP_H
+#define __WSP_H
+
+#include <asm/wsp.h>
+
+extern void wsp_setup_pci(void);
+extern void scom_init_wsp(void);
+
+extern void a2_setup_smp(void);
+extern int a2_scom_startup_cpu(unsigned int lcpu, int thr_idx,
+ struct device_node *np);
+int smp_a2_cpu_bootable(unsigned int nr);
+int __devinit smp_a2_kick_cpu(int nr);
+
+void opb_pic_init(void);
+
+#endif /* __WSP_H */
diff --git a/arch/powerpc/sysdev/Kconfig b/arch/powerpc/sysdev/Kconfig
index 396582835cb5..d775fd148d13 100644
--- a/arch/powerpc/sysdev/Kconfig
+++ b/arch/powerpc/sysdev/Kconfig
@@ -12,3 +12,13 @@ config PPC_MSI_BITMAP
depends on PCI_MSI
default y if MPIC
default y if FSL_PCI
+
+source "arch/powerpc/sysdev/xics/Kconfig"
+
+config PPC_SCOM
+ bool
+
+config SCOM_DEBUGFS
+ bool "Expose SCOM controllers via debugfs"
+ depends on PPC_SCOM
+ default n
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 1e0c933ef772..6076e0074a87 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -57,3 +57,9 @@ obj-$(CONFIG_PPC_MPC52xx) += mpc5xxx_clocks.o
ifeq ($(CONFIG_SUSPEND),y)
obj-$(CONFIG_6xx) += 6xx-suspend.o
endif
+
+obj-$(CONFIG_PPC_SCOM) += scom.o
+
+subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
+
+obj-$(CONFIG_PPC_XICS) += xics/
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index 27402c7d309d..bd0d54060b94 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -95,7 +95,7 @@ axon_ram_irq_handler(int irq, void *dev)
BUG_ON(!bank);
- dev_err(&device->dev, "Correctable memory error occured\n");
+ dev_err(&device->dev, "Correctable memory error occurred\n");
bank->ecc_counter++;
return IRQ_HANDLED;
}
@@ -216,7 +216,7 @@ static int axon_ram_probe(struct platform_device *device)
AXON_RAM_DEVICE_NAME, axon_ram_bank_id, bank->size >> 20);
bank->ph_addr = resource.start;
- bank->io_addr = (unsigned long) ioremap_flags(
+ bank->io_addr = (unsigned long) ioremap_prot(
bank->ph_addr, bank->size, _PAGE_NO_CACHE);
if (bank->io_addr == 0) {
dev_err(&device->dev, "ioremap() failed\n");
diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm.h b/arch/powerpc/sysdev/bestcomm/bestcomm.h
index 23a95f80dfdb..a0e2e6b19b57 100644
--- a/arch/powerpc/sysdev/bestcomm/bestcomm.h
+++ b/arch/powerpc/sysdev/bestcomm/bestcomm.h
@@ -20,7 +20,7 @@
* struct bcom_bd - Structure describing a generic BestComm buffer descriptor
* @status: The current status of this buffer. Exact meaning depends on the
* task type
- * @data: An array of u32 extra data. Size of array is task dependant.
+ * @data: An array of u32 extra data. Size of array is task dependent.
*
* Note: Don't dereference a bcom_bd pointer as an array. The size of the
* bcom_bd is variable. Use bcom_get_bd() instead.
diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h b/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h
index eb0d1c883c31..3b52f3ffbdf8 100644
--- a/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h
+++ b/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h
@@ -97,7 +97,7 @@ struct bcom_task_header {
u8 reserved[8];
};
-/* Descriptors stucture & co */
+/* Descriptors structure & co */
#define BCOM_DESC_NOP 0x000001f8
#define BCOM_LCD_MASK 0x80000000
#define BCOM_DRD_EXTENDED 0x40000000
diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c
index 0476bcc7c3e1..350787c83e22 100644
--- a/arch/powerpc/sysdev/cpm1.c
+++ b/arch/powerpc/sysdev/cpm1.c
@@ -58,21 +58,21 @@ static struct irq_host *cpm_pic_host;
static void cpm_mask_irq(struct irq_data *d)
{
- unsigned int cpm_vec = (unsigned int)irq_map[d->irq].hwirq;
+ unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);
clrbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec));
}
static void cpm_unmask_irq(struct irq_data *d)
{
- unsigned int cpm_vec = (unsigned int)irq_map[d->irq].hwirq;
+ unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);
setbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec));
}
static void cpm_end_irq(struct irq_data *d)
{
- unsigned int cpm_vec = (unsigned int)irq_map[d->irq].hwirq;
+ unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);
out_be32(&cpic_reg->cpic_cisr, (1 << cpm_vec));
}
@@ -103,8 +103,8 @@ static int cpm_pic_host_map(struct irq_host *h, unsigned int virq,
{
pr_debug("cpm_pic_host_map(%d, 0x%lx)\n", virq, hw);
- irq_to_desc(virq)->status |= IRQ_LEVEL;
- set_irq_chip_and_handler(virq, &cpm_pic, handle_fasteoi_irq);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_and_handler(virq, &cpm_pic, handle_fasteoi_irq);
return 0;
}
@@ -157,7 +157,7 @@ unsigned int cpm_pic_init(void)
goto end;
/* Initialize the CPM interrupt controller. */
- hwirq = (unsigned int)irq_map[sirq].hwirq;
+ hwirq = (unsigned int)virq_to_hw(sirq);
out_be32(&cpic_reg->cpic_cicr,
(CICR_SCD_SCC4 | CICR_SCC_SCC3 | CICR_SCB_SCC2 | CICR_SCA_SCC1) |
((hwirq/2) << 13) | CICR_HP_MASK);
@@ -223,7 +223,7 @@ void __init cpm_reset(void)
/* Set SDMA Bus Request priority 5.
* On 860T, this also enables FEC priority 6. I am not sure
- * this is what we realy want for some applications, but the
+ * this is what we really want for some applications, but the
* manual recommends it.
* Bit 25, FAM can also be set to use FEC aggressive mode (860T).
*/
diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c
index 473032556715..bcab50e2a9eb 100644
--- a/arch/powerpc/sysdev/cpm2_pic.c
+++ b/arch/powerpc/sysdev/cpm2_pic.c
@@ -81,7 +81,7 @@ static const u_char irq_to_siubit[] = {
static void cpm2_mask_irq(struct irq_data *d)
{
int bit, word;
- unsigned int irq_nr = virq_to_hw(d->irq);
+ unsigned int irq_nr = irqd_to_hwirq(d);
bit = irq_to_siubit[irq_nr];
word = irq_to_siureg[irq_nr];
@@ -93,7 +93,7 @@ static void cpm2_mask_irq(struct irq_data *d)
static void cpm2_unmask_irq(struct irq_data *d)
{
int bit, word;
- unsigned int irq_nr = virq_to_hw(d->irq);
+ unsigned int irq_nr = irqd_to_hwirq(d);
bit = irq_to_siubit[irq_nr];
word = irq_to_siureg[irq_nr];
@@ -105,7 +105,7 @@ static void cpm2_unmask_irq(struct irq_data *d)
static void cpm2_ack(struct irq_data *d)
{
int bit, word;
- unsigned int irq_nr = virq_to_hw(d->irq);
+ unsigned int irq_nr = irqd_to_hwirq(d);
bit = irq_to_siubit[irq_nr];
word = irq_to_siureg[irq_nr];
@@ -115,32 +115,25 @@ static void cpm2_ack(struct irq_data *d)
static void cpm2_end_irq(struct irq_data *d)
{
- struct irq_desc *desc;
int bit, word;
- unsigned int irq_nr = virq_to_hw(d->irq);
+ unsigned int irq_nr = irqd_to_hwirq(d);
- desc = irq_to_desc(irq_nr);
- if (!(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS))
- && desc->action) {
-
- bit = irq_to_siubit[irq_nr];
- word = irq_to_siureg[irq_nr];
+ bit = irq_to_siubit[irq_nr];
+ word = irq_to_siureg[irq_nr];
- ppc_cached_irq_mask[word] |= 1 << bit;
- out_be32(&cpm2_intctl->ic_simrh + word, ppc_cached_irq_mask[word]);
+ ppc_cached_irq_mask[word] |= 1 << bit;
+ out_be32(&cpm2_intctl->ic_simrh + word, ppc_cached_irq_mask[word]);
- /*
- * Work around large numbers of spurious IRQs on PowerPC 82xx
- * systems.
- */
- mb();
- }
+ /*
+ * Work around large numbers of spurious IRQs on PowerPC 82xx
+ * systems.
+ */
+ mb();
}
static int cpm2_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
- unsigned int src = virq_to_hw(d->irq);
- struct irq_desc *desc = irq_to_desc(d->irq);
+ unsigned int src = irqd_to_hwirq(d);
unsigned int vold, vnew, edibit;
/* Port C interrupts are either IRQ_TYPE_EDGE_FALLING or
@@ -162,13 +155,11 @@ static int cpm2_set_irq_type(struct irq_data *d, unsigned int flow_type)
goto err_sense;
}
- desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
- desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
- if (flow_type & IRQ_TYPE_LEVEL_LOW) {
- desc->status |= IRQ_LEVEL;
- desc->handle_irq = handle_level_irq;
- } else
- desc->handle_irq = handle_edge_irq;
+ irqd_set_trigger_type(d, flow_type);
+ if (flow_type & IRQ_TYPE_LEVEL_LOW)
+ __irq_set_handler_locked(d->irq, handle_level_irq);
+ else
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
/* internal IRQ senses are LEVEL_LOW
* EXT IRQ and Port C IRQ senses are programmable
@@ -179,7 +170,8 @@ static int cpm2_set_irq_type(struct irq_data *d, unsigned int flow_type)
if (src >= CPM2_IRQ_PORTC15 && src <= CPM2_IRQ_PORTC0)
edibit = (31 - (CPM2_IRQ_PORTC0 - src));
else
- return (flow_type & IRQ_TYPE_LEVEL_LOW) ? 0 : -EINVAL;
+ return (flow_type & IRQ_TYPE_LEVEL_LOW) ?
+ IRQ_SET_MASK_OK_NOCOPY : -EINVAL;
vold = in_be32(&cpm2_intctl->ic_siexr);
@@ -190,7 +182,7 @@ static int cpm2_set_irq_type(struct irq_data *d, unsigned int flow_type)
if (vold != vnew)
out_be32(&cpm2_intctl->ic_siexr, vnew);
- return 0;
+ return IRQ_SET_MASK_OK_NOCOPY;
err_sense:
pr_err("CPM2 PIC: sense type 0x%x not supported\n", flow_type);
@@ -204,6 +196,7 @@ static struct irq_chip cpm2_pic = {
.irq_ack = cpm2_ack,
.irq_eoi = cpm2_end_irq,
.irq_set_type = cpm2_set_irq_type,
+ .flags = IRQCHIP_EOI_IF_HANDLED,
};
unsigned int cpm2_get_irq(void)
@@ -226,8 +219,8 @@ static int cpm2_pic_host_map(struct irq_host *h, unsigned int virq,
{
pr_debug("cpm2_pic_host_map(%d, 0x%lx)\n", virq, hw);
- irq_to_desc(virq)->status |= IRQ_LEVEL;
- set_irq_chip_and_handler(virq, &cpm2_pic, handle_level_irq);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_and_handler(virq, &cpm2_pic, handle_level_irq);
return 0;
}
diff --git a/arch/powerpc/sysdev/fsl_85xx_cache_sram.c b/arch/powerpc/sysdev/fsl_85xx_cache_sram.c
index 54fb1922fe30..116415899176 100644
--- a/arch/powerpc/sysdev/fsl_85xx_cache_sram.c
+++ b/arch/powerpc/sysdev/fsl_85xx_cache_sram.c
@@ -106,10 +106,10 @@ int __init instantiate_cache_sram(struct platform_device *dev,
goto out_free;
}
- cache_sram->base_virt = ioremap_flags(cache_sram->base_phys,
+ cache_sram->base_virt = ioremap_prot(cache_sram->base_phys,
cache_sram->size, _PAGE_COHERENT | PAGE_KERNEL);
if (!cache_sram->base_virt) {
- dev_err(&dev->dev, "%s: ioremap_flags failed\n",
+ dev_err(&dev->dev, "%s: ioremap_prot failed\n",
dev->dev.of_node->full_name);
ret = -ENOMEM;
goto out_release;
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 58e09b2833f2..92e78333c47c 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -64,10 +64,10 @@ static int fsl_msi_host_map(struct irq_host *h, unsigned int virq,
struct fsl_msi *msi_data = h->host_data;
struct irq_chip *chip = &fsl_msi_chip;
- irq_to_desc(virq)->status |= IRQ_TYPE_EDGE_FALLING;
+ irq_set_status_flags(virq, IRQ_TYPE_EDGE_FALLING);
- set_irq_chip_data(virq, msi_data);
- set_irq_chip_and_handler(virq, chip, handle_edge_irq);
+ irq_set_chip_data(virq, msi_data);
+ irq_set_chip_and_handler(virq, chip, handle_edge_irq);
return 0;
}
@@ -110,8 +110,8 @@ static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
list_for_each_entry(entry, &pdev->msi_list, list) {
if (entry->irq == NO_IRQ)
continue;
- msi_data = get_irq_data(entry->irq);
- set_irq_msi(entry->irq, NULL);
+ msi_data = irq_get_chip_data(entry->irq);
+ irq_set_msi_desc(entry->irq, NULL);
msi_bitmap_free_hwirqs(&msi_data->bitmap,
virq_to_hw(entry->irq), 1);
irq_dispose_mapping(entry->irq);
@@ -168,8 +168,8 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
rc = -ENOSPC;
goto out_free;
}
- set_irq_data(virq, msi_data);
- set_irq_msi(virq, entry);
+ /* chip_data is msi_data via host->hostdata in host->map() */
+ irq_set_msi_desc(virq, entry);
fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data);
write_msi_msg(virq, &msg);
@@ -183,7 +183,8 @@ out_free:
static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct irq_data *idata = irq_desc_get_irq_data(desc);
unsigned int cascade_irq;
struct fsl_msi *msi_data;
int msir_index = -1;
@@ -192,20 +193,20 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
u32 have_shift = 0;
struct fsl_msi_cascade_data *cascade_data;
- cascade_data = (struct fsl_msi_cascade_data *)get_irq_data(irq);
+ cascade_data = irq_get_handler_data(irq);
msi_data = cascade_data->msi_data;
raw_spin_lock(&desc->lock);
if ((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) {
if (chip->irq_mask_ack)
- chip->irq_mask_ack(&desc->irq_data);
+ chip->irq_mask_ack(idata);
else {
- chip->irq_mask(&desc->irq_data);
- chip->irq_ack(&desc->irq_data);
+ chip->irq_mask(idata);
+ chip->irq_ack(idata);
}
}
- if (unlikely(desc->status & IRQ_INPROGRESS))
+ if (unlikely(irqd_irq_inprogress(idata)))
goto unlock;
msir_index = cascade_data->index;
@@ -213,7 +214,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
if (msir_index >= NR_MSI_REG)
cascade_irq = NO_IRQ;
- desc->status |= IRQ_INPROGRESS;
+ irqd_set_chained_irq_inprogress(idata);
switch (msi_data->feature & FSL_PIC_IP_MASK) {
case FSL_PIC_IP_MPIC:
msir_value = fsl_msi_read(msi_data->msi_regs,
@@ -235,15 +236,15 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
have_shift += intr_index + 1;
msir_value = msir_value >> (intr_index + 1);
}
- desc->status &= ~IRQ_INPROGRESS;
+ irqd_clr_chained_irq_inprogress(idata);
switch (msi_data->feature & FSL_PIC_IP_MASK) {
case FSL_PIC_IP_MPIC:
- chip->irq_eoi(&desc->irq_data);
+ chip->irq_eoi(idata);
break;
case FSL_PIC_IP_IPIC:
- if (!(desc->status & IRQ_DISABLED) && chip->irq_unmask)
- chip->irq_unmask(&desc->irq_data);
+ if (!irqd_irq_disabled(idata) && chip->irq_unmask)
+ chip->irq_unmask(idata);
break;
}
unlock:
@@ -252,7 +253,7 @@ unlock:
static int fsl_of_msi_remove(struct platform_device *ofdev)
{
- struct fsl_msi *msi = ofdev->dev.platform_data;
+ struct fsl_msi *msi = platform_get_drvdata(ofdev);
int virq, i;
struct fsl_msi_cascade_data *cascade_data;
@@ -261,7 +262,7 @@ static int fsl_of_msi_remove(struct platform_device *ofdev)
for (i = 0; i < NR_MSI_REG; i++) {
virq = msi->msi_virqs[i];
if (virq != NO_IRQ) {
- cascade_data = get_irq_data(virq);
+ cascade_data = irq_get_handler_data(virq);
kfree(cascade_data);
irq_dispose_mapping(virq);
}
@@ -297,14 +298,16 @@ static int __devinit fsl_msi_setup_hwirq(struct fsl_msi *msi,
msi->msi_virqs[irq_index] = virt_msir;
cascade_data->index = offset + irq_index;
cascade_data->msi_data = msi;
- set_irq_data(virt_msir, cascade_data);
- set_irq_chained_handler(virt_msir, fsl_msi_cascade);
+ irq_set_handler_data(virt_msir, cascade_data);
+ irq_set_chained_handler(virt_msir, fsl_msi_cascade);
return 0;
}
+static const struct of_device_id fsl_of_msi_ids[];
static int __devinit fsl_of_msi_probe(struct platform_device *dev)
{
+ const struct of_device_id *match;
struct fsl_msi *msi;
struct resource res;
int err, i, j, irq_index, count;
@@ -315,9 +318,10 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev)
u32 offset;
static const u32 all_avail[] = { 0, NR_MSI_IRQS };
- if (!dev->dev.of_match)
+ match = of_match_device(fsl_of_msi_ids, &dev->dev);
+ if (!match)
return -EINVAL;
- features = dev->dev.of_match->data;
+ features = match->data;
printk(KERN_DEBUG "Setting up Freescale MSI support\n");
@@ -326,7 +330,7 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev)
dev_err(&dev->dev, "No memory for MSI structure\n");
return -ENOMEM;
}
- dev->dev.platform_data = msi;
+ platform_set_drvdata(dev, msi);
msi->irqhost = irq_alloc_host(dev->dev.of_node, IRQ_HOST_MAP_LINEAR,
NR_MSI_IRQS, &fsl_msi_host_ops, 0);
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index f8f7f28c6343..68ca9290df94 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -324,6 +324,11 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary)
struct resource rsrc;
const int *bus_range;
+ if (!of_device_is_available(dev)) {
+ pr_warning("%s: disabled\n", dev->full_name);
+ return -ENODEV;
+ }
+
pr_debug("Adding PCI host bridge %s\n", dev->full_name);
/* Fetch host bridge registers address */
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 14232d57369c..49798532b477 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -1457,7 +1457,6 @@ int fsl_rio_setup(struct platform_device *dev)
port->ops = ops;
port->priv = priv;
port->phys_efptr = 0x100;
- rio_register_mport(port);
priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1);
rio_regs_win = priv->regs_win;
@@ -1504,6 +1503,9 @@ int fsl_rio_setup(struct platform_device *dev)
dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n",
port->sys_size ? 65536 : 256);
+ if (rio_register_mport(port))
+ goto err;
+
if (port->host_deviceid >= 0)
out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST |
RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED);
diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c
index aeda4c8d0a0a..d18bb27e4df9 100644
--- a/arch/powerpc/sysdev/i8259.c
+++ b/arch/powerpc/sysdev/i8259.c
@@ -175,28 +175,16 @@ static int i8259_host_map(struct irq_host *h, unsigned int virq,
/* We block the internal cascade */
if (hw == 2)
- irq_to_desc(virq)->status |= IRQ_NOREQUEST;
+ irq_set_status_flags(virq, IRQ_NOREQUEST);
/* We use the level handler only for now, we might want to
* be more cautious here but that works for now
*/
- irq_to_desc(virq)->status |= IRQ_LEVEL;
- set_irq_chip_and_handler(virq, &i8259_pic, handle_level_irq);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_and_handler(virq, &i8259_pic, handle_level_irq);
return 0;
}
-static void i8259_host_unmap(struct irq_host *h, unsigned int virq)
-{
- /* Make sure irq is masked in hardware */
- i8259_mask_irq(irq_get_irq_data(virq));
-
- /* remove chip and handler */
- set_irq_chip_and_handler(virq, NULL, NULL);
-
- /* Make sure it's completed */
- synchronize_irq(virq);
-}
-
static int i8259_host_xlate(struct irq_host *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
@@ -220,7 +208,6 @@ static int i8259_host_xlate(struct irq_host *h, struct device_node *ct,
static struct irq_host_ops i8259_host_ops = {
.match = i8259_host_match,
.map = i8259_host_map,
- .unmap = i8259_host_unmap,
.xlate = i8259_host_xlate,
};
diff --git a/arch/powerpc/sysdev/indirect_pci.c b/arch/powerpc/sysdev/indirect_pci.c
index 7ed809676642..82fdad885d20 100644
--- a/arch/powerpc/sysdev/indirect_pci.c
+++ b/arch/powerpc/sysdev/indirect_pci.c
@@ -117,7 +117,7 @@ indirect_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
(devfn << 8) | reg | cfg_type));
- /* surpress setting of PCI_PRIMARY_BUS */
+ /* suppress setting of PCI_PRIMARY_BUS */
if (hose->indirect_type & PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS)
if ((offset == PCI_PRIMARY_BUS) &&
(bus->number == hose->first_busno))
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index 497047dc986e..7367d17364cb 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -18,7 +18,7 @@
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/signal.h>
-#include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/spinlock.h>
@@ -521,12 +521,10 @@ static inline struct ipic * ipic_from_irq(unsigned int virq)
return primary_ipic;
}
-#define ipic_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq)
-
static void ipic_unmask_irq(struct irq_data *d)
{
struct ipic *ipic = ipic_from_irq(d->irq);
- unsigned int src = ipic_irq_to_hw(d->irq);
+ unsigned int src = irqd_to_hwirq(d);
unsigned long flags;
u32 temp;
@@ -542,7 +540,7 @@ static void ipic_unmask_irq(struct irq_data *d)
static void ipic_mask_irq(struct irq_data *d)
{
struct ipic *ipic = ipic_from_irq(d->irq);
- unsigned int src = ipic_irq_to_hw(d->irq);
+ unsigned int src = irqd_to_hwirq(d);
unsigned long flags;
u32 temp;
@@ -562,7 +560,7 @@ static void ipic_mask_irq(struct irq_data *d)
static void ipic_ack_irq(struct irq_data *d)
{
struct ipic *ipic = ipic_from_irq(d->irq);
- unsigned int src = ipic_irq_to_hw(d->irq);
+ unsigned int src = irqd_to_hwirq(d);
unsigned long flags;
u32 temp;
@@ -581,7 +579,7 @@ static void ipic_ack_irq(struct irq_data *d)
static void ipic_mask_irq_and_ack(struct irq_data *d)
{
struct ipic *ipic = ipic_from_irq(d->irq);
- unsigned int src = ipic_irq_to_hw(d->irq);
+ unsigned int src = irqd_to_hwirq(d);
unsigned long flags;
u32 temp;
@@ -604,8 +602,7 @@ static void ipic_mask_irq_and_ack(struct irq_data *d)
static int ipic_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
struct ipic *ipic = ipic_from_irq(d->irq);
- unsigned int src = ipic_irq_to_hw(d->irq);
- struct irq_desc *desc = irq_to_desc(d->irq);
+ unsigned int src = irqd_to_hwirq(d);
unsigned int vold, vnew, edibit;
if (flow_type == IRQ_TYPE_NONE)
@@ -623,17 +620,16 @@ static int ipic_set_irq_type(struct irq_data *d, unsigned int flow_type)
printk(KERN_ERR "ipic: edge sense not supported on internal "
"interrupts\n");
return -EINVAL;
+
}
- desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
- desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
+ irqd_set_trigger_type(d, flow_type);
if (flow_type & IRQ_TYPE_LEVEL_LOW) {
- desc->status |= IRQ_LEVEL;
- desc->handle_irq = handle_level_irq;
- desc->irq_data.chip = &ipic_level_irq_chip;
+ __irq_set_handler_locked(d->irq, handle_level_irq);
+ d->chip = &ipic_level_irq_chip;
} else {
- desc->handle_irq = handle_edge_irq;
- desc->irq_data.chip = &ipic_edge_irq_chip;
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
+ d->chip = &ipic_edge_irq_chip;
}
/* only EXT IRQ senses are programmable on ipic
@@ -655,7 +651,7 @@ static int ipic_set_irq_type(struct irq_data *d, unsigned int flow_type)
}
if (vold != vnew)
ipic_write(ipic->regs, IPIC_SECNR, vnew);
- return 0;
+ return IRQ_SET_MASK_OK_NOCOPY;
}
/* level interrupts and edge interrupts have different ack operations */
@@ -687,11 +683,11 @@ static int ipic_host_map(struct irq_host *h, unsigned int virq,
{
struct ipic *ipic = h->host_data;
- set_irq_chip_data(virq, ipic);
- set_irq_chip_and_handler(virq, &ipic_level_irq_chip, handle_level_irq);
+ irq_set_chip_data(virq, ipic);
+ irq_set_chip_and_handler(virq, &ipic_level_irq_chip, handle_level_irq);
/* Set default irq type */
- set_irq_type(virq, IRQ_TYPE_NONE);
+ irq_set_irq_type(virq, IRQ_TYPE_NONE);
return 0;
}
@@ -795,7 +791,7 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
int ipic_set_priority(unsigned int virq, unsigned int priority)
{
struct ipic *ipic = ipic_from_irq(virq);
- unsigned int src = ipic_irq_to_hw(virq);
+ unsigned int src = virq_to_hw(virq);
u32 temp;
if (priority > 7)
@@ -823,7 +819,7 @@ int ipic_set_priority(unsigned int virq, unsigned int priority)
void ipic_set_highest_priority(unsigned int virq)
{
struct ipic *ipic = ipic_from_irq(virq);
- unsigned int src = ipic_irq_to_hw(virq);
+ unsigned int src = virq_to_hw(virq);
u32 temp;
temp = ipic_read(ipic->regs, IPIC_SICFR);
@@ -904,7 +900,7 @@ static struct {
u32 sercr;
} ipic_saved_state;
-static int ipic_suspend(struct sys_device *sdev, pm_message_t state)
+static int ipic_suspend(void)
{
struct ipic *ipic = primary_ipic;
@@ -935,7 +931,7 @@ static int ipic_suspend(struct sys_device *sdev, pm_message_t state)
return 0;
}
-static int ipic_resume(struct sys_device *sdev)
+static void ipic_resume(void)
{
struct ipic *ipic = primary_ipic;
@@ -951,44 +947,26 @@ static int ipic_resume(struct sys_device *sdev)
ipic_write(ipic->regs, IPIC_SECNR, ipic_saved_state.secnr);
ipic_write(ipic->regs, IPIC_SERMR, ipic_saved_state.sermr);
ipic_write(ipic->regs, IPIC_SERCR, ipic_saved_state.sercr);
-
- return 0;
}
#else
#define ipic_suspend NULL
#define ipic_resume NULL
#endif
-static struct sysdev_class ipic_sysclass = {
- .name = "ipic",
+static struct syscore_ops ipic_syscore_ops = {
.suspend = ipic_suspend,
.resume = ipic_resume,
};
-static struct sys_device device_ipic = {
- .id = 0,
- .cls = &ipic_sysclass,
-};
-
-static int __init init_ipic_sysfs(void)
+static int __init init_ipic_syscore(void)
{
- int rc;
-
if (!primary_ipic || !primary_ipic->regs)
return -ENODEV;
- printk(KERN_DEBUG "Registering ipic with sysfs...\n");
- rc = sysdev_class_register(&ipic_sysclass);
- if (rc) {
- printk(KERN_ERR "Failed registering ipic sys class\n");
- return -ENODEV;
- }
- rc = sysdev_register(&device_ipic);
- if (rc) {
- printk(KERN_ERR "Failed registering ipic sys device\n");
- return -ENODEV;
- }
+ printk(KERN_DEBUG "Registering ipic system core operations\n");
+ register_syscore_ops(&ipic_syscore_ops);
+
return 0;
}
-subsys_initcall(init_ipic_sysfs);
+subsys_initcall(init_ipic_syscore);
diff --git a/arch/powerpc/sysdev/mmio_nvram.c b/arch/powerpc/sysdev/mmio_nvram.c
index 207324209065..ddc877a3a23a 100644
--- a/arch/powerpc/sysdev/mmio_nvram.c
+++ b/arch/powerpc/sysdev/mmio_nvram.c
@@ -115,6 +115,8 @@ int __init mmio_nvram_init(void)
int ret;
nvram_node = of_find_node_by_type(NULL, "nvram");
+ if (!nvram_node)
+ nvram_node = of_find_compatible_node(NULL, NULL, "nvram");
if (!nvram_node) {
printk(KERN_WARNING "nvram: no node found in device-tree\n");
return -ENODEV;
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c
index 1a75a7fb4a99..20924f2246f0 100644
--- a/arch/powerpc/sysdev/mpc8xx_pic.c
+++ b/arch/powerpc/sysdev/mpc8xx_pic.c
@@ -28,7 +28,7 @@ int cpm_get_irq(struct pt_regs *regs);
static void mpc8xx_unmask_irq(struct irq_data *d)
{
int bit, word;
- unsigned int irq_nr = (unsigned int)irq_map[d->irq].hwirq;
+ unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
bit = irq_nr & 0x1f;
word = irq_nr >> 5;
@@ -40,7 +40,7 @@ static void mpc8xx_unmask_irq(struct irq_data *d)
static void mpc8xx_mask_irq(struct irq_data *d)
{
int bit, word;
- unsigned int irq_nr = (unsigned int)irq_map[d->irq].hwirq;
+ unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
bit = irq_nr & 0x1f;
word = irq_nr >> 5;
@@ -52,7 +52,7 @@ static void mpc8xx_mask_irq(struct irq_data *d)
static void mpc8xx_ack(struct irq_data *d)
{
int bit;
- unsigned int irq_nr = (unsigned int)irq_map[d->irq].hwirq;
+ unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
bit = irq_nr & 0x1f;
out_be32(&siu_reg->sc_sipend, 1 << (31-bit));
@@ -61,7 +61,7 @@ static void mpc8xx_ack(struct irq_data *d)
static void mpc8xx_end_irq(struct irq_data *d)
{
int bit, word;
- unsigned int irq_nr = (unsigned int)irq_map[d->irq].hwirq;
+ unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
bit = irq_nr & 0x1f;
word = irq_nr >> 5;
@@ -72,22 +72,15 @@ static void mpc8xx_end_irq(struct irq_data *d)
static int mpc8xx_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
- struct irq_desc *desc = irq_to_desc(d->irq);
-
- desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
- desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
- if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
- desc->status |= IRQ_LEVEL;
-
if (flow_type & IRQ_TYPE_EDGE_FALLING) {
- irq_hw_number_t hw = (unsigned int)irq_map[d->irq].hwirq;
+ irq_hw_number_t hw = (unsigned int)irqd_to_hwirq(d);
unsigned int siel = in_be32(&siu_reg->sc_siel);
/* only external IRQ senses are programmable */
if ((hw & 1) == 0) {
siel |= (0x80000000 >> hw);
out_be32(&siu_reg->sc_siel, siel);
- desc->handle_irq = handle_edge_irq;
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
}
}
return 0;
@@ -124,7 +117,7 @@ static int mpc8xx_pic_host_map(struct irq_host *h, unsigned int virq,
pr_debug("mpc8xx_pic_host_map(%d, 0x%lx)\n", virq, hw);
/* Set default irq handle */
- set_irq_chip_and_handler(virq, &mpc8xx_pic, handle_level_irq);
+ irq_set_chip_and_handler(virq, &mpc8xx_pic, handle_level_irq);
return 0;
}
diff --git a/arch/powerpc/sysdev/mpc8xxx_gpio.c b/arch/powerpc/sysdev/mpc8xxx_gpio.c
index 232e701245d7..fb4963abdf55 100644
--- a/arch/powerpc/sysdev/mpc8xxx_gpio.c
+++ b/arch/powerpc/sysdev/mpc8xxx_gpio.c
@@ -145,7 +145,7 @@ static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
static void mpc8xxx_gpio_irq_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_desc_data(desc);
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_desc_get_handler_data(desc);
struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
unsigned int mask;
@@ -163,7 +163,7 @@ static void mpc8xxx_irq_unmask(struct irq_data *d)
spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- setbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(virq_to_hw(d->irq)));
+ setbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
}
@@ -176,7 +176,7 @@ static void mpc8xxx_irq_mask(struct irq_data *d)
spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- clrbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(virq_to_hw(d->irq)));
+ clrbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
}
@@ -186,7 +186,7 @@ static void mpc8xxx_irq_ack(struct irq_data *d)
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
- out_be32(mm->regs + GPIO_IER, mpc8xxx_gpio2mask(virq_to_hw(d->irq)));
+ out_be32(mm->regs + GPIO_IER, mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
}
static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
@@ -199,14 +199,14 @@ static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
case IRQ_TYPE_EDGE_FALLING:
spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
setbits32(mm->regs + GPIO_ICR,
- mpc8xxx_gpio2mask(virq_to_hw(d->irq)));
+ mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
case IRQ_TYPE_EDGE_BOTH:
spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
clrbits32(mm->regs + GPIO_ICR,
- mpc8xxx_gpio2mask(virq_to_hw(d->irq)));
+ mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
@@ -221,7 +221,7 @@ static int mpc512x_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
- unsigned long gpio = virq_to_hw(d->irq);
+ unsigned long gpio = irqd_to_hwirq(d);
void __iomem *reg;
unsigned int shift;
unsigned long flags;
@@ -278,9 +278,9 @@ static int mpc8xxx_gpio_irq_map(struct irq_host *h, unsigned int virq,
if (mpc8xxx_gc->of_dev_id_data)
mpc8xxx_irq_chip.irq_set_type = mpc8xxx_gc->of_dev_id_data;
- set_irq_chip_data(virq, h->host_data);
- set_irq_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_level_irq);
- set_irq_type(virq, IRQ_TYPE_NONE);
+ irq_set_chip_data(virq, h->host_data);
+ irq_set_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_level_irq);
+ irq_set_irq_type(virq, IRQ_TYPE_NONE);
return 0;
}
@@ -369,8 +369,8 @@ static void __init mpc8xxx_add_controller(struct device_node *np)
out_be32(mm_gc->regs + GPIO_IER, 0xffffffff);
out_be32(mm_gc->regs + GPIO_IMR, 0);
- set_irq_data(hwirq, mpc8xxx_gc);
- set_irq_chained_handler(hwirq, mpc8xxx_gpio_irq_cascade);
+ irq_set_handler_data(hwirq, mpc8xxx_gc);
+ irq_set_chained_handler(hwirq, mpc8xxx_gpio_irq_cascade);
skip_irq:
return;
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 0f7c6718d261..3a8de5bb628a 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -6,6 +6,7 @@
* with various broken implementations of this HW.
*
* Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
+ * Copyright 2010-2011 Freescale Semiconductor, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
@@ -27,6 +28,7 @@
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/slab.h>
+#include <linux/syscore_ops.h>
#include <asm/ptrace.h>
#include <asm/signal.h>
@@ -218,6 +220,28 @@ static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 valu
_mpic_write(mpic->reg_type, &mpic->gregs, offset, value);
}
+static inline u32 _mpic_tm_read(struct mpic *mpic, unsigned int tm)
+{
+ unsigned int offset = MPIC_INFO(TIMER_VECTOR_PRI) +
+ ((tm & 3) * MPIC_INFO(TIMER_STRIDE));
+
+ if (tm >= 4)
+ offset += 0x1000 / 4;
+
+ return _mpic_read(mpic->reg_type, &mpic->tmregs, offset);
+}
+
+static inline void _mpic_tm_write(struct mpic *mpic, unsigned int tm, u32 value)
+{
+ unsigned int offset = MPIC_INFO(TIMER_VECTOR_PRI) +
+ ((tm & 3) * MPIC_INFO(TIMER_STRIDE));
+
+ if (tm >= 4)
+ offset += 0x1000 / 4;
+
+ _mpic_write(mpic->reg_type, &mpic->tmregs, offset, value);
+}
+
static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg)
{
unsigned int cpu = mpic_processor_id(mpic);
@@ -268,6 +292,8 @@ static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no,
#define mpic_write(b,r,v) _mpic_write(mpic->reg_type,&(b),(r),(v))
#define mpic_ipi_read(i) _mpic_ipi_read(mpic,(i))
#define mpic_ipi_write(i,v) _mpic_ipi_write(mpic,(i),(v))
+#define mpic_tm_read(i) _mpic_tm_read(mpic,(i))
+#define mpic_tm_write(i,v) _mpic_tm_write(mpic,(i),(v))
#define mpic_cpu_read(i) _mpic_cpu_read(mpic,(i))
#define mpic_cpu_write(i,v) _mpic_cpu_write(mpic,(i),(v))
#define mpic_irq_read(s,r) _mpic_irq_read(mpic,(s),(r))
@@ -361,7 +387,7 @@ static inline void mpic_ht_end_irq(struct mpic *mpic, unsigned int source)
}
static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source,
- unsigned int irqflags)
+ bool level)
{
struct mpic_irq_fixup *fixup = &mpic->fixups[source];
unsigned long flags;
@@ -370,14 +396,14 @@ static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source,
if (fixup->base == NULL)
return;
- DBG("startup_ht_interrupt(0x%x, 0x%x) index: %d\n",
- source, irqflags, fixup->index);
+ DBG("startup_ht_interrupt(0x%x) index: %d\n",
+ source, fixup->index);
raw_spin_lock_irqsave(&mpic->fixup_lock, flags);
/* Enable and configure */
writeb(0x10 + 2 * fixup->index, fixup->base + 2);
tmp = readl(fixup->base + 4);
tmp &= ~(0x23U);
- if (irqflags & IRQ_LEVEL)
+ if (level)
tmp |= 0x22;
writel(tmp, fixup->base + 4);
raw_spin_unlock_irqrestore(&mpic->fixup_lock, flags);
@@ -389,8 +415,7 @@ static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source,
#endif
}
-static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source,
- unsigned int irqflags)
+static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source)
{
struct mpic_irq_fixup *fixup = &mpic->fixups[source];
unsigned long flags;
@@ -399,7 +424,7 @@ static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source,
if (fixup->base == NULL)
return;
- DBG("shutdown_ht_interrupt(0x%x, 0x%x)\n", source, irqflags);
+ DBG("shutdown_ht_interrupt(0x%x)\n", source);
/* Disable */
raw_spin_lock_irqsave(&mpic->fixup_lock, flags);
@@ -608,25 +633,30 @@ static int irq_choose_cpu(const struct cpumask *mask)
}
#endif
-#define mpic_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq)
-
/* Find an mpic associated with a given linux interrupt */
static struct mpic *mpic_find(unsigned int irq)
{
if (irq < NUM_ISA_INTERRUPTS)
return NULL;
- return get_irq_chip_data(irq);
+ return irq_get_chip_data(irq);
}
/* Determine if the linux irq is an IPI */
static unsigned int mpic_is_ipi(struct mpic *mpic, unsigned int irq)
{
- unsigned int src = mpic_irq_to_hw(irq);
+ unsigned int src = virq_to_hw(irq);
return (src >= mpic->ipi_vecs[0] && src <= mpic->ipi_vecs[3]);
}
+/* Determine if the linux irq is a timer */
+static unsigned int mpic_is_tm(struct mpic *mpic, unsigned int irq)
+{
+ unsigned int src = virq_to_hw(irq);
+
+ return (src >= mpic->timer_vecs[0] && src <= mpic->timer_vecs[7]);
+}
/* Convert a cpu mask from logical to physical cpu numbers. */
static inline u32 mpic_physmask(u32 cpumask)
@@ -634,7 +664,7 @@ static inline u32 mpic_physmask(u32 cpumask)
int i;
u32 mask = 0;
- for (i = 0; i < NR_CPUS; ++i, cpumask >>= 1)
+ for (i = 0; i < min(32, NR_CPUS); ++i, cpumask >>= 1)
mask |= (cpumask & 1) << get_hard_smp_processor_id(i);
return mask;
}
@@ -650,7 +680,7 @@ static inline struct mpic * mpic_from_ipi(struct irq_data *d)
/* Get the mpic structure from the irq number */
static inline struct mpic * mpic_from_irq(unsigned int irq)
{
- return get_irq_chip_data(irq);
+ return irq_get_chip_data(irq);
}
/* Get the mpic structure from the irq data */
@@ -675,7 +705,7 @@ void mpic_unmask_irq(struct irq_data *d)
{
unsigned int loops = 100000;
struct mpic *mpic = mpic_from_irq_data(d);
- unsigned int src = mpic_irq_to_hw(d->irq);
+ unsigned int src = irqd_to_hwirq(d);
DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, d->irq, src);
@@ -696,7 +726,7 @@ void mpic_mask_irq(struct irq_data *d)
{
unsigned int loops = 100000;
struct mpic *mpic = mpic_from_irq_data(d);
- unsigned int src = mpic_irq_to_hw(d->irq);
+ unsigned int src = irqd_to_hwirq(d);
DBG("%s: disable_irq: %d (src %d)\n", mpic->name, d->irq, src);
@@ -734,21 +764,21 @@ void mpic_end_irq(struct irq_data *d)
static void mpic_unmask_ht_irq(struct irq_data *d)
{
struct mpic *mpic = mpic_from_irq_data(d);
- unsigned int src = mpic_irq_to_hw(d->irq);
+ unsigned int src = irqd_to_hwirq(d);
mpic_unmask_irq(d);
- if (irq_to_desc(d->irq)->status & IRQ_LEVEL)
+ if (irqd_is_level_type(d))
mpic_ht_end_irq(mpic, src);
}
static unsigned int mpic_startup_ht_irq(struct irq_data *d)
{
struct mpic *mpic = mpic_from_irq_data(d);
- unsigned int src = mpic_irq_to_hw(d->irq);
+ unsigned int src = irqd_to_hwirq(d);
mpic_unmask_irq(d);
- mpic_startup_ht_interrupt(mpic, src, irq_to_desc(d->irq)->status);
+ mpic_startup_ht_interrupt(mpic, src, irqd_is_level_type(d));
return 0;
}
@@ -756,16 +786,16 @@ static unsigned int mpic_startup_ht_irq(struct irq_data *d)
static void mpic_shutdown_ht_irq(struct irq_data *d)
{
struct mpic *mpic = mpic_from_irq_data(d);
- unsigned int src = mpic_irq_to_hw(d->irq);
+ unsigned int src = irqd_to_hwirq(d);
- mpic_shutdown_ht_interrupt(mpic, src, irq_to_desc(d->irq)->status);
+ mpic_shutdown_ht_interrupt(mpic, src);
mpic_mask_irq(d);
}
static void mpic_end_ht_irq(struct irq_data *d)
{
struct mpic *mpic = mpic_from_irq_data(d);
- unsigned int src = mpic_irq_to_hw(d->irq);
+ unsigned int src = irqd_to_hwirq(d);
#ifdef DEBUG_IRQ
DBG("%s: end_irq: %d\n", mpic->name, d->irq);
@@ -775,7 +805,7 @@ static void mpic_end_ht_irq(struct irq_data *d)
* latched another edge interrupt coming in anyway
*/
- if (irq_to_desc(d->irq)->status & IRQ_LEVEL)
+ if (irqd_is_level_type(d))
mpic_ht_end_irq(mpic, src);
mpic_eoi(mpic);
}
@@ -786,7 +816,7 @@ static void mpic_end_ht_irq(struct irq_data *d)
static void mpic_unmask_ipi(struct irq_data *d)
{
struct mpic *mpic = mpic_from_ipi(d);
- unsigned int src = mpic_irq_to_hw(d->irq) - mpic->ipi_vecs[0];
+ unsigned int src = virq_to_hw(d->irq) - mpic->ipi_vecs[0];
DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, d->irq, src);
mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK);
@@ -813,27 +843,42 @@ static void mpic_end_ipi(struct irq_data *d)
#endif /* CONFIG_SMP */
+static void mpic_unmask_tm(struct irq_data *d)
+{
+ struct mpic *mpic = mpic_from_irq_data(d);
+ unsigned int src = virq_to_hw(d->irq) - mpic->timer_vecs[0];
+
+ DBG("%s: enable_tm: %d (tm %d)\n", mpic->name, irq, src);
+ mpic_tm_write(src, mpic_tm_read(src) & ~MPIC_VECPRI_MASK);
+ mpic_tm_read(src);
+}
+
+static void mpic_mask_tm(struct irq_data *d)
+{
+ struct mpic *mpic = mpic_from_irq_data(d);
+ unsigned int src = virq_to_hw(d->irq) - mpic->timer_vecs[0];
+
+ mpic_tm_write(src, mpic_tm_read(src) | MPIC_VECPRI_MASK);
+ mpic_tm_read(src);
+}
+
int mpic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
bool force)
{
struct mpic *mpic = mpic_from_irq_data(d);
- unsigned int src = mpic_irq_to_hw(d->irq);
+ unsigned int src = irqd_to_hwirq(d);
if (mpic->flags & MPIC_SINGLE_DEST_CPU) {
int cpuid = irq_choose_cpu(cpumask);
mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid);
} else {
- cpumask_var_t tmp;
-
- alloc_cpumask_var(&tmp, GFP_KERNEL);
+ u32 mask = cpumask_bits(cpumask)[0];
- cpumask_and(tmp, cpumask, cpu_online_mask);
+ mask &= cpumask_bits(cpu_online_mask)[0];
mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION),
- mpic_physmask(cpumask_bits(tmp)[0]));
-
- free_cpumask_var(tmp);
+ mpic_physmask(mask));
}
return 0;
@@ -863,8 +908,7 @@ static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type)
int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
struct mpic *mpic = mpic_from_irq_data(d);
- unsigned int src = mpic_irq_to_hw(d->irq);
- struct irq_desc *desc = irq_to_desc(d->irq);
+ unsigned int src = irqd_to_hwirq(d);
unsigned int vecpri, vold, vnew;
DBG("mpic: set_irq_type(mpic:@%p,virq:%d,src:0x%x,type:0x%x)\n",
@@ -879,10 +923,7 @@ int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type)
if (flow_type == IRQ_TYPE_NONE)
flow_type = IRQ_TYPE_LEVEL_LOW;
- desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
- desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
- if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
- desc->status |= IRQ_LEVEL;
+ irqd_set_trigger_type(d, flow_type);
if (mpic_is_ht_interrupt(mpic, src))
vecpri = MPIC_VECPRI_POLARITY_POSITIVE |
@@ -897,13 +938,13 @@ int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type)
if (vold != vnew)
mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vnew);
- return 0;
+ return IRQ_SET_MASK_OK_NOCOPY;
}
void mpic_set_vector(unsigned int virq, unsigned int vector)
{
struct mpic *mpic = mpic_from_irq(virq);
- unsigned int src = mpic_irq_to_hw(virq);
+ unsigned int src = virq_to_hw(virq);
unsigned int vecpri;
DBG("mpic: set_vector(mpic:@%p,virq:%d,src:%d,vector:0x%x)\n",
@@ -921,7 +962,7 @@ void mpic_set_vector(unsigned int virq, unsigned int vector)
void mpic_set_destination(unsigned int virq, unsigned int cpuid)
{
struct mpic *mpic = mpic_from_irq(virq);
- unsigned int src = mpic_irq_to_hw(virq);
+ unsigned int src = virq_to_hw(virq);
DBG("mpic: set_destination(mpic:@%p,virq:%d,src:%d,cpuid:0x%x)\n",
mpic, virq, src, cpuid);
@@ -947,6 +988,12 @@ static struct irq_chip mpic_ipi_chip = {
};
#endif /* CONFIG_SMP */
+static struct irq_chip mpic_tm_chip = {
+ .irq_mask = mpic_mask_tm,
+ .irq_unmask = mpic_unmask_tm,
+ .irq_eoi = mpic_end_irq,
+};
+
#ifdef CONFIG_MPIC_U3_HT_IRQS
static struct irq_chip mpic_irq_ht_chip = {
.irq_startup = mpic_startup_ht_irq,
@@ -983,13 +1030,23 @@ static int mpic_host_map(struct irq_host *h, unsigned int virq,
WARN_ON(!(mpic->flags & MPIC_PRIMARY));
DBG("mpic: mapping as IPI\n");
- set_irq_chip_data(virq, mpic);
- set_irq_chip_and_handler(virq, &mpic->hc_ipi,
+ irq_set_chip_data(virq, mpic);
+ irq_set_chip_and_handler(virq, &mpic->hc_ipi,
handle_percpu_irq);
return 0;
}
#endif /* CONFIG_SMP */
+ if (hw >= mpic->timer_vecs[0] && hw <= mpic->timer_vecs[7]) {
+ WARN_ON(!(mpic->flags & MPIC_PRIMARY));
+
+ DBG("mpic: mapping as timer\n");
+ irq_set_chip_data(virq, mpic);
+ irq_set_chip_and_handler(virq, &mpic->hc_tm,
+ handle_fasteoi_irq);
+ return 0;
+ }
+
if (hw >= mpic->irq_count)
return -EINVAL;
@@ -1006,11 +1063,11 @@ static int mpic_host_map(struct irq_host *h, unsigned int virq,
DBG("mpic: mapping to irq chip @%p\n", chip);
- set_irq_chip_data(virq, mpic);
- set_irq_chip_and_handler(virq, chip, handle_fasteoi_irq);
+ irq_set_chip_data(virq, mpic);
+ irq_set_chip_and_handler(virq, chip, handle_fasteoi_irq);
/* Set default irq type */
- set_irq_type(virq, IRQ_TYPE_NONE);
+ irq_set_irq_type(virq, IRQ_TYPE_NONE);
/* If the MPIC was reset, then all vectors have already been
* initialized. Otherwise, a per source lazy initialization
@@ -1030,6 +1087,7 @@ static int mpic_host_xlate(struct irq_host *h, struct device_node *ct,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
+ struct mpic *mpic = h->host_data;
static unsigned char map_mpic_senses[4] = {
IRQ_TYPE_EDGE_RISING,
IRQ_TYPE_LEVEL_LOW,
@@ -1038,7 +1096,38 @@ static int mpic_host_xlate(struct irq_host *h, struct device_node *ct,
};
*out_hwirq = intspec[0];
- if (intsize > 1) {
+ if (intsize >= 4 && (mpic->flags & MPIC_FSL)) {
+ /*
+ * Freescale MPIC with extended intspec:
+ * First two cells are as usual. Third specifies
+ * an "interrupt type". Fourth is type-specific data.
+ *
+ * See Documentation/devicetree/bindings/powerpc/fsl/mpic.txt
+ */
+ switch (intspec[2]) {
+ case 0:
+ case 1: /* no EISR/EIMR support for now, treat as shared IRQ */
+ break;
+ case 2:
+ if (intspec[0] >= ARRAY_SIZE(mpic->ipi_vecs))
+ return -EINVAL;
+
+ *out_hwirq = mpic->ipi_vecs[intspec[0]];
+ break;
+ case 3:
+ if (intspec[0] >= ARRAY_SIZE(mpic->timer_vecs))
+ return -EINVAL;
+
+ *out_hwirq = mpic->timer_vecs[intspec[0]];
+ break;
+ default:
+ pr_debug("%s: unknown irq type %u\n",
+ __func__, intspec[2]);
+ return -EINVAL;
+ }
+
+ *out_flags = map_mpic_senses[intspec[1] & 3];
+ } else if (intsize > 1) {
u32 mask = 0x3;
/* Apple invented a new race of encoding on machines with
@@ -1114,6 +1203,9 @@ struct mpic * __init mpic_alloc(struct device_node *node,
mpic->hc_ipi.name = name;
#endif /* CONFIG_SMP */
+ mpic->hc_tm = mpic_tm_chip;
+ mpic->hc_tm.name = name;
+
mpic->flags = flags;
mpic->isu_size = isu_size;
mpic->irq_count = irq_count;
@@ -1124,10 +1216,14 @@ struct mpic * __init mpic_alloc(struct device_node *node,
else
intvec_top = 255;
- mpic->timer_vecs[0] = intvec_top - 8;
- mpic->timer_vecs[1] = intvec_top - 7;
- mpic->timer_vecs[2] = intvec_top - 6;
- mpic->timer_vecs[3] = intvec_top - 5;
+ mpic->timer_vecs[0] = intvec_top - 12;
+ mpic->timer_vecs[1] = intvec_top - 11;
+ mpic->timer_vecs[2] = intvec_top - 10;
+ mpic->timer_vecs[3] = intvec_top - 9;
+ mpic->timer_vecs[4] = intvec_top - 8;
+ mpic->timer_vecs[5] = intvec_top - 7;
+ mpic->timer_vecs[6] = intvec_top - 6;
+ mpic->timer_vecs[7] = intvec_top - 5;
mpic->ipi_vecs[0] = intvec_top - 4;
mpic->ipi_vecs[1] = intvec_top - 3;
mpic->ipi_vecs[2] = intvec_top - 2;
@@ -1137,6 +1233,8 @@ struct mpic * __init mpic_alloc(struct device_node *node,
/* Check for "big-endian" in device-tree */
if (node && of_get_property(node, "big-endian", NULL) != NULL)
mpic->flags |= MPIC_BIG_ENDIAN;
+ if (node && of_device_is_compatible(node, "fsl,mpic"))
+ mpic->flags |= MPIC_FSL;
/* Look for protected sources */
if (node) {
@@ -1328,15 +1426,17 @@ void __init mpic_init(struct mpic *mpic)
/* Set current processor priority to max */
mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf);
- /* Initialize timers: just disable them all */
+ /* Initialize timers to our reserved vectors and mask them for now */
for (i = 0; i < 4; i++) {
mpic_write(mpic->tmregs,
i * MPIC_INFO(TIMER_STRIDE) +
- MPIC_INFO(TIMER_DESTINATION), 0);
+ MPIC_INFO(TIMER_DESTINATION),
+ 1 << hard_smp_processor_id());
mpic_write(mpic->tmregs,
i * MPIC_INFO(TIMER_STRIDE) +
MPIC_INFO(TIMER_VECTOR_PRI),
MPIC_VECPRI_MASK |
+ (9 << MPIC_VECPRI_PRIORITY_SHIFT) |
(mpic->timer_vecs[0] + i));
}
@@ -1432,7 +1532,7 @@ void __init mpic_set_serial_int(struct mpic *mpic, int enable)
void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
{
struct mpic *mpic = mpic_find(irq);
- unsigned int src = mpic_irq_to_hw(irq);
+ unsigned int src = virq_to_hw(irq);
unsigned long flags;
u32 reg;
@@ -1445,6 +1545,11 @@ void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
~MPIC_VECPRI_PRIORITY_MASK;
mpic_ipi_write(src - mpic->ipi_vecs[0],
reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
+ } else if (mpic_is_tm(mpic, irq)) {
+ reg = mpic_tm_read(src - mpic->timer_vecs[0]) &
+ ~MPIC_VECPRI_PRIORITY_MASK;
+ mpic_tm_write(src - mpic->timer_vecs[0],
+ reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
} else {
reg = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI))
& ~MPIC_VECPRI_PRIORITY_MASK;
@@ -1624,46 +1729,28 @@ void mpic_request_ipis(void)
}
}
-static void mpic_send_ipi(unsigned int ipi_no, const struct cpumask *cpu_mask)
+void smp_mpic_message_pass(int cpu, int msg)
{
struct mpic *mpic = mpic_primary;
+ u32 physmask;
BUG_ON(mpic == NULL);
-#ifdef DEBUG_IPI
- DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no);
-#endif
-
- mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) +
- ipi_no * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE),
- mpic_physmask(cpumask_bits(cpu_mask)[0]));
-}
-
-void smp_mpic_message_pass(int target, int msg)
-{
- cpumask_var_t tmp;
-
/* make sure we're sending something that translates to an IPI */
if ((unsigned int)msg > 3) {
printk("SMP %d: smp_message_pass: unknown msg %d\n",
smp_processor_id(), msg);
return;
}
- switch (target) {
- case MSG_ALL:
- mpic_send_ipi(msg, cpu_online_mask);
- break;
- case MSG_ALL_BUT_SELF:
- alloc_cpumask_var(&tmp, GFP_NOWAIT);
- cpumask_andnot(tmp, cpu_online_mask,
- cpumask_of(smp_processor_id()));
- mpic_send_ipi(msg, tmp);
- free_cpumask_var(tmp);
- break;
- default:
- mpic_send_ipi(msg, cpumask_of(target));
- break;
- }
+
+#ifdef DEBUG_IPI
+ DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, msg);
+#endif
+
+ physmask = 1 << get_hard_smp_processor_id(cpu);
+
+ mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) +
+ msg * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE), physmask);
}
int __init smp_mpic_probe(void)
@@ -1707,9 +1794,8 @@ void mpic_reset_core(int cpu)
#endif /* CONFIG_SMP */
#ifdef CONFIG_PM
-static int mpic_suspend(struct sys_device *dev, pm_message_t state)
+static void mpic_suspend_one(struct mpic *mpic)
{
- struct mpic *mpic = container_of(dev, struct mpic, sysdev);
int i;
for (i = 0; i < mpic->num_sources; i++) {
@@ -1718,13 +1804,22 @@ static int mpic_suspend(struct sys_device *dev, pm_message_t state)
mpic->save_data[i].dest =
mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION));
}
+}
+
+static int mpic_suspend(void)
+{
+ struct mpic *mpic = mpics;
+
+ while (mpic) {
+ mpic_suspend_one(mpic);
+ mpic = mpic->next;
+ }
return 0;
}
-static int mpic_resume(struct sys_device *dev)
+static void mpic_resume_one(struct mpic *mpic)
{
- struct mpic *mpic = container_of(dev, struct mpic, sysdev);
int i;
for (i = 0; i < mpic->num_sources; i++) {
@@ -1751,33 +1846,28 @@ static int mpic_resume(struct sys_device *dev)
}
#endif
} /* end for loop */
+}
- return 0;
+static void mpic_resume(void)
+{
+ struct mpic *mpic = mpics;
+
+ while (mpic) {
+ mpic_resume_one(mpic);
+ mpic = mpic->next;
+ }
}
-#endif
-static struct sysdev_class mpic_sysclass = {
-#ifdef CONFIG_PM
+static struct syscore_ops mpic_syscore_ops = {
.resume = mpic_resume,
.suspend = mpic_suspend,
-#endif
- .name = "mpic",
};
static int mpic_init_sys(void)
{
- struct mpic *mpic = mpics;
- int error, id = 0;
-
- error = sysdev_class_register(&mpic_sysclass);
-
- while (mpic && !error) {
- mpic->sysdev.cls = &mpic_sysclass;
- mpic->sysdev.id = id++;
- error = sysdev_register(&mpic->sysdev);
- mpic = mpic->next;
- }
- return error;
+ register_syscore_ops(&mpic_syscore_ops);
+ return 0;
}
device_initcall(mpic_init_sys);
+#endif
diff --git a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c
index 0b7794acfce1..38e62382070c 100644
--- a/arch/powerpc/sysdev/mpic_pasemi_msi.c
+++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c
@@ -81,7 +81,7 @@ static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
if (entry->irq == NO_IRQ)
continue;
- set_irq_msi(entry->irq, NULL);
+ irq_set_msi_desc(entry->irq, NULL);
msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
virq_to_hw(entry->irq), ALLOC_CHUNK);
irq_dispose_mapping(entry->irq);
@@ -131,9 +131,9 @@ static int pasemi_msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
*/
mpic_set_vector(virq, 0);
- set_irq_msi(virq, entry);
- set_irq_chip(virq, &mpic_pasemi_msi_chip);
- set_irq_type(virq, IRQ_TYPE_EDGE_RISING);
+ irq_set_msi_desc(virq, entry);
+ irq_set_chip(virq, &mpic_pasemi_msi_chip);
+ irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);
pr_debug("pasemi_msi: allocated virq 0x%x (hw 0x%x) " \
"addr 0x%x\n", virq, hwirq, msg.address_lo);
diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c
index 71900ac78270..9a7aa0ed9c1c 100644
--- a/arch/powerpc/sysdev/mpic_u3msi.c
+++ b/arch/powerpc/sysdev/mpic_u3msi.c
@@ -129,7 +129,7 @@ static void u3msi_teardown_msi_irqs(struct pci_dev *pdev)
if (entry->irq == NO_IRQ)
continue;
- set_irq_msi(entry->irq, NULL);
+ irq_set_msi_desc(entry->irq, NULL);
msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
virq_to_hw(entry->irq), 1);
irq_dispose_mapping(entry->irq);
@@ -166,9 +166,9 @@ static int u3msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
return -ENOSPC;
}
- set_irq_msi(virq, entry);
- set_irq_chip(virq, &mpic_u3msi_chip);
- set_irq_type(virq, IRQ_TYPE_EDGE_RISING);
+ irq_set_msi_desc(virq, entry);
+ irq_set_chip(virq, &mpic_u3msi_chip);
+ irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);
pr_debug("u3msi: allocated virq 0x%x (hw 0x%x) addr 0x%lx\n",
virq, hwirq, (unsigned long)addr);
diff --git a/arch/powerpc/sysdev/mv64x60_pic.c b/arch/powerpc/sysdev/mv64x60_pic.c
index bc61ebb8987c..14d130268e7a 100644
--- a/arch/powerpc/sysdev/mv64x60_pic.c
+++ b/arch/powerpc/sysdev/mv64x60_pic.c
@@ -78,7 +78,7 @@ static struct irq_host *mv64x60_irq_host;
static void mv64x60_mask_low(struct irq_data *d)
{
- int level2 = irq_map[d->irq].hwirq & MV64x60_LEVEL2_MASK;
+ int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
unsigned long flags;
spin_lock_irqsave(&mv64x60_lock, flags);
@@ -91,7 +91,7 @@ static void mv64x60_mask_low(struct irq_data *d)
static void mv64x60_unmask_low(struct irq_data *d)
{
- int level2 = irq_map[d->irq].hwirq & MV64x60_LEVEL2_MASK;
+ int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
unsigned long flags;
spin_lock_irqsave(&mv64x60_lock, flags);
@@ -115,7 +115,7 @@ static struct irq_chip mv64x60_chip_low = {
static void mv64x60_mask_high(struct irq_data *d)
{
- int level2 = irq_map[d->irq].hwirq & MV64x60_LEVEL2_MASK;
+ int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
unsigned long flags;
spin_lock_irqsave(&mv64x60_lock, flags);
@@ -128,7 +128,7 @@ static void mv64x60_mask_high(struct irq_data *d)
static void mv64x60_unmask_high(struct irq_data *d)
{
- int level2 = irq_map[d->irq].hwirq & MV64x60_LEVEL2_MASK;
+ int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
unsigned long flags;
spin_lock_irqsave(&mv64x60_lock, flags);
@@ -152,7 +152,7 @@ static struct irq_chip mv64x60_chip_high = {
static void mv64x60_mask_gpp(struct irq_data *d)
{
- int level2 = irq_map[d->irq].hwirq & MV64x60_LEVEL2_MASK;
+ int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
unsigned long flags;
spin_lock_irqsave(&mv64x60_lock, flags);
@@ -165,7 +165,7 @@ static void mv64x60_mask_gpp(struct irq_data *d)
static void mv64x60_mask_ack_gpp(struct irq_data *d)
{
- int level2 = irq_map[d->irq].hwirq & MV64x60_LEVEL2_MASK;
+ int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
unsigned long flags;
spin_lock_irqsave(&mv64x60_lock, flags);
@@ -180,7 +180,7 @@ static void mv64x60_mask_ack_gpp(struct irq_data *d)
static void mv64x60_unmask_gpp(struct irq_data *d)
{
- int level2 = irq_map[d->irq].hwirq & MV64x60_LEVEL2_MASK;
+ int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
unsigned long flags;
spin_lock_irqsave(&mv64x60_lock, flags);
@@ -213,11 +213,12 @@ static int mv64x60_host_map(struct irq_host *h, unsigned int virq,
{
int level1;
- irq_to_desc(virq)->status |= IRQ_LEVEL;
+ irq_set_status_flags(virq, IRQ_LEVEL);
level1 = (hwirq & MV64x60_LEVEL1_MASK) >> MV64x60_LEVEL1_OFFSET;
BUG_ON(level1 > MV64x60_LEVEL1_GPP);
- set_irq_chip_and_handler(virq, mv64x60_chips[level1], handle_level_irq);
+ irq_set_chip_and_handler(virq, mv64x60_chips[level1],
+ handle_level_irq);
return 0;
}
diff --git a/arch/powerpc/sysdev/ppc4xx_pci.h b/arch/powerpc/sysdev/ppc4xx_pci.h
index 56d9e5deccbf..c39a134e8684 100644
--- a/arch/powerpc/sysdev/ppc4xx_pci.h
+++ b/arch/powerpc/sysdev/ppc4xx_pci.h
@@ -324,7 +324,7 @@
#define PESDR0_460EX_IHS2 0x036D
/*
- * 460SX addtional DCRs
+ * 460SX additional DCRs
*/
#define PESDRn_460SX_RCEI 0x02
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c
index 8c9ded8ea07c..b2acda07220d 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c
@@ -189,7 +189,7 @@ static inline void qe_ic_write(volatile __be32 __iomem * base, unsigned int reg
static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
{
- return get_irq_chip_data(virq);
+ return irq_get_chip_data(virq);
}
static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d)
@@ -197,12 +197,10 @@ static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d)
return irq_data_get_irq_chip_data(d);
}
-#define virq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq)
-
static void qe_ic_unmask_irq(struct irq_data *d)
{
struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
- unsigned int src = virq_to_hw(d->irq);
+ unsigned int src = irqd_to_hwirq(d);
unsigned long flags;
u32 temp;
@@ -218,7 +216,7 @@ static void qe_ic_unmask_irq(struct irq_data *d)
static void qe_ic_mask_irq(struct irq_data *d)
{
struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
- unsigned int src = virq_to_hw(d->irq);
+ unsigned int src = irqd_to_hwirq(d);
unsigned long flags;
u32 temp;
@@ -267,10 +265,10 @@ static int qe_ic_host_map(struct irq_host *h, unsigned int virq,
/* Default chip */
chip = &qe_ic->hc_irq;
- set_irq_chip_data(virq, qe_ic);
- irq_to_desc(virq)->status |= IRQ_LEVEL;
+ irq_set_chip_data(virq, qe_ic);
+ irq_set_status_flags(virq, IRQ_LEVEL);
- set_irq_chip_and_handler(virq, chip, handle_level_irq);
+ irq_set_chip_and_handler(virq, chip, handle_level_irq);
return 0;
}
@@ -386,13 +384,13 @@ void __init qe_ic_init(struct device_node *node, unsigned int flags,
qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
- set_irq_data(qe_ic->virq_low, qe_ic);
- set_irq_chained_handler(qe_ic->virq_low, low_handler);
+ irq_set_handler_data(qe_ic->virq_low, qe_ic);
+ irq_set_chained_handler(qe_ic->virq_low, low_handler);
if (qe_ic->virq_high != NO_IRQ &&
qe_ic->virq_high != qe_ic->virq_low) {
- set_irq_data(qe_ic->virq_high, qe_ic);
- set_irq_chained_handler(qe_ic->virq_high, high_handler);
+ irq_set_handler_data(qe_ic->virq_high, qe_ic);
+ irq_set_chained_handler(qe_ic->virq_high, high_handler);
}
}
diff --git a/arch/powerpc/sysdev/scom.c b/arch/powerpc/sysdev/scom.c
new file mode 100644
index 000000000000..b2593ce30c9b
--- /dev/null
+++ b/arch/powerpc/sysdev/scom.c
@@ -0,0 +1,192 @@
+/*
+ * Copyright 2010 Benjamin Herrenschmidt, IBM Corp
+ * <benh@kernel.crashing.org>
+ * and David Gibson, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <asm/prom.h>
+#include <asm/scom.h>
+
+const struct scom_controller *scom_controller;
+EXPORT_SYMBOL_GPL(scom_controller);
+
+struct device_node *scom_find_parent(struct device_node *node)
+{
+ struct device_node *par, *tmp;
+ const u32 *p;
+
+ for (par = of_node_get(node); par;) {
+ if (of_get_property(par, "scom-controller", NULL))
+ break;
+ p = of_get_property(par, "scom-parent", NULL);
+ tmp = par;
+ if (p == NULL)
+ par = of_get_parent(par);
+ else
+ par = of_find_node_by_phandle(*p);
+ of_node_put(tmp);
+ }
+ return par;
+}
+EXPORT_SYMBOL_GPL(scom_find_parent);
+
+scom_map_t scom_map_device(struct device_node *dev, int index)
+{
+ struct device_node *parent;
+ unsigned int cells, size;
+ const u32 *prop;
+ u64 reg, cnt;
+ scom_map_t ret;
+
+ parent = scom_find_parent(dev);
+
+ if (parent == NULL)
+ return 0;
+
+ prop = of_get_property(parent, "#scom-cells", NULL);
+ cells = prop ? *prop : 1;
+
+ prop = of_get_property(dev, "scom-reg", &size);
+ if (!prop)
+ return 0;
+ size >>= 2;
+
+ if (index >= (size / (2*cells)))
+ return 0;
+
+ reg = of_read_number(&prop[index * cells * 2], cells);
+ cnt = of_read_number(&prop[index * cells * 2 + cells], cells);
+
+ ret = scom_map(parent, reg, cnt);
+ of_node_put(parent);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(scom_map_device);
+
+#ifdef CONFIG_SCOM_DEBUGFS
+struct scom_debug_entry {
+ struct device_node *dn;
+ unsigned long addr;
+ scom_map_t map;
+ spinlock_t lock;
+ char name[8];
+ struct debugfs_blob_wrapper blob;
+};
+
+static int scom_addr_set(void *data, u64 val)
+{
+ struct scom_debug_entry *ent = data;
+
+ ent->addr = 0;
+ scom_unmap(ent->map);
+
+ ent->map = scom_map(ent->dn, val, 1);
+ if (scom_map_ok(ent->map))
+ ent->addr = val;
+ else
+ return -EFAULT;
+
+ return 0;
+}
+
+static int scom_addr_get(void *data, u64 *val)
+{
+ struct scom_debug_entry *ent = data;
+ *val = ent->addr;
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(scom_addr_fops, scom_addr_get, scom_addr_set,
+ "0x%llx\n");
+
+static int scom_val_set(void *data, u64 val)
+{
+ struct scom_debug_entry *ent = data;
+
+ if (!scom_map_ok(ent->map))
+ return -EFAULT;
+
+ scom_write(ent->map, 0, val);
+
+ return 0;
+}
+
+static int scom_val_get(void *data, u64 *val)
+{
+ struct scom_debug_entry *ent = data;
+
+ if (!scom_map_ok(ent->map))
+ return -EFAULT;
+
+ *val = scom_read(ent->map, 0);
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(scom_val_fops, scom_val_get, scom_val_set,
+ "0x%llx\n");
+
+static int scom_debug_init_one(struct dentry *root, struct device_node *dn,
+ int i)
+{
+ struct scom_debug_entry *ent;
+ struct dentry *dir;
+
+ ent = kzalloc(sizeof(*ent), GFP_KERNEL);
+ if (!ent)
+ return -ENOMEM;
+
+ ent->dn = of_node_get(dn);
+ ent->map = SCOM_MAP_INVALID;
+ spin_lock_init(&ent->lock);
+ snprintf(ent->name, 8, "scom%d", i);
+ ent->blob.data = dn->full_name;
+ ent->blob.size = strlen(dn->full_name);
+
+ dir = debugfs_create_dir(ent->name, root);
+ if (!dir) {
+ of_node_put(dn);
+ kfree(ent);
+ return -1;
+ }
+
+ debugfs_create_file("addr", 0600, dir, ent, &scom_addr_fops);
+ debugfs_create_file("value", 0600, dir, ent, &scom_val_fops);
+ debugfs_create_blob("path", 0400, dir, &ent->blob);
+
+ return 0;
+}
+
+static int scom_debug_init(void)
+{
+ struct device_node *dn;
+ struct dentry *root;
+ int i, rc;
+
+ root = debugfs_create_dir("scom", powerpc_debugfs_root);
+ if (!root)
+ return -1;
+
+ i = rc = 0;
+ for_each_node_with_property(dn, "scom-controller")
+ rc |= scom_debug_init_one(root, dn, i++);
+
+ return rc;
+}
+device_initcall(scom_debug_init);
+#endif /* CONFIG_SCOM_DEBUGFS */
diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c
index 02c91db90037..4d18658116e5 100644
--- a/arch/powerpc/sysdev/tsi108_pci.c
+++ b/arch/powerpc/sysdev/tsi108_pci.c
@@ -391,8 +391,8 @@ static int pci_irq_host_map(struct irq_host *h, unsigned int virq,
DBG("%s(%d, 0x%lx)\n", __func__, virq, hw);
if ((virq >= 1) && (virq <= 4)){
irq = virq + IRQ_PCI_INTAD_BASE - 1;
- irq_to_desc(irq)->status |= IRQ_LEVEL;
- set_irq_chip(irq, &tsi108_pci_irq);
+ irq_set_status_flags(irq, IRQ_LEVEL);
+ irq_set_chip(irq, &tsi108_pci_irq);
}
return 0;
}
@@ -431,7 +431,7 @@ void __init tsi108_pci_int_init(struct device_node *node)
void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = get_pci_source();
if (cascade_irq != NO_IRQ)
diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c
index 835f7958b237..984cd2029158 100644
--- a/arch/powerpc/sysdev/uic.c
+++ b/arch/powerpc/sysdev/uic.c
@@ -41,8 +41,6 @@
#define UIC_VR 0x7
#define UIC_VCR 0x8
-#define uic_irq_to_hw(virq) (irq_map[virq].hwirq)
-
struct uic *primary_uic;
struct uic {
@@ -57,16 +55,15 @@ struct uic {
static void uic_unmask_irq(struct irq_data *d)
{
- struct irq_desc *desc = irq_to_desc(d->irq);
struct uic *uic = irq_data_get_irq_chip_data(d);
- unsigned int src = uic_irq_to_hw(d->irq);
+ unsigned int src = irqd_to_hwirq(d);
unsigned long flags;
u32 er, sr;
sr = 1 << (31-src);
spin_lock_irqsave(&uic->lock, flags);
/* ack level-triggered interrupts here */
- if (desc->status & IRQ_LEVEL)
+ if (irqd_is_level_type(d))
mtdcr(uic->dcrbase + UIC_SR, sr);
er = mfdcr(uic->dcrbase + UIC_ER);
er |= sr;
@@ -77,7 +74,7 @@ static void uic_unmask_irq(struct irq_data *d)
static void uic_mask_irq(struct irq_data *d)
{
struct uic *uic = irq_data_get_irq_chip_data(d);
- unsigned int src = uic_irq_to_hw(d->irq);
+ unsigned int src = irqd_to_hwirq(d);
unsigned long flags;
u32 er;
@@ -91,7 +88,7 @@ static void uic_mask_irq(struct irq_data *d)
static void uic_ack_irq(struct irq_data *d)
{
struct uic *uic = irq_data_get_irq_chip_data(d);
- unsigned int src = uic_irq_to_hw(d->irq);
+ unsigned int src = irqd_to_hwirq(d);
unsigned long flags;
spin_lock_irqsave(&uic->lock, flags);
@@ -101,9 +98,8 @@ static void uic_ack_irq(struct irq_data *d)
static void uic_mask_ack_irq(struct irq_data *d)
{
- struct irq_desc *desc = irq_to_desc(d->irq);
struct uic *uic = irq_data_get_irq_chip_data(d);
- unsigned int src = uic_irq_to_hw(d->irq);
+ unsigned int src = irqd_to_hwirq(d);
unsigned long flags;
u32 er, sr;
@@ -120,7 +116,7 @@ static void uic_mask_ack_irq(struct irq_data *d)
* level interrupts are ack'ed after the actual
* isr call in the uic_unmask_irq()
*/
- if (!(desc->status & IRQ_LEVEL))
+ if (!irqd_is_level_type(d))
mtdcr(uic->dcrbase + UIC_SR, sr);
spin_unlock_irqrestore(&uic->lock, flags);
}
@@ -128,8 +124,7 @@ static void uic_mask_ack_irq(struct irq_data *d)
static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
struct uic *uic = irq_data_get_irq_chip_data(d);
- unsigned int src = uic_irq_to_hw(d->irq);
- struct irq_desc *desc = irq_to_desc(d->irq);
+ unsigned int src = irqd_to_hwirq(d);
unsigned long flags;
int trigger, polarity;
u32 tr, pr, mask;
@@ -166,11 +161,6 @@ static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type)
mtdcr(uic->dcrbase + UIC_PR, pr);
mtdcr(uic->dcrbase + UIC_TR, tr);
- desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
- desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
- if (!trigger)
- desc->status |= IRQ_LEVEL;
-
spin_unlock_irqrestore(&uic->lock, flags);
return 0;
@@ -190,13 +180,13 @@ static int uic_host_map(struct irq_host *h, unsigned int virq,
{
struct uic *uic = h->host_data;
- set_irq_chip_data(virq, uic);
+ irq_set_chip_data(virq, uic);
/* Despite the name, handle_level_irq() works for both level
* and edge irqs on UIC. FIXME: check this is correct */
- set_irq_chip_and_handler(virq, &uic_irq_chip, handle_level_irq);
+ irq_set_chip_and_handler(virq, &uic_irq_chip, handle_level_irq);
/* Set default irq type */
- set_irq_type(virq, IRQ_TYPE_NONE);
+ irq_set_irq_type(virq, IRQ_TYPE_NONE);
return 0;
}
@@ -220,17 +210,18 @@ static struct irq_host_ops uic_host_ops = {
void uic_irq_cascade(unsigned int virq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
- struct uic *uic = get_irq_data(virq);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct irq_data *idata = irq_desc_get_irq_data(desc);
+ struct uic *uic = irq_get_handler_data(virq);
u32 msr;
int src;
int subvirq;
raw_spin_lock(&desc->lock);
- if (desc->status & IRQ_LEVEL)
- chip->irq_mask(&desc->irq_data);
+ if (irqd_is_level_type(idata))
+ chip->irq_mask(idata);
else
- chip->irq_mask_ack(&desc->irq_data);
+ chip->irq_mask_ack(idata);
raw_spin_unlock(&desc->lock);
msr = mfdcr(uic->dcrbase + UIC_MSR);
@@ -244,10 +235,10 @@ void uic_irq_cascade(unsigned int virq, struct irq_desc *desc)
uic_irq_ret:
raw_spin_lock(&desc->lock);
- if (desc->status & IRQ_LEVEL)
- chip->irq_ack(&desc->irq_data);
- if (!(desc->status & IRQ_DISABLED) && chip->irq_unmask)
- chip->irq_unmask(&desc->irq_data);
+ if (irqd_is_level_type(idata))
+ chip->irq_ack(idata);
+ if (!irqd_irq_disabled(idata) && chip->irq_unmask)
+ chip->irq_unmask(idata);
raw_spin_unlock(&desc->lock);
}
@@ -336,8 +327,8 @@ void __init uic_init_tree(void)
cascade_virq = irq_of_parse_and_map(np, 0);
- set_irq_data(cascade_virq, uic);
- set_irq_chained_handler(cascade_virq, uic_irq_cascade);
+ irq_set_handler_data(cascade_virq, uic);
+ irq_set_chained_handler(cascade_virq, uic_irq_cascade);
/* FIXME: setup critical cascade?? */
}
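
For reference, every uic.c hunk above follows the same conversion: the hardware interrupt number now comes from irqd_to_hwirq() and the trigger/level state from irqd_is_level_type(), instead of reading desc->status or calling uic_irq_to_hw(d->irq). Below is a minimal sketch of a chip written directly against the new irq_data accessors; the "foo" controller, its register base and FOO_ER offset are hypothetical, not part of the patch.

#include <linux/irq.h>
#include <linux/io.h>

static void __iomem *foo_regs;		/* assumed ioremap()ed at probe time */
#define FOO_ER	0x00			/* hypothetical enable register */

static void foo_mask_irq(struct irq_data *d)
{
	/* hwirq taken from irq_data, not from d->irq lookups */
	unsigned int src = irqd_to_hwirq(d);
	u32 er = in_be32(foo_regs + FOO_ER);

	out_be32(foo_regs + FOO_ER, er & ~(1u << src));
}

static void foo_unmask_irq(struct irq_data *d)
{
	unsigned int src = irqd_to_hwirq(d);
	u32 er = in_be32(foo_regs + FOO_ER);

	out_be32(foo_regs + FOO_ER, er | (1u << src));
}

static struct irq_chip foo_irq_chip = {
	.name		= "FOO",
	.irq_mask	= foo_mask_irq,
	.irq_unmask	= foo_unmask_irq,
};
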
diff --git a/arch/powerpc/sysdev/xics/Kconfig b/arch/powerpc/sysdev/xics/Kconfig
new file mode 100644
index 000000000000..0031eda320c3
--- /dev/null
+++ b/arch/powerpc/sysdev/xics/Kconfig
@@ -0,0 +1,13 @@
+config PPC_XICS
+ def_bool n
+ select PPC_SMP_MUXED_IPI
+
+config PPC_ICP_NATIVE
+ def_bool n
+
+config PPC_ICP_HV
+ def_bool n
+
+config PPC_ICS_RTAS
+ def_bool n
+
diff --git a/arch/powerpc/sysdev/xics/Makefile b/arch/powerpc/sysdev/xics/Makefile
new file mode 100644
index 000000000000..b75a6059337f
--- /dev/null
+++ b/arch/powerpc/sysdev/xics/Makefile
@@ -0,0 +1,6 @@
+subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
+
+obj-y += xics-common.o
+obj-$(CONFIG_PPC_ICP_NATIVE) += icp-native.o
+obj-$(CONFIG_PPC_ICP_HV) += icp-hv.o
+obj-$(CONFIG_PPC_ICS_RTAS) += ics-rtas.o
diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c
new file mode 100644
index 000000000000..9518d367a64f
--- /dev/null
+++ b/arch/powerpc/sysdev/xics/icp-hv.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2011 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+
+#include <asm/smp.h>
+#include <asm/irq.h>
+#include <asm/errno.h>
+#include <asm/xics.h>
+#include <asm/io.h>
+#include <asm/hvcall.h>
+
+static inline unsigned int icp_hv_get_xirr(unsigned char cppr)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ rc = plpar_hcall(H_XIRR, retbuf, cppr);
+ if (rc != H_SUCCESS)
+ panic(" bad return code xirr - rc = %lx\n", rc);
+ return (unsigned int)retbuf[0];
+}
+
+static inline void icp_hv_set_xirr(unsigned int value)
+{
+ long rc = plpar_hcall_norets(H_EOI, value);
+ if (rc != H_SUCCESS)
+ panic("bad return code EOI - rc = %ld, value=%x\n", rc, value);
+}
+
+static inline void icp_hv_set_cppr(u8 value)
+{
+ long rc = plpar_hcall_norets(H_CPPR, value);
+ if (rc != H_SUCCESS)
+ panic("bad return code cppr - rc = %lx\n", rc);
+}
+
+static inline void icp_hv_set_qirr(int n_cpu, u8 value)
+{
+ long rc = plpar_hcall_norets(H_IPI, get_hard_smp_processor_id(n_cpu),
+ value);
+ if (rc != H_SUCCESS)
+ panic("bad return code qirr - rc = %lx\n", rc);
+}
+
+static void icp_hv_eoi(struct irq_data *d)
+{
+ unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
+
+ iosync();
+ icp_hv_set_xirr((xics_pop_cppr() << 24) | hw_irq);
+}
+
+static void icp_hv_teardown_cpu(void)
+{
+ int cpu = smp_processor_id();
+
+ /* Clear any pending IPI */
+ icp_hv_set_qirr(cpu, 0xff);
+}
+
+static void icp_hv_flush_ipi(void)
+{
+ /* We take the IPI irq but never return, so we
+ * need to EOI the IPI, but want to leave our priority 0.
+ *
+ * should we check all the other interrupts too?
+ * should we be flagging idle loop instead?
+ * or creating some task to be scheduled?
+ */
+
+ icp_hv_set_xirr((0x00 << 24) | XICS_IPI);
+}
+
+static unsigned int icp_hv_get_irq(void)
+{
+ unsigned int xirr = icp_hv_get_xirr(xics_cppr_top());
+ unsigned int vec = xirr & 0x00ffffff;
+ unsigned int irq;
+
+ if (vec == XICS_IRQ_SPURIOUS)
+ return NO_IRQ;
+
+ irq = irq_radix_revmap_lookup(xics_host, vec);
+ if (likely(irq != NO_IRQ)) {
+ xics_push_cppr(vec);
+ return irq;
+ }
+
+ /* We don't have a linux mapping, so have rtas mask it. */
+ xics_mask_unknown_vec(vec);
+
+ /* We might learn about it later, so EOI it */
+ icp_hv_set_xirr(xirr);
+
+ return NO_IRQ;
+}
+
+static void icp_hv_set_cpu_priority(unsigned char cppr)
+{
+ xics_set_base_cppr(cppr);
+ icp_hv_set_cppr(cppr);
+ iosync();
+}
+
+#ifdef CONFIG_SMP
+
+static void icp_hv_cause_ipi(int cpu, unsigned long data)
+{
+ icp_hv_set_qirr(cpu, IPI_PRIORITY);
+}
+
+static irqreturn_t icp_hv_ipi_action(int irq, void *dev_id)
+{
+ int cpu = smp_processor_id();
+
+ icp_hv_set_qirr(cpu, 0xff);
+
+ return smp_ipi_demux();
+}
+
+#endif /* CONFIG_SMP */
+
+static const struct icp_ops icp_hv_ops = {
+ .get_irq = icp_hv_get_irq,
+ .eoi = icp_hv_eoi,
+ .set_priority = icp_hv_set_cpu_priority,
+ .teardown_cpu = icp_hv_teardown_cpu,
+ .flush_ipi = icp_hv_flush_ipi,
+#ifdef CONFIG_SMP
+ .ipi_action = icp_hv_ipi_action,
+ .cause_ipi = icp_hv_cause_ipi,
+#endif
+};
+
+int icp_hv_init(void)
+{
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xicp");
+ if (!np)
+ np = of_find_node_by_type(NULL,
+ "PowerPC-External-Interrupt-Presentation");
+ if (!np)
+ return -ENODEV;
+
+ icp_ops = &icp_hv_ops;
+
+ return 0;
+}
+
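
Both ICP flavours in this patch treat the XIRR as a packed 32-bit value: the top byte is the CPPR (current processor priority) and the low 24 bits are the interrupt source, which is why the EOI paths write back (xics_pop_cppr() << 24) | hw_irq. A standalone sketch of that packing, with hypothetical helper names:

#include <linux/types.h>

#define XIRR_CPPR_SHIFT	24
#define XIRR_VEC_MASK	0x00ffffffu

/* hypothetical helpers, not part of the patch */
static inline u32 xirr_pack(u8 cppr, u32 vec)
{
	return ((u32)cppr << XIRR_CPPR_SHIFT) | (vec & XIRR_VEC_MASK);
}

static inline u8 xirr_cppr(u32 xirr)
{
	return xirr >> XIRR_CPPR_SHIFT;
}

static inline u32 xirr_vec(u32 xirr)
{
	return xirr & XIRR_VEC_MASK;
}

An EOI is then xirr_pack(popped_cppr, hw_irq), matching icp_hv_eoi() above and icp_native_eoi() below.
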
diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c
new file mode 100644
index 000000000000..1f15ad436140
--- /dev/null
+++ b/arch/powerpc/sysdev/xics/icp-native.c
@@ -0,0 +1,293 @@
+/*
+ * Copyright 2011 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/spinlock.h>
+
+#include <asm/prom.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/irq.h>
+#include <asm/errno.h>
+#include <asm/xics.h>
+
+struct icp_ipl {
+ union {
+ u32 word;
+ u8 bytes[4];
+ } xirr_poll;
+ union {
+ u32 word;
+ u8 bytes[4];
+ } xirr;
+ u32 dummy;
+ union {
+ u32 word;
+ u8 bytes[4];
+ } qirr;
+ u32 link_a;
+ u32 link_b;
+ u32 link_c;
+};
+
+static struct icp_ipl __iomem *icp_native_regs[NR_CPUS];
+
+static inline unsigned int icp_native_get_xirr(void)
+{
+ int cpu = smp_processor_id();
+
+ return in_be32(&icp_native_regs[cpu]->xirr.word);
+}
+
+static inline void icp_native_set_xirr(unsigned int value)
+{
+ int cpu = smp_processor_id();
+
+ out_be32(&icp_native_regs[cpu]->xirr.word, value);
+}
+
+static inline void icp_native_set_cppr(u8 value)
+{
+ int cpu = smp_processor_id();
+
+ out_8(&icp_native_regs[cpu]->xirr.bytes[0], value);
+}
+
+static inline void icp_native_set_qirr(int n_cpu, u8 value)
+{
+ out_8(&icp_native_regs[n_cpu]->qirr.bytes[0], value);
+}
+
+static void icp_native_set_cpu_priority(unsigned char cppr)
+{
+ xics_set_base_cppr(cppr);
+ icp_native_set_cppr(cppr);
+ iosync();
+}
+
+static void icp_native_eoi(struct irq_data *d)
+{
+ unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
+
+ iosync();
+ icp_native_set_xirr((xics_pop_cppr() << 24) | hw_irq);
+}
+
+static void icp_native_teardown_cpu(void)
+{
+ int cpu = smp_processor_id();
+
+ /* Clear any pending IPI */
+ icp_native_set_qirr(cpu, 0xff);
+}
+
+static void icp_native_flush_ipi(void)
+{
+ /* We take the IPI irq but never return, so we
+ * need to EOI the IPI, but want to leave our priority 0.
+ *
+ * should we check all the other interrupts too?
+ * should we be flagging idle loop instead?
+ * or creating some task to be scheduled?
+ */
+
+ icp_native_set_xirr((0x00 << 24) | XICS_IPI);
+}
+
+static unsigned int icp_native_get_irq(void)
+{
+ unsigned int xirr = icp_native_get_xirr();
+ unsigned int vec = xirr & 0x00ffffff;
+ unsigned int irq;
+
+ if (vec == XICS_IRQ_SPURIOUS)
+ return NO_IRQ;
+
+ irq = irq_radix_revmap_lookup(xics_host, vec);
+ if (likely(irq != NO_IRQ)) {
+ xics_push_cppr(vec);
+ return irq;
+ }
+
+ /* We don't have a linux mapping, so have rtas mask it. */
+ xics_mask_unknown_vec(vec);
+
+ /* We might learn about it later, so EOI it */
+ icp_native_set_xirr(xirr);
+
+ return NO_IRQ;
+}
+
+#ifdef CONFIG_SMP
+
+static void icp_native_cause_ipi(int cpu, unsigned long data)
+{
+ icp_native_set_qirr(cpu, IPI_PRIORITY);
+}
+
+static irqreturn_t icp_native_ipi_action(int irq, void *dev_id)
+{
+ int cpu = smp_processor_id();
+
+ icp_native_set_qirr(cpu, 0xff);
+
+ return smp_ipi_demux();
+}
+
+#endif /* CONFIG_SMP */
+
+static int __init icp_native_map_one_cpu(int hw_id, unsigned long addr,
+ unsigned long size)
+{
+ char *rname;
+ int i, cpu = -1;
+
+ /* This may look gross but it's good enough for now; we don't quite
+ * have a hard -> linux processor id mapping.
+ */
+ for_each_possible_cpu(i) {
+ if (!cpu_present(i))
+ continue;
+ if (hw_id == get_hard_smp_processor_id(i)) {
+ cpu = i;
+ break;
+ }
+ }
+
+ /* No match, skip that CPU. Don't print an error; it's normal, some
+ * XICS come up with way more entries in there than you have CPUs.
+ */
+ if (cpu == -1)
+ return 0;
+
+ rname = kasprintf(GFP_KERNEL, "CPU %d [0x%x] Interrupt Presentation",
+ cpu, hw_id);
+
+ if (!request_mem_region(addr, size, rname)) {
+ pr_warning("icp_native: Could not reserve ICP MMIO"
+ " for CPU %d, interrupt server #0x%x\n",
+ cpu, hw_id);
+ return -EBUSY;
+ }
+
+ icp_native_regs[cpu] = ioremap(addr, size);
+ if (!icp_native_regs[cpu]) {
+ pr_warning("icp_native: Failed ioremap for CPU %d, "
+ "interrupt server #0x%x, addr %#lx\n",
+ cpu, hw_id, addr);
+ release_mem_region(addr, size);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int __init icp_native_init_one_node(struct device_node *np,
+ unsigned int *indx)
+{
+ unsigned int ilen;
+ const u32 *ireg;
+ int i;
+ int reg_tuple_size;
+ int num_servers = 0;
+
+ /* This code makes the theoretically broken assumption that the interrupt
+ * server numbers are the same as the hard CPU numbers.
+ * This happens to be the case so far but we are playing with fire...
+ * should be fixed one of these days. -BenH.
+ */
+ ireg = of_get_property(np, "ibm,interrupt-server-ranges", &ilen);
+
+ /* Does that ever happen? We'll know soon enough... but even good old
+ * f80 does have that property.
+ */
+ WARN_ON((ireg == NULL) || (ilen != 2*sizeof(u32)));
+
+ if (ireg) {
+ *indx = of_read_number(ireg, 1);
+ if (ilen >= 2*sizeof(u32))
+ num_servers = of_read_number(ireg + 1, 1);
+ }
+
+ ireg = of_get_property(np, "reg", &ilen);
+ if (!ireg) {
+ pr_err("icp_native: Can't find interrupt reg property");
+ return -1;
+ }
+
+ reg_tuple_size = (of_n_addr_cells(np) + of_n_size_cells(np)) * 4;
+ if (((ilen % reg_tuple_size) != 0)
+ || (num_servers && (num_servers != (ilen / reg_tuple_size)))) {
+ pr_err("icp_native: ICP reg len (%d) != num servers (%d)",
+ ilen / reg_tuple_size, num_servers);
+ return -1;
+ }
+
+ for (i = 0; i < (ilen / reg_tuple_size); i++) {
+ struct resource r;
+ int err;
+
+ err = of_address_to_resource(np, i, &r);
+ if (err) {
+ pr_err("icp_native: Could not translate ICP MMIO"
+ " for interrupt server 0x%x (%d)\n", *indx, err);
+ return -1;
+ }
+
+ if (icp_native_map_one_cpu(*indx, r.start, r.end - r.start))
+ return -1;
+
+ (*indx)++;
+ }
+ return 0;
+}
+
+static const struct icp_ops icp_native_ops = {
+ .get_irq = icp_native_get_irq,
+ .eoi = icp_native_eoi,
+ .set_priority = icp_native_set_cpu_priority,
+ .teardown_cpu = icp_native_teardown_cpu,
+ .flush_ipi = icp_native_flush_ipi,
+#ifdef CONFIG_SMP
+ .ipi_action = icp_native_ipi_action,
+ .cause_ipi = icp_native_cause_ipi,
+#endif
+};
+
+int icp_native_init(void)
+{
+ struct device_node *np;
+ u32 indx = 0;
+ int found = 0;
+
+ for_each_compatible_node(np, NULL, "ibm,ppc-xicp")
+ if (icp_native_init_one_node(np, &indx) == 0)
+ found = 1;
+ if (!found) {
+ for_each_node_by_type(np,
+ "PowerPC-External-Interrupt-Presentation") {
+ if (icp_native_init_one_node(np, &indx) == 0)
+ found = 1;
+ }
+ }
+
+ if (found == 0)
+ return -ENODEV;
+
+ icp_ops = &icp_native_ops;
+
+ return 0;
+}
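
A note on the byte-wide accesses above: the ICP registers are big-endian MMIO, so byte 0 of the 32-bit XIRR word is its most significant byte, i.e. the CPPR. Writing only that byte changes the priority without issuing an EOI, while a full 32-bit store to the same word is the EOI path. A minimal sketch under those assumptions ("regs" stands for a per-cpu icp_native_regs[] entry; the offset 4 mirrors struct icp_ipl above):

#include <linux/types.h>
#include <asm/io.h>

static void example_set_priority(void __iomem *regs, u8 cppr)
{
	/* MSB of the big-endian XIRR word is the CPPR */
	out_8(regs + 4, cppr);
}

static void example_eoi(void __iomem *regs, u8 cppr, u32 hw_irq)
{
	/* full-word write = EOI: restore priority and name the source */
	out_be32(regs + 4, ((u32)cppr << 24) | (hw_irq & 0x00ffffff));
}
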
diff --git a/arch/powerpc/sysdev/xics/ics-rtas.c b/arch/powerpc/sysdev/xics/ics-rtas.c
new file mode 100644
index 000000000000..c782f85cf7e4
--- /dev/null
+++ b/arch/powerpc/sysdev/xics/ics-rtas.c
@@ -0,0 +1,240 @@
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/spinlock.h>
+#include <linux/msi.h>
+
+#include <asm/prom.h>
+#include <asm/smp.h>
+#include <asm/machdep.h>
+#include <asm/irq.h>
+#include <asm/errno.h>
+#include <asm/xics.h>
+#include <asm/rtas.h>
+
+/* RTAS service tokens */
+static int ibm_get_xive;
+static int ibm_set_xive;
+static int ibm_int_on;
+static int ibm_int_off;
+
+static int ics_rtas_map(struct ics *ics, unsigned int virq);
+static void ics_rtas_mask_unknown(struct ics *ics, unsigned long vec);
+static long ics_rtas_get_server(struct ics *ics, unsigned long vec);
+static int ics_rtas_host_match(struct ics *ics, struct device_node *node);
+
+/* Only one global & state struct ics */
+static struct ics ics_rtas = {
+ .map = ics_rtas_map,
+ .mask_unknown = ics_rtas_mask_unknown,
+ .get_server = ics_rtas_get_server,
+ .host_match = ics_rtas_host_match,
+};
+
+static void ics_rtas_unmask_irq(struct irq_data *d)
+{
+ unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
+ int call_status;
+ int server;
+
+ pr_devel("xics: unmask virq %d [hw 0x%x]\n", d->irq, hw_irq);
+
+ if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
+ return;
+
+ server = xics_get_irq_server(d->irq, d->affinity, 0);
+
+ call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq, server,
+ DEFAULT_PRIORITY);
+ if (call_status != 0) {
+ printk(KERN_ERR
+ "%s: ibm_set_xive irq %u server %x returned %d\n",
+ __func__, hw_irq, server, call_status);
+ return;
+ }
+
+ /* Now unmask the interrupt (often a no-op) */
+ call_status = rtas_call(ibm_int_on, 1, 1, NULL, hw_irq);
+ if (call_status != 0) {
+ printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n",
+ __func__, hw_irq, call_status);
+ return;
+ }
+}
+
+static unsigned int ics_rtas_startup(struct irq_data *d)
+{
+#ifdef CONFIG_PCI_MSI
+ /*
+ * The generic MSI code returns with the interrupt disabled on the
+ * card, using the MSI mask bits. Firmware doesn't appear to unmask
+ * at that level, so we do it here by hand.
+ */
+ if (d->msi_desc)
+ unmask_msi_irq(d);
+#endif
+ /* unmask it */
+ ics_rtas_unmask_irq(d);
+ return 0;
+}
+
+static void ics_rtas_mask_real_irq(unsigned int hw_irq)
+{
+ int call_status;
+
+ if (hw_irq == XICS_IPI)
+ return;
+
+ call_status = rtas_call(ibm_int_off, 1, 1, NULL, hw_irq);
+ if (call_status != 0) {
+ printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n",
+ __func__, hw_irq, call_status);
+ return;
+ }
+
+ /* Have to set XIVE to 0xff to be able to remove a slot */
+ call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq,
+ xics_default_server, 0xff);
+ if (call_status != 0) {
+ printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n",
+ __func__, hw_irq, call_status);
+ return;
+ }
+}
+
+static void ics_rtas_mask_irq(struct irq_data *d)
+{
+ unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
+
+ pr_devel("xics: mask virq %d [hw 0x%x]\n", d->irq, hw_irq);
+
+ if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
+ return;
+ ics_rtas_mask_real_irq(hw_irq);
+}
+
+static int ics_rtas_set_affinity(struct irq_data *d,
+ const struct cpumask *cpumask,
+ bool force)
+{
+ unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
+ int status;
+ int xics_status[2];
+ int irq_server;
+
+ if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
+ return -1;
+
+ status = rtas_call(ibm_get_xive, 1, 3, xics_status, hw_irq);
+
+ if (status) {
+ printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n",
+ __func__, hw_irq, status);
+ return -1;
+ }
+
+ irq_server = xics_get_irq_server(d->irq, cpumask, 1);
+ if (irq_server == -1) {
+ char cpulist[128];
+ cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
+ printk(KERN_WARNING
+ "%s: No online cpus in the mask %s for irq %d\n",
+ __func__, cpulist, d->irq);
+ return -1;
+ }
+
+ status = rtas_call(ibm_set_xive, 3, 1, NULL,
+ hw_irq, irq_server, xics_status[1]);
+
+ if (status) {
+ printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n",
+ __func__, hw_irq, status);
+ return -1;
+ }
+
+ return IRQ_SET_MASK_OK;
+}
+
+static struct irq_chip ics_rtas_irq_chip = {
+ .name = "XICS",
+ .irq_startup = ics_rtas_startup,
+ .irq_mask = ics_rtas_mask_irq,
+ .irq_unmask = ics_rtas_unmask_irq,
+ .irq_eoi = NULL, /* Patched at init time */
+ .irq_set_affinity = ics_rtas_set_affinity
+};
+
+static int ics_rtas_map(struct ics *ics, unsigned int virq)
+{
+ unsigned int hw_irq = (unsigned int)virq_to_hw(virq);
+ int status[2];
+ int rc;
+
+ if (WARN_ON(hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS))
+ return -EINVAL;
+
+ /* Check if RTAS knows about this interrupt */
+ rc = rtas_call(ibm_get_xive, 1, 3, status, hw_irq);
+ if (rc)
+ return -ENXIO;
+
+ irq_set_chip_and_handler(virq, &ics_rtas_irq_chip, handle_fasteoi_irq);
+ irq_set_chip_data(virq, &ics_rtas);
+
+ return 0;
+}
+
+static void ics_rtas_mask_unknown(struct ics *ics, unsigned long vec)
+{
+ ics_rtas_mask_real_irq(vec);
+}
+
+static long ics_rtas_get_server(struct ics *ics, unsigned long vec)
+{
+ int rc, status[2];
+
+ rc = rtas_call(ibm_get_xive, 1, 3, status, vec);
+ if (rc)
+ return -1;
+ return status[0];
+}
+
+static int ics_rtas_host_match(struct ics *ics, struct device_node *node)
+{
+ /* IBM machines have interrupt parents of various funky types for things
+ * like vdevices, events, etc... The trick we use here is to match
+ * everything except the legacy 8259, which has compatible "chrp,iic"
+ */
+ return !of_device_is_compatible(node, "chrp,iic");
+}
+
+int ics_rtas_init(void)
+{
+ ibm_get_xive = rtas_token("ibm,get-xive");
+ ibm_set_xive = rtas_token("ibm,set-xive");
+ ibm_int_on = rtas_token("ibm,int-on");
+ ibm_int_off = rtas_token("ibm,int-off");
+
+ /* We enable the RTAS "ICS" if RTAS is present with the
+ * appropriate tokens
+ */
+ if (ibm_get_xive == RTAS_UNKNOWN_SERVICE ||
+ ibm_set_xive == RTAS_UNKNOWN_SERVICE)
+ return -ENODEV;
+
+ /* We need to patch our irq chip's EOI to point to the
+ * right ICP
+ */
+ ics_rtas_irq_chip.irq_eoi = icp_ops->eoi;
+
+ /* Register ourselves */
+ xics_register_ics(&ics_rtas);
+
+ return 0;
+}
+
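
The RTAS source controller above manipulates each interrupt's XIVE, i.e. its (server, priority) pair: ibm,get-xive returns both, ibm,set-xive rewrites them, and a priority of 0xff effectively masks the source. A hedged sketch of that call pattern, assuming the tokens were already looked up as in ics_rtas_init():

#include <linux/errno.h>
#include <asm/rtas.h>

/* sketch only: move hw_irq to a new server while keeping its priority */
static int example_move_irq(int ibm_get_xive, int ibm_set_xive,
			    unsigned int hw_irq, int new_server)
{
	int xive[2];	/* [0] = server, [1] = priority */
	int rc;

	rc = rtas_call(ibm_get_xive, 1, 3, xive, hw_irq);
	if (rc)
		return -ENXIO;

	rc = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq, new_server, xive[1]);
	if (rc)
		return -EIO;

	return 0;
}
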
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
new file mode 100644
index 000000000000..445c5a01b766
--- /dev/null
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -0,0 +1,443 @@
+/*
+ * Copyright 2011 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#include <linux/types.h>
+#include <linux/threads.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/debugfs.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/seq_file.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <asm/prom.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/machdep.h>
+#include <asm/irq.h>
+#include <asm/errno.h>
+#include <asm/rtas.h>
+#include <asm/xics.h>
+#include <asm/firmware.h>
+
+/* Globals common to all ICP/ICS implementations */
+const struct icp_ops *icp_ops;
+
+unsigned int xics_default_server = 0xff;
+unsigned int xics_default_distrib_server = 0;
+unsigned int xics_interrupt_server_size = 8;
+
+DEFINE_PER_CPU(struct xics_cppr, xics_cppr);
+
+struct irq_host *xics_host;
+
+static LIST_HEAD(ics_list);
+
+void xics_update_irq_servers(void)
+{
+ int i, j;
+ struct device_node *np;
+ u32 ilen;
+ const u32 *ireg;
+ u32 hcpuid;
+
+ /* Find the server numbers for the boot cpu. */
+ np = of_get_cpu_node(boot_cpuid, NULL);
+ BUG_ON(!np);
+
+ hcpuid = get_hard_smp_processor_id(boot_cpuid);
+ xics_default_server = xics_default_distrib_server = hcpuid;
+
+ pr_devel("xics: xics_default_server = 0x%x\n", xics_default_server);
+
+ ireg = of_get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen);
+ if (!ireg) {
+ of_node_put(np);
+ return;
+ }
+
+ i = ilen / sizeof(int);
+
+ /* Global interrupt distribution server is specified in the last
+ * entry of "ibm,ppc-interrupt-gserver#s" property. Get the last
+ * entry from this property for the current boot cpu id and use it as
+ * default distribution server
+ */
+ for (j = 0; j < i; j += 2) {
+ if (ireg[j] == hcpuid) {
+ xics_default_distrib_server = ireg[j+1];
+ break;
+ }
+ }
+ pr_devel("xics: xics_default_distrib_server = 0x%x\n",
+ xics_default_distrib_server);
+ of_node_put(np);
+}
+
+/* GIQ stuff, currently only supported on RTAS setups, will have
+ * to be sorted properly for bare metal
+ */
+void xics_set_cpu_giq(unsigned int gserver, unsigned int join)
+{
+#ifdef CONFIG_PPC_RTAS
+ int index;
+ int status;
+
+ if (!rtas_indicator_present(GLOBAL_INTERRUPT_QUEUE, NULL))
+ return;
+
+ index = (1UL << xics_interrupt_server_size) - 1 - gserver;
+
+ status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE, index, join);
+
+ WARN(status < 0, "set-indicator(%d, %d, %u) returned %d\n",
+ GLOBAL_INTERRUPT_QUEUE, index, join, status);
+#endif
+}
+
+void xics_setup_cpu(void)
+{
+ icp_ops->set_priority(LOWEST_PRIORITY);
+
+ xics_set_cpu_giq(xics_default_distrib_server, 1);
+}
+
+void xics_mask_unknown_vec(unsigned int vec)
+{
+ struct ics *ics;
+
+ pr_err("Interrupt 0x%x (real) is invalid, disabling it.\n", vec);
+
+ list_for_each_entry(ics, &ics_list, link)
+ ics->mask_unknown(ics, vec);
+}
+
+
+#ifdef CONFIG_SMP
+
+static void xics_request_ipi(void)
+{
+ unsigned int ipi;
+
+ ipi = irq_create_mapping(xics_host, XICS_IPI);
+ BUG_ON(ipi == NO_IRQ);
+
+ /*
+ * IPIs are marked IRQF_DISABLED as they must run with irqs
+ * disabled, and PERCPU. The handler was set in map.
+ */
+ BUG_ON(request_irq(ipi, icp_ops->ipi_action,
+ IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL));
+}
+
+int __init xics_smp_probe(void)
+{
+ /* Setup cause_ipi callback based on which ICP is used */
+ smp_ops->cause_ipi = icp_ops->cause_ipi;
+
+ /* Register all the IPIs */
+ xics_request_ipi();
+
+ return cpumask_weight(cpu_possible_mask);
+}
+
+#endif /* CONFIG_SMP */
+
+void xics_teardown_cpu(void)
+{
+ struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+
+ /*
+ * we have to reset the cppr index to 0 because we're
+ * not going to return from the IPI
+ */
+ os_cppr->index = 0;
+ icp_ops->set_priority(0);
+ icp_ops->teardown_cpu();
+}
+
+void xics_kexec_teardown_cpu(int secondary)
+{
+ xics_teardown_cpu();
+
+ icp_ops->flush_ipi();
+
+ /*
+ * Some machines need to have at least one cpu in the GIQ,
+ * so leave the master cpu in the group.
+ */
+ if (secondary)
+ xics_set_cpu_giq(xics_default_distrib_server, 0);
+}
+
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/* Interrupts are disabled. */
+void xics_migrate_irqs_away(void)
+{
+ int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
+ unsigned int irq, virq;
+
+ /* If we used to be the default server, move to the new "boot_cpuid" */
+ if (hw_cpu == xics_default_server)
+ xics_update_irq_servers();
+
+ /* Reject any interrupt that was queued to us... */
+ icp_ops->set_priority(0);
+
+ /* Remove ourselves from the global interrupt queue */
+ xics_set_cpu_giq(xics_default_distrib_server, 0);
+
+ /* Allow IPIs again... */
+ icp_ops->set_priority(DEFAULT_PRIORITY);
+
+ for_each_irq(virq) {
+ struct irq_desc *desc;
+ struct irq_chip *chip;
+ long server;
+ unsigned long flags;
+ struct ics *ics;
+
+ /* We can't set affinity on ISA interrupts */
+ if (virq < NUM_ISA_INTERRUPTS)
+ continue;
+ if (!virq_is_host(virq, xics_host))
+ continue;
+ irq = (unsigned int)virq_to_hw(virq);
+ /* We need to get IPIs still. */
+ if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
+ continue;
+ desc = irq_to_desc(virq);
+ /* We only need to migrate enabled IRQS */
+ if (!desc || !desc->action)
+ continue;
+ chip = irq_desc_get_chip(desc);
+ if (!chip || !chip->irq_set_affinity)
+ continue;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+
+ /* Locate interrupt server */
+ server = -1;
+ ics = irq_get_chip_data(virq);
+ if (ics)
+ server = ics->get_server(ics, irq);
+ if (server < 0) {
+ printk(KERN_ERR "%s: Can't find server for irq %d\n",
+ __func__, irq);
+ goto unlock;
+ }
+
+ /* We only support delivery to all cpus or to one cpu.
+ * The irq has to be migrated only in the single cpu
+ * case.
+ */
+ if (server != hw_cpu)
+ goto unlock;
+
+ /* This is expected during cpu offline. */
+ if (cpu_online(cpu))
+ pr_warning("IRQ %u affinity broken off cpu %u\n",
+ virq, cpu);
+
+ /* Reset affinity to all cpus */
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ irq_set_affinity(virq, cpu_all_mask);
+ continue;
+unlock:
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ }
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+#ifdef CONFIG_SMP
+/*
+ * For the moment we only implement delivery to all cpus or one cpu.
+ *
+ * If the requested affinity is cpu_all_mask, we set global affinity.
+ * If not we set it to the first cpu in the mask, even if multiple cpus
+ * are set. This is so things like irqbalance (which set core and package
+ * wide affinities) do the right thing.
+ *
+ * We need to fix this to implement support for the links
+ */
+int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask,
+ unsigned int strict_check)
+{
+
+ if (!distribute_irqs)
+ return xics_default_server;
+
+ if (!cpumask_subset(cpu_possible_mask, cpumask)) {
+ int server = cpumask_first_and(cpu_online_mask, cpumask);
+
+ if (server < nr_cpu_ids)
+ return get_hard_smp_processor_id(server);
+
+ if (strict_check)
+ return -1;
+ }
+
+ /*
+ * Workaround issue with some versions of JS20 firmware that
+ * deliver interrupts to cpus which haven't been started. This
+ * happens when using the maxcpus= boot option.
+ */
+ if (cpumask_equal(cpu_online_mask, cpu_present_mask))
+ return xics_default_distrib_server;
+
+ return xics_default_server;
+}
+#endif /* CONFIG_SMP */
+
+static int xics_host_match(struct irq_host *h, struct device_node *node)
+{
+ struct ics *ics;
+
+ list_for_each_entry(ics, &ics_list, link)
+ if (ics->host_match(ics, node))
+ return 1;
+
+ return 0;
+}
+
+/* Dummies */
+static void xics_ipi_unmask(struct irq_data *d) { }
+static void xics_ipi_mask(struct irq_data *d) { }
+
+static struct irq_chip xics_ipi_chip = {
+ .name = "XICS",
+ .irq_eoi = NULL, /* Patched at init time */
+ .irq_mask = xics_ipi_mask,
+ .irq_unmask = xics_ipi_unmask,
+};
+
+static int xics_host_map(struct irq_host *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct ics *ics;
+
+ pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw);
+
+ /* Insert the interrupt mapping into the radix tree for fast lookup */
+ irq_radix_revmap_insert(xics_host, virq, hw);
+
+ /* They aren't all level sensitive but we just don't really know */
+ irq_set_status_flags(virq, IRQ_LEVEL);
+
+ /* Don't call into ICS for IPIs */
+ if (hw == XICS_IPI) {
+ irq_set_chip_and_handler(virq, &xics_ipi_chip,
+ handle_percpu_irq);
+ return 0;
+ }
+
+ /* Let the ICS setup the chip data */
+ list_for_each_entry(ics, &ics_list, link)
+ if (ics->map(ics, virq) == 0)
+ return 0;
+
+ return -EINVAL;
+}
+
+static int xics_host_xlate(struct irq_host *h, struct device_node *ct,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_flags)
+
+{
+ /* Current xics implementation translates everything
+ * to level. It is not technically right for MSIs but this
+ * is irrelevant at this point. We might get smarter in the future
+ */
+ *out_hwirq = intspec[0];
+ *out_flags = IRQ_TYPE_LEVEL_LOW;
+
+ return 0;
+}
+
+static struct irq_host_ops xics_host_ops = {
+ .match = xics_host_match,
+ .map = xics_host_map,
+ .xlate = xics_host_xlate,
+};
+
+static void __init xics_init_host(void)
+{
+ xics_host = irq_alloc_host(NULL, IRQ_HOST_MAP_TREE, 0, &xics_host_ops,
+ XICS_IRQ_SPURIOUS);
+ BUG_ON(xics_host == NULL);
+ irq_set_default_host(xics_host);
+}
+
+void __init xics_register_ics(struct ics *ics)
+{
+ list_add(&ics->link, &ics_list);
+}
+
+static void __init xics_get_server_size(void)
+{
+ struct device_node *np;
+ const u32 *isize;
+
+ /* We fetch the interrupt server size from the first ICS node
+ * we find if any
+ */
+ np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xics");
+ if (!np)
+ return;
+ isize = of_get_property(np, "ibm,interrupt-server#-size", NULL);
+ if (!isize)
+ return;
+ xics_interrupt_server_size = *isize;
+ of_node_put(np);
+}
+
+void __init xics_init(void)
+{
+ int rc = -1;
+
+ /* First locate ICP */
+#ifdef CONFIG_PPC_ICP_HV
+ if (firmware_has_feature(FW_FEATURE_LPAR))
+ rc = icp_hv_init();
+#endif
+#ifdef CONFIG_PPC_ICP_NATIVE
+ if (rc < 0)
+ rc = icp_native_init();
+#endif
+ if (rc < 0) {
+ pr_warning("XICS: Cannot find a Presentation Controller !\n");
+ return;
+ }
+
+ /* Copy get_irq callback over to ppc_md */
+ ppc_md.get_irq = icp_ops->get_irq;
+
+ /* Patch up IPI chip EOI */
+ xics_ipi_chip.irq_eoi = icp_ops->eoi;
+
+ /* Now locate ICS */
+#ifdef CONFIG_PPC_ICS_RTAS
+ rc = ics_rtas_init();
+#endif
+ if (rc < 0)
+ pr_warning("XICS: Cannot find a Source Controller !\n");
+
+ /* Initialize common bits */
+ xics_get_server_size();
+ xics_update_irq_servers();
+ xics_init_host();
+ xics_setup_cpu();
+}
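
xics_register_ics() and the ics_list above are the entire plug-in interface for source controllers: a backend fills in a struct ics with map/mask_unknown/get_server/host_match and registers it at init time, exactly as ics-rtas.c does. A hypothetical skeleton backend, for illustration only (the "foo" names are made up):

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/of.h>
#include <asm/xics.h>

static int ics_foo_map(struct ics *ics, unsigned int virq)
{
	return -EINVAL;			/* claims no interrupts yet */
}

static void ics_foo_mask_unknown(struct ics *ics, unsigned long vec)
{
}

static long ics_foo_get_server(struct ics *ics, unsigned long vec)
{
	return -1;
}

static int ics_foo_host_match(struct ics *ics, struct device_node *node)
{
	return 0;
}

static struct ics ics_foo = {
	.map		= ics_foo_map,
	.mask_unknown	= ics_foo_mask_unknown,
	.get_server	= ics_foo_get_server,
	.host_match	= ics_foo_host_match,
};

static int __init ics_foo_init(void)
{
	xics_register_ics(&ics_foo);
	return 0;
}
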
diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c
index 7436f3ed4df6..6183799754af 100644
--- a/arch/powerpc/sysdev/xilinx_intc.c
+++ b/arch/powerpc/sysdev/xilinx_intc.c
@@ -71,7 +71,7 @@ static unsigned char xilinx_intc_map_senses[] = {
*/
static void xilinx_intc_mask(struct irq_data *d)
{
- int irq = virq_to_hw(d->irq);
+ int irq = irqd_to_hwirq(d);
void * regs = irq_data_get_irq_chip_data(d);
pr_debug("mask: %d\n", irq);
out_be32(regs + XINTC_CIE, 1 << irq);
@@ -79,12 +79,6 @@ static void xilinx_intc_mask(struct irq_data *d)
static int xilinx_intc_set_type(struct irq_data *d, unsigned int flow_type)
{
- struct irq_desc *desc = irq_to_desc(d->irq);
-
- desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
- desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
- if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
- desc->status |= IRQ_LEVEL;
return 0;
}
@@ -93,7 +87,7 @@ static int xilinx_intc_set_type(struct irq_data *d, unsigned int flow_type)
*/
static void xilinx_intc_level_unmask(struct irq_data *d)
{
- int irq = virq_to_hw(d->irq);
+ int irq = irqd_to_hwirq(d);
void * regs = irq_data_get_irq_chip_data(d);
pr_debug("unmask: %d\n", irq);
out_be32(regs + XINTC_SIE, 1 << irq);
@@ -118,7 +112,7 @@ static struct irq_chip xilinx_intc_level_irqchip = {
*/
static void xilinx_intc_edge_unmask(struct irq_data *d)
{
- int irq = virq_to_hw(d->irq);
+ int irq = irqd_to_hwirq(d);
void *regs = irq_data_get_irq_chip_data(d);
pr_debug("unmask: %d\n", irq);
out_be32(regs + XINTC_SIE, 1 << irq);
@@ -126,7 +120,7 @@ static void xilinx_intc_edge_unmask(struct irq_data *d)
static void xilinx_intc_edge_ack(struct irq_data *d)
{
- int irq = virq_to_hw(d->irq);
+ int irq = irqd_to_hwirq(d);
void * regs = irq_data_get_irq_chip_data(d);
pr_debug("ack: %d\n", irq);
out_be32(regs + XINTC_IAR, 1 << irq);
@@ -170,15 +164,15 @@ static int xilinx_intc_xlate(struct irq_host *h, struct device_node *ct,
static int xilinx_intc_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t irq)
{
- set_irq_chip_data(virq, h->host_data);
+ irq_set_chip_data(virq, h->host_data);
if (xilinx_intc_typetable[irq] == IRQ_TYPE_LEVEL_HIGH ||
xilinx_intc_typetable[irq] == IRQ_TYPE_LEVEL_LOW) {
- set_irq_chip_and_handler(virq, &xilinx_intc_level_irqchip,
- handle_level_irq);
+ irq_set_chip_and_handler(virq, &xilinx_intc_level_irqchip,
+ handle_level_irq);
} else {
- set_irq_chip_and_handler(virq, &xilinx_intc_edge_irqchip,
- handle_edge_irq);
+ irq_set_chip_and_handler(virq, &xilinx_intc_edge_irqchip,
+ handle_edge_irq);
}
return 0;
}
@@ -229,7 +223,7 @@ int xilinx_intc_get_irq(void)
*/
static void xilinx_i8259_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = i8259_irq();
if (cascade_irq)
@@ -256,7 +250,7 @@ static void __init xilinx_i8259_setup_cascade(void)
}
i8259_init(cascade_node, 0);
- set_irq_chained_handler(cascade_irq, xilinx_i8259_cascade);
+ irq_set_chained_handler(cascade_irq, xilinx_i8259_cascade);
/* Program irq 7 (usb/audio), 14/15 (ide) to level sensitive */
/* This looks like a dirty hack to me --gcl */
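
The xilinx_i8259_cascade() change is the same pattern applied to the uic cascade earlier in the patch: a chained handler obtains its parent chip with irq_desc_get_chip(desc) and acks/EOIs through irq_data rather than desc->chip. A generic sketch of that shape (the "foo" names and the pending-register lookup are hypothetical):

#include <linux/irq.h>

/* would read the child controller's pending register and map it to a virq */
static unsigned int foo_get_pending_virq(void)
{
	return NO_IRQ;			/* placeholder for the real lookup */
}

static void foo_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int child_virq = foo_get_pending_virq();

	if (child_virq != NO_IRQ)
		generic_handle_irq(child_virq);

	/* finish the cascade interrupt on the parent via irq_data */
	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

It would be wired up with irq_set_chained_handler(cascade_virq, foo_cascade), as the uic and xilinx code above now do.
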
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 33794c1d92c3..42541bbcc7fa 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -334,7 +334,7 @@ static void release_output_lock(void)
int cpus_are_in_xmon(void)
{
- return !cpus_empty(cpus_in_xmon);
+ return !cpumask_empty(&cpus_in_xmon);
}
#endif
@@ -373,7 +373,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
#ifdef CONFIG_SMP
cpu = smp_processor_id();
- if (cpu_isset(cpu, cpus_in_xmon)) {
+ if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
get_output_lock();
excprint(regs);
printf("cpu 0x%x: Exception %lx %s in xmon, "
@@ -396,10 +396,10 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
}
xmon_fault_jmp[cpu] = recurse_jmp;
- cpu_set(cpu, cpus_in_xmon);
+ cpumask_set_cpu(cpu, &cpus_in_xmon);
bp = NULL;
- if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) == (MSR_IR|MSR_SF))
+ if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT))
bp = at_breakpoint(regs->nip);
if (bp || unrecoverable_excp(regs))
fromipi = 0;
@@ -437,10 +437,10 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
xmon_owner = cpu;
mb();
if (ncpus > 1) {
- smp_send_debugger_break(MSG_ALL_BUT_SELF);
+ smp_send_debugger_break();
/* wait for other cpus to come in */
for (timeout = 100000000; timeout != 0; --timeout) {
- if (cpus_weight(cpus_in_xmon) >= ncpus)
+ if (cpumask_weight(&cpus_in_xmon) >= ncpus)
break;
barrier();
}
@@ -484,7 +484,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
}
}
leave:
- cpu_clear(cpu, cpus_in_xmon);
+ cpumask_clear_cpu(cpu, &cpus_in_xmon);
xmon_fault_jmp[cpu] = NULL;
#else
/* UP is simple... */
@@ -529,7 +529,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
}
}
#else
- if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) == (MSR_IR|MSR_SF)) {
+ if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) {
bp = at_breakpoint(regs->nip);
if (bp != NULL) {
int stepped = emulate_step(regs, bp->instr[0]);
@@ -578,7 +578,7 @@ static int xmon_bpt(struct pt_regs *regs)
struct bpt *bp;
unsigned long offset;
- if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) != (MSR_IR|MSR_SF))
+ if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) != (MSR_IR|MSR_64BIT))
return 0;
/* Are we at the trap at bp->instr[1] for some bp? */
@@ -609,7 +609,7 @@ static int xmon_sstep(struct pt_regs *regs)
static int xmon_dabr_match(struct pt_regs *regs)
{
- if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) != (MSR_IR|MSR_SF))
+ if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) != (MSR_IR|MSR_64BIT))
return 0;
if (dabr.enabled == 0)
return 0;
@@ -619,7 +619,7 @@ static int xmon_dabr_match(struct pt_regs *regs)
static int xmon_iabr_match(struct pt_regs *regs)
{
- if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) != (MSR_IR|MSR_SF))
+ if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) != (MSR_IR|MSR_64BIT))
return 0;
if (iabr == NULL)
return 0;
@@ -630,7 +630,7 @@ static int xmon_iabr_match(struct pt_regs *regs)
static int xmon_ipi(struct pt_regs *regs)
{
#ifdef CONFIG_SMP
- if (in_xmon && !cpu_isset(smp_processor_id(), cpus_in_xmon))
+ if (in_xmon && !cpumask_test_cpu(smp_processor_id(), &cpus_in_xmon))
xmon_core(regs, 1);
#endif
return 0;
@@ -644,7 +644,7 @@ static int xmon_fault_handler(struct pt_regs *regs)
if (in_xmon && catch_memory_errors)
handle_fault(regs); /* doesn't return */
- if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) == (MSR_IR|MSR_SF)) {
+ if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) {
bp = in_breakpoint_table(regs->nip, &offset);
if (bp != NULL) {
regs->nip = bp->address + offset;
@@ -929,7 +929,7 @@ static int do_step(struct pt_regs *regs)
int stepped;
/* check we are in 64-bit kernel mode, translation enabled */
- if ((regs->msr & (MSR_SF|MSR_PR|MSR_IR)) == (MSR_SF|MSR_IR)) {
+ if ((regs->msr & (MSR_64BIT|MSR_PR|MSR_IR)) == (MSR_64BIT|MSR_IR)) {
if (mread(regs->nip, &instr, 4) == 4) {
stepped = emulate_step(regs, instr);
if (stepped < 0) {
@@ -976,7 +976,7 @@ static int cpu_cmd(void)
printf("cpus stopped:");
count = 0;
for (cpu = 0; cpu < NR_CPUS; ++cpu) {
- if (cpu_isset(cpu, cpus_in_xmon)) {
+ if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
if (count == 0)
printf(" %x", cpu);
++count;
@@ -992,7 +992,7 @@ static int cpu_cmd(void)
return 0;
}
/* try to switch to cpu specified */
- if (!cpu_isset(cpu, cpus_in_xmon)) {
+ if (!cpumask_test_cpu(cpu, &cpus_in_xmon)) {
printf("cpu 0x%x isn't in xmon\n", cpu);
return 0;
}
@@ -1497,6 +1497,10 @@ static void prregs(struct pt_regs *fp)
#endif
printf("pc = ");
xmon_print_symbol(fp->nip, " ", "\n");
+ if (TRAP(fp) != 0xc00 && cpu_has_feature(CPU_FTR_CFAR)) {
+ printf("cfar= ");
+ xmon_print_symbol(fp->orig_gpr3, " ", "\n");
+ }
printf("lr = ");
xmon_print_symbol(fp->link, " ", "\n");
printf("msr = "REG" cr = %.8lx\n", fp->msr, fp->ccr);
@@ -2663,7 +2667,7 @@ static void dump_stab(void)
void dump_segments(void)
{
- if (cpu_has_feature(CPU_FTR_SLB))
+ if (mmu_has_feature(MMU_FTR_SLB))
dump_slb();
else
dump_stab();
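
The remaining xmon changes are a mechanical move from the old cpu_* macros, which operate on a cpumask_t by value, to the cpumask_* functions operating on a struct cpumask pointer (plus MSR_SF being replaced by the MSR_64BIT define, which maps to the appropriate 64-bit-mode bit per platform). A minimal illustration of the new accessors, using a hypothetical mask:

#include <linux/cpumask.h>

static struct cpumask demo_mask;		/* hypothetical, for illustration */

static int demo_cpumask_usage(int cpu)
{
	cpumask_set_cpu(cpu, &demo_mask);	/* was cpu_set() */

	if (!cpumask_test_cpu(cpu, &demo_mask))	/* was cpu_isset() */
		return -1;

	cpumask_clear_cpu(cpu, &demo_mask);	/* was cpu_clear() */

	return cpumask_weight(&demo_mask);	/* was cpus_weight() */
}
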