From 3e7318584dfec11992f3ac45658c4bc1210b3778 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 19 Sep 2022 19:01:38 +0200 Subject: powerpc: Remove CONFIG_PPC_FSL_BOOK3E CONFIG_PPC_FSL_BOOK3E is redundant with CONFIG_PPC_E500. Remove it. And rename five files accordingly. Signed-off-by: Christophe Leroy [mpe: Rename include guards to match new file names] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/795cb93b88c9a0279289712e674f39e3b108a1b4.1663606876.git.christophe.leroy@csgroup.eu --- arch/powerpc/Kconfig | 2 +- arch/powerpc/include/asm/barrier.h | 2 +- arch/powerpc/include/asm/hugetlb.h | 4 +- arch/powerpc/include/asm/kvm_host.h | 2 +- arch/powerpc/include/asm/mmu.h | 2 +- arch/powerpc/include/asm/nohash/32/pgtable.h | 2 +- arch/powerpc/include/asm/nohash/64/pgtable.h | 2 +- arch/powerpc/include/asm/nohash/hugetlb-book3e.h | 45 --- arch/powerpc/include/asm/nohash/hugetlb-e500.h | 45 +++ arch/powerpc/include/asm/nohash/pgtable.h | 2 +- arch/powerpc/include/asm/nohash/pte-book3e.h | 129 -------- arch/powerpc/include/asm/nohash/pte-e500.h | 129 ++++++++ arch/powerpc/include/asm/page.h | 2 +- arch/powerpc/include/asm/ppc_asm.h | 6 +- arch/powerpc/include/asm/setup.h | 2 +- arch/powerpc/kernel/Makefile | 2 +- arch/powerpc/kernel/asm-offsets.c | 4 +- arch/powerpc/kernel/cpu_setup_e500.S | 333 ++++++++++++++++++++ arch/powerpc/kernel/cpu_setup_fsl_booke.S | 333 -------------------- arch/powerpc/kernel/head_booke.h | 2 +- arch/powerpc/kernel/interrupt_64.S | 2 +- arch/powerpc/kernel/security.c | 10 +- arch/powerpc/kernel/smp.c | 2 +- arch/powerpc/kernel/sysfs.c | 6 +- arch/powerpc/kernel/vmlinux.lds.S | 2 +- arch/powerpc/lib/feature-fixups.c | 4 +- arch/powerpc/mm/hugetlbpage.c | 2 +- arch/powerpc/mm/mem.c | 2 +- arch/powerpc/mm/mmu_decl.h | 4 +- arch/powerpc/mm/nohash/Makefile | 6 +- arch/powerpc/mm/nohash/book3e_hugetlbpage.c | 190 ------------ arch/powerpc/mm/nohash/e500.c | 375 +++++++++++++++++++++++ arch/powerpc/mm/nohash/e500_hugetlbpage.c | 190 ++++++++++++ arch/powerpc/mm/nohash/fsl_book3e.c | 375 ----------------------- arch/powerpc/mm/nohash/tlb.c | 16 +- arch/powerpc/mm/nohash/tlb_low.S | 2 +- arch/powerpc/platforms/Kconfig.cputype | 16 +- 37 files changed, 1123 insertions(+), 1131 deletions(-) delete mode 100644 arch/powerpc/include/asm/nohash/hugetlb-book3e.h create mode 100644 arch/powerpc/include/asm/nohash/hugetlb-e500.h delete mode 100644 arch/powerpc/include/asm/nohash/pte-book3e.h create mode 100644 arch/powerpc/include/asm/nohash/pte-e500.h create mode 100644 arch/powerpc/kernel/cpu_setup_e500.S delete mode 100644 arch/powerpc/kernel/cpu_setup_fsl_booke.S delete mode 100644 arch/powerpc/mm/nohash/book3e_hugetlbpage.c create mode 100644 arch/powerpc/mm/nohash/e500.c create mode 100644 arch/powerpc/mm/nohash/e500_hugetlbpage.c delete mode 100644 arch/powerpc/mm/nohash/fsl_book3e.c diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 9d721cac20b8..a7b58645cc3f 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -290,7 +290,7 @@ config PPC_LONG_DOUBLE_128 config PPC_BARRIER_NOSPEC bool default y - depends on PPC_BOOK3S_64 || PPC_FSL_BOOK3E + depends on PPC_BOOK3S_64 || PPC_E500 config EARLY_PRINTK bool diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h index ef2d8b15eaab..e80b2c0e9315 100644 --- a/arch/powerpc/include/asm/barrier.h +++ b/arch/powerpc/include/asm/barrier.h @@ -86,7 +86,7 @@ do { \ #ifdef CONFIG_PPC_BOOK3S_64 #define NOSPEC_BARRIER_SLOT nop -#elif defined(CONFIG_PPC_FSL_BOOK3E) 
+#elif defined(CONFIG_PPC_E500) #define NOSPEC_BARRIER_SLOT nop; nop #endif diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index 32ce0fb7548f..ea71f7245a63 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -7,8 +7,8 @@ #ifdef CONFIG_PPC_BOOK3S_64 #include -#elif defined(CONFIG_PPC_FSL_BOOK3E) -#include +#elif defined(CONFIG_PPC_E500) +#include #elif defined(CONFIG_PPC_8xx) #include #endif /* CONFIG_PPC_BOOK3S_64 */ diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index c2b003550dc9..caea15dcb91d 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -443,7 +443,7 @@ struct kvmppc_passthru_irqmap { }; #endif -# ifdef CONFIG_PPC_FSL_BOOK3E +# ifdef CONFIG_PPC_E500 #define KVMPPC_BOOKE_IAC_NUM 2 #define KVMPPC_BOOKE_DAC_NUM 2 # else diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 5b46da9ba7f6..39057320e436 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -141,7 +141,7 @@ typedef pte_t *pgtable_t; -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 #include DECLARE_PER_CPU(int, next_tlbcam_idx); #endif diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index 197e7552d9f6..0d40b33184eb 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -131,7 +131,7 @@ void unmap_kernel_page(unsigned long va); #elif defined(CONFIG_44x) #include #elif defined(CONFIG_PPC_85xx) && defined(CONFIG_PTE_64BIT) -#include +#include #elif defined(CONFIG_PPC_85xx) #include #elif defined(CONFIG_PPC_8xx) diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h index 599921cc257e..879e9a6e5a87 100644 --- a/arch/powerpc/include/asm/nohash/64/pgtable.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable.h @@ -70,7 +70,7 @@ /* * Include the PTE bits definitions */ -#include +#include #define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1)) diff --git a/arch/powerpc/include/asm/nohash/hugetlb-book3e.h b/arch/powerpc/include/asm/nohash/hugetlb-book3e.h deleted file mode 100644 index ecd8694cb229..000000000000 --- a/arch/powerpc/include/asm/nohash/hugetlb-book3e.h +++ /dev/null @@ -1,45 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_POWERPC_NOHASH_HUGETLB_BOOK3E_H -#define _ASM_POWERPC_NOHASH_HUGETLB_BOOK3E_H - -static inline pte_t *hugepd_page(hugepd_t hpd) -{ - if (WARN_ON(!hugepd_ok(hpd))) - return NULL; - - return (pte_t *)((hpd_val(hpd) & ~HUGEPD_SHIFT_MASK) | PD_HUGE); -} - -static inline unsigned int hugepd_shift(hugepd_t hpd) -{ - return hpd_val(hpd) & HUGEPD_SHIFT_MASK; -} - -static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr, - unsigned int pdshift) -{ - /* - * On FSL BookE, we have multiple higher-level table entries that - * point to the same hugepte. Just use the first one since they're all - * identical. So for that case, idx=0. 
- */ - return hugepd_page(hpd); -} - -void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr); - -static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshift) -{ - /* We use the old format for PPC_FSL_BOOK3E */ - *hpdp = __hugepd(((unsigned long)new & ~PD_HUGE) | pshift); -} - -static inline int check_and_get_huge_psize(int shift) -{ - if (shift & 1) /* Not a power of 4 */ - return -EINVAL; - - return shift_to_mmu_psize(shift); -} - -#endif /* _ASM_POWERPC_NOHASH_HUGETLB_BOOK3E_H */ diff --git a/arch/powerpc/include/asm/nohash/hugetlb-e500.h b/arch/powerpc/include/asm/nohash/hugetlb-e500.h new file mode 100644 index 000000000000..8f04ad20e040 --- /dev/null +++ b/arch/powerpc/include/asm/nohash/hugetlb-e500.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_NOHASH_HUGETLB_E500_H +#define _ASM_POWERPC_NOHASH_HUGETLB_E500_H + +static inline pte_t *hugepd_page(hugepd_t hpd) +{ + if (WARN_ON(!hugepd_ok(hpd))) + return NULL; + + return (pte_t *)((hpd_val(hpd) & ~HUGEPD_SHIFT_MASK) | PD_HUGE); +} + +static inline unsigned int hugepd_shift(hugepd_t hpd) +{ + return hpd_val(hpd) & HUGEPD_SHIFT_MASK; +} + +static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr, + unsigned int pdshift) +{ + /* + * On FSL BookE, we have multiple higher-level table entries that + * point to the same hugepte. Just use the first one since they're all + * identical. So for that case, idx=0. + */ + return hugepd_page(hpd); +} + +void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr); + +static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshift) +{ + /* We use the old format for PPC_E500 */ + *hpdp = __hugepd(((unsigned long)new & ~PD_HUGE) | pshift); +} + +static inline int check_and_get_huge_psize(int shift) +{ + if (shift & 1) /* Not a power of 4 */ + return -EINVAL; + + return shift_to_mmu_psize(shift); +} + +#endif /* _ASM_POWERPC_NOHASH_HUGETLB_E500_H */ diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h index 4fd73c7412d0..d9067dfc531c 100644 --- a/arch/powerpc/include/asm/nohash/pgtable.h +++ b/arch/powerpc/include/asm/nohash/pgtable.h @@ -266,7 +266,7 @@ static inline int pud_huge(pud_t pud) * We use it to ensure coherency between the i-cache and d-cache * for the page which has just been mapped in. */ -#if defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_HUGETLB_PAGE) +#if defined(CONFIG_PPC_E500) && defined(CONFIG_HUGETLB_PAGE) void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep); #else static inline diff --git a/arch/powerpc/include/asm/nohash/pte-book3e.h b/arch/powerpc/include/asm/nohash/pte-book3e.h deleted file mode 100644 index f798640422c2..000000000000 --- a/arch/powerpc/include/asm/nohash/pte-book3e.h +++ /dev/null @@ -1,129 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_POWERPC_NOHASH_PTE_BOOK3E_H -#define _ASM_POWERPC_NOHASH_PTE_BOOK3E_H -#ifdef __KERNEL__ - -/* PTE bit definitions for processors compliant to the Book3E - * architecture 2.06 or later. The position of the PTE bits - * matches the HW definition of the optional Embedded Page Table - * category. 
- */ - -/* Architected bits */ -#define _PAGE_PRESENT 0x000001 /* software: pte contains a translation */ -#define _PAGE_SW1 0x000002 -#define _PAGE_BIT_SWAP_TYPE 2 -#define _PAGE_BAP_SR 0x000004 -#define _PAGE_BAP_UR 0x000008 -#define _PAGE_BAP_SW 0x000010 -#define _PAGE_BAP_UW 0x000020 -#define _PAGE_BAP_SX 0x000040 -#define _PAGE_BAP_UX 0x000080 -#define _PAGE_PSIZE_MSK 0x000f00 -#define _PAGE_PSIZE_4K 0x000200 -#define _PAGE_PSIZE_8K 0x000300 -#define _PAGE_PSIZE_16K 0x000400 -#define _PAGE_PSIZE_32K 0x000500 -#define _PAGE_PSIZE_64K 0x000600 -#define _PAGE_PSIZE_128K 0x000700 -#define _PAGE_PSIZE_256K 0x000800 -#define _PAGE_PSIZE_512K 0x000900 -#define _PAGE_PSIZE_1M 0x000a00 -#define _PAGE_PSIZE_2M 0x000b00 -#define _PAGE_PSIZE_4M 0x000c00 -#define _PAGE_PSIZE_8M 0x000d00 -#define _PAGE_PSIZE_16M 0x000e00 -#define _PAGE_PSIZE_32M 0x000f00 -#define _PAGE_DIRTY 0x001000 /* C: page changed */ -#define _PAGE_SW0 0x002000 -#define _PAGE_U3 0x004000 -#define _PAGE_U2 0x008000 -#define _PAGE_U1 0x010000 -#define _PAGE_U0 0x020000 -#define _PAGE_ACCESSED 0x040000 -#define _PAGE_ENDIAN 0x080000 -#define _PAGE_GUARDED 0x100000 -#define _PAGE_COHERENT 0x200000 /* M: enforce memory coherence */ -#define _PAGE_NO_CACHE 0x400000 /* I: cache inhibit */ -#define _PAGE_WRITETHRU 0x800000 /* W: cache write-through */ - -/* "Higher level" linux bit combinations */ -#define _PAGE_EXEC (_PAGE_BAP_SX | _PAGE_BAP_UX) /* .. and was cache cleaned */ -#define _PAGE_RW (_PAGE_BAP_SW | _PAGE_BAP_UW) /* User write permission */ -#define _PAGE_KERNEL_RW (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY) -#define _PAGE_KERNEL_RO (_PAGE_BAP_SR) -#define _PAGE_KERNEL_RWX (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY | _PAGE_BAP_SX) -#define _PAGE_KERNEL_ROX (_PAGE_BAP_SR | _PAGE_BAP_SX) -#define _PAGE_USER (_PAGE_BAP_UR | _PAGE_BAP_SR) /* Can be read */ -#define _PAGE_PRIVILEGED (_PAGE_BAP_SR) - -#define _PAGE_SPECIAL _PAGE_SW0 - -/* Base page size */ -#define _PAGE_PSIZE _PAGE_PSIZE_4K -#define PTE_RPN_SHIFT (24) - -#define PTE_WIMGE_SHIFT (19) -#define PTE_BAP_SHIFT (2) - -/* On 32-bit, we never clear the top part of the PTE */ -#ifdef CONFIG_PPC32 -#define _PTE_NONE_MASK 0xffffffff00000000ULL -#define _PMD_PRESENT 0 -#define _PMD_PRESENT_MASK (PAGE_MASK) -#define _PMD_BAD (~PAGE_MASK) -#define _PMD_USER 0 -#else -#define _PTE_NONE_MASK 0 -#endif - -/* - * We define 2 sets of base prot bits, one for basic pages (ie, - * cacheable kernel and user pages) and one for non cacheable - * pages. We always set _PAGE_COHERENT when SMP is enabled or - * the processor might need it for DMA coherency. 
- */ -#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE) -#if defined(CONFIG_SMP) -#define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT) -#else -#define _PAGE_BASE (_PAGE_BASE_NC) -#endif - -/* Permission masks used to generate the __P and __S table */ -#define PAGE_NONE __pgprot(_PAGE_BASE) -#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW) -#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_BAP_UX) -#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER) -#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_BAP_UX) -#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER) -#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_BAP_UX) - -#ifndef __ASSEMBLY__ -static inline pte_t pte_mkprivileged(pte_t pte) -{ - return __pte((pte_val(pte) & ~_PAGE_USER) | _PAGE_PRIVILEGED); -} - -#define pte_mkprivileged pte_mkprivileged - -static inline pte_t pte_mkuser(pte_t pte) -{ - return __pte((pte_val(pte) & ~_PAGE_PRIVILEGED) | _PAGE_USER); -} - -#define pte_mkuser pte_mkuser - -static inline pte_t pte_mkexec(pte_t pte) -{ - if (pte_val(pte) & _PAGE_BAP_UR) - return __pte((pte_val(pte) & ~_PAGE_BAP_SX) | _PAGE_BAP_UX); - else - return __pte((pte_val(pte) & ~_PAGE_BAP_UX) | _PAGE_BAP_SX); -} -#define pte_mkexec pte_mkexec - -#endif /* __ASSEMBLY__ */ - -#endif /* __KERNEL__ */ -#endif /* _ASM_POWERPC_NOHASH_PTE_BOOK3E_H */ diff --git a/arch/powerpc/include/asm/nohash/pte-e500.h b/arch/powerpc/include/asm/nohash/pte-e500.h new file mode 100644 index 000000000000..0934e8965e4e --- /dev/null +++ b/arch/powerpc/include/asm/nohash/pte-e500.h @@ -0,0 +1,129 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_NOHASH_PTE_E500_H +#define _ASM_POWERPC_NOHASH_PTE_E500_H +#ifdef __KERNEL__ + +/* PTE bit definitions for processors compliant to the Book3E + * architecture 2.06 or later. The position of the PTE bits + * matches the HW definition of the optional Embedded Page Table + * category. + */ + +/* Architected bits */ +#define _PAGE_PRESENT 0x000001 /* software: pte contains a translation */ +#define _PAGE_SW1 0x000002 +#define _PAGE_BIT_SWAP_TYPE 2 +#define _PAGE_BAP_SR 0x000004 +#define _PAGE_BAP_UR 0x000008 +#define _PAGE_BAP_SW 0x000010 +#define _PAGE_BAP_UW 0x000020 +#define _PAGE_BAP_SX 0x000040 +#define _PAGE_BAP_UX 0x000080 +#define _PAGE_PSIZE_MSK 0x000f00 +#define _PAGE_PSIZE_4K 0x000200 +#define _PAGE_PSIZE_8K 0x000300 +#define _PAGE_PSIZE_16K 0x000400 +#define _PAGE_PSIZE_32K 0x000500 +#define _PAGE_PSIZE_64K 0x000600 +#define _PAGE_PSIZE_128K 0x000700 +#define _PAGE_PSIZE_256K 0x000800 +#define _PAGE_PSIZE_512K 0x000900 +#define _PAGE_PSIZE_1M 0x000a00 +#define _PAGE_PSIZE_2M 0x000b00 +#define _PAGE_PSIZE_4M 0x000c00 +#define _PAGE_PSIZE_8M 0x000d00 +#define _PAGE_PSIZE_16M 0x000e00 +#define _PAGE_PSIZE_32M 0x000f00 +#define _PAGE_DIRTY 0x001000 /* C: page changed */ +#define _PAGE_SW0 0x002000 +#define _PAGE_U3 0x004000 +#define _PAGE_U2 0x008000 +#define _PAGE_U1 0x010000 +#define _PAGE_U0 0x020000 +#define _PAGE_ACCESSED 0x040000 +#define _PAGE_ENDIAN 0x080000 +#define _PAGE_GUARDED 0x100000 +#define _PAGE_COHERENT 0x200000 /* M: enforce memory coherence */ +#define _PAGE_NO_CACHE 0x400000 /* I: cache inhibit */ +#define _PAGE_WRITETHRU 0x800000 /* W: cache write-through */ + +/* "Higher level" linux bit combinations */ +#define _PAGE_EXEC (_PAGE_BAP_SX | _PAGE_BAP_UX) /* .. 
and was cache cleaned */ +#define _PAGE_RW (_PAGE_BAP_SW | _PAGE_BAP_UW) /* User write permission */ +#define _PAGE_KERNEL_RW (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY) +#define _PAGE_KERNEL_RO (_PAGE_BAP_SR) +#define _PAGE_KERNEL_RWX (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY | _PAGE_BAP_SX) +#define _PAGE_KERNEL_ROX (_PAGE_BAP_SR | _PAGE_BAP_SX) +#define _PAGE_USER (_PAGE_BAP_UR | _PAGE_BAP_SR) /* Can be read */ +#define _PAGE_PRIVILEGED (_PAGE_BAP_SR) + +#define _PAGE_SPECIAL _PAGE_SW0 + +/* Base page size */ +#define _PAGE_PSIZE _PAGE_PSIZE_4K +#define PTE_RPN_SHIFT (24) + +#define PTE_WIMGE_SHIFT (19) +#define PTE_BAP_SHIFT (2) + +/* On 32-bit, we never clear the top part of the PTE */ +#ifdef CONFIG_PPC32 +#define _PTE_NONE_MASK 0xffffffff00000000ULL +#define _PMD_PRESENT 0 +#define _PMD_PRESENT_MASK (PAGE_MASK) +#define _PMD_BAD (~PAGE_MASK) +#define _PMD_USER 0 +#else +#define _PTE_NONE_MASK 0 +#endif + +/* + * We define 2 sets of base prot bits, one for basic pages (ie, + * cacheable kernel and user pages) and one for non cacheable + * pages. We always set _PAGE_COHERENT when SMP is enabled or + * the processor might need it for DMA coherency. + */ +#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE) +#if defined(CONFIG_SMP) +#define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT) +#else +#define _PAGE_BASE (_PAGE_BASE_NC) +#endif + +/* Permission masks used to generate the __P and __S table */ +#define PAGE_NONE __pgprot(_PAGE_BASE) +#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW) +#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_BAP_UX) +#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER) +#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_BAP_UX) +#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER) +#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_BAP_UX) + +#ifndef __ASSEMBLY__ +static inline pte_t pte_mkprivileged(pte_t pte) +{ + return __pte((pte_val(pte) & ~_PAGE_USER) | _PAGE_PRIVILEGED); +} + +#define pte_mkprivileged pte_mkprivileged + +static inline pte_t pte_mkuser(pte_t pte) +{ + return __pte((pte_val(pte) & ~_PAGE_PRIVILEGED) | _PAGE_USER); +} + +#define pte_mkuser pte_mkuser + +static inline pte_t pte_mkexec(pte_t pte) +{ + if (pte_val(pte) & _PAGE_BAP_UR) + return __pte((pte_val(pte) & ~_PAGE_BAP_SX) | _PAGE_BAP_UX); + else + return __pte((pte_val(pte) & ~_PAGE_BAP_UX) | _PAGE_BAP_SX); +} +#define pte_mkexec pte_mkexec + +#endif /* __ASSEMBLY__ */ + +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_NOHASH_PTE_E500_H */ diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 7f20636d13ed..edf1dd1b0ca9 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -31,7 +31,7 @@ extern unsigned int hpage_shift; #define HPAGE_SHIFT hpage_shift #elif defined(CONFIG_PPC_8xx) #define HPAGE_SHIFT 19 /* 512k pages */ -#elif defined(CONFIG_PPC_FSL_BOOK3E) +#elif defined(CONFIG_PPC_E500) #define HPAGE_SHIFT 22 /* 4M pages */ #endif #define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 55149a0384db..7e4fe766e247 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -342,7 +342,7 @@ n: #endif /* various errata or part fixups */ -#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E) +#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_E500) #define MFTB(dest) \ 90: mfspr dest, SPRN_TBRL; \ BEGIN_FTR_SECTION_NESTED(96); \ 
@@ -768,7 +768,7 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96) stringify_in_c(.llong (_target);) \ stringify_in_c(.previous) -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 #define BTB_FLUSH(reg) \ lis reg,BUCSR_INIT@h; \ ori reg,reg,BUCSR_INIT@l; \ @@ -776,6 +776,6 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96) isync; #else #define BTB_FLUSH(reg) -#endif /* CONFIG_PPC_FSL_BOOK3E */ +#endif /* CONFIG_PPC_E500 */ #endif /* _ASM_POWERPC_PPC_ASM_H */ diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index dd461b2c825c..85143849a586 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -69,7 +69,7 @@ void do_barrier_nospec_fixups_range(bool enable, void *start, void *end); static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { } #endif -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 void __init setup_spectre_v2(void); #else static inline void setup_spectre_v2(void) {} diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 33dafd12e81d..658c4dffaa56 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -114,7 +114,7 @@ endif obj64-$(CONFIG_HIBERNATION) += swsusp_asm64.o obj-$(CONFIG_MODULES) += module.o module_$(BITS).o obj-$(CONFIG_44x) += cpu_setup_44x.o -obj-$(CONFIG_PPC_FSL_BOOK3E) += cpu_setup_fsl_booke.o +obj-$(CONFIG_PPC_E500) += cpu_setup_e500.o obj-$(CONFIG_PPC_DOORBELL) += dbell.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 10ce03052a19..4ce2a4aa3985 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -59,7 +59,7 @@ #endif #endif -#if defined(CONFIG_PPC_FSL_BOOK3E) +#if defined(CONFIG_PPC_E500) #include "../mm/mmu_decl.h" #endif @@ -651,7 +651,7 @@ int main(void) DEFINE(PGD_T_LOG2, PGD_T_LOG2); DEFINE(PTE_T_LOG2, PTE_T_LOG2); #endif -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam)); OFFSET(TLBCAM_MAS0, tlbcam, MAS0); OFFSET(TLBCAM_MAS1, tlbcam, MAS1); diff --git a/arch/powerpc/kernel/cpu_setup_e500.S b/arch/powerpc/kernel/cpu_setup_e500.S new file mode 100644 index 000000000000..058336079069 --- /dev/null +++ b/arch/powerpc/kernel/cpu_setup_e500.S @@ -0,0 +1,333 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file contains low level CPU setup functions. + * Kumar Gala + * Copyright 2009 Freescale Semiconductor, Inc. + * + * Based on cpu_setup_6xx code by + * Benjamin Herrenschmidt + */ + +#include +#include +#include +#include +#include +#include +#include + +_GLOBAL(__e500_icache_setup) + mfspr r0, SPRN_L1CSR1 + andi. r3, r0, L1CSR1_ICE + bnelr /* Already enabled */ + oris r0, r0, L1CSR1_CPE@h + ori r0, r0, (L1CSR1_ICFI | L1CSR1_ICLFR | L1CSR1_ICE) + mtspr SPRN_L1CSR1, r0 /* Enable I-Cache */ + isync + blr + +_GLOBAL(__e500_dcache_setup) + mfspr r0, SPRN_L1CSR0 + andi. r3, r0, L1CSR0_DCE + bnelr /* Already enabled */ + msync + isync + li r0, 0 + mtspr SPRN_L1CSR0, r0 /* Disable */ + msync + isync + li r0, (L1CSR0_DCFI | L1CSR0_CLFC) + mtspr SPRN_L1CSR0, r0 /* Invalidate */ + isync +1: mfspr r0, SPRN_L1CSR0 + andi. 
r3, r0, L1CSR0_CLFC + bne+ 1b /* Wait for lock bits reset */ + oris r0, r0, L1CSR0_CPE@h + ori r0, r0, L1CSR0_DCE + msync + isync + mtspr SPRN_L1CSR0, r0 /* Enable */ + isync + blr + +/* + * FIXME - we haven't yet done testing to determine a reasonable default + * value for PW20_WAIT_IDLE_BIT. + */ +#define PW20_WAIT_IDLE_BIT 50 /* 1ms, TB frequency is 41.66MHZ */ +_GLOBAL(setup_pw20_idle) + mfspr r3, SPRN_PWRMGTCR0 + + /* Set PW20_WAIT bit, enable pw20 state*/ + ori r3, r3, PWRMGTCR0_PW20_WAIT + li r11, PW20_WAIT_IDLE_BIT + + /* Set Automatic PW20 Core Idle Count */ + rlwimi r3, r11, PWRMGTCR0_PW20_ENT_SHIFT, PWRMGTCR0_PW20_ENT + + mtspr SPRN_PWRMGTCR0, r3 + + blr + +/* + * FIXME - we haven't yet done testing to determine a reasonable default + * value for AV_WAIT_IDLE_BIT. + */ +#define AV_WAIT_IDLE_BIT 50 /* 1ms, TB frequency is 41.66MHZ */ +_GLOBAL(setup_altivec_idle) + mfspr r3, SPRN_PWRMGTCR0 + + /* Enable Altivec Idle */ + oris r3, r3, PWRMGTCR0_AV_IDLE_PD_EN@h + li r11, AV_WAIT_IDLE_BIT + + /* Set Automatic AltiVec Idle Count */ + rlwimi r3, r11, PWRMGTCR0_AV_IDLE_CNT_SHIFT, PWRMGTCR0_AV_IDLE_CNT + + mtspr SPRN_PWRMGTCR0, r3 + + blr + +#ifdef CONFIG_PPC_E500MC +_GLOBAL(__setup_cpu_e6500) + mflr r6 +#ifdef CONFIG_PPC64 + bl setup_altivec_ivors + /* Touch IVOR42 only if the CPU supports E.HV category */ + mfspr r10,SPRN_MMUCFG + rlwinm. r10,r10,0,MMUCFG_LPIDSIZE + beq 1f + bl setup_lrat_ivor +1: +#endif + bl setup_pw20_idle + bl setup_altivec_idle + bl __setup_cpu_e5500 + mtlr r6 + blr +#endif /* CONFIG_PPC_E500MC */ + +#ifdef CONFIG_PPC32 +#ifdef CONFIG_PPC_E500 +#ifndef CONFIG_PPC_E500MC +_GLOBAL(__setup_cpu_e500v1) +_GLOBAL(__setup_cpu_e500v2) + mflr r4 + bl __e500_icache_setup + bl __e500_dcache_setup + bl __setup_e500_ivors +#if defined(CONFIG_FSL_RIO) || defined(CONFIG_FSL_PCI) + /* Ensure that RFXE is set */ + mfspr r3,SPRN_HID1 + oris r3,r3,HID1_RFXE@h + mtspr SPRN_HID1,r3 +#endif + mtlr r4 + blr +#else /* CONFIG_PPC_E500MC */ +_GLOBAL(__setup_cpu_e500mc) +_GLOBAL(__setup_cpu_e5500) + mflr r5 + bl __e500_icache_setup + bl __e500_dcache_setup + bl __setup_e500mc_ivors + /* + * We only want to touch IVOR38-41 if we're running on hardware + * that supports category E.HV. The architectural way to determine + * this is MMUCFG[LPIDSIZE]. + */ + mfspr r3, SPRN_MMUCFG + rlwinm. r3, r3, 0, MMUCFG_LPIDSIZE + beq 1f + bl __setup_ehv_ivors + b 2f +1: + lwz r3, CPU_SPEC_FEATURES(r4) + /* We need this check as cpu_setup is also called for + * the secondary cores. So, if we have already cleared + * the feature on the primary core, avoid doing it on the + * secondary core. + */ + andi. r6, r3, CPU_FTR_EMB_HV + beq 2f + rlwinm r3, r3, 0, ~CPU_FTR_EMB_HV + stw r3, CPU_SPEC_FEATURES(r4) +2: + mtlr r5 + blr +#endif /* CONFIG_PPC_E500MC */ +#endif /* CONFIG_PPC_E500 */ +#endif /* CONFIG_PPC32 */ + +#ifdef CONFIG_PPC_BOOK3E_64 +_GLOBAL(__restore_cpu_e6500) + mflr r5 + bl setup_altivec_ivors + /* Touch IVOR42 only if the CPU supports E.HV category */ + mfspr r10,SPRN_MMUCFG + rlwinm. r10,r10,0,MMUCFG_LPIDSIZE + beq 1f + bl setup_lrat_ivor +1: + bl setup_pw20_idle + bl setup_altivec_idle + bl __restore_cpu_e5500 + mtlr r5 + blr + +_GLOBAL(__restore_cpu_e5500) + mflr r4 + bl __e500_icache_setup + bl __e500_dcache_setup + bl __setup_base_ivors + bl setup_perfmon_ivor + bl setup_doorbell_ivors + /* + * We only want to touch IVOR38-41 if we're running on hardware + * that supports category E.HV. The architectural way to determine + * this is MMUCFG[LPIDSIZE]. + */ + mfspr r10,SPRN_MMUCFG + rlwinm. 
r10,r10,0,MMUCFG_LPIDSIZE + beq 1f + bl setup_ehv_ivors +1: + mtlr r4 + blr + +_GLOBAL(__setup_cpu_e5500) + mflr r5 + bl __e500_icache_setup + bl __e500_dcache_setup + bl __setup_base_ivors + bl setup_perfmon_ivor + bl setup_doorbell_ivors + /* + * We only want to touch IVOR38-41 if we're running on hardware + * that supports category E.HV. The architectural way to determine + * this is MMUCFG[LPIDSIZE]. + */ + mfspr r10,SPRN_MMUCFG + rlwinm. r10,r10,0,MMUCFG_LPIDSIZE + beq 1f + bl setup_ehv_ivors + b 2f +1: + ld r10,CPU_SPEC_FEATURES(r4) + LOAD_REG_IMMEDIATE(r9,CPU_FTR_EMB_HV) + andc r10,r10,r9 + std r10,CPU_SPEC_FEATURES(r4) +2: + mtlr r5 + blr +#endif + +/* flush L1 data cache, it can apply to e500v2, e500mc and e5500 */ +_GLOBAL(flush_dcache_L1) + mfmsr r10 + wrteei 0 + + mfspr r3,SPRN_L1CFG0 + rlwinm r5,r3,9,3 /* Extract cache block size */ + twlgti r5,1 /* Only 32 and 64 byte cache blocks + * are currently defined. + */ + li r4,32 + subfic r6,r5,2 /* r6 = log2(1KiB / cache block size) - + * log2(number of ways) + */ + slw r5,r4,r5 /* r5 = cache block size */ + + rlwinm r7,r3,0,0xff /* Extract number of KiB in the cache */ + mulli r7,r7,13 /* An 8-way cache will require 13 + * loads per set. + */ + slw r7,r7,r6 + + /* save off HID0 and set DCFA */ + mfspr r8,SPRN_HID0 + ori r9,r8,HID0_DCFA@l + mtspr SPRN_HID0,r9 + isync + + LOAD_REG_IMMEDIATE(r6, KERNELBASE) + mr r4, r6 + mtctr r7 + +1: lwz r3,0(r4) /* Load... */ + add r4,r4,r5 + bdnz 1b + + msync + mr r4, r6 + mtctr r7 + +1: dcbf 0,r4 /* ...and flush. */ + add r4,r4,r5 + bdnz 1b + + /* restore HID0 */ + mtspr SPRN_HID0,r8 + isync + + wrtee r10 + + blr + +has_L2_cache: + /* skip L2 cache on P2040/P2040E as they have no L2 cache */ + mfspr r3, SPRN_SVR + /* shift right by 8 bits and clear E bit of SVR */ + rlwinm r4, r3, 24, ~0x800 + + lis r3, SVR_P2040@h + ori r3, r3, SVR_P2040@l + cmpw r4, r3 + beq 1f + + li r3, 1 + blr +1: + li r3, 0 + blr + +/* flush backside L2 cache */ +flush_backside_L2_cache: + mflr r10 + bl has_L2_cache + mtlr r10 + cmpwi r3, 0 + beq 2f + + /* Flush the L2 cache */ + mfspr r3, SPRN_L2CSR0 + ori r3, r3, L2CSR0_L2FL@l + msync + isync + mtspr SPRN_L2CSR0,r3 + isync + + /* check if it is complete */ +1: mfspr r3,SPRN_L2CSR0 + andi. r3, r3, L2CSR0_L2FL@l + bne 1b +2: + blr + +_GLOBAL(cpu_down_flush_e500v2) + mflr r0 + bl flush_dcache_L1 + mtlr r0 + blr + +_GLOBAL(cpu_down_flush_e500mc) +_GLOBAL(cpu_down_flush_e5500) + mflr r0 + bl flush_dcache_L1 + bl flush_backside_L2_cache + mtlr r0 + blr + +/* L1 Data Cache of e6500 contains no modified data, no flush is required */ +_GLOBAL(cpu_down_flush_e6500) + blr diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S deleted file mode 100644 index 058336079069..000000000000 --- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S +++ /dev/null @@ -1,333 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * This file contains low level CPU setup functions. - * Kumar Gala - * Copyright 2009 Freescale Semiconductor, Inc. - * - * Based on cpu_setup_6xx code by - * Benjamin Herrenschmidt - */ - -#include -#include -#include -#include -#include -#include -#include - -_GLOBAL(__e500_icache_setup) - mfspr r0, SPRN_L1CSR1 - andi. r3, r0, L1CSR1_ICE - bnelr /* Already enabled */ - oris r0, r0, L1CSR1_CPE@h - ori r0, r0, (L1CSR1_ICFI | L1CSR1_ICLFR | L1CSR1_ICE) - mtspr SPRN_L1CSR1, r0 /* Enable I-Cache */ - isync - blr - -_GLOBAL(__e500_dcache_setup) - mfspr r0, SPRN_L1CSR0 - andi. 
r3, r0, L1CSR0_DCE - bnelr /* Already enabled */ - msync - isync - li r0, 0 - mtspr SPRN_L1CSR0, r0 /* Disable */ - msync - isync - li r0, (L1CSR0_DCFI | L1CSR0_CLFC) - mtspr SPRN_L1CSR0, r0 /* Invalidate */ - isync -1: mfspr r0, SPRN_L1CSR0 - andi. r3, r0, L1CSR0_CLFC - bne+ 1b /* Wait for lock bits reset */ - oris r0, r0, L1CSR0_CPE@h - ori r0, r0, L1CSR0_DCE - msync - isync - mtspr SPRN_L1CSR0, r0 /* Enable */ - isync - blr - -/* - * FIXME - we haven't yet done testing to determine a reasonable default - * value for PW20_WAIT_IDLE_BIT. - */ -#define PW20_WAIT_IDLE_BIT 50 /* 1ms, TB frequency is 41.66MHZ */ -_GLOBAL(setup_pw20_idle) - mfspr r3, SPRN_PWRMGTCR0 - - /* Set PW20_WAIT bit, enable pw20 state*/ - ori r3, r3, PWRMGTCR0_PW20_WAIT - li r11, PW20_WAIT_IDLE_BIT - - /* Set Automatic PW20 Core Idle Count */ - rlwimi r3, r11, PWRMGTCR0_PW20_ENT_SHIFT, PWRMGTCR0_PW20_ENT - - mtspr SPRN_PWRMGTCR0, r3 - - blr - -/* - * FIXME - we haven't yet done testing to determine a reasonable default - * value for AV_WAIT_IDLE_BIT. - */ -#define AV_WAIT_IDLE_BIT 50 /* 1ms, TB frequency is 41.66MHZ */ -_GLOBAL(setup_altivec_idle) - mfspr r3, SPRN_PWRMGTCR0 - - /* Enable Altivec Idle */ - oris r3, r3, PWRMGTCR0_AV_IDLE_PD_EN@h - li r11, AV_WAIT_IDLE_BIT - - /* Set Automatic AltiVec Idle Count */ - rlwimi r3, r11, PWRMGTCR0_AV_IDLE_CNT_SHIFT, PWRMGTCR0_AV_IDLE_CNT - - mtspr SPRN_PWRMGTCR0, r3 - - blr - -#ifdef CONFIG_PPC_E500MC -_GLOBAL(__setup_cpu_e6500) - mflr r6 -#ifdef CONFIG_PPC64 - bl setup_altivec_ivors - /* Touch IVOR42 only if the CPU supports E.HV category */ - mfspr r10,SPRN_MMUCFG - rlwinm. r10,r10,0,MMUCFG_LPIDSIZE - beq 1f - bl setup_lrat_ivor -1: -#endif - bl setup_pw20_idle - bl setup_altivec_idle - bl __setup_cpu_e5500 - mtlr r6 - blr -#endif /* CONFIG_PPC_E500MC */ - -#ifdef CONFIG_PPC32 -#ifdef CONFIG_PPC_E500 -#ifndef CONFIG_PPC_E500MC -_GLOBAL(__setup_cpu_e500v1) -_GLOBAL(__setup_cpu_e500v2) - mflr r4 - bl __e500_icache_setup - bl __e500_dcache_setup - bl __setup_e500_ivors -#if defined(CONFIG_FSL_RIO) || defined(CONFIG_FSL_PCI) - /* Ensure that RFXE is set */ - mfspr r3,SPRN_HID1 - oris r3,r3,HID1_RFXE@h - mtspr SPRN_HID1,r3 -#endif - mtlr r4 - blr -#else /* CONFIG_PPC_E500MC */ -_GLOBAL(__setup_cpu_e500mc) -_GLOBAL(__setup_cpu_e5500) - mflr r5 - bl __e500_icache_setup - bl __e500_dcache_setup - bl __setup_e500mc_ivors - /* - * We only want to touch IVOR38-41 if we're running on hardware - * that supports category E.HV. The architectural way to determine - * this is MMUCFG[LPIDSIZE]. - */ - mfspr r3, SPRN_MMUCFG - rlwinm. r3, r3, 0, MMUCFG_LPIDSIZE - beq 1f - bl __setup_ehv_ivors - b 2f -1: - lwz r3, CPU_SPEC_FEATURES(r4) - /* We need this check as cpu_setup is also called for - * the secondary cores. So, if we have already cleared - * the feature on the primary core, avoid doing it on the - * secondary core. - */ - andi. r6, r3, CPU_FTR_EMB_HV - beq 2f - rlwinm r3, r3, 0, ~CPU_FTR_EMB_HV - stw r3, CPU_SPEC_FEATURES(r4) -2: - mtlr r5 - blr -#endif /* CONFIG_PPC_E500MC */ -#endif /* CONFIG_PPC_E500 */ -#endif /* CONFIG_PPC32 */ - -#ifdef CONFIG_PPC_BOOK3E_64 -_GLOBAL(__restore_cpu_e6500) - mflr r5 - bl setup_altivec_ivors - /* Touch IVOR42 only if the CPU supports E.HV category */ - mfspr r10,SPRN_MMUCFG - rlwinm. 
r10,r10,0,MMUCFG_LPIDSIZE - beq 1f - bl setup_lrat_ivor -1: - bl setup_pw20_idle - bl setup_altivec_idle - bl __restore_cpu_e5500 - mtlr r5 - blr - -_GLOBAL(__restore_cpu_e5500) - mflr r4 - bl __e500_icache_setup - bl __e500_dcache_setup - bl __setup_base_ivors - bl setup_perfmon_ivor - bl setup_doorbell_ivors - /* - * We only want to touch IVOR38-41 if we're running on hardware - * that supports category E.HV. The architectural way to determine - * this is MMUCFG[LPIDSIZE]. - */ - mfspr r10,SPRN_MMUCFG - rlwinm. r10,r10,0,MMUCFG_LPIDSIZE - beq 1f - bl setup_ehv_ivors -1: - mtlr r4 - blr - -_GLOBAL(__setup_cpu_e5500) - mflr r5 - bl __e500_icache_setup - bl __e500_dcache_setup - bl __setup_base_ivors - bl setup_perfmon_ivor - bl setup_doorbell_ivors - /* - * We only want to touch IVOR38-41 if we're running on hardware - * that supports category E.HV. The architectural way to determine - * this is MMUCFG[LPIDSIZE]. - */ - mfspr r10,SPRN_MMUCFG - rlwinm. r10,r10,0,MMUCFG_LPIDSIZE - beq 1f - bl setup_ehv_ivors - b 2f -1: - ld r10,CPU_SPEC_FEATURES(r4) - LOAD_REG_IMMEDIATE(r9,CPU_FTR_EMB_HV) - andc r10,r10,r9 - std r10,CPU_SPEC_FEATURES(r4) -2: - mtlr r5 - blr -#endif - -/* flush L1 data cache, it can apply to e500v2, e500mc and e5500 */ -_GLOBAL(flush_dcache_L1) - mfmsr r10 - wrteei 0 - - mfspr r3,SPRN_L1CFG0 - rlwinm r5,r3,9,3 /* Extract cache block size */ - twlgti r5,1 /* Only 32 and 64 byte cache blocks - * are currently defined. - */ - li r4,32 - subfic r6,r5,2 /* r6 = log2(1KiB / cache block size) - - * log2(number of ways) - */ - slw r5,r4,r5 /* r5 = cache block size */ - - rlwinm r7,r3,0,0xff /* Extract number of KiB in the cache */ - mulli r7,r7,13 /* An 8-way cache will require 13 - * loads per set. - */ - slw r7,r7,r6 - - /* save off HID0 and set DCFA */ - mfspr r8,SPRN_HID0 - ori r9,r8,HID0_DCFA@l - mtspr SPRN_HID0,r9 - isync - - LOAD_REG_IMMEDIATE(r6, KERNELBASE) - mr r4, r6 - mtctr r7 - -1: lwz r3,0(r4) /* Load... */ - add r4,r4,r5 - bdnz 1b - - msync - mr r4, r6 - mtctr r7 - -1: dcbf 0,r4 /* ...and flush. */ - add r4,r4,r5 - bdnz 1b - - /* restore HID0 */ - mtspr SPRN_HID0,r8 - isync - - wrtee r10 - - blr - -has_L2_cache: - /* skip L2 cache on P2040/P2040E as they have no L2 cache */ - mfspr r3, SPRN_SVR - /* shift right by 8 bits and clear E bit of SVR */ - rlwinm r4, r3, 24, ~0x800 - - lis r3, SVR_P2040@h - ori r3, r3, SVR_P2040@l - cmpw r4, r3 - beq 1f - - li r3, 1 - blr -1: - li r3, 0 - blr - -/* flush backside L2 cache */ -flush_backside_L2_cache: - mflr r10 - bl has_L2_cache - mtlr r10 - cmpwi r3, 0 - beq 2f - - /* Flush the L2 cache */ - mfspr r3, SPRN_L2CSR0 - ori r3, r3, L2CSR0_L2FL@l - msync - isync - mtspr SPRN_L2CSR0,r3 - isync - - /* check if it is complete */ -1: mfspr r3,SPRN_L2CSR0 - andi. 
r3, r3, L2CSR0_L2FL@l - bne 1b -2: - blr - -_GLOBAL(cpu_down_flush_e500v2) - mflr r0 - bl flush_dcache_L1 - mtlr r0 - blr - -_GLOBAL(cpu_down_flush_e500mc) -_GLOBAL(cpu_down_flush_e5500) - mflr r0 - bl flush_dcache_L1 - bl flush_backside_L2_cache - mtlr r0 - blr - -/* L1 Data Cache of e6500 contains no modified data, no flush is required */ -_GLOBAL(cpu_down_flush_e6500) - blr diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h index a2f82ced6e4a..1047dc053b47 100644 --- a/arch/powerpc/kernel/head_booke.h +++ b/arch/powerpc/kernel/head_booke.h @@ -34,7 +34,7 @@ */ #define THREAD_NORMSAVE(offset) (THREAD_NORMSAVES + (offset * 4)) -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 #define BOOKE_CLEAR_BTB(reg) \ START_BTB_FLUSH_SECTION \ BTB_FLUSH(reg) \ diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S index 4b4ba3364665..a2d3abb48075 100644 --- a/arch/powerpc/kernel/interrupt_64.S +++ b/arch/powerpc/kernel/interrupt_64.S @@ -230,7 +230,7 @@ _ASM_NOKPROBE_SYMBOL(system_call_common) std r0,GPR0(r1) std r10,GPR1(r1) std r2,GPR2(r1) -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 START_BTB_FLUSH_SECTION BTB_FLUSH(r10) END_BTB_FLUSH_SECTION diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index b562a1d2c750..206475e3e0b4 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -35,7 +35,7 @@ static enum branch_cache_flush_type link_stack_flush_type = BRANCH_CACHE_FLUSH_N bool barrier_nospec_enabled; static bool no_nospec; static bool btb_flush_enabled; -#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64) +#if defined(CONFIG_PPC_E500) || defined(CONFIG_PPC_BOOK3S_64) static bool no_spectrev2; #endif @@ -122,7 +122,7 @@ static __init int security_feature_debugfs_init(void) device_initcall(security_feature_debugfs_init); #endif /* CONFIG_DEBUG_FS */ -#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64) +#if defined(CONFIG_PPC_E500) || defined(CONFIG_PPC_BOOK3S_64) static int __init handle_nospectre_v2(char *p) { no_spectrev2 = true; @@ -130,9 +130,9 @@ static int __init handle_nospectre_v2(char *p) return 0; } early_param("nospectre_v2", handle_nospectre_v2); -#endif /* CONFIG_PPC_FSL_BOOK3E || CONFIG_PPC_BOOK3S_64 */ +#endif /* CONFIG_PPC_E500 || CONFIG_PPC_BOOK3S_64 */ -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 void __init setup_spectre_v2(void) { if (no_spectrev2 || cpu_mitigations_off()) @@ -140,7 +140,7 @@ void __init setup_spectre_v2(void) else btb_flush_enabled = true; } -#endif /* CONFIG_PPC_FSL_BOOK3E */ +#endif /* CONFIG_PPC_E500 */ #ifdef CONFIG_PPC_BOOK3S_64 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 169703fead57..11ded19186b9 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -708,7 +708,7 @@ static struct task_struct *current_set[NR_CPUS]; static void smp_store_cpu_info(int id) { per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR); -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 per_cpu(next_tlbcam_idx, id) = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1; #endif diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index 3a10cda9c05e..ef9a61718940 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -228,7 +228,7 @@ static void __init sysfs_create_dscr_default(void) } #endif /* CONFIG_PPC64 */ -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 
#define MAX_BIT 63 static u64 pw20_wt; @@ -907,7 +907,7 @@ static int register_cpu_online(unsigned int cpu) device_create_file(s, &dev_attr_tscr); #endif /* CONFIG_PPC64 */ -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) { device_create_file(s, &dev_attr_pw20_state); device_create_file(s, &dev_attr_pw20_wait_time); @@ -1003,7 +1003,7 @@ static int unregister_cpu_online(unsigned int cpu) device_remove_file(s, &dev_attr_tscr); #endif /* CONFIG_PPC64 */ -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) { device_remove_file(s, &dev_attr_pw20_state); device_remove_file(s, &dev_attr_pw20_wait_time); diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index b60d81acccfc..c025c83dfdc3 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -239,7 +239,7 @@ SECTIONS } #endif /* CONFIG_PPC_BARRIER_NOSPEC */ -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 . = ALIGN(8); __spec_btb_flush_fixup : AT(ADDR(__spec_btb_flush_fixup) - LOAD_OFFSET) { __start__btb_flush_fixup = .; diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 993d3f31832a..31f40f544de5 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -550,7 +550,7 @@ void do_barrier_nospec_fixups(bool enable) } #endif /* CONFIG_PPC_BARRIER_NOSPEC */ -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end) { unsigned int instr[2], *dest; @@ -602,7 +602,7 @@ void __init do_btb_flush_fixups(void) for (; start < end; start += 2) patch_btb_flush_section(start); } -#endif /* CONFIG_PPC_FSL_BOOK3E */ +#endif /* CONFIG_PPC_E500 */ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) { diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index bc84a594ca62..8c3ea5300ac3 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -623,7 +623,7 @@ static int __init hugetlbpage_init(void) if (pdshift > shift) { if (!IS_ENABLED(CONFIG_PPC_8xx)) pgtable_cache_add(pdshift - shift); - } else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) || + } else if (IS_ENABLED(CONFIG_PPC_E500) || IS_ENABLED(CONFIG_PPC_8xx)) { pgtable_cache_add(PTE_T_ORDER); } diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 6ddbd6cb3a2a..84d171953ba4 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -308,7 +308,7 @@ void __init mem_init(void) } #endif /* CONFIG_HIGHMEM */ -#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP) +#if defined(CONFIG_PPC_E500) && !defined(CONFIG_SMP) /* * If smp is enabled, next_tlbcam_idx is initialized in the cpu up * functions.... do it here for the non-smp case. 
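A note on the size encoding that recurs throughout the renamed files: the _PAGE_PSIZE_* values in pte-e500.h above and the tsize computations in the MM code below (tsize = __ilog2(size) - 10 in settlbcam(), tsize = shift - 10 in the hugetlb preload) all encode a page size as log2 of its size in KiB. A minimal user-space sketch, independent of the kernel headers (psize_bits() is a hypothetical helper, not kernel API), that checks a few of the values quoted in this patch:

#include <assert.h>

/* field = log2(page size in KiB), stored at bits 8-11 of the PTE
 * (mask _PAGE_PSIZE_MSK = 0x000f00 in pte-e500.h). */
static unsigned long psize_bits(unsigned long size_kib)
{
	unsigned int field = 0;

	while (size_kib > 1) {		/* open-coded __ilog2() */
		size_kib >>= 1;
		field++;
	}
	return (unsigned long)field << 8;
}

int main(void)
{
	assert(psize_bits(4)    == 0x000200);	/* _PAGE_PSIZE_4K */
	assert(psize_bits(64)   == 0x000600);	/* _PAGE_PSIZE_64K */
	assert(psize_bits(1024) == 0x000a00);	/* _PAGE_PSIZE_1M */
	assert(psize_bits(4096) == 0x000c00);	/* _PAGE_PSIZE_4M */
	return 0;
}
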
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h index 341c2e0c71d2..bd9784f77f2e 100644 --- a/arch/powerpc/mm/mmu_decl.h +++ b/arch/powerpc/mm/mmu_decl.h @@ -111,7 +111,7 @@ void MMU_init_hw_patch(void); unsigned long mmu_mapin_ram(unsigned long base, unsigned long top); #endif -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 extern unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx, bool dryrun, bool init); #ifdef CONFIG_PPC32 @@ -157,7 +157,7 @@ static inline phys_addr_t v_block_mapped(unsigned long va) { return 0; } static inline unsigned long p_block_mapped(phys_addr_t pa) { return 0; } #endif -#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC_FSL_BOOK3E) +#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC_E500) void mmu_mark_initmem_nx(void); void mmu_mark_rodata_ro(void); #else diff --git a/arch/powerpc/mm/nohash/Makefile b/arch/powerpc/mm/nohash/Makefile index b467a25ee155..f3894e79d5f7 100644 --- a/arch/powerpc/mm/nohash/Makefile +++ b/arch/powerpc/mm/nohash/Makefile @@ -7,13 +7,13 @@ obj-$(CONFIG_PPC_BOOK3E_64) += tlb_low_64e.o book3e_pgtable.o obj-$(CONFIG_40x) += 40x.o obj-$(CONFIG_44x) += 44x.o obj-$(CONFIG_PPC_8xx) += 8xx.o -obj-$(CONFIG_PPC_FSL_BOOK3E) += fsl_book3e.o +obj-$(CONFIG_PPC_E500) += e500.o obj-$(CONFIG_RANDOMIZE_BASE) += kaslr_booke.o ifdef CONFIG_HUGETLB_PAGE -obj-$(CONFIG_PPC_FSL_BOOK3E) += book3e_hugetlbpage.o +obj-$(CONFIG_PPC_E500) += e500_hugetlbpage.o endif # Disable kcov instrumentation on sensitive code # This is necessary for booting with kcov enabled on book3e machines KCOV_INSTRUMENT_tlb.o := n -KCOV_INSTRUMENT_fsl_book3e.o := n +KCOV_INSTRUMENT_e500.o := n diff --git a/arch/powerpc/mm/nohash/book3e_hugetlbpage.c b/arch/powerpc/mm/nohash/book3e_hugetlbpage.c deleted file mode 100644 index c7d4b317a823..000000000000 --- a/arch/powerpc/mm/nohash/book3e_hugetlbpage.c +++ /dev/null @@ -1,190 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * PPC Huge TLB Page Support for Book3E MMU - * - * Copyright (C) 2009 David Gibson, IBM Corporation. - * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor - * - */ -#include -#include - -#include - -#ifdef CONFIG_PPC64 -#include - -static inline int tlb1_next(void) -{ - struct paca_struct *paca = get_paca(); - struct tlb_core_data *tcd; - int this, next; - - tcd = paca->tcd_ptr; - this = tcd->esel_next; - - next = this + 1; - if (next >= tcd->esel_max) - next = tcd->esel_first; - - tcd->esel_next = next; - return this; -} - -static inline void book3e_tlb_lock(void) -{ - struct paca_struct *paca = get_paca(); - unsigned long tmp; - int token = smp_processor_id() + 1; - - /* - * Besides being unnecessary in the absence of SMT, this - * check prevents trying to do lbarx/stbcx. on e5500 which - * doesn't implement either feature. - */ - if (!cpu_has_feature(CPU_FTR_SMT)) - return; - - asm volatile("1: lbarx %0, 0, %1;" - "cmpwi %0, 0;" - "bne 2f;" - "stbcx. 
%2, 0, %1;" - "bne 1b;" - "b 3f;" - "2: lbzx %0, 0, %1;" - "cmpwi %0, 0;" - "bne 2b;" - "b 1b;" - "3:" - : "=&r" (tmp) - : "r" (&paca->tcd_ptr->lock), "r" (token) - : "memory"); -} - -static inline void book3e_tlb_unlock(void) -{ - struct paca_struct *paca = get_paca(); - - if (!cpu_has_feature(CPU_FTR_SMT)) - return; - - isync(); - paca->tcd_ptr->lock = 0; -} -#else -static inline int tlb1_next(void) -{ - int index, ncams; - - ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY; - - index = this_cpu_read(next_tlbcam_idx); - - /* Just round-robin the entries and wrap when we hit the end */ - if (unlikely(index == ncams - 1)) - __this_cpu_write(next_tlbcam_idx, tlbcam_index); - else - __this_cpu_inc(next_tlbcam_idx); - - return index; -} - -static inline void book3e_tlb_lock(void) -{ -} - -static inline void book3e_tlb_unlock(void) -{ -} -#endif - -static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid) -{ - int found = 0; - - mtspr(SPRN_MAS6, pid << 16); - asm volatile( - "tlbsx 0,%1\n" - "mfspr %0,0x271\n" - "srwi %0,%0,31\n" - : "=&r"(found) : "r"(ea)); - - return found; -} - -static void -book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte) -{ - unsigned long mas1, mas2; - u64 mas7_3; - unsigned long psize, tsize, shift; - unsigned long flags; - struct mm_struct *mm; - int index; - - if (unlikely(is_kernel_addr(ea))) - return; - - mm = vma->vm_mm; - - psize = vma_mmu_pagesize(vma); - shift = __ilog2(psize); - tsize = shift - 10; - /* - * We can't be interrupted while we're setting up the MAS - * registers or after we've confirmed that no tlb exists. - */ - local_irq_save(flags); - - book3e_tlb_lock(); - - if (unlikely(book3e_tlb_exists(ea, mm->context.id))) { - book3e_tlb_unlock(); - local_irq_restore(flags); - return; - } - - /* We have to use the CAM(TLB1) on FSL parts for hugepages */ - index = tlb1_next(); - mtspr(SPRN_MAS0, MAS0_ESEL(index) | MAS0_TLBSEL(1)); - - mas1 = MAS1_VALID | MAS1_TID(mm->context.id) | MAS1_TSIZE(tsize); - mas2 = ea & ~((1UL << shift) - 1); - mas2 |= (pte_val(pte) >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK; - mas7_3 = (u64)pte_pfn(pte) << PAGE_SHIFT; - mas7_3 |= (pte_val(pte) >> PTE_BAP_SHIFT) & MAS3_BAP_MASK; - if (!pte_dirty(pte)) - mas7_3 &= ~(MAS3_SW|MAS3_UW); - - mtspr(SPRN_MAS1, mas1); - mtspr(SPRN_MAS2, mas2); - - if (mmu_has_feature(MMU_FTR_BIG_PHYS)) - mtspr(SPRN_MAS7, upper_32_bits(mas7_3)); - mtspr(SPRN_MAS3, lower_32_bits(mas7_3)); - - asm volatile ("tlbwe"); - - book3e_tlb_unlock(); - local_irq_restore(flags); -} - -/* - * This is called at the end of handling a user page fault, when the - * fault has been handled by updating a PTE in the linux page tables. - * - * This must always be called with the pte lock held. - */ -void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) -{ - if (is_vm_hugetlb_page(vma)) - book3e_hugetlb_preload(vma, address, *ptep); -} - -void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr) -{ - struct hstate *hstate = hstate_file(vma->vm_file); - unsigned long tsize = huge_page_shift(hstate) - 10; - - __flush_tlb_page(vma->vm_mm, vmaddr, tsize, 0); -} diff --git a/arch/powerpc/mm/nohash/e500.c b/arch/powerpc/mm/nohash/e500.c new file mode 100644 index 000000000000..40a4e69ae1a9 --- /dev/null +++ b/arch/powerpc/mm/nohash/e500.c @@ -0,0 +1,375 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Modifications by Kumar Gala (galak@kernel.crashing.org) to support + * E500 Book E processors. 
+ * + * Copyright 2004,2010 Freescale Semiconductor, Inc. + * + * This file contains the routines for initializing the MMU + * on the 4xx series of chips. + * -- paulus + * + * Derived from arch/ppc/mm/init.c: + * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) + * + * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) + * and Cort Dougan (PReP) (cort@cs.nmt.edu) + * Copyright (C) 1996 Paul Mackerras + * + * Derived from "arch/i386/mm/init.c" + * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +unsigned int tlbcam_index; + +struct tlbcam TLBCAM[NUM_TLBCAMS]; + +static struct { + unsigned long start; + unsigned long limit; + phys_addr_t phys; +} tlbcam_addrs[NUM_TLBCAMS]; + +#ifdef CONFIG_PPC_85xx +/* + * Return PA for this VA if it is mapped by a CAM, or 0 + */ +phys_addr_t v_block_mapped(unsigned long va) +{ + int b; + for (b = 0; b < tlbcam_index; ++b) + if (va >= tlbcam_addrs[b].start && va < tlbcam_addrs[b].limit) + return tlbcam_addrs[b].phys + (va - tlbcam_addrs[b].start); + return 0; +} + +/* + * Return VA for a given PA or 0 if not mapped + */ +unsigned long p_block_mapped(phys_addr_t pa) +{ + int b; + for (b = 0; b < tlbcam_index; ++b) + if (pa >= tlbcam_addrs[b].phys + && pa < (tlbcam_addrs[b].limit-tlbcam_addrs[b].start) + +tlbcam_addrs[b].phys) + return tlbcam_addrs[b].start+(pa-tlbcam_addrs[b].phys); + return 0; +} +#endif + +/* + * Set up a variable-size TLB entry (tlbcam). The parameters are not checked; + * in particular size must be a power of 4 between 4k and the max supported by + * an implementation; max may further be limited by what can be represented in + * an unsigned long (for example, 32-bit implementations cannot support a 4GB + * size). + */ +static void settlbcam(int index, unsigned long virt, phys_addr_t phys, + unsigned long size, unsigned long flags, unsigned int pid) +{ + unsigned int tsize; + + tsize = __ilog2(size) - 10; + +#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC) + if ((flags & _PAGE_NO_CACHE) == 0) + flags |= _PAGE_COHERENT; +#endif + + TLBCAM[index].MAS0 = MAS0_TLBSEL(1) | MAS0_ESEL(index) | MAS0_NV(index+1); + TLBCAM[index].MAS1 = MAS1_VALID | MAS1_IPROT | MAS1_TSIZE(tsize) | MAS1_TID(pid); + TLBCAM[index].MAS2 = virt & PAGE_MASK; + + TLBCAM[index].MAS2 |= (flags & _PAGE_WRITETHRU) ? MAS2_W : 0; + TLBCAM[index].MAS2 |= (flags & _PAGE_NO_CACHE) ? MAS2_I : 0; + TLBCAM[index].MAS2 |= (flags & _PAGE_COHERENT) ? MAS2_M : 0; + TLBCAM[index].MAS2 |= (flags & _PAGE_GUARDED) ? MAS2_G : 0; + TLBCAM[index].MAS2 |= (flags & _PAGE_ENDIAN) ? MAS2_E : 0; + + TLBCAM[index].MAS3 = (phys & MAS3_RPN) | MAS3_SR; + TLBCAM[index].MAS3 |= (flags & _PAGE_RW) ? MAS3_SW : 0; + if (mmu_has_feature(MMU_FTR_BIG_PHYS)) + TLBCAM[index].MAS7 = (u64)phys >> 32; + + /* Below is unlikely -- only for large user pages or similar */ + if (pte_user(__pte(flags))) { + TLBCAM[index].MAS3 |= MAS3_UR; + TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? MAS3_UX : 0; + TLBCAM[index].MAS3 |= (flags & _PAGE_RW) ? MAS3_UW : 0; + } else { + TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? 
MAS3_SX : 0; + } + + tlbcam_addrs[index].start = virt; + tlbcam_addrs[index].limit = virt + size - 1; + tlbcam_addrs[index].phys = phys; +} + +static unsigned long calc_cam_sz(unsigned long ram, unsigned long virt, + phys_addr_t phys) +{ + unsigned int camsize = __ilog2(ram); + unsigned int align = __ffs(virt | phys); + unsigned long max_cam; + + if ((mfspr(SPRN_MMUCFG) & MMUCFG_MAVN) == MMUCFG_MAVN_V1) { + /* Convert (4^max) kB to (2^max) bytes */ + max_cam = ((mfspr(SPRN_TLB1CFG) >> 16) & 0xf) * 2 + 10; + camsize &= ~1U; + align &= ~1U; + } else { + /* Convert (2^max) kB to (2^max) bytes */ + max_cam = __ilog2(mfspr(SPRN_TLB1PS)) + 10; + } + + if (camsize > align) + camsize = align; + if (camsize > max_cam) + camsize = max_cam; + + return 1UL << camsize; +} + +static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt, + unsigned long ram, int max_cam_idx, + bool dryrun, bool init) +{ + int i; + unsigned long amount_mapped = 0; + unsigned long boundary; + + if (strict_kernel_rwx_enabled()) + boundary = (unsigned long)(_sinittext - _stext); + else + boundary = ram; + + /* Calculate CAM values */ + for (i = 0; boundary && i < max_cam_idx; i++) { + unsigned long cam_sz; + pgprot_t prot = init ? PAGE_KERNEL_X : PAGE_KERNEL_ROX; + + cam_sz = calc_cam_sz(boundary, virt, phys); + if (!dryrun) + settlbcam(i, virt, phys, cam_sz, pgprot_val(prot), 0); + + boundary -= cam_sz; + amount_mapped += cam_sz; + virt += cam_sz; + phys += cam_sz; + } + for (ram -= amount_mapped; ram && i < max_cam_idx; i++) { + unsigned long cam_sz; + pgprot_t prot = init ? PAGE_KERNEL_X : PAGE_KERNEL; + + cam_sz = calc_cam_sz(ram, virt, phys); + if (!dryrun) + settlbcam(i, virt, phys, cam_sz, pgprot_val(prot), 0); + + ram -= cam_sz; + amount_mapped += cam_sz; + virt += cam_sz; + phys += cam_sz; + } + + if (dryrun) + return amount_mapped; + + if (init) { + loadcam_multi(0, i, max_cam_idx); + tlbcam_index = i; + } else { + loadcam_multi(0, i, 0); + WARN_ON(i > tlbcam_index); + } + +#ifdef CONFIG_PPC64 + get_paca()->tcd.esel_next = i; + get_paca()->tcd.esel_max = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY; + get_paca()->tcd.esel_first = i; +#endif + + return amount_mapped; +} + +unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx, bool dryrun, bool init) +{ + unsigned long virt = PAGE_OFFSET; + phys_addr_t phys = memstart_addr; + + return map_mem_in_cams_addr(phys, virt, ram, max_cam_idx, dryrun, init); +} + +#ifdef CONFIG_PPC32 + +#if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS) +#error "LOWMEM_CAM_NUM must be less than NUM_TLBCAMS" +#endif + +unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) +{ + return tlbcam_addrs[tlbcam_index - 1].limit - PAGE_OFFSET + 1; +} + +void flush_instruction_cache(void) +{ + unsigned long tmp; + + tmp = mfspr(SPRN_L1CSR1); + tmp |= L1CSR1_ICFI | L1CSR1_ICLFR; + mtspr(SPRN_L1CSR1, tmp); + isync(); +} + +/* + * MMU_init_hw does the chip-specific initialization of the MMU hardware. 
+ */ +void __init MMU_init_hw(void) +{ + flush_instruction_cache(); +} + +static unsigned long __init tlbcam_sz(int idx) +{ + return tlbcam_addrs[idx].limit - tlbcam_addrs[idx].start + 1; +} + +void __init adjust_total_lowmem(void) +{ + unsigned long ram; + int i; + + /* adjust lowmem size to __max_low_memory */ + ram = min((phys_addr_t)__max_low_memory, (phys_addr_t)total_lowmem); + + i = switch_to_as1(); + __max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, false, true); + restore_to_as0(i, 0, NULL, 1); + + pr_info("Memory CAM mapping: "); + for (i = 0; i < tlbcam_index - 1; i++) + pr_cont("%lu/", tlbcam_sz(i) >> 20); + pr_cont("%lu Mb, residual: %dMb\n", tlbcam_sz(tlbcam_index - 1) >> 20, + (unsigned int)((total_lowmem - __max_low_memory) >> 20)); + + memblock_set_current_limit(memstart_addr + __max_low_memory); +} + +#ifdef CONFIG_STRICT_KERNEL_RWX +void mmu_mark_rodata_ro(void) +{ + unsigned long remapped; + + remapped = map_mem_in_cams(__max_low_memory, CONFIG_LOWMEM_CAM_NUM, false, false); + + WARN_ON(__max_low_memory != remapped); +} +#endif + +void mmu_mark_initmem_nx(void) +{ + /* Everything is done in mmu_mark_rodata_ro() */ +} + +void setup_initial_memory_limit(phys_addr_t first_memblock_base, + phys_addr_t first_memblock_size) +{ + phys_addr_t limit = first_memblock_base + first_memblock_size; + + /* 64M mapped initially according to head_fsl_booke.S */ + memblock_set_current_limit(min_t(u64, limit, 0x04000000)); +} + +#ifdef CONFIG_RELOCATABLE +int __initdata is_second_reloc; +notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start) +{ + unsigned long base = kernstart_virt_addr; + phys_addr_t size; + + kernstart_addr = start; + if (is_second_reloc) { + virt_phys_offset = PAGE_OFFSET - memstart_addr; + kaslr_late_init(); + return; + } + + /* + * Relocatable kernel support based on processing of dynamic + * relocation entries. Before we get the real memstart_addr, + * We will compute the virt_phys_offset like this: + * virt_phys_offset = stext.run - kernstart_addr + * + * stext.run = (KERNELBASE & ~0x3ffffff) + + * (kernstart_addr & 0x3ffffff) + * When we relocate, we have : + * + * (kernstart_addr & 0x3ffffff) = (stext.run & 0x3ffffff) + * + * hence: + * virt_phys_offset = (KERNELBASE & ~0x3ffffff) - + * (kernstart_addr & ~0x3ffffff) + * + */ + start &= ~0x3ffffff; + base &= ~0x3ffffff; + virt_phys_offset = base - start; + early_get_first_memblock_info(__va(dt_ptr), &size); + /* + * We now get the memstart_addr, then we should check if this + * address is the same as what the PAGE_OFFSET map to now. If + * not we have to change the map of PAGE_OFFSET to memstart_addr + * and do a second relocation. 
+ */ + if (start != memstart_addr) { + int n; + long offset = start - memstart_addr; + + is_second_reloc = 1; + n = switch_to_as1(); + /* map a 64M area for the second relocation */ + if (memstart_addr > start) + map_mem_in_cams(0x4000000, CONFIG_LOWMEM_CAM_NUM, + false, true); + else + map_mem_in_cams_addr(start, PAGE_OFFSET + offset, + 0x4000000, CONFIG_LOWMEM_CAM_NUM, + false, true); + restore_to_as0(n, offset, __va(dt_ptr), 1); + /* We should never reach here */ + panic("Relocation error"); + } + + kaslr_early_init(__va(dt_ptr), size); +} +#endif +#endif diff --git a/arch/powerpc/mm/nohash/e500_hugetlbpage.c b/arch/powerpc/mm/nohash/e500_hugetlbpage.c new file mode 100644 index 000000000000..c7d4b317a823 --- /dev/null +++ b/arch/powerpc/mm/nohash/e500_hugetlbpage.c @@ -0,0 +1,190 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PPC Huge TLB Page Support for Book3E MMU + * + * Copyright (C) 2009 David Gibson, IBM Corporation. + * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor + * + */ +#include +#include + +#include + +#ifdef CONFIG_PPC64 +#include + +static inline int tlb1_next(void) +{ + struct paca_struct *paca = get_paca(); + struct tlb_core_data *tcd; + int this, next; + + tcd = paca->tcd_ptr; + this = tcd->esel_next; + + next = this + 1; + if (next >= tcd->esel_max) + next = tcd->esel_first; + + tcd->esel_next = next; + return this; +} + +static inline void book3e_tlb_lock(void) +{ + struct paca_struct *paca = get_paca(); + unsigned long tmp; + int token = smp_processor_id() + 1; + + /* + * Besides being unnecessary in the absence of SMT, this + * check prevents trying to do lbarx/stbcx. on e5500 which + * doesn't implement either feature. + */ + if (!cpu_has_feature(CPU_FTR_SMT)) + return; + + asm volatile("1: lbarx %0, 0, %1;" + "cmpwi %0, 0;" + "bne 2f;" + "stbcx. %2, 0, %1;" + "bne 1b;" + "b 3f;" + "2: lbzx %0, 0, %1;" + "cmpwi %0, 0;" + "bne 2b;" + "b 1b;" + "3:" + : "=&r" (tmp) + : "r" (&paca->tcd_ptr->lock), "r" (token) + : "memory"); +} + +static inline void book3e_tlb_unlock(void) +{ + struct paca_struct *paca = get_paca(); + + if (!cpu_has_feature(CPU_FTR_SMT)) + return; + + isync(); + paca->tcd_ptr->lock = 0; +} +#else +static inline int tlb1_next(void) +{ + int index, ncams; + + ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY; + + index = this_cpu_read(next_tlbcam_idx); + + /* Just round-robin the entries and wrap when we hit the end */ + if (unlikely(index == ncams - 1)) + __this_cpu_write(next_tlbcam_idx, tlbcam_index); + else + __this_cpu_inc(next_tlbcam_idx); + + return index; +} + +static inline void book3e_tlb_lock(void) +{ +} + +static inline void book3e_tlb_unlock(void) +{ +} +#endif + +static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid) +{ + int found = 0; + + mtspr(SPRN_MAS6, pid << 16); + asm volatile( + "tlbsx 0,%1\n" + "mfspr %0,0x271\n" + "srwi %0,%0,31\n" + : "=&r"(found) : "r"(ea)); + + return found; +} + +static void +book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte) +{ + unsigned long mas1, mas2; + u64 mas7_3; + unsigned long psize, tsize, shift; + unsigned long flags; + struct mm_struct *mm; + int index; + + if (unlikely(is_kernel_addr(ea))) + return; + + mm = vma->vm_mm; + + psize = vma_mmu_pagesize(vma); + shift = __ilog2(psize); + tsize = shift - 10; + /* + * We can't be interrupted while we're setting up the MAS + * registers or after we've confirmed that no tlb exists. 
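Two details of the preload path above may deserve a note. tlb1_next() is plain round-robin over the TLB1 entries not pinned by the bolted kernel mapping (esel_first is set just past the CAMs loaded by map_mem_in_cams_addr()), and the hand-rolled lbarx/stbcx. byte lock exists only because on SMT parts (e6500) both threads share one tlb_core_data. A userspace model of the rotation, with invented values for esel_first/esel_max:

#include <stdio.h>

struct tcd { int esel_next, esel_first, esel_max; };

/* Same victim selection as tlb1_next() above, minus the paca access. */
static int tlb1_next_model(struct tcd *tcd)
{
        int this = tcd->esel_next;
        int next = this + 1;

        if (next >= tcd->esel_max)
                next = tcd->esel_first;
        tcd->esel_next = next;
        return this;
}

int main(void)
{
        /* Entries 0-2 assumed bolted; victims cycle through 3..7. */
        struct tcd tcd = { .esel_next = 3, .esel_first = 3, .esel_max = 8 };
        int i;

        for (i = 0; i < 7; i++)
                printf("%d ", tlb1_next_model(&tcd));
        printf("\n");           /* prints: 3 4 5 6 7 3 4 */
        return 0;
}

Round-robin is a sensible policy here: TLB1 is fully associative with software-managed replacement, so cycling the ESEL is the cheapest scheme that still bounds how long any stale hugepage entry survives.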
+ */ + local_irq_save(flags); + + book3e_tlb_lock(); + + if (unlikely(book3e_tlb_exists(ea, mm->context.id))) { + book3e_tlb_unlock(); + local_irq_restore(flags); + return; + } + + /* We have to use the CAM(TLB1) on FSL parts for hugepages */ + index = tlb1_next(); + mtspr(SPRN_MAS0, MAS0_ESEL(index) | MAS0_TLBSEL(1)); + + mas1 = MAS1_VALID | MAS1_TID(mm->context.id) | MAS1_TSIZE(tsize); + mas2 = ea & ~((1UL << shift) - 1); + mas2 |= (pte_val(pte) >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK; + mas7_3 = (u64)pte_pfn(pte) << PAGE_SHIFT; + mas7_3 |= (pte_val(pte) >> PTE_BAP_SHIFT) & MAS3_BAP_MASK; + if (!pte_dirty(pte)) + mas7_3 &= ~(MAS3_SW|MAS3_UW); + + mtspr(SPRN_MAS1, mas1); + mtspr(SPRN_MAS2, mas2); + + if (mmu_has_feature(MMU_FTR_BIG_PHYS)) + mtspr(SPRN_MAS7, upper_32_bits(mas7_3)); + mtspr(SPRN_MAS3, lower_32_bits(mas7_3)); + + asm volatile ("tlbwe"); + + book3e_tlb_unlock(); + local_irq_restore(flags); +} + +/* + * This is called at the end of handling a user page fault, when the + * fault has been handled by updating a PTE in the linux page tables. + * + * This must always be called with the pte lock held. + */ +void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) +{ + if (is_vm_hugetlb_page(vma)) + book3e_hugetlb_preload(vma, address, *ptep); +} + +void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr) +{ + struct hstate *hstate = hstate_file(vma->vm_file); + unsigned long tsize = huge_page_shift(hstate) - 10; + + __flush_tlb_page(vma->vm_mm, vmaddr, tsize, 0); +} diff --git a/arch/powerpc/mm/nohash/fsl_book3e.c b/arch/powerpc/mm/nohash/fsl_book3e.c deleted file mode 100644 index 40a4e69ae1a9..000000000000 --- a/arch/powerpc/mm/nohash/fsl_book3e.c +++ /dev/null @@ -1,375 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Modifications by Kumar Gala (galak@kernel.crashing.org) to support - * E500 Book E processors. - * - * Copyright 2004,2010 Freescale Semiconductor, Inc. - * - * This file contains the routines for initializing the MMU - * on the 4xx series of chips. 
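(Stepping back to book3e_hugetlb_preload() in the new e500_hugetlbpage.c above.) The tsize it writes into MAS1 is just the page size expressed as log2 in KB units, i.e. shift - 10; flush_hugetlb_page() uses the same encoding. A quick hypothetical check of the arithmetic, with sample sizes made up for the illustration:

#include <stdio.h>

int main(void)
{
        /* Sample hugepage sizes; the supported set is hardware-specific. */
        unsigned long sizes[] = { 1UL << 22, 1UL << 24, 1UL << 28 };
        int i;

        for (i = 0; i < 3; i++) {
                unsigned int shift = __builtin_ctzl(sizes[i]);

                printf("%3lu MB page -> shift %u -> tsize %u\n",
                       sizes[i] >> 20, shift, shift - 10);
        }
        return 0;       /* 4 MB -> 12, 16 MB -> 14, 256 MB -> 18 */
}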
- * -- paulus - * - * Derived from arch/ppc/mm/init.c: - * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) - * - * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) - * and Cort Dougan (PReP) (cort@cs.nmt.edu) - * Copyright (C) 1996 Paul Mackerras - * - * Derived from "arch/i386/mm/init.c" - * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -unsigned int tlbcam_index; - -struct tlbcam TLBCAM[NUM_TLBCAMS]; - -static struct { - unsigned long start; - unsigned long limit; - phys_addr_t phys; -} tlbcam_addrs[NUM_TLBCAMS]; - -#ifdef CONFIG_PPC_85xx -/* - * Return PA for this VA if it is mapped by a CAM, or 0 - */ -phys_addr_t v_block_mapped(unsigned long va) -{ - int b; - for (b = 0; b < tlbcam_index; ++b) - if (va >= tlbcam_addrs[b].start && va < tlbcam_addrs[b].limit) - return tlbcam_addrs[b].phys + (va - tlbcam_addrs[b].start); - return 0; -} - -/* - * Return VA for a given PA or 0 if not mapped - */ -unsigned long p_block_mapped(phys_addr_t pa) -{ - int b; - for (b = 0; b < tlbcam_index; ++b) - if (pa >= tlbcam_addrs[b].phys - && pa < (tlbcam_addrs[b].limit-tlbcam_addrs[b].start) - +tlbcam_addrs[b].phys) - return tlbcam_addrs[b].start+(pa-tlbcam_addrs[b].phys); - return 0; -} -#endif - -/* - * Set up a variable-size TLB entry (tlbcam). The parameters are not checked; - * in particular size must be a power of 4 between 4k and the max supported by - * an implementation; max may further be limited by what can be represented in - * an unsigned long (for example, 32-bit implementations cannot support a 4GB - * size). - */ -static void settlbcam(int index, unsigned long virt, phys_addr_t phys, - unsigned long size, unsigned long flags, unsigned int pid) -{ - unsigned int tsize; - - tsize = __ilog2(size) - 10; - -#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC) - if ((flags & _PAGE_NO_CACHE) == 0) - flags |= _PAGE_COHERENT; -#endif - - TLBCAM[index].MAS0 = MAS0_TLBSEL(1) | MAS0_ESEL(index) | MAS0_NV(index+1); - TLBCAM[index].MAS1 = MAS1_VALID | MAS1_IPROT | MAS1_TSIZE(tsize) | MAS1_TID(pid); - TLBCAM[index].MAS2 = virt & PAGE_MASK; - - TLBCAM[index].MAS2 |= (flags & _PAGE_WRITETHRU) ? MAS2_W : 0; - TLBCAM[index].MAS2 |= (flags & _PAGE_NO_CACHE) ? MAS2_I : 0; - TLBCAM[index].MAS2 |= (flags & _PAGE_COHERENT) ? MAS2_M : 0; - TLBCAM[index].MAS2 |= (flags & _PAGE_GUARDED) ? MAS2_G : 0; - TLBCAM[index].MAS2 |= (flags & _PAGE_ENDIAN) ? MAS2_E : 0; - - TLBCAM[index].MAS3 = (phys & MAS3_RPN) | MAS3_SR; - TLBCAM[index].MAS3 |= (flags & _PAGE_RW) ? MAS3_SW : 0; - if (mmu_has_feature(MMU_FTR_BIG_PHYS)) - TLBCAM[index].MAS7 = (u64)phys >> 32; - - /* Below is unlikely -- only for large user pages or similar */ - if (pte_user(__pte(flags))) { - TLBCAM[index].MAS3 |= MAS3_UR; - TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? MAS3_UX : 0; - TLBCAM[index].MAS3 |= (flags & _PAGE_RW) ? MAS3_UW : 0; - } else { - TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? 
MAS3_SX : 0; - } - - tlbcam_addrs[index].start = virt; - tlbcam_addrs[index].limit = virt + size - 1; - tlbcam_addrs[index].phys = phys; -} - -static unsigned long calc_cam_sz(unsigned long ram, unsigned long virt, - phys_addr_t phys) -{ - unsigned int camsize = __ilog2(ram); - unsigned int align = __ffs(virt | phys); - unsigned long max_cam; - - if ((mfspr(SPRN_MMUCFG) & MMUCFG_MAVN) == MMUCFG_MAVN_V1) { - /* Convert (4^max) kB to (2^max) bytes */ - max_cam = ((mfspr(SPRN_TLB1CFG) >> 16) & 0xf) * 2 + 10; - camsize &= ~1U; - align &= ~1U; - } else { - /* Convert (2^max) kB to (2^max) bytes */ - max_cam = __ilog2(mfspr(SPRN_TLB1PS)) + 10; - } - - if (camsize > align) - camsize = align; - if (camsize > max_cam) - camsize = max_cam; - - return 1UL << camsize; -} - -static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt, - unsigned long ram, int max_cam_idx, - bool dryrun, bool init) -{ - int i; - unsigned long amount_mapped = 0; - unsigned long boundary; - - if (strict_kernel_rwx_enabled()) - boundary = (unsigned long)(_sinittext - _stext); - else - boundary = ram; - - /* Calculate CAM values */ - for (i = 0; boundary && i < max_cam_idx; i++) { - unsigned long cam_sz; - pgprot_t prot = init ? PAGE_KERNEL_X : PAGE_KERNEL_ROX; - - cam_sz = calc_cam_sz(boundary, virt, phys); - if (!dryrun) - settlbcam(i, virt, phys, cam_sz, pgprot_val(prot), 0); - - boundary -= cam_sz; - amount_mapped += cam_sz; - virt += cam_sz; - phys += cam_sz; - } - for (ram -= amount_mapped; ram && i < max_cam_idx; i++) { - unsigned long cam_sz; - pgprot_t prot = init ? PAGE_KERNEL_X : PAGE_KERNEL; - - cam_sz = calc_cam_sz(ram, virt, phys); - if (!dryrun) - settlbcam(i, virt, phys, cam_sz, pgprot_val(prot), 0); - - ram -= cam_sz; - amount_mapped += cam_sz; - virt += cam_sz; - phys += cam_sz; - } - - if (dryrun) - return amount_mapped; - - if (init) { - loadcam_multi(0, i, max_cam_idx); - tlbcam_index = i; - } else { - loadcam_multi(0, i, 0); - WARN_ON(i > tlbcam_index); - } - -#ifdef CONFIG_PPC64 - get_paca()->tcd.esel_next = i; - get_paca()->tcd.esel_max = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY; - get_paca()->tcd.esel_first = i; -#endif - - return amount_mapped; -} - -unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx, bool dryrun, bool init) -{ - unsigned long virt = PAGE_OFFSET; - phys_addr_t phys = memstart_addr; - - return map_mem_in_cams_addr(phys, virt, ram, max_cam_idx, dryrun, init); -} - -#ifdef CONFIG_PPC32 - -#if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS) -#error "LOWMEM_CAM_NUM must be less than NUM_TLBCAMS" -#endif - -unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) -{ - return tlbcam_addrs[tlbcam_index - 1].limit - PAGE_OFFSET + 1; -} - -void flush_instruction_cache(void) -{ - unsigned long tmp; - - tmp = mfspr(SPRN_L1CSR1); - tmp |= L1CSR1_ICFI | L1CSR1_ICLFR; - mtspr(SPRN_L1CSR1, tmp); - isync(); -} - -/* - * MMU_init_hw does the chip-specific initialization of the MMU hardware. 
- */ -void __init MMU_init_hw(void) -{ - flush_instruction_cache(); -} - -static unsigned long __init tlbcam_sz(int idx) -{ - return tlbcam_addrs[idx].limit - tlbcam_addrs[idx].start + 1; -} - -void __init adjust_total_lowmem(void) -{ - unsigned long ram; - int i; - - /* adjust lowmem size to __max_low_memory */ - ram = min((phys_addr_t)__max_low_memory, (phys_addr_t)total_lowmem); - - i = switch_to_as1(); - __max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, false, true); - restore_to_as0(i, 0, NULL, 1); - - pr_info("Memory CAM mapping: "); - for (i = 0; i < tlbcam_index - 1; i++) - pr_cont("%lu/", tlbcam_sz(i) >> 20); - pr_cont("%lu Mb, residual: %dMb\n", tlbcam_sz(tlbcam_index - 1) >> 20, - (unsigned int)((total_lowmem - __max_low_memory) >> 20)); - - memblock_set_current_limit(memstart_addr + __max_low_memory); -} - -#ifdef CONFIG_STRICT_KERNEL_RWX -void mmu_mark_rodata_ro(void) -{ - unsigned long remapped; - - remapped = map_mem_in_cams(__max_low_memory, CONFIG_LOWMEM_CAM_NUM, false, false); - - WARN_ON(__max_low_memory != remapped); -} -#endif - -void mmu_mark_initmem_nx(void) -{ - /* Everything is done in mmu_mark_rodata_ro() */ -} - -void setup_initial_memory_limit(phys_addr_t first_memblock_base, - phys_addr_t first_memblock_size) -{ - phys_addr_t limit = first_memblock_base + first_memblock_size; - - /* 64M mapped initially according to head_fsl_booke.S */ - memblock_set_current_limit(min_t(u64, limit, 0x04000000)); -} - -#ifdef CONFIG_RELOCATABLE -int __initdata is_second_reloc; -notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start) -{ - unsigned long base = kernstart_virt_addr; - phys_addr_t size; - - kernstart_addr = start; - if (is_second_reloc) { - virt_phys_offset = PAGE_OFFSET - memstart_addr; - kaslr_late_init(); - return; - } - - /* - * Relocatable kernel support based on processing of dynamic - * relocation entries. Before we get the real memstart_addr, - * We will compute the virt_phys_offset like this: - * virt_phys_offset = stext.run - kernstart_addr - * - * stext.run = (KERNELBASE & ~0x3ffffff) + - * (kernstart_addr & 0x3ffffff) - * When we relocate, we have : - * - * (kernstart_addr & 0x3ffffff) = (stext.run & 0x3ffffff) - * - * hence: - * virt_phys_offset = (KERNELBASE & ~0x3ffffff) - - * (kernstart_addr & ~0x3ffffff) - * - */ - start &= ~0x3ffffff; - base &= ~0x3ffffff; - virt_phys_offset = base - start; - early_get_first_memblock_info(__va(dt_ptr), &size); - /* - * We now get the memstart_addr, then we should check if this - * address is the same as what the PAGE_OFFSET map to now. If - * not we have to change the map of PAGE_OFFSET to memstart_addr - * and do a second relocation. - */ - if (start != memstart_addr) { - int n; - long offset = start - memstart_addr; - - is_second_reloc = 1; - n = switch_to_as1(); - /* map a 64M area for the second relocation */ - if (memstart_addr > start) - map_mem_in_cams(0x4000000, CONFIG_LOWMEM_CAM_NUM, - false, true); - else - map_mem_in_cams_addr(start, PAGE_OFFSET + offset, - 0x4000000, CONFIG_LOWMEM_CAM_NUM, - false, true); - restore_to_as0(n, offset, __va(dt_ptr), 1); - /* We should never reach here */ - panic("Relocation error"); - } - - kaslr_early_init(__va(dt_ptr), size); -} -#endif -#endif diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c index f21896ebdc5a..fcb1e5ae5c55 100644 --- a/arch/powerpc/mm/nohash/tlb.c +++ b/arch/powerpc/mm/nohash/tlb.c @@ -50,7 +50,7 @@ * indirect page table entries. 
*/ #if defined(CONFIG_PPC_BOOK3E_MMU) || defined(CONFIG_PPC_8xx) -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { [MMU_PAGE_4K] = { .shift = 12, @@ -166,7 +166,7 @@ int extlb_level_exc; #endif /* CONFIG_PPC64 */ -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 /* next_tlbcam_idx is used to round-robin tlbcam entry assignment */ DEFINE_PER_CPU(int, next_tlbcam_idx); EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx); @@ -441,7 +441,7 @@ static void __init setup_page_sizes(void) unsigned int eptcfg; int i, psize; -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 unsigned int mmucfg = mfspr(SPRN_MMUCFG); int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E); @@ -584,7 +584,7 @@ static void __init setup_mmu_htw(void) patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e); patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e); break; -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 case PPC_HTW_E6500: extlb_level_exc = EX_TLB_SIZE; patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e); @@ -627,7 +627,7 @@ static void early_init_this_mmu(void) } mtspr(SPRN_MAS4, mas4); -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { unsigned int num_cams; bool map = true; @@ -680,7 +680,7 @@ static void __init early_init_mmu_global(void) /* Look for HW tablewalk support */ setup_mmu_htw(); -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { if (book3e_htw_mode == PPC_HTW_NONE) { extlb_level_exc = EX_TLB_SIZE; @@ -701,7 +701,7 @@ static void __init early_init_mmu_global(void) static void __init early_mmu_set_memory_limit(void) { -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { /* * Limit memory so we dont have linear faults. @@ -750,7 +750,7 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base, * We crop it to the size of the first MEMBLOCK to * avoid going over total available memory just in case... */ -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { unsigned long linear_sz; unsigned int num_cams; diff --git a/arch/powerpc/mm/nohash/tlb_low.S b/arch/powerpc/mm/nohash/tlb_low.S index 6914bc8e4ead..e1199608ff4d 100644 --- a/arch/powerpc/mm/nohash/tlb_low.S +++ b/arch/powerpc/mm/nohash/tlb_low.S @@ -364,7 +364,7 @@ _GLOBAL(_tlbivax_bcast) #error Unsupported processor type ! #endif -#if defined(CONFIG_PPC_FSL_BOOK3E) +#if defined(CONFIG_PPC_E500) /* * extern void loadcam_entry(unsigned int index) * diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 5b065186ace5..32c60ad8f45d 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -107,7 +107,6 @@ config PPC_BOOK3S_64 config PPC_BOOK3E_64 bool "Embedded processors" - select PPC_FSL_BOOK3E select PPC_E500 select PPC_E500MC select PPC_FPU # Make it a choice ? 
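For orientation: mmu_psize_defs[], whose e500 flavour the first tlb.c hunk above now guards with CONFIG_PPC_E500, is the catalogue the MM code consults to translate between page-size shifts and MMU_PAGE_* indices. A deliberately trimmed model of that lookup follows; the real table has more entries and fields, and the indices here are illustrative only:

#include <stdio.h>

struct mmu_psize_def { unsigned int shift; };

static const struct mmu_psize_def defs[] = {
        { .shift = 12 },        /* 4K  */
        { .shift = 14 },        /* 16K */
        { .shift = 16 },        /* 64K */
        { .shift = 22 },        /* 4M  */
};

static int shift_to_psize(unsigned int shift)
{
        unsigned int i;

        for (i = 0; i < sizeof(defs) / sizeof(defs[0]); i++)
                if (defs[i].shift == shift)
                        return i;
        return -1;      /* size not supported by this MMU */
}

int main(void)
{
        printf("64K pages -> psize index %d\n", shift_to_psize(16));
        return 0;
}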
@@ -259,8 +258,11 @@ config PPC_BOOK3S config PPC_E500 select FSL_EMB_PERFMON - select PPC_FSL_BOOK3E bool + select ARCH_SUPPORTS_HUGETLBFS if PHYS_64BIT || PPC64 + select PPC_SMP_MUXED_IPI + select PPC_DOORBELL + select PPC_KUEP config PPC_E500MC bool "e500mc Support" @@ -320,16 +322,6 @@ config BOOKE_OR_40x depends on BOOKE || 40x default y -# this is for common code between PPC32 & PPC64 FSL BOOKE -config PPC_FSL_BOOK3E - bool - select ARCH_SUPPORTS_HUGETLBFS if PHYS_64BIT || PPC64 - imply FSL_EMB_PERFMON - select PPC_SMP_MUXED_IPI - select PPC_DOORBELL - select PPC_KUEP - default y if PPC_85xx - config PTE_64BIT bool depends on 44x || PPC_E500 || PPC_86xx -- cgit v1.2.3