From d6ccb1f55ddf5146219707c0e71b85e3a52179b4 Mon Sep 17 00:00:00 2001
From: Kumar Gala
Date: Wed, 10 Mar 2010 23:33:25 -0600
Subject: powerpc/85xx: Make sure lwarx hint isn't set on ppc32

e500v1/v2 based chips will treat any reserved field being set in an
opcode as illegal.  Thus always setting the hint in the opcode is a bad
idea.

Anton should be kept away from the powerpc opcode map.

Signed-off-by: Kumar Gala
---
 arch/powerpc/include/asm/ppc-opcode.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index aea714797590..d553bbeb726c 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -25,7 +25,7 @@
 #define PPC_INST_LDARX			0x7c0000a8
 #define PPC_INST_LSWI			0x7c0004aa
 #define PPC_INST_LSWX			0x7c00042a
-#define PPC_INST_LWARX			0x7c000029
+#define PPC_INST_LWARX			0x7c000028
 #define PPC_INST_LWSYNC			0x7c2004ac
 #define PPC_INST_LXVD2X			0x7c000698
 #define PPC_INST_MCRXR			0x7c000400
@@ -62,8 +62,8 @@
 #define __PPC_T_TLB(t)	(((t) & 0x3) << 21)
 #define __PPC_WC(w)	(((w) & 0x3) << 21)
 /*
- * Only use the larx hint bit on 64bit CPUs. Once we verify it doesn't have
- * any side effects on all 32bit processors, we can do this all the time.
+ * Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a
+ * larx with EH set as an illegal instruction.
  */
 #ifdef CONFIG_PPC64
 #define __PPC_EH(eh)	(((eh) & 0x1) << 0)
--
cgit v1.2.3
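A side note on the encoding this patch corrects: the EH hint is bit 0 of the lwarx opcode, so unconditionally OR-ing it in turns 0x7c000028 into 0x7c000029, which e500v1/v2 decode as an illegal instruction. Below is a minimal user-space sketch of that arithmetic. The opcode and the PPC64 branch of __PPC_EH() are copied from the patch; the 32-bit fallback of 0 is assumed to match the other half of the same #ifdef, and the program is only an illustration, not kernel code.

```c
#include <stdint.h>
#include <stdio.h>

/* Opcode value taken from the patch above (ppc-opcode.h). */
#define PPC_INST_LWARX	0x7c000028

/*
 * Mirrors the conditional __PPC_EH() definition: encode the hint only on
 * 64-bit builds, leave the reserved bit clear everywhere else.
 */
#ifdef CONFIG_PPC64
#define __PPC_EH(eh)	(((eh) & 0x1) << 0)
#else
#define __PPC_EH(eh)	0
#endif

int main(void)
{
	uint32_t insn = PPC_INST_LWARX | __PPC_EH(1);

	/* Without -DCONFIG_PPC64 this prints 0x7c000028, i.e. EH stays clear. */
	printf("lwarx encoding: 0x%08x\n", insn);
	return 0;
}
```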
From 9d296cfa69b3d4abc9d556986d544f0727988eed Mon Sep 17 00:00:00 2001
From: Kumar Gala
Date: Tue, 16 Mar 2010 23:39:56 -0500
Subject: powerpc/fsl-booke: Get coherent bit from PTE

We shouldn't always be setting 'M' in the TLB entry, since it's
reasonable for some things to be mapped non-coherent.  The PTE should
have 'M' set properly.

Signed-off-by: Kumar Gala
---
 arch/powerpc/kernel/head_fsl_booke.S | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 25793bb0e782..a7cf4934342c 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -746,9 +746,6 @@ finish_tlb_load:
 	rlwimi	r12, r11, 32-19, 27, 31	/* extract WIMGE from pte */
 #else
 	rlwimi	r12, r11, 26, 27, 31	/* extract WIMGE from pte */
-#endif
-#ifdef CONFIG_SMP
-	ori	r12, r12, MAS2_M
 #endif
 	mtspr	SPRN_MAS2, r12

--
cgit v1.2.3

From 409d241b7bb2cf0278186040ace1c4704fb2a82f Mon Sep 17 00:00:00 2001
From: Nathan Lynch
Date: Fri, 12 Mar 2010 13:16:02 +0000
Subject: powerpc: Use correct ccr bit for syscall error status

The powerpc implementations of syscall_get_error and
syscall_set_return_value should use CR0:SO (0x10000000) for testing and
setting syscall error status.  Fortunately these APIs don't seem to be
used at the moment.

Signed-off-by: Nathan Lynch
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/include/asm/syscall.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
index efa7f0b879f3..23913e902fc3 100644
--- a/arch/powerpc/include/asm/syscall.h
+++ b/arch/powerpc/include/asm/syscall.h
@@ -30,7 +30,7 @@ static inline void syscall_rollback(struct task_struct *task,
 static inline long syscall_get_error(struct task_struct *task,
 				     struct pt_regs *regs)
 {
-	return (regs->ccr & 0x1000) ? -regs->gpr[3] : 0;
+	return (regs->ccr & 0x10000000) ? -regs->gpr[3] : 0;
 }

 static inline long syscall_get_return_value(struct task_struct *task,
@@ -44,10 +44,10 @@ static inline void syscall_set_return_value(struct task_struct *task,
 					    int error, long val)
 {
 	if (error) {
-		regs->ccr |= 0x1000L;
+		regs->ccr |= 0x10000000L;
 		regs->gpr[3] = -error;
 	} else {
-		regs->ccr &= ~0x1000L;
+		regs->ccr &= ~0x10000000L;
 		regs->gpr[3] = val;
 	}
 }
--
cgit v1.2.3
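To make the mask change concrete: in the 32-bit CR image saved in pt_regs, CR0 occupies the four most significant bits, so CR0's SO (summary overflow) bit — the bit the powerpc syscall return convention uses to flag an error — is 0x10000000, while the old mask 0x1000 pointed into CR4. The stand-alone C program below round-trips the helper logic from the patch; the struct is a made-up stand-in for pt_regs and the program is a demonstration, not kernel code.

```c
#include <assert.h>
#include <stdio.h>

/* Mask from the patch above: the SO bit of CR0 in the saved 32-bit CR image. */
#define CR0_SO	0x10000000UL

/* Minimal stand-in for the pt_regs fields the helpers touch. */
struct fake_regs {
	unsigned long ccr;
	long gpr3;
};

/* Mirrors syscall_set_return_value(): error is a negative errno or zero. */
static void set_return_value(struct fake_regs *regs, int error, long val)
{
	if (error) {
		regs->ccr |= CR0_SO;
		regs->gpr3 = -error;	/* positive errno in r3, SO set */
	} else {
		regs->ccr &= ~CR0_SO;
		regs->gpr3 = val;
	}
}

/* Mirrors syscall_get_error(): negative errno if SO is set, else zero. */
static long get_error(const struct fake_regs *regs)
{
	return (regs->ccr & CR0_SO) ? -regs->gpr3 : 0;
}

int main(void)
{
	struct fake_regs regs = { 0, 0 };

	set_return_value(&regs, -14 /* -EFAULT */, 0);
	assert(get_error(&regs) == -14);

	set_return_value(&regs, 0, 42);
	assert(get_error(&regs) == 0 && regs.gpr3 == 42);

	printf("CR0[SO] round trip OK (mask 0x%08lx)\n", CR0_SO);
	return 0;
}
```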
From 09156a7a409cf93d1ca2706bf05d714879a5b7ea Mon Sep 17 00:00:00 2001
From: Márton Németh
Date: Sat, 6 Mar 2010 22:43:55 +0000
Subject: powerpc: Do not call printk when CONFIG_PRINTK is not defined
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When printk() is disabled with CONFIG_PRINTK (menu item General setup ->
Configure standard kernel features (for small systems) -> Enable support
for printk), there should be no printk() calls at all.

Signed-off-by: Márton Németh
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/kernel/head_fsl_booke.S | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index a7cf4934342c..725526547994 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -884,13 +884,17 @@ KernelSPE:
 	lwz	r3,_MSR(r1)
 	oris	r3,r3,MSR_SPE@h
 	stw	r3,_MSR(r1)	/* enable use of SPE after return */
+#ifdef CONFIG_PRINTK
 	lis	r3,87f@h
 	ori	r3,r3,87f@l
 	mr	r4,r2		/* current */
 	lwz	r5,_NIP(r1)
 	bl	printk
+#endif
 	b	ret_from_except
+#ifdef CONFIG_PRINTK
 87:	.string	"SPE used in kernel (task=%p, pc=%x) \n"
+#endif
 	.align	4,0

 #endif /* CONFIG_SPE */
--
cgit v1.2.3

From a93272969c6b1d59883fcbb04845420bd72c9a20 Mon Sep 17 00:00:00 2001
From: FUJITA Tomonori
Date: Tue, 16 Mar 2010 13:16:25 +0000
Subject: powerpc: Fix swiotlb to respect the boot option

powerpc initializes swiotlb before parsing the kernel boot options, so
swiotlb options (e.g. specifying the swiotlb buffer size) are ignored.

Any time before freeing bootmem works for swiotlb, so this patch moves
powerpc's swiotlb initialization to mem_init(), after the kernel boot
options have been parsed (as x86 does).

Signed-off-by: FUJITA Tomonori
Tested-by: Becky Bruce
Tested-by: Albert Herranz
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/kernel/setup_32.c | 6 ------
 arch/powerpc/kernel/setup_64.c | 6 ------
 arch/powerpc/mm/mem.c          | 6 ++++++
 3 files changed, 6 insertions(+), 12 deletions(-)

diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index b152de3e64d4..8f58986c2ad9 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -39,7 +39,6 @@
 #include
 #include
 #include
-#include

 #include "setup.h"

@@ -343,11 +342,6 @@ void __init setup_arch(char **cmdline_p)
 		ppc_md.setup_arch();
 	if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab);

-#ifdef CONFIG_SWIOTLB
-	if (ppc_swiotlb_enable)
-		swiotlb_init(1);
-#endif
-
 	paging_init();

 	/* Initialize the MMU context management stuff */
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 63547394048c..914389158a9b 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -61,7 +61,6 @@
 #include
 #include
 #include
-#include
 #include

 #include "setup.h"
@@ -541,11 +540,6 @@ void __init setup_arch(char **cmdline_p)
 	if (ppc_md.setup_arch)
 		ppc_md.setup_arch();

-#ifdef CONFIG_SWIOTLB
-	if (ppc_swiotlb_enable)
-		swiotlb_init(1);
-#endif
-
 	paging_init();

 	/* Initialize the MMU context management stuff */
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 311224cdb7ad..448f972b22f5 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -48,6 +48,7 @@
 #include
 #include
 #include
+#include

 #include "mmu_decl.h"

@@ -320,6 +321,11 @@ void __init mem_init(void)
 	struct page *page;
 	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

+#ifdef CONFIG_SWIOTLB
+	if (ppc_swiotlb_enable)
+		swiotlb_init(1);
+#endif
+
 	num_physpages = lmb.memory.size >> PAGE_SHIFT;
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

--
cgit v1.2.3
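To illustrate the ordering problem the swiotlb patch fixes, here is a tiny stand-alone C sketch. It is not kernel code: the option syntax, units, and function names are invented for the example. The point is simply that the pool consumes whatever size is configured at init time, so initializing before the command line is parsed silently discards the option.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Default pool size; a "swiotlb=" style option is supposed to override it. */
static size_t io_tlb_bytes = 64 << 20;
static size_t pool_bytes;	/* what the pool actually got at init time */

/* Stand-in for the boot-option handler parsing the command line. */
static void parse_boot_options(const char *cmdline)
{
	const char *opt = strstr(cmdline, "swiotlb=");
	if (opt)
		io_tlb_bytes = strtoul(opt + strlen("swiotlb="), NULL, 0);
}

/* Stand-in for swiotlb_init(): consumes io_tlb_bytes when it runs. */
static void swiotlb_init_sketch(void)
{
	pool_bytes = io_tlb_bytes;
}

int main(void)
{
	const char *cmdline = "root=/dev/sda1 swiotlb=16777216";

	/* Old powerpc ordering: init from setup_arch(), before parsing. */
	swiotlb_init_sketch();
	parse_boot_options(cmdline);
	printf("init before parsing: pool = %zu bytes (option ignored)\n", pool_bytes);

	/* New ordering: init from mem_init(), after parsing. */
	io_tlb_bytes = 64 << 20;
	parse_boot_options(cmdline);
	swiotlb_init_sketch();
	printf("init after parsing:  pool = %zu bytes\n", pool_bytes);
	return 0;
}
```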
From 191aee58b6568cf8143901bfa3f57a9b8faa6f1c Mon Sep 17 00:00:00 2001
From: FUJITA Tomonori
Date: Tue, 2 Mar 2010 14:25:38 +0000
Subject: powerpc: Remove IOMMU_VMERGE config option

The description says:

  Cause IO segments sent to a device for DMA to be merged virtually
  by the IOMMU when they happen to have been allocated contiguously.
  This doesn't add pressure to the IOMMU allocator. However, some
  drivers don't support getting large merged segments coming back
  from *_map_sg().

  Most drivers don't have this problem; it is safe to say Y here.

It's out of date.  Long ago, drivers had no way to tell IOMMUs about
their segment length limit (that is, the maximum segment length that
they can handle), so IOMMUs merged as many segments as possible and
handed overly large segments to drivers.

dma_get_max_seg_size() was introduced to solve the above problem.
Device drivers can use the API to tell the IOMMU the maximum segment
length they can handle.  In addition, the default limit (64K) should be
safe for everyone.  So this config option seems to be unnecessary.

Note that this config option only controls whether virtual merging is
disabled by default; users can still disable virtual merging via the
boot parameter.

Signed-off-by: FUJITA Tomonori
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/Kconfig        | 13 -------------
 arch/powerpc/kernel/iommu.c |  7 +------
 2 files changed, 1 insertion(+), 19 deletions(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 8a54eb8e3768..2e19500921f9 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -313,19 +313,6 @@ config 8XX_MINIMAL_FPEMU

 	  It is recommended that you build a soft-float userspace instead.

-config IOMMU_VMERGE
-	bool "Enable IOMMU virtual merging"
-	depends on PPC64
-	default y
-	help
-	  Cause IO segments sent to a device for DMA to be merged virtually
-	  by the IOMMU when they happen to have been allocated contiguously.
-	  This doesn't add pressure to the IOMMU allocator. However, some
-	  drivers don't support getting large merged segments coming back
-	  from *_map_sg().
-
-	  Most drivers don't have this problem; it is safe to say Y here.
-
 config IOMMU_HELPER
 	def_bool PPC64

diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 5547ae6e6b0b..ec94f906ea43 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -42,12 +42,7 @@

 #define DBG(...)

-#ifdef CONFIG_IOMMU_VMERGE
-static int novmerge = 0;
-#else
-static int novmerge = 1;
-#endif
-
+static int novmerge;
 static int protect4gb = 1;

 static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
--
cgit v1.2.3
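Since the rationale rests on dma_get_max_seg_size(), a brief kernel-style sketch of the two sides of that contract follows. Only dma_set_max_seg_size() and dma_get_max_seg_size() from <linux/dma-mapping.h> are real APIs here; the function names, the 64 KB limit, and the merging check are invented for illustration, so treat this as a sketch rather than a drop-in implementation.

```c
#include <linux/device.h>
#include <linux/dma-mapping.h>

/*
 * Driver side: declare the largest DMA segment the (hypothetical) hardware
 * can take, so an IOMMU never merges scatterlist entries past that size.
 */
static int my_driver_dma_setup(struct device *dev)
{
	return dma_set_max_seg_size(dev, 65536);	/* 64 KB per segment */
}

/*
 * IOMMU side, conceptually: before virtually merging two adjacent
 * scatterlist entries, check the device's advertised limit.
 */
static bool can_merge(struct device *dev, size_t merged_len)
{
	return merged_len <= dma_get_max_seg_size(dev);
}
```

This mirrors, in simplified form, the kind of per-device limit check the scatter/gather mapping path makes when deciding whether merging is allowed, which is why a global compile-time switch for merging is no longer needed.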