author     Catalin Marinas <catalin.marinas@arm.com>   2011-11-28 21:57:24 +0000
committer  Catalin Marinas <catalin.marinas@arm.com>   2012-04-17 15:29:44 +0100
commit     b9d4d42ad901cc848ac87f1cb8923fded3645568
tree       37c2010e12eecb605720b4c5d41780fcba282937
parent     e323969ccda2d69f02e047c08b03faa09215c72a
ARM: Remove __ARCH_WANT_INTERRUPTS_ON_CTXSW on pre-ARMv6 CPUs
This patch removes the __ARCH_WANT_INTERRUPTS_ON_CTXSW definition for
ARMv5 and earlier processors. On such processors, the context switch
requires a full cache flush. To avoid high interrupt latencies, this
patch defers the mm switch to the post-lock-switch hook when
interrupts are disabled.
Reviewed-by: Will Deacon <will.deacon@arm.com>
Tested-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Tested-by: Marc Zyngier <Marc.Zyngier@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
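The deferral works because the post-lock-switch hook runs late in the
context switch, after the scheduler has dropped the runqueue lock and
re-enabled interrupts (the scheduler-side plumbing for the hook comes
from a separate patch), so the expensive VIVT cache flush in
cpu_switch_mm() no longer happens with interrupts off. The standalone C
sketch below models that deferral pattern only; it is not kernel code,
and every identifier in it (expensive_switch(), check_and_switch(),
post_lock_switch_hook(), switch_pending) is invented for illustration.

/*
 * Standalone model of the deferral pattern used by this patch:
 * when the expensive switch cannot run in the current context,
 * a flag is set and the work is replayed later from a hook that
 * runs in a friendlier context.
 */
#include <stdbool.h>
#include <stdio.h>

static bool irqs_off;        /* stands in for irqs_disabled()          */
static bool switch_pending;  /* stands in for the TIF_SWITCH_MM flag   */
static int  current_mm = 1;  /* stands in for the live page tables     */
static int  next_mm;         /* mm we have been asked to switch to     */

static void expensive_switch(int mm)
{
        /* models cpu_switch_mm(), which flushes the whole VIVT cache */
        current_mm = mm;
        printf("switched to mm %d\n", mm);
}

static void check_and_switch(int mm)
{
        next_mm = mm;
        if (irqs_off)
                switch_pending = true;  /* defer; keep running on the old mm */
        else
                expensive_switch(mm);
}

static void post_lock_switch_hook(void)
{
        /* models finish_arch_post_lock_switch(): replay the deferred switch */
        if (switch_pending) {
                switch_pending = false;
                expensive_switch(next_mm);
        }
}

int main(void)
{
        irqs_off = true;          /* context switch entered with IRQs disabled */
        check_and_switch(2);      /* deferred: no flush happens here           */
        irqs_off = false;         /* runqueue lock dropped, IRQs back on       */
        post_lock_switch_hook();  /* the flush finally runs here               */
        return 0;
}

Running the model prints the switch only after post_lock_switch_hook()
is called, mirroring how the real cpu_switch_mm() call is replayed from
finish_arch_post_lock_switch() in the patch below.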
-rw-r--r--  arch/arm/include/asm/mmu.h          |  9
-rw-r--r--  arch/arm/include/asm/mmu_context.h  | 31
2 files changed, 26 insertions(+), 14 deletions(-)
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 20b43d6f23b3..14965658a923 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -34,13 +34,4 @@ typedef struct {
 
 #endif
 
-/*
- * switch_mm() may do a full cache flush over the context switch,
- * so enable interrupts over the context switch to avoid high
- * latency.
- */
-#ifndef CONFIG_CPU_HAS_ASID
-#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
-#endif
-
 #endif
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index 8da4b9c042fe..0306bc642c0d 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -105,19 +105,40 @@ static inline void finish_arch_post_lock_switch(void)
 
 #else   /* !CONFIG_CPU_HAS_ASID */
 
+#ifdef CONFIG_MMU
+
 static inline void check_and_switch_context(struct mm_struct *mm,
                                             struct task_struct *tsk)
 {
-#ifdef CONFIG_MMU
        if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
                __check_kvm_seq(mm);
-       cpu_switch_mm(mm->pgd, mm);
-#endif
+
+       if (irqs_disabled())
+               /*
+                * cpu_switch_mm() needs to flush the VIVT caches. To avoid
+                * high interrupt latencies, defer the call and continue
+                * running with the old mm. Since we only support UP systems
+                * on non-ASID CPUs, the old mm will remain valid until the
+                * finish_arch_post_lock_switch() call.
+                */
+               set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+       else
+               cpu_switch_mm(mm->pgd, mm);
 }
 
-#define init_new_context(tsk,mm)       0
+#define finish_arch_post_lock_switch \
+       finish_arch_post_lock_switch
+static inline void finish_arch_post_lock_switch(void)
+{
+       if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
+               struct mm_struct *mm = current->mm;
+               cpu_switch_mm(mm->pgd, mm);
+       }
+}
 
-#define finish_arch_post_lock_switch() do { } while (0)
+#endif  /* CONFIG_MMU */
+
+#define init_new_context(tsk,mm)       0
 
 #endif  /* CONFIG_CPU_HAS_ASID */