From f6635f873a605576fa1983c605655a8721475c22 Mon Sep 17 00:00:00 2001
From: Gary Guo
Date: Wed, 27 Mar 2019 00:41:29 +0000
Subject: riscv: move switch_mm to its own file

switch_mm is an expensive operation that has two users.
flush_icache_deferred is only called from switch_mm and can be moved
along with it. switch_mm is expected to become more complicated when
ASID support is added, so clean it up eagerly.

By moving them to a separate file we also remove some excessive
dependencies on tlbflush.h and cacheflush.h.

Signed-off-by: Gary Guo
Reviewed-by: Anup Patel
Signed-off-by: Christoph Hellwig
Signed-off-by: Palmer Dabbelt
---
 arch/riscv/include/asm/mmu_context.h | 54 ++----------------------------------
 1 file changed, 2 insertions(+), 52 deletions(-)

(limited to 'arch/riscv/include/asm/mmu_context.h')

diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h
index 98c76c821367..bf4f097a9051 100644
--- a/arch/riscv/include/asm/mmu_context.h
+++ b/arch/riscv/include/asm/mmu_context.h
@@ -20,8 +20,6 @@
 
 #include <linux/mm.h>
 #include <linux/sched.h>
-#include <asm/tlbflush.h>
-#include <asm/cacheflush.h>
 
 static inline void enter_lazy_tlb(struct mm_struct *mm,
 	struct task_struct *task)
@@ -39,56 +37,8 @@ static inline void destroy_context(struct mm_struct *mm)
 {
 }
 
-/*
- * When necessary, performs a deferred icache flush for the given MM context,
- * on the local CPU. RISC-V has no direct mechanism for instruction cache
- * shoot downs, so instead we send an IPI that informs the remote harts they
- * need to flush their local instruction caches. To avoid pathologically slow
- * behavior in a common case (a bunch of single-hart processes on a many-hart
- * machine, ie 'make -j') we avoid the IPIs for harts that are not currently
- * executing a MM context and instead schedule a deferred local instruction
- * cache flush to be performed before execution resumes on each hart. This
- * actually performs that local instruction cache flush, which implicitly only
- * refers to the current hart.
- */
-static inline void flush_icache_deferred(struct mm_struct *mm)
-{
-#ifdef CONFIG_SMP
-	unsigned int cpu = smp_processor_id();
-	cpumask_t *mask = &mm->context.icache_stale_mask;
-
-	if (cpumask_test_cpu(cpu, mask)) {
-		cpumask_clear_cpu(cpu, mask);
-		/*
-		 * Ensure the remote hart's writes are visible to this hart.
-		 * This pairs with a barrier in flush_icache_mm.
-		 */
-		smp_mb();
-		local_flush_icache_all();
-	}
-#endif
-}
-
-static inline void switch_mm(struct mm_struct *prev,
-	struct mm_struct *next, struct task_struct *task)
-{
-	if (likely(prev != next)) {
-		/*
-		 * Mark the current MM context as inactive, and the next as
-		 * active. This is at least used by the icache flushing
-		 * routines in order to determine who should be flushed.
-		 */
-		unsigned int cpu = smp_processor_id();
-
-		cpumask_clear_cpu(cpu, mm_cpumask(prev));
-		cpumask_set_cpu(cpu, mm_cpumask(next));
-
-		csr_write(CSR_SATP, virt_to_pfn(next->pgd) | SATP_MODE);
-		local_flush_tlb_all();
-
-		flush_icache_deferred(next);
-	}
-}
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	struct task_struct *task);
 
 static inline void activate_mm(struct mm_struct *prev,
 	struct mm_struct *next)
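The diff above is limited to arch/riscv/include/asm/mmu_context.h, so the
new home of the moved code is not shown. Reconstructed from the removed
lines, the new arch/riscv/mm/context.c plausibly looks like the sketch
below; the exact include list and comment wording in the committed file
may differ.

/*
 * Sketch of arch/riscv/mm/context.c after this patch, reconstructed from
 * the lines removed above; includes are illustrative, not copied from the
 * commit.
 */
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

/*
 * When necessary, performs a deferred icache flush for the given MM
 * context on the local CPU; see the comment removed from mmu_context.h
 * above for the full rationale.
 */
static inline void flush_icache_deferred(struct mm_struct *mm)
{
#ifdef CONFIG_SMP
	unsigned int cpu = smp_processor_id();
	cpumask_t *mask = &mm->context.icache_stale_mask;

	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		/* Pairs with the barrier in flush_icache_mm(). */
		smp_mb();
		local_flush_icache_all();
	}
#endif
}

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	struct task_struct *task)
{
	if (likely(prev != next)) {
		/*
		 * Mark the current MM context as inactive, and the next as
		 * active; the icache flushing routines use these masks to
		 * decide which harts need a flush.
		 */
		unsigned int cpu = smp_processor_id();

		cpumask_clear_cpu(cpu, mm_cpumask(prev));
		cpumask_set_cpu(cpu, mm_cpumask(next));

		/*
		 * Point satp at the next page table and flush stale TLB
		 * entries; per the commit message, later ASID support is
		 * expected to complicate this step.
		 */
		csr_write(CSR_SATP, virt_to_pfn(next->pgd) | SATP_MODE);
		local_flush_tlb_all();

		flush_icache_deferred(next);
	}
}

Note the design choice preserved by the move: flush_icache_deferred stays
static in context.c, keeping the single-caller relationship the commit
message describes, while only the switch_mm prototype remains visible to
the rest of the kernel through mmu_context.h.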