author | Gary Guo <gary@garyguo.net> | 2019-03-27 00:41:29 +0000 |
---|---|---|
committer | Palmer Dabbelt <palmer@sifive.com> | 2019-05-16 20:42:12 -0700 |
commit | f6635f873a605576fa1983c605655a8721475c22 | |
tree | e90d492641c5dbbc02307894264a20ed9ccf53b7 /arch/riscv/include/asm/mmu_context.h | |
parent | 58de77545e53b94cd6c816776197dade598632c5 | |
riscv: move switch_mm to its own file
switch_mm is an expensive operation that has two users.
flush_icache_deferred is only called within switch_mm and can be moved
along with it. switch_mm is expected to become more complicated once
ASID support is added, so clean it up eagerly.

By moving them to a separate file we also remove unnecessary
dependencies on tlbflush.h and cacheflush.h. (A sketch of the resulting
file follows the tags below.)
Signed-off-by: Gary Guo <gary@garyguo.net>
Reviewed-by: Anup Patel <anup@brainfault.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Palmer Dabbelt <palmer@sifive.com>
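The definitions removed below land in a new arch/riscv/mm/context.c, which this diffstat-limited view does not show. What follows is a minimal sketch of that file, reconstructed from the hunks removed here; the exact include list and comment wording in the real file are assumptions.

/*
 * Sketch of arch/riscv/mm/context.c as introduced by this patch,
 * reconstructed from the definitions removed from mmu_context.h.
 * The include list is an assumption.
 */
#include <linux/mm.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/*
 * Perform the deferred icache flush for this MM context on the local
 * hart, clearing this CPU's bit in the stale mask first.
 */
static inline void flush_icache_deferred(struct mm_struct *mm)
{
#ifdef CONFIG_SMP
        unsigned int cpu = smp_processor_id();
        cpumask_t *mask = &mm->context.icache_stale_mask;

        if (cpumask_test_cpu(cpu, mask)) {
                cpumask_clear_cpu(cpu, mask);
                /* Pairs with the barrier in flush_icache_mm. */
                smp_mb();
                local_flush_icache_all();
        }
#endif
}

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
        struct task_struct *task)
{
        if (likely(prev != next)) {
                /*
                 * Mark the current MM context as inactive and the next
                 * as active; the icache flush code uses these masks to
                 * decide which harts need flushing.
                 */
                unsigned int cpu = smp_processor_id();

                cpumask_clear_cpu(cpu, mm_cpumask(prev));
                cpumask_set_cpu(cpu, mm_cpumask(next));

                /* Install the next page table, then drop stale TLB entries. */
                csr_write(CSR_SATP, virt_to_pfn(next->pgd) | SATP_MODE);
                local_flush_tlb_all();

                flush_icache_deferred(next);
        }
}

Keeping only a declaration in the header leaves callers untouched, and the upcoming ASID work can then grow switch_mm inside one translation unit rather than in every file that includes mmu_context.h.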
Diffstat (limited to 'arch/riscv/include/asm/mmu_context.h')
-rw-r--r-- | arch/riscv/include/asm/mmu_context.h | 54 |
1 file changed, 2 insertions(+), 52 deletions(-)
diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h
index 98c76c821367..bf4f097a9051 100644
--- a/arch/riscv/include/asm/mmu_context.h
+++ b/arch/riscv/include/asm/mmu_context.h
@@ -20,8 +20,6 @@

 #include <linux/mm.h>
 #include <linux/sched.h>
-#include <asm/tlbflush.h>
-#include <asm/cacheflush.h>

 static inline void enter_lazy_tlb(struct mm_struct *mm,
        struct task_struct *task)
@@ -39,56 +37,8 @@ static inline void destroy_context(struct mm_struct *mm)
 {
 }

-/*
- * When necessary, performs a deferred icache flush for the given MM context,
- * on the local CPU.  RISC-V has no direct mechanism for instruction cache
- * shoot downs, so instead we send an IPI that informs the remote harts they
- * need to flush their local instruction caches.  To avoid pathologically slow
- * behavior in a common case (a bunch of single-hart processes on a many-hart
- * machine, ie 'make -j') we avoid the IPIs for harts that are not currently
- * executing a MM context and instead schedule a deferred local instruction
- * cache flush to be performed before execution resumes on each hart.  This
- * actually performs that local instruction cache flush, which implicitly only
- * refers to the current hart.
- */
-static inline void flush_icache_deferred(struct mm_struct *mm)
-{
-#ifdef CONFIG_SMP
-       unsigned int cpu = smp_processor_id();
-       cpumask_t *mask = &mm->context.icache_stale_mask;
-
-       if (cpumask_test_cpu(cpu, mask)) {
-               cpumask_clear_cpu(cpu, mask);
-               /*
-                * Ensure the remote hart's writes are visible to this hart.
-                * This pairs with a barrier in flush_icache_mm.
-                */
-               smp_mb();
-               local_flush_icache_all();
-       }
-#endif
-}
-
-static inline void switch_mm(struct mm_struct *prev,
-       struct mm_struct *next, struct task_struct *task)
-{
-       if (likely(prev != next)) {
-               /*
-                * Mark the current MM context as inactive, and the next as
-                * active.  This is at least used by the icache flushing
-                * routines in order to determine who should be flushed.
-                */
-               unsigned int cpu = smp_processor_id();
-
-               cpumask_clear_cpu(cpu, mm_cpumask(prev));
-               cpumask_set_cpu(cpu, mm_cpumask(next));
-
-               csr_write(CSR_SATP, virt_to_pfn(next->pgd) | SATP_MODE);
-               local_flush_tlb_all();
-
-               flush_icache_deferred(next);
-       }
-}
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+       struct task_struct *task);

 static inline void activate_mm(struct mm_struct *prev,
                               struct mm_struct *next)
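One more change lives outside this diffstat-limited view: the new translation unit has to be wired into the build. In the upstream tree that is presumably a one-line addition to arch/riscv/mm/Makefile, along these lines (the neighbouring entries are assumed):

# arch/riscv/mm/Makefile: build the new context.c unconditionally
obj-y += context.o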