author | Palmer Dabbelt <palmer@rivosinc.com> | 2023-03-09 15:22:05 -0800
---|---|---
committer | Palmer Dabbelt <palmer@rivosinc.com> | 2023-03-09 15:22:05 -0800
commit | 9b7fef255c80db8fc33fc06f2ad6d8e3ced1389b (patch) |
tree | 1502eff3eb6bb0d198553344e0dac48d64da3cd1 |
parent | 2a8db5ec4a28a0fce822d10224db9471a44b6925 (diff) |
parent | 82dd33fde0268cc622d3d1ac64971f3f61634142 (diff) |
Merge patch series "riscv: asid: switch to alternative way to fix stale TLB entries"
Sergey Matyukevich <geomatsi@gmail.com> says:
Some time ago, two different patches were posted to fix stale TLB
entries that caused application crashes.
Patch [0] suggested 'aggregating' mm_cpumask, i.e. the current CPU is not
cleared from the cpumask of the switched-out task in switch_mm(). See the
commit message by Guo Ren for additional explanation. The arc architecture
uses the same approach, so the comment above switch_mm in
arch/arc/include/asm/mmu_context.h is another good reference.
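For orientation, the aggregating approach boils down to roughly the following set_mm() logic (a simplified sketch of the change made to arch/riscv/mm/context.c in the diff below):

	/*
	 * Sketch: with the ASID allocator in use, the current hart is added
	 * to mm_cpumask(next) but never removed from mm_cpumask(prev), so
	 * the mask accumulates every hart that has ever run the mm and a
	 * later TLB shootdown reaches all of them.
	 */
	static inline void set_mm(struct mm_struct *prev,
				  struct mm_struct *next, unsigned int cpu)
	{
		cpumask_set_cpu(cpu, mm_cpumask(next));
		if (static_branch_unlikely(&use_asid_allocator)) {
			set_mm_asid(next, cpu);		/* prev's bit is kept */
		} else {
			cpumask_clear_cpu(cpu, mm_cpumask(prev));
			set_mm_noasid(next);
		}
	}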
Patch [1] attempted to reduce the number of TLB flushes by deferring
(and possibly avoiding) them on CPUs not currently running the task.
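The deferred scheme from [1] that is being reverted worked roughly as follows (both fragments appear as removals in the diff below): the flush path marks every other hart's TLB as stale instead of flushing it, and switch_mm performs the deferred flush by ASID when the mm next runs on a hart whose stale bit is set.

	/* Flush path: defer the flush on harts not currently running the mm. */
	cpumask_setall(pmask);			/* pmask = &mm->context.tlb_stale_mask */
	cpumask_clear_cpu(cpuid, pmask);
	cpumask_andnot(pmask, pmask, cmask);	/* cmask = mm_cpumask(mm) */

	/* switch_mm fast path: perform the deferred flush on this hart. */
	if (cpumask_test_cpu(cpu, mask)) {	/* mask = &mm->context.tlb_stale_mask */
		cpumask_clear_cpu(cpu, mask);
		local_flush_tlb_all_asid(cntx & asid_mask);
	}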
Patch [1] has been merged. However, we already have two bug reports from
different vendors, so apparently something is missing from the approach
suggested in [1]. In both cases, patch [0] fixed the issue.
This patch series reverts [1] and replaces it with [0].
[0] https://lore.kernel.org/linux-riscv/20221111075902.798571-1-guoren@kernel.org/
[1] https://lore.kernel.org/linux-riscv/20220829205219.283543-1-geomatsi@gmail.com/
* b4-shazam-merge:
riscv: asid: Fixup stale TLB entry cause application crash
Revert "riscv: mm: notify remote harts about mmu cache updates"
Link: https://lore.kernel.org/r/20230226150137.1919750-1-geomatsi@gmail.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
-rw-r--r-- | arch/riscv/include/asm/mmu.h | 2
-rw-r--r-- | arch/riscv/include/asm/tlbflush.h | 18
-rw-r--r-- | arch/riscv/mm/context.c | 40
-rw-r--r-- | arch/riscv/mm/tlbflush.c | 28
4 files changed, 37 insertions, 51 deletions
diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h
index 5ff1f19fd45c..0099dc116168 100644
--- a/arch/riscv/include/asm/mmu.h
+++ b/arch/riscv/include/asm/mmu.h
@@ -19,8 +19,6 @@ typedef struct {
 #ifdef CONFIG_SMP
 	/* A local icache flush is needed before user execution can resume. */
 	cpumask_t icache_stale_mask;
-	/* A local tlb flush is needed before user execution can resume. */
-	cpumask_t tlb_stale_mask;
 #endif
 } mm_context_t;
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 907b9efd39a8..801019381dea 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -22,24 +22,6 @@ static inline void local_flush_tlb_page(unsigned long addr)
 {
 	ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
 }
-
-static inline void local_flush_tlb_all_asid(unsigned long asid)
-{
-	__asm__ __volatile__ ("sfence.vma x0, %0"
-			:
-			: "r" (asid)
-			: "memory");
-}
-
-static inline void local_flush_tlb_page_asid(unsigned long addr,
-		unsigned long asid)
-{
-	__asm__ __volatile__ ("sfence.vma %0, %1"
-			:
-			: "r" (addr), "r" (asid)
-			: "memory");
-}
-
 #else /* CONFIG_MMU */
 #define local_flush_tlb_all()			do { } while (0)
 #define local_flush_tlb_page(addr)		do { } while (0)
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index 80ce9caba8d2..0f784e3d307b 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -196,16 +196,6 @@ switch_mm_fast:
 	if (need_flush_tlb)
 		local_flush_tlb_all();
-#ifdef CONFIG_SMP
-	else {
-		cpumask_t *mask = &mm->context.tlb_stale_mask;
-
-		if (cpumask_test_cpu(cpu, mask)) {
-			cpumask_clear_cpu(cpu, mask);
-			local_flush_tlb_all_asid(cntx & asid_mask);
-		}
-	}
-#endif
 }
@@ -215,12 +205,24 @@ static void set_mm_noasid(struct mm_struct *mm)
 	local_flush_tlb_all();
 }
-static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
+static inline void set_mm(struct mm_struct *prev,
+			  struct mm_struct *next, unsigned int cpu)
 {
-	if (static_branch_unlikely(&use_asid_allocator))
-		set_mm_asid(mm, cpu);
-	else
-		set_mm_noasid(mm);
+	/*
+	 * The mm_cpumask indicates which harts' TLBs contain the virtual
+	 * address mapping of the mm. Compared to noasid, using asid
+	 * can't guarantee that stale TLB entries are invalidated because
+	 * the asid mechanism wouldn't flush TLB for every switch_mm for
+	 * performance. So when using asid, keep all CPUs footmarks in
+	 * cpumask() until mm reset.
+	 */
+	cpumask_set_cpu(cpu, mm_cpumask(next));
+	if (static_branch_unlikely(&use_asid_allocator)) {
+		set_mm_asid(next, cpu);
+	} else {
+		cpumask_clear_cpu(cpu, mm_cpumask(prev));
+		set_mm_noasid(next);
+	}
 }
@@ -274,7 +276,8 @@ static int __init asids_init(void)
 }
 early_initcall(asids_init);
 #else
-static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
+static inline void set_mm(struct mm_struct *prev,
+			  struct mm_struct *next, unsigned int cpu)
 {
 	/* Nothing to do here when there is no MMU */
 }
@@ -327,10 +330,7 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	 */
 	cpu = smp_processor_id();
-	cpumask_clear_cpu(cpu, mm_cpumask(prev));
-	cpumask_set_cpu(cpu, mm_cpumask(next));
-
-	set_mm(next, cpu);
+	set_mm(prev, next, cpu);
 	flush_icache_deferred(next, cpu);
 }
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index ce7dfc81bb3f..37ed760d007c 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -5,7 +5,23 @@
 #include <linux/sched.h>
 #include <asm/sbi.h>
 #include <asm/mmu_context.h>
-#include <asm/tlbflush.h>
+
+static inline void local_flush_tlb_all_asid(unsigned long asid)
+{
+	__asm__ __volatile__ ("sfence.vma x0, %0"
+			:
+			: "r" (asid)
+			: "memory");
+}
+
+static inline void local_flush_tlb_page_asid(unsigned long addr,
+		unsigned long asid)
+{
+	__asm__ __volatile__ ("sfence.vma %0, %1"
+			:
+			: "r" (addr), "r" (asid)
+			: "memory");
+}
 
 void flush_tlb_all(void)
 {
@@ -15,7 +31,6 @@ void flush_tlb_all(void)
 static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
 				  unsigned long size, unsigned long stride)
 {
-	struct cpumask *pmask = &mm->context.tlb_stale_mask;
 	struct cpumask *cmask = mm_cpumask(mm);
 	unsigned int cpuid;
 	bool broadcast;
@@ -29,15 +44,6 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
 	if (static_branch_unlikely(&use_asid_allocator)) {
 		unsigned long asid = atomic_long_read(&mm->context.id);
 
-		/*
-		 * TLB will be immediately flushed on harts concurrently
-		 * executing this MM context. TLB flush on other harts
-		 * is deferred until this MM context migrates there.
-		 */
-		cpumask_setall(pmask);
-		cpumask_clear_cpu(cpuid, pmask);
-		cpumask_andnot(pmask, pmask, cmask);
-
 		if (broadcast) {
 			sbi_remote_sfence_vma_asid(cmask, start, size, asid);
 		} else if (size <= stride) {