-rw-r--r--  arch/arm64/include/asm/mmu.h      | 29
-rw-r--r--  arch/arm64/include/asm/tlbflush.h | 11
2 files changed, 31 insertions, 9 deletions
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 75beffe2ee8a..e9c30859f80c 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -27,11 +27,32 @@ typedef struct {
 } mm_context_t;
 
 /*
- * This macro is only used by the TLBI and low-level switch_mm() code,
- * neither of which can race with an ASID change. We therefore don't
- * need to reload the counter using atomic64_read().
+ * We use atomic64_read() here because the ASID for an 'mm_struct' can
+ * be reallocated when scheduling one of its threads following a
+ * rollover event (see new_context() and flush_context()). In this case,
+ * a concurrent TLBI (e.g. via try_to_unmap_one() and ptep_clear_flush())
+ * may use a stale ASID. This is fine in principle as the new ASID is
+ * guaranteed to be clean in the TLB, but the TLBI routines have to take
+ * care to handle the following race:
+ *
+ *    CPU 0                    CPU 1                          CPU 2
+ *
+ *    // ptep_clear_flush(mm)
+ *    xchg_relaxed(pte, 0)
+ *    DSB ISHST
+ *    old = ASID(mm)
+ *         |                   <rollover>
+ *         |                   new = new_context(mm)
+ *         \-----------------> atomic_set(mm->context.id, new)
+ *                             cpu_switch_mm(mm)
+ *                             // Hardware walk of pte using new ASID
+ *    TLBI(old)
+ *
+ * In this scenario, the barrier on CPU 0 and the dependency on CPU 1
+ * ensure that the page-table walker on CPU 1 *must* see the invalid PTE
+ * written by CPU 0.
  */
-#define ASID(mm)	((mm)->context.id.counter & 0xffff)
+#define ASID(mm)	(atomic64_read(&(mm)->context.id) & 0xffff)
 
 static inline bool arm64_kernel_unmapped_at_el0(void)
 {
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 118dabbda553..412a3b9a3c25 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -245,9 +245,10 @@ static inline void flush_tlb_all(void)
 
 static inline void flush_tlb_mm(struct mm_struct *mm)
 {
-	unsigned long asid = __TLBI_VADDR(0, ASID(mm));
+	unsigned long asid;
 
 	dsb(ishst);
+	asid = __TLBI_VADDR(0, ASID(mm));
 	__tlbi(aside1is, asid);
 	__tlbi_user(aside1is, asid);
 	dsb(ish);
@@ -256,9 +257,10 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
 					 unsigned long uaddr)
 {
-	unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
+	unsigned long addr;
 
 	dsb(ishst);
+	addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
 	__tlbi(vale1is, addr);
 	__tlbi_user(vale1is, addr);
 }
@@ -283,9 +285,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 {
 	int num = 0;
 	int scale = 0;
-	unsigned long asid = ASID(vma->vm_mm);
-	unsigned long addr;
-	unsigned long pages;
+	unsigned long asid, addr, pages;
 
 	start = round_down(start, stride);
 	end = round_up(end, stride);
@@ -305,6 +305,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 	}
 
 	dsb(ishst);
+	asid = ASID(vma->vm_mm);
 
 	/*
 	 * When the CPU does not support TLB range operations, flush the TLB
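
The ordering the patch enforces can be illustrated outside the kernel. The following is a minimal user-space sketch, assuming C11 atomics; the names pte, asid, rollover, clear_and_flush and flush_one are invented for illustration and are not kernel interfaces. It mirrors the shape of the fixed flush_tlb_*() routines: clear the entry first, issue the barrier, and only then sample the ASID used for the invalidation, so a concurrent rollover either hands the flusher the new ASID or is guaranteed to observe the cleared entry.

/*
 * Hypothetical user-space analogue of the ordering fix above, using C11
 * atomics in place of DSB/TLBI. Not kernel code.
 */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long pte  = 0xdeadbeef; /* stands in for the page-table entry */
static _Atomic unsigned long asid = 1;          /* stands in for mm->context.id       */

static void flush_one(unsigned long which)
{
	printf("invalidate entries tagged with ASID %lu\n", which);
}

/* Stand-in for the rollover path: publish a new ASID for the mm. */
static void rollover(void)
{
	atomic_store_explicit(&asid, 2, memory_order_seq_cst);
	/* In the kernel, cpu_switch_mm() follows here; any hardware walk after
	 * this point uses the new ASID and, because of the fence in
	 * clear_and_flush(), must observe pte == 0. */
}

/* Mirrors the fixed flush_tlb_*() ordering: clear, barrier, then read the ASID. */
static void clear_and_flush(void)
{
	atomic_store_explicit(&pte, 0, memory_order_relaxed);  /* xchg_relaxed(pte, 0) */
	atomic_thread_fence(memory_order_seq_cst);              /* dsb(ishst) analogue  */
	unsigned long cur = atomic_load_explicit(&asid, memory_order_relaxed); /* ASID(mm) */
	flush_one(cur);
}

int main(void)
{
	rollover();          /* in the kernel this races on another CPU */
	clear_and_flush();
	return 0;
}

Reading the ASID before the barrier, as the old code did via the initialiser, would allow the invalidation to target the stale ASID while a walker that has already switched to the new ASID caches the old translation, which is exactly the window the patch closes.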