author	Marc Zyngier <Marc.Zyngier@arm.com>	2013-06-21 12:07:27 +0100
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2013-07-21 18:21:34 -0700
commit	e6a01df4cd3a38e8dcc05d763c0f793b2f30c5f2
tree	9cec5f21f1d4b5c46be63e57f981d0d9f261678d
parent	4aa6022129a8b5e0b0e42815521071ce7a766a84
ARM: 7769/1: Cortex-A15: fix erratum 798181 implementation
commit 0d0752bca1f9a91fb646647aa4abbb21156f316c upstream.
Looking into the active_asids array is not enough, as we also need
to look into the reserved_asids array (they both represent processes
that are currently running).
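In sketch form, here is the lookup the fix performs per CPU. This is a small user-space model for illustration only, not the kernel code (the real version is the a15_erratum_get_cpumask() hunk in context.c below); it assumes, as the patch does, that a CPU in the middle of an ASID rollover has 0 in its active_asids slot and its live ASID parked in reserved_asids:

	#include <stdint.h>
	#include <stdio.h>

	#define NR_CPUS 4

	/* Model of the two per-CPU arrays the workaround must consult. */
	static uint64_t active_asids[NR_CPUS];   /* 0 => rollover in progress */
	static uint64_t reserved_asids[NR_CPUS]; /* ASID parked during rollover */

	/* Nonzero if @cpu is running @context_id and must receive the IPI. */
	static int cpu_needs_ipi(int cpu, uint64_t context_id)
	{
		uint64_t asid = active_asids[cpu];

		if (asid == 0)		/* mid-rollover: check the parked copy */
			asid = reserved_asids[cpu];
		return asid == context_id;
	}

	int main(void)
	{
		active_asids[0] = 0x101;   /* running another context */
		active_asids[1] = 0x102;   /* running the target context */
		reserved_asids[2] = 0x102; /* mid-rollover, was running it too */

		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			printf("cpu%d: %s\n", cpu,
			       cpu_needs_ipi(cpu, 0x102) ? "IPI" : "skip");
		return 0;
	}

Scanning active_asids alone would wrongly skip cpu2 here, which is exactly the hole being closed.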
Also, not holding the ASID allocator lock is racy, as another CPU
could schedule that process and trigger a rollover, making the erratum
workaround miss an IPI.
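Hence the shape of the fix, condensed from the new context.c hunk below (a kernel-context fragment, not standalone code): the whole scan runs under the allocator lock, so no rollover can slip in between the read and the IPI decision.

	unsigned long flags;

	/* Hold the allocator lock so no rollover can race with the scan. */
	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		asid = per_cpu(active_asids, cpu).counter;
		if (asid == 0)		/* rollover: ASID moved aside */
			asid = per_cpu(reserved_asids, cpu);
		if (context_id == asid)
			cpumask_set_cpu(cpu, mask);
	}
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);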
Exposing all of this outside of context.c would be ugly, so let's define
a new entry point that the erratum workaround can call to obtain the
cpumask.
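With that entry point in place, the caller in smp_tlb.c collapses to the following (condensed from the hunk below):

	this_cpu = get_cpu();
	a15_erratum_get_cpumask(this_cpu, mm, &mask);
	smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
	put_cpu();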
Acked-by: Will Deacon <will.deacon@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--	arch/arm/include/asm/mmu_context.h	10
-rw-r--r--	arch/arm/kernel/smp_tlb.c	18
-rw-r--r--	arch/arm/mm/context.c	29
3 files changed, 39 insertions, 18 deletions
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index a7b85e0d0cc1..dc90203c6ddb 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -27,7 +27,15 @@ void __check_vmalloc_seq(struct mm_struct *mm);
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
 #define init_new_context(tsk,mm)	({ atomic64_set(&mm->context.id, 0); 0; })
 
-DECLARE_PER_CPU(atomic64_t, active_asids);
+#ifdef CONFIG_ARM_ERRATA_798181
+void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+			     cpumask_t *mask);
+#else /* !CONFIG_ARM_ERRATA_798181 */
+static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+					   cpumask_t *mask)
+{
+}
+#endif /* CONFIG_ARM_ERRATA_798181 */
 
 #else	/* !CONFIG_CPU_HAS_ASID */
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index 9a52a07aa40e..a98b62dca2fa 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -103,7 +103,7 @@ static void broadcast_tlb_a15_erratum(void)
 
 static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
 {
-	int cpu, this_cpu;
+	int this_cpu;
 	cpumask_t mask = { CPU_BITS_NONE };
 
 	if (!erratum_a15_798181())
@@ -111,21 +111,7 @@ static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
 
 	dummy_flush_tlb_a15_erratum();
 	this_cpu = get_cpu();
-	for_each_online_cpu(cpu) {
-		if (cpu == this_cpu)
-			continue;
-		/*
-		 * We only need to send an IPI if the other CPUs are running
-		 * the same ASID as the one being invalidated. There is no
-		 * need for locking around the active_asids check since the
-		 * switch_mm() function has at least one dmb() (as required by
-		 * this workaround) in case a context switch happens on
-		 * another CPU after the condition below.
-		 */
-		if (atomic64_read(&mm->context.id) ==
-		    atomic64_read(&per_cpu(active_asids, cpu)))
-			cpumask_set_cpu(cpu, &mask);
-	}
+	a15_erratum_get_cpumask(this_cpu, mm, &mask);
 	smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
 	put_cpu();
 }
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 83e09058f96f..eeab06ebd06e 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -45,10 +45,37 @@ static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
 
-DEFINE_PER_CPU(atomic64_t, active_asids);
+static DEFINE_PER_CPU(atomic64_t, active_asids);
 static DEFINE_PER_CPU(u64, reserved_asids);
 static cpumask_t tlb_flush_pending;
 
+#ifdef CONFIG_ARM_ERRATA_798181
+void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+			     cpumask_t *mask)
+{
+	int cpu;
+	unsigned long flags;
+	u64 context_id, asid;
+
+	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+	context_id = mm->context.id.counter;
+	for_each_online_cpu(cpu) {
+		if (cpu == this_cpu)
+			continue;
+		/*
+		 * We only need to send an IPI if the other CPUs are
+		 * running the same ASID as the one being invalidated.
+		 */
+		asid = per_cpu(active_asids, cpu).counter;
+		if (asid == 0)
+			asid = per_cpu(reserved_asids, cpu);
+		if (context_id == asid)
+			cpumask_set_cpu(cpu, mask);
+	}
+	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+}
+#endif
+
 #ifdef CONFIG_ARM_LPAE
 static void cpu_set_reserved_ttbr0(void)
 {