author		Linus Torvalds <torvalds@linux-foundation.org>	2019-07-19 12:15:33 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-07-19 12:15:33 -0700
commit		a84d2d2906f983fb80f5dcc3e8e7c3ad70aa9f0d (patch)
tree		26d5ca2e6f2e799d858a18bccaa7c3f42f7d3fe3 /arch/csky/mm
parent		b5d72dda8976e878be47415b94bca8465d1fa22d (diff)
parent		bdfeb0ccea1a12b58299b95eb0f28e2aa26de4c2 (diff)
download	lwn-a84d2d2906f983fb80f5dcc3e8e7c3ad70aa9f0d.tar.gz
		lwn-a84d2d2906f983fb80f5dcc3e8e7c3ad70aa9f0d.zip
Merge tag 'csky-for-linus-5.3-rc1' of git://github.com/c-sky/csky-linux
Pull arch/csky updates from Guo Ren:
"This round of csky subsystem gives two features (ASID algorithm
update, Perf pmu record support) and some fixups.
ASID updates:
- Revert mmu ASID mechanism
- Add new asid lib code from arm
- Use generic asid algorithm to implement switch_mm
- Improve tlb operation with help of asid
Perf pmu record support:
- Init pmu as a device
- Add count-width property for csky pmu
- Add pmu interrupt support
- Fix perf record in kernel/user space
- dt-bindings: Add csky PMU bindings
Fixes:
- Fixup no panic in kernel for some traps
- Fixup some error count in 810 & 860.
- Fixup abiv1 memset error"
* tag 'csky-for-linus-5.3-rc1' of git://github.com/c-sky/csky-linux:
csky: Fixup abiv1 memset error
csky: Improve tlb operation with help of asid
csky: Use generic asid algorithm to implement switch_mm
csky: Add new asid lib code from arm
csky: Revert mmu ASID mechanism
dt-bindings: csky: Add csky PMU bindings
dt-bindings: interrupt-controller: Update csky mpintc
csky: Fixup some error count in 810 & 860.
csky: Fix perf record in kernel/user space
csky: Add pmu interrupt support
csky: Add count-width property for csky pmu
csky: Init pmu as a device
csky: Fixup no panic in kernel for some traps
csky: Select intc & timer drivers
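Editorial note on the tlb.c rework listed above ("csky: Improve tlb operation with help of asid"): the C-SKY jTLB maps two pages per entry, so the new flush_tlb_range()/flush_tlb_kernel_range() round the range out to a two-page granule and step by 2*PAGE_SIZE, one invalidate per granule. A small userspace sketch of that arithmetic follows; the sample addresses are made up and the macros mirror the definitions added in the diff below.

    /*
     * Editorial sketch (not from the diff itself): range rounding as done by
     * the reworked flush_tlb_range(). One jTLB entry covers a pair of pages.
     */
    #include <stdio.h>

    #define PAGE_SHIFT		12
    #define PAGE_SIZE		(1UL << PAGE_SHIFT)
    #define PAGE_MASK		(~(PAGE_SIZE - 1))
    #define TLB_ENTRY_SIZE	(PAGE_SIZE * 2)		/* one entry covers two pages */
    #define TLB_ENTRY_SIZE_MASK	(PAGE_MASK << 1)

    int main(void)
    {
    	unsigned long start = 0x12345678UL, end = 0x12349abcUL;	/* made-up range */
    	unsigned long granules = 0;

    	start &= TLB_ENTRY_SIZE_MASK;		/* round start down to a 2-page granule */
    	end += TLB_ENTRY_SIZE - 1;		/* round end up */
    	end &= TLB_ENTRY_SIZE_MASK;

    	for (unsigned long addr = start; addr < end; addr += 2 * PAGE_SIZE)
    		granules++;			/* one tlbi/probe per granule */

    	printf("%lu granules in [%#lx, %#lx)\n", granules, start, end);
    	return 0;
    }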
Diffstat (limited to 'arch/csky/mm')
-rw-r--r--	arch/csky/mm/Makefile	  2
-rw-r--r--	arch/csky/mm/asid.c	189
-rw-r--r--	arch/csky/mm/context.c	 46
-rw-r--r--	arch/csky/mm/init.c	  2
-rw-r--r--	arch/csky/mm/tlb.c	238
5 files changed, 332 insertions, 145 deletions
diff --git a/arch/csky/mm/Makefile b/arch/csky/mm/Makefile
index 4eebebdcd1bf..c94ef6481098 100644
--- a/arch/csky/mm/Makefile
+++ b/arch/csky/mm/Makefile
@@ -12,3 +12,5 @@ obj-y += init.o
 obj-y += ioremap.o
 obj-y += syscache.o
 obj-y += tlb.o
+obj-y += asid.o
+obj-y += context.o
diff --git a/arch/csky/mm/asid.c b/arch/csky/mm/asid.c
new file mode 100644
index 000000000000..b2e914745c1d
--- /dev/null
+++ b/arch/csky/mm/asid.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic ASID allocator.
+ *
+ * Based on arch/arm/mm/context.c
+ *
+ * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#include <linux/slab.h>
+#include <linux/mm_types.h>
+
+#include <asm/asid.h>
+
+#define reserved_asid(info, cpu) *per_cpu_ptr((info)->reserved, cpu)
+
+#define ASID_MASK(info)			(~GENMASK((info)->bits - 1, 0))
+#define ASID_FIRST_VERSION(info)	(1UL << ((info)->bits))
+
+#define asid2idx(info, asid)		(((asid) & ~ASID_MASK(info)) >> (info)->ctxt_shift)
+#define idx2asid(info, idx)		(((idx) << (info)->ctxt_shift) & ~ASID_MASK(info))
+
+static void flush_context(struct asid_info *info)
+{
+	int i;
+	u64 asid;
+
+	/* Update the list of reserved ASIDs and the ASID bitmap. */
+	bitmap_clear(info->map, 0, NUM_CTXT_ASIDS(info));
+
+	for_each_possible_cpu(i) {
+		asid = atomic64_xchg_relaxed(&active_asid(info, i), 0);
+		/*
+		 * If this CPU has already been through a
+		 * rollover, but hasn't run another task in
+		 * the meantime, we must preserve its reserved
+		 * ASID, as this is the only trace we have of
+		 * the process it is still running.
+		 */
+		if (asid == 0)
+			asid = reserved_asid(info, i);
+		__set_bit(asid2idx(info, asid), info->map);
+		reserved_asid(info, i) = asid;
+	}
+
+	/*
+	 * Queue a TLB invalidation for each CPU to perform on next
+	 * context-switch
+	 */
+	cpumask_setall(&info->flush_pending);
+}
+
+static bool check_update_reserved_asid(struct asid_info *info, u64 asid,
+				       u64 newasid)
+{
+	int cpu;
+	bool hit = false;
+
+	/*
+	 * Iterate over the set of reserved ASIDs looking for a match.
+	 * If we find one, then we can update our mm to use newasid
+	 * (i.e. the same ASID in the current generation) but we can't
+	 * exit the loop early, since we need to ensure that all copies
+	 * of the old ASID are updated to reflect the mm. Failure to do
+	 * so could result in us missing the reserved ASID in a future
+	 * generation.
+	 */
+	for_each_possible_cpu(cpu) {
+		if (reserved_asid(info, cpu) == asid) {
+			hit = true;
+			reserved_asid(info, cpu) = newasid;
+		}
+	}
+
+	return hit;
+}
+
+static u64 new_context(struct asid_info *info, atomic64_t *pasid,
+		       struct mm_struct *mm)
+{
+	static u32 cur_idx = 1;
+	u64 asid = atomic64_read(pasid);
+	u64 generation = atomic64_read(&info->generation);
+
+	if (asid != 0) {
+		u64 newasid = generation | (asid & ~ASID_MASK(info));
+
+		/*
+		 * If our current ASID was active during a rollover, we
+		 * can continue to use it and this was just a false alarm.
+		 */
+		if (check_update_reserved_asid(info, asid, newasid))
+			return newasid;
+
+		/*
+		 * We had a valid ASID in a previous life, so try to re-use
+		 * it if possible.
+		 */
+		if (!__test_and_set_bit(asid2idx(info, asid), info->map))
+			return newasid;
+	}
+
+	/*
+	 * Allocate a free ASID. If we can't find one, take a note of the
+	 * currently active ASIDs and mark the TLBs as requiring flushes. We
+	 * always count from ASID #2 (index 1), as we use ASID #0 when setting
+	 * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
+	 * pairs.
+	 */
+	asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), cur_idx);
+	if (asid != NUM_CTXT_ASIDS(info))
+		goto set_asid;
+
+	/* We're out of ASIDs, so increment the global generation count */
+	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION(info),
+						 &info->generation);
+	flush_context(info);
+
+	/* We have more ASIDs than CPUs, so this will always succeed */
+	asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), 1);
+
+set_asid:
+	__set_bit(asid, info->map);
+	cur_idx = asid;
+	cpumask_clear(mm_cpumask(mm));
+	return idx2asid(info, asid) | generation;
+}
+
+/*
+ * Generate a new ASID for the context.
+ *
+ * @pasid: Pointer to the current ASID batch allocated. It will be updated
+ * with the new ASID batch.
+ * @cpu: current CPU ID. Must have been acquired through get_cpu()
+ */
+void asid_new_context(struct asid_info *info, atomic64_t *pasid,
+		      unsigned int cpu, struct mm_struct *mm)
+{
+	unsigned long flags;
+	u64 asid;
+
+	raw_spin_lock_irqsave(&info->lock, flags);
+	/* Check that our ASID belongs to the current generation. */
+	asid = atomic64_read(pasid);
+	if ((asid ^ atomic64_read(&info->generation)) >> info->bits) {
+		asid = new_context(info, pasid, mm);
+		atomic64_set(pasid, asid);
+	}
+
+	if (cpumask_test_and_clear_cpu(cpu, &info->flush_pending))
+		info->flush_cpu_ctxt_cb();
+
+	atomic64_set(&active_asid(info, cpu), asid);
+	cpumask_set_cpu(cpu, mm_cpumask(mm));
+	raw_spin_unlock_irqrestore(&info->lock, flags);
+}
+
+/*
+ * Initialize the ASID allocator
+ *
+ * @info: Pointer to the asid allocator structure
+ * @bits: Number of ASIDs available
+ * @asid_per_ctxt: Number of ASIDs to allocate per-context. ASIDs are
+ * allocated contiguously for a given context. This value should be a power of
+ * 2.
+ */
+int asid_allocator_init(struct asid_info *info,
+			u32 bits, unsigned int asid_per_ctxt,
+			void (*flush_cpu_ctxt_cb)(void))
+{
+	info->bits = bits;
+	info->ctxt_shift = ilog2(asid_per_ctxt);
+	info->flush_cpu_ctxt_cb = flush_cpu_ctxt_cb;
+	/*
+	 * Expect allocation after rollover to fail if we don't have at least
+	 * one more ASID than CPUs. ASID #0 is always reserved.
+	 */
+	WARN_ON(NUM_CTXT_ASIDS(info) - 1 <= num_possible_cpus());
+	atomic64_set(&info->generation, ASID_FIRST_VERSION(info));
+	info->map = kcalloc(BITS_TO_LONGS(NUM_CTXT_ASIDS(info)),
+			    sizeof(*info->map), GFP_KERNEL);
+	if (!info->map)
+		return -ENOMEM;
+
+	raw_spin_lock_init(&info->lock);
+
+	return 0;
+}
diff --git a/arch/csky/mm/context.c b/arch/csky/mm/context.c
new file mode 100644
index 000000000000..0d95bdd93846
--- /dev/null
+++ b/arch/csky/mm/context.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/bitops.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+
+#include <asm/asid.h>
+#include <asm/mmu_context.h>
+#include <asm/smp.h>
+#include <asm/tlbflush.h>
+
+static DEFINE_PER_CPU(atomic64_t, active_asids);
+static DEFINE_PER_CPU(u64, reserved_asids);
+
+struct asid_info asid_info;
+
+void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
+{
+	asid_check_context(&asid_info, &mm->context.asid, cpu, mm);
+}
+
+static void asid_flush_cpu_ctxt(void)
+{
+	local_tlb_invalid_all();
+}
+
+static int asids_init(void)
+{
+	BUG_ON(((1 << CONFIG_CPU_ASID_BITS) - 1) <= num_possible_cpus());
+
+	if (asid_allocator_init(&asid_info, CONFIG_CPU_ASID_BITS, 1,
+				asid_flush_cpu_ctxt))
+		panic("Unable to initialize ASID allocator for %lu ASIDs\n",
+		      NUM_ASIDS(&asid_info));
+
+	asid_info.active = &active_asids;
+	asid_info.reserved = &reserved_asids;
+
+	pr_info("ASID allocator initialised with %lu entries\n",
+		NUM_CTXT_ASIDS(&asid_info));
+
+	return 0;
+}
+early_initcall(asids_init);
diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c
index 66e597053488..eb0dc9e5065f 100644
--- a/arch/csky/mm/init.c
+++ b/arch/csky/mm/init.c
@@ -114,8 +114,6 @@ void __init pre_mmu_init(void)
 	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);
 	TLBMISS_HANDLER_SETUP_PGD_KERNEL(swapper_pg_dir);
 
-	asid_cache(smp_processor_id()) = ASID_FIRST_VERSION;
-
 	/* Setup page mask to 4k */
 	write_mmu_pagemask(0);
 }
diff --git a/arch/csky/mm/tlb.c b/arch/csky/mm/tlb.c
index 08b8394e5b8f..eb3ba6c9c927 100644
--- a/arch/csky/mm/tlb.c
+++ b/arch/csky/mm/tlb.c
@@ -10,7 +10,12 @@
 #include <asm/pgtable.h>
 #include <asm/setup.h>
 
-#define CSKY_TLB_SIZE CONFIG_CPU_TLB_SIZE
+/*
+ * One C-SKY MMU TLB entry contain two PFN/page entry, ie:
+ * 1VPN -> 2PFN
+ */
+#define TLB_ENTRY_SIZE		(PAGE_SIZE * 2)
+#define TLB_ENTRY_SIZE_MASK	(PAGE_MASK << 1)
 
 void flush_tlb_all(void)
 {
@@ -19,201 +24,148 @@ void flush_tlb_all(void)
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
-	int cpu = smp_processor_id();
-
-	if (cpu_context(cpu, mm) != 0)
-		drop_mmu_context(mm, cpu);
-
+#ifdef CONFIG_CPU_HAS_TLBI
+	asm volatile("tlbi.asids %0"::"r"(cpu_asid(mm)));
+#else
 	tlb_invalid_all();
+#endif
 }
 
+/*
+ * MMU operation regs only could invalid tlb entry in jtlb and we
+ * need change asid field to invalid I-utlb & D-utlb.
+ */
+#ifndef CONFIG_CPU_HAS_TLBI
 #define restore_asid_inv_utlb(oldpid, newpid) \
 do { \
-	if ((oldpid & ASID_MASK) == newpid) \
+	if (oldpid == newpid) \
 		write_mmu_entryhi(oldpid + 1); \
 	write_mmu_entryhi(oldpid); \
 } while (0)
+#endif
 
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-			unsigned long end)
+		     unsigned long end)
 {
-	struct mm_struct *mm = vma->vm_mm;
-	int cpu = smp_processor_id();
-
-	if (cpu_context(cpu, mm) != 0) {
-		unsigned long size, flags;
-		int newpid = cpu_asid(cpu, mm);
-
-		local_irq_save(flags);
-		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-		size = (size + 1) >> 1;
-		if (size <= CSKY_TLB_SIZE/2) {
-			start &= (PAGE_MASK << 1);
-			end += ((PAGE_SIZE << 1) - 1);
-			end &= (PAGE_MASK << 1);
-#ifdef CONFIG_CPU_HAS_TLBI
-			while (start < end) {
-				asm volatile("tlbi.vaas %0"
-					     ::"r"(start | newpid));
-				start += (PAGE_SIZE << 1);
-			}
-			sync_is();
-#else
-		{
-			int oldpid = read_mmu_entryhi();
-
-			while (start < end) {
-				int idx;
-
-				write_mmu_entryhi(start | newpid);
-				start += (PAGE_SIZE << 1);
-				tlb_probe();
-				idx = read_mmu_index();
-				if (idx >= 0)
-					tlb_invalid_indexed();
-			}
-			restore_asid_inv_utlb(oldpid, newpid);
-		}
-#endif
-		} else {
-			drop_mmu_context(mm, cpu);
-		}
-		local_irq_restore(flags);
-	}
-}
+	unsigned long newpid = cpu_asid(vma->vm_mm);
 
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
-	unsigned long size, flags;
+	start &= TLB_ENTRY_SIZE_MASK;
+	end   += TLB_ENTRY_SIZE - 1;
+	end   &= TLB_ENTRY_SIZE_MASK;
 
-	local_irq_save(flags);
-	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-	if (size <= CSKY_TLB_SIZE) {
-		start &= (PAGE_MASK << 1);
-		end += ((PAGE_SIZE << 1) - 1);
-		end &= (PAGE_MASK << 1);
 #ifdef CONFIG_CPU_HAS_TLBI
-		while (start < end) {
-			asm volatile("tlbi.vaas %0"::"r"(start));
-			start += (PAGE_SIZE << 1);
-		}
-		sync_is();
-#else
-	{
-		int oldpid = read_mmu_entryhi();
-
-		while (start < end) {
-			int idx;
-
-			write_mmu_entryhi(start);
-			start += (PAGE_SIZE << 1);
-			tlb_probe();
-			idx = read_mmu_index();
-			if (idx >= 0)
-				tlb_invalid_indexed();
-		}
-		restore_asid_inv_utlb(oldpid, 0);
-	}
-#endif
-	} else {
-		flush_tlb_all();
+	while (start < end) {
+		asm volatile("tlbi.vas %0"::"r"(start | newpid));
+		start += 2*PAGE_SIZE;
 	}
+	sync_is();
+#else
+	{
+	unsigned long flags, oldpid;
+
+	local_irq_save(flags);
+	oldpid = read_mmu_entryhi() & ASID_MASK;
+	while (start < end) {
+		int idx;
+		write_mmu_entryhi(start | newpid);
+		start += 2*PAGE_SIZE;
+		tlb_probe();
+		idx = read_mmu_index();
+		if (idx >= 0)
+			tlb_invalid_indexed();
+	}
+	restore_asid_inv_utlb(oldpid, newpid);
 	local_irq_restore(flags);
+	}
+#endif
 }
 
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-	int cpu = smp_processor_id();
-	int newpid = cpu_asid(cpu, vma->vm_mm);
-
-	if (!vma || cpu_context(cpu, vma->vm_mm) != 0) {
-		page &= (PAGE_MASK << 1);
+	start &= TLB_ENTRY_SIZE_MASK;
+	end   += TLB_ENTRY_SIZE - 1;
+	end   &= TLB_ENTRY_SIZE_MASK;
 
 #ifdef CONFIG_CPU_HAS_TLBI
-		asm volatile("tlbi.vaas %0"::"r"(page | newpid));
-		sync_is();
+	while (start < end) {
+		asm volatile("tlbi.vaas %0"::"r"(start));
+		start += 2*PAGE_SIZE;
+	}
+	sync_is();
 #else
-		{
-		int oldpid, idx;
-		unsigned long flags;
+	{
+	unsigned long flags, oldpid;
 
-		local_irq_save(flags);
-		oldpid = read_mmu_entryhi();
-		write_mmu_entryhi(page | newpid);
+	local_irq_save(flags);
+	oldpid = read_mmu_entryhi() & ASID_MASK;
+	while (start < end) {
+		int idx;
+
+		write_mmu_entryhi(start | oldpid);
+		start += 2*PAGE_SIZE;
 		tlb_probe();
 		idx = read_mmu_index();
 		if (idx >= 0)
 			tlb_invalid_indexed();
-
-		restore_asid_inv_utlb(oldpid, newpid);
-		local_irq_restore(flags);
-		}
-#endif
 	}
+	restore_asid_inv_utlb(oldpid, oldpid);
+	local_irq_restore(flags);
+	}
+#endif
 }
 
-/*
- * Remove one kernel space TLB entry. This entry is assumed to be marked
- * global so we don't do the ASID thing.
- */
-void flush_tlb_one(unsigned long page)
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 {
-	int oldpid;
+	int newpid = cpu_asid(vma->vm_mm);
 
-	oldpid = read_mmu_entryhi();
-	page &= (PAGE_MASK << 1);
+	addr &= TLB_ENTRY_SIZE_MASK;
 
 #ifdef CONFIG_CPU_HAS_TLBI
-	page = page | (oldpid & 0xfff);
-	asm volatile("tlbi.vaas %0"::"r"(page));
+	asm volatile("tlbi.vas %0"::"r"(addr | newpid));
 	sync_is();
 #else
 	{
-	int idx;
+	int oldpid, idx;
 	unsigned long flags;
 
-	page = page | (oldpid & 0xff);
-	local_irq_save(flags);
-	write_mmu_entryhi(page);
+	local_irq_save(flags);
+	oldpid = read_mmu_entryhi() & ASID_MASK;
+	write_mmu_entryhi(addr | newpid);
 	tlb_probe();
 	idx = read_mmu_index();
 	if (idx >= 0)
 		tlb_invalid_indexed();
-	restore_asid_inv_utlb(oldpid, oldpid);
+
+	restore_asid_inv_utlb(oldpid, newpid);
 	local_irq_restore(flags);
 	}
 #endif
 }
-EXPORT_SYMBOL(flush_tlb_one);
 
-/* show current 32 jtlbs */
-void show_jtlb_table(void)
+void flush_tlb_one(unsigned long addr)
 {
+	addr &= TLB_ENTRY_SIZE_MASK;
+
+#ifdef CONFIG_CPU_HAS_TLBI
+	asm volatile("tlbi.vaas %0"::"r"(addr));
+	sync_is();
+#else
+	{
+	int oldpid, idx;
 	unsigned long flags;
-	int entryhi, entrylo0, entrylo1;
-	int entry;
-	int oldpid;
 
 	local_irq_save(flags);
-	entry = 0;
-	pr_info("\n\n\n");
-
-	oldpid = read_mmu_entryhi();
-	while (entry < CSKY_TLB_SIZE) {
-		write_mmu_index(entry);
-		tlb_read();
-		entryhi = read_mmu_entryhi();
-		entrylo0 = read_mmu_entrylo0();
-		entrylo0 = entrylo0;
-		entrylo1 = read_mmu_entrylo1();
-		entrylo1 = entrylo1;
-		pr_info("jtlb[%d]: entryhi - 0x%x; entrylo0 - 0x%x;"
-			" entrylo1 - 0x%x\n",
-			entry, entryhi, entrylo0, entrylo1);
-		entry++;
-	}
-	write_mmu_entryhi(oldpid);
+	oldpid = read_mmu_entryhi() & ASID_MASK;
+	write_mmu_entryhi(addr | oldpid);
+	tlb_probe();
+	idx = read_mmu_index();
+	if (idx >= 0)
+		tlb_invalid_indexed();
+
+	restore_asid_inv_utlb(oldpid, oldpid);
 	local_irq_restore(flags);
+	}
+#endif
 }
+EXPORT_SYMBOL(flush_tlb_one);