author     Linus Torvalds <torvalds@linux-foundation.org>  2019-12-06 14:18:01 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-12-06 14:18:01 -0800
commit     98884281027d07b93f062b7c5e7aa01e76ba12c6 (patch)
tree       c830c657c580cb9b62fd31682a139951bae4b8b7 /arch
parent     76f6777c9cc04efe8036b1d2aa76e618c1631cc6 (diff)
parent     de858040ee80e6f41bf0b40090f1c71f966a61b3 (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Catalin Marinas:
- ZONE_DMA32 initialisation fix when memblocks fall entirely within the
first GB (used by ZONE_DMA in 5.5 for Raspberry Pi 4).
- Couple of ftrace fixes following the FTRACE_WITH_REGS patchset.
- access_ok() fix for the Tagged Address ABI when called from a
kernel thread (asynchronous I/O): the kthread does not have the TIF
flags of the mm owner, so untag the user address unconditionally (a
sketch of the untagging follows this list).
- KVM compute_layout() called before the alternatives code patching.
- Minor clean-ups.
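A minimal user-space sketch of the untagging behind the access_ok() fix. This is illustrative only: the untag() helper and the example pointer value are made up; the helper mirrors arm64's untagged_addr(), which sign-extends from bit 55 so that a tag stored in bits 63:56 of a user pointer is stripped before the range check.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for arm64's untagged_addr(): sign-extend from bit 55.
 * For a user address (bit 55 clear) this zeroes the top byte, so a
 * tagged pointer and its untagged form pass the same range check. */
static uint64_t untag(uint64_t addr)
{
	return (uint64_t)(((int64_t)(addr << 8)) >> 8);
}

int main(void)
{
	uint64_t ptr = (0x5bULL << 56) | 0xffff8000ULL;	/* tag 0x5b */

	printf("tagged:   0x%016llx\n", (unsigned long long)ptr);
	printf("untagged: 0x%016llx\n", (unsigned long long)untag(ptr));
	return 0;
}

With the fix, a kthread doing asynchronous I/O on behalf of a tagged-ABI process takes this untag path unconditionally instead of consulting a TIF flag that was never set on the kthread itself.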
* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
arm64: entry: refine comment of stack overflow check
arm64: ftrace: fix ifdeffery
arm64: KVM: Invoke compute_layout() before alternatives are applied
arm64: Validate tagged addresses in access_ok() called from kernel threads
arm64: mm: Fix column alignment for UXN in kernel_page_tables
arm64: insn: consistently handle exit text
arm64: mm: Fix initialisation of DMA zones on non-NUMA systems
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h   |  1
-rw-r--r--  arch/arm64/include/asm/sections.h  |  1
-rw-r--r--  arch/arm64/include/asm/uaccess.h   |  7
-rw-r--r--  arch/arm64/kernel/entry-ftrace.S   |  3
-rw-r--r--  arch/arm64/kernel/entry.S          |  3
-rw-r--r--  arch/arm64/kernel/insn.c           | 22
-rw-r--r--  arch/arm64/kernel/smp.c            |  4
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S    |  3
-rw-r--r--  arch/arm64/kvm/va_layout.c         |  8
-rw-r--r--  arch/arm64/mm/dump.c               |  1
-rw-r--r--  arch/arm64/mm/init.c               | 25
11 files changed, 49 insertions, 29 deletions
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index befe37d4bc0e..53d846f1bfe7 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -91,6 +91,7 @@ alternative_cb_end
 
 void kvm_update_va_mask(struct alt_instr *alt,
 			__le32 *origptr, __le32 *updptr, int nr_inst);
+void kvm_compute_layout(void);
 
 static inline unsigned long __kern_hyp_va(unsigned long v)
 {
diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h
index 788ae971f11c..25a73aab438f 100644
--- a/arch/arm64/include/asm/sections.h
+++ b/arch/arm64/include/asm/sections.h
@@ -15,6 +15,7 @@ extern char __hyp_text_start[], __hyp_text_end[];
 extern char __idmap_text_start[], __idmap_text_end[];
 extern char __initdata_begin[], __initdata_end[];
 extern char __inittext_begin[], __inittext_end[];
+extern char __exittext_begin[], __exittext_end[];
 extern char __irqentry_text_start[], __irqentry_text_end[];
 extern char __mmuoff_data_start[], __mmuoff_data_end[];
 extern char __entry_tramp_text_start[], __entry_tramp_text_end[];
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 127712b0b970..32fc8061aa76 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -62,8 +62,13 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si
 {
 	unsigned long ret, limit = current_thread_info()->addr_limit;
 
+	/*
+	 * Asynchronous I/O running in a kernel thread does not have the
+	 * TIF_TAGGED_ADDR flag of the process owning the mm, so always untag
+	 * the user address before checking.
+	 */
 	if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) &&
-	    test_thread_flag(TIF_TAGGED_ADDR))
+	    (current->flags & PF_KTHREAD || test_thread_flag(TIF_TAGGED_ADDR)))
 		addr = untagged_addr(addr);
 
 	__chk_user_ptr(addr);
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 4fe1514fcbfd..7d02f9966d34 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -133,7 +133,6 @@ ENTRY(ftrace_graph_caller)
 	bl	prepare_ftrace_return
 	b	ftrace_common_return
 ENDPROC(ftrace_graph_caller)
-#else
 #endif
 
 #else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
@@ -287,6 +286,7 @@ GLOBAL(ftrace_graph_call)		// ftrace_graph_caller();
 
 	mcount_exit
 ENDPROC(ftrace_caller)
+#endif /* CONFIG_DYNAMIC_FTRACE */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 /*
@@ -307,7 +307,6 @@ ENTRY(ftrace_graph_caller)
 	mcount_exit
 ENDPROC(ftrace_graph_caller)
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-#endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
 
 ENTRY(ftrace_stub)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 583f71abbe98..7c6a0a41676f 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -76,7 +76,8 @@ alternative_else_nop_endif
 #ifdef CONFIG_VMAP_STACK
 	/*
 	 * Test whether the SP has overflowed, without corrupting a GPR.
-	 * Task and IRQ stacks are aligned to (1 << THREAD_SHIFT).
+	 * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
+	 * should always be zero.
 	 */
 	add	sp, sp, x0			// sp' = sp + x0
 	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 513b29c3e735..4a9e773a177f 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -21,6 +21,7 @@
 #include <asm/fixmap.h>
 #include <asm/insn.h>
 #include <asm/kprobes.h>
+#include <asm/sections.h>
 
 #define AARCH64_INSN_SF_BIT	BIT(31)
 #define AARCH64_INSN_N_BIT	BIT(22)
@@ -78,16 +79,29 @@ bool aarch64_insn_is_branch_imm(u32 insn)
 
 static DEFINE_RAW_SPINLOCK(patch_lock);
 
+static bool is_exit_text(unsigned long addr)
+{
+	/* discarded with init text/data */
+	return system_state < SYSTEM_RUNNING &&
+		addr >= (unsigned long)__exittext_begin &&
+		addr < (unsigned long)__exittext_end;
+}
+
+static bool is_image_text(unsigned long addr)
+{
+	return core_kernel_text(addr) || is_exit_text(addr);
+}
+
 static void __kprobes *patch_map(void *addr, int fixmap)
 {
 	unsigned long uintaddr = (uintptr_t) addr;
-	bool module = !core_kernel_text(uintaddr);
+	bool image = is_image_text(uintaddr);
 	struct page *page;
 
-	if (module && IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
-		page = vmalloc_to_page(addr);
-	else if (!module)
+	if (image)
 		page = phys_to_page(__pa_symbol(addr));
+	else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
+		page = vmalloc_to_page(addr);
 	else
 		return addr;
 
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index ab149bcc3dc7..d4ed9a19d8fe 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -31,6 +31,7 @@
 #include <linux/of.h>
 #include <linux/irq_work.h>
 #include <linux/kexec.h>
+#include <linux/kvm_host.h>
 
 #include <asm/alternative.h>
 #include <asm/atomic.h>
@@ -39,6 +40,7 @@
 #include <asm/cputype.h>
 #include <asm/cpu_ops.h>
 #include <asm/daifflags.h>
+#include <asm/kvm_mmu.h>
 #include <asm/mmu_context.h>
 #include <asm/numa.h>
 #include <asm/pgtable.h>
@@ -407,6 +409,8 @@ static void __init hyp_mode_check(void)
 			   "CPU: CPUs started in inconsistent modes");
 	else
 		pr_info("CPU: All CPU(s) started at EL1\n");
+	if (IS_ENABLED(CONFIG_KVM_ARM_HOST))
+		kvm_compute_layout();
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 841a8b4ce114..497f9675071d 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -158,9 +158,12 @@ SECTIONS
 
 	__inittext_begin = .;
 	INIT_TEXT_SECTION(8)
+
+	__exittext_begin = .;
 	.exit.text : {
 		ARM_EXIT_KEEP(EXIT_TEXT)
 	}
+	__exittext_end = .;
 
 	. = ALIGN(4);
 	.altinstructions : {
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index 2cf7d4b606c3..dab1fea4752a 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -22,7 +22,7 @@ static u8 tag_lsb;
 static u64 tag_val;
 static u64 va_mask;
 
-static void compute_layout(void)
+__init void kvm_compute_layout(void)
 {
 	phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
 	u64 hyp_va_msb;
@@ -110,9 +110,6 @@ void __init kvm_update_va_mask(struct alt_instr *alt,
 
 	BUG_ON(nr_inst != 5);
 
-	if (!has_vhe() && !va_mask)
-		compute_layout();
-
 	for (i = 0; i < nr_inst; i++) {
 		u32 rd, rn, insn, oinsn;
 
@@ -156,9 +153,6 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
 		return;
 	}
 
-	if (!va_mask)
-		compute_layout();
-
 	/*
 	 * Compute HYP VA by using the same computation as kern_hyp_va()
 	 */
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index 93f9f77582ae..0a920b538a89 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -142,6 +142,7 @@ static const struct prot_bits pte_bits[] = {
 		.mask	= PTE_UXN,
 		.val	= PTE_UXN,
 		.set	= "UXN",
+		.clear	= "   ",
 	}, {
 		.mask	= PTE_ATTRINDX_MASK,
 		.val	= PTE_ATTRINDX(MT_DEVICE_nGnRnE),
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index be9481cdf3b9..b65dffdfb201 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -214,15 +214,14 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 {
 	struct memblock_region *reg;
 	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
-	unsigned long max_dma32 = min;
-	unsigned long __maybe_unused max_dma = min;
+	unsigned long __maybe_unused max_dma, max_dma32;
 
 	memset(zone_size, 0, sizeof(zone_size));
 
+	max_dma = max_dma32 = min;
 #ifdef CONFIG_ZONE_DMA
-	max_dma = PFN_DOWN(arm64_dma_phys_limit);
+	max_dma = max_dma32 = PFN_DOWN(arm64_dma_phys_limit);
 	zone_size[ZONE_DMA] = max_dma - min;
-	max_dma32 = max_dma;
 #endif
 #ifdef CONFIG_ZONE_DMA32
 	max_dma32 = PFN_DOWN(arm64_dma32_phys_limit);
@@ -236,25 +235,23 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 		unsigned long start = memblock_region_memory_base_pfn(reg);
 		unsigned long end = memblock_region_memory_end_pfn(reg);
 
-		if (start >= max)
-			continue;
#ifdef CONFIG_ZONE_DMA
-		if (start < max_dma) {
-			unsigned long dma_end = min_not_zero(end, max_dma);
+		if (start >= min && start < max_dma) {
+			unsigned long dma_end = min(end, max_dma);
 			zhole_size[ZONE_DMA] -= dma_end - start;
+			start = dma_end;
 		}
 #endif
 #ifdef CONFIG_ZONE_DMA32
-		if (start < max_dma32) {
+		if (start >= max_dma && start < max_dma32) {
 			unsigned long dma32_end = min(end, max_dma32);
-			unsigned long dma32_start = max(start, max_dma);
-			zhole_size[ZONE_DMA32] -= dma32_end - dma32_start;
+			zhole_size[ZONE_DMA32] -= dma32_end - start;
+			start = dma32_end;
 		}
 #endif
-		if (end > max_dma32) {
+		if (start >= max_dma32 && start < max) {
 			unsigned long normal_end = min(end, max);
-			unsigned long normal_start = max(start, max_dma32);
-			zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
+			zhole_size[ZONE_NORMAL] -= normal_end - start;
 		}
 	}
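To see what the zone_sizes_init() rework above fixes, here is a stand-alone toy model of the new hole accounting. All PFN boundaries and the region below are made-up values for illustration, not real hardware: each memblock region is walked once, its pages are credited to ZONE_DMA, ZONE_DMA32 and ZONE_NORMAL in turn, and start is advanced past each zone boundary so no page is counted twice.

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Illustrative zone boundaries in page-frame numbers (4K pages). */
static const unsigned long min_pfn   = 0x00000;
static const unsigned long max_dma   = 0x40000;   /* 1GB */
static const unsigned long max_dma32 = 0x100000;  /* 4GB */
static const unsigned long max_pfn   = 0x200000;  /* 8GB */

int main(void)
{
	/* One memblock region lying entirely inside ZONE_DMA, like a
	 * Raspberry Pi 4 whose usable RAM all sits below 1GB. */
	unsigned long start = 0x00000, end = 0x30000;

	/* Holes start out covering each whole zone... */
	unsigned long hole_dma    = max_dma - min_pfn;
	unsigned long hole_dma32  = max_dma32 - max_dma;
	unsigned long hole_normal = max_pfn - max_dma32;

	/* ...and the fixed loop shrinks them zone by zone, advancing
	 * `start' so no page is subtracted from two zones. */
	if (start >= min_pfn && start < max_dma) {
		unsigned long dma_end = MIN(end, max_dma);
		hole_dma -= dma_end - start;
		start = dma_end;
	}
	if (start >= max_dma && start < max_dma32) {
		unsigned long dma32_end = MIN(end, max_dma32);
		hole_dma32 -= dma32_end - start;
		start = dma32_end;
	}
	if (start >= max_dma32 && start < max_pfn) {
		unsigned long normal_end = MIN(end, max_pfn);
		hole_normal -= normal_end - start;
	}

	printf("holes: DMA=0x%lx DMA32=0x%lx NORMAL=0x%lx\n",
	       hole_dma, hole_dma32, hole_normal);
	return 0;
}

In this model the region never reaches the DMA32 branch, so ZONE_DMA32's hole stays equal to its full span. The old code took its unguarded `start < max_dma32' branch for the same region and computed dma32_end - dma32_start as 0x30000 - 0x40000, an unsigned underflow that corrupted the DMA32 hole size.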