Diffstat (limited to 'arch/loongarch/kernel')
22 files changed, 1273 insertions, 144 deletions
diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile index 9a72d91cd104..8e279f04f9e7 100644 --- a/arch/loongarch/kernel/Makefile +++ b/arch/loongarch/kernel/Makefile @@ -28,6 +28,8 @@ ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_inst.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_time.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_perf_event.o = $(CC_FLAGS_FTRACE) + CFLAGS_REMOVE_rethook.o = $(CC_FLAGS_FTRACE) + CFLAGS_REMOVE_rethook_trampoline.o = $(CC_FLAGS_FTRACE) endif obj-$(CONFIG_MODULES) += module.o module-sections.o @@ -52,6 +54,10 @@ obj-$(CONFIG_UNWINDER_PROLOGUE) += unwind_prologue.o obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_regs.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o -obj-$(CONFIG_KPROBES) += kprobes.o kprobes_trampoline.o +obj-$(CONFIG_KPROBES) += kprobes.o +obj-$(CONFIG_RETHOOK) += rethook.o rethook_trampoline.o +obj-$(CONFIG_UPROBES) += uprobes.o + +obj-$(CONFIG_JUMP_LABEL) += jump_label.o CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS) diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c index 98f431157e4c..9450e09073eb 100644 --- a/arch/loongarch/kernel/acpi.c +++ b/arch/loongarch/kernel/acpi.c @@ -33,6 +33,8 @@ u64 acpi_saved_sp; #define PREFIX "ACPI: " +struct acpi_madt_core_pic acpi_core_pic[NR_CPUS]; + void __init __iomem * __acpi_map_table(unsigned long phys, unsigned long size) { @@ -99,6 +101,7 @@ acpi_parse_processor(union acpi_subtable_headers *header, const unsigned long en acpi_table_print_madt_entry(&header->common); #ifdef CONFIG_SMP + acpi_core_pic[processor->core_id] = *processor; set_processor_mask(processor->core_id, processor->flags); #endif @@ -140,6 +143,35 @@ static void __init acpi_process_madt(void) loongson_sysconf.nr_cpus = num_processors; } +int pptt_enabled; + +int __init parse_acpi_topology(void) +{ + int cpu, topology_id; + + for_each_possible_cpu(cpu) { + topology_id = find_acpi_cpu_topology(cpu, 0); + if (topology_id < 0) { + pr_warn("Invalid BIOS PPTT\n"); + return -ENOENT; + } + + if (acpi_pptt_cpu_is_thread(cpu) <= 0) + cpu_data[cpu].core = topology_id; + else { + topology_id = find_acpi_cpu_topology(cpu, 1); + if (topology_id < 0) + return -ENOENT; + + cpu_data[cpu].core = topology_id; + } + } + + pptt_enabled = 1; + + return 0; +} + #ifndef CONFIG_SUSPEND int (*acpi_suspend_lowlevel)(void); #else diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c index 5adf0f736c6d..e925579c7a71 100644 --- a/arch/loongarch/kernel/cpu-probe.c +++ b/arch/loongarch/kernel/cpu-probe.c @@ -116,6 +116,18 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c) c->options |= LOONGARCH_CPU_FPU; elf_hwcap |= HWCAP_LOONGARCH_FPU; } +#ifdef CONFIG_CPU_HAS_LSX + if (config & CPUCFG2_LSX) { + c->options |= LOONGARCH_CPU_LSX; + elf_hwcap |= HWCAP_LOONGARCH_LSX; + } +#endif +#ifdef CONFIG_CPU_HAS_LASX + if (config & CPUCFG2_LASX) { + c->options |= LOONGARCH_CPU_LASX; + elf_hwcap |= HWCAP_LOONGARCH_LASX; + } +#endif if (config & CPUCFG2_COMPLEX) { c->options |= LOONGARCH_CPU_COMPLEX; elf_hwcap |= HWCAP_LOONGARCH_COMPLEX; @@ -124,6 +136,10 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c) c->options |= LOONGARCH_CPU_CRYPTO; elf_hwcap |= HWCAP_LOONGARCH_CRYPTO; } + if (config & CPUCFG2_PTW) { + c->options |= LOONGARCH_CPU_PTW; + elf_hwcap |= HWCAP_LOONGARCH_PTW; + } if (config & CPUCFG2_LVZP) { c->options |= LOONGARCH_CPU_LVZ; elf_hwcap |= HWCAP_LOONGARCH_LVZ; diff --git a/arch/loongarch/kernel/efi-header.S b/arch/loongarch/kernel/efi-header.S index 8c1d229a2afa..5f23b85d78ca 100644 --- 
a/arch/loongarch/kernel/efi-header.S +++ b/arch/loongarch/kernel/efi-header.S @@ -24,7 +24,7 @@ .byte 0x02 /* MajorLinkerVersion */ .byte 0x14 /* MinorLinkerVersion */ .long __inittext_end - .Lefi_header_end /* SizeOfCode */ - .long _end - __initdata_begin /* SizeOfInitializedData */ + .long _kernel_vsize /* SizeOfInitializedData */ .long 0 /* SizeOfUninitializedData */ .long __efistub_efi_pe_entry - _head /* AddressOfEntryPoint */ .long .Lefi_header_end - _head /* BaseOfCode */ @@ -79,9 +79,9 @@ IMAGE_SCN_MEM_EXECUTE /* Characteristics */ .ascii ".data\0\0\0" - .long _end - __initdata_begin /* VirtualSize */ + .long _kernel_vsize /* VirtualSize */ .long __initdata_begin - _head /* VirtualAddress */ - .long _edata - __initdata_begin /* SizeOfRawData */ + .long _kernel_rsize /* SizeOfRawData */ .long __initdata_begin - _head /* PointerToRawData */ .long 0 /* PointerToRelocations */ diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S index ccde94140c89..f3df5f0a4509 100644 --- a/arch/loongarch/kernel/fpu.S +++ b/arch/loongarch/kernel/fpu.S @@ -145,6 +145,154 @@ movgr2fcsr fcsr0, \tmp0 .endm + .macro sc_save_lsx base +#ifdef CONFIG_CPU_HAS_LSX + EX vst $vr0, \base, (0 * LSX_REG_WIDTH) + EX vst $vr1, \base, (1 * LSX_REG_WIDTH) + EX vst $vr2, \base, (2 * LSX_REG_WIDTH) + EX vst $vr3, \base, (3 * LSX_REG_WIDTH) + EX vst $vr4, \base, (4 * LSX_REG_WIDTH) + EX vst $vr5, \base, (5 * LSX_REG_WIDTH) + EX vst $vr6, \base, (6 * LSX_REG_WIDTH) + EX vst $vr7, \base, (7 * LSX_REG_WIDTH) + EX vst $vr8, \base, (8 * LSX_REG_WIDTH) + EX vst $vr9, \base, (9 * LSX_REG_WIDTH) + EX vst $vr10, \base, (10 * LSX_REG_WIDTH) + EX vst $vr11, \base, (11 * LSX_REG_WIDTH) + EX vst $vr12, \base, (12 * LSX_REG_WIDTH) + EX vst $vr13, \base, (13 * LSX_REG_WIDTH) + EX vst $vr14, \base, (14 * LSX_REG_WIDTH) + EX vst $vr15, \base, (15 * LSX_REG_WIDTH) + EX vst $vr16, \base, (16 * LSX_REG_WIDTH) + EX vst $vr17, \base, (17 * LSX_REG_WIDTH) + EX vst $vr18, \base, (18 * LSX_REG_WIDTH) + EX vst $vr19, \base, (19 * LSX_REG_WIDTH) + EX vst $vr20, \base, (20 * LSX_REG_WIDTH) + EX vst $vr21, \base, (21 * LSX_REG_WIDTH) + EX vst $vr22, \base, (22 * LSX_REG_WIDTH) + EX vst $vr23, \base, (23 * LSX_REG_WIDTH) + EX vst $vr24, \base, (24 * LSX_REG_WIDTH) + EX vst $vr25, \base, (25 * LSX_REG_WIDTH) + EX vst $vr26, \base, (26 * LSX_REG_WIDTH) + EX vst $vr27, \base, (27 * LSX_REG_WIDTH) + EX vst $vr28, \base, (28 * LSX_REG_WIDTH) + EX vst $vr29, \base, (29 * LSX_REG_WIDTH) + EX vst $vr30, \base, (30 * LSX_REG_WIDTH) + EX vst $vr31, \base, (31 * LSX_REG_WIDTH) +#endif + .endm + + .macro sc_restore_lsx base +#ifdef CONFIG_CPU_HAS_LSX + EX vld $vr0, \base, (0 * LSX_REG_WIDTH) + EX vld $vr1, \base, (1 * LSX_REG_WIDTH) + EX vld $vr2, \base, (2 * LSX_REG_WIDTH) + EX vld $vr3, \base, (3 * LSX_REG_WIDTH) + EX vld $vr4, \base, (4 * LSX_REG_WIDTH) + EX vld $vr5, \base, (5 * LSX_REG_WIDTH) + EX vld $vr6, \base, (6 * LSX_REG_WIDTH) + EX vld $vr7, \base, (7 * LSX_REG_WIDTH) + EX vld $vr8, \base, (8 * LSX_REG_WIDTH) + EX vld $vr9, \base, (9 * LSX_REG_WIDTH) + EX vld $vr10, \base, (10 * LSX_REG_WIDTH) + EX vld $vr11, \base, (11 * LSX_REG_WIDTH) + EX vld $vr12, \base, (12 * LSX_REG_WIDTH) + EX vld $vr13, \base, (13 * LSX_REG_WIDTH) + EX vld $vr14, \base, (14 * LSX_REG_WIDTH) + EX vld $vr15, \base, (15 * LSX_REG_WIDTH) + EX vld $vr16, \base, (16 * LSX_REG_WIDTH) + EX vld $vr17, \base, (17 * LSX_REG_WIDTH) + EX vld $vr18, \base, (18 * LSX_REG_WIDTH) + EX vld $vr19, \base, (19 * LSX_REG_WIDTH) + EX vld $vr20, \base, (20 * LSX_REG_WIDTH) + EX vld 
$vr21, \base, (21 * LSX_REG_WIDTH) + EX vld $vr22, \base, (22 * LSX_REG_WIDTH) + EX vld $vr23, \base, (23 * LSX_REG_WIDTH) + EX vld $vr24, \base, (24 * LSX_REG_WIDTH) + EX vld $vr25, \base, (25 * LSX_REG_WIDTH) + EX vld $vr26, \base, (26 * LSX_REG_WIDTH) + EX vld $vr27, \base, (27 * LSX_REG_WIDTH) + EX vld $vr28, \base, (28 * LSX_REG_WIDTH) + EX vld $vr29, \base, (29 * LSX_REG_WIDTH) + EX vld $vr30, \base, (30 * LSX_REG_WIDTH) + EX vld $vr31, \base, (31 * LSX_REG_WIDTH) +#endif + .endm + + .macro sc_save_lasx base +#ifdef CONFIG_CPU_HAS_LASX + EX xvst $xr0, \base, (0 * LASX_REG_WIDTH) + EX xvst $xr1, \base, (1 * LASX_REG_WIDTH) + EX xvst $xr2, \base, (2 * LASX_REG_WIDTH) + EX xvst $xr3, \base, (3 * LASX_REG_WIDTH) + EX xvst $xr4, \base, (4 * LASX_REG_WIDTH) + EX xvst $xr5, \base, (5 * LASX_REG_WIDTH) + EX xvst $xr6, \base, (6 * LASX_REG_WIDTH) + EX xvst $xr7, \base, (7 * LASX_REG_WIDTH) + EX xvst $xr8, \base, (8 * LASX_REG_WIDTH) + EX xvst $xr9, \base, (9 * LASX_REG_WIDTH) + EX xvst $xr10, \base, (10 * LASX_REG_WIDTH) + EX xvst $xr11, \base, (11 * LASX_REG_WIDTH) + EX xvst $xr12, \base, (12 * LASX_REG_WIDTH) + EX xvst $xr13, \base, (13 * LASX_REG_WIDTH) + EX xvst $xr14, \base, (14 * LASX_REG_WIDTH) + EX xvst $xr15, \base, (15 * LASX_REG_WIDTH) + EX xvst $xr16, \base, (16 * LASX_REG_WIDTH) + EX xvst $xr17, \base, (17 * LASX_REG_WIDTH) + EX xvst $xr18, \base, (18 * LASX_REG_WIDTH) + EX xvst $xr19, \base, (19 * LASX_REG_WIDTH) + EX xvst $xr20, \base, (20 * LASX_REG_WIDTH) + EX xvst $xr21, \base, (21 * LASX_REG_WIDTH) + EX xvst $xr22, \base, (22 * LASX_REG_WIDTH) + EX xvst $xr23, \base, (23 * LASX_REG_WIDTH) + EX xvst $xr24, \base, (24 * LASX_REG_WIDTH) + EX xvst $xr25, \base, (25 * LASX_REG_WIDTH) + EX xvst $xr26, \base, (26 * LASX_REG_WIDTH) + EX xvst $xr27, \base, (27 * LASX_REG_WIDTH) + EX xvst $xr28, \base, (28 * LASX_REG_WIDTH) + EX xvst $xr29, \base, (29 * LASX_REG_WIDTH) + EX xvst $xr30, \base, (30 * LASX_REG_WIDTH) + EX xvst $xr31, \base, (31 * LASX_REG_WIDTH) +#endif + .endm + + .macro sc_restore_lasx base +#ifdef CONFIG_CPU_HAS_LASX + EX xvld $xr0, \base, (0 * LASX_REG_WIDTH) + EX xvld $xr1, \base, (1 * LASX_REG_WIDTH) + EX xvld $xr2, \base, (2 * LASX_REG_WIDTH) + EX xvld $xr3, \base, (3 * LASX_REG_WIDTH) + EX xvld $xr4, \base, (4 * LASX_REG_WIDTH) + EX xvld $xr5, \base, (5 * LASX_REG_WIDTH) + EX xvld $xr6, \base, (6 * LASX_REG_WIDTH) + EX xvld $xr7, \base, (7 * LASX_REG_WIDTH) + EX xvld $xr8, \base, (8 * LASX_REG_WIDTH) + EX xvld $xr9, \base, (9 * LASX_REG_WIDTH) + EX xvld $xr10, \base, (10 * LASX_REG_WIDTH) + EX xvld $xr11, \base, (11 * LASX_REG_WIDTH) + EX xvld $xr12, \base, (12 * LASX_REG_WIDTH) + EX xvld $xr13, \base, (13 * LASX_REG_WIDTH) + EX xvld $xr14, \base, (14 * LASX_REG_WIDTH) + EX xvld $xr15, \base, (15 * LASX_REG_WIDTH) + EX xvld $xr16, \base, (16 * LASX_REG_WIDTH) + EX xvld $xr17, \base, (17 * LASX_REG_WIDTH) + EX xvld $xr18, \base, (18 * LASX_REG_WIDTH) + EX xvld $xr19, \base, (19 * LASX_REG_WIDTH) + EX xvld $xr20, \base, (20 * LASX_REG_WIDTH) + EX xvld $xr21, \base, (21 * LASX_REG_WIDTH) + EX xvld $xr22, \base, (22 * LASX_REG_WIDTH) + EX xvld $xr23, \base, (23 * LASX_REG_WIDTH) + EX xvld $xr24, \base, (24 * LASX_REG_WIDTH) + EX xvld $xr25, \base, (25 * LASX_REG_WIDTH) + EX xvld $xr26, \base, (26 * LASX_REG_WIDTH) + EX xvld $xr27, \base, (27 * LASX_REG_WIDTH) + EX xvld $xr28, \base, (28 * LASX_REG_WIDTH) + EX xvld $xr29, \base, (29 * LASX_REG_WIDTH) + EX xvld $xr30, \base, (30 * LASX_REG_WIDTH) + EX xvld $xr31, \base, (31 * LASX_REG_WIDTH) +#endif + .endm + /* * 
Save a thread's fp context. */ @@ -166,6 +314,76 @@ SYM_FUNC_START(_restore_fp) jr ra SYM_FUNC_END(_restore_fp) +#ifdef CONFIG_CPU_HAS_LSX + +/* + * Save a thread's LSX vector context. + */ +SYM_FUNC_START(_save_lsx) + lsx_save_all a0 t1 t2 + jr ra +SYM_FUNC_END(_save_lsx) +EXPORT_SYMBOL(_save_lsx) + +/* + * Restore a thread's LSX vector context. + */ +SYM_FUNC_START(_restore_lsx) + lsx_restore_all a0 t1 t2 + jr ra +SYM_FUNC_END(_restore_lsx) + +SYM_FUNC_START(_save_lsx_upper) + lsx_save_all_upper a0 t0 t1 + jr ra +SYM_FUNC_END(_save_lsx_upper) + +SYM_FUNC_START(_restore_lsx_upper) + lsx_restore_all_upper a0 t0 t1 + jr ra +SYM_FUNC_END(_restore_lsx_upper) + +SYM_FUNC_START(_init_lsx_upper) + lsx_init_all_upper t1 + jr ra +SYM_FUNC_END(_init_lsx_upper) +#endif + +#ifdef CONFIG_CPU_HAS_LASX + +/* + * Save a thread's LASX vector context. + */ +SYM_FUNC_START(_save_lasx) + lasx_save_all a0 t1 t2 + jr ra +SYM_FUNC_END(_save_lasx) +EXPORT_SYMBOL(_save_lasx) + +/* + * Restore a thread's LASX vector context. + */ +SYM_FUNC_START(_restore_lasx) + lasx_restore_all a0 t1 t2 + jr ra +SYM_FUNC_END(_restore_lasx) + +SYM_FUNC_START(_save_lasx_upper) + lasx_save_all_upper a0 t0 t1 + jr ra +SYM_FUNC_END(_save_lasx_upper) + +SYM_FUNC_START(_restore_lasx_upper) + lasx_restore_all_upper a0 t0 t1 + jr ra +SYM_FUNC_END(_restore_lasx_upper) + +SYM_FUNC_START(_init_lasx_upper) + lasx_init_all_upper t1 + jr ra +SYM_FUNC_END(_init_lasx_upper) +#endif + /* * Load the FPU with signalling NANS. This bit pattern we're using has * the property that no matter whether considered as single or as double @@ -244,6 +462,58 @@ SYM_FUNC_START(_restore_fp_context) jr ra SYM_FUNC_END(_restore_fp_context) +/* + * a0: fpregs + * a1: fcc + * a2: fcsr + */ +SYM_FUNC_START(_save_lsx_context) + sc_save_fcc a1, t0, t1 + sc_save_fcsr a2, t0 + sc_save_lsx a0 + li.w a0, 0 # success + jr ra +SYM_FUNC_END(_save_lsx_context) + +/* + * a0: fpregs + * a1: fcc + * a2: fcsr + */ +SYM_FUNC_START(_restore_lsx_context) + sc_restore_lsx a0 + sc_restore_fcc a1, t1, t2 + sc_restore_fcsr a2, t1 + li.w a0, 0 # success + jr ra +SYM_FUNC_END(_restore_lsx_context) + +/* + * a0: fpregs + * a1: fcc + * a2: fcsr + */ +SYM_FUNC_START(_save_lasx_context) + sc_save_fcc a1, t0, t1 + sc_save_fcsr a2, t0 + sc_save_lasx a0 + li.w a0, 0 # success + jr ra +SYM_FUNC_END(_save_lasx_context) + +/* + * a0: fpregs + * a1: fcc + * a2: fcsr + */ +SYM_FUNC_START(_restore_lasx_context) + sc_restore_lasx a0 + sc_restore_fcc a1, t1, t2 + sc_restore_fcsr a2, t1 + li.w a0, 0 # success + jr ra +SYM_FUNC_END(_restore_lasx_context) + SYM_FUNC_START(fault) li.w a0, -EFAULT # failure jr ra diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S index aa64b179744f..5e828a8bc0a0 100644 --- a/arch/loongarch/kernel/head.S +++ b/arch/loongarch/kernel/head.S @@ -23,7 +23,7 @@ _head: .word MZ_MAGIC /* "MZ", MS-DOS header */ .org 0x8 .dword kernel_entry /* Kernel entry point */ - .dword _end - _text /* Kernel image effective size */ + .dword _kernel_asize /* Kernel image effective size */ .quad PHYS_LINK_KADDR /* Kernel image load offset from start of RAM */ .org 0x38 /* 0x20 ~ 0x37 reserved */ .long LINUX_PE_MAGIC @@ -32,9 +32,9 @@ _head: pe_header: __EFI_PE_HEADER -SYM_DATA(kernel_asize, .long _end - _text); -SYM_DATA(kernel_fsize, .long _edata - _text); -SYM_DATA(kernel_offset, .long kernel_offset - _text); +SYM_DATA(kernel_asize, .long _kernel_asize); +SYM_DATA(kernel_fsize, .long _kernel_fsize); +SYM_DATA(kernel_offset, .long _kernel_offset); #endif diff --git 
a/arch/loongarch/kernel/inst.c b/arch/loongarch/kernel/inst.c index 258ef267cd30..3050329556d1 100644 --- a/arch/loongarch/kernel/inst.c +++ b/arch/loongarch/kernel/inst.c @@ -133,6 +133,51 @@ void simu_branch(struct pt_regs *regs, union loongarch_instruction insn) } } +bool insns_not_supported(union loongarch_instruction insn) +{ + switch (insn.reg3_format.opcode) { + case amswapw_op ... ammindbdu_op: + pr_notice("atomic memory access instructions are not supported\n"); + return true; + } + + switch (insn.reg2i14_format.opcode) { + case llw_op: + case lld_op: + case scw_op: + case scd_op: + pr_notice("ll and sc instructions are not supported\n"); + return true; + } + + switch (insn.reg1i21_format.opcode) { + case bceqz_op: + pr_notice("bceqz and bcnez instructions are not supported\n"); + return true; + } + + return false; +} + +bool insns_need_simulation(union loongarch_instruction insn) +{ + if (is_pc_ins(&insn)) + return true; + + if (is_branch_ins(&insn)) + return true; + + return false; +} + +void arch_simulate_insn(union loongarch_instruction insn, struct pt_regs *regs) +{ + if (is_pc_ins(&insn)) + simu_pc(regs, insn); + else if (is_branch_ins(&insn)) + simu_branch(regs, insn); +} + int larch_insn_read(void *addr, u32 *insnp) { int ret; @@ -208,6 +253,20 @@ u32 larch_insn_gen_bl(unsigned long pc, unsigned long dest) return insn.word; } +u32 larch_insn_gen_break(int imm) +{ + union loongarch_instruction insn; + + if (imm < 0 || imm >= SZ_32K) { + pr_warn("The generated break instruction is out of range.\n"); + return INSN_BREAK; + } + + emit_break(&insn, imm); + + return insn.word; +} + u32 larch_insn_gen_or(enum loongarch_gpr rd, enum loongarch_gpr rj, enum loongarch_gpr rk) { union loongarch_instruction insn; @@ -226,6 +285,11 @@ u32 larch_insn_gen_lu12iw(enum loongarch_gpr rd, int imm) { union loongarch_instruction insn; + if (imm < -SZ_512K || imm >= SZ_512K) { + pr_warn("The generated lu12i.w instruction is out of range.\n"); + return INSN_BREAK; + } + emit_lu12iw(&insn, rd, imm); return insn.word; @@ -235,6 +299,11 @@ u32 larch_insn_gen_lu32id(enum loongarch_gpr rd, int imm) { union loongarch_instruction insn; + if (imm < -SZ_512K || imm >= SZ_512K) { + pr_warn("The generated lu32i.d instruction is out of range.\n"); + return INSN_BREAK; + } + emit_lu32id(&insn, rd, imm); return insn.word; @@ -244,16 +313,26 @@ u32 larch_insn_gen_lu52id(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm) { union loongarch_instruction insn; + if (imm < -SZ_2K || imm >= SZ_2K) { + pr_warn("The generated lu52i.d instruction is out of range.\n"); + return INSN_BREAK; + } + emit_lu52id(&insn, rd, rj, imm); return insn.word; } -u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, unsigned long pc, unsigned long dest) +u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm) { union loongarch_instruction insn; - emit_jirl(&insn, rj, rd, (dest - pc) >> 2); + if ((imm & 3) || imm < -SZ_128K || imm >= SZ_128K) { + pr_warn("The generated jirl instruction is out of range.\n"); + return INSN_BREAK; + } + + emit_jirl(&insn, rj, rd, imm >> 2); return insn.word; } diff --git a/arch/loongarch/kernel/jump_label.c b/arch/loongarch/kernel/jump_label.c new file mode 100644 index 000000000000..31891214b767 --- /dev/null +++ b/arch/loongarch/kernel/jump_label.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2023 Loongson Technology Corporation Limited + * + * Based on arch/arm64/kernel/jump_label.c + */ +#include <linux/kernel.h> +#include 
<linux/jump_label.h> +#include <asm/inst.h> + +void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type) +{ + u32 insn; + void *addr = (void *)jump_entry_code(entry); + + if (type == JUMP_LABEL_JMP) + insn = larch_insn_gen_b(jump_entry_code(entry), jump_entry_target(entry)); + else + insn = larch_insn_gen_nop(); + + larch_insn_patch_text(addr, insn); +} diff --git a/arch/loongarch/kernel/kprobes.c b/arch/loongarch/kernel/kprobes.c index 56c8c4b09a42..17b040bd6067 100644 --- a/arch/loongarch/kernel/kprobes.c +++ b/arch/loongarch/kernel/kprobes.c @@ -4,69 +4,16 @@ #include <linux/preempt.h> #include <asm/break.h> -static const union loongarch_instruction breakpoint_insn = { - .reg0i15_format = { - .opcode = break_op, - .immediate = BRK_KPROBE_BP, - } -}; - -static const union loongarch_instruction singlestep_insn = { - .reg0i15_format = { - .opcode = break_op, - .immediate = BRK_KPROBE_SSTEPBP, - } -}; +#define KPROBE_BP_INSN larch_insn_gen_break(BRK_KPROBE_BP) +#define KPROBE_SSTEPBP_INSN larch_insn_gen_break(BRK_KPROBE_SSTEPBP) DEFINE_PER_CPU(struct kprobe *, current_kprobe); DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); -static bool insns_not_supported(union loongarch_instruction insn) -{ - switch (insn.reg2i14_format.opcode) { - case llw_op: - case lld_op: - case scw_op: - case scd_op: - pr_notice("kprobe: ll and sc instructions are not supported\n"); - return true; - } - - switch (insn.reg1i21_format.opcode) { - case bceqz_op: - pr_notice("kprobe: bceqz and bcnez instructions are not supported\n"); - return true; - } - - return false; -} -NOKPROBE_SYMBOL(insns_not_supported); - -static bool insns_need_simulation(struct kprobe *p) -{ - if (is_pc_ins(&p->opcode)) - return true; - - if (is_branch_ins(&p->opcode)) - return true; - - return false; -} -NOKPROBE_SYMBOL(insns_need_simulation); - -static void arch_simulate_insn(struct kprobe *p, struct pt_regs *regs) -{ - if (is_pc_ins(&p->opcode)) - simu_pc(regs, p->opcode); - else if (is_branch_ins(&p->opcode)) - simu_branch(regs, p->opcode); -} -NOKPROBE_SYMBOL(arch_simulate_insn); - static void arch_prepare_ss_slot(struct kprobe *p) { p->ainsn.insn[0] = *p->addr; - p->ainsn.insn[1] = singlestep_insn; + p->ainsn.insn[1] = KPROBE_SSTEPBP_INSN; p->ainsn.restore = (unsigned long)p->addr + LOONGARCH_INSN_SIZE; } NOKPROBE_SYMBOL(arch_prepare_ss_slot); @@ -79,17 +26,20 @@ NOKPROBE_SYMBOL(arch_prepare_simulate); int arch_prepare_kprobe(struct kprobe *p) { + union loongarch_instruction insn; + if ((unsigned long)p->addr & 0x3) return -EILSEQ; /* copy instruction */ p->opcode = *p->addr; + insn.word = p->opcode; /* decode instruction */ - if (insns_not_supported(p->opcode)) + if (insns_not_supported(insn)) return -EINVAL; - if (insns_need_simulation(p)) { + if (insns_need_simulation(insn)) { p->ainsn.insn = NULL; } else { p->ainsn.insn = get_insn_slot(); @@ -110,7 +60,7 @@ NOKPROBE_SYMBOL(arch_prepare_kprobe); /* Install breakpoint in text */ void arch_arm_kprobe(struct kprobe *p) { - *p->addr = breakpoint_insn; + *p->addr = KPROBE_BP_INSN; flush_insn_slot(p); } NOKPROBE_SYMBOL(arch_arm_kprobe); @@ -205,6 +155,8 @@ NOKPROBE_SYMBOL(post_kprobe_handler); static void setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb, int reenter) { + union loongarch_instruction insn; + if (reenter) { save_previous_kprobe(kcb); set_current_kprobe(p); @@ -220,7 +172,8 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs, regs->csr_era = (unsigned long)p->ainsn.insn; } else { /* simulate 
single steping */ - arch_simulate_insn(p, regs); + insn.word = p->opcode; + arch_simulate_insn(insn, regs); /* now go for post processing */ post_kprobe_handler(p, kcb, regs); } @@ -295,7 +248,7 @@ bool kprobe_breakpoint_handler(struct pt_regs *regs) } } - if (addr->word != breakpoint_insn.word) { + if (*addr != KPROBE_BP_INSN) { /* * The breakpoint instruction was removed right * after we hit it. Another cpu has removed @@ -378,27 +331,6 @@ int __init arch_init_kprobes(void) return 0; } -/* ASM function that handles the kretprobes must not be probed */ -NOKPROBE_SYMBOL(__kretprobe_trampoline); - -/* Called from __kretprobe_trampoline */ -void __used *trampoline_probe_handler(struct pt_regs *regs) -{ - return (void *)kretprobe_trampoline_handler(regs, NULL); -} -NOKPROBE_SYMBOL(trampoline_probe_handler); - -void arch_prepare_kretprobe(struct kretprobe_instance *ri, - struct pt_regs *regs) -{ - ri->ret_addr = (kprobe_opcode_t *)regs->regs[1]; - ri->fp = NULL; - - /* Replace the return addr with trampoline addr */ - regs->regs[1] = (unsigned long)&__kretprobe_trampoline; -} -NOKPROBE_SYMBOL(arch_prepare_kretprobe); - int arch_trampoline_kprobe(struct kprobe *p) { return 0; diff --git a/arch/loongarch/kernel/proc.c b/arch/loongarch/kernel/proc.c index 0d82907b5404..0d33cbc47e51 100644 --- a/arch/loongarch/kernel/proc.c +++ b/arch/loongarch/kernel/proc.c @@ -49,6 +49,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "processor\t\t: %ld\n", n); seq_printf(m, "package\t\t\t: %d\n", cpu_data[n].package); seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core); + seq_printf(m, "global_id\t\t: %d\n", cpu_data[n].global_id); seq_printf(m, "CPU Family\t\t: %s\n", __cpu_family[n]); seq_printf(m, "Model Name\t\t: %s\n", __cpu_full_name[n]); seq_printf(m, "CPU Revision\t\t: 0x%02x\n", version); @@ -79,6 +80,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) if (cpu_has_crc32) seq_printf(m, " crc32"); if (cpu_has_complex) seq_printf(m, " complex"); if (cpu_has_crypto) seq_printf(m, " crypto"); + if (cpu_has_ptw) seq_printf(m, " ptw"); if (cpu_has_lvz) seq_printf(m, " lvz"); if (cpu_has_lbt_x86) seq_printf(m, " lbt_x86"); if (cpu_has_lbt_arm) seq_printf(m, " lbt_arm"); diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c index b71e17c1cc0c..2e04eb07abb6 100644 --- a/arch/loongarch/kernel/process.c +++ b/arch/loongarch/kernel/process.c @@ -117,8 +117,14 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) */ preempt_disable(); - if (is_fpu_owner()) - save_fp(current); + if (is_fpu_owner()) { + if (is_lasx_enabled()) + save_lasx(current); + else if (is_lsx_enabled()) + save_lsx(current); + else + save_fp(current); + } preempt_enable(); @@ -285,7 +291,7 @@ unsigned long stack_top(void) /* Space for the VDSO & data page */ top -= PAGE_ALIGN(current->thread.vdso->size); - top -= PAGE_SIZE; + top -= VVAR_SIZE; /* Space to randomize the VDSO base */ if (current->flags & PF_RANDOMIZE) diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c index 5fcffb452367..a0767c3a0f0a 100644 --- a/arch/loongarch/kernel/ptrace.c +++ b/arch/loongarch/kernel/ptrace.c @@ -250,6 +250,90 @@ static int cfg_set(struct task_struct *target, return 0; } +#ifdef CONFIG_CPU_HAS_LSX + +static void copy_pad_fprs(struct task_struct *target, + const struct user_regset *regset, + struct membuf *to, unsigned int live_sz) +{ + int i, j; + unsigned long long fill = ~0ull; + unsigned int cp_sz, pad_sz; + + cp_sz = min(regset->size, live_sz); + pad_sz = 
regset->size - cp_sz; + WARN_ON(pad_sz % sizeof(fill)); + + for (i = 0; i < NUM_FPU_REGS; i++) { + membuf_write(to, &target->thread.fpu.fpr[i], cp_sz); + for (j = 0; j < (pad_sz / sizeof(fill)); j++) { + membuf_store(to, fill); + } + } +} + +static int simd_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + const unsigned int wr_size = NUM_FPU_REGS * regset->size; + + if (!tsk_used_math(target)) { + /* The task hasn't used FP or LSX, fill with 0xff */ + copy_pad_fprs(target, regset, &to, 0); + } else if (!test_tsk_thread_flag(target, TIF_LSX_CTX_LIVE)) { + /* Copy scalar FP context, fill the rest with 0xff */ + copy_pad_fprs(target, regset, &to, 8); +#ifdef CONFIG_CPU_HAS_LASX + } else if (!test_tsk_thread_flag(target, TIF_LASX_CTX_LIVE)) { + /* Copy LSX 128 Bit context, fill the rest with 0xff */ + copy_pad_fprs(target, regset, &to, 16); +#endif + } else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) { + /* Trivially copy the vector registers */ + membuf_write(&to, &target->thread.fpu.fpr, wr_size); + } else { + /* Copy as much context as possible, fill the rest with 0xff */ + copy_pad_fprs(target, regset, &to, sizeof(target->thread.fpu.fpr[0])); + } + + return 0; +} + +static int simd_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + const unsigned int wr_size = NUM_FPU_REGS * regset->size; + unsigned int cp_sz; + int i, err, start; + + init_fp_ctx(target); + + if (sizeof(target->thread.fpu.fpr[0]) == regset->size) { + /* Trivially copy the vector registers */ + err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.fpu.fpr, + 0, wr_size); + } else { + /* Copy as much context as possible */ + cp_sz = min_t(unsigned int, regset->size, + sizeof(target->thread.fpu.fpr[0])); + + i = start = err = 0; + for (; i < NUM_FPU_REGS; i++, start += regset->size) { + err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.fpu.fpr[i], + start, start + cp_sz); + } + } + + return err; +} + +#endif /* CONFIG_CPU_HAS_LSX */ + #ifdef CONFIG_HAVE_HW_BREAKPOINT /* @@ -708,6 +792,12 @@ enum loongarch_regset { REGSET_GPR, REGSET_FPR, REGSET_CPUCFG, +#ifdef CONFIG_CPU_HAS_LSX + REGSET_LSX, +#endif +#ifdef CONFIG_CPU_HAS_LASX + REGSET_LASX, +#endif #ifdef CONFIG_HAVE_HW_BREAKPOINT REGSET_HW_BREAK, REGSET_HW_WATCH, @@ -739,6 +829,26 @@ static const struct user_regset loongarch64_regsets[] = { .regset_get = cfg_get, .set = cfg_set, }, +#ifdef CONFIG_CPU_HAS_LSX + [REGSET_LSX] = { + .core_note_type = NT_LOONGARCH_LSX, + .n = NUM_FPU_REGS, + .size = 16, + .align = 16, + .regset_get = simd_get, + .set = simd_set, + }, +#endif +#ifdef CONFIG_CPU_HAS_LASX + [REGSET_LASX] = { + .core_note_type = NT_LOONGARCH_LASX, + .n = NUM_FPU_REGS, + .size = 32, + .align = 32, + .regset_get = simd_get, + .set = simd_set, + }, +#endif #ifdef CONFIG_HAVE_HW_BREAKPOINT [REGSET_HW_BREAK] = { .core_note_type = NT_LOONGARCH_HW_BREAK, diff --git a/arch/loongarch/kernel/rethook.c b/arch/loongarch/kernel/rethook.c new file mode 100644 index 000000000000..db1c5f5024fd --- /dev/null +++ b/arch/loongarch/kernel/rethook.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Generic return hook for LoongArch. 
+ */ + +#include <linux/kprobes.h> +#include <linux/rethook.h> +#include "rethook.h" + +/* This is called from arch_rethook_trampoline() */ +unsigned long __used arch_rethook_trampoline_callback(struct pt_regs *regs) +{ + return rethook_trampoline_handler(regs, 0); +} +NOKPROBE_SYMBOL(arch_rethook_trampoline_callback); + +void arch_rethook_prepare(struct rethook_node *rhn, struct pt_regs *regs, bool mcount) +{ + rhn->frame = 0; + rhn->ret_addr = regs->regs[1]; + + /* replace return addr with trampoline */ + regs->regs[1] = (unsigned long)arch_rethook_trampoline; +} +NOKPROBE_SYMBOL(arch_rethook_prepare); + +/* ASM function that handles the rethook must not be probed itself */ +NOKPROBE_SYMBOL(arch_rethook_trampoline); diff --git a/arch/loongarch/kernel/rethook.h b/arch/loongarch/kernel/rethook.h new file mode 100644 index 000000000000..3f1c1edf0d0b --- /dev/null +++ b/arch/loongarch/kernel/rethook.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LOONGARCH_RETHOOK_H +#define __LOONGARCH_RETHOOK_H + +unsigned long arch_rethook_trampoline_callback(struct pt_regs *regs); +void arch_rethook_prepare(struct rethook_node *rhn, struct pt_regs *regs, bool mcount); + +#endif diff --git a/arch/loongarch/kernel/kprobes_trampoline.S b/arch/loongarch/kernel/rethook_trampoline.S index af94b0d213fa..bd5772c96338 100644 --- a/arch/loongarch/kernel/kprobes_trampoline.S +++ b/arch/loongarch/kernel/rethook_trampoline.S @@ -75,7 +75,7 @@ csrxchg t0, t1, LOONGARCH_CSR_CRMD .endm -SYM_CODE_START(__kretprobe_trampoline) +SYM_CODE_START(arch_rethook_trampoline) addi.d sp, sp, -PT_SIZE save_all_base_regs @@ -84,7 +84,7 @@ SYM_CODE_START(__kretprobe_trampoline) move a0, sp /* pt_regs */ - bl trampoline_probe_handler + bl arch_rethook_trampoline_callback /* use the result as the return-address */ move ra, a0 @@ -93,4 +93,4 @@ SYM_CODE_START(__kretprobe_trampoline) addi.d sp, sp, PT_SIZE jr ra -SYM_CODE_END(__kretprobe_trampoline) +SYM_CODE_END(arch_rethook_trampoline) diff --git a/arch/loongarch/kernel/signal.c b/arch/loongarch/kernel/signal.c index 8f5b7986374b..ceb899366c0a 100644 --- a/arch/loongarch/kernel/signal.c +++ b/arch/loongarch/kernel/signal.c @@ -50,6 +50,14 @@ extern asmlinkage int _save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr); extern asmlinkage int _restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr); +extern asmlinkage int +_save_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr); +extern asmlinkage int +_restore_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr); +extern asmlinkage int +_save_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr); +extern asmlinkage int +_restore_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr); struct rt_sigframe { struct siginfo rs_info; @@ -65,6 +73,8 @@ struct extctx_layout { unsigned long size; unsigned int flags; struct _ctx_layout fpu; + struct _ctx_layout lsx; + struct _ctx_layout lasx; struct _ctx_layout end; }; @@ -115,6 +125,96 @@ static int copy_fpu_from_sigcontext(struct fpu_context __user *ctx) return err; } +static int copy_lsx_to_sigcontext(struct lsx_context __user *ctx) +{ + int i; + int err = 0; + uint64_t __user *regs = (uint64_t *)&ctx->regs; + uint64_t __user *fcc = &ctx->fcc; + uint32_t __user *fcsr = &ctx->fcsr; + + for (i = 0; i < NUM_FPU_REGS; i++) { + err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0), + &regs[2*i]); + err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1), + &regs[2*i+1]); + } + err 
|= __put_user(current->thread.fpu.fcc, fcc); + err |= __put_user(current->thread.fpu.fcsr, fcsr); + + return err; +} + +static int copy_lsx_from_sigcontext(struct lsx_context __user *ctx) +{ + int i; + int err = 0; + u64 fpr_val; + uint64_t __user *regs = (uint64_t *)&ctx->regs; + uint64_t __user *fcc = &ctx->fcc; + uint32_t __user *fcsr = &ctx->fcsr; + + for (i = 0; i < NUM_FPU_REGS; i++) { + err |= __get_user(fpr_val, &regs[2*i]); + set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val); + err |= __get_user(fpr_val, &regs[2*i+1]); + set_fpr64(&current->thread.fpu.fpr[i], 1, fpr_val); + } + err |= __get_user(current->thread.fpu.fcc, fcc); + err |= __get_user(current->thread.fpu.fcsr, fcsr); + + return err; +} + +static int copy_lasx_to_sigcontext(struct lasx_context __user *ctx) +{ + int i; + int err = 0; + uint64_t __user *regs = (uint64_t *)&ctx->regs; + uint64_t __user *fcc = &ctx->fcc; + uint32_t __user *fcsr = &ctx->fcsr; + + for (i = 0; i < NUM_FPU_REGS; i++) { + err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0), + &regs[4*i]); + err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1), + &regs[4*i+1]); + err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 2), + &regs[4*i+2]); + err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 3), + &regs[4*i+3]); + } + err |= __put_user(current->thread.fpu.fcc, fcc); + err |= __put_user(current->thread.fpu.fcsr, fcsr); + + return err; +} + +static int copy_lasx_from_sigcontext(struct lasx_context __user *ctx) +{ + int i; + int err = 0; + u64 fpr_val; + uint64_t __user *regs = (uint64_t *)&ctx->regs; + uint64_t __user *fcc = &ctx->fcc; + uint32_t __user *fcsr = &ctx->fcsr; + + for (i = 0; i < NUM_FPU_REGS; i++) { + err |= __get_user(fpr_val, &regs[4*i]); + set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val); + err |= __get_user(fpr_val, &regs[4*i+1]); + set_fpr64(&current->thread.fpu.fpr[i], 1, fpr_val); + err |= __get_user(fpr_val, &regs[4*i+2]); + set_fpr64(&current->thread.fpu.fpr[i], 2, fpr_val); + err |= __get_user(fpr_val, &regs[4*i+3]); + set_fpr64(&current->thread.fpu.fpr[i], 3, fpr_val); + } + err |= __get_user(current->thread.fpu.fcc, fcc); + err |= __get_user(current->thread.fpu.fcsr, fcsr); + + return err; +} + /* * Wrappers for the assembly _{save,restore}_fp_context functions. 
*/ @@ -136,6 +236,42 @@ static int restore_hw_fpu_context(struct fpu_context __user *ctx) return _restore_fp_context(regs, fcc, fcsr); } +static int save_hw_lsx_context(struct lsx_context __user *ctx) +{ + uint64_t __user *regs = (uint64_t *)&ctx->regs; + uint64_t __user *fcc = &ctx->fcc; + uint32_t __user *fcsr = &ctx->fcsr; + + return _save_lsx_context(regs, fcc, fcsr); +} + +static int restore_hw_lsx_context(struct lsx_context __user *ctx) +{ + uint64_t __user *regs = (uint64_t *)&ctx->regs; + uint64_t __user *fcc = &ctx->fcc; + uint32_t __user *fcsr = &ctx->fcsr; + + return _restore_lsx_context(regs, fcc, fcsr); +} + +static int save_hw_lasx_context(struct lasx_context __user *ctx) +{ + uint64_t __user *regs = (uint64_t *)&ctx->regs; + uint64_t __user *fcc = &ctx->fcc; + uint32_t __user *fcsr = &ctx->fcsr; + + return _save_lasx_context(regs, fcc, fcsr); +} + +static int restore_hw_lasx_context(struct lasx_context __user *ctx) +{ + uint64_t __user *regs = (uint64_t *)&ctx->regs; + uint64_t __user *fcc = &ctx->fcc; + uint32_t __user *fcsr = &ctx->fcsr; + + return _restore_lasx_context(regs, fcc, fcsr); +} + static int fcsr_pending(unsigned int __user *fcsr) { int err, sig = 0; @@ -227,6 +363,162 @@ static int protected_restore_fpu_context(struct extctx_layout *extctx) return err ?: sig; } +static int protected_save_lsx_context(struct extctx_layout *extctx) +{ + int err = 0; + struct sctx_info __user *info = extctx->lsx.addr; + struct lsx_context __user *lsx_ctx = (struct lsx_context *)get_ctx_through_ctxinfo(info); + uint64_t __user *regs = (uint64_t *)&lsx_ctx->regs; + uint64_t __user *fcc = &lsx_ctx->fcc; + uint32_t __user *fcsr = &lsx_ctx->fcsr; + + while (1) { + lock_fpu_owner(); + if (is_lsx_enabled()) + err = save_hw_lsx_context(lsx_ctx); + else { + if (is_fpu_owner()) + save_fp(current); + err = copy_lsx_to_sigcontext(lsx_ctx); + } + unlock_fpu_owner(); + + err |= __put_user(LSX_CTX_MAGIC, &info->magic); + err |= __put_user(extctx->lsx.size, &info->size); + + if (likely(!err)) + break; + /* Touch the LSX context and try again */ + err = __put_user(0, &regs[0]) | + __put_user(0, &regs[32*2-1]) | + __put_user(0, fcc) | + __put_user(0, fcsr); + if (err) + return err; /* really bad sigcontext */ + } + + return err; +} + +static int protected_restore_lsx_context(struct extctx_layout *extctx) +{ + int err = 0, sig = 0, tmp __maybe_unused; + struct sctx_info __user *info = extctx->lsx.addr; + struct lsx_context __user *lsx_ctx = (struct lsx_context *)get_ctx_through_ctxinfo(info); + uint64_t __user *regs = (uint64_t *)&lsx_ctx->regs; + uint64_t __user *fcc = &lsx_ctx->fcc; + uint32_t __user *fcsr = &lsx_ctx->fcsr; + + err = sig = fcsr_pending(fcsr); + if (err < 0) + return err; + + while (1) { + lock_fpu_owner(); + if (is_lsx_enabled()) + err = restore_hw_lsx_context(lsx_ctx); + else { + err = copy_lsx_from_sigcontext(lsx_ctx); + if (is_fpu_owner()) + restore_fp(current); + } + unlock_fpu_owner(); + + if (likely(!err)) + break; + /* Touch the LSX context and try again */ + err = __get_user(tmp, &regs[0]) | + __get_user(tmp, &regs[32*2-1]) | + __get_user(tmp, fcc) | + __get_user(tmp, fcsr); + if (err) + break; /* really bad sigcontext */ + } + + return err ?: sig; +} + +static int protected_save_lasx_context(struct extctx_layout *extctx) +{ + int err = 0; + struct sctx_info __user *info = extctx->lasx.addr; + struct lasx_context __user *lasx_ctx = + (struct lasx_context *)get_ctx_through_ctxinfo(info); + uint64_t __user *regs = (uint64_t *)&lasx_ctx->regs; + uint64_t __user *fcc = &lasx_ctx->fcc; + 
uint32_t __user *fcsr = &lasx_ctx->fcsr; + + while (1) { + lock_fpu_owner(); + if (is_lasx_enabled()) + err = save_hw_lasx_context(lasx_ctx); + else { + if (is_lsx_enabled()) + save_lsx(current); + else if (is_fpu_owner()) + save_fp(current); + err = copy_lasx_to_sigcontext(lasx_ctx); + } + unlock_fpu_owner(); + + err |= __put_user(LASX_CTX_MAGIC, &info->magic); + err |= __put_user(extctx->lasx.size, &info->size); + + if (likely(!err)) + break; + /* Touch the LASX context and try again */ + err = __put_user(0, &regs[0]) | + __put_user(0, &regs[32*4-1]) | + __put_user(0, fcc) | + __put_user(0, fcsr); + if (err) + return err; /* really bad sigcontext */ + } + + return err; +} + +static int protected_restore_lasx_context(struct extctx_layout *extctx) +{ + int err = 0, sig = 0, tmp __maybe_unused; + struct sctx_info __user *info = extctx->lasx.addr; + struct lasx_context __user *lasx_ctx = + (struct lasx_context *)get_ctx_through_ctxinfo(info); + uint64_t __user *regs = (uint64_t *)&lasx_ctx->regs; + uint64_t __user *fcc = &lasx_ctx->fcc; + uint32_t __user *fcsr = &lasx_ctx->fcsr; + + err = sig = fcsr_pending(fcsr); + if (err < 0) + return err; + + while (1) { + lock_fpu_owner(); + if (is_lasx_enabled()) + err = restore_hw_lasx_context(lasx_ctx); + else { + err = copy_lasx_from_sigcontext(lasx_ctx); + if (is_lsx_enabled()) + restore_lsx(current); + else if (is_fpu_owner()) + restore_fp(current); + } + unlock_fpu_owner(); + + if (likely(!err)) + break; + /* Touch the LASX context and try again */ + err = __get_user(tmp, &regs[0]) | + __get_user(tmp, &regs[32*4-1]) | + __get_user(tmp, fcc) | + __get_user(tmp, fcsr); + if (err) + break; /* really bad sigcontext */ + } + + return err ?: sig; +} + static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, struct extctx_layout *extctx) { @@ -240,7 +532,11 @@ static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, for (i = 1; i < 32; i++) err |= __put_user(regs->regs[i], &sc->sc_regs[i]); - if (extctx->fpu.addr) + if (extctx->lasx.addr) + err |= protected_save_lasx_context(extctx); + else if (extctx->lsx.addr) + err |= protected_save_lsx_context(extctx); + else if (extctx->fpu.addr) err |= protected_save_fpu_context(extctx); /* Set the "end" magic */ @@ -274,6 +570,20 @@ static int parse_extcontext(struct sigcontext __user *sc, struct extctx_layout * extctx->fpu.addr = info; break; + case LSX_CTX_MAGIC: + if (size < (sizeof(struct sctx_info) + + sizeof(struct lsx_context))) + goto invalid; + extctx->lsx.addr = info; + break; + + case LASX_CTX_MAGIC: + if (size < (sizeof(struct sctx_info) + + sizeof(struct lasx_context))) + goto invalid; + extctx->lasx.addr = info; + break; + default: goto invalid; } @@ -319,7 +629,11 @@ static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc for (i = 1; i < 32; i++) err |= __get_user(regs->regs[i], &sc->sc_regs[i]); - if (extctx.fpu.addr) + if (extctx.lasx.addr) + err |= protected_restore_lasx_context(&extctx); + else if (extctx.lsx.addr) + err |= protected_restore_lsx_context(&extctx); + else if (extctx.fpu.addr) err |= protected_restore_fpu_context(&extctx); bad: @@ -375,7 +689,13 @@ static unsigned long setup_extcontext(struct extctx_layout *extctx, unsigned lon extctx->size += extctx->end.size; if (extctx->flags & SC_USED_FP) { - if (cpu_has_fpu) + if (cpu_has_lasx && thread_lasx_context_live()) + new_sp = extframe_alloc(extctx, &extctx->lasx, + sizeof(struct lasx_context), LASX_CTX_ALIGN, new_sp); + else if (cpu_has_lsx && thread_lsx_context_live()) + 
new_sp = extframe_alloc(extctx, &extctx->lsx, + sizeof(struct lsx_context), LSX_CTX_ALIGN, new_sp); + else if (cpu_has_fpu) new_sp = extframe_alloc(extctx, &extctx->fpu, sizeof(struct fpu_context), FPU_CTX_ALIGN, new_sp); } diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c index ed167e244cda..8ea1bbcf13a7 100644 --- a/arch/loongarch/kernel/smp.c +++ b/arch/loongarch/kernel/smp.c @@ -8,6 +8,7 @@ * Copyright (C) 2000, 2001 Silicon Graphics, Inc. * Copyright (C) 2000, 2001, 2003 Broadcom Corporation */ +#include <linux/acpi.h> #include <linux/cpu.h> #include <linux/cpumask.h> #include <linux/init.h> @@ -37,10 +38,6 @@ EXPORT_SYMBOL(__cpu_number_map); int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */ EXPORT_SYMBOL(__cpu_logical_map); -/* Number of threads (siblings) per CPU core */ -int smp_num_siblings = 1; -EXPORT_SYMBOL(smp_num_siblings); - /* Representing the threads (siblings) of each logical CPU */ cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly; EXPORT_SYMBOL(cpu_sibling_map); @@ -118,7 +115,7 @@ static u32 ipi_read_clear(int cpu) action = iocsr_read32(LOONGARCH_IOCSR_IPI_STATUS); /* Clear the ipi register to clear the interrupt */ iocsr_write32(action, LOONGARCH_IOCSR_IPI_CLEAR); - smp_mb(); + wbflush(); return action; } @@ -210,6 +207,7 @@ static void __init fdt_smp_setup(void) } loongson_sysconf.nr_cpus = num_processors; + set_bit(0, &(loongson_sysconf.cores_io_master)); #endif } @@ -228,9 +226,12 @@ void __init loongson_prepare_cpus(unsigned int max_cpus) { int i = 0; + parse_acpi_topology(); + for (i = 0; i < loongson_sysconf.nr_cpus; i++) { set_cpu_present(i, true); csr_mail_send(0, __cpu_logical_map[i], 0); + cpu_data[i].global_id = __cpu_logical_map[i]; } per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; @@ -271,10 +272,10 @@ void loongson_init_secondary(void) numa_add_cpu(cpu); #endif per_cpu(cpu_state, cpu) = CPU_ONLINE; - cpu_data[cpu].core = - cpu_logical_map(cpu) % loongson_sysconf.cores_per_package; cpu_data[cpu].package = cpu_logical_map(cpu) / loongson_sysconf.cores_per_package; + cpu_data[cpu].core = pptt_enabled ? 
cpu_data[cpu].core : + cpu_logical_map(cpu) % loongson_sysconf.cores_per_package; } void loongson_smp_finish(void) @@ -380,14 +381,10 @@ static inline void set_cpu_sibling_map(int cpu) cpumask_set_cpu(cpu, &cpu_sibling_setup_map); - if (smp_num_siblings <= 1) - cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]); - else { - for_each_cpu(i, &cpu_sibling_setup_map) { - if (cpus_are_siblings(cpu, i)) { - cpumask_set_cpu(i, &cpu_sibling_map[cpu]); - cpumask_set_cpu(cpu, &cpu_sibling_map[i]); - } + for_each_cpu(i, &cpu_sibling_setup_map) { + if (cpus_are_siblings(cpu, i)) { + cpumask_set_cpu(i, &cpu_sibling_map[cpu]); + cpumask_set_cpu(cpu, &cpu_sibling_map[i]); } } } diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c index 8db26e4ca447..8fb5e7a77145 100644 --- a/arch/loongarch/kernel/traps.c +++ b/arch/loongarch/kernel/traps.c @@ -47,6 +47,7 @@ #include <asm/tlb.h> #include <asm/types.h> #include <asm/unwind.h> +#include <asm/uprobes.h> #include "access-helper.h" @@ -689,7 +690,6 @@ asmlinkage void noinstr do_bp(struct pt_regs *regs) if (regs->csr_prmd & CSR_PRMD_PIE) local_irq_enable(); - current->thread.trap_nr = read_csr_excode(); if (__get_inst(&opcode, (u32 *)era, user)) goto out_sigsegv; @@ -711,18 +711,17 @@ asmlinkage void noinstr do_bp(struct pt_regs *regs) else break; case BRK_UPROBE_BP: - if (notify_die(DIE_UPROBE, "Uprobe", regs, bcode, - current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP) + if (uprobe_breakpoint_handler(regs)) goto out; else break; case BRK_UPROBE_XOLBP: - if (notify_die(DIE_UPROBE_XOL, "Uprobe_XOL", regs, bcode, - current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP) + if (uprobe_singlestep_handler(regs)) goto out; else break; default: + current->thread.trap_nr = read_csr_excode(); if (notify_die(DIE_TRAP, "Break", regs, bcode, current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP) goto out; @@ -852,12 +851,67 @@ static void init_restore_fp(void) BUG_ON(!is_fp_enabled()); } +static void init_restore_lsx(void) +{ + enable_lsx(); + + if (!thread_lsx_context_live()) { + /* First time LSX context user */ + init_restore_fp(); + init_lsx_upper(); + set_thread_flag(TIF_LSX_CTX_LIVE); + } else { + if (!is_simd_owner()) { + if (is_fpu_owner()) { + restore_lsx_upper(current); + } else { + __own_fpu(); + restore_lsx(current); + } + } + } + + set_thread_flag(TIF_USEDSIMD); + + BUG_ON(!is_fp_enabled()); + BUG_ON(!is_lsx_enabled()); +} + +static void init_restore_lasx(void) +{ + enable_lasx(); + + if (!thread_lasx_context_live()) { + /* First time LASX context user */ + init_restore_lsx(); + init_lasx_upper(); + set_thread_flag(TIF_LASX_CTX_LIVE); + } else { + if (is_fpu_owner() || is_simd_owner()) { + init_restore_lsx(); + restore_lasx_upper(current); + } else { + __own_fpu(); + enable_lsx(); + restore_lasx(current); + } + } + + set_thread_flag(TIF_USEDSIMD); + + BUG_ON(!is_fp_enabled()); + BUG_ON(!is_lsx_enabled()); + BUG_ON(!is_lasx_enabled()); +} + asmlinkage void noinstr do_fpu(struct pt_regs *regs) { irqentry_state_t state = irqentry_enter(regs); local_irq_enable(); die_if_kernel("do_fpu invoked from kernel context!", regs); + BUG_ON(is_lsx_enabled()); + BUG_ON(is_lasx_enabled()); preempt_disable(); init_restore_fp(); @@ -872,9 +926,20 @@ asmlinkage void noinstr do_lsx(struct pt_regs *regs) irqentry_state_t state = irqentry_enter(regs); local_irq_enable(); - force_sig(SIGILL); - local_irq_disable(); + if (!cpu_has_lsx) { + force_sig(SIGILL); + goto out; + } + die_if_kernel("do_lsx invoked from kernel context!", regs); + BUG_ON(is_lasx_enabled()); + + 
preempt_disable(); + init_restore_lsx(); + preempt_enable(); + +out: + local_irq_disable(); irqentry_exit(regs, state); } @@ -883,9 +948,19 @@ asmlinkage void noinstr do_lasx(struct pt_regs *regs) irqentry_state_t state = irqentry_enter(regs); local_irq_enable(); - force_sig(SIGILL); - local_irq_disable(); + if (!cpu_has_lasx) { + force_sig(SIGILL); + goto out; + } + + die_if_kernel("do_lasx invoked from kernel context!", regs); + preempt_disable(); + init_restore_lasx(); + preempt_enable(); + +out: + local_irq_disable(); irqentry_exit(regs, state); } @@ -924,7 +999,7 @@ asmlinkage void cache_parity_error(void) /* For the moment, report the problem and hang. */ pr_err("Cache error exception:\n"); pr_err("csr_merrctl == %08x\n", csr_read32(LOONGARCH_CSR_MERRCTL)); - pr_err("csr_merrera == %016llx\n", csr_read64(LOONGARCH_CSR_MERRERA)); + pr_err("csr_merrera == %016lx\n", csr_read64(LOONGARCH_CSR_MERRERA)); panic("Can't handle the cache error!"); } diff --git a/arch/loongarch/kernel/unaligned.c b/arch/loongarch/kernel/unaligned.c index 85fae3d2d71a..3abf163dda05 100644 --- a/arch/loongarch/kernel/unaligned.c +++ b/arch/loongarch/kernel/unaligned.c @@ -485,8 +485,6 @@ static int __init debugfs_unaligned(void) struct dentry *d; d = debugfs_create_dir("loongarch", NULL); - if (IS_ERR_OR_NULL(d)) - return -ENOMEM; debugfs_create_u32("unaligned_instructions_user", S_IRUGO, d, &unaligned_instructions_user); diff --git a/arch/loongarch/kernel/uprobes.c b/arch/loongarch/kernel/uprobes.c new file mode 100644 index 000000000000..87abc7137b73 --- /dev/null +++ b/arch/loongarch/kernel/uprobes.c @@ -0,0 +1,153 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <linux/highmem.h> +#include <linux/ptrace.h> +#include <linux/sched.h> +#include <linux/uprobes.h> +#include <asm/cacheflush.h> + +#define UPROBE_TRAP_NR UINT_MAX + +int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, + struct mm_struct *mm, unsigned long addr) +{ + int idx; + union loongarch_instruction insn; + + if (addr & 0x3) + return -EILSEQ; + + for (idx = ARRAY_SIZE(auprobe->insn) - 1; idx >= 0; idx--) { + insn.word = auprobe->insn[idx]; + if (insns_not_supported(insn)) + return -EINVAL; + } + + if (insns_need_simulation(insn)) { + auprobe->ixol[0] = larch_insn_gen_nop(); + auprobe->simulate = true; + } else { + auprobe->ixol[0] = auprobe->insn[0]; + auprobe->simulate = false; + } + + auprobe->ixol[1] = UPROBE_XOLBP_INSN; + + return 0; +} + +int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) +{ + struct uprobe_task *utask = current->utask; + + utask->autask.saved_trap_nr = current->thread.trap_nr; + current->thread.trap_nr = UPROBE_TRAP_NR; + instruction_pointer_set(regs, utask->xol_vaddr); + user_enable_single_step(current); + + return 0; +} + +int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) +{ + struct uprobe_task *utask = current->utask; + + WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR); + current->thread.trap_nr = utask->autask.saved_trap_nr; + + if (auprobe->simulate) + instruction_pointer_set(regs, auprobe->resume_era); + else + instruction_pointer_set(regs, utask->vaddr + LOONGARCH_INSN_SIZE); + + user_disable_single_step(current); + + return 0; +} + +void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) +{ + struct uprobe_task *utask = current->utask; + + current->thread.trap_nr = utask->autask.saved_trap_nr; + instruction_pointer_set(regs, utask->vaddr); + user_disable_single_step(current); +} + +bool arch_uprobe_xol_was_trapped(struct 
task_struct *t) +{ + if (t->thread.trap_nr != UPROBE_TRAP_NR) + return true; + + return false; +} + +bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) +{ + union loongarch_instruction insn; + + if (!auprobe->simulate) + return false; + + insn.word = auprobe->insn[0]; + arch_simulate_insn(insn, regs); + auprobe->resume_era = regs->csr_era; + + return true; +} + +unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, + struct pt_regs *regs) +{ + unsigned long ra = regs->regs[1]; + + regs->regs[1] = trampoline_vaddr; + + return ra; +} + +bool arch_uretprobe_is_alive(struct return_instance *ret, + enum rp_check ctx, struct pt_regs *regs) +{ + if (ctx == RP_CHECK_CHAIN_CALL) + return regs->regs[3] <= ret->stack; + else + return regs->regs[3] < ret->stack; +} + +int arch_uprobe_exception_notify(struct notifier_block *self, + unsigned long val, void *data) +{ + return NOTIFY_DONE; +} + +bool uprobe_breakpoint_handler(struct pt_regs *regs) +{ + if (uprobe_pre_sstep_notifier(regs)) + return true; + + return false; +} + +bool uprobe_singlestep_handler(struct pt_regs *regs) +{ + if (uprobe_post_sstep_notifier(regs)) + return true; + + return false; +} + +unsigned long uprobe_get_swbp_addr(struct pt_regs *regs) +{ + return instruction_pointer(regs); +} + +void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, + void *src, unsigned long len) +{ + void *kaddr = kmap_local_page(page); + void *dst = kaddr + (vaddr & ~PAGE_MASK); + + memcpy(dst, src, len); + flush_icache_range((unsigned long)dst, (unsigned long)dst + len); + kunmap_local(kaddr); +} diff --git a/arch/loongarch/kernel/vdso.c b/arch/loongarch/kernel/vdso.c index eaebd2e0f725..14941e4be66d 100644 --- a/arch/loongarch/kernel/vdso.c +++ b/arch/loongarch/kernel/vdso.c @@ -14,6 +14,7 @@ #include <linux/random.h> #include <linux/sched.h> #include <linux/slab.h> +#include <linux/time_namespace.h> #include <linux/timekeeper_internal.h> #include <asm/page.h> @@ -26,12 +27,17 @@ extern char vdso_start[], vdso_end[]; /* Kernel-provided data used by the VDSO. */ static union { - u8 page[VDSO_DATA_SIZE]; + u8 page[PAGE_SIZE]; + struct vdso_data data[CS_BASES]; +} generic_vdso_data __page_aligned_data; + +static union { + u8 page[LOONGARCH_VDSO_DATA_SIZE]; struct loongarch_vdso_data vdata; } loongarch_vdso_data __page_aligned_data; static struct page *vdso_pages[] = { NULL }; -struct vdso_data *vdso_data = loongarch_vdso_data.vdata.data; +struct vdso_data *vdso_data = generic_vdso_data.data; struct vdso_pcpu_data *vdso_pdata = loongarch_vdso_data.vdata.pdata; static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma) @@ -41,6 +47,43 @@ static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struc return 0; } +static vm_fault_t vvar_fault(const struct vm_special_mapping *sm, + struct vm_area_struct *vma, struct vm_fault *vmf) +{ + unsigned long pfn; + struct page *timens_page = find_timens_vvar_page(vma); + + switch (vmf->pgoff) { + case VVAR_GENERIC_PAGE_OFFSET: + if (!timens_page) + pfn = sym_to_pfn(vdso_data); + else + pfn = page_to_pfn(timens_page); + break; +#ifdef CONFIG_TIME_NS + case VVAR_TIMENS_PAGE_OFFSET: + /* + * If a task belongs to a time namespace then a namespace specific + * VVAR is mapped with the VVAR_GENERIC_PAGE_OFFSET and the real + * VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET offset. + * See also the comment near timens_setup_vdso_data(). 
+ */ + if (!timens_page) + return VM_FAULT_SIGBUS; + else + pfn = sym_to_pfn(vdso_data); + break; +#endif /* CONFIG_TIME_NS */ + case VVAR_LOONGARCH_PAGES_START ... VVAR_LOONGARCH_PAGES_END: + pfn = sym_to_pfn(&loongarch_vdso_data) + vmf->pgoff - VVAR_LOONGARCH_PAGES_START; + break; + default: + return VM_FAULT_SIGBUS; + } + + return vmf_insert_pfn(vma, vmf->address, pfn); +} + struct loongarch_vdso_info vdso_info = { .vdso = vdso_start, .size = PAGE_SIZE, @@ -51,6 +94,7 @@ struct loongarch_vdso_info vdso_info = { }, .data_mapping = { .name = "[vvar]", + .fault = vvar_fault, }, .offset_sigreturn = vdso_offset_sigreturn, }; @@ -73,6 +117,37 @@ static int __init init_vdso(void) } subsys_initcall(init_vdso); +#ifdef CONFIG_TIME_NS +struct vdso_data *arch_get_vdso_data(void *vvar_page) +{ + return (struct vdso_data *)(vvar_page); +} + +/* + * The vvar mapping contains data for a specific time namespace, so when a + * task changes namespace we must unmap its vvar data for the old namespace. + * Subsequent faults will map in data for the new namespace. + * + * For more details see timens_setup_vdso_data(). + */ +int vdso_join_timens(struct task_struct *task, struct time_namespace *ns) +{ + struct mm_struct *mm = task->mm; + struct vm_area_struct *vma; + + VMA_ITERATOR(vmi, mm, 0); + + mmap_read_lock(mm); + for_each_vma(vmi, vma) { + if (vma_is_special_mapping(vma, &vdso_info.data_mapping)) + zap_vma_pages(vma); + } + mmap_read_unlock(mm); + + return 0; +} +#endif + static unsigned long vdso_base(void) { unsigned long base = STACK_TOP; @@ -88,7 +163,7 @@ static unsigned long vdso_base(void) int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { int ret; - unsigned long vvar_size, size, data_addr, vdso_addr; + unsigned long size, data_addr, vdso_addr; struct mm_struct *mm = current->mm; struct vm_area_struct *vma; struct loongarch_vdso_info *info = current->thread.vdso; @@ -100,32 +175,23 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) * Determine total area size. This includes the VDSO data itself * and the data pages. */ - vvar_size = VDSO_DATA_SIZE; - size = vvar_size + info->size; + size = VVAR_SIZE + info->size; data_addr = get_unmapped_area(NULL, vdso_base(), size, 0, 0); if (IS_ERR_VALUE(data_addr)) { ret = data_addr; goto out; } - vdso_addr = data_addr + VDSO_DATA_SIZE; - vma = _install_special_mapping(mm, data_addr, vvar_size, - VM_READ | VM_MAYREAD, + vma = _install_special_mapping(mm, data_addr, VVAR_SIZE, + VM_READ | VM_MAYREAD | VM_PFNMAP, &info->data_mapping); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto out; } - /* Map VDSO data page. */ - ret = remap_pfn_range(vma, data_addr, - virt_to_phys(&loongarch_vdso_data) >> PAGE_SHIFT, - vvar_size, PAGE_READONLY); - if (ret) - goto out; - - /* Map VDSO code page. 
*/ + vdso_addr = data_addr + VVAR_SIZE; vma = _install_special_mapping(mm, vdso_addr, info->size, VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, &info->code_mapping); diff --git a/arch/loongarch/kernel/vmlinux.lds.S b/arch/loongarch/kernel/vmlinux.lds.S index 0c7b041be9d8..b1686afcf876 100644 --- a/arch/loongarch/kernel/vmlinux.lds.S +++ b/arch/loongarch/kernel/vmlinux.lds.S @@ -136,6 +136,15 @@ SECTIONS DWARF_DEBUG ELF_DETAILS +#ifdef CONFIG_EFI_STUB + /* header symbols */ + _kernel_asize = _end - _text; + _kernel_fsize = _edata - _text; + _kernel_vsize = _end - __initdata_begin; + _kernel_rsize = _edata - __initdata_begin; + _kernel_offset = kernel_offset - _text; +#endif + .gptab.sdata : { *(.gptab.data) *(.gptab.sdata) |
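
The cpu-probe.c hunk above exports the new LSX, LASX and PTW capabilities to userspace through elf_hwcap. A minimal sketch of testing those bits at runtime, assuming the HWCAP_LOONGARCH_* names from the uapi <asm/hwcap.h> header that this series extends (the sketch itself is not part of the patch):

#include <stdio.h>
#include <sys/auxv.h>	/* getauxval(), AT_HWCAP */
#include <asm/hwcap.h>	/* HWCAP_LOONGARCH_LSX/LASX/PTW */

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	/* Each bit is set by cpu_probe_common() when CPUCFG2 reports the feature */
	printf("lsx : %s\n", (hwcap & HWCAP_LOONGARCH_LSX)  ? "yes" : "no");
	printf("lasx: %s\n", (hwcap & HWCAP_LOONGARCH_LASX) ? "yes" : "no");
	printf("ptw : %s\n", (hwcap & HWCAP_LOONGARCH_PTW)  ? "yes" : "no");

	return 0;
}

The same information shows up in /proc/cpuinfo, whose Features line the proc.c hunk extends with "ptw".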
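
The ptrace.c hunk likewise makes the vector state visible to debuggers via the new regsets (32 registers of 16 bytes for LSX, 32 bytes for LASX, per the REGSET_LSX/REGSET_LASX descriptors). A hedged sketch of reading the LSX regset from a tracer, assuming NT_LOONGARCH_LSX is available from <elf.h> and that pid names a tracee the caller has already attached to and stopped:

#include <elf.h>	/* NT_LOONGARCH_LSX */
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>	/* struct iovec */

long read_lsx(pid_t pid, uint8_t vr[32][16])
{
	struct iovec iov = {
		.iov_base = vr,
		.iov_len  = 32 * 16,	/* .n = NUM_FPU_REGS, .size = 16 */
	};

	/* If less vector context is live than requested, the kernel copies
	 * what exists and pads the remainder with 0xff bytes, as done by
	 * copy_pad_fprs() in the hunk above. */
	return ptrace(PTRACE_GETREGSET, pid, NT_LOONGARCH_LSX, &iov);
}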