author     Linus Torvalds <torvalds@linux-foundation.org>   2020-06-04 20:14:18 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2020-06-04 20:14:18 -0700
commit     435faf5c218a47fd6258187f62d9bb1009717896
tree       2345207a7a739b4519b912ceeda15847714a6fd9 /arch/riscv/kernel
parent     571d54ed91c0fae174d933683c0c2e11c84843d9
parent     09c0533d129ce460e6214c14f744ddbac3733889
Merge tag 'riscv-for-linus-5.8-mw0' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux
Pull RISC-V updates from Palmer Dabbelt:
- The remainder of the code necessary to support the Kendryte K210:
* Support for building device trees into the kernel, as the K210
doesn't have a bootloader that provides one
* A K210 device tree and the associated defconfig update
* Support for skipping PMP initialization on systems that trap on
PMP accesses rather than treating them as WARL
- Support for KGDB
- Improvements to text patching
- Some cleanups to the SiFive L2 cache driver
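The L2 cache cleanups listed above ride on a small registration interface added to cacheinfo.c (visible in the diff below): a driver hands the core a riscv_cacheinfo_ops and cache_get_priv_group() forwards each query to it. The driver-side sketch below is illustrative only: it assumes struct riscv_cacheinfo_ops (defined in <asm/cacheinfo.h>, which this diff does not show) carries just the get_priv_group callback used here, and every my_* identifier is hypothetical.

#include <linux/cacheinfo.h>
#include <linux/init.h>
#include <asm/cacheinfo.h>

/* Empty sysfs attribute group; stands in for real L2 control files */
static struct attribute *my_l2_attrs[] = {
	NULL,
};

static const struct attribute_group my_l2_attr_group = {
	.attrs = my_l2_attrs,
};

/* Called back by cache_get_priv_group() for every cacheinfo leaf */
static const struct attribute_group *
my_l2_get_priv_group(struct cacheinfo *this_leaf)
{
	/* Decorate only the L2 leaf; other levels get no extra group */
	if (this_leaf->level == 2)
		return &my_l2_attr_group;
	return NULL;
}

static struct riscv_cacheinfo_ops my_cacheinfo_ops = {
	.get_priv_group = my_l2_get_priv_group,
};

static int __init my_l2_init(void)
{
	/* Route the core's priv-group lookups through this driver */
	riscv_set_cacheinfo_ops(&my_cacheinfo_ops);
	return 0;
}
device_initcall(my_l2_init);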
* tag 'riscv-for-linus-5.8-mw0' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
soc: sifive: l2 cache: Mark l2_get_priv_group as static
soc: sifive: l2 cache: Eliminate an unsigned zero compare warning
riscv: Add support to determine the number of L2 cache ways enabled
riscv: cacheinfo: Implement cache_get_priv_group with a generic ops structure
riscv: Use text_mutex instead of patch_lock
riscv: Use NOKPROBE_SYMBOL() instead of __kprobes annotation
riscv: Remove the 'riscv_' prefix of function name
riscv: Add SW single-step support for KDB
riscv: Use the XML target descriptions to report 3 system registers
riscv: Add KGDB support
kgdb: Add kgdb_has_hit_break function
RISC-V: Skip setting up PMPs on traps
riscv: K210: Update defconfig
riscv: K210: Add a built-in device tree
riscv: Allow device trees to be built into the kernel
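The built-in device tree commits above introduce the __soc_builtin_dtb_table that soc_lookup_builtin_dtb() scans in soc.c below, matching each entry against the mvendorid/marchid/mimpid CSRs. Here is a hedged sketch of what a table entry could look like, inferred from soc.c and vmlinux.lds.S in this diff; the real declaration helper lives in <asm/soc.h>, which is not part of this diff, and the mysoc names and ID values are hypothetical.

#include <linux/init.h>
#include <asm/soc.h>

/* The DT build emits this symbol for a dts compiled into the kernel */
extern char __dtb_mysoc_begin[];

static void *__init mysoc_dtb_func(void)
{
	/* Hand the boot code the address of the built-in blob */
	return __dtb_mysoc_begin;
}

/*
 * Placed in the __soc_builtin_dtb_table section collected by the linker
 * script hunk below; matched against mvendorid/marchid/mimpid at boot.
 */
static const struct soc_builtin_dtb mysoc_builtin_dtb
	__used __section(__soc_builtin_dtb_table) = {
	.vendor_id = 0x123,	/* hypothetical mvendorid */
	.arch_id   = 0x456,	/* hypothetical marchid */
	.imp_id    = 0x789,	/* hypothetical mimpid */
	.dtb_func  = mysoc_dtb_func,
};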
Diffstat (limited to 'arch/riscv/kernel')
-rw-r--r--  arch/riscv/kernel/Makefile       |   1
-rw-r--r--  arch/riscv/kernel/cacheinfo.c    |  17
-rw-r--r--  arch/riscv/kernel/ftrace.c       |  15
-rw-r--r--  arch/riscv/kernel/head.S         |  11
-rw-r--r--  arch/riscv/kernel/kgdb.c         | 390
-rw-r--r--  arch/riscv/kernel/patch.c        |  46
-rw-r--r--  arch/riscv/kernel/setup.c        |   4
-rw-r--r--  arch/riscv/kernel/soc.c          |  27
-rw-r--r--  arch/riscv/kernel/traps.c        |   5
-rw-r--r--  arch/riscv/kernel/vmlinux.lds.S  |   5
10 files changed, 500 insertions, 21 deletions
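One note before the diff itself, on the text-patching rework it contains: patch.c below replaces the old patch_lock spinlock with an assertion that the caller holds text_mutex, and ftrace now takes that mutex in ftrace_arch_code_modify_prepare(). The following is a minimal sketch of the resulting calling convention; patch_one_nop() and its target address are hypothetical, while patch_text_nosync() and text_mutex are the real symbols this diff touches.

#include <linux/memory.h>	/* text_mutex */
#include <linux/mutex.h>
#include <asm/patch.h>

static int patch_one_nop(void *addr)
{
	u32 nop = 0x00000013;	/* RISC-V "addi x0, x0, 0", a canonical NOP */
	int ret;

	/* patch_insn_write() lockdep-asserts that this mutex is held */
	mutex_lock(&text_mutex);
	ret = patch_text_nosync(addr, &nop, sizeof(nop));
	mutex_unlock(&text_mutex);

	return ret;
}

For a cross-CPU-safe variant, patch_text() in the same file wraps the write in stop_machine() so that no other hart can execute the instruction mid-update.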
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index d8bbd3207100..b355cf485671 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -51,5 +51,6 @@ ifeq ($(CONFIG_RISCV_SBI), y)
 obj-$(CONFIG_SMP) += cpu_ops_sbi.o
 endif
 obj-$(CONFIG_HOTPLUG_CPU)	+= cpu-hotplug.o
+obj-$(CONFIG_KGDB)		+= kgdb.o
 
 clean:
diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c
index 4c90c07d8c39..bd0f122965c3 100644
--- a/arch/riscv/kernel/cacheinfo.c
+++ b/arch/riscv/kernel/cacheinfo.c
@@ -7,6 +7,23 @@
 #include <linux/cpu.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <asm/cacheinfo.h>
+
+static struct riscv_cacheinfo_ops *rv_cache_ops;
+
+void riscv_set_cacheinfo_ops(struct riscv_cacheinfo_ops *ops)
+{
+	rv_cache_ops = ops;
+}
+EXPORT_SYMBOL_GPL(riscv_set_cacheinfo_ops);
+
+const struct attribute_group *
+cache_get_priv_group(struct cacheinfo *this_leaf)
+{
+	if (rv_cache_ops && rv_cache_ops->get_priv_group)
+		return rv_cache_ops->get_priv_group(this_leaf);
+	return NULL;
+}
 
 static void ci_leaf_init(struct cacheinfo *this_leaf,
 			 struct device_node *node,
diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
index ce69b34ff55d..08396614d6f4 100644
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -7,10 +7,23 @@
 #include <linux/ftrace.h>
 #include <linux/uaccess.h>
+#include <linux/memory.h>
 #include <asm/cacheflush.h>
 #include <asm/patch.h>
 
 #ifdef CONFIG_DYNAMIC_FTRACE
+int ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
+{
+	mutex_lock(&text_mutex);
+	return 0;
+}
+
+int ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
+{
+	mutex_unlock(&text_mutex);
+	return 0;
+}
+
 static int ftrace_check_current_call(unsigned long hook_pos,
 				     unsigned int *expected)
 {
@@ -51,7 +64,7 @@ static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
 	make_call(hook_pos, target, call);
 
 	/* Replace the auipc-jalr pair at once. Return -EPERM on write error. */
-	if (riscv_patch_text_nosync
+	if (patch_text_nosync
 	    ((void *)hook_pos, enable ? call : nops, MCOUNT_INSN_SIZE))
 		return -EPERM;
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 98a406474e7d..7ed1b22950fd 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -161,11 +161,20 @@ ENTRY(_start_kernel)
 	/* Reset all registers except ra, a0, a1 */
 	call reset_regs
 
-	/* Setup a PMP to permit access to all of memory. */
+	/*
+	 * Setup a PMP to permit access to all of memory.  Some machines may
+	 * not implement PMPs, so we set up a quick trap handler to just skip
+	 * touching the PMPs on any trap.
+	 */
+	la a0, pmp_done
+	csrw CSR_TVEC, a0
+
 	li a0, -1
 	csrw CSR_PMPADDR0, a0
 	li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
 	csrw CSR_PMPCFG0, a0
+.align 2
+pmp_done:
 
 	/*
 	 * The hartid in a0 is expected later on, and we have no firmware
diff --git a/arch/riscv/kernel/kgdb.c b/arch/riscv/kernel/kgdb.c
new file mode 100644
index 000000000000..f16ade84a11f
--- /dev/null
+++ b/arch/riscv/kernel/kgdb.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 SiFive
+ */
+
+#include <linux/ptrace.h>
+#include <linux/kdebug.h>
+#include <linux/bug.h>
+#include <linux/kgdb.h>
+#include <linux/irqflags.h>
+#include <linux/string.h>
+#include <asm/cacheflush.h>
+#include <asm/gdb_xml.h>
+#include <asm/parse_asm.h>
+
+enum {
+	NOT_KGDB_BREAK = 0,
+	KGDB_SW_BREAK,
+	KGDB_COMPILED_BREAK,
+	KGDB_SW_SINGLE_STEP
+};
+
+static unsigned long stepped_address;
+static unsigned int stepped_opcode;
+
+#if __riscv_xlen == 32
+/* C.JAL is an RV32C-only instruction */
+DECLARE_INSN(c_jal, MATCH_C_JAL, MASK_C_JAL)
+#else
+#define is_c_jal_insn(opcode) 0
+#endif
+DECLARE_INSN(jalr, MATCH_JALR, MASK_JALR)
+DECLARE_INSN(jal, MATCH_JAL, MASK_JAL)
+DECLARE_INSN(c_jr, MATCH_C_JR, MASK_C_JR)
+DECLARE_INSN(c_jalr, MATCH_C_JALR, MASK_C_JALR)
+DECLARE_INSN(c_j, MATCH_C_J, MASK_C_J)
+DECLARE_INSN(beq, MATCH_BEQ, MASK_BEQ)
+DECLARE_INSN(bne, MATCH_BNE, MASK_BNE)
+DECLARE_INSN(blt, MATCH_BLT, MASK_BLT)
+DECLARE_INSN(bge, MATCH_BGE, MASK_BGE)
+DECLARE_INSN(bltu, MATCH_BLTU, MASK_BLTU)
+DECLARE_INSN(bgeu, MATCH_BGEU, MASK_BGEU)
+DECLARE_INSN(c_beqz, MATCH_C_BEQZ, MASK_C_BEQZ)
+DECLARE_INSN(c_bnez, MATCH_C_BNEZ, MASK_C_BNEZ)
+DECLARE_INSN(sret, MATCH_SRET, MASK_SRET)
+
+int decode_register_index(unsigned long opcode, int offset)
+{
+	return (opcode >> offset) & 0x1F;
+}
+
+int decode_register_index_short(unsigned long opcode, int offset)
+{
+	return ((opcode >> offset) & 0x7) + 8;
+}
+
+/* Calculate the new address for after a step */
+int get_step_address(struct pt_regs *regs, unsigned long *next_addr)
+{
+	unsigned long pc = regs->epc;
+	unsigned long *regs_ptr = (unsigned long *)regs;
+	unsigned int rs1_num, rs2_num;
+	int op_code;
+
+	if (probe_kernel_address((void *)pc, op_code))
+		return -EINVAL;
+	if ((op_code & __INSN_LENGTH_MASK) != __INSN_LENGTH_GE_32) {
+		if (is_c_jalr_insn(op_code) || is_c_jr_insn(op_code)) {
+			rs1_num = decode_register_index(op_code, RVC_C2_RS1_OPOFF);
+			*next_addr = regs_ptr[rs1_num];
+		} else if (is_c_j_insn(op_code) || is_c_jal_insn(op_code)) {
+			*next_addr = EXTRACT_RVC_J_IMM(op_code) + pc;
+		} else if (is_c_beqz_insn(op_code)) {
+			rs1_num = decode_register_index_short(op_code,
+							      RVC_C1_RS1_OPOFF);
+			if (!rs1_num || regs_ptr[rs1_num] == 0)
+				*next_addr = EXTRACT_RVC_B_IMM(op_code) + pc;
+			else
+				*next_addr = pc + 2;
+		} else if (is_c_bnez_insn(op_code)) {
+			rs1_num =
+			    decode_register_index_short(op_code, RVC_C1_RS1_OPOFF);
+			if (rs1_num && regs_ptr[rs1_num] != 0)
+				*next_addr = EXTRACT_RVC_B_IMM(op_code) + pc;
+			else
+				*next_addr = pc + 2;
+		} else {
+			*next_addr = pc + 2;
+		}
+	} else {
+		if ((op_code & __INSN_OPCODE_MASK) == __INSN_BRANCH_OPCODE) {
+			bool result = false;
+			long imm = EXTRACT_BTYPE_IMM(op_code);
+			unsigned long rs1_val = 0, rs2_val = 0;
+
+			rs1_num = decode_register_index(op_code, RVG_RS1_OPOFF);
+			rs2_num = decode_register_index(op_code, RVG_RS2_OPOFF);
+			if (rs1_num)
+				rs1_val = regs_ptr[rs1_num];
+			if (rs2_num)
+				rs2_val = regs_ptr[rs2_num];
+
+			if (is_beq_insn(op_code))
+				result = (rs1_val == rs2_val) ? true : false;
+			else if (is_bne_insn(op_code))
+				result = (rs1_val != rs2_val) ? true : false;
+			else if (is_blt_insn(op_code))
+				result =
+				    ((long)rs1_val <
+				     (long)rs2_val) ? true : false;
+			else if (is_bge_insn(op_code))
+				result =
+				    ((long)rs1_val >=
+				     (long)rs2_val) ? true : false;
+			else if (is_bltu_insn(op_code))
+				result = (rs1_val < rs2_val) ? true : false;
+			else if (is_bgeu_insn(op_code))
+				result = (rs1_val >= rs2_val) ? true : false;
+			if (result)
+				*next_addr = imm + pc;
+			else
+				*next_addr = pc + 4;
+		} else if (is_jal_insn(op_code)) {
+			*next_addr = EXTRACT_JTYPE_IMM(op_code) + pc;
+		} else if (is_jalr_insn(op_code)) {
+			rs1_num = decode_register_index(op_code, RVG_RS1_OPOFF);
+			if (rs1_num)
+				*next_addr = ((unsigned long *)regs)[rs1_num];
+			*next_addr += EXTRACT_ITYPE_IMM(op_code);
+		} else if (is_sret_insn(op_code)) {
+			*next_addr = pc;
+		} else {
+			*next_addr = pc + 4;
+		}
+	}
+	return 0;
+}
+
+int do_single_step(struct pt_regs *regs)
+{
+	/* Determine where the target instruction will send us to */
+	unsigned long addr = 0;
+	int error = get_step_address(regs, &addr);
+
+	if (error)
+		return error;
+
+	/* Store the op code in the stepped address */
+	error = probe_kernel_address((void *)addr, stepped_opcode);
+	if (error)
+		return error;
+
+	stepped_address = addr;
+
+	/* Replace the op code with the break instruction */
+	error = probe_kernel_write((void *)stepped_address,
+				   arch_kgdb_ops.gdb_bpt_instr,
+				   BREAK_INSTR_SIZE);
+	/* Flush and return */
+	if (!error) {
+		flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
+		kgdb_single_step = 1;
+		atomic_set(&kgdb_cpu_doing_single_step,
+			   raw_smp_processor_id());
+	} else {
+		stepped_address = 0;
+		stepped_opcode = 0;
+	}
+	return error;
+}
+
+/* Undo a single step */
+static void undo_single_step(struct pt_regs *regs)
+{
+	if (stepped_opcode != 0) {
+		probe_kernel_write((void *)stepped_address,
+				   (void *)&stepped_opcode, BREAK_INSTR_SIZE);
+		flush_icache_range(stepped_address,
+				   stepped_address + BREAK_INSTR_SIZE);
+	}
+	stepped_address = 0;
+	stepped_opcode = 0;
+	kgdb_single_step = 0;
+	atomic_set(&kgdb_cpu_doing_single_step, -1);
+}
+
+struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
+	{DBG_REG_ZERO, GDB_SIZEOF_REG, -1},
+	{DBG_REG_RA, GDB_SIZEOF_REG, offsetof(struct pt_regs, ra)},
+	{DBG_REG_SP, GDB_SIZEOF_REG, offsetof(struct pt_regs, sp)},
+	{DBG_REG_GP, GDB_SIZEOF_REG, offsetof(struct pt_regs, gp)},
+	{DBG_REG_TP, GDB_SIZEOF_REG, offsetof(struct pt_regs, tp)},
+	{DBG_REG_T0, GDB_SIZEOF_REG, offsetof(struct pt_regs, t0)},
+	{DBG_REG_T1, GDB_SIZEOF_REG, offsetof(struct pt_regs, t1)},
+	{DBG_REG_T2, GDB_SIZEOF_REG, offsetof(struct pt_regs, t2)},
+	{DBG_REG_FP, GDB_SIZEOF_REG, offsetof(struct pt_regs, s0)},
+	{DBG_REG_S1, GDB_SIZEOF_REG, offsetof(struct pt_regs, s1)},
+	{DBG_REG_A0, GDB_SIZEOF_REG, offsetof(struct pt_regs, a0)},
+	{DBG_REG_A1, GDB_SIZEOF_REG, offsetof(struct pt_regs, a1)},
+	{DBG_REG_A2, GDB_SIZEOF_REG, offsetof(struct pt_regs, a2)},
+	{DBG_REG_A3, GDB_SIZEOF_REG, offsetof(struct pt_regs, a3)},
+	{DBG_REG_A4, GDB_SIZEOF_REG, offsetof(struct pt_regs, a4)},
+	{DBG_REG_A5, GDB_SIZEOF_REG, offsetof(struct pt_regs, a5)},
+	{DBG_REG_A6, GDB_SIZEOF_REG, offsetof(struct pt_regs, a6)},
+	{DBG_REG_A7, GDB_SIZEOF_REG, offsetof(struct pt_regs, a7)},
+	{DBG_REG_S2, GDB_SIZEOF_REG, offsetof(struct pt_regs, s2)},
+	{DBG_REG_S3, GDB_SIZEOF_REG, offsetof(struct pt_regs, s3)},
+	{DBG_REG_S4, GDB_SIZEOF_REG, offsetof(struct pt_regs, s4)},
+	{DBG_REG_S5, GDB_SIZEOF_REG, offsetof(struct pt_regs, s5)},
+	{DBG_REG_S6, GDB_SIZEOF_REG, offsetof(struct pt_regs, s6)},
+	{DBG_REG_S7, GDB_SIZEOF_REG, offsetof(struct pt_regs, s7)},
+	{DBG_REG_S8, GDB_SIZEOF_REG, offsetof(struct pt_regs, s8)},
+	{DBG_REG_S9, GDB_SIZEOF_REG, offsetof(struct pt_regs, s9)},
+	{DBG_REG_S10, GDB_SIZEOF_REG, offsetof(struct pt_regs, s10)},
+	{DBG_REG_S11, GDB_SIZEOF_REG, offsetof(struct pt_regs, s11)},
+	{DBG_REG_T3, GDB_SIZEOF_REG, offsetof(struct pt_regs, t3)},
+	{DBG_REG_T4, GDB_SIZEOF_REG, offsetof(struct pt_regs, t4)},
+	{DBG_REG_T5, GDB_SIZEOF_REG, offsetof(struct pt_regs, t5)},
+	{DBG_REG_T6, GDB_SIZEOF_REG, offsetof(struct pt_regs, t6)},
+	{DBG_REG_EPC, GDB_SIZEOF_REG, offsetof(struct pt_regs, epc)},
+	{DBG_REG_STATUS, GDB_SIZEOF_REG, offsetof(struct pt_regs, status)},
+	{DBG_REG_BADADDR, GDB_SIZEOF_REG, offsetof(struct pt_regs, badaddr)},
+	{DBG_REG_CAUSE, GDB_SIZEOF_REG, offsetof(struct pt_regs, cause)},
+};
+
+char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
+{
+	if (regno >= DBG_MAX_REG_NUM || regno < 0)
+		return NULL;
+
+	if (dbg_reg_def[regno].offset != -1)
+		memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
+		       dbg_reg_def[regno].size);
+	else
+		memset(mem, 0, dbg_reg_def[regno].size);
+	return dbg_reg_def[regno].name;
+}
+
+int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
+{
+	if (regno >= DBG_MAX_REG_NUM || regno < 0)
+		return -EINVAL;
+
+	if (dbg_reg_def[regno].offset != -1)
+		memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
+		       dbg_reg_def[regno].size);
+	return 0;
+}
+
+void
+sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
+{
+	/* Initialize to zero */
+	memset((char *)gdb_regs, 0, NUMREGBYTES);
+
+	gdb_regs[DBG_REG_SP_OFF] = task->thread.sp;
+	gdb_regs[DBG_REG_FP_OFF] = task->thread.s[0];
+	gdb_regs[DBG_REG_S1_OFF] = task->thread.s[1];
+	gdb_regs[DBG_REG_S2_OFF] = task->thread.s[2];
+	gdb_regs[DBG_REG_S3_OFF] = task->thread.s[3];
+	gdb_regs[DBG_REG_S4_OFF] = task->thread.s[4];
+	gdb_regs[DBG_REG_S5_OFF] = task->thread.s[5];
+	gdb_regs[DBG_REG_S6_OFF] = task->thread.s[6];
+	gdb_regs[DBG_REG_S7_OFF] = task->thread.s[7];
+	gdb_regs[DBG_REG_S8_OFF] = task->thread.s[8];
+	gdb_regs[DBG_REG_S9_OFF] = task->thread.s[10];
+	gdb_regs[DBG_REG_S10_OFF] = task->thread.s[11];
+	gdb_regs[DBG_REG_EPC_OFF] = task->thread.ra;
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+	regs->epc = pc;
+}
+
+void kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer,
+				char *remcom_out_buffer)
+{
+	if (!strncmp(remcom_in_buffer, gdb_xfer_read_target,
+		     sizeof(gdb_xfer_read_target)))
+		strcpy(remcom_out_buffer, riscv_gdb_stub_target_desc);
+	else if (!strncmp(remcom_in_buffer, gdb_xfer_read_cpuxml,
+			  sizeof(gdb_xfer_read_cpuxml)))
+		strcpy(remcom_out_buffer, riscv_gdb_stub_cpuxml);
+}
+
+static inline void kgdb_arch_update_addr(struct pt_regs *regs,
+					 char *remcom_in_buffer)
+{
+	unsigned long addr;
+	char *ptr;
+
+	ptr = &remcom_in_buffer[1];
+	if (kgdb_hex2long(&ptr, &addr))
+		regs->epc = addr;
+}
+
+int kgdb_arch_handle_exception(int vector, int signo, int err_code,
+			       char *remcom_in_buffer, char *remcom_out_buffer,
+			       struct pt_regs *regs)
+{
+	int err = 0;
+
+	undo_single_step(regs);
+
+	switch (remcom_in_buffer[0]) {
+	case 'c':
+	case 'D':
+	case 'k':
+		if (remcom_in_buffer[0] == 'c')
+			kgdb_arch_update_addr(regs, remcom_in_buffer);
+		break;
+	case 's':
+		kgdb_arch_update_addr(regs, remcom_in_buffer);
+		err = do_single_step(regs);
+		break;
+	default:
+		err = -1;
+	}
+	return err;
+}
+
+int kgdb_riscv_kgdbbreak(unsigned long addr)
+{
+	if (stepped_address == addr)
+		return KGDB_SW_SINGLE_STEP;
+	if (atomic_read(&kgdb_setting_breakpoint))
+		if (addr == (unsigned long)&kgdb_compiled_break)
+			return KGDB_COMPILED_BREAK;
+
+	return kgdb_has_hit_break(addr);
+}
+
+static int kgdb_riscv_notify(struct notifier_block *self, unsigned long cmd,
+			     void *ptr)
+{
+	struct die_args *args = (struct die_args *)ptr;
+	struct pt_regs *regs = args->regs;
+	unsigned long flags;
+	int type;
+
+	if (user_mode(regs))
+		return NOTIFY_DONE;
+
+	type = kgdb_riscv_kgdbbreak(regs->epc);
+	if (type == NOT_KGDB_BREAK && cmd == DIE_TRAP)
+		return NOTIFY_DONE;
+
+	local_irq_save(flags);
+
+	if (kgdb_handle_exception(type == KGDB_SW_SINGLE_STEP ? 0 : 1,
+				  args->signr, cmd, regs))
+		return NOTIFY_DONE;
+
+	if (type == KGDB_COMPILED_BREAK)
+		regs->epc += 4;
+
+	local_irq_restore(flags);
+
+	return NOTIFY_STOP;
+}
+
+static struct notifier_block kgdb_notifier = {
+	.notifier_call = kgdb_riscv_notify,
+};
+
+int kgdb_arch_init(void)
+{
+	register_die_notifier(&kgdb_notifier);
+
+	return 0;
+}
+
+void kgdb_arch_exit(void)
+{
+	unregister_die_notifier(&kgdb_notifier);
+}
+
+/*
+ * Global data
+ */
+#ifdef CONFIG_RISCV_ISA_C
+const struct kgdb_arch arch_kgdb_ops = {
+	.gdb_bpt_instr = {0x02, 0x90},	/* c.ebreak */
+};
+#else
+const struct kgdb_arch arch_kgdb_ops = {
+	.gdb_bpt_instr = {0x73, 0x00, 0x10, 0x00},	/* ebreak */
+};
+#endif
diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
index 8a4fc65ee022..5805791cd5b5 100644
--- a/arch/riscv/kernel/patch.c
+++ b/arch/riscv/kernel/patch.c
@@ -5,22 +5,21 @@
 #include <linux/spinlock.h>
 #include <linux/mm.h>
+#include <linux/memory.h>
 #include <linux/uaccess.h>
 #include <linux/stop_machine.h>
 #include <asm/kprobes.h>
 #include <asm/cacheflush.h>
 #include <asm/fixmap.h>
 
-struct riscv_insn_patch {
+struct patch_insn {
 	void *addr;
 	u32 insn;
 	atomic_t cpu_count;
 };
 
 #ifdef CONFIG_MMU
-static DEFINE_RAW_SPINLOCK(patch_lock);
-
-static void __kprobes *patch_map(void *addr, int fixmap)
+static void *patch_map(void *addr, int fixmap)
 {
 	uintptr_t uintaddr = (uintptr_t) addr;
 	struct page *page;
@@ -37,20 +36,26 @@ static void __kprobes *patch_map(void *addr, int fixmap)
 	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
 					 (uintaddr & ~PAGE_MASK));
 }
+NOKPROBE_SYMBOL(patch_map);
 
-static void __kprobes patch_unmap(int fixmap)
+static void patch_unmap(int fixmap)
 {
 	clear_fixmap(fixmap);
 }
+NOKPROBE_SYMBOL(patch_unmap);
 
-static int __kprobes riscv_insn_write(void *addr, const void *insn, size_t len)
+static int patch_insn_write(void *addr, const void *insn, size_t len)
 {
 	void *waddr = addr;
 	bool across_pages = (((uintptr_t) addr & ~PAGE_MASK) + len) > PAGE_SIZE;
-	unsigned long flags = 0;
 	int ret;
 
-	raw_spin_lock_irqsave(&patch_lock, flags);
+	/*
+	 * The caller is expected to hold text_mutex before getting here,
+	 * so no additional lock is needed and patching stays safe across
+	 * cores.
+	 */
+	lockdep_assert_held(&text_mutex);
 
 	if (across_pages)
 		patch_map(addr + len, FIX_TEXT_POKE1);
@@ -64,38 +69,39 @@ static int __kprobes riscv_insn_write(void *addr, const void *insn, size_t len)
 	if (across_pages)
 		patch_unmap(FIX_TEXT_POKE1);
 
-	raw_spin_unlock_irqrestore(&patch_lock, flags);
-
 	return ret;
 }
+NOKPROBE_SYMBOL(patch_insn_write);
 #else
-static int __kprobes riscv_insn_write(void *addr, const void *insn, size_t len)
+static int patch_insn_write(void *addr, const void *insn, size_t len)
 {
 	return probe_kernel_write(addr, insn, len);
 }
+NOKPROBE_SYMBOL(patch_insn_write);
 #endif /* CONFIG_MMU */
 
-int __kprobes riscv_patch_text_nosync(void *addr, const void *insns, size_t len)
+int patch_text_nosync(void *addr, const void *insns, size_t len)
 {
 	u32 *tp = addr;
 	int ret;
 
-	ret = riscv_insn_write(tp, insns, len);
+	ret = patch_insn_write(tp, insns, len);
 
 	if (!ret)
 		flush_icache_range((uintptr_t) tp, (uintptr_t) tp + len);
 
 	return ret;
 }
+NOKPROBE_SYMBOL(patch_text_nosync);
 
-static int __kprobes riscv_patch_text_cb(void *data)
+static int patch_text_cb(void *data)
 {
-	struct riscv_insn_patch *patch = data;
+	struct patch_insn *patch = data;
 	int ret = 0;
 
 	if (atomic_inc_return(&patch->cpu_count) == 1) {
 		ret =
-		    riscv_patch_text_nosync(patch->addr, &patch->insn,
+		    patch_text_nosync(patch->addr, &patch->insn,
 					    GET_INSN_LENGTH(patch->insn));
 		atomic_inc(&patch->cpu_count);
 	} else {
@@ -106,15 +112,17 @@ static int __kprobes riscv_patch_text_cb(void *data)
 
 	return ret;
 }
+NOKPROBE_SYMBOL(patch_text_cb);
 
-int __kprobes riscv_patch_text(void *addr, u32 insn)
+int patch_text(void *addr, u32 insn)
 {
-	struct riscv_insn_patch patch = {
+	struct patch_insn patch = {
 		.addr = addr,
 		.insn = insn,
 		.cpu_count = ATOMIC_INIT(0),
 	};
 
-	return stop_machine_cpuslocked(riscv_patch_text_cb,
+	return stop_machine_cpuslocked(patch_text_cb,
 				       &patch, cpu_online_mask);
 }
+NOKPROBE_SYMBOL(patch_text);
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 145128a7e560..3e528312f615 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -75,7 +75,11 @@ void __init setup_arch(char **cmdline_p)
 	setup_bootmem();
 	paging_init();
+#if IS_ENABLED(CONFIG_BUILTIN_DTB)
+	unflatten_and_copy_device_tree();
+#else
 	unflatten_device_tree();
+#endif
 	clint_init_boot_cpu();
 
 #ifdef CONFIG_SWIOTLB
diff --git a/arch/riscv/kernel/soc.c b/arch/riscv/kernel/soc.c
index 0b3b3dc9ad0f..1fc87621c728 100644
--- a/arch/riscv/kernel/soc.c
+++ b/arch/riscv/kernel/soc.c
@@ -26,3 +26,30 @@ void __init soc_early_init(void)
 		}
 	}
 }
+
+static bool soc_builtin_dtb_match(unsigned long vendor_id,
+				  unsigned long arch_id, unsigned long imp_id,
+				  const struct soc_builtin_dtb *entry)
+{
+	return entry->vendor_id == vendor_id &&
+	       entry->arch_id == arch_id &&
+	       entry->imp_id == imp_id;
+}
+
+void * __init soc_lookup_builtin_dtb(void)
+{
+	unsigned long vendor_id, arch_id, imp_id;
+	const struct soc_builtin_dtb *s;
+
+	__asm__ ("csrr %0, mvendorid" : "=r"(vendor_id));
+	__asm__ ("csrr %0, marchid" : "=r"(arch_id));
+	__asm__ ("csrr %0, mimpid" : "=r"(imp_id));
+
+	for (s = (void *)&__soc_builtin_dtb_table_start;
+	     (void *)s < (void *)&__soc_builtin_dtb_table_end; s++) {
+		if (soc_builtin_dtb_match(vendor_id, arch_id, imp_id, s))
+			return s->dtb_func();
+	}
+
+	return NULL;
+}
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 7f58fa53033f..5080fdf8c296 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -147,6 +147,11 @@ asmlinkage __visible void do_trap_break(struct pt_regs *regs)
 {
 	if (user_mode(regs))
 		force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->epc);
+#ifdef CONFIG_KGDB
+	else if (notify_die(DIE_TRAP, "EBREAK", regs, 0, regs->cause, SIGTRAP)
+								== NOTIFY_STOP)
+		return;
+#endif
 	else if (report_bug(regs->epc, regs) == BUG_TRAP_TYPE_WARN)
 		regs->epc += get_break_insn_length(regs->epc);
 	else
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
index 0339b6bbe11a..e6f8016b366a 100644
--- a/arch/riscv/kernel/vmlinux.lds.S
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -34,6 +34,11 @@ SECTIONS
 		KEEP(*(__soc_early_init_table))
 		__soc_early_init_table_end = .;
 	}
+	__soc_builtin_dtb_table : {
+		__soc_builtin_dtb_table_start = .;
+		KEEP(*(__soc_builtin_dtb_table))
+		__soc_builtin_dtb_table_end = .;
+	}
 	/* we have to discard exit text and such at runtime, not link time */
 	.exit.text :
 	{