Diffstat (limited to 'arch/riscv/kernel')
-rw-r--r--  arch/riscv/kernel/asm-offsets.c            |   1
-rw-r--r--  arch/riscv/kernel/cpufeature.c             | 197
-rw-r--r--  arch/riscv/kernel/elf_kexec.c              |   3
-rw-r--r--  arch/riscv/kernel/ftrace.c                 |   6
-rw-r--r--  arch/riscv/kernel/jump_label.c             |   4
-rw-r--r--  arch/riscv/kernel/mcount.S                 |  24
-rw-r--r--  arch/riscv/kernel/setup.c                  |   5
-rw-r--r--  arch/riscv/kernel/smp.c                    |   2
-rw-r--r--  arch/riscv/kernel/smpboot.c                |   4
-rw-r--r--  arch/riscv/kernel/stacktrace.c             |   2
-rw-r--r--  arch/riscv/kernel/suspend.c                |  14
-rw-r--r--  arch/riscv/kernel/sys_hwprobe.c            |  15
-rw-r--r--  arch/riscv/kernel/traps_misaligned.c       |  14
-rw-r--r--  arch/riscv/kernel/unaligned_access_speed.c | 242
-rw-r--r--  arch/riscv/kernel/vec-copy-unaligned.S     |   2
-rw-r--r--  arch/riscv/kernel/vendor_extensions.c      |   2
-rw-r--r--  arch/riscv/kernel/vmlinux.lds.S            |   3
17 files changed, 367 insertions(+), 173 deletions(-)
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
index e89455a6a0e5..16490755304e 100644
--- a/arch/riscv/kernel/asm-offsets.c
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -36,7 +36,6 @@ void asm_offsets(void)
OFFSET(TASK_THREAD_S11, task_struct, thread.s[11]);
OFFSET(TASK_TI_CPU, task_struct, thread_info.cpu);
- OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags);
OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index 40ac72e407b6..2054f6c4b0ae 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -32,6 +32,7 @@
#define NUM_ALPHA_EXTS ('z' - 'a' + 1)
static bool any_cpu_has_zicboz;
+static bool any_cpu_has_zicbom;
unsigned long elf_hwcap __read_mostly;
@@ -53,9 +54,7 @@ u32 thead_vlenb_of;
*/
unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap)
{
- if (!isa_bitmap)
- return riscv_isa[0];
- return isa_bitmap[0];
+ return !isa_bitmap ? riscv_isa[0] : isa_bitmap[0];
}
EXPORT_SYMBOL_GPL(riscv_isa_extension_base);
@@ -76,10 +75,19 @@ bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, unsigned int bit)
if (bit >= RISCV_ISA_EXT_MAX)
return false;
- return test_bit(bit, bmap) ? true : false;
+ return test_bit(bit, bmap);
}
EXPORT_SYMBOL_GPL(__riscv_isa_extension_available);
+static int riscv_ext_f_depends(const struct riscv_isa_ext_data *data,
+ const unsigned long *isa_bitmap)
+{
+ if (__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_f))
+ return 0;
+
+ return -EPROBE_DEFER;
+}
+
static int riscv_ext_zicbom_validate(const struct riscv_isa_ext_data *data,
const unsigned long *isa_bitmap)
{
@@ -91,6 +99,8 @@ static int riscv_ext_zicbom_validate(const struct riscv_isa_ext_data *data,
pr_err("Zicbom disabled as cbom-block-size present, but is not a power-of-2\n");
return -EINVAL;
}
+
+ any_cpu_has_zicbom = true;
return 0;
}
@@ -109,6 +119,82 @@ static int riscv_ext_zicboz_validate(const struct riscv_isa_ext_data *data,
return 0;
}
+static int riscv_ext_f_validate(const struct riscv_isa_ext_data *data,
+ const unsigned long *isa_bitmap)
+{
+ if (!IS_ENABLED(CONFIG_FPU))
+ return -EINVAL;
+
+ /*
+ * Due to extension ordering, d is checked before f, so no deferral
+ * is required.
+ */
+ if (!__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_d)) {
+ pr_warn_once("This kernel does not support systems with F but not D\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int riscv_ext_d_validate(const struct riscv_isa_ext_data *data,
+ const unsigned long *isa_bitmap)
+{
+ if (!IS_ENABLED(CONFIG_FPU))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int riscv_ext_vector_x_validate(const struct riscv_isa_ext_data *data,
+ const unsigned long *isa_bitmap)
+{
+ if (!IS_ENABLED(CONFIG_RISCV_ISA_V))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int riscv_ext_vector_float_validate(const struct riscv_isa_ext_data *data,
+ const unsigned long *isa_bitmap)
+{
+ if (!IS_ENABLED(CONFIG_RISCV_ISA_V))
+ return -EINVAL;
+
+ if (!IS_ENABLED(CONFIG_FPU))
+ return -EINVAL;
+
+ /*
+	 * The kernel doesn't support systems that don't implement both
+ * F and D, so if any of the vector extensions that do floating point
+ * are to be usable, both floating point extensions need to be usable.
+ *
+ * Since this function validates vector only, and v/Zve* are probed
+ * after f/d, there's no need for a deferral here.
+ */
+ if (!__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_d))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int riscv_ext_vector_crypto_validate(const struct riscv_isa_ext_data *data,
+ const unsigned long *isa_bitmap)
+{
+ if (!IS_ENABLED(CONFIG_RISCV_ISA_V))
+ return -EINVAL;
+
+ /*
+ * It isn't the kernel's job to check that the binding is correct, so
+ * it should be enough to check that any of the vector extensions are
+	 * enabled, which in turn means that vector is usable in this kernel.
+ */
+ if (!__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_ZVE32X))
+ return -EPROBE_DEFER;
+
+ return 0;
+}
+
static int riscv_ext_zca_depends(const struct riscv_isa_ext_data *data,
const unsigned long *isa_bitmap)
{
@@ -140,6 +226,28 @@ static int riscv_ext_zcf_validate(const struct riscv_isa_ext_data *data,
return -EPROBE_DEFER;
}
+static int riscv_vector_f_validate(const struct riscv_isa_ext_data *data,
+ const unsigned long *isa_bitmap)
+{
+ if (!IS_ENABLED(CONFIG_RISCV_ISA_V))
+ return -EINVAL;
+
+ if (__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_ZVE32F))
+ return 0;
+
+ return -EPROBE_DEFER;
+}
+
+static int riscv_ext_zvfbfwma_validate(const struct riscv_isa_ext_data *data,
+ const unsigned long *isa_bitmap)
+{
+ if (__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_ZFBFMIN) &&
+ __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_ZVFBFMIN))
+ return 0;
+
+ return -EPROBE_DEFER;
+}
+
static int riscv_ext_svadu_validate(const struct riscv_isa_ext_data *data,
const unsigned long *isa_bitmap)
{
@@ -150,6 +258,11 @@ static int riscv_ext_svadu_validate(const struct riscv_isa_ext_data *data,
return 0;
}
+static const unsigned int riscv_a_exts[] = {
+ RISCV_ISA_EXT_ZAAMO,
+ RISCV_ISA_EXT_ZALRSC,
+};
+
static const unsigned int riscv_zk_bundled_exts[] = {
RISCV_ISA_EXT_ZBKB,
RISCV_ISA_EXT_ZBKC,
@@ -321,17 +434,15 @@ static const unsigned int riscv_c_exts[] = {
const struct riscv_isa_ext_data riscv_isa_ext[] = {
__RISCV_ISA_EXT_DATA(i, RISCV_ISA_EXT_i),
__RISCV_ISA_EXT_DATA(m, RISCV_ISA_EXT_m),
- __RISCV_ISA_EXT_DATA(a, RISCV_ISA_EXT_a),
- __RISCV_ISA_EXT_DATA(f, RISCV_ISA_EXT_f),
- __RISCV_ISA_EXT_DATA(d, RISCV_ISA_EXT_d),
+ __RISCV_ISA_EXT_SUPERSET(a, RISCV_ISA_EXT_a, riscv_a_exts),
+ __RISCV_ISA_EXT_DATA_VALIDATE(f, RISCV_ISA_EXT_f, riscv_ext_f_validate),
+ __RISCV_ISA_EXT_DATA_VALIDATE(d, RISCV_ISA_EXT_d, riscv_ext_d_validate),
__RISCV_ISA_EXT_DATA(q, RISCV_ISA_EXT_q),
__RISCV_ISA_EXT_SUPERSET(c, RISCV_ISA_EXT_c, riscv_c_exts),
- __RISCV_ISA_EXT_SUPERSET(v, RISCV_ISA_EXT_v, riscv_v_exts),
+ __RISCV_ISA_EXT_SUPERSET_VALIDATE(v, RISCV_ISA_EXT_v, riscv_v_exts, riscv_ext_vector_float_validate),
__RISCV_ISA_EXT_DATA(h, RISCV_ISA_EXT_h),
- __RISCV_ISA_EXT_SUPERSET_VALIDATE(zicbom, RISCV_ISA_EXT_ZICBOM, riscv_xlinuxenvcfg_exts,
- riscv_ext_zicbom_validate),
- __RISCV_ISA_EXT_SUPERSET_VALIDATE(zicboz, RISCV_ISA_EXT_ZICBOZ, riscv_xlinuxenvcfg_exts,
- riscv_ext_zicboz_validate),
+ __RISCV_ISA_EXT_SUPERSET_VALIDATE(zicbom, RISCV_ISA_EXT_ZICBOM, riscv_xlinuxenvcfg_exts, riscv_ext_zicbom_validate),
+ __RISCV_ISA_EXT_SUPERSET_VALIDATE(zicboz, RISCV_ISA_EXT_ZICBOZ, riscv_xlinuxenvcfg_exts, riscv_ext_zicboz_validate),
__RISCV_ISA_EXT_DATA(ziccrse, RISCV_ISA_EXT_ZICCRSE),
__RISCV_ISA_EXT_DATA(zicntr, RISCV_ISA_EXT_ZICNTR),
__RISCV_ISA_EXT_DATA(zicond, RISCV_ISA_EXT_ZICOND),
@@ -341,10 +452,13 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = {
__RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE),
__RISCV_ISA_EXT_DATA(zihpm, RISCV_ISA_EXT_ZIHPM),
__RISCV_ISA_EXT_DATA(zimop, RISCV_ISA_EXT_ZIMOP),
+ __RISCV_ISA_EXT_DATA(zaamo, RISCV_ISA_EXT_ZAAMO),
__RISCV_ISA_EXT_DATA(zabha, RISCV_ISA_EXT_ZABHA),
__RISCV_ISA_EXT_DATA(zacas, RISCV_ISA_EXT_ZACAS),
+ __RISCV_ISA_EXT_DATA(zalrsc, RISCV_ISA_EXT_ZALRSC),
__RISCV_ISA_EXT_DATA(zawrs, RISCV_ISA_EXT_ZAWRS),
__RISCV_ISA_EXT_DATA(zfa, RISCV_ISA_EXT_ZFA),
+ __RISCV_ISA_EXT_DATA_VALIDATE(zfbfmin, RISCV_ISA_EXT_ZFBFMIN, riscv_ext_f_depends),
__RISCV_ISA_EXT_DATA(zfh, RISCV_ISA_EXT_ZFH),
__RISCV_ISA_EXT_DATA(zfhmin, RISCV_ISA_EXT_ZFHMIN),
__RISCV_ISA_EXT_DATA(zca, RISCV_ISA_EXT_ZCA),
@@ -370,29 +484,31 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = {
__RISCV_ISA_EXT_DATA(zksed, RISCV_ISA_EXT_ZKSED),
__RISCV_ISA_EXT_DATA(zksh, RISCV_ISA_EXT_ZKSH),
__RISCV_ISA_EXT_DATA(ztso, RISCV_ISA_EXT_ZTSO),
- __RISCV_ISA_EXT_SUPERSET(zvbb, RISCV_ISA_EXT_ZVBB, riscv_zvbb_exts),
- __RISCV_ISA_EXT_DATA(zvbc, RISCV_ISA_EXT_ZVBC),
- __RISCV_ISA_EXT_SUPERSET(zve32f, RISCV_ISA_EXT_ZVE32F, riscv_zve32f_exts),
- __RISCV_ISA_EXT_DATA(zve32x, RISCV_ISA_EXT_ZVE32X),
- __RISCV_ISA_EXT_SUPERSET(zve64d, RISCV_ISA_EXT_ZVE64D, riscv_zve64d_exts),
- __RISCV_ISA_EXT_SUPERSET(zve64f, RISCV_ISA_EXT_ZVE64F, riscv_zve64f_exts),
- __RISCV_ISA_EXT_SUPERSET(zve64x, RISCV_ISA_EXT_ZVE64X, riscv_zve64x_exts),
+ __RISCV_ISA_EXT_SUPERSET_VALIDATE(zvbb, RISCV_ISA_EXT_ZVBB, riscv_zvbb_exts, riscv_ext_vector_crypto_validate),
+ __RISCV_ISA_EXT_DATA_VALIDATE(zvbc, RISCV_ISA_EXT_ZVBC, riscv_ext_vector_crypto_validate),
+ __RISCV_ISA_EXT_SUPERSET_VALIDATE(zve32f, RISCV_ISA_EXT_ZVE32F, riscv_zve32f_exts, riscv_ext_vector_float_validate),
+ __RISCV_ISA_EXT_DATA_VALIDATE(zve32x, RISCV_ISA_EXT_ZVE32X, riscv_ext_vector_x_validate),
+ __RISCV_ISA_EXT_SUPERSET_VALIDATE(zve64d, RISCV_ISA_EXT_ZVE64D, riscv_zve64d_exts, riscv_ext_vector_float_validate),
+ __RISCV_ISA_EXT_SUPERSET_VALIDATE(zve64f, RISCV_ISA_EXT_ZVE64F, riscv_zve64f_exts, riscv_ext_vector_float_validate),
+ __RISCV_ISA_EXT_SUPERSET_VALIDATE(zve64x, RISCV_ISA_EXT_ZVE64X, riscv_zve64x_exts, riscv_ext_vector_x_validate),
+ __RISCV_ISA_EXT_DATA_VALIDATE(zvfbfmin, RISCV_ISA_EXT_ZVFBFMIN, riscv_vector_f_validate),
+ __RISCV_ISA_EXT_DATA_VALIDATE(zvfbfwma, RISCV_ISA_EXT_ZVFBFWMA, riscv_ext_zvfbfwma_validate),
__RISCV_ISA_EXT_DATA(zvfh, RISCV_ISA_EXT_ZVFH),
__RISCV_ISA_EXT_DATA(zvfhmin, RISCV_ISA_EXT_ZVFHMIN),
- __RISCV_ISA_EXT_DATA(zvkb, RISCV_ISA_EXT_ZVKB),
- __RISCV_ISA_EXT_DATA(zvkg, RISCV_ISA_EXT_ZVKG),
- __RISCV_ISA_EXT_BUNDLE(zvkn, riscv_zvkn_bundled_exts),
- __RISCV_ISA_EXT_BUNDLE(zvknc, riscv_zvknc_bundled_exts),
- __RISCV_ISA_EXT_DATA(zvkned, RISCV_ISA_EXT_ZVKNED),
- __RISCV_ISA_EXT_BUNDLE(zvkng, riscv_zvkng_bundled_exts),
- __RISCV_ISA_EXT_DATA(zvknha, RISCV_ISA_EXT_ZVKNHA),
- __RISCV_ISA_EXT_DATA(zvknhb, RISCV_ISA_EXT_ZVKNHB),
- __RISCV_ISA_EXT_BUNDLE(zvks, riscv_zvks_bundled_exts),
- __RISCV_ISA_EXT_BUNDLE(zvksc, riscv_zvksc_bundled_exts),
- __RISCV_ISA_EXT_DATA(zvksed, RISCV_ISA_EXT_ZVKSED),
- __RISCV_ISA_EXT_DATA(zvksh, RISCV_ISA_EXT_ZVKSH),
- __RISCV_ISA_EXT_BUNDLE(zvksg, riscv_zvksg_bundled_exts),
- __RISCV_ISA_EXT_DATA(zvkt, RISCV_ISA_EXT_ZVKT),
+ __RISCV_ISA_EXT_DATA_VALIDATE(zvkb, RISCV_ISA_EXT_ZVKB, riscv_ext_vector_crypto_validate),
+ __RISCV_ISA_EXT_DATA_VALIDATE(zvkg, RISCV_ISA_EXT_ZVKG, riscv_ext_vector_crypto_validate),
+ __RISCV_ISA_EXT_BUNDLE_VALIDATE(zvkn, riscv_zvkn_bundled_exts, riscv_ext_vector_crypto_validate),
+ __RISCV_ISA_EXT_BUNDLE_VALIDATE(zvknc, riscv_zvknc_bundled_exts, riscv_ext_vector_crypto_validate),
+ __RISCV_ISA_EXT_DATA_VALIDATE(zvkned, RISCV_ISA_EXT_ZVKNED, riscv_ext_vector_crypto_validate),
+ __RISCV_ISA_EXT_BUNDLE_VALIDATE(zvkng, riscv_zvkng_bundled_exts, riscv_ext_vector_crypto_validate),
+ __RISCV_ISA_EXT_DATA_VALIDATE(zvknha, RISCV_ISA_EXT_ZVKNHA, riscv_ext_vector_crypto_validate),
+ __RISCV_ISA_EXT_DATA_VALIDATE(zvknhb, RISCV_ISA_EXT_ZVKNHB, riscv_ext_vector_crypto_validate),
+ __RISCV_ISA_EXT_BUNDLE_VALIDATE(zvks, riscv_zvks_bundled_exts, riscv_ext_vector_crypto_validate),
+ __RISCV_ISA_EXT_BUNDLE_VALIDATE(zvksc, riscv_zvksc_bundled_exts, riscv_ext_vector_crypto_validate),
+ __RISCV_ISA_EXT_DATA_VALIDATE(zvksed, RISCV_ISA_EXT_ZVKSED, riscv_ext_vector_crypto_validate),
+ __RISCV_ISA_EXT_DATA_VALIDATE(zvksh, RISCV_ISA_EXT_ZVKSH, riscv_ext_vector_crypto_validate),
+ __RISCV_ISA_EXT_BUNDLE_VALIDATE(zvksg, riscv_zvksg_bundled_exts, riscv_ext_vector_crypto_validate),
+ __RISCV_ISA_EXT_DATA_VALIDATE(zvkt, RISCV_ISA_EXT_ZVKT, riscv_ext_vector_crypto_validate),
__RISCV_ISA_EXT_DATA(smaia, RISCV_ISA_EXT_SMAIA),
__RISCV_ISA_EXT_DATA(smmpm, RISCV_ISA_EXT_SMMPM),
__RISCV_ISA_EXT_SUPERSET(smnpm, RISCV_ISA_EXT_SMNPM, riscv_xlinuxenvcfg_exts),
@@ -960,16 +1076,6 @@ void __init riscv_fill_hwcap(void)
riscv_v_setup_vsize();
}
- if (elf_hwcap & COMPAT_HWCAP_ISA_V) {
- /*
- * ISA string in device tree might have 'v' flag, but
- * CONFIG_RISCV_ISA_V is disabled in kernel.
- * Clear V flag in elf_hwcap if CONFIG_RISCV_ISA_V is disabled.
- */
- if (!IS_ENABLED(CONFIG_RISCV_ISA_V))
- elf_hwcap &= ~COMPAT_HWCAP_ISA_V;
- }
-
memset(print_str, 0, sizeof(print_str));
for (i = 0, j = 0; i < NUM_ALPHA_EXTS; i++)
if (riscv_isa[0] & BIT_MASK(i))
@@ -1001,6 +1107,11 @@ void __init riscv_user_isa_enable(void)
current->thread.envcfg |= ENVCFG_CBZE;
else if (any_cpu_has_zicboz)
pr_warn("Zicboz disabled as it is unavailable on some harts\n");
+
+ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_ZICBOM))
+ current->thread.envcfg |= ENVCFG_CBCFE;
+ else if (any_cpu_has_zicbom)
+ pr_warn("Zicbom disabled as it is unavailable on some harts\n");
}
#ifdef CONFIG_RISCV_ALTERNATIVE
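
The validate callbacks added above communicate ordering through their return value: 0 accepts the extension, a regular errno rejects it, and -EPROBE_DEFER asks the resolver to retry once the extensions it depends on have been settled. A minimal standalone sketch of that retry protocol, with illustrative extension names and a simplified loop (the kernel's real resolver also treats other errnos as permanent failures, and f checks d directly without deferring; here f defers too, purely to exercise the loop):

    #include <stdbool.h>
    #include <stdio.h>

    #define EPROBE_DEFER 517	/* kernel-internal errno, defined here for the sketch */

    enum { EXT_D, EXT_F, EXT_ZFBFMIN, EXT_MAX };

    static bool resolved[EXT_MAX];

    /* Mirrors riscv_ext_f_depends(): Zfbfmin needs F, so defer until F is in. */
    static int validate_zfbfmin(void) { return resolved[EXT_F] ? 0 : -EPROBE_DEFER; }
    static int validate_f(void)       { return resolved[EXT_D] ? 0 : -EPROBE_DEFER; }
    static int validate_d(void)       { return 0; }

    static int (*const validators[EXT_MAX])(void) = {
    	[EXT_D] = validate_d, [EXT_F] = validate_f, [EXT_ZFBFMIN] = validate_zfbfmin,
    };

    int main(void)
    {
    	bool progress = true;

    	/* Re-run deferred validators until a pass makes no progress. */
    	while (progress) {
    		progress = false;
    		for (int i = 0; i < EXT_MAX; i++) {
    			if (resolved[i] || validators[i]() != 0)
    				continue;
    			resolved[i] = true;
    			progress = true;
    		}
    	}
    	for (int i = 0; i < EXT_MAX; i++)
    		printf("ext %d: %s\n", i, resolved[i] ? "ok" : "unresolved");
    	return 0;
    }
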
diff --git a/arch/riscv/kernel/elf_kexec.c b/arch/riscv/kernel/elf_kexec.c
index 3c37661801f9..e783a72d051f 100644
--- a/arch/riscv/kernel/elf_kexec.c
+++ b/arch/riscv/kernel/elf_kexec.c
@@ -468,6 +468,9 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
case R_RISCV_ALIGN:
case R_RISCV_RELAX:
break;
+ case R_RISCV_64:
+ *(u64 *)loc = val;
+ break;
default:
pr_err("Unknown rela relocation: %d\n", r_type);
return -ENOEXEC;
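
R_RISCV_64 is the simplest RISC-V relocation: the 64-bit resolved symbol value is written verbatim at the relocation site, which is exactly what the new case arm does with *(u64 *)loc = val. An equivalent standalone sketch (memcpy sidesteps alignment assumptions the kernel is free to make about loc):

    #include <stdint.h>
    #include <string.h>

    /* Apply an R_RISCV_64 absolute relocation: store the full 64-bit
     * resolved value at the patch location. */
    static void apply_r_riscv_64(void *loc, uint64_t val)
    {
    	memcpy(loc, &val, sizeof(val));
    }
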
diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
index 3524db5e4fa0..674dcdfae7a1 100644
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -36,7 +36,7 @@ static int ftrace_check_current_call(unsigned long hook_pos,
unsigned int *expected)
{
unsigned int replaced[2];
- unsigned int nops[2] = {NOP4, NOP4};
+ unsigned int nops[2] = {RISCV_INSN_NOP4, RISCV_INSN_NOP4};
/* we expect nops at the hook position */
if (!expected)
@@ -68,7 +68,7 @@ static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
bool enable, bool ra)
{
unsigned int call[2];
- unsigned int nops[2] = {NOP4, NOP4};
+ unsigned int nops[2] = {RISCV_INSN_NOP4, RISCV_INSN_NOP4};
if (ra)
make_call_ra(hook_pos, target, call);
@@ -97,7 +97,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long addr)
{
- unsigned int nops[2] = {NOP4, NOP4};
+ unsigned int nops[2] = {RISCV_INSN_NOP4, RISCV_INSN_NOP4};
if (patch_insn_write((void *)rec->ip, nops, MCOUNT_INSN_SIZE))
return -EPERM;
diff --git a/arch/riscv/kernel/jump_label.c b/arch/riscv/kernel/jump_label.c
index 654ed159c830..b4c1a6a3fbd2 100644
--- a/arch/riscv/kernel/jump_label.c
+++ b/arch/riscv/kernel/jump_label.c
@@ -11,8 +11,8 @@
#include <asm/bug.h>
#include <asm/cacheflush.h>
#include <asm/text-patching.h>
+#include <asm/insn-def.h>
-#define RISCV_INSN_NOP 0x00000013U
#define RISCV_INSN_JAL 0x0000006fU
bool arch_jump_label_transform_queue(struct jump_entry *entry,
@@ -33,7 +33,7 @@ bool arch_jump_label_transform_queue(struct jump_entry *entry,
(((u32)offset & GENMASK(10, 1)) << (21 - 1)) |
(((u32)offset & GENMASK(20, 20)) << (31 - 20));
} else {
- insn = RISCV_INSN_NOP;
+ insn = RISCV_INSN_NOP4;
}
if (early_boot_irqs_disabled) {
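
For reference, the immediate assembled a few lines above follows the J-type layout: a 21-bit, 2-byte-aligned offset is scattered into the instruction as imm[20|10:1|11|19:12]. A sketch of the full encoding (the jump_label code leaves rd = x0, so the jump discards its return address; rd is a parameter here only for generality):

    #include <stdint.h>

    #define RISCV_INSN_JAL 0x0000006fU

    /* Encode "jal rd, offset" (J-type). */
    static uint32_t rv_jal(uint32_t rd, int32_t offset)
    {
    	uint32_t imm = (uint32_t)offset;

    	return RISCV_INSN_JAL | (rd << 7)  |
    	       (imm & 0x000ff000)          |	/* imm[19:12] stays in place */
    	       ((imm & 0x00000800) << 9)   |	/* imm[11]    -> bit 20      */
    	       ((imm & 0x000007fe) << 20)  |	/* imm[10:1]  -> bits 30:21  */
    	       ((imm & 0x00100000) << 11);	/* imm[20]    -> bit 31      */
    }
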
diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S
index 068168046e0e..da4a4000e57e 100644
--- a/arch/riscv/kernel/mcount.S
+++ b/arch/riscv/kernel/mcount.S
@@ -12,8 +12,6 @@
#include <asm/asm-offsets.h>
#include <asm/ftrace.h>
-#define ABI_SIZE_ON_STACK 80
-
.text
.macro SAVE_ABI_STATE
@@ -28,12 +26,12 @@
* register if a0 was not saved.
*/
.macro SAVE_RET_ABI_STATE
- addi sp, sp, -ABI_SIZE_ON_STACK
- REG_S ra, 1*SZREG(sp)
- REG_S s0, 8*SZREG(sp)
- REG_S a0, 10*SZREG(sp)
- REG_S a1, 11*SZREG(sp)
- addi s0, sp, ABI_SIZE_ON_STACK
+ addi sp, sp, -FREGS_SIZE_ON_STACK
+ REG_S ra, FREGS_RA(sp)
+ REG_S s0, FREGS_S0(sp)
+ REG_S a0, FREGS_A0(sp)
+ REG_S a1, FREGS_A1(sp)
+ addi s0, sp, FREGS_SIZE_ON_STACK
.endm
.macro RESTORE_ABI_STATE
@@ -43,11 +41,11 @@
.endm
.macro RESTORE_RET_ABI_STATE
- REG_L ra, 1*SZREG(sp)
- REG_L s0, 8*SZREG(sp)
- REG_L a0, 10*SZREG(sp)
- REG_L a1, 11*SZREG(sp)
- addi sp, sp, ABI_SIZE_ON_STACK
+ REG_L ra, FREGS_RA(sp)
+ REG_L s0, FREGS_S0(sp)
+ REG_L a0, FREGS_A0(sp)
+ REG_L a1, FREGS_A1(sp)
+ addi sp, sp, FREGS_SIZE_ON_STACK
.endm
SYM_TYPED_FUNC_START(ftrace_stub)
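
The FREGS_* symbols used above replace hand-counted SZREG multiples; like the TASK_* constants touched in asm-offsets.c earlier in this series, they are generated at build time from the C layout of the ftrace register-save area. A hedged sketch of the mechanism, with an illustrative stand-in struct (the real layout and names live in the arch ftrace headers):

    #include <stddef.h>

    /* Illustrative stand-in for the ftrace register-save layout. */
    struct fregs_sketch {
    	unsigned long ra;
    	unsigned long s0;
    	unsigned long a0;
    	unsigned long a1;
    };

    /* asm-offsets style: DEFINE() emits a marker string the build
     * scrapes into asm-offsets.h, making the offset usable from .S files. */
    #define DEFINE(sym, val) \
    	asm volatile("\n.ascii \"->" #sym " %0\"" : : "i" (val))

    void fregs_offsets(void)
    {
    	DEFINE(FREGS_RA, offsetof(struct fregs_sketch, ra));
    	DEFINE(FREGS_S0, offsetof(struct fregs_sketch, s0));
    	DEFINE(FREGS_A0, offsetof(struct fregs_sketch, a0));
    	DEFINE(FREGS_A1, offsetof(struct fregs_sketch, a1));
    	DEFINE(FREGS_SIZE_ON_STACK, sizeof(struct fregs_sketch));
    }
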
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 4fe45daa6281..c174544eefc8 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -235,11 +235,6 @@ static void __init parse_dtb(void)
} else {
pr_err("No DTB passed to the kernel\n");
}
-
-#ifdef CONFIG_CMDLINE_FORCE
- strscpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
- pr_info("Forcing kernel command line to: %s\n", boot_command_line);
-#endif
}
#if defined(CONFIG_RISCV_COMBO_SPINLOCKS)
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index d58b5e751286..e650dec44817 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -48,6 +48,8 @@ EXPORT_SYMBOL_GPL(__cpuid_to_hartid_map);
void __init smp_setup_processor_id(void)
{
cpuid_to_hartid_map(0) = boot_cpu_hartid;
+
+ pr_info("Booting Linux on hartid %lu\n", boot_cpu_hartid);
}
static DEFINE_PER_CPU_READ_MOSTLY(int, ipi_dummy_dev);
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index e36d20205bd7..601a321e0f17 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -231,6 +231,10 @@ asmlinkage __visible void smp_callin(void)
riscv_ipi_enable();
numa_add_cpu(curr_cpuid);
+
+ pr_debug("CPU%u: Booted secondary hartid %lu\n", curr_cpuid,
+ cpuid_to_hartid_map(curr_cpuid));
+
set_cpu_online(curr_cpuid, true);
/*
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index d4355c770c36..3fe9e6edef8f 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -74,7 +74,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
&frame->ra);
if (pc >= (unsigned long)handle_exception &&
pc < (unsigned long)&ret_from_exception_end) {
- if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc)))
+ if (unlikely(!fn(arg, pc)))
break;
pc = ((struct pt_regs *)sp)->epc;
diff --git a/arch/riscv/kernel/suspend.c b/arch/riscv/kernel/suspend.c
index 9a8a0dc035b2..24b3f57d467f 100644
--- a/arch/riscv/kernel/suspend.c
+++ b/arch/riscv/kernel/suspend.c
@@ -30,6 +30,13 @@ void suspend_save_csrs(struct suspend_context *context)
*/
#ifdef CONFIG_MMU
+ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SSTC)) {
+ context->stimecmp = csr_read(CSR_STIMECMP);
+#if __riscv_xlen < 64
+ context->stimecmph = csr_read(CSR_STIMECMPH);
+#endif
+ }
+
context->satp = csr_read(CSR_SATP);
#endif
}
@@ -43,6 +50,13 @@ void suspend_restore_csrs(struct suspend_context *context)
csr_write(CSR_IE, context->ie);
#ifdef CONFIG_MMU
+ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SSTC)) {
+ csr_write(CSR_STIMECMP, context->stimecmp);
+#if __riscv_xlen < 64
+ csr_write(CSR_STIMECMPH, context->stimecmph);
+#endif
+ }
+
csr_write(CSR_SATP, context->satp);
#endif
}
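
The __riscv_xlen guard exists because rv32 has no 64-bit CSR access: the 64-bit stimecmp deadline defined by Sstc is split across the STIMECMP/STIMECMPH pair. A sketch of reassembling it, with a csr_read() modeled on the kernel's <asm/csr.h> accessor and CSR numbers per the Sstc spec:

    #include <stdint.h>

    #define CSR_STIMECMP	0x14D	/* Sstc timer compare (low half on rv32) */
    #define CSR_STIMECMPH	0x15D	/* rv32 only: high half */

    #define __ASM_STR(x)	#x
    #define ASM_STR(x)	__ASM_STR(x)
    #define csr_read(csr)						\
    ({								\
    	unsigned long __v;					\
    	__asm__ __volatile__("csrr %0, " ASM_STR(csr)		\
    			     : "=r" (__v));			\
    	__v;							\
    })

    static inline uint64_t read_stimecmp64(void)
    {
    #if __riscv_xlen < 64
    	/* stimecmp changes only on software writes, so two plain reads
    	 * suffice; the ticking time CSR would need a hi/lo re-read loop. */
    	uint32_t lo = csr_read(CSR_STIMECMP);
    	uint32_t hi = csr_read(CSR_STIMECMPH);

    	return ((uint64_t)hi << 32) | lo;
    #else
    	return csr_read(CSR_STIMECMP);
    #endif
    }
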
diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c
index 04a4e5495512..249aec8594a9 100644
--- a/arch/riscv/kernel/sys_hwprobe.c
+++ b/arch/riscv/kernel/sys_hwprobe.c
@@ -95,7 +95,9 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
* regardless of the kernel's configuration, as no other checks, besides
* presence in the hart_isa bitmap, are made.
*/
+ EXT_KEY(ZAAMO);
EXT_KEY(ZACAS);
+ EXT_KEY(ZALRSC);
EXT_KEY(ZAWRS);
EXT_KEY(ZBA);
EXT_KEY(ZBB);
@@ -107,10 +109,13 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
EXT_KEY(ZCA);
EXT_KEY(ZCB);
EXT_KEY(ZCMOP);
+ EXT_KEY(ZICBOM);
EXT_KEY(ZICBOZ);
+ EXT_KEY(ZICNTR);
EXT_KEY(ZICOND);
EXT_KEY(ZIHINTNTL);
EXT_KEY(ZIHINTPAUSE);
+ EXT_KEY(ZIHPM);
EXT_KEY(ZIMOP);
EXT_KEY(ZKND);
EXT_KEY(ZKNE);
@@ -132,6 +137,8 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
EXT_KEY(ZVE64D);
EXT_KEY(ZVE64F);
EXT_KEY(ZVE64X);
+ EXT_KEY(ZVFBFMIN);
+ EXT_KEY(ZVFBFWMA);
EXT_KEY(ZVFH);
EXT_KEY(ZVFHMIN);
EXT_KEY(ZVKB);
@@ -148,6 +155,7 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
EXT_KEY(ZCD);
EXT_KEY(ZCF);
EXT_KEY(ZFA);
+ EXT_KEY(ZFBFMIN);
EXT_KEY(ZFH);
EXT_KEY(ZFHMIN);
}
@@ -161,7 +169,7 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
pair->value &= ~missing;
}
-static bool hwprobe_ext0_has(const struct cpumask *cpus, unsigned long ext)
+static bool hwprobe_ext0_has(const struct cpumask *cpus, u64 ext)
{
struct riscv_hwprobe pair;
@@ -279,6 +287,11 @@ static void hwprobe_one_pair(struct riscv_hwprobe *pair,
if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ))
pair->value = riscv_cboz_block_size;
break;
+ case RISCV_HWPROBE_KEY_ZICBOM_BLOCK_SIZE:
+ pair->value = 0;
+ if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOM))
+ pair->value = riscv_cbom_block_size;
+ break;
case RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS:
pair->value = user_max_virt_addr();
break;
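
Userspace reads the new key through the riscv_hwprobe(2) syscall just like the existing Zicboz block-size key. A minimal sketch of a caller, assuming uapi headers from a kernel that includes this series:

    #include <asm/hwprobe.h>	/* struct riscv_hwprobe and key constants */
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
    	struct riscv_hwprobe pair = {
    		.key = RISCV_HWPROBE_KEY_ZICBOM_BLOCK_SIZE,
    	};

    	/* cpusetsize 0 with a NULL mask queries across all online CPUs. */
    	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0))
    		return 1;

    	/* Per the case arm above, value stays 0 if Zicbom is unavailable. */
    	printf("Zicbom block size: %llu\n", (unsigned long long)pair.value);
    	return 0;
    }
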
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index 7cc108aed74e..4354c87c0376 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -605,16 +605,10 @@ void check_vector_unaligned_access_emulated(struct work_struct *work __always_unused)
kernel_vector_end();
}
-bool check_vector_unaligned_access_emulated_all_cpus(void)
+bool __init check_vector_unaligned_access_emulated_all_cpus(void)
{
int cpu;
- if (!has_vector()) {
- for_each_online_cpu(cpu)
- per_cpu(vector_misaligned_access, cpu) = RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED;
- return false;
- }
-
schedule_on_each_cpu(check_vector_unaligned_access_emulated);
for_each_online_cpu(cpu)
@@ -625,7 +619,7 @@ bool check_vector_unaligned_access_emulated_all_cpus(void)
return true;
}
#else
-bool check_vector_unaligned_access_emulated_all_cpus(void)
+bool __init check_vector_unaligned_access_emulated_all_cpus(void)
{
return false;
}
@@ -659,7 +653,7 @@ void check_unaligned_access_emulated(struct work_struct *work __always_unused)
}
}
-bool check_unaligned_access_emulated_all_cpus(void)
+bool __init check_unaligned_access_emulated_all_cpus(void)
{
int cpu;
@@ -684,7 +678,7 @@ bool unaligned_ctl_available(void)
return unaligned_ctl;
}
#else
-bool check_unaligned_access_emulated_all_cpus(void)
+bool __init check_unaligned_access_emulated_all_cpus(void)
{
return false;
}
diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c
index 91f189cf1611..585d2dcf2dab 100644
--- a/arch/riscv/kernel/unaligned_access_speed.c
+++ b/arch/riscv/kernel/unaligned_access_speed.c
@@ -24,8 +24,12 @@
DEFINE_PER_CPU(long, misaligned_access_speed) = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;
DEFINE_PER_CPU(long, vector_misaligned_access) = RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED;
-#ifdef CONFIG_RISCV_PROBE_UNALIGNED_ACCESS
+static long unaligned_scalar_speed_param = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;
+static long unaligned_vector_speed_param = RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
+
static cpumask_t fast_misaligned_access;
+
+#ifdef CONFIG_RISCV_PROBE_UNALIGNED_ACCESS
static int check_unaligned_access(void *param)
{
int cpu = smp_processor_id();
@@ -121,7 +125,7 @@ static int check_unaligned_access(void *param)
return 0;
}
-static void check_unaligned_access_nonboot_cpu(void *param)
+static void __init check_unaligned_access_nonboot_cpu(void *param)
{
unsigned int cpu = smp_processor_id();
struct page **pages = param;
@@ -130,6 +134,50 @@ static void check_unaligned_access_nonboot_cpu(void *param)
check_unaligned_access(pages[cpu]);
}
+/* Measure unaligned access speed on all CPUs present at boot in parallel. */
+static void __init check_unaligned_access_speed_all_cpus(void)
+{
+ unsigned int cpu;
+ unsigned int cpu_count = num_possible_cpus();
+ struct page **bufs = kcalloc(cpu_count, sizeof(*bufs), GFP_KERNEL);
+
+ if (!bufs) {
+ pr_warn("Allocation failure, not measuring misaligned performance\n");
+ return;
+ }
+
+ /*
+ * Allocate separate buffers for each CPU so there's no fighting over
+ * cache lines.
+ */
+ for_each_cpu(cpu, cpu_online_mask) {
+ bufs[cpu] = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
+ if (!bufs[cpu]) {
+ pr_warn("Allocation failure, not measuring misaligned performance\n");
+ goto out;
+ }
+ }
+
+ /* Check everybody except 0, who stays behind to tend jiffies. */
+ on_each_cpu(check_unaligned_access_nonboot_cpu, bufs, 1);
+
+ /* Check core 0. */
+ smp_call_on_cpu(0, check_unaligned_access, bufs[0], true);
+
+out:
+ for_each_cpu(cpu, cpu_online_mask) {
+ if (bufs[cpu])
+ __free_pages(bufs[cpu], MISALIGNED_BUFFER_ORDER);
+ }
+
+ kfree(bufs);
+}
+#else /* CONFIG_RISCV_PROBE_UNALIGNED_ACCESS */
+static void __init check_unaligned_access_speed_all_cpus(void)
+{
+}
+#endif
+
DEFINE_STATIC_KEY_FALSE(fast_unaligned_access_speed_key);
static void modify_unaligned_access_branches(cpumask_t *mask, int weight)
@@ -175,7 +223,7 @@ static void set_unaligned_access_static_branches(void)
modify_unaligned_access_branches(&fast_and_online, num_online_cpus());
}
-static int lock_and_set_unaligned_access_static_branch(void)
+static int __init lock_and_set_unaligned_access_static_branch(void)
{
cpus_read_lock();
set_unaligned_access_static_branches();
@@ -188,21 +236,29 @@ arch_initcall_sync(lock_and_set_unaligned_access_static_branch);
static int riscv_online_cpu(unsigned int cpu)
{
- static struct page *buf;
-
/* We are already set since the last check */
- if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN)
+ if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN) {
+ goto exit;
+ } else if (unaligned_scalar_speed_param != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN) {
+ per_cpu(misaligned_access_speed, cpu) = unaligned_scalar_speed_param;
goto exit;
-
- check_unaligned_access_emulated(NULL);
- buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
- if (!buf) {
- pr_warn("Allocation failure, not measuring misaligned performance\n");
- return -ENOMEM;
}
- check_unaligned_access(buf);
- __free_pages(buf, MISALIGNED_BUFFER_ORDER);
+#ifdef CONFIG_RISCV_PROBE_UNALIGNED_ACCESS
+ {
+ static struct page *buf;
+
+ check_unaligned_access_emulated(NULL);
+ buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
+ if (!buf) {
+ pr_warn("Allocation failure, not measuring misaligned performance\n");
+ return -ENOMEM;
+ }
+
+ check_unaligned_access(buf);
+ __free_pages(buf, MISALIGNED_BUFFER_ORDER);
+ }
+#endif
exit:
set_unaligned_access_static_branches();
@@ -217,59 +273,6 @@ static int riscv_offline_cpu(unsigned int cpu)
return 0;
}
-/* Measure unaligned access speed on all CPUs present at boot in parallel. */
-static int check_unaligned_access_speed_all_cpus(void)
-{
- unsigned int cpu;
- unsigned int cpu_count = num_possible_cpus();
- struct page **bufs = kcalloc(cpu_count, sizeof(*bufs), GFP_KERNEL);
-
- if (!bufs) {
- pr_warn("Allocation failure, not measuring misaligned performance\n");
- return 0;
- }
-
- /*
- * Allocate separate buffers for each CPU so there's no fighting over
- * cache lines.
- */
- for_each_cpu(cpu, cpu_online_mask) {
- bufs[cpu] = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
- if (!bufs[cpu]) {
- pr_warn("Allocation failure, not measuring misaligned performance\n");
- goto out;
- }
- }
-
- /* Check everybody except 0, who stays behind to tend jiffies. */
- on_each_cpu(check_unaligned_access_nonboot_cpu, bufs, 1);
-
- /* Check core 0. */
- smp_call_on_cpu(0, check_unaligned_access, bufs[0], true);
-
- /*
- * Setup hotplug callbacks for any new CPUs that come online or go
- * offline.
- */
- cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online",
- riscv_online_cpu, riscv_offline_cpu);
-
-out:
- for_each_cpu(cpu, cpu_online_mask) {
- if (bufs[cpu])
- __free_pages(bufs[cpu], MISALIGNED_BUFFER_ORDER);
- }
-
- kfree(bufs);
- return 0;
-}
-#else /* CONFIG_RISCV_PROBE_UNALIGNED_ACCESS */
-static int check_unaligned_access_speed_all_cpus(void)
-{
- return 0;
-}
-#endif
-
#ifdef CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS
static void check_vector_unaligned_access(struct work_struct *work __always_unused)
{
@@ -349,7 +352,7 @@ static void check_vector_unaligned_access(struct work_struct *work __always_unused)
pr_warn("cpu%d: rdtime lacks granularity needed to measure unaligned vector access speed\n",
cpu);
- return;
+ goto free;
}
if (word_cycles < byte_cycles)
@@ -363,57 +366,112 @@ static void check_vector_unaligned_access(struct work_struct *work __always_unused)
(speed == RISCV_HWPROBE_MISALIGNED_VECTOR_FAST) ? "fast" : "slow");
per_cpu(vector_misaligned_access, cpu) = speed;
+
+free:
+ __free_pages(page, MISALIGNED_BUFFER_ORDER);
+}
+
+/* Measure unaligned access speed on all CPUs present at boot in parallel. */
+static int __init vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused)
+{
+ schedule_on_each_cpu(check_vector_unaligned_access);
+
+ return 0;
}
+#else /* CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS */
+static int __init vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused)
+{
+ return 0;
+}
+#endif
static int riscv_online_cpu_vec(unsigned int cpu)
{
- if (!has_vector())
+ if (unaligned_vector_speed_param != RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN) {
+ per_cpu(vector_misaligned_access, cpu) = unaligned_vector_speed_param;
return 0;
+ }
- if (per_cpu(vector_misaligned_access, cpu) != RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED)
+#ifdef CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS
+ if (per_cpu(vector_misaligned_access, cpu) != RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN)
return 0;
check_vector_unaligned_access_emulated(NULL);
check_vector_unaligned_access(NULL);
+#endif
+
return 0;
}
-/* Measure unaligned access speed on all CPUs present at boot in parallel. */
-static int vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused)
-{
- schedule_on_each_cpu(check_vector_unaligned_access);
+static const char * const speed_str[] __initconst = { NULL, NULL, "slow", "fast", "unsupported" };
- /*
- * Setup hotplug callbacks for any new CPUs that come online or go
- * offline.
- */
- cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online",
- riscv_online_cpu_vec, NULL);
+static int __init set_unaligned_scalar_speed_param(char *str)
+{
+ if (!strcmp(str, speed_str[RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW]))
+ unaligned_scalar_speed_param = RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW;
+ else if (!strcmp(str, speed_str[RISCV_HWPROBE_MISALIGNED_SCALAR_FAST]))
+ unaligned_scalar_speed_param = RISCV_HWPROBE_MISALIGNED_SCALAR_FAST;
+ else if (!strcmp(str, speed_str[RISCV_HWPROBE_MISALIGNED_SCALAR_UNSUPPORTED]))
+ unaligned_scalar_speed_param = RISCV_HWPROBE_MISALIGNED_SCALAR_UNSUPPORTED;
+ else
+ return -EINVAL;
- return 0;
+ return 1;
}
-#else /* CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS */
-static int vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused)
+__setup("unaligned_scalar_speed=", set_unaligned_scalar_speed_param);
+
+static int __init set_unaligned_vector_speed_param(char *str)
{
- return 0;
+ if (!strcmp(str, speed_str[RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW]))
+ unaligned_vector_speed_param = RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW;
+ else if (!strcmp(str, speed_str[RISCV_HWPROBE_MISALIGNED_VECTOR_FAST]))
+ unaligned_vector_speed_param = RISCV_HWPROBE_MISALIGNED_VECTOR_FAST;
+ else if (!strcmp(str, speed_str[RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED]))
+ unaligned_vector_speed_param = RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED;
+ else
+ return -EINVAL;
+
+ return 1;
}
-#endif
+__setup("unaligned_vector_speed=", set_unaligned_vector_speed_param);
-static int check_unaligned_access_all_cpus(void)
+static int __init check_unaligned_access_all_cpus(void)
{
- bool all_cpus_emulated, all_cpus_vec_unsupported;
+ int cpu;
+
+ if (unaligned_scalar_speed_param == RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN &&
+ !check_unaligned_access_emulated_all_cpus()) {
+ check_unaligned_access_speed_all_cpus();
+ } else {
+ pr_info("scalar unaligned access speed set to '%s' by command line\n",
+ speed_str[unaligned_scalar_speed_param]);
+ for_each_online_cpu(cpu)
+ per_cpu(misaligned_access_speed, cpu) = unaligned_scalar_speed_param;
+ }
- all_cpus_emulated = check_unaligned_access_emulated_all_cpus();
- all_cpus_vec_unsupported = check_vector_unaligned_access_emulated_all_cpus();
+ if (!has_vector())
+ unaligned_vector_speed_param = RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED;
- if (!all_cpus_vec_unsupported &&
+ if (unaligned_vector_speed_param == RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN &&
+ !check_vector_unaligned_access_emulated_all_cpus() &&
IS_ENABLED(CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS)) {
kthread_run(vec_check_unaligned_access_speed_all_cpus,
NULL, "vec_check_unaligned_access_speed_all_cpus");
+ } else {
+ pr_info("vector unaligned access speed set to '%s' by command line\n",
+ speed_str[unaligned_vector_speed_param]);
+ for_each_online_cpu(cpu)
+ per_cpu(vector_misaligned_access, cpu) = unaligned_vector_speed_param;
}
- if (!all_cpus_emulated)
- return check_unaligned_access_speed_all_cpus();
+ /*
+ * Setup hotplug callbacks for any new CPUs that come online or go
+ * offline.
+ */
+ cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online",
+ riscv_online_cpu, riscv_offline_cpu);
+ cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online",
+ riscv_online_cpu_vec, NULL);
return 0;
}
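
With the two __setup() handlers above, both probes can be bypassed from the kernel command line; the accepted values are exactly the non-NULL speed_str entries. For example, a board known to have fast scalar and no vector misaligned support could boot with:

    unaligned_scalar_speed=fast unaligned_vector_speed=unsupported

A handler returns 1 only for a recognized value, so a typo is not silently accepted as a speed setting.
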
diff --git a/arch/riscv/kernel/vec-copy-unaligned.S b/arch/riscv/kernel/vec-copy-unaligned.S
index d16f19f1b3b6..7ce4de6f6e69 100644
--- a/arch/riscv/kernel/vec-copy-unaligned.S
+++ b/arch/riscv/kernel/vec-copy-unaligned.S
@@ -11,7 +11,7 @@
#define WORD_SEW CONCATENATE(e, WORD_EEW)
#define VEC_L CONCATENATE(vle, WORD_EEW).v
-#define VEC_S CONCATENATE(vle, WORD_EEW).v
+#define VEC_S CONCATENATE(vse, WORD_EEW).v
/* void __riscv_copy_vec_words_unaligned(void *, const void *, size_t) */
/* Performs a memcpy without aligning buffers, using word loads and stores. */
diff --git a/arch/riscv/kernel/vendor_extensions.c b/arch/riscv/kernel/vendor_extensions.c
index a31ff84740eb..9feb7f67a0a3 100644
--- a/arch/riscv/kernel/vendor_extensions.c
+++ b/arch/riscv/kernel/vendor_extensions.c
@@ -61,6 +61,6 @@ bool __riscv_isa_vendor_extension_available(int cpu, unsigned long vendor, unsigned int bit)
if (bit >= RISCV_ISA_VENDOR_EXT_MAX)
return false;
- return test_bit(bit, bmap->isa) ? true : false;
+ return test_bit(bit, bmap->isa);
}
EXPORT_SYMBOL_GPL(__riscv_isa_vendor_extension_available);
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
index 002ca58dd998..61bd5ba6680a 100644
--- a/arch/riscv/kernel/vmlinux.lds.S
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -97,6 +97,9 @@ SECTIONS
{
EXIT_DATA
}
+
+ RUNTIME_CONST_VARIABLES
+
PERCPU_SECTION(L1_CACHE_BYTES)
.rel.dyn : {