author		Evan Green <evan@rivosinc.com>		2023-04-07 16:11:03 -0700
committer	Palmer Dabbelt <palmer@rivosinc.com>	2023-04-18 15:48:18 -0700
commit		aa5af0aa90bad3f1cad5a90ee5eecd92ac9f3096 (patch)
tree		4047028646908985a844d3423b5213009f59bd42 /arch/riscv/kernel/sys_riscv.c
parent		287dcc2b0c831d3e3887421f68e3db6b8b8f4eb1 (diff)
RISC-V: Add hwprobe vDSO function and data
Add a vDSO function __vdso_riscv_hwprobe, which can sit in front of the
riscv_hwprobe syscall and answer common queries. We stash a copy of
static answers for the "all CPUs" case in the vDSO data page. This data
is private to the vDSO, so we can decide later to change what's stored
there or under what conditions we defer to the syscall. Currently all
data can be discovered at boot, so the vDSO function answers all queries
when the cpumask is set to the "all CPUs" hint.

There's also a boolean in the data that lets the vDSO function know that
all CPUs are the same. In that case, the vDSO will also answer queries
for arbitrary CPU masks in addition to the "all CPUs" hint.

Signed-off-by: Evan Green <evan@rivosinc.com>
Link: https://lore.kernel.org/r/20230407231103.2622178-7-evan@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
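[Editor's note: the vDSO-side consumer of this data lands in
arch/riscv/kernel/vdso/hwprobe.c later in the series. The sketch below is
an illustrative reconstruction of the behavior the message describes, not
the verbatim patch; riscv_hwprobe() here stands in for the real syscall
fallback stub.]

	int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
				 size_t cpu_count, unsigned long *cpus,
				 unsigned int flags)
	{
		const struct vdso_data *vd = __arch_get_vdso_data();
		const struct arch_vdso_data *avd = &vd->arch_data;
		bool all_cpus = !cpus && !cpu_count;	/* the "all CPUs" hint */
		struct riscv_hwprobe *p = pairs;
		struct riscv_hwprobe *end = pairs + pair_count;

		/*
		 * Defer to the syscall for any flags, and for arbitrary
		 * cpumasks unless boot-time probing showed every hart to
		 * be identical.
		 */
		if (flags != 0 || (!all_cpus && !avd->homogeneous_cpus))
			return riscv_hwprobe(pairs, pair_count, cpu_count,
					     cpus, flags);

		/* Answer from the static copy stashed in the vDSO data page. */
		for (; p < end; p++) {
			if (p->key >= 0 && p->key <= RISCV_HWPROBE_MAX_KEY) {
				p->value = avd->all_cpu_hwprobe_values[p->key];
			} else {
				p->key = -1;
				p->value = 0;
			}
		}

		return 0;
	}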
Diffstat (limited to 'arch/riscv/kernel/sys_riscv.c')
-rw-r--r--	arch/riscv/kernel/sys_riscv.c	45
1 file changed, 45 insertions(+), 0 deletions(-)
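[Editor's note: the initcall in the diff below fills in a vDSO-private
structure reached through vd->arch_data. Its shape, defined in
arch/riscv/include/asm/vdso/data.h elsewhere in this series, is roughly
as follows; this is sketched from the accessors used in the diff, not
copied from the patch.]

	struct arch_vdso_data {
		/* Boot-time answers for the "all CPUs" case, indexed by key. */
		__u64 all_cpu_hwprobe_values[RISCV_HWPROBE_MAX_KEY + 1];
		/* Set when every hart reported identical ID registers. */
		__u8 homogeneous_cpus;
	};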
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
index 55389e7595f6..849b4170629d 100644
--- a/arch/riscv/kernel/sys_riscv.c
+++ b/arch/riscv/kernel/sys_riscv.c
@@ -14,6 +14,7 @@
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm-generic/mman-common.h>
+#include <vdso/vsyscall.h>

static long riscv_sys_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
@@ -243,6 +244,50 @@ static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
return 0;
}

+#ifdef CONFIG_MMU
+
+static int __init init_hwprobe_vdso_data(void)
+{
+ struct vdso_data *vd = __arch_get_k_vdso_data();
+ struct arch_vdso_data *avd = &vd->arch_data;
+ u64 id_bitsmash = 0;
+ struct riscv_hwprobe pair;
+ int key;
+
+ /*
+ * Initialize vDSO data with the answers for the "all CPUs" case, to
+ * save a syscall in the common case.
+ */
+ for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) {
+ pair.key = key;
+ hwprobe_one_pair(&pair, cpu_online_mask);
+
+ WARN_ON_ONCE(pair.key < 0);
+
+ avd->all_cpu_hwprobe_values[key] = pair.value;
+ /*
+ * Smash together the vendor, arch, and impl IDs to see if
+ * they're all 0 or any negative.
+ */
+ if (key <= RISCV_HWPROBE_KEY_MIMPID)
+ id_bitsmash |= pair.value;
+ }
+
+ /*
+ * If the arch, vendor, and implementation ID are all the same across
+ * all harts, then assume all CPUs are the same, and allow the vDSO to
+ * answer queries for arbitrary masks. However if all values are 0 (not
+ * populated) or any value returns -1 (varies across CPUs), then the
+ * vDSO should defer to the kernel for exotic cpu masks.
+ */
+ avd->homogeneous_cpus = (id_bitsmash > 0);
+ return 0;
+}
+
+arch_initcall_sync(init_hwprobe_vdso_data);
+
+#endif /* CONFIG_MMU */
+
SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
size_t, pair_count, size_t, cpu_count, unsigned long __user *,
cpus, unsigned int, flags)
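
[Editor's note: from userspace, the fast path is reached by calling the
vDSO symbol rather than the raw syscall. A minimal sketch follows,
assuming a glibc that exposes the already-mapped vDSO in the link map
under the soname "linux-vdso.so.1"; the struct is mirrored inline for
self-containment, where real code should use <asm/hwprobe.h> from
sufficiently new kernel headers.]

	#include <dlfcn.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors the uapi struct riscv_hwprobe for illustration. */
	struct riscv_hwprobe {
		int64_t key;
		uint64_t value;
	};

	typedef int (*hwprobe_fn_t)(struct riscv_hwprobe *pairs,
				    size_t pair_count, size_t cpu_count,
				    unsigned long *cpus, unsigned int flags);

	int main(void)
	{
		/* The vDSO is already mapped; RTLD_NOLOAD just returns a handle. */
		void *vdso = dlopen("linux-vdso.so.1", RTLD_NOW | RTLD_NOLOAD);
		hwprobe_fn_t hwprobe;
		struct riscv_hwprobe pair = {
			.key = 0,	/* RISCV_HWPROBE_KEY_MVENDORID */
		};

		if (!vdso)
			return 1;

		hwprobe = (hwprobe_fn_t)dlsym(vdso, "__vdso_riscv_hwprobe");
		if (!hwprobe)
			return 1;

		/*
		 * cpu_count == 0 and cpus == NULL is the "all CPUs" hint,
		 * which the vDSO data page can answer without entering the
		 * kernel.
		 */
		if (hwprobe(&pair, 1, 0, NULL, 0) == 0)
			printf("mvendorid: 0x%llx\n",
			       (unsigned long long)pair.value);

		return 0;
	}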