Diffstat (limited to 'arch')
-rw-r--r-- arch/Kconfig | 7
-rw-r--r-- arch/alpha/kernel/srmcons.c | 64
-rw-r--r-- arch/arc/kernel/intc-arcv2.c | 2
-rw-r--r-- arch/arc/kernel/intc-compact.c | 2
-rw-r--r-- arch/arm/Kconfig | 3
-rw-r--r-- arch/arm/Makefile | 1
-rw-r--r-- arch/arm/include/asm/vmlinux.lds.h | 14
-rw-r--r-- arch/arm/kernel/smp.c | 3
-rw-r--r-- arch/arm/kernel/vmlinux-xip.lds.S | 2
-rw-r--r-- arch/arm/kernel/vmlinux.lds.S | 2
-rw-r--r-- arch/arm/mach-footbridge/dc21285.c | 2
-rw-r--r-- arch/arm/mach-pxa/irq.c | 2
-rw-r--r-- arch/arm/mach-pxa/sharpsl_pm.c | 4
-rw-r--r-- arch/arm64/Kconfig | 1
-rw-r--r-- arch/arm64/Makefile | 2
-rw-r--r-- arch/arm64/include/asm/cputype.h | 2
-rw-r--r-- arch/arm64/include/asm/pgtable.h | 5
-rw-r--r-- arch/arm64/include/asm/traps.h | 4
-rw-r--r-- arch/arm64/kernel/compat_alignment.c | 2
-rw-r--r-- arch/arm64/kernel/proton-pack.c | 1
-rw-r--r-- arch/arm64/kernel/vdso.c | 9
-rw-r--r-- arch/arm64/mm/mmu.c | 3
-rw-r--r-- arch/csky/include/asm/pgalloc.h | 7
-rw-r--r-- arch/hexagon/include/asm/pgalloc.h | 7
-rw-r--r-- arch/loongarch/Kconfig | 7
-rw-r--r-- arch/loongarch/configs/loongson3_defconfig | 11
-rw-r--r-- arch/loongarch/include/asm/cache.h | 2
-rw-r--r-- arch/loongarch/include/asm/irq.h | 2
-rw-r--r-- arch/loongarch/include/asm/pgalloc.h | 7
-rw-r--r-- arch/loongarch/include/asm/stacktrace.h | 3
-rw-r--r-- arch/loongarch/include/asm/unwind_hints.h | 10
-rw-r--r-- arch/loongarch/kernel/env.c | 2
-rw-r--r-- arch/loongarch/kernel/kgdb.c | 5
-rw-r--r-- arch/loongarch/net/bpf_jit.c | 12
-rw-r--r-- arch/loongarch/net/bpf_jit.h | 5
-rw-r--r-- arch/loongarch/vdso/Makefile | 3
-rw-r--r-- arch/loongarch/vdso/vgetrandom-chacha.S | 13
-rw-r--r-- arch/m68k/amiga/amisound.c | 2
-rw-r--r-- arch/m68k/include/asm/sun3_pgalloc.h | 7
-rw-r--r-- arch/m68k/mac/macboing.c | 4
-rw-r--r-- arch/microblaze/mm/init.c | 2
-rw-r--r-- arch/mips/Kconfig | 1
-rw-r--r-- arch/mips/Makefile | 4
-rw-r--r-- arch/mips/Makefile.postlink | 2
-rw-r--r-- arch/mips/cavium-octeon/octeon-irq.c | 6
-rw-r--r-- arch/mips/include/asm/pgalloc.h | 7
-rw-r--r-- arch/mips/pci/pci-xtalk-bridge.c | 2
-rw-r--r-- arch/mips/sgi-ip22/ip22-reset.c | 2
-rw-r--r-- arch/mips/sgi-ip27/ip27-irq.c | 2
-rw-r--r-- arch/mips/sgi-ip30/ip30-irq.c | 2
-rw-r--r-- arch/nios2/Kbuild | 2
-rw-r--r-- arch/nios2/boot/dts/Makefile | 4
-rw-r--r-- arch/nios2/include/asm/pgalloc.h | 7
-rw-r--r-- arch/nios2/kernel/irq.c | 2
-rw-r--r-- arch/nios2/kernel/prom.c | 2
-rw-r--r-- arch/nios2/platform/Kconfig.platform | 11
-rw-r--r-- arch/openrisc/include/asm/pgalloc.h | 7
-rw-r--r-- arch/powerpc/configs/skiroot_defconfig | 1
-rw-r--r-- arch/powerpc/include/asm/copro.h | 6
-rw-r--r-- arch/powerpc/include/asm/device.h | 3
-rw-r--r-- arch/powerpc/include/asm/pnv-pci.h | 17
-rw-r--r-- arch/powerpc/kvm/book3s_hv.c | 2
-rw-r--r-- arch/powerpc/kvm/book3s_xive.c | 2
-rw-r--r-- arch/powerpc/kvm/booke.c | 4
-rw-r--r-- arch/powerpc/mm/book3s64/hash_native.c | 13
-rw-r--r-- arch/powerpc/mm/book3s64/hash_utils.c | 10
-rw-r--r-- arch/powerpc/mm/book3s64/pgtable.c | 1
-rw-r--r-- arch/powerpc/mm/book3s64/slice.c | 6
-rw-r--r-- arch/powerpc/mm/copro_fault.c | 11
-rw-r--r-- arch/powerpc/platforms/44x/uic.c | 2
-rw-r--r-- arch/powerpc/platforms/52xx/mpc52xx_pic.c | 2
-rw-r--r-- arch/powerpc/platforms/amigaone/setup.c | 2
-rw-r--r-- arch/powerpc/platforms/cell/spufs/gang.c | 1
-rw-r--r-- arch/powerpc/platforms/cell/spufs/inode.c | 63
-rw-r--r-- arch/powerpc/platforms/cell/spufs/sched.c | 6
-rw-r--r-- arch/powerpc/platforms/cell/spufs/spufs.h | 2
-rw-r--r-- arch/powerpc/platforms/chrp/setup.c | 2
-rw-r--r-- arch/powerpc/platforms/embedded6xx/flipper-pic.c | 2
-rw-r--r-- arch/powerpc/platforms/pasemi/setup.c | 2
-rw-r--r-- arch/powerpc/platforms/powermac/low_i2c.c | 2
-rw-r--r-- arch/powerpc/platforms/powermac/pic.c | 2
-rw-r--r-- arch/powerpc/platforms/powernv/Makefile | 1
-rw-r--r-- arch/powerpc/platforms/powernv/pci-cxl.c | 153
-rw-r--r-- arch/powerpc/platforms/powernv/pci-ioda.c | 45
-rw-r--r-- arch/powerpc/platforms/powernv/pci.c | 61
-rw-r--r-- arch/powerpc/platforms/powernv/pci.h | 2
-rw-r--r-- arch/powerpc/platforms/ps3/interrupt.c | 2
-rw-r--r-- arch/powerpc/platforms/pseries/msi.c | 2
-rw-r--r-- arch/powerpc/sysdev/ehv_pic.c | 2
-rw-r--r-- arch/powerpc/sysdev/ipic.c | 2
-rw-r--r-- arch/powerpc/sysdev/mpic.c | 2
-rw-r--r-- arch/powerpc/sysdev/xics/xics-common.c | 2
-rw-r--r-- arch/powerpc/sysdev/xive/common.c | 2
-rw-r--r-- arch/riscv/Kbuild | 1
-rw-r--r-- arch/riscv/Kconfig | 85
-rw-r--r-- arch/riscv/Kconfig.socs | 2
-rw-r--r-- arch/riscv/Makefile | 3
-rw-r--r-- arch/riscv/Makefile.postlink | 11
-rw-r--r-- arch/riscv/boot/Makefile | 5
-rw-r--r-- arch/riscv/boot/dts/Makefile | 2
-rw-r--r-- arch/riscv/configs/defconfig | 2
-rw-r--r-- arch/riscv/configs/nommu_k210_defconfig | 2
-rw-r--r-- arch/riscv/configs/nommu_k210_sdcard_defconfig | 2
-rw-r--r-- arch/riscv/errata/Makefile | 6
-rw-r--r-- arch/riscv/include/asm/arch_hweight.h | 6
-rw-r--r-- arch/riscv/include/asm/asm.h | 1
-rw-r--r-- arch/riscv/include/asm/bitops.h | 4
-rw-r--r-- arch/riscv/include/asm/checksum.h | 3
-rw-r--r-- arch/riscv/include/asm/cmpxchg.h | 38
-rw-r--r-- arch/riscv/include/asm/cpufeature.h | 7
-rw-r--r-- arch/riscv/include/asm/ftrace.h | 7
-rw-r--r-- arch/riscv/include/asm/hwcap.h | 5
-rw-r--r-- arch/riscv/include/asm/hwprobe.h | 2
-rw-r--r-- arch/riscv/include/asm/insn-def.h | 3
-rw-r--r-- arch/riscv/include/asm/page.h | 27
-rw-r--r-- arch/riscv/include/asm/pgalloc.h | 26
-rw-r--r-- arch/riscv/include/asm/pgtable.h | 55
-rw-r--r-- arch/riscv/include/asm/ptrace.h | 18
-rw-r--r-- arch/riscv/include/asm/runtime-const.h | 268
-rw-r--r-- arch/riscv/include/asm/suspend.h | 4
-rw-r--r-- arch/riscv/include/uapi/asm/hwprobe.h | 9
-rw-r--r-- arch/riscv/include/uapi/asm/kvm.h | 2
-rw-r--r-- arch/riscv/kernel/asm-offsets.c | 1
-rw-r--r-- arch/riscv/kernel/cpufeature.c | 197
-rw-r--r-- arch/riscv/kernel/elf_kexec.c | 3
-rw-r--r-- arch/riscv/kernel/ftrace.c | 6
-rw-r--r-- arch/riscv/kernel/jump_label.c | 4
-rw-r--r-- arch/riscv/kernel/mcount.S | 24
-rw-r--r-- arch/riscv/kernel/setup.c | 5
-rw-r--r-- arch/riscv/kernel/smp.c | 2
-rw-r--r-- arch/riscv/kernel/smpboot.c | 4
-rw-r--r-- arch/riscv/kernel/stacktrace.c | 2
-rw-r--r-- arch/riscv/kernel/suspend.c | 14
-rw-r--r-- arch/riscv/kernel/sys_hwprobe.c | 15
-rw-r--r-- arch/riscv/kernel/traps_misaligned.c | 14
-rw-r--r-- arch/riscv/kernel/unaligned_access_speed.c | 242
-rw-r--r-- arch/riscv/kernel/vec-copy-unaligned.S | 2
-rw-r--r-- arch/riscv/kernel/vendor_extensions.c | 2
-rw-r--r-- arch/riscv/kernel/vmlinux.lds.S | 3
-rw-r--r-- arch/riscv/kvm/vcpu_onereg.c | 4
-rw-r--r-- arch/riscv/lib/csum.c | 21
-rw-r--r-- arch/riscv/lib/strcmp.S | 5
-rw-r--r-- arch/riscv/lib/strlen.S | 5
-rw-r--r-- arch/riscv/lib/strncmp.S | 5
-rw-r--r-- arch/riscv/mm/context.c | 2
-rw-r--r-- arch/riscv/mm/hugetlbpage.c | 76
-rw-r--r-- arch/riscv/mm/init.c | 97
-rw-r--r-- arch/riscv/mm/physaddr.c | 2
-rw-r--r-- arch/riscv/mm/tlbflush.c | 35
-rw-r--r-- arch/riscv/purgatory/entry.S | 1
-rw-r--r-- arch/s390/Kconfig | 3
-rw-r--r-- arch/s390/Makefile | 2
-rw-r--r-- arch/s390/Makefile.postlink | 4
-rw-r--r-- arch/s390/hypfs/hypfs_diag_fs.c | 2
-rw-r--r-- arch/s390/include/asm/kvm_host.h | 339
-rw-r--r-- arch/s390/include/asm/kvm_host_types.h | 348
-rw-r--r-- arch/s390/include/asm/lowcore.h | 4
-rw-r--r-- arch/s390/include/asm/processor.h | 19
-rw-r--r-- arch/s390/include/asm/thread_info.h | 3
-rw-r--r-- arch/s390/include/asm/topology.h | 6
-rw-r--r-- arch/s390/kernel/asm-offsets.c | 7
-rw-r--r-- arch/s390/kernel/dumpstack.c | 1
-rw-r--r-- arch/s390/kernel/early.c | 1
-rw-r--r-- arch/s390/kernel/entry.S | 2
-rw-r--r-- arch/s390/kernel/setup.c | 1
-rw-r--r-- arch/s390/kernel/smp.c | 9
-rw-r--r-- arch/s390/kernel/stacktrace.c | 1
-rw-r--r-- arch/s390/kernel/time.c | 2
-rw-r--r-- arch/s390/kernel/vdso.c | 2
-rw-r--r-- arch/s390/mm/cmm.c | 6
-rw-r--r-- arch/s390/mm/fault.c | 1
-rw-r--r-- arch/s390/mm/pfault.c | 1
-rw-r--r-- arch/s390/pci/pci_bus.c | 27
-rw-r--r-- arch/sh/configs/se7712_defconfig | 1
-rw-r--r-- arch/sh/configs/se7721_defconfig | 1
-rw-r--r-- arch/sh/configs/sh7710voipgw_defconfig | 1
-rw-r--r-- arch/sh/configs/titan_defconfig | 1
-rw-r--r-- arch/sh/drivers/pci/common.c | 4
-rw-r--r-- arch/sh/include/asm/pgalloc.h | 7
-rw-r--r-- arch/sh/kernel/vmlinux.lds.S | 15
-rw-r--r-- arch/sparc/kernel/led.c | 4
-rw-r--r-- arch/um/Kconfig | 1
-rw-r--r-- arch/um/drivers/Kconfig | 12
-rw-r--r-- arch/um/drivers/Makefile | 3
-rw-r--r-- arch/um/drivers/random.c | 2
-rw-r--r-- arch/um/drivers/rtc_user.c | 2
-rw-r--r-- arch/um/drivers/ubd.h | 6
-rw-r--r-- arch/um/drivers/ubd_kern.c | 25
-rw-r--r-- arch/um/drivers/ubd_user.c | 14
-rw-r--r-- arch/um/drivers/vector_kern.c | 2
-rw-r--r-- arch/um/drivers/virt-pci.c | 699
-rw-r--r-- arch/um/drivers/virt-pci.h | 41
-rw-r--r-- arch/um/drivers/virtio_pcidev.c | 628
-rw-r--r-- arch/um/include/asm/Kbuild | 1
-rw-r--r-- arch/um/include/asm/pgalloc.h | 21
-rw-r--r-- arch/um/include/asm/processor-generic.h | 2
-rw-r--r-- arch/um/include/asm/uaccess.h | 20
-rw-r--r-- arch/um/include/linux/time-internal.h | 2
-rw-r--r-- arch/um/include/shared/arch.h | 2
-rw-r--r-- arch/um/include/shared/as-layout.h | 2
-rw-r--r-- arch/um/include/shared/irq_user.h | 3
-rw-r--r-- arch/um/include/shared/kern_util.h | 12
-rw-r--r-- arch/um/include/shared/os.h | 8
-rw-r--r-- arch/um/include/shared/sigio.h | 1
-rw-r--r-- arch/um/kernel/Makefile | 2
-rw-r--r-- arch/um/kernel/irq.c | 3
-rw-r--r-- arch/um/kernel/maccess.c | 19
-rw-r--r-- arch/um/kernel/mem.c | 11
-rw-r--r-- arch/um/kernel/sigio.c | 26
-rw-r--r-- arch/um/kernel/skas/syscall.c | 11
-rw-r--r-- arch/um/kernel/trap.c | 28
-rw-r--r-- arch/um/kernel/um_arch.c | 3
-rw-r--r-- arch/um/os-Linux/helper.c | 67
-rw-r--r-- arch/um/os-Linux/process.c | 51
-rw-r--r-- arch/um/os-Linux/sigio.c | 352
-rw-r--r-- arch/um/os-Linux/signal.c | 4
-rw-r--r-- arch/um/os-Linux/skas/process.c | 8
-rw-r--r-- arch/x86/Kconfig | 3
-rw-r--r-- arch/x86/Makefile | 6
-rw-r--r-- arch/x86/Makefile.postlink | 40
-rw-r--r-- arch/x86/Makefile.um | 7
-rw-r--r-- arch/x86/boot/Makefile | 1
-rw-r--r-- arch/x86/boot/compressed/Makefile | 10
-rw-r--r-- arch/x86/coco/tdx/tdx.c | 34
-rw-r--r-- arch/x86/entry/vdso/vma.c | 5
-rw-r--r-- arch/x86/include/asm/arch_hweight.h | 6
-rw-r--r-- arch/x86/include/asm/iosf_mbi.h | 7
-rw-r--r-- arch/x86/include/asm/irqflags.h | 40
-rw-r--r-- arch/x86/include/asm/paravirt.h | 20
-rw-r--r-- arch/x86/include/asm/paravirt_types.h | 3
-rw-r--r-- arch/x86/include/asm/smap.h | 23
-rw-r--r-- arch/x86/include/asm/tdx.h | 4
-rw-r--r-- arch/x86/include/asm/xen/hypercall.h | 6
-rw-r--r-- arch/x86/kernel/apic/vector.c | 2
-rw-r--r-- arch/x86/kernel/cpu/mce/core.c | 6
-rw-r--r-- arch/x86/kernel/paravirt.c | 14
-rw-r--r-- arch/x86/kernel/process.c | 2
-rw-r--r-- arch/x86/kvm/xen.c | 4
-rw-r--r-- arch/x86/lib/copy_user_64.S | 18
-rw-r--r-- arch/x86/mm/init_64.c | 15
-rw-r--r-- arch/x86/mm/pat/set_memory.c | 1
-rw-r--r-- arch/x86/mm/pgtable.c | 8
-rw-r--r-- arch/x86/platform/intel/iosf_mbi.c | 13
-rw-r--r-- arch/x86/power/cpu.c | 14
-rw-r--r-- arch/x86/tools/insn_decoder_test.c | 2
-rw-r--r-- arch/x86/um/asm/barrier.h | 6
-rw-r--r-- arch/x86/um/asm/module.h | 24
-rw-r--r-- arch/x86/um/os-Linux/mcontext.c | 15
-rw-r--r-- arch/x86/um/shared/sysdep/faultinfo_32.h | 12
-rw-r--r-- arch/x86/um/shared/sysdep/faultinfo_64.h | 12
-rw-r--r-- arch/x86/um/vdso/vma.c | 17
-rw-r--r-- arch/xtensa/platforms/iss/console.c | 2
-rw-r--r-- arch/xtensa/platforms/iss/network.c | 2
253 files changed, 2927 insertions(+), 2546 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index 9f6eb09ef12d..b0adb665041f 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -1699,6 +1699,13 @@ config ARCH_HAS_KERNEL_FPU_SUPPORT
Architectures that select this option can run floating-point code in
the kernel, as described in Documentation/core-api/floating-point.rst.
+config ARCH_VMLINUX_NEEDS_RELOCS
+ bool
+ help
+ Whether the architecture needs vmlinux to be built with static
+ relocations preserved. This is used by some architectures to
+ construct bespoke relocation tables for KASLR.
+
source "kernel/gcov/Kconfig"
source "scripts/gcc-plugins/Kconfig"
diff --git a/arch/alpha/kernel/srmcons.c b/arch/alpha/kernel/srmcons.c
index 3e61073f4b30..a89ce84371f9 100644
--- a/arch/alpha/kernel/srmcons.c
+++ b/arch/alpha/kernel/srmcons.c
@@ -177,7 +177,7 @@ srmcons_close(struct tty_struct *tty, struct file *filp)
if (tty->count == 1) {
port->tty = NULL;
- del_timer(&srmconsp->timer);
+ timer_delete(&srmconsp->timer);
}
spin_unlock_irqrestore(&port->lock, flags);
@@ -196,40 +196,44 @@ static const struct tty_operations srmcons_ops = {
static int __init
srmcons_init(void)
{
+ struct tty_driver *driver;
+ int err;
+
timer_setup(&srmcons_singleton.timer, srmcons_receive_chars, 0);
- if (srm_is_registered_console) {
- struct tty_driver *driver;
- int err;
-
- driver = tty_alloc_driver(MAX_SRM_CONSOLE_DEVICES, 0);
- if (IS_ERR(driver))
- return PTR_ERR(driver);
-
- tty_port_init(&srmcons_singleton.port);
-
- driver->driver_name = "srm";
- driver->name = "srm";
- driver->major = 0; /* dynamic */
- driver->minor_start = 0;
- driver->type = TTY_DRIVER_TYPE_SYSTEM;
- driver->subtype = SYSTEM_TYPE_SYSCONS;
- driver->init_termios = tty_std_termios;
- tty_set_operations(driver, &srmcons_ops);
- tty_port_link_device(&srmcons_singleton.port, driver, 0);
- err = tty_register_driver(driver);
- if (err) {
- tty_driver_kref_put(driver);
- tty_port_destroy(&srmcons_singleton.port);
- return err;
- }
- srmcons_driver = driver;
- }
- return -ENODEV;
+ if (!srm_is_registered_console)
+ return -ENODEV;
+
+ driver = tty_alloc_driver(MAX_SRM_CONSOLE_DEVICES, 0);
+ if (IS_ERR(driver))
+ return PTR_ERR(driver);
+
+ tty_port_init(&srmcons_singleton.port);
+
+ driver->driver_name = "srm";
+ driver->name = "srm";
+ driver->major = 0; /* dynamic */
+ driver->minor_start = 0;
+ driver->type = TTY_DRIVER_TYPE_SYSTEM;
+ driver->subtype = SYSTEM_TYPE_SYSCONS;
+ driver->init_termios = tty_std_termios;
+ tty_set_operations(driver, &srmcons_ops);
+ tty_port_link_device(&srmcons_singleton.port, driver, 0);
+ err = tty_register_driver(driver);
+ if (err)
+ goto err_free_drv;
+
+ srmcons_driver = driver;
+
+ return 0;
+err_free_drv:
+ tty_driver_kref_put(driver);
+ tty_port_destroy(&srmcons_singleton.port);
+
+ return err;
}
device_initcall(srmcons_init);
-
/*
* The console driver
*/
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index f324f0e3341a..fea29d9d18d6 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -178,7 +178,7 @@ init_onchip_IRQ(struct device_node *intc, struct device_node *parent)
* Needed for primary domain lookup to succeed
* This is a primary irqchip, and can never have a parent
*/
- irq_set_default_host(root_domain);
+ irq_set_default_domain(root_domain);
#ifdef CONFIG_SMP
irq_create_mapping(root_domain, IPI_IRQ);
diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c
index 6885e422870e..1d2ff1c6a61b 100644
--- a/arch/arc/kernel/intc-compact.c
+++ b/arch/arc/kernel/intc-compact.c
@@ -121,7 +121,7 @@ init_onchip_IRQ(struct device_node *intc, struct device_node *parent)
* Needed for primary domain lookup to succeed
* This is a primary irqchip, and can never have a parent
*/
- irq_set_default_host(root_domain);
+ irq_set_default_domain(root_domain);
return 0;
}
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 835b5f100e92..25ed6f1a7c7a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -121,7 +121,7 @@ config ARM
select HAVE_KERNEL_XZ
select HAVE_KPROBES if !XIP_KERNEL && !CPU_ENDIAN_BE32 && !CPU_V7M
select HAVE_KRETPROBES if HAVE_KPROBES
- select HAVE_LD_DEAD_CODE_DATA_ELIMINATION if (LD_VERSION >= 23600 || LD_IS_LLD)
+ select HAVE_LD_DEAD_CODE_DATA_ELIMINATION if (LD_VERSION >= 23600 || LD_CAN_USE_KEEP_IN_OVERLAY)
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
select HAVE_OPTPROBES if !THUMB2_KERNEL
@@ -133,6 +133,7 @@ config ARM
select MMU_GATHER_RCU_TABLE_FREE if SMP && ARM_LPAE
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RSEQ
+ select HAVE_RUST if CPU_LITTLE_ENDIAN && CPU_32v7
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UID16
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 00ca7886b18e..4808d3ed98e4 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -150,6 +150,7 @@ endif
KBUILD_CPPFLAGS +=$(cpp-y)
KBUILD_CFLAGS +=$(CFLAGS_ABI) $(CFLAGS_ISA) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm
KBUILD_AFLAGS +=$(CFLAGS_ABI) $(AFLAGS_ISA) -Wa,$(arch-y) $(tune-y) -include asm/unified.h -msoft-float
+KBUILD_RUSTFLAGS += --target=arm-unknown-linux-gnueabi
CHECKFLAGS += -D__arm__
diff --git a/arch/arm/include/asm/vmlinux.lds.h b/arch/arm/include/asm/vmlinux.lds.h
index d60f6e83a9f7..0341973e30e1 100644
--- a/arch/arm/include/asm/vmlinux.lds.h
+++ b/arch/arm/include/asm/vmlinux.lds.h
@@ -19,7 +19,7 @@
#endif
#ifdef CONFIG_MMU
-#define ARM_MMU_KEEP(x) x
+#define ARM_MMU_KEEP(x) KEEP(x)
#define ARM_MMU_DISCARD(x)
#else
#define ARM_MMU_KEEP(x)
@@ -34,6 +34,12 @@
#define NOCROSSREFS
#endif
+#ifdef CONFIG_LD_CAN_USE_KEEP_IN_OVERLAY
+#define OVERLAY_KEEP(x) KEEP(x)
+#else
+#define OVERLAY_KEEP(x) x
+#endif
+
/* Set start/end symbol names to the LMA for the section */
#define ARM_LMA(sym, section) \
sym##_start = LOADADDR(section); \
@@ -125,13 +131,13 @@
__vectors_lma = .; \
OVERLAY 0xffff0000 : NOCROSSREFS AT(__vectors_lma) { \
.vectors { \
- *(.vectors) \
+ OVERLAY_KEEP(*(.vectors)) \
} \
.vectors.bhb.loop8 { \
- *(.vectors.bhb.loop8) \
+ OVERLAY_KEEP(*(.vectors.bhb.loop8)) \
} \
.vectors.bhb.bpiall { \
- *(.vectors.bhb.bpiall) \
+ OVERLAY_KEEP(*(.vectors.bhb.bpiall)) \
} \
} \
ARM_LMA(__vectors, .vectors); \
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 3431c0553f45..50999886a8b5 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -551,7 +551,8 @@ void show_ipi_list(struct seq_file *p, int prec)
if (!ipi_desc[i])
continue;
- seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
+ seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
+ prec >= 4 ? " " : "");
for_each_online_cpu(cpu)
seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));
diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S
index 5eddb75a7174..f2e8d4fac068 100644
--- a/arch/arm/kernel/vmlinux-xip.lds.S
+++ b/arch/arm/kernel/vmlinux-xip.lds.S
@@ -63,7 +63,7 @@ SECTIONS
. = ALIGN(4);
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
__start___ex_table = .;
- ARM_MMU_KEEP(KEEP(*(__ex_table)))
+ ARM_MMU_KEEP(*(__ex_table))
__stop___ex_table = .;
}
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index de373c6c2ae8..d592a203f9c6 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -74,7 +74,7 @@ SECTIONS
. = ALIGN(4);
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
__start___ex_table = .;
- ARM_MMU_KEEP(KEEP(*(__ex_table)))
+ ARM_MMU_KEEP(*(__ex_table))
__stop___ex_table = .;
}
diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c
index f8920d0010de..6521ab3d24fa 100644
--- a/arch/arm/mach-footbridge/dc21285.c
+++ b/arch/arm/mach-footbridge/dc21285.c
@@ -135,7 +135,7 @@ static struct timer_list perr_timer;
static void dc21285_enable_error(struct timer_list *timer)
{
- del_timer(timer);
+ timer_delete(timer);
if (timer == &serr_timer)
enable_irq(IRQ_PCI_SERR);
diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c
index a9ef71008147..d9cadd97748a 100644
--- a/arch/arm/mach-pxa/irq.c
+++ b/arch/arm/mach-pxa/irq.c
@@ -152,7 +152,7 @@ pxa_init_irq_common(struct device_node *node, int irq_nr,
&pxa_irq_ops, NULL);
if (!pxa_irq_domain)
panic("Unable to add PXA IRQ domain\n");
- irq_set_default_host(pxa_irq_domain);
+ irq_set_default_domain(pxa_irq_domain);
for (n = 0; n < irq_nr; n += 32) {
void __iomem *base = irq_base(n >> 5);
diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
index dd930e3a61a4..71b282b146d0 100644
--- a/arch/arm/mach-pxa/sharpsl_pm.c
+++ b/arch/arm/mach-pxa/sharpsl_pm.c
@@ -913,8 +913,8 @@ static void sharpsl_pm_remove(struct platform_device *pdev)
if (sharpsl_pm.machinfo->exit)
sharpsl_pm.machinfo->exit();
- del_timer_sync(&sharpsl_pm.chrg_full_timer);
- del_timer_sync(&sharpsl_pm.ac_timer);
+ timer_delete_sync(&sharpsl_pm.chrg_full_timer);
+ timer_delete_sync(&sharpsl_pm.ac_timer);
}
static struct platform_driver sharpsl_pm_driver = {
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 748c34dc953c..a182295e6f08 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -38,6 +38,7 @@ config ARM64
select ARCH_HAS_KEEPINITRD
select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_MEM_ENCRYPT
+ select ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS
select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
select ARCH_HAS_NONLEAF_PMD_YOUNG if ARM64_HAFT
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 2b25d671365f..1d5dfcd1c13e 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -48,7 +48,7 @@ KBUILD_CFLAGS += $(CC_FLAGS_NO_FPU) \
KBUILD_CFLAGS += $(call cc-disable-warning, psabi)
KBUILD_AFLAGS += $(compat_vdso)
-ifeq ($(call test-ge, $(CONFIG_RUSTC_VERSION), 108500),y)
+ifeq ($(call rustc-min-version, 108500),y)
KBUILD_RUSTFLAGS += --target=aarch64-unknown-none-softfloat
else
KBUILD_RUSTFLAGS += --target=aarch64-unknown-none -Ctarget-feature="-neon"
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index c607e0bf5e0b..d1cc0571798b 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -132,6 +132,7 @@
#define FUJITSU_CPU_PART_A64FX 0x001
#define HISI_CPU_PART_TSV110 0xD01
+#define HISI_CPU_PART_HIP09 0xD02
#define APPLE_CPU_PART_M1_ICESTORM 0x022
#define APPLE_CPU_PART_M1_FIRESTORM 0x023
@@ -218,6 +219,7 @@
#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
#define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX)
#define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110)
+#define MIDR_HISI_HIP09 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_HIP09)
#define MIDR_APPLE_M1_ICESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM)
#define MIDR_APPLE_M1_FIRESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM)
#define MIDR_APPLE_M1_ICESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_PRO)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 84f05f781a70..d3b538be1500 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -633,11 +633,6 @@ static inline pud_t pud_mkhuge(pud_t pud)
#define pud_pfn(pud) ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot) __pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
-#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
-#define pud_special(pte) pte_special(pud_pte(pud))
-#define pud_mkspecial(pte) pte_pud(pte_mkspecial(pud_pte(pud)))
-#endif
-
#define pmd_pgprot pmd_pgprot
static inline pgprot_t pmd_pgprot(pmd_t pmd)
{
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index d780d1bd2eac..82cf1f879c61 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -109,10 +109,9 @@ static inline void arm64_mops_reset_regs(struct user_pt_regs *regs, unsigned lon
int dstreg = ESR_ELx_MOPS_ISS_DESTREG(esr);
int srcreg = ESR_ELx_MOPS_ISS_SRCREG(esr);
int sizereg = ESR_ELx_MOPS_ISS_SIZEREG(esr);
- unsigned long dst, src, size;
+ unsigned long dst, size;
dst = regs->regs[dstreg];
- src = regs->regs[srcreg];
size = regs->regs[sizereg];
/*
@@ -129,6 +128,7 @@ static inline void arm64_mops_reset_regs(struct user_pt_regs *regs, unsigned lon
}
} else {
/* CPY* instruction */
+ unsigned long src = regs->regs[srcreg];
if (!(option_a ^ wrong_option)) {
/* Format is from Option B */
if (regs->pstate & PSR_N_BIT) {
diff --git a/arch/arm64/kernel/compat_alignment.c b/arch/arm64/kernel/compat_alignment.c
index deff21bfa680..b68e1d328d4c 100644
--- a/arch/arm64/kernel/compat_alignment.c
+++ b/arch/arm64/kernel/compat_alignment.c
@@ -368,6 +368,8 @@ int do_compat_alignment_fixup(unsigned long addr, struct pt_regs *regs)
return 1;
}
+ if (!handler)
+ return 1;
type = handler(addr, instr, regs);
if (type == TYPE_ERROR || type == TYPE_FAULT)
diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
index d5d11fd11549..b198dde79e59 100644
--- a/arch/arm64/kernel/proton-pack.c
+++ b/arch/arm64/kernel/proton-pack.c
@@ -901,6 +901,7 @@ static u8 spectre_bhb_loop_affected(void)
MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_GOLD),
+ MIDR_ALL_VERSIONS(MIDR_HISI_HIP09),
{},
};
static const struct midr_range spectre_bhb_k11_list[] = {
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 887ac0b05961..78ddf6bdecad 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -130,7 +130,8 @@ static int __setup_additional_pages(enum vdso_abi abi,
mm->context.vdso = (void *)vdso_base;
ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
VM_READ|VM_EXEC|gp_flags|
- VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
+ VM_SEALED_SYSMAP,
vdso_info[abi].cm);
if (IS_ERR(ret))
goto up_fail;
@@ -256,7 +257,8 @@ static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
*/
ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
VM_READ | VM_EXEC |
- VM_MAYREAD | VM_MAYEXEC,
+ VM_MAYREAD | VM_MAYEXEC |
+ VM_SEALED_SYSMAP,
&aarch32_vdso_maps[AA32_MAP_VECTORS]);
return PTR_ERR_OR_ZERO(ret);
@@ -279,7 +281,8 @@ static int aarch32_sigreturn_setup(struct mm_struct *mm)
*/
ret = _install_special_mapping(mm, addr, PAGE_SIZE,
VM_READ | VM_EXEC | VM_MAYREAD |
- VM_MAYWRITE | VM_MAYEXEC,
+ VM_MAYWRITE | VM_MAYEXEC |
+ VM_SEALED_SYSMAP,
&aarch32_vdso_maps[AA32_MAP_SIGPAGE]);
if (IS_ERR(ret))
goto out;
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index b98f89420713..ea6695d53fb9 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1361,7 +1361,8 @@ int arch_add_memory(int nid, u64 start, u64 size,
__remove_pgd_mapping(swapper_pg_dir,
__phys_to_virt(start), size);
else {
- max_pfn = PFN_UP(start + size);
+ /* Address of hotplugged memory can be smaller */
+ max_pfn = max(max_pfn, PFN_UP(start + size));
max_low_pfn = max_pfn;
}
diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h
index bf8400c28b5a..11055c574968 100644
--- a/arch/csky/include/asm/pgalloc.h
+++ b/arch/csky/include/asm/pgalloc.h
@@ -61,11 +61,8 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
return ret;
}
-#define __pte_free_tlb(tlb, pte, address) \
-do { \
- pagetable_dtor(page_ptdesc(pte)); \
- tlb_remove_page_ptdesc(tlb, page_ptdesc(pte)); \
-} while (0)
+#define __pte_free_tlb(tlb, pte, address) \
+ tlb_remove_ptdesc((tlb), page_ptdesc(pte))
extern void pagetable_init(void);
extern void mmu_init(unsigned long min_pfn, unsigned long max_pfn);
diff --git a/arch/hexagon/include/asm/pgalloc.h b/arch/hexagon/include/asm/pgalloc.h
index 1ee5f5f157ca..937a11ef4c33 100644
--- a/arch/hexagon/include/asm/pgalloc.h
+++ b/arch/hexagon/include/asm/pgalloc.h
@@ -87,10 +87,7 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
max_kernel_seg = pmdindex;
}
-#define __pte_free_tlb(tlb, pte, addr) \
-do { \
- pagetable_dtor((page_ptdesc(pte))); \
- tlb_remove_page_ptdesc((tlb), (page_ptdesc(pte))); \
-} while (0)
+#define __pte_free_tlb(tlb, pte, addr) \
+ tlb_remove_ptdesc((tlb), page_ptdesc(pte))
#endif
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 687502917ae2..067c0b994648 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -30,6 +30,7 @@ config LOONGARCH
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_SET_DIRECT_MAP
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ select ARCH_HAS_UBSAN
select ARCH_HAS_VDSO_ARCH_DATA
select ARCH_INLINE_READ_LOCK if !PREEMPTION
select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION
@@ -177,7 +178,7 @@ config LOONGARCH
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_TIF_NOHZ
- select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP
+ select HAVE_VIRT_CPU_ACCOUNTING_GEN
select IRQ_FORCED_THREADING
select IRQ_LOONGARCH_CPU
select LOCK_MM_AND_FIND_VMA
@@ -387,8 +388,8 @@ config CMDLINE_BOOTLOADER
config CMDLINE_EXTEND
bool "Use built-in to extend bootloader kernel arguments"
help
- The command-line arguments provided during boot will be
- appended to the built-in command line. This is useful in
+ The built-in command line will be appended to the command-
+ line arguments provided during boot. This is useful in
cases where the provided arguments are insufficient and
you don't want to or cannot modify them.
diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig
index c9f564e1d4d9..90f21dfe22b1 100644
--- a/arch/loongarch/configs/loongson3_defconfig
+++ b/arch/loongarch/configs/loongson3_defconfig
@@ -24,9 +24,9 @@ CONFIG_NUMA_BALANCING=y
CONFIG_MEMCG=y
CONFIG_BLK_CGROUP=y
CONFIG_CFS_BANDWIDTH=y
-CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_PIDS=y
CONFIG_CGROUP_RDMA=y
+CONFIG_CGROUP_DMEM=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_HUGETLB=y
CONFIG_CPUSETS=y
@@ -665,6 +665,10 @@ CONFIG_RTW88_8723DE=m
CONFIG_RTW88_8723DU=m
CONFIG_RTW88_8821CE=m
CONFIG_RTW88_8821CU=m
+CONFIG_RTW88_8821AU=m
+CONFIG_RTW88_8812AU=m
+CONFIG_RTW88_8814AE=m
+CONFIG_RTW88_8814AU=m
CONFIG_RTW89=m
CONFIG_RTW89_8851BE=m
CONFIG_RTW89_8852AE=m
@@ -748,6 +752,7 @@ CONFIG_MEDIA_PCI_SUPPORT=y
CONFIG_VIDEO_BT848=m
CONFIG_DVB_BT8XX=m
CONFIG_DRM=y
+CONFIG_DRM_LOAD_EDID_FIRMWARE=y
CONFIG_DRM_RADEON=m
CONFIG_DRM_RADEON_USERPTR=y
CONFIG_DRM_AMDGPU=m
@@ -761,6 +766,7 @@ CONFIG_DRM_LOONGSON=y
CONFIG_FB=y
CONFIG_FB_EFI=y
CONFIG_FB_RADEON=y
+CONFIG_FIRMWARE_EDID=y
CONFIG_LCD_CLASS_DEVICE=y
CONFIG_LCD_PLATFORM=m
# CONFIG_VGA_CONSOLE is not set
@@ -843,6 +849,9 @@ CONFIG_TYPEC_TCPCI=m
CONFIG_TYPEC_UCSI=m
CONFIG_UCSI_ACPI=m
CONFIG_INFINIBAND=m
+CONFIG_EDAC=y
+# CONFIG_EDAC_LEGACY_SYSFS is not set
+CONFIG_EDAC_LOONGSON=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_EFI=y
CONFIG_RTC_DRV_LOONGSON=y
diff --git a/arch/loongarch/include/asm/cache.h b/arch/loongarch/include/asm/cache.h
index 1b6d09617199..aa622c754414 100644
--- a/arch/loongarch/include/asm/cache.h
+++ b/arch/loongarch/include/asm/cache.h
@@ -8,6 +8,8 @@
#define L1_CACHE_SHIFT CONFIG_L1_CACHE_SHIFT
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define ARCH_DMA_MINALIGN (16)
+
#define __read_mostly __section(".data..read_mostly")
#endif /* _ASM_CACHE_H */
diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h
index a0ca84da8541..12bd15578c33 100644
--- a/arch/loongarch/include/asm/irq.h
+++ b/arch/loongarch/include/asm/irq.h
@@ -53,7 +53,7 @@ void spurious_interrupt(void);
#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
void arch_trigger_cpumask_backtrace(const struct cpumask *mask, int exclude_cpu);
-#define MAX_IO_PICS 2
+#define MAX_IO_PICS 8
#define NR_IRQS (64 + NR_VECTORS * (NR_CPUS + MAX_IO_PICS))
struct acpi_vector_group {
diff --git a/arch/loongarch/include/asm/pgalloc.h b/arch/loongarch/include/asm/pgalloc.h
index 7211dff8c969..b58f587f0f0a 100644
--- a/arch/loongarch/include/asm/pgalloc.h
+++ b/arch/loongarch/include/asm/pgalloc.h
@@ -55,11 +55,8 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
return pte;
}
-#define __pte_free_tlb(tlb, pte, address) \
-do { \
- pagetable_dtor(page_ptdesc(pte)); \
- tlb_remove_page_ptdesc((tlb), page_ptdesc(pte)); \
-} while (0)
+#define __pte_free_tlb(tlb, pte, address) \
+ tlb_remove_ptdesc((tlb), page_ptdesc(pte))
#ifndef __PAGETABLE_PMD_FOLDED
diff --git a/arch/loongarch/include/asm/stacktrace.h b/arch/loongarch/include/asm/stacktrace.h
index f23adb15f418..fc8b64773794 100644
--- a/arch/loongarch/include/asm/stacktrace.h
+++ b/arch/loongarch/include/asm/stacktrace.h
@@ -8,6 +8,7 @@
#include <asm/asm.h>
#include <asm/ptrace.h>
#include <asm/loongarch.h>
+#include <asm/unwind_hints.h>
#include <linux/stringify.h>
enum stack_type {
@@ -43,6 +44,7 @@ int get_stack_info(unsigned long stack, struct task_struct *task, struct stack_i
static __always_inline void prepare_frametrace(struct pt_regs *regs)
{
__asm__ __volatile__(
+ UNWIND_HINT_SAVE
/* Save $ra */
STORE_ONE_REG(1)
/* Use $ra to save PC */
@@ -80,6 +82,7 @@ static __always_inline void prepare_frametrace(struct pt_regs *regs)
STORE_ONE_REG(29)
STORE_ONE_REG(30)
STORE_ONE_REG(31)
+ UNWIND_HINT_RESTORE
: "=m" (regs->csr_era)
: "r" (regs->regs)
: "memory");
diff --git a/arch/loongarch/include/asm/unwind_hints.h b/arch/loongarch/include/asm/unwind_hints.h
index a01086ad9dde..2c68bc72736c 100644
--- a/arch/loongarch/include/asm/unwind_hints.h
+++ b/arch/loongarch/include/asm/unwind_hints.h
@@ -23,6 +23,14 @@
UNWIND_HINT sp_reg=ORC_REG_SP type=UNWIND_HINT_TYPE_CALL
.endm
-#endif /* __ASSEMBLY__ */
+#else /* !__ASSEMBLY__ */
+
+#define UNWIND_HINT_SAVE \
+ UNWIND_HINT(UNWIND_HINT_TYPE_SAVE, 0, 0, 0)
+
+#define UNWIND_HINT_RESTORE \
+ UNWIND_HINT(UNWIND_HINT_TYPE_RESTORE, 0, 0, 0)
+
+#endif /* !__ASSEMBLY__ */
#endif /* _ASM_LOONGARCH_UNWIND_HINTS_H */
diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c
index 2f1f5b08638f..27144de5c5fe 100644
--- a/arch/loongarch/kernel/env.c
+++ b/arch/loongarch/kernel/env.c
@@ -68,6 +68,8 @@ static int __init fdt_cpu_clk_init(void)
return -ENODEV;
clk = of_clk_get(np, 0);
+ of_node_put(np);
+
if (IS_ERR(clk))
return -ENODEV;
diff --git a/arch/loongarch/kernel/kgdb.c b/arch/loongarch/kernel/kgdb.c
index 445c452d72a7..7be5b4c0c900 100644
--- a/arch/loongarch/kernel/kgdb.c
+++ b/arch/loongarch/kernel/kgdb.c
@@ -8,6 +8,7 @@
#include <linux/hw_breakpoint.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
+#include <linux/objtool.h>
#include <linux/processor.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
@@ -224,13 +225,13 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
regs->csr_era = pc;
}
-void arch_kgdb_breakpoint(void)
+noinline void arch_kgdb_breakpoint(void)
{
__asm__ __volatile__ ( \
".globl kgdb_breakinst\n\t" \
- "nop\n" \
"kgdb_breakinst:\tbreak 2\n\t"); /* BRK_KDB = 2 */
}
+STACK_FRAME_NON_STANDARD(arch_kgdb_breakpoint);
/*
* Calls linux_debug_hook before the kernel dies. If KGDB is enabled,
diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index ea357a3edc09..fa1500d4aa3e 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -142,6 +142,8 @@ static void build_prologue(struct jit_ctx *ctx)
*/
if (seen_tail_call(ctx) && seen_call(ctx))
move_reg(ctx, TCC_SAVED, REG_TCC);
+ else
+ emit_insn(ctx, nop);
ctx->stack_size = stack_adjust;
}
@@ -905,7 +907,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
move_addr(ctx, t1, func_addr);
emit_insn(ctx, jirl, LOONGARCH_GPR_RA, t1, 0);
- move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);
+
+ if (insn->src_reg != BPF_PSEUDO_CALL)
+ move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);
+
break;
/* tail call */
@@ -930,7 +935,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
{
const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;
- move_imm(ctx, dst, imm64, is32);
+ if (bpf_pseudo_func(insn))
+ move_addr(ctx, dst, imm64);
+ else
+ move_imm(ctx, dst, imm64, is32);
return 1;
}
diff --git a/arch/loongarch/net/bpf_jit.h b/arch/loongarch/net/bpf_jit.h
index 68586338ecf8..f9c569f53949 100644
--- a/arch/loongarch/net/bpf_jit.h
+++ b/arch/loongarch/net/bpf_jit.h
@@ -27,6 +27,11 @@ struct jit_data {
struct jit_ctx ctx;
};
+static inline void emit_nop(union loongarch_instruction *insn)
+{
+ insn->word = INSN_NOP;
+}
+
#define emit_insn(ctx, func, ...) \
do { \
if (ctx->image != NULL) { \
diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile
index 1c26147aff70..ccd2c5e135c6 100644
--- a/arch/loongarch/vdso/Makefile
+++ b/arch/loongarch/vdso/Makefile
@@ -36,8 +36,7 @@ endif
# VDSO linker flags.
ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \
- $(filter -E%,$(KBUILD_CFLAGS)) -nostdlib -shared \
- --hash-style=sysv --build-id -T
+ $(filter -E%,$(KBUILD_CFLAGS)) -nostdlib -shared --build-id -T
#
# Shared build commands.
diff --git a/arch/loongarch/vdso/vgetrandom-chacha.S b/arch/loongarch/vdso/vgetrandom-chacha.S
index c2733e6c3a8d..c4dd2bab8825 100644
--- a/arch/loongarch/vdso/vgetrandom-chacha.S
+++ b/arch/loongarch/vdso/vgetrandom-chacha.S
@@ -58,9 +58,7 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack)
#define copy0 t5
#define copy1 t6
#define copy2 t7
-
-/* Reuse i as copy3 */
-#define copy3 i
+#define copy3 t8
/* Packs to be used with OP_4REG */
#define line0 state0, state1, state2, state3
@@ -99,6 +97,7 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack)
li.w copy0, 0x61707865
li.w copy1, 0x3320646e
li.w copy2, 0x79622d32
+ li.w copy3, 0x6b206574
ld.w cnt_lo, counter, 0
ld.w cnt_hi, counter, 4
@@ -108,7 +107,7 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack)
move state0, copy0
move state1, copy1
move state2, copy2
- li.w state3, 0x6b206574
+ move state3, copy3
/* state[4,5,..,11] = key */
ld.w state4, key, 0
@@ -167,12 +166,6 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack)
addi.w i, i, -1
bnez i, .Lpermute
- /*
- * copy[3] = "expa", materialize it here because copy[3] shares the
- * same register with i which just became dead.
- */
- li.w copy3, 0x6b206574
-
/* output[0,1,2,3] = copy[0,1,2,3] + state[0,1,2,3] */
OP_4REG add.w line0, copy
st.w state0, output, 0
diff --git a/arch/m68k/amiga/amisound.c b/arch/m68k/amiga/amisound.c
index 714fe8ec6afa..5fd93dfab809 100644
--- a/arch/m68k/amiga/amisound.c
+++ b/arch/m68k/amiga/amisound.c
@@ -78,7 +78,7 @@ void amiga_mksound( unsigned int hz, unsigned int ticks )
return;
local_irq_save(flags);
- del_timer( &sound_timer );
+ timer_delete(&sound_timer);
if (hz > 20 && hz < 32767) {
unsigned long period = (clock_constant / hz);
diff --git a/arch/m68k/include/asm/sun3_pgalloc.h b/arch/m68k/include/asm/sun3_pgalloc.h
index 80afc3a18724..1e21c758b774 100644
--- a/arch/m68k/include/asm/sun3_pgalloc.h
+++ b/arch/m68k/include/asm/sun3_pgalloc.h
@@ -17,11 +17,8 @@
extern const char bad_pmd_string[];
-#define __pte_free_tlb(tlb, pte, addr) \
-do { \
- pagetable_dtor(page_ptdesc(pte)); \
- tlb_remove_page_ptdesc((tlb), page_ptdesc(pte)); \
-} while (0)
+#define __pte_free_tlb(tlb, pte, addr) \
+ tlb_remove_ptdesc((tlb), page_ptdesc(pte))
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
diff --git a/arch/m68k/mac/macboing.c b/arch/m68k/mac/macboing.c
index faea2265a540..6312d5b600a5 100644
--- a/arch/m68k/mac/macboing.c
+++ b/arch/m68k/mac/macboing.c
@@ -183,7 +183,7 @@ void mac_mksound( unsigned int freq, unsigned int length )
local_irq_save(flags);
- del_timer( &mac_sound_timer );
+ timer_delete(&mac_sound_timer);
for ( i = 0; i < 0x800; i++ )
mac_asc_regs[ i ] = 0;
@@ -277,7 +277,7 @@ static void mac_quadra_ring_bell(struct timer_list *unused)
local_irq_save(flags);
- del_timer( &mac_sound_timer );
+ timer_delete(&mac_sound_timer);
if ( mac_bell_duration-- > 0 )
{
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 65f0d1fb8a2a..31d475cdb1c5 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -118,7 +118,7 @@ int page_is_ram(unsigned long pfn)
/*
* Check for command-line options that affect what MMU_init will do.
*/
-static void mm_cmdline_setup(void)
+static void __init mm_cmdline_setup(void)
{
unsigned long maxmem = 0;
char *p = cmd_line;
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 85928260863b..fc0772c1bad4 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2618,6 +2618,7 @@ config RELOCATABLE
CPU_MIPS32_R6 || CPU_MIPS64_R6 || \
CPU_P5600 || CAVIUM_OCTEON_SOC || \
CPU_LOONGSON64
+ select ARCH_VMLINUX_NEEDS_RELOCS
help
This builds a kernel image that retains relocation information
so it can be loaded someplace besides the default 1MB.
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index be8cb44a89fd..d9057e29bc62 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -100,10 +100,6 @@ LDFLAGS_vmlinux += -G 0 -static -n -nostdlib
KBUILD_AFLAGS_MODULE += -mlong-calls
KBUILD_CFLAGS_MODULE += -mlong-calls
-ifeq ($(CONFIG_RELOCATABLE),y)
-LDFLAGS_vmlinux += --emit-relocs
-endif
-
cflags-y += -ffreestanding
cflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB
diff --git a/arch/mips/Makefile.postlink b/arch/mips/Makefile.postlink
index 6cfdc149d3bc..ea0add7d56b2 100644
--- a/arch/mips/Makefile.postlink
+++ b/arch/mips/Makefile.postlink
@@ -22,7 +22,7 @@ quiet_cmd_relocs = RELOCS $@
# `@true` prevents complaint when there is nothing to be done
-vmlinux: FORCE
+vmlinux vmlinux.unstripped: FORCE
@true
ifeq ($(CONFIG_CPU_LOONGSON3_WORKAROUNDS),y)
$(call if_changed,ls3_llsc)
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 8425a6b38aa2..e6b4d9c0c169 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -1505,7 +1505,7 @@ static int __init octeon_irq_init_ciu(
ciu_domain = irq_domain_add_tree(
ciu_node, &octeon_irq_domain_ciu_ops, dd);
- irq_set_default_host(ciu_domain);
+ irq_set_default_domain(ciu_domain);
/* CIU_0 */
for (i = 0; i < 16; i++) {
@@ -2076,7 +2076,7 @@ static int __init octeon_irq_init_ciu2(
ciu_domain = irq_domain_add_tree(
ciu_node, &octeon_irq_domain_ciu2_ops, NULL);
- irq_set_default_host(ciu_domain);
+ irq_set_default_domain(ciu_domain);
/* CUI2 */
for (i = 0; i < 64; i++) {
@@ -2929,7 +2929,7 @@ static int __init octeon_irq_init_ciu3(struct device_node *ciu_node,
/* Only do per CPU things if it is the CIU of the boot node. */
octeon_irq_ciu3_alloc_resources(ciu3_info);
if (node == 0)
- irq_set_default_host(domain);
+ irq_set_default_domain(domain);
octeon_irq_use_ip4 = false;
/* Enable the CIU lines */
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index 26c7a6ede983..bbca420c96d3 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -48,11 +48,8 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
extern void pgd_init(void *addr);
extern pgd_t *pgd_alloc(struct mm_struct *mm);
-#define __pte_free_tlb(tlb, pte, address) \
-do { \
- pagetable_dtor(page_ptdesc(pte)); \
- tlb_remove_page_ptdesc((tlb), page_ptdesc(pte)); \
-} while (0)
+#define __pte_free_tlb(tlb, pte, address) \
+ tlb_remove_ptdesc((tlb), page_ptdesc(pte))
#ifndef __PAGETABLE_PMD_FOLDED
diff --git a/arch/mips/pci/pci-xtalk-bridge.c b/arch/mips/pci/pci-xtalk-bridge.c
index dae856fb3e5b..e00c38620d14 100644
--- a/arch/mips/pci/pci-xtalk-bridge.c
+++ b/arch/mips/pci/pci-xtalk-bridge.c
@@ -620,7 +620,7 @@ static int bridge_probe(struct platform_device *pdev)
if (bridge_get_partnum(virt_to_phys((void *)bd->bridge_addr), partnum))
return -EPROBE_DEFER; /* not available yet */
- parent = irq_get_default_host();
+ parent = irq_get_default_domain();
if (!parent)
return -ENODEV;
fn = irq_domain_alloc_named_fwnode("BRIDGE");
diff --git a/arch/mips/sgi-ip22/ip22-reset.c b/arch/mips/sgi-ip22/ip22-reset.c
index 8f0861c58080..8539f562f5b8 100644
--- a/arch/mips/sgi-ip22/ip22-reset.c
+++ b/arch/mips/sgi-ip22/ip22-reset.c
@@ -98,7 +98,7 @@ static void blink_timeout(struct timer_list *unused)
static void debounce(struct timer_list *unused)
{
- del_timer(&debounce_timer);
+ timer_delete(&debounce_timer);
if (sgint->istat1 & SGINT_ISTAT1_PWR) {
/* Interrupt still being sent. */
debounce_timer.expires = jiffies + (HZ / 20); /* 0.05s */
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 00e63e9ef61d..288d4d17eddd 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -297,7 +297,7 @@ void __init arch_init_irq(void)
if (WARN_ON(domain == NULL))
return;
- irq_set_default_host(domain);
+ irq_set_default_domain(domain);
irq_set_percpu_devid(IP27_HUB_PEND0_IRQ);
irq_set_chained_handler_and_data(IP27_HUB_PEND0_IRQ, ip27_do_irq_mask0,
diff --git a/arch/mips/sgi-ip30/ip30-irq.c b/arch/mips/sgi-ip30/ip30-irq.c
index 423c32cb66ed..9fb905e2cf14 100644
--- a/arch/mips/sgi-ip30/ip30-irq.c
+++ b/arch/mips/sgi-ip30/ip30-irq.c
@@ -313,7 +313,7 @@ void __init arch_init_irq(void)
if (!domain)
return;
- irq_set_default_host(domain);
+ irq_set_default_domain(domain);
irq_set_percpu_devid(IP30_HEART_L0_IRQ);
irq_set_chained_handler_and_data(IP30_HEART_L0_IRQ, ip30_normal_irq,
diff --git a/arch/nios2/Kbuild b/arch/nios2/Kbuild
index fc2952edd2de..fa64c5954b20 100644
--- a/arch/nios2/Kbuild
+++ b/arch/nios2/Kbuild
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-y += kernel/ mm/ platform/ boot/dts/
+obj-y += kernel/ mm/ platform/
# for cleaning
subdir- += boot
diff --git a/arch/nios2/boot/dts/Makefile b/arch/nios2/boot/dts/Makefile
index 1a2e8996bec7..1b8f41c4154f 100644
--- a/arch/nios2/boot/dts/Makefile
+++ b/arch/nios2/boot/dts/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-obj-y := $(patsubst %.dts,%.dtb.o,$(CONFIG_NIOS2_DTB_SOURCE))
+dtb-y := $(addsuffix .dtb, $(CONFIG_BUILTIN_DTB_NAME))
-dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(src)/%.dts,%.dtb, $(wildcard $(src)/*.dts))
+dtb-$(CONFIG_OF_ALL_DTBS) += $(patsubst $(src)/%.dts,%.dtb, $(wildcard $(src)/*.dts))
diff --git a/arch/nios2/include/asm/pgalloc.h b/arch/nios2/include/asm/pgalloc.h
index 12a536b7bfbd..db122b093a8b 100644
--- a/arch/nios2/include/asm/pgalloc.h
+++ b/arch/nios2/include/asm/pgalloc.h
@@ -28,10 +28,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
extern pgd_t *pgd_alloc(struct mm_struct *mm);
-#define __pte_free_tlb(tlb, pte, addr) \
- do { \
- pagetable_dtor(page_ptdesc(pte)); \
- tlb_remove_page_ptdesc((tlb), (page_ptdesc(pte))); \
- } while (0)
+#define __pte_free_tlb(tlb, pte, addr) \
+ tlb_remove_ptdesc((tlb), page_ptdesc(pte))
#endif /* _ASM_NIOS2_PGALLOC_H */
diff --git a/arch/nios2/kernel/irq.c b/arch/nios2/kernel/irq.c
index 6b7890e5f7af..8fa280660051 100644
--- a/arch/nios2/kernel/irq.c
+++ b/arch/nios2/kernel/irq.c
@@ -72,7 +72,7 @@ void __init init_IRQ(void)
domain = irq_domain_add_linear(node, NIOS2_CPU_NR_IRQS, &irq_ops, NULL);
BUG_ON(!domain);
- irq_set_default_host(domain);
+ irq_set_default_domain(domain);
of_node_put(node);
/* Load the initial ienable value */
ienable = RDCTL(CTL_IENABLE);
diff --git a/arch/nios2/kernel/prom.c b/arch/nios2/kernel/prom.c
index db049249766f..4f8c14da6490 100644
--- a/arch/nios2/kernel/prom.c
+++ b/arch/nios2/kernel/prom.c
@@ -32,7 +32,7 @@ void __init early_init_devtree(void *params)
}
#endif
-#ifdef CONFIG_NIOS2_DTB_SOURCE_BOOL
+#ifdef CONFIG_BUILTIN_DTB
if (be32_to_cpu((__be32) *dtb) == OF_DT_HEADER)
params = (void *)__dtb_start;
#endif
diff --git a/arch/nios2/platform/Kconfig.platform b/arch/nios2/platform/Kconfig.platform
index e849daff6fd1..c75cadd92388 100644
--- a/arch/nios2/platform/Kconfig.platform
+++ b/arch/nios2/platform/Kconfig.platform
@@ -35,19 +35,20 @@ config NIOS2_DTB_PHYS_ADDR
help
Physical address of a dtb blob.
-config NIOS2_DTB_SOURCE_BOOL
+config BUILTIN_DTB
bool "Compile and link device tree into kernel image"
depends on !COMPILE_TEST
+ select GENERIC_BUILTIN_DTB
help
This allows you to specify a dts (device tree source) file
which will be compiled and linked into the kernel image.
-config NIOS2_DTB_SOURCE
- string "Device tree source file"
- depends on NIOS2_DTB_SOURCE_BOOL
+config BUILTIN_DTB_NAME
+ string "Built-in device tree name"
+ depends on BUILTIN_DTB
default ""
help
- Absolute path to the device tree source (dts) file describing your
+ Relative path to the device tree without suffix describing your
system.
comment "Nios II instructions"
diff --git a/arch/openrisc/include/asm/pgalloc.h b/arch/openrisc/include/asm/pgalloc.h
index 3372f4e6ab4b..3f110931d8f6 100644
--- a/arch/openrisc/include/asm/pgalloc.h
+++ b/arch/openrisc/include/asm/pgalloc.h
@@ -64,10 +64,7 @@ extern inline pgd_t *pgd_alloc(struct mm_struct *mm)
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
-#define __pte_free_tlb(tlb, pte, addr) \
-do { \
- pagetable_dtor(page_ptdesc(pte)); \
- tlb_remove_page_ptdesc((tlb), (page_ptdesc(pte))); \
-} while (0)
+#define __pte_free_tlb(tlb, pte, addr) \
+ tlb_remove_ptdesc((tlb), page_ptdesc(pte))
#endif
diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig
index 1eb446452fc0..3086c4a12d6d 100644
--- a/arch/powerpc/configs/skiroot_defconfig
+++ b/arch/powerpc/configs/skiroot_defconfig
@@ -78,7 +78,6 @@ CONFIG_VIRTIO_BLK=m
CONFIG_BLK_DEV_NVME=m
CONFIG_NVME_MULTIPATH=y
CONFIG_EEPROM_AT24=m
-# CONFIG_CXL is not set
# CONFIG_OCXL is not set
CONFIG_BLK_DEV_SD=m
CONFIG_BLK_DEV_SR=m
diff --git a/arch/powerpc/include/asm/copro.h b/arch/powerpc/include/asm/copro.h
index fd2e166ea02a..81bd176203ab 100644
--- a/arch/powerpc/include/asm/copro.h
+++ b/arch/powerpc/include/asm/copro.h
@@ -18,10 +18,4 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb);
-
-#ifdef CONFIG_PPC_COPRO_BASE
-void copro_flush_all_slbs(struct mm_struct *mm);
-#else
-static inline void copro_flush_all_slbs(struct mm_struct *mm) {}
-#endif
#endif /* _ASM_POWERPC_COPRO_H */
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index 47ed639f3b8f..a4dc27655b3e 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -38,9 +38,6 @@ struct dev_archdata {
#ifdef CONFIG_FAIL_IOMMU
int fail_iommu;
#endif
-#ifdef CONFIG_CXL_BASE
- struct cxl_context *cxl_ctx;
-#endif
#ifdef CONFIG_PCI_IOV
void *iov_data;
#endif
diff --git a/arch/powerpc/include/asm/pnv-pci.h b/arch/powerpc/include/asm/pnv-pci.h
index 8afc92860dbb..7e9a479951a3 100644
--- a/arch/powerpc/include/asm/pnv-pci.h
+++ b/arch/powerpc/include/asm/pnv-pci.h
@@ -10,7 +10,6 @@
#include <linux/pci_hotplug.h>
#include <linux/irq.h>
#include <linux/of.h>
-#include <misc/cxl-base.h>
#include <asm/opal-api.h>
#define PCI_SLOT_ID_PREFIX (1UL << 63)
@@ -25,25 +24,9 @@ extern int pnv_pci_get_power_state(uint64_t id, uint8_t *state);
extern int pnv_pci_set_power_state(uint64_t id, uint8_t state,
struct opal_msg *msg);
-extern int pnv_pci_set_tunnel_bar(struct pci_dev *dev, uint64_t addr,
- int enable);
-int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode);
-int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
- unsigned int virq);
-int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num);
-void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num);
-int pnv_cxl_get_irq_count(struct pci_dev *dev);
-struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev);
int64_t pnv_opal_pci_msi_eoi(struct irq_data *d);
bool is_pnv_opal_msi(struct irq_chip *chip);
-#ifdef CONFIG_CXL_BASE
-int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs,
- struct pci_dev *dev, int num);
-void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs,
- struct pci_dev *dev);
-#endif
-
struct pnv_php_slot {
struct hotplug_slot slot;
uint64_t id;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 86bff159c51e..19f4d298dd17 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -6041,7 +6041,7 @@ static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
* the underlying calls, which will EOI the interrupt in real
* mode, need an HW IRQ number mapped in the XICS IRQ domain.
*/
- host_data = irq_domain_get_irq_data(irq_get_default_host(), host_irq);
+ host_data = irq_domain_get_irq_data(irq_get_default_domain(), host_irq);
irq_map->r_hwirq = (unsigned int)irqd_to_hwirq(host_data);
if (i == pimap->n_mapped)
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index 1362c672387e..1302b5ac5672 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -1555,7 +1555,7 @@ int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
struct kvmppc_xive_src_block *sb;
struct kvmppc_xive_irq_state *state;
struct irq_data *host_data =
- irq_domain_get_irq_data(irq_get_default_host(), host_irq);
+ irq_domain_get_irq_data(irq_get_default_domain(), host_irq);
unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data);
u16 idx;
u8 prio;
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 6a5be025a8af..6a4805968966 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -622,7 +622,7 @@ static void arm_next_watchdog(struct kvm_vcpu *vcpu)
if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
else
- del_timer(&vcpu->arch.wdt_timer);
+ timer_delete(&vcpu->arch.wdt_timer);
spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
}
@@ -1441,7 +1441,7 @@ int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
- del_timer_sync(&vcpu->arch.wdt_timer);
+ timer_delete_sync(&vcpu->arch.wdt_timer);
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c
index 430d1d935a7c..e9e2dd70c060 100644
--- a/arch/powerpc/mm/book3s64/hash_native.c
+++ b/arch/powerpc/mm/book3s64/hash_native.c
@@ -27,8 +27,6 @@
#include <asm/ppc-opcode.h>
#include <asm/feature-fixups.h>
-#include <misc/cxl-base.h>
-
#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
@@ -217,11 +215,9 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
static inline void tlbie(unsigned long vpn, int psize, int apsize,
int ssize, int local)
{
- unsigned int use_local;
+ unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
- use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use();
-
if (use_local)
use_local = mmu_psize_defs[psize].tlbiel;
if (lock_tlbie && !use_local)
@@ -789,10 +785,6 @@ static void native_flush_hash_range(unsigned long number, int local)
unsigned long psize = batch->psize;
int ssize = batch->ssize;
int i;
- unsigned int use_local;
-
- use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
- mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();
local_irq_save(flags);
@@ -827,7 +819,8 @@ static void native_flush_hash_range(unsigned long number, int local)
} pte_iterate_hashed_end();
}
- if (use_local) {
+ if (mmu_has_feature(MMU_FTR_TLBIEL) &&
+ mmu_psize_defs[psize].tlbiel && local) {
asm volatile("ptesync":::"memory");
for (i = 0; i < number; i++) {
vpn = batch->vpn[i];
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 734610052cf4..5158aefe4873 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -56,7 +56,7 @@
#include <asm/cacheflush.h>
#include <asm/cputable.h>
#include <asm/sections.h>
-#include <asm/copro.h>
+#include <asm/spu.h>
#include <asm/udbg.h>
#include <asm/text-patching.h>
#include <asm/fadump.h>
@@ -1600,7 +1600,9 @@ void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
if (get_slice_psize(mm, addr) == MMU_PAGE_4K)
return;
slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K);
- copro_flush_all_slbs(mm);
+#ifdef CONFIG_SPU_BASE
+ spu_flush_all_slbs(mm);
+#endif
if ((get_paca_psize(addr) != MMU_PAGE_4K) && (current->mm == mm)) {
copy_mm_to_paca(mm);
@@ -1869,7 +1871,9 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
"to 4kB pages because of "
"non-cacheable mapping\n");
psize = mmu_vmalloc_psize = MMU_PAGE_4K;
- copro_flush_all_slbs(mm);
+#ifdef CONFIG_SPU_BASE
+ spu_flush_all_slbs(mm);
+#endif
}
}
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index c0c45d033cba..8f7d41ce2ca1 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -10,7 +10,6 @@
#include <linux/pkeys.h>
#include <linux/debugfs.h>
#include <linux/proc_fs.h>
-#include <misc/cxl-base.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
diff --git a/arch/powerpc/mm/book3s64/slice.c b/arch/powerpc/mm/book3s64/slice.c
index bc9a39821d1c..28bec5bc7879 100644
--- a/arch/powerpc/mm/book3s64/slice.c
+++ b/arch/powerpc/mm/book3s64/slice.c
@@ -22,7 +22,7 @@
#include <linux/security.h>
#include <asm/mman.h>
#include <asm/mmu.h>
-#include <asm/copro.h>
+#include <asm/spu.h>
#include <asm/hugetlb.h>
#include <asm/mmu_context.h>
@@ -248,7 +248,9 @@ static void slice_convert(struct mm_struct *mm,
spin_unlock_irqrestore(&slice_convert_lock, flags);
- copro_flush_all_slbs(mm);
+#ifdef CONFIG_SPU_BASE
+ spu_flush_all_slbs(mm);
+#endif
}
/*
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index f49fd873df8d..f5f8692e2c69 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -12,8 +12,6 @@
#include <linux/export.h>
#include <asm/reg.h>
#include <asm/copro.h>
-#include <asm/spu.h>
-#include <misc/cxl-base.h>
/*
* This ought to be kept in sync with the powerpc specific do_page_fault
@@ -135,13 +133,4 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
return 0;
}
EXPORT_SYMBOL_GPL(copro_calculate_slb);
-
-void copro_flush_all_slbs(struct mm_struct *mm)
-{
-#ifdef CONFIG_SPU_BASE
- spu_flush_all_slbs(mm);
-#endif
- cxl_slbia(mm);
-}
-EXPORT_SYMBOL_GPL(copro_flush_all_slbs);
#endif
diff --git a/arch/powerpc/platforms/44x/uic.c b/arch/powerpc/platforms/44x/uic.c
index 8b03ae4cb3f6..31f760c2ec5d 100644
--- a/arch/powerpc/platforms/44x/uic.c
+++ b/arch/powerpc/platforms/44x/uic.c
@@ -291,7 +291,7 @@ void __init uic_init_tree(void)
if (!primary_uic)
panic("Unable to initialize primary UIC %pOF\n", np);
- irq_set_default_host(primary_uic->irqhost);
+ irq_set_default_domain(primary_uic->irqhost);
of_node_put(np);
/* The scan again for cascaded UICs */
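Every irq_set_default_host() caller in this series is converted mechanically to the new irq_set_default_domain() name with the same argument, as in the uic hunk above. A minimal sketch of the pattern for a made-up interrupt controller (driver name and ops are hypothetical; only the rename is taken from the patch):

    #include <linux/irqdomain.h>

    static const struct irq_domain_ops demo_pic_ops;   /* .map etc. omitted */
    static struct irq_domain *demo_pic_host;

    static void __init demo_pic_init(struct device_node *np)
    {
            /* Linear domain covering 64 interrupt sources. */
            demo_pic_host = irq_domain_add_linear(np, 64, &demo_pic_ops, NULL);
            if (WARN_ON(!demo_pic_host))
                    return;

            /* Formerly irq_set_default_host(); behaviour is unchanged. */
            irq_set_default_domain(demo_pic_host);
    }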
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
index 1e0a5e9644dc..43c881d31ca6 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
@@ -453,7 +453,7 @@ void __init mpc52xx_init_irq(void)
if (!mpc52xx_irqhost)
panic(__FILE__ ": Cannot allocate the IRQ host\n");
- irq_set_default_host(mpc52xx_irqhost);
+ irq_set_default_domain(mpc52xx_irqhost);
pr_info("MPC52xx PIC is up and running!\n");
}
diff --git a/arch/powerpc/platforms/amigaone/setup.c b/arch/powerpc/platforms/amigaone/setup.c
index 2c8dc0886912..33f852a7625f 100644
--- a/arch/powerpc/platforms/amigaone/setup.c
+++ b/arch/powerpc/platforms/amigaone/setup.c
@@ -109,7 +109,7 @@ static void __init amigaone_init_IRQ(void)
i8259_init(pic, int_ack);
ppc_md.get_irq = i8259_irq;
- irq_set_default_host(i8259_get_host());
+ irq_set_default_domain(i8259_get_host());
}
static int __init request_isa_regions(void)
diff --git a/arch/powerpc/platforms/cell/spufs/gang.c b/arch/powerpc/platforms/cell/spufs/gang.c
index 827d338deaf4..2c2999de6bfa 100644
--- a/arch/powerpc/platforms/cell/spufs/gang.c
+++ b/arch/powerpc/platforms/cell/spufs/gang.c
@@ -25,6 +25,7 @@ struct spu_gang *alloc_spu_gang(void)
mutex_init(&gang->aff_mutex);
INIT_LIST_HEAD(&gang->list);
INIT_LIST_HEAD(&gang->aff_list_head);
+ gang->alive = 1;
out:
return gang;
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 70236d1df3d3..9f9e4b871627 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -192,13 +192,32 @@ static int spufs_fill_dir(struct dentry *dir,
return -ENOMEM;
ret = spufs_new_file(dir->d_sb, dentry, files->ops,
files->mode & mode, files->size, ctx);
- if (ret)
+ if (ret) {
+ dput(dentry);
return ret;
+ }
files++;
}
return 0;
}
+static void unuse_gang(struct dentry *dir)
+{
+ struct inode *inode = dir->d_inode;
+ struct spu_gang *gang = SPUFS_I(inode)->i_gang;
+
+ if (gang) {
+ bool dead;
+
+ inode_lock(inode); // exclusion with spufs_create_context()
+ dead = !--gang->alive;
+ inode_unlock(inode);
+
+ if (dead)
+ simple_recursive_removal(dir, NULL);
+ }
+}
+
static int spufs_dir_close(struct inode *inode, struct file *file)
{
struct inode *parent;
@@ -213,6 +232,7 @@ static int spufs_dir_close(struct inode *inode, struct file *file)
inode_unlock(parent);
WARN_ON(ret);
+ unuse_gang(dir->d_parent);
return dcache_dir_close(inode, file);
}
@@ -405,7 +425,7 @@ spufs_create_context(struct inode *inode, struct dentry *dentry,
{
int ret;
int affinity;
- struct spu_gang *gang;
+ struct spu_gang *gang = SPUFS_I(inode)->i_gang;
struct spu_context *neighbor;
struct path path = {.mnt = mnt, .dentry = dentry};
@@ -420,11 +440,15 @@ spufs_create_context(struct inode *inode, struct dentry *dentry,
if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader)
return -ENODEV;
- gang = NULL;
+ if (gang) {
+ if (!gang->alive)
+ return -ENOENT;
+ gang->alive++;
+ }
+
neighbor = NULL;
affinity = flags & (SPU_CREATE_AFFINITY_MEM | SPU_CREATE_AFFINITY_SPU);
if (affinity) {
- gang = SPUFS_I(inode)->i_gang;
if (!gang)
return -EINVAL;
mutex_lock(&gang->aff_mutex);
@@ -436,8 +460,11 @@ spufs_create_context(struct inode *inode, struct dentry *dentry,
}
ret = spufs_mkdir(inode, dentry, flags, mode & 0777);
- if (ret)
+ if (ret) {
+ if (neighbor)
+ put_spu_context(neighbor);
goto out_aff_unlock;
+ }
if (affinity) {
spufs_set_affinity(flags, SPUFS_I(d_inode(dentry))->i_ctx,
@@ -453,6 +480,8 @@ spufs_create_context(struct inode *inode, struct dentry *dentry,
out_aff_unlock:
if (affinity)
mutex_unlock(&gang->aff_mutex);
+ if (ret && gang)
+ gang->alive--; // can't reach 0
return ret;
}
@@ -482,6 +511,7 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode)
inode->i_fop = &simple_dir_operations;
d_instantiate(dentry, inode);
+ dget(dentry);
inc_nlink(dir);
inc_nlink(d_inode(dentry));
return ret;
@@ -492,6 +522,21 @@ out:
return ret;
}
+static int spufs_gang_close(struct inode *inode, struct file *file)
+{
+ unuse_gang(file->f_path.dentry);
+ return dcache_dir_close(inode, file);
+}
+
+static const struct file_operations spufs_gang_fops = {
+ .open = dcache_dir_open,
+ .release = spufs_gang_close,
+ .llseek = dcache_dir_lseek,
+ .read = generic_read_dir,
+ .iterate_shared = dcache_readdir,
+ .fsync = noop_fsync,
+};
+
static int spufs_gang_open(const struct path *path)
{
int ret;
@@ -511,7 +556,7 @@ static int spufs_gang_open(const struct path *path)
return PTR_ERR(filp);
}
- filp->f_op = &simple_dir_operations;
+ filp->f_op = &spufs_gang_fops;
fd_install(ret, filp);
return ret;
}
@@ -526,10 +571,8 @@ static int spufs_create_gang(struct inode *inode,
ret = spufs_mkgang(inode, dentry, mode & 0777);
if (!ret) {
ret = spufs_gang_open(&path);
- if (ret < 0) {
- int err = simple_rmdir(inode, dentry);
- WARN_ON(err);
- }
+ if (ret < 0)
+ unuse_gang(dentry);
}
return ret;
}
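The gang->alive counter introduced above ties the gang directory's lifetime to its users: alloc_spu_gang() starts it at 1 for the gang fd itself, each context created inside the gang takes another reference, and unuse_gang() drops one when a context directory or the gang fd is closed, tearing the tree down once it reaches zero. An outline of the protocol, assembled from the hunks above (locking details simplified):

    /* 1. alloc_spu_gang(): the gang directory itself holds one reference. */
    gang->alive = 1;

    /* 2. spufs_create_context(): the VFS create path already holds the gang
     *    directory's inode lock, so this check/increment is serialised
     *    against unuse_gang(). */
    if (gang) {
            if (!gang->alive)
                    return -ENOENT;         /* gang is being torn down */
            gang->alive++;
    }

    /* 3. unuse_gang(): runs when a context directory or the gang fd is
     *    closed; the final drop removes the whole gang subtree. */
    inode_lock(inode);                      /* exclusion with step 2 */
    dead = !--gang->alive;
    inode_unlock(inode);
    if (dead)
            simple_recursive_removal(dir, NULL);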
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 610ca8570682..8e7ed010bfde 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -508,7 +508,7 @@ static void __spu_del_from_rq(struct spu_context *ctx)
if (!list_empty(&ctx->rq)) {
if (!--spu_prio->nr_waiting)
- del_timer(&spusched_timer);
+ timer_delete(&spusched_timer);
list_del_init(&ctx->rq);
if (list_empty(&spu_prio->runq[prio]))
@@ -1126,8 +1126,8 @@ void spu_sched_exit(void)
remove_proc_entry("spu_loadavg", NULL);
- del_timer_sync(&spusched_timer);
- del_timer_sync(&spuloadavg_timer);
+ timer_delete_sync(&spusched_timer);
+ timer_delete_sync(&spuloadavg_timer);
kthread_stop(spusched_task);
for (node = 0; node < MAX_NUMNODES; node++) {
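The del_timer()/del_timer_sync() calls above move to the current timer API names; only the spelling changes, not the semantics. A minimal sketch of the distinction between the two calls (the timer itself is hypothetical):

    #include <linux/timer.h>

    static struct timer_list demo_timer;    /* hypothetical timer */

    static void demo_stop_timer(bool final_teardown)
    {
            if (!final_teardown) {
                    /* Deactivate a pending timer; its callback may still be
                     * running on another CPU when this returns. */
                    timer_delete(&demo_timer);
                    return;
            }

            /* Deactivate and wait for a running callback to finish; required
             * before freeing whatever the timer or its callback touches. */
            timer_delete_sync(&demo_timer);
    }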
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 84958487f696..d33787c57c39 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -151,6 +151,8 @@ struct spu_gang {
int aff_flags;
struct spu *aff_ref_spu;
atomic_t aff_sched_count;
+
+ int alive;
};
/* Flag bits for spu_gang aff_flags */
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index 36ee3a5056a1..c1bfa4c3444c 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -486,7 +486,7 @@ static void __init chrp_find_8259(void)
i8259_init(pic, chrp_int_ack);
if (ppc_md.get_irq == NULL) {
ppc_md.get_irq = i8259_irq;
- irq_set_default_host(i8259_get_host());
+ irq_set_default_domain(i8259_get_host());
}
if (chrp_mpic != NULL) {
cascade_irq = irq_of_parse_and_map(pic, 0);
diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
index 4d9200bdba78..013d66304c31 100644
--- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
@@ -190,7 +190,7 @@ void __init flipper_pic_probe(void)
flipper_irq_host = flipper_pic_init(np);
BUG_ON(!flipper_irq_host);
- irq_set_default_host(flipper_irq_host);
+ irq_set_default_domain(flipper_irq_host);
of_node_put(np);
}
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index 0761d98e5be3..d03b41336901 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -228,7 +228,7 @@ static void __init nemo_init_IRQ(struct mpic *mpic)
irq_set_chained_handler(gpio_virq, sb600_8259_cascade);
mpic_unmask_irq(irq_get_irq_data(gpio_virq));
- irq_set_default_host(mpic->irqhost);
+ irq_set_default_domain(mpic->irqhost);
}
#else
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index c097d591670e..a0ae58636e10 100644
--- a/arch/powerpc/platforms/powermac/low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
@@ -347,7 +347,7 @@ static irqreturn_t kw_i2c_irq(int irq, void *dev_id)
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
- del_timer(&host->timeout_timer);
+ timer_delete(&host->timeout_timer);
kw_i2c_handle_interrupt(host, kw_read_reg(reg_isr));
if (host->state != state_idle) {
host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT;
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 2202bf77c7a3..03a7c51f2645 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -330,7 +330,7 @@ static void __init pmac_pic_probe_oldstyle(void)
pmac_pic_host = irq_domain_add_linear(master, max_irqs,
&pmac_pic_host_ops, NULL);
BUG_ON(pmac_pic_host == NULL);
- irq_set_default_host(pmac_pic_host);
+ irq_set_default_domain(pmac_pic_host);
/* Get addresses of first controller if we have a node for it */
BUG_ON(of_address_to_resource(master, 0, &r));
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
index 19f0fc5c6f1b..9e5d0c847ee2 100644
--- a/arch/powerpc/platforms/powernv/Makefile
+++ b/arch/powerpc/platforms/powernv/Makefile
@@ -21,7 +21,6 @@ obj-$(CONFIG_PRESERVE_FA_DUMP) += opal-fadump.o
obj-$(CONFIG_OPAL_CORE) += opal-core.o
obj-$(CONFIG_PCI) += pci.o pci-ioda.o pci-ioda-tce.o
obj-$(CONFIG_PCI_IOV) += pci-sriov.o
-obj-$(CONFIG_CXL_BASE) += pci-cxl.o
obj-$(CONFIG_EEH) += eeh-powernv.o
obj-$(CONFIG_MEMORY_FAILURE) += opal-memory-errors.o
obj-$(CONFIG_OPAL_PRD) += opal-prd.o
diff --git a/arch/powerpc/platforms/powernv/pci-cxl.c b/arch/powerpc/platforms/powernv/pci-cxl.c
deleted file mode 100644
index 7e419de71db8..000000000000
--- a/arch/powerpc/platforms/powernv/pci-cxl.c
+++ /dev/null
@@ -1,153 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2014-2016 IBM Corp.
- */
-
-#include <linux/module.h>
-#include <misc/cxl-base.h>
-#include <asm/pnv-pci.h>
-#include <asm/opal.h>
-
-#include "pci.h"
-
-int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
- struct pnv_ioda_pe *pe;
- int rc;
-
- pe = pnv_ioda_get_pe(dev);
- if (!pe)
- return -ENODEV;
-
- pe_info(pe, "Switching PHB to CXL\n");
-
- rc = opal_pci_set_phb_cxl_mode(phb->opal_id, mode, pe->pe_number);
- if (rc == OPAL_UNSUPPORTED)
- dev_err(&dev->dev, "Required cxl mode not supported by firmware - update skiboot\n");
- else if (rc)
- dev_err(&dev->dev, "opal_pci_set_phb_cxl_mode failed: %i\n", rc);
-
- return rc;
-}
-EXPORT_SYMBOL(pnv_phb_to_cxl_mode);
-
-/* Find PHB for cxl dev and allocate MSI hwirqs?
- * Returns the absolute hardware IRQ number
- */
-int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
- int hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, num);
-
- if (hwirq < 0) {
- dev_warn(&dev->dev, "Failed to find a free MSI\n");
- return -ENOSPC;
- }
-
- return phb->msi_base + hwirq;
-}
-EXPORT_SYMBOL(pnv_cxl_alloc_hwirqs);
-
-void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
-
- msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, num);
-}
-EXPORT_SYMBOL(pnv_cxl_release_hwirqs);
-
-void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs,
- struct pci_dev *dev)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
- int i, hwirq;
-
- for (i = 1; i < CXL_IRQ_RANGES; i++) {
- if (!irqs->range[i])
- continue;
- pr_devel("cxl release irq range 0x%x: offset: 0x%lx limit: %ld\n",
- i, irqs->offset[i],
- irqs->range[i]);
- hwirq = irqs->offset[i] - phb->msi_base;
- msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq,
- irqs->range[i]);
- }
-}
-EXPORT_SYMBOL(pnv_cxl_release_hwirq_ranges);
-
-int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs,
- struct pci_dev *dev, int num)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
- int i, hwirq, try;
-
- memset(irqs, 0, sizeof(struct cxl_irq_ranges));
-
- /* 0 is reserved for the multiplexed PSL DSI interrupt */
- for (i = 1; i < CXL_IRQ_RANGES && num; i++) {
- try = num;
- while (try) {
- hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, try);
- if (hwirq >= 0)
- break;
- try /= 2;
- }
- if (!try)
- goto fail;
-
- irqs->offset[i] = phb->msi_base + hwirq;
- irqs->range[i] = try;
- pr_devel("cxl alloc irq range 0x%x: offset: 0x%lx limit: %li\n",
- i, irqs->offset[i], irqs->range[i]);
- num -= try;
- }
- if (num)
- goto fail;
-
- return 0;
-fail:
- pnv_cxl_release_hwirq_ranges(irqs, dev);
- return -ENOSPC;
-}
-EXPORT_SYMBOL(pnv_cxl_alloc_hwirq_ranges);
-
-int pnv_cxl_get_irq_count(struct pci_dev *dev)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
-
- return phb->msi_bmp.irq_count;
-}
-EXPORT_SYMBOL(pnv_cxl_get_irq_count);
-
-int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
- unsigned int virq)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
- unsigned int xive_num = hwirq - phb->msi_base;
- struct pnv_ioda_pe *pe;
- int rc;
-
- if (!(pe = pnv_ioda_get_pe(dev)))
- return -ENODEV;
-
- /* Assign XIVE to PE */
- rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
- if (rc) {
- pe_warn(pe, "%s: OPAL error %d setting msi_base 0x%x "
- "hwirq 0x%x XIVE 0x%x PE\n",
- pci_name(dev), rc, phb->msi_base, hwirq, xive_num);
- return -EIO;
- }
- pnv_set_msi_irq_chip(phb, virq);
-
- return 0;
-}
-EXPORT_SYMBOL(pnv_cxl_ioda_msi_setup);
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index b0a14e48175c..ae4b549b5ca0 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -39,8 +39,6 @@
#include <asm/mmzone.h>
#include <asm/xive.h>
-#include <misc/cxl-base.h>
-
#include "powernv.h"
#include "pci.h"
#include "../../../../drivers/pci/pci.h"
@@ -1636,47 +1634,6 @@ int64_t pnv_opal_pci_msi_eoi(struct irq_data *d)
return opal_pci_msi_eoi(phb->opal_id, d->parent_data->hwirq);
}
-/*
- * The IRQ data is mapped in the XICS domain, with OPAL HW IRQ numbers
- */
-static void pnv_ioda2_msi_eoi(struct irq_data *d)
-{
- int64_t rc;
- unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
- struct pci_controller *hose = irq_data_get_irq_chip_data(d);
- struct pnv_phb *phb = hose->private_data;
-
- rc = opal_pci_msi_eoi(phb->opal_id, hw_irq);
- WARN_ON_ONCE(rc);
-
- icp_native_eoi(d);
-}
-
-/* P8/CXL only */
-void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq)
-{
- struct irq_data *idata;
- struct irq_chip *ichip;
-
- /* The MSI EOI OPAL call is only needed on PHB3 */
- if (phb->model != PNV_PHB_MODEL_PHB3)
- return;
-
- if (!phb->ioda.irq_chip_init) {
- /*
- * First time we setup an MSI IRQ, we need to setup the
- * corresponding IRQ chip to route correctly.
- */
- idata = irq_get_irq_data(virq);
- ichip = irq_data_get_irq_chip(idata);
- phb->ioda.irq_chip_init = 1;
- phb->ioda.irq_chip = *ichip;
- phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi;
- }
- irq_set_chip(virq, &phb->ioda.irq_chip);
- irq_set_chip_data(virq, phb->hose);
-}
-
static struct irq_chip pnv_pci_msi_irq_chip;
/*
@@ -1924,7 +1881,7 @@ static const struct irq_domain_ops pnv_irq_domain_ops = {
static int __init pnv_msi_allocate_domains(struct pci_controller *hose, unsigned int count)
{
struct pnv_phb *phb = hose->private_data;
- struct irq_domain *parent = irq_get_default_host();
+ struct irq_domain *parent = irq_get_default_domain();
hose->fwnode = irq_domain_alloc_named_id_fwnode("PNV-MSI", phb->opal_id);
if (!hose->fwnode)
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 35f566aa0424..b2c1da025410 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -14,7 +14,6 @@
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/iommu.h>
-#include <linux/sched/mm.h>
#include <asm/sections.h>
#include <asm/io.h>
@@ -33,8 +32,6 @@
#include "powernv.h"
#include "pci.h"
-static DEFINE_MUTEX(tunnel_mutex);
-
int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id)
{
struct device_node *node = np;
@@ -744,64 +741,6 @@ struct iommu_table *pnv_pci_table_alloc(int nid)
return tbl;
}
-struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
-
- return of_node_get(hose->dn);
-}
-EXPORT_SYMBOL(pnv_pci_get_phb_node);
-
-int pnv_pci_set_tunnel_bar(struct pci_dev *dev, u64 addr, int enable)
-{
- struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus);
- u64 tunnel_bar;
- __be64 val;
- int rc;
-
- if (!opal_check_token(OPAL_PCI_GET_PBCQ_TUNNEL_BAR))
- return -ENXIO;
- if (!opal_check_token(OPAL_PCI_SET_PBCQ_TUNNEL_BAR))
- return -ENXIO;
-
- mutex_lock(&tunnel_mutex);
- rc = opal_pci_get_pbcq_tunnel_bar(phb->opal_id, &val);
- if (rc != OPAL_SUCCESS) {
- rc = -EIO;
- goto out;
- }
- tunnel_bar = be64_to_cpu(val);
- if (enable) {
- /*
- * Only one device per PHB can use atomics.
- * Our policy is first-come, first-served.
- */
- if (tunnel_bar) {
- if (tunnel_bar != addr)
- rc = -EBUSY;
- else
- rc = 0; /* Setting same address twice is ok */
- goto out;
- }
- } else {
- /*
- * The device that owns atomics and wants to release
- * them must pass the same address with enable == 0.
- */
- if (tunnel_bar != addr) {
- rc = -EPERM;
- goto out;
- }
- addr = 0x0ULL;
- }
- rc = opal_pci_set_pbcq_tunnel_bar(phb->opal_id, addr);
- rc = opal_error_code(rc);
-out:
- mutex_unlock(&tunnel_mutex);
- return rc;
-}
-EXPORT_SYMBOL_GPL(pnv_pci_set_tunnel_bar);
-
void pnv_pci_shutdown(void)
{
struct pci_controller *hose;
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 93fba1f8661f..42075501663b 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -163,7 +163,6 @@ struct pnv_phb {
unsigned int *io_segmap;
/* IRQ chip */
- int irq_chip_init;
struct irq_chip irq_chip;
/* Sorted list of used PE's based
@@ -281,7 +280,6 @@ extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option);
extern struct pnv_ioda_pe *pnv_pci_bdfn_to_pe(struct pnv_phb *phb, u16 bdfn);
extern struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev);
-extern void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq);
extern unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift,
__u64 window_size, __u32 levels);
extern int pnv_eeh_post_init(void);
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index af3fe9f04f24..95e96bd61a20 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -744,7 +744,7 @@ void __init ps3_init_IRQ(void)
struct irq_domain *host;
host = irq_domain_add_nomap(NULL, PS3_PLUG_MAX + 1, &ps3_host_ops, NULL);
- irq_set_default_host(host);
+ irq_set_default_domain(host);
for_each_possible_cpu(cpu) {
struct ps3_private *pd = &per_cpu(ps3_private, cpu);
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index fdc2f7f38dc9..f9d80111c322 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -611,7 +611,7 @@ static const struct irq_domain_ops pseries_irq_domain_ops = {
static int __pseries_msi_allocate_domains(struct pci_controller *phb,
unsigned int count)
{
- struct irq_domain *parent = irq_get_default_host();
+ struct irq_domain *parent = irq_get_default_domain();
phb->fwnode = irq_domain_alloc_named_id_fwnode("pSeries-MSI",
phb->global_number);
diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c
index 040827671d21..fb502b72fca1 100644
--- a/arch/powerpc/sysdev/ehv_pic.c
+++ b/arch/powerpc/sysdev/ehv_pic.c
@@ -291,5 +291,5 @@ void __init ehv_pic_init(void)
ehv_pic->coreint_flag = of_property_read_bool(np, "has-external-proxy");
global_ehv_pic = ehv_pic;
- irq_set_default_host(global_ehv_pic->irqhost);
+ irq_set_default_domain(global_ehv_pic->irqhost);
}
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index 037b04bf9a9f..a35be0232978 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -757,7 +757,7 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
ipic_write(ipic->regs, IPIC_SEMSR, temp);
primary_ipic = ipic;
- irq_set_default_host(primary_ipic->irqhost);
+ irq_set_default_domain(primary_ipic->irqhost);
ipic_write(ipic->regs, IPIC_SIMSR_H, 0);
ipic_write(ipic->regs, IPIC_SIMSR_L, 0);
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index d94cf36b0f65..4afbab83a2e2 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -1520,7 +1520,7 @@ struct mpic * __init mpic_alloc(struct device_node *node,
if (!(mpic->flags & MPIC_SECONDARY)) {
mpic_primary = mpic;
- irq_set_default_host(mpic->irqhost);
+ irq_set_default_domain(mpic->irqhost);
}
return mpic;
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index d3a4156e8788..c3fa539a9898 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -472,7 +472,7 @@ static int __init xics_allocate_domain(void)
return -ENOMEM;
}
- irq_set_default_host(xics_host);
+ irq_set_default_domain(xics_host);
return 0;
}
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index a6c388bdf5d0..dc2e61837396 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -1467,7 +1467,7 @@ static void __init xive_init_host(struct device_node *np)
xive_irq_domain = irq_domain_add_tree(np, &xive_irq_domain_ops, NULL);
if (WARN_ON(xive_irq_domain == NULL))
return;
- irq_set_default_host(xive_irq_domain);
+ irq_set_default_domain(xive_irq_domain);
}
static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
diff --git a/arch/riscv/Kbuild b/arch/riscv/Kbuild
index 2c585f7a0b6e..126fb738fc44 100644
--- a/arch/riscv/Kbuild
+++ b/arch/riscv/Kbuild
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-y += kernel/ mm/ net/
-obj-$(CONFIG_BUILTIN_DTB) += boot/dts/
obj-$(CONFIG_CRYPTO) += crypto/
obj-y += errata/
obj-$(CONFIG_KVM) += kvm/
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index e776fb35667e..bbec87b79309 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -65,6 +65,7 @@ config RISCV
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_CFI_CLANG
select ARCH_SUPPORTS_DEBUG_PAGEALLOC if MMU
+ select ARCH_SUPPORTS_HUGE_PFNMAP if TRANSPARENT_HUGEPAGE
select ARCH_SUPPORTS_HUGETLBFS if MMU
# LLD >= 14: https://github.com/llvm/llvm-project/issues/50505
select ARCH_SUPPORTS_LTO_CLANG if LLD_VERSION >= 140000
@@ -153,7 +154,7 @@ config RISCV
select HAVE_DYNAMIC_FTRACE_WITH_ARGS if HAVE_DYNAMIC_FTRACE
select HAVE_FTRACE_GRAPH_FUNC
select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
- select HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_FUNCTION_GRAPH_TRACER if HAVE_DYNAMIC_FTRACE_WITH_ARGS
select HAVE_FUNCTION_GRAPH_FREGS
select HAVE_FUNCTION_TRACER if !XIP_KERNEL && !PREEMPTION
select HAVE_EBPF_JIT if MMU
@@ -205,6 +206,7 @@ config RISCV
select PCI_DOMAINS_GENERIC if PCI
select PCI_ECAM if (ACPI && PCI)
select PCI_MSI if PCI
+ select RELOCATABLE if !MMU && !PHYS_RAM_BASE_FIXED
select RISCV_ALTERNATIVE if !XIP_KERNEL
select RISCV_APLIC
select RISCV_IMSIC
@@ -292,13 +294,6 @@ config MMU
Select if you want MMU-based virtualised addressing space
support by paged memory management. If unsure, say 'Y'.
-config PAGE_OFFSET
- hex
- default 0x80000000 if !MMU && RISCV_M_MODE
- default 0x80200000 if !MMU
- default 0xc0000000 if 32BIT
- default 0xff60000000000000 if 64BIT
-
config KASAN_SHADOW_OFFSET
hex
depends on KASAN_GENERIC
@@ -570,7 +565,8 @@ config RISCV_ISA_C
help
Adds "C" to the ISA subsets that the toolchain is allowed to emit
when building Linux, which results in compressed instructions in the
- Linux binary.
+ Linux binary. This option produces a kernel that will not run on
+ systems that do not support compressed instructions.
If you don't know what to do here, say Y.
@@ -591,8 +587,8 @@ config RISCV_ISA_SVNAPOT
depends on RISCV_ALTERNATIVE
default y
help
- Allow kernel to detect the Svnapot ISA-extension dynamically at boot
- time and enable its usage.
+ Enable support for the Svnapot ISA-extension when it is detected
+ at boot.
The Svnapot extension is used to mark contiguous PTEs as a range
of contiguous virtual-to-physical translations for a naturally
@@ -610,9 +606,8 @@ config RISCV_ISA_SVPBMT
depends on RISCV_ALTERNATIVE
default y
help
- Adds support to dynamically detect the presence of the Svpbmt
- ISA-extension (Supervisor-mode: page-based memory types) and
- enable its usage.
+ Add support for the Svpbmt ISA-extension (Supervisor-mode:
+ page-based memory types) in the kernel when it is detected at boot.
The memory type for a page contains a combination of attributes
that indicate the cacheability, idempotency, and ordering
@@ -631,14 +626,15 @@ config TOOLCHAIN_HAS_V
depends on AS_HAS_OPTION_ARCH
config RISCV_ISA_V
- bool "VECTOR extension support"
+ bool "Vector extension support"
depends on TOOLCHAIN_HAS_V
depends on FPU
select DYNAMIC_SIGFRAME
default y
help
- Say N here if you want to disable all vector related procedure
- in the kernel.
+ Add support for the Vector extension when it is detected at boot.
+ When this option is disabled, neither the kernel nor userspace may
+ use vector procedures.
If you don't know what to do here, say Y.
@@ -737,6 +733,14 @@ config TOOLCHAIN_HAS_VECTOR_CRYPTO
def_bool $(as-instr, .option arch$(comma) +v$(comma) +zvkb)
depends on AS_HAS_OPTION_ARCH
+config TOOLCHAIN_HAS_ZBA
+ bool
+ default y
+ depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64ima_zba)
+ depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zba)
+ depends on LLD_VERSION >= 150000 || LD_VERSION >= 23900
+ depends on AS_HAS_OPTION_ARCH
+
config RISCV_ISA_ZBA
bool "Zba extension support for bit manipulation instructions"
default y
@@ -751,12 +755,12 @@ config RISCV_ISA_ZBA
config RISCV_ISA_ZBB
bool "Zbb extension support for bit manipulation instructions"
- depends on TOOLCHAIN_HAS_ZBB
depends on RISCV_ALTERNATIVE
default y
help
- Adds support to dynamically detect the presence of the ZBB
- extension (basic bit manipulation) and enable its usage.
+ Add support for enabling optimisations in the kernel when the
+ Zbb extension is detected at boot. Some optimisations may
+ additionally depend on toolchain support for Zbb.
The Zbb extension provides instructions to accelerate a number
of bit-specific operations (count bit population, sign extending,
@@ -787,6 +791,28 @@ config RISCV_ISA_ZBC
If you don't know what to do here, say Y.
+config TOOLCHAIN_HAS_ZBKB
+ bool
+ default y
+ depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64ima_zbkb)
+ depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zbkb)
+ depends on LLD_VERSION >= 150000 || LD_VERSION >= 23900
+ depends on AS_HAS_OPTION_ARCH
+
+config RISCV_ISA_ZBKB
+ bool "Zbkb extension support for bit manipulation instructions"
+ depends on TOOLCHAIN_HAS_ZBKB
+ depends on RISCV_ALTERNATIVE
+ default y
+ help
+ Adds support to dynamically detect the presence of the ZBKB
+ extension (bit manipulation for cryptography) and enable its usage.
+
+ The Zbkb extension provides instructions to accelerate a number
+ of common cryptography operations (pack, zip, etc).
+
+ If you don't know what to do here, say Y.
+
config RISCV_ISA_ZICBOM
bool "Zicbom extension support for non-coherent DMA operation"
depends on MMU
@@ -795,9 +821,9 @@ config RISCV_ISA_ZICBOM
select RISCV_DMA_NONCOHERENT
select DMA_DIRECT_REMAP
help
- Adds support to dynamically detect the presence of the ZICBOM
- extension (Cache Block Management Operations) and enable its
- usage.
+ Add support for the Zicbom extension (Cache Block Management
+ Operations) and enable its use in the kernel when it is detected
+ at boot.
The Zicbom extension can be used to handle for example
non-coherent DMA support on devices that need it.
@@ -810,7 +836,7 @@ config RISCV_ISA_ZICBOZ
default y
help
Enable the use of the Zicboz extension (cbo.zero instruction)
- when available.
+ in the kernel when it is detected at boot.
The Zicboz extension is used for faster zeroing of memory.
@@ -848,8 +874,9 @@ config FPU
bool "FPU support"
default y
help
- Say N here if you want to disable all floating-point related procedure
- in the kernel.
+ Add support for floating point operations when an FPU is detected at
+ boot. When this option is disabled, neither the kernel nor userspace
+ may use the floating point unit.
If you don't know what to do here, say Y.
@@ -1079,8 +1106,9 @@ config PARAVIRT_TIME_ACCOUNTING
config RELOCATABLE
bool "Build a relocatable kernel"
- depends on MMU && 64BIT && !XIP_KERNEL
+ depends on !XIP_KERNEL
select MODULE_SECTIONS if MODULES
+ select ARCH_VMLINUX_NEEDS_RELOCS
help
This builds a kernel as a Position Independent Executable (PIE),
which retains all relocation metadata required to relocate the
@@ -1273,13 +1301,14 @@ config RISCV_ISA_FALLBACK
config BUILTIN_DTB
bool "Built-in device tree"
depends on OF && NONPORTABLE
+ select GENERIC_BUILTIN_DTB
help
Build a device tree into the Linux image.
This option should be selected if no bootloader is being used.
If unsure, say N.
-config BUILTIN_DTB_SOURCE
+config BUILTIN_DTB_NAME
string "Built-in device tree source"
depends on BUILTIN_DTB
help
diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs
index 17606940bb52..8b503e54fa1b 100644
--- a/arch/riscv/Kconfig.socs
+++ b/arch/riscv/Kconfig.socs
@@ -59,7 +59,6 @@ config ARCH_THEAD
config ARCH_VIRT
bool "QEMU Virt Machine"
- select CLINT_TIMER if RISCV_M_MODE
select POWER_RESET
select POWER_RESET_SYSCON
select POWER_RESET_SYSCON_POWEROFF
@@ -79,7 +78,6 @@ config ARCH_CANAAN
config SOC_CANAAN_K210
bool "Canaan Kendryte K210 SoC"
depends on !MMU && ARCH_CANAAN
- select CLINT_TIMER if RISCV_M_MODE
select ARCH_HAS_RESET_CONTROLLER
select PINCTRL
select COMMON_CLK
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 13fbc0f94238..539d2aef5cab 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -8,7 +8,7 @@
LDFLAGS_vmlinux := -z norelro
ifeq ($(CONFIG_RELOCATABLE),y)
- LDFLAGS_vmlinux += -shared -Bsymbolic -z notext --emit-relocs
+ LDFLAGS_vmlinux += -shared -Bsymbolic -z notext
KBUILD_CFLAGS += -fPIE
endif
ifeq ($(CONFIG_DYNAMIC_FTRACE),y)
@@ -98,7 +98,6 @@ KBUILD_AFLAGS += -march=$(riscv-march-y)
CC_FLAGS_FPU := -march=$(shell echo $(riscv-march-y) | sed -E 's/(rv32ima|rv64ima)([^v_]*)v?/\1\2/')
KBUILD_CFLAGS += -mno-save-restore
-KBUILD_CFLAGS += -DCONFIG_PAGE_OFFSET=$(CONFIG_PAGE_OFFSET)
ifeq ($(CONFIG_CMODEL_MEDLOW),y)
KBUILD_CFLAGS += -mcmodel=medlow
diff --git a/arch/riscv/Makefile.postlink b/arch/riscv/Makefile.postlink
index 6b0580949b6a..0e4cf8ad2f14 100644
--- a/arch/riscv/Makefile.postlink
+++ b/arch/riscv/Makefile.postlink
@@ -10,26 +10,17 @@ __archpost:
-include include/config/auto.conf
include $(srctree)/scripts/Kbuild.include
-include $(srctree)/scripts/Makefile.lib
quiet_cmd_relocs_check = CHKREL $@
cmd_relocs_check = \
$(CONFIG_SHELL) $(srctree)/arch/riscv/tools/relocs_check.sh "$(OBJDUMP)" "$(NM)" "$@"
-ifdef CONFIG_RELOCATABLE
-quiet_cmd_cp_vmlinux_relocs = CPREL vmlinux.relocs
-cmd_cp_vmlinux_relocs = cp vmlinux vmlinux.relocs
-
-endif
-
# `@true` prevents complaint when there is nothing to be done
-vmlinux: FORCE
+vmlinux vmlinux.unstripped: FORCE
@true
ifdef CONFIG_RELOCATABLE
$(call if_changed,relocs_check)
- $(call if_changed,cp_vmlinux_relocs)
- $(call if_changed,strip_relocs)
endif
clean:
diff --git a/arch/riscv/boot/Makefile b/arch/riscv/boot/Makefile
index b25d524ce5eb..bfc3d0b75b9b 100644
--- a/arch/riscv/boot/Makefile
+++ b/arch/riscv/boot/Makefile
@@ -32,10 +32,7 @@ $(obj)/xipImage: vmlinux FORCE
endif
ifdef CONFIG_RELOCATABLE
-vmlinux.relocs: vmlinux
- @ (! [ -f vmlinux.relocs ] && echo "vmlinux.relocs can't be found, please remove vmlinux and try again") || true
-
-$(obj)/Image: vmlinux.relocs FORCE
+$(obj)/Image: vmlinux.unstripped FORCE
else
$(obj)/Image: vmlinux FORCE
endif
diff --git a/arch/riscv/boot/dts/Makefile b/arch/riscv/boot/dts/Makefile
index bff887d38abe..64a898da9aee 100644
--- a/arch/riscv/boot/dts/Makefile
+++ b/arch/riscv/boot/dts/Makefile
@@ -8,5 +8,3 @@ subdir-y += sophgo
subdir-y += spacemit
subdir-y += starfive
subdir-y += thead
-
-obj-$(CONFIG_BUILTIN_DTB) := $(addsuffix .dtb.o, $(CONFIG_BUILTIN_DTB_SOURCE))
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index 0f7dcbe3c45b..3c8e16d71e17 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -26,7 +26,6 @@ CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
CONFIG_PROFILING=y
CONFIG_ARCH_MICROCHIP=y
-CONFIG_ARCH_RENESAS=y
CONFIG_ARCH_SIFIVE=y
CONFIG_ARCH_SOPHGO=y
CONFIG_ARCH_SPACEMIT=y
@@ -202,7 +201,6 @@ CONFIG_USB=y
CONFIG_USB_OTG=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_XHCI_PLATFORM=y
-# CONFIG_USB_XHCI_RCAR is not set
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
CONFIG_USB_OHCI_HCD=y
diff --git a/arch/riscv/configs/nommu_k210_defconfig b/arch/riscv/configs/nommu_k210_defconfig
index 87ff5a1233af..ee18d1e333f2 100644
--- a/arch/riscv/configs/nommu_k210_defconfig
+++ b/arch/riscv/configs/nommu_k210_defconfig
@@ -35,7 +35,7 @@ CONFIG_NR_CPUS=2
CONFIG_CMDLINE="earlycon console=ttySIF0"
CONFIG_CMDLINE_FORCE=y
CONFIG_BUILTIN_DTB=y
-CONFIG_BUILTIN_DTB_SOURCE="canaan/k210_generic"
+CONFIG_BUILTIN_DTB_NAME="canaan/k210_generic"
# CONFIG_SECCOMP is not set
# CONFIG_STACKPROTECTOR is not set
# CONFIG_GCC_PLUGINS is not set
diff --git a/arch/riscv/configs/nommu_k210_sdcard_defconfig b/arch/riscv/configs/nommu_k210_sdcard_defconfig
index 95cbd574f291..e770d81b738e 100644
--- a/arch/riscv/configs/nommu_k210_sdcard_defconfig
+++ b/arch/riscv/configs/nommu_k210_sdcard_defconfig
@@ -27,7 +27,7 @@ CONFIG_NR_CPUS=2
CONFIG_CMDLINE="earlycon console=ttySIF0 root=/dev/mmcblk0p1 rootwait ro"
CONFIG_CMDLINE_FORCE=y
CONFIG_BUILTIN_DTB=y
-CONFIG_BUILTIN_DTB_SOURCE="canaan/k210_generic"
+CONFIG_BUILTIN_DTB_NAME="canaan/k210_generic"
# CONFIG_SECCOMP is not set
# CONFIG_STACKPROTECTOR is not set
# CONFIG_GCC_PLUGINS is not set
diff --git a/arch/riscv/errata/Makefile b/arch/riscv/errata/Makefile
index f0da9d7b39c3..bc6c77ba837d 100644
--- a/arch/riscv/errata/Makefile
+++ b/arch/riscv/errata/Makefile
@@ -1,5 +1,9 @@
ifdef CONFIG_RELOCATABLE
-KBUILD_CFLAGS += -fno-pie
+# We can't use PIC/PIE when handling early-boot errata parsing, as the kernel
+# doesn't have a GOT setup at that point. So instead just use medany: it's
+# usually position-independent, so it should be good enough for the errata
+# handling.
+KBUILD_CFLAGS += -fno-pie -mcmodel=medany
endif
ifdef CONFIG_RISCV_ALTERNATIVE_EARLY
diff --git a/arch/riscv/include/asm/arch_hweight.h b/arch/riscv/include/asm/arch_hweight.h
index 613769b9cdc9..0e7cdbbec8ef 100644
--- a/arch/riscv/include/asm/arch_hweight.h
+++ b/arch/riscv/include/asm/arch_hweight.h
@@ -19,7 +19,7 @@
static __always_inline unsigned int __arch_hweight32(unsigned int w)
{
-#ifdef CONFIG_RISCV_ISA_ZBB
+#if defined(CONFIG_RISCV_ISA_ZBB) && defined(CONFIG_TOOLCHAIN_HAS_ZBB)
asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
RISCV_ISA_EXT_ZBB, 1)
: : : : legacy);
@@ -50,7 +50,7 @@ static inline unsigned int __arch_hweight8(unsigned int w)
#if BITS_PER_LONG == 64
static __always_inline unsigned long __arch_hweight64(__u64 w)
{
-# ifdef CONFIG_RISCV_ISA_ZBB
+#if defined(CONFIG_RISCV_ISA_ZBB) && defined(CONFIG_TOOLCHAIN_HAS_ZBB)
asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
RISCV_ISA_EXT_ZBB, 1)
: : : : legacy);
@@ -64,7 +64,7 @@ static __always_inline unsigned long __arch_hweight64(__u64 w)
return w;
legacy:
-# endif
+#endif
return __sw_hweight64(w);
}
#else /* BITS_PER_LONG == 64 */
diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
index 776354895b81..a8a2af6dfe9d 100644
--- a/arch/riscv/include/asm/asm.h
+++ b/arch/riscv/include/asm/asm.h
@@ -27,6 +27,7 @@
#define REG_ASM __REG_SEL(.dword, .word)
#define SZREG __REG_SEL(8, 4)
#define LGREG __REG_SEL(3, 2)
+#define SRLI __REG_SEL(srliw, srli)
#if __SIZEOF_POINTER__ == 8
#ifdef __ASSEMBLY__
diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h
index 49a0f48d93df..d59310f74c2b 100644
--- a/arch/riscv/include/asm/bitops.h
+++ b/arch/riscv/include/asm/bitops.h
@@ -15,7 +15,7 @@
#include <asm/barrier.h>
#include <asm/bitsperlong.h>
-#if !defined(CONFIG_RISCV_ISA_ZBB) || defined(NO_ALTERNATIVE)
+#if !(defined(CONFIG_RISCV_ISA_ZBB) && defined(CONFIG_TOOLCHAIN_HAS_ZBB)) || defined(NO_ALTERNATIVE)
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/ffs.h>
@@ -175,7 +175,7 @@ legacy:
variable_fls(x_); \
})
-#endif /* !defined(CONFIG_RISCV_ISA_ZBB) || defined(NO_ALTERNATIVE) */
+#endif /* !(defined(CONFIG_RISCV_ISA_ZBB) && defined(CONFIG_TOOLCHAIN_HAS_ZBB)) || defined(NO_ALTERNATIVE) */
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/fls64.h>
diff --git a/arch/riscv/include/asm/checksum.h b/arch/riscv/include/asm/checksum.h
index 88e6f1499e88..da378856f1d5 100644
--- a/arch/riscv/include/asm/checksum.h
+++ b/arch/riscv/include/asm/checksum.h
@@ -49,8 +49,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
* ZBB only saves three instructions on 32-bit and five on 64-bit so not
* worth checking if supported without Alternatives.
*/
- if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) &&
- IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
+ if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) && IS_ENABLED(CONFIG_TOOLCHAIN_HAS_ZBB)) {
unsigned long fold_temp;
asm goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0,
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index 427c41dde643..2ec119eb147b 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -365,16 +365,48 @@ static __always_inline void __cmpwait(volatile void *ptr,
{
unsigned long tmp;
+ u32 *__ptr32b;
+ ulong __s, __val, __mask;
+
asm goto(ALTERNATIVE("j %l[no_zawrs]", "nop",
0, RISCV_ISA_EXT_ZAWRS, 1)
: : : : no_zawrs);
switch (size) {
case 1:
- fallthrough;
+ __ptr32b = (u32 *)((ulong)(ptr) & ~0x3);
+ __s = ((ulong)(ptr) & 0x3) * BITS_PER_BYTE;
+ __val = val << __s;
+ __mask = 0xff << __s;
+
+ asm volatile(
+ " lr.w %0, %1\n"
+ " and %0, %0, %3\n"
+ " xor %0, %0, %2\n"
+ " bnez %0, 1f\n"
+ ZAWRS_WRS_NTO "\n"
+ "1:"
+ : "=&r" (tmp), "+A" (*(__ptr32b))
+ : "r" (__val), "r" (__mask)
+ : "memory");
+ break;
case 2:
- /* RISC-V doesn't have lr instructions on byte and half-word. */
- goto no_zawrs;
+ __ptr32b = (u32 *)((ulong)(ptr) & ~0x3);
+ __s = ((ulong)(ptr) & 0x2) * BITS_PER_BYTE;
+ __val = val << __s;
+ __mask = 0xffff << __s;
+
+ asm volatile(
+ " lr.w %0, %1\n"
+ " and %0, %0, %3\n"
+ " xor %0, %0, %2\n"
+ " bnez %0, 1f\n"
+ ZAWRS_WRS_NTO "\n"
+ "1:"
+ : "=&r" (tmp), "+A" (*(__ptr32b))
+ : "r" (__val), "r" (__mask)
+ : "memory");
+ break;
case 4:
asm volatile(
" lr.w %0, %1\n"
diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
index 569140d6e639..f56b409361fb 100644
--- a/arch/riscv/include/asm/cpufeature.h
+++ b/arch/riscv/include/asm/cpufeature.h
@@ -56,6 +56,9 @@ void __init riscv_user_isa_enable(void);
#define __RISCV_ISA_EXT_BUNDLE(_name, _bundled_exts) \
_RISCV_ISA_EXT_DATA(_name, RISCV_ISA_EXT_INVALID, _bundled_exts, \
ARRAY_SIZE(_bundled_exts), NULL)
+#define __RISCV_ISA_EXT_BUNDLE_VALIDATE(_name, _bundled_exts, _validate) \
+ _RISCV_ISA_EXT_DATA(_name, RISCV_ISA_EXT_INVALID, _bundled_exts, \
+ ARRAY_SIZE(_bundled_exts), _validate)
/* Used to declare extensions that are a superset of other extensions (Zvbb for instance) */
#define __RISCV_ISA_EXT_SUPERSET(_name, _id, _sub_exts) \
@@ -63,7 +66,7 @@ void __init riscv_user_isa_enable(void);
#define __RISCV_ISA_EXT_SUPERSET_VALIDATE(_name, _id, _sub_exts, _validate) \
_RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts), _validate)
-bool check_unaligned_access_emulated_all_cpus(void);
+bool __init check_unaligned_access_emulated_all_cpus(void);
#if defined(CONFIG_RISCV_SCALAR_MISALIGNED)
void check_unaligned_access_emulated(struct work_struct *work __always_unused);
void unaligned_emulation_finish(void);
@@ -76,7 +79,7 @@ static inline bool unaligned_ctl_available(void)
}
#endif
-bool check_vector_unaligned_access_emulated_all_cpus(void);
+bool __init check_vector_unaligned_access_emulated_all_cpus(void);
#if defined(CONFIG_RISCV_VECTOR_MISALIGNED)
void check_vector_unaligned_access_emulated(struct work_struct *work __always_unused);
DECLARE_PER_CPU(long, vector_misaligned_access);
diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
index c4721ce44ca4..d627f63ee289 100644
--- a/arch/riscv/include/asm/ftrace.h
+++ b/arch/riscv/include/asm/ftrace.h
@@ -79,7 +79,6 @@ struct dyn_arch_ftrace {
#define AUIPC_RA (0x00000097)
#define JALR_T0 (0x000282e7)
#define AUIPC_T0 (0x00000297)
-#define NOP4 (0x00000013)
#define to_jalr_t0(offset) \
(((offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_T0)
@@ -92,7 +91,7 @@ struct dyn_arch_ftrace {
#define make_call_t0(caller, callee, call) \
do { \
unsigned int offset = \
- (unsigned long) callee - (unsigned long) caller; \
+ (unsigned long) (callee) - (unsigned long) (caller); \
call[0] = to_auipc_t0(offset); \
call[1] = to_jalr_t0(offset); \
} while (0)
@@ -108,7 +107,7 @@ do { \
#define make_call_ra(caller, callee, call) \
do { \
unsigned int offset = \
- (unsigned long) callee - (unsigned long) caller; \
+ (unsigned long) (callee) - (unsigned long) (caller); \
call[0] = to_auipc_ra(offset); \
call[1] = to_jalr_ra(offset); \
} while (0)
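The added parentheses around caller and callee matter because the macro arguments can be arbitrary expressions; the old expansion applied the casts and the subtraction with surprising precedence. A hypothetical illustration of the failure mode (names are made up):

    /* Old expansion with a conditional expression as 'callee':
     *
     *     make_call_t0(site, use_alt ? alt_fn : fn, insns)
     *
     * became
     *
     *     (unsigned long) use_alt ? alt_fn : fn - (unsigned long) site
     *
     * so the cast applied only to 'use_alt' and the subtraction only to the
     * else branch.  With '(callee)' and '(caller)' parenthesised, the offset
     * is always computed as (callee) - (caller), whatever the arguments are.
     */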
@@ -207,7 +206,7 @@ ftrace_partial_regs(const struct ftrace_regs *fregs, struct pt_regs *regs)
{
struct __arch_ftrace_regs *afregs = arch_ftrace_regs(fregs);
- memcpy(&regs->a0, afregs->args, sizeof(afregs->args));
+ memcpy(&regs->a_regs, afregs->args, sizeof(afregs->args));
regs->epc = afregs->epc;
regs->ra = afregs->ra;
regs->sp = afregs->sp;
diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
index 869da082252a..e3cbf203cdde 100644
--- a/arch/riscv/include/asm/hwcap.h
+++ b/arch/riscv/include/asm/hwcap.h
@@ -100,6 +100,11 @@
#define RISCV_ISA_EXT_ZICCRSE 91
#define RISCV_ISA_EXT_SVADE 92
#define RISCV_ISA_EXT_SVADU 93
+#define RISCV_ISA_EXT_ZFBFMIN 94
+#define RISCV_ISA_EXT_ZVFBFMIN 95
+#define RISCV_ISA_EXT_ZVFBFWMA 96
+#define RISCV_ISA_EXT_ZAAMO 97
+#define RISCV_ISA_EXT_ZALRSC 98
#define RISCV_ISA_EXT_XLINUXENVCFG 127
diff --git a/arch/riscv/include/asm/hwprobe.h b/arch/riscv/include/asm/hwprobe.h
index dd624523981c..1f690fea0e03 100644
--- a/arch/riscv/include/asm/hwprobe.h
+++ b/arch/riscv/include/asm/hwprobe.h
@@ -8,7 +8,7 @@
#include <uapi/asm/hwprobe.h>
-#define RISCV_HWPROBE_MAX_KEY 11
+#define RISCV_HWPROBE_MAX_KEY 12
static inline bool riscv_hwprobe_key_is_valid(__s64 key)
{
diff --git a/arch/riscv/include/asm/insn-def.h b/arch/riscv/include/asm/insn-def.h
index 9a913010cdd9..71060a2f838e 100644
--- a/arch/riscv/include/asm/insn-def.h
+++ b/arch/riscv/include/asm/insn-def.h
@@ -199,5 +199,8 @@
#define RISCV_PAUSE ".4byte 0x100000f"
#define ZAWRS_WRS_NTO ".4byte 0x00d00073"
#define ZAWRS_WRS_STO ".4byte 0x01d00073"
+#define RISCV_NOP4 ".4byte 0x00000013"
+
+#define RISCV_INSN_NOP4 _AC(0x00000013, U)
#endif /* __ASM_INSN_DEF_H */
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 125f5ecd9565..572a141ddecd 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -24,21 +24,22 @@
* When not using MMU this corresponds to the first free page in
* physical memory (aligned on a page boundary).
*/
-#ifdef CONFIG_64BIT
#ifdef CONFIG_MMU
-#define PAGE_OFFSET kernel_map.page_offset
-#else
-#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
-#endif
-/*
- * By default, CONFIG_PAGE_OFFSET value corresponds to SV57 address space so
- * define the PAGE_OFFSET value for SV48 and SV39.
- */
+#ifdef CONFIG_64BIT
+#define PAGE_OFFSET_L5 _AC(0xff60000000000000, UL)
#define PAGE_OFFSET_L4 _AC(0xffffaf8000000000, UL)
#define PAGE_OFFSET_L3 _AC(0xffffffd600000000, UL)
+#ifdef CONFIG_XIP_KERNEL
+#define PAGE_OFFSET PAGE_OFFSET_L3
#else
-#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
+#define PAGE_OFFSET kernel_map.page_offset
+#endif /* CONFIG_XIP_KERNEL */
+#else
+#define PAGE_OFFSET _AC(0xc0000000, UL)
#endif /* CONFIG_64BIT */
+#else
+#define PAGE_OFFSET ((unsigned long)phys_ram_base)
+#endif /* CONFIG_MMU */
#ifndef __ASSEMBLY__
@@ -95,14 +96,9 @@ typedef struct page *pgtable_t;
#define MIN_MEMBLOCK_ADDR 0
#endif
-#ifdef CONFIG_MMU
#define ARCH_PFN_OFFSET (PFN_DOWN((unsigned long)phys_ram_base))
-#else
-#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
-#endif /* CONFIG_MMU */
struct kernel_mapping {
- unsigned long page_offset;
unsigned long virt_addr;
unsigned long virt_offset;
uintptr_t phys_addr;
@@ -116,6 +112,7 @@ struct kernel_mapping {
uintptr_t xiprom;
uintptr_t xiprom_sz;
#else
+ unsigned long page_offset;
unsigned long va_kernel_pa_offset;
#endif
};
diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h
index 3e2aebea6312..770ce18a7328 100644
--- a/arch/riscv/include/asm/pgalloc.h
+++ b/arch/riscv/include/asm/pgalloc.h
@@ -15,24 +15,6 @@
#define __HAVE_ARCH_PUD_FREE
#include <asm-generic/pgalloc.h>
-/*
- * While riscv platforms with riscv_ipi_for_rfence as true require an IPI to
- * perform TLB shootdown, some platforms with riscv_ipi_for_rfence as false use
- * SBI to perform TLB shootdown. To keep software pagetable walkers safe in this
- * case we switch to RCU based table free (MMU_GATHER_RCU_TABLE_FREE). See the
- * comment below 'ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE' in include/asm-generic/tlb.h
- * for more details.
- */
-static inline void riscv_tlb_remove_ptdesc(struct mmu_gather *tlb, void *pt)
-{
- if (riscv_use_sbi_for_rfence()) {
- tlb_remove_ptdesc(tlb, pt);
- } else {
- pagetable_dtor(pt);
- tlb_remove_page_ptdesc(tlb, pt);
- }
-}
-
static inline void pmd_populate_kernel(struct mm_struct *mm,
pmd_t *pmd, pte_t *pte)
{
@@ -108,14 +90,14 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
unsigned long addr)
{
if (pgtable_l4_enabled)
- riscv_tlb_remove_ptdesc(tlb, virt_to_ptdesc(pud));
+ tlb_remove_ptdesc(tlb, virt_to_ptdesc(pud));
}
static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
unsigned long addr)
{
if (pgtable_l5_enabled)
- riscv_tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
+ tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
}
#endif /* __PAGETABLE_PMD_FOLDED */
@@ -143,7 +125,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
unsigned long addr)
{
- riscv_tlb_remove_ptdesc(tlb, virt_to_ptdesc(pmd));
+ tlb_remove_ptdesc(tlb, virt_to_ptdesc(pmd));
}
#endif /* __PAGETABLE_PMD_FOLDED */
@@ -151,7 +133,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
unsigned long addr)
{
- riscv_tlb_remove_ptdesc(tlb, page_ptdesc(pte));
+ tlb_remove_ptdesc(tlb, page_ptdesc(pte));
}
#endif /* CONFIG_MMU */
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 050fdc49b5ad..428e48e5f57d 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -12,7 +12,11 @@
#include <asm/pgtable-bits.h>
#ifndef CONFIG_MMU
-#define KERNEL_LINK_ADDR PAGE_OFFSET
+#ifdef CONFIG_RELOCATABLE
+#define KERNEL_LINK_ADDR UL(0)
+#else
+#define KERNEL_LINK_ADDR _AC(CONFIG_PHYS_RAM_BASE, UL)
+#endif
#define KERN_VIRT_SIZE (UL(-1))
#else
@@ -341,6 +345,14 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
+#define pte_pgprot pte_pgprot
+static inline pgprot_t pte_pgprot(pte_t pte)
+{
+ unsigned long pfn = pte_pfn(pte);
+
+ return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
+}
+
static inline int pte_present(pte_t pte)
{
return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
@@ -674,6 +686,11 @@ static inline pmd_t pte_pmd(pte_t pte)
return __pmd(pte_val(pte));
}
+static inline pud_t pte_pud(pte_t pte)
+{
+ return __pud(pte_val(pte));
+}
+
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
return pmd;
@@ -699,6 +716,18 @@ static inline unsigned long pud_pfn(pud_t pud)
return ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT);
}
+#define pmd_pgprot pmd_pgprot
+static inline pgprot_t pmd_pgprot(pmd_t pmd)
+{
+ return pte_pgprot(pmd_pte(pmd));
+}
+
+#define pud_pgprot pud_pgprot
+static inline pgprot_t pud_pgprot(pud_t pud)
+{
+ return pte_pgprot(pud_pte(pud));
+}
+
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
@@ -768,6 +797,30 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd)
return pte_pmd(pte_mkdevmap(pmd_pte(pmd)));
}
+#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
+static inline bool pmd_special(pmd_t pmd)
+{
+ return pte_special(pmd_pte(pmd));
+}
+
+static inline pmd_t pmd_mkspecial(pmd_t pmd)
+{
+ return pte_pmd(pte_mkspecial(pmd_pte(pmd)));
+}
+#endif
+
+#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
+static inline bool pud_special(pud_t pud)
+{
+ return pte_special(pud_pte(pud));
+}
+
+static inline pud_t pud_mkspecial(pud_t pud)
+{
+ return pte_pud(pte_mkspecial(pud_pte(pud)));
+}
+#endif
+
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
diff --git a/arch/riscv/include/asm/ptrace.h b/arch/riscv/include/asm/ptrace.h
index b5b0adcc85c1..2910231977cb 100644
--- a/arch/riscv/include/asm/ptrace.h
+++ b/arch/riscv/include/asm/ptrace.h
@@ -23,14 +23,16 @@ struct pt_regs {
unsigned long t2;
unsigned long s0;
unsigned long s1;
- unsigned long a0;
- unsigned long a1;
- unsigned long a2;
- unsigned long a3;
- unsigned long a4;
- unsigned long a5;
- unsigned long a6;
- unsigned long a7;
+ struct_group(a_regs,
+ unsigned long a0;
+ unsigned long a1;
+ unsigned long a2;
+ unsigned long a3;
+ unsigned long a4;
+ unsigned long a5;
+ unsigned long a6;
+ unsigned long a7;
+ );
unsigned long s2;
unsigned long s3;
unsigned long s4;
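Wrapping a0-a7 in struct_group() (here and in the ftrace_partial_regs() hunk above) gives the argument-register block its own name without changing the layout, so a memcpy() into &regs->a_regs covers exactly those eight registers rather than appearing to write past &regs->a0 as far as fortify-string bounds checks are concerned. A minimal sketch of how struct_group() behaves, on an illustrative structure rather than the kernel's pt_regs:

    #include <linux/stddef.h>   /* struct_group() */
    #include <linux/string.h>

    struct demo_regs {
            unsigned long pc;
            struct_group(args,          /* members remain addressable as before */
                    unsigned long a0;
                    unsigned long a1;
            );
            unsigned long sp;
    };

    static void demo_copy_args(struct demo_regs *regs, const unsigned long *src)
    {
            /* Copies a0 and a1 only; sizeof() of the group covers both. */
            memcpy(&regs->args, src, sizeof(regs->args));
    }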
diff --git a/arch/riscv/include/asm/runtime-const.h b/arch/riscv/include/asm/runtime-const.h
new file mode 100644
index 000000000000..451fd76b8811
--- /dev/null
+++ b/arch/riscv/include/asm/runtime-const.h
@@ -0,0 +1,268 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_RUNTIME_CONST_H
+#define _ASM_RISCV_RUNTIME_CONST_H
+
+#include <asm/asm.h>
+#include <asm/alternative.h>
+#include <asm/cacheflush.h>
+#include <asm/insn-def.h>
+#include <linux/memory.h>
+#include <asm/text-patching.h>
+
+#include <linux/uaccess.h>
+
+#ifdef CONFIG_32BIT
+#define runtime_const_ptr(sym) \
+({ \
+ typeof(sym) __ret; \
+ asm_inline(".option push\n\t" \
+ ".option norvc\n\t" \
+ "1:\t" \
+ "lui %[__ret],0x89abd\n\t" \
+ "addi %[__ret],%[__ret],-0x211\n\t" \
+ ".option pop\n\t" \
+ ".pushsection runtime_ptr_" #sym ",\"a\"\n\t" \
+ ".long 1b - .\n\t" \
+ ".popsection" \
+ : [__ret] "=r" (__ret)); \
+ __ret; \
+})
+#else
+/*
+ * Loading 64-bit constants into a register from immediates is a non-trivial
+ * task on riscv64. To get it somewhat performant, load 32 bits into two
+ * different registers and then combine the results.
+ *
+ * If the processor supports the Zbkb extension, we can combine the final
+ * "slli,slli,srli,add" into the single "pack" instruction. If the processor
+ * doesn't support Zbkb but does support the Zba extension, we can
+ * combine the final "slli,srli,add" into one instruction "add.uw".
+ */
+#define RISCV_RUNTIME_CONST_64_PREAMBLE \
+ ".option push\n\t" \
+ ".option norvc\n\t" \
+ "1:\t" \
+ "lui %[__ret],0x89abd\n\t" \
+ "lui %[__tmp],0x1234\n\t" \
+ "addiw %[__ret],%[__ret],-0x211\n\t" \
+ "addiw %[__tmp],%[__tmp],0x567\n\t" \
+
+#define RISCV_RUNTIME_CONST_64_BASE \
+ "slli %[__tmp],%[__tmp],32\n\t" \
+ "slli %[__ret],%[__ret],32\n\t" \
+ "srli %[__ret],%[__ret],32\n\t" \
+ "add %[__ret],%[__ret],%[__tmp]\n\t" \
+
+#define RISCV_RUNTIME_CONST_64_ZBA \
+ ".option push\n\t" \
+ ".option arch,+zba\n\t" \
+ ".option norvc\n\t" \
+ "slli %[__tmp],%[__tmp],32\n\t" \
+ "add.uw %[__ret],%[__ret],%[__tmp]\n\t" \
+ "nop\n\t" \
+ "nop\n\t" \
+ ".option pop\n\t" \
+
+#define RISCV_RUNTIME_CONST_64_ZBKB \
+ ".option push\n\t" \
+ ".option arch,+zbkb\n\t" \
+ ".option norvc\n\t" \
+ "pack %[__ret],%[__ret],%[__tmp]\n\t" \
+ "nop\n\t" \
+ "nop\n\t" \
+ "nop\n\t" \
+ ".option pop\n\t" \
+
+#define RISCV_RUNTIME_CONST_64_POSTAMBLE(sym) \
+ ".option pop\n\t" \
+ ".pushsection runtime_ptr_" #sym ",\"a\"\n\t" \
+ ".long 1b - .\n\t" \
+ ".popsection" \
+
+#if defined(CONFIG_RISCV_ISA_ZBA) && defined(CONFIG_TOOLCHAIN_HAS_ZBA) \
+ && defined(CONFIG_RISCV_ISA_ZBKB)
+#define runtime_const_ptr(sym) \
+({ \
+ typeof(sym) __ret, __tmp; \
+ asm_inline(RISCV_RUNTIME_CONST_64_PREAMBLE \
+ ALTERNATIVE_2( \
+ RISCV_RUNTIME_CONST_64_BASE, \
+ RISCV_RUNTIME_CONST_64_ZBA, \
+ 0, RISCV_ISA_EXT_ZBA, 1, \
+ RISCV_RUNTIME_CONST_64_ZBKB, \
+ 0, RISCV_ISA_EXT_ZBKB, 1 \
+ ) \
+ RISCV_RUNTIME_CONST_64_POSTAMBLE(sym) \
+ : [__ret] "=r" (__ret), [__tmp] "=r" (__tmp)); \
+ __ret; \
+})
+#elif defined(CONFIG_RISCV_ISA_ZBA) && defined(CONFIG_TOOLCHAIN_HAS_ZBA)
+#define runtime_const_ptr(sym) \
+({ \
+ typeof(sym) __ret, __tmp; \
+ asm_inline(RISCV_RUNTIME_CONST_64_PREAMBLE \
+ ALTERNATIVE( \
+ RISCV_RUNTIME_CONST_64_BASE, \
+ RISCV_RUNTIME_CONST_64_ZBA, \
+ 0, RISCV_ISA_EXT_ZBA, 1 \
+ ) \
+ RISCV_RUNTIME_CONST_64_POSTAMBLE(sym) \
+ : [__ret] "=r" (__ret), [__tmp] "=r" (__tmp)); \
+ __ret; \
+})
+#elif defined(CONFIG_RISCV_ISA_ZBKB)
+#define runtime_const_ptr(sym) \
+({ \
+ typeof(sym) __ret, __tmp; \
+ asm_inline(RISCV_RUNTIME_CONST_64_PREAMBLE \
+ ALTERNATIVE( \
+ RISCV_RUNTIME_CONST_64_BASE, \
+ RISCV_RUNTIME_CONST_64_ZBKB, \
+ 0, RISCV_ISA_EXT_ZBKB, 1 \
+ ) \
+ RISCV_RUNTIME_CONST_64_POSTAMBLE(sym) \
+ : [__ret] "=r" (__ret), [__tmp] "=r" (__tmp)); \
+ __ret; \
+})
+#else
+#define runtime_const_ptr(sym) \
+({ \
+ typeof(sym) __ret, __tmp; \
+ asm_inline(RISCV_RUNTIME_CONST_64_PREAMBLE \
+ RISCV_RUNTIME_CONST_64_BASE \
+ RISCV_RUNTIME_CONST_64_POSTAMBLE(sym) \
+ : [__ret] "=r" (__ret), [__tmp] "=r" (__tmp)); \
+ __ret; \
+})
+#endif
+#endif
+
+#define runtime_const_shift_right_32(val, sym) \
+({ \
+ u32 __ret; \
+ asm_inline(".option push\n\t" \
+ ".option norvc\n\t" \
+ "1:\t" \
+ SRLI " %[__ret],%[__val],12\n\t" \
+ ".option pop\n\t" \
+ ".pushsection runtime_shift_" #sym ",\"a\"\n\t" \
+ ".long 1b - .\n\t" \
+ ".popsection" \
+ : [__ret] "=r" (__ret) \
+ : [__val] "r" (val)); \
+ __ret; \
+})
+
+#define runtime_const_init(type, sym) do { \
+ extern s32 __start_runtime_##type##_##sym[]; \
+ extern s32 __stop_runtime_##type##_##sym[]; \
+ \
+ runtime_const_fixup(__runtime_fixup_##type, \
+ (unsigned long)(sym), \
+ __start_runtime_##type##_##sym, \
+ __stop_runtime_##type##_##sym); \
+} while (0)
+
+static inline void __runtime_fixup_caches(void *where, unsigned int insns)
+{
+ /* On riscv there are currently only cache-wide flushes so va is ignored. */
+ __always_unused uintptr_t va = (uintptr_t)where;
+
+ flush_icache_range(va, va + 4 * insns);
+}
+
+/*
+ * The 32-bit immediate is stored in a lui+addi pairing.
+ * lui holds the upper 20 bits of the immediate in the first 20 bits of the instruction.
+ * addi holds the lower 12 bits of the immediate in the first 12 bits of the instruction.
+ */
+static inline void __runtime_fixup_32(__le16 *lui_parcel, __le16 *addi_parcel, unsigned int val)
+{
+ unsigned int lower_immediate, upper_immediate;
+ u32 lui_insn, addi_insn, addi_insn_mask;
+ __le32 lui_res, addi_res;
+
+ /* Mask out upper 12 bit of addi */
+ addi_insn_mask = 0x000fffff;
+
+ lui_insn = (u32)le16_to_cpu(lui_parcel[0]) | (u32)le16_to_cpu(lui_parcel[1]) << 16;
+ addi_insn = (u32)le16_to_cpu(addi_parcel[0]) | (u32)le16_to_cpu(addi_parcel[1]) << 16;
+
+ lower_immediate = sign_extend32(val, 11);
+ upper_immediate = (val - lower_immediate);
+
+ if (upper_immediate & 0xfffff000) {
+ /* replace upper 20 bits of lui with upper immediate */
+ lui_insn &= 0x00000fff;
+ lui_insn |= upper_immediate & 0xfffff000;
+ } else {
+ /* replace lui with nop if immediate is small enough to fit in addi */
+ lui_insn = RISCV_INSN_NOP4;
+ /*
+ * lui is being skipped, so do a load instead of an add. A load
+ * is performed by adding with the x0 register. Setting rs to
+ * zero with the following mask will accomplish this goal.
+ */
+ addi_insn_mask &= 0x07fff;
+ }
+
+ if (lower_immediate & 0x00000fff) {
+ /* replace upper 12 bits of addi with lower 12 bits of val */
+ addi_insn &= addi_insn_mask;
+ addi_insn |= (lower_immediate & 0x00000fff) << 20;
+ } else {
+ /* replace addi with nop if lower_immediate is empty */
+ addi_insn = RISCV_INSN_NOP4;
+ }
+
+ addi_res = cpu_to_le32(addi_insn);
+ lui_res = cpu_to_le32(lui_insn);
+ mutex_lock(&text_mutex);
+ patch_insn_write(addi_parcel, &addi_res, sizeof(addi_res));
+ patch_insn_write(lui_parcel, &lui_res, sizeof(lui_res));
+ mutex_unlock(&text_mutex);
+}
+
+static inline void __runtime_fixup_ptr(void *where, unsigned long val)
+{
+#ifdef CONFIG_32BIT
+ __runtime_fixup_32(where, where + 4, val);
+ __runtime_fixup_caches(where, 2);
+#else
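+ /*
+ * 64-bit: the low word patches the instructions at byte offsets 0
+ * and 8, the high word those at offsets 4 and 12.
+ */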
+ __runtime_fixup_32(where, where + 8, val);
+ __runtime_fixup_32(where + 4, where + 12, val >> 32);
+ __runtime_fixup_caches(where, 4);
+#endif
+}
+
+/*
+ * Replace the least significant 5 bits of the srli/srliw immediate, which
+ * is located at bits 20-24 of the instruction.
+ */
+static inline void __runtime_fixup_shift(void *where, unsigned long val)
+{
+ __le16 *parcel = where;
+ __le32 res;
+ u32 insn;
+
+ insn = (u32)le16_to_cpu(parcel[0]) | (u32)le16_to_cpu(parcel[1]) << 16;
+
+ insn &= 0xfe0fffff;
+ insn |= (val & 0b11111) << 20;
+
+ res = cpu_to_le32(insn);
+ mutex_lock(&text_mutex);
+ patch_text_nosync(where, &res, sizeof(insn));
+ mutex_unlock(&text_mutex);
+}
+
+static inline void runtime_const_fixup(void (*fn)(void *, unsigned long),
+ unsigned long val, s32 *start, s32 *end)
+{
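+ /* Each table entry holds a self-relative offset to the instruction to patch. */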
+ while (start < end) {
+ fn(*start + (void *)start, val);
+ start++;
+ }
+}
+
+#endif /* _ASM_RISCV_RUNTIME_CONST_H */
diff --git a/arch/riscv/include/asm/suspend.h b/arch/riscv/include/asm/suspend.h
index 4ffb022b097f..dc5782b5fbad 100644
--- a/arch/riscv/include/asm/suspend.h
+++ b/arch/riscv/include/asm/suspend.h
@@ -18,6 +18,10 @@ struct suspend_context {
unsigned long ie;
#ifdef CONFIG_MMU
unsigned long satp;
+ unsigned long stimecmp;
+#if __riscv_xlen < 64
+ unsigned long stimecmph;
+#endif
#endif
};
diff --git a/arch/riscv/include/uapi/asm/hwprobe.h b/arch/riscv/include/uapi/asm/hwprobe.h
index c3c1cc951cb9..3c2fce939673 100644
--- a/arch/riscv/include/uapi/asm/hwprobe.h
+++ b/arch/riscv/include/uapi/asm/hwprobe.h
@@ -73,6 +73,14 @@ struct riscv_hwprobe {
#define RISCV_HWPROBE_EXT_ZCMOP (1ULL << 47)
#define RISCV_HWPROBE_EXT_ZAWRS (1ULL << 48)
#define RISCV_HWPROBE_EXT_SUPM (1ULL << 49)
+#define RISCV_HWPROBE_EXT_ZICNTR (1ULL << 50)
+#define RISCV_HWPROBE_EXT_ZIHPM (1ULL << 51)
+#define RISCV_HWPROBE_EXT_ZFBFMIN (1ULL << 52)
+#define RISCV_HWPROBE_EXT_ZVFBFMIN (1ULL << 53)
+#define RISCV_HWPROBE_EXT_ZVFBFWMA (1ULL << 54)
+#define RISCV_HWPROBE_EXT_ZICBOM (1ULL << 55)
+#define RISCV_HWPROBE_EXT_ZAAMO (1ULL << 56)
+#define RISCV_HWPROBE_EXT_ZALRSC (1ULL << 57)
#define RISCV_HWPROBE_KEY_CPUPERF_0 5
#define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0)
#define RISCV_HWPROBE_MISALIGNED_EMULATED (1 << 0)
@@ -95,6 +103,7 @@ struct riscv_hwprobe {
#define RISCV_HWPROBE_MISALIGNED_VECTOR_FAST 3
#define RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED 4
#define RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0 11
+#define RISCV_HWPROBE_KEY_ZICBOM_BLOCK_SIZE 12
/* Increase RISCV_HWPROBE_MAX_KEY when adding items. */
/* Flags */
diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h
index f06bc5efcd79..5f59fd226cc5 100644
--- a/arch/riscv/include/uapi/asm/kvm.h
+++ b/arch/riscv/include/uapi/asm/kvm.h
@@ -182,6 +182,8 @@ enum KVM_RISCV_ISA_EXT_ID {
KVM_RISCV_ISA_EXT_SVVPTC,
KVM_RISCV_ISA_EXT_ZABHA,
KVM_RISCV_ISA_EXT_ZICCRSE,
+ KVM_RISCV_ISA_EXT_ZAAMO,
+ KVM_RISCV_ISA_EXT_ZALRSC,
KVM_RISCV_ISA_EXT_MAX,
};
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
index e89455a6a0e5..16490755304e 100644
--- a/arch/riscv/kernel/asm-offsets.c
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -36,7 +36,6 @@ void asm_offsets(void)
OFFSET(TASK_THREAD_S11, task_struct, thread.s[11]);
OFFSET(TASK_TI_CPU, task_struct, thread_info.cpu);
- OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags);
OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index 40ac72e407b6..2054f6c4b0ae 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -32,6 +32,7 @@
#define NUM_ALPHA_EXTS ('z' - 'a' + 1)
static bool any_cpu_has_zicboz;
+static bool any_cpu_has_zicbom;
unsigned long elf_hwcap __read_mostly;
@@ -53,9 +54,7 @@ u32 thead_vlenb_of;
*/
unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap)
{
- if (!isa_bitmap)
- return riscv_isa[0];
- return isa_bitmap[0];
+ return !isa_bitmap ? riscv_isa[0] : isa_bitmap[0];
}
EXPORT_SYMBOL_GPL(riscv_isa_extension_base);
@@ -76,10 +75,19 @@ bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, unsigned i
if (bit >= RISCV_ISA_EXT_MAX)
return false;
- return test_bit(bit, bmap) ? true : false;
+ return test_bit(bit, bmap);
}
EXPORT_SYMBOL_GPL(__riscv_isa_extension_available);
+static int riscv_ext_f_depends(const struct riscv_isa_ext_data *data,
+ const unsigned long *isa_bitmap)
+{
+ if (__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_f))
+ return 0;
+
+ return -EPROBE_DEFER;
+}
+
static int riscv_ext_zicbom_validate(const struct riscv_isa_ext_data *data,
const unsigned long *isa_bitmap)
{
@@ -91,6 +99,8 @@ static int riscv_ext_zicbom_validate(const struct riscv_isa_ext_data *data,
pr_err("Zicbom disabled as cbom-block-size present, but is not a power-of-2\n");
return -EINVAL;
}
+
+ any_cpu_has_zicbom = true;
return 0;
}
@@ -109,6 +119,82 @@ static int riscv_ext_zicboz_validate(const struct riscv_isa_ext_data *data,
return 0;
}
+static int riscv_ext_f_validate(const struct riscv_isa_ext_data *data,
+ const unsigned long *isa_bitmap)
+{
+ if (!IS_ENABLED(CONFIG_FPU))
+ return -EINVAL;
+
+ /*
+ * Due to extension ordering, d is checked before f, so no deferral
+ * is required.
+ */
+ if (!__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_d)) {
+ pr_warn_once("This kernel does not support systems with F but not D\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int riscv_ext_d_validate(const struct riscv_isa_ext_data *data,
+ const unsigned long *isa_bitmap)
+{
+ if (!IS_ENABLED(CONFIG_FPU))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int riscv_ext_vector_x_validate(const struct riscv_isa_ext_data *data,
+ const unsigned long *isa_bitmap)
+{
+ if (!IS_ENABLED(CONFIG_RISCV_ISA_V))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int riscv_ext_vector_float_validate(const struct riscv_isa_ext_data *data,
+ const unsigned long *isa_bitmap)
+{
+ if (!IS_ENABLED(CONFIG_RISCV_ISA_V))
+ return -EINVAL;
+
+ if (!IS_ENABLED(CONFIG_FPU))
+ return -EINVAL;
+
+ /*
+ * The kernel doesn't support systems that don't implement both F and
+ * D, so if any of the floating-point vector extensions are to be
+ * usable, both scalar floating-point extensions need to be usable.
+ *
+ * Since this function validates vector only, and v/Zve* are probed
+ * after f/d, there's no need for a deferral here.
+ */
+ if (!__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_d))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int riscv_ext_vector_crypto_validate(const struct riscv_isa_ext_data *data,
+ const unsigned long *isa_bitmap)
+{
+ if (!IS_ENABLED(CONFIG_RISCV_ISA_V))
+ return -EINVAL;
+
+ /*
+ * It isn't the kernel's job to check that the binding is correct, so
+ * it should be enough to check that any of the vector extensions are
+ * enabled, which in turn means that vector is usable in this kernel.
+ */
+ if (!__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_ZVE32X))
+ return -EPROBE_DEFER;
+
+ return 0;
+}
+
static int riscv_ext_zca_depends(const struct riscv_isa_ext_data *data,
const unsigned long *isa_bitmap)
{
@@ -140,6 +226,28 @@ static int riscv_ext_zcf_validate(const struct riscv_isa_ext_data *data,
return -EPROBE_DEFER;
}
+static int riscv_vector_f_validate(const struct riscv_isa_ext_data *data,
+ const unsigned long *isa_bitmap)
+{
+ if (!IS_ENABLED(CONFIG_RISCV_ISA_V))
+ return -EINVAL;
+
+ if (__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_ZVE32F))
+ return 0;
+
+ return -EPROBE_DEFER;
+}
+
+static int riscv_ext_zvfbfwma_validate(const struct riscv_isa_ext_data *data,
+ const unsigned long *isa_bitmap)
+{
+ if (__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_ZFBFMIN) &&
+ __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_ZVFBFMIN))
+ return 0;
+
+ return -EPROBE_DEFER;
+}
+
static int riscv_ext_svadu_validate(const struct riscv_isa_ext_data *data,
const unsigned long *isa_bitmap)
{
@@ -150,6 +258,11 @@ static int riscv_ext_svadu_validate(const struct riscv_isa_ext_data *data,
return 0;
}
+static const unsigned int riscv_a_exts[] = {
+ RISCV_ISA_EXT_ZAAMO,
+ RISCV_ISA_EXT_ZALRSC,
+};
+
static const unsigned int riscv_zk_bundled_exts[] = {
RISCV_ISA_EXT_ZBKB,
RISCV_ISA_EXT_ZBKC,
@@ -321,17 +434,15 @@ static const unsigned int riscv_c_exts[] = {
const struct riscv_isa_ext_data riscv_isa_ext[] = {
__RISCV_ISA_EXT_DATA(i, RISCV_ISA_EXT_i),
__RISCV_ISA_EXT_DATA(m, RISCV_ISA_EXT_m),
- __RISCV_ISA_EXT_DATA(a, RISCV_ISA_EXT_a),
- __RISCV_ISA_EXT_DATA(f, RISCV_ISA_EXT_f),
- __RISCV_ISA_EXT_DATA(d, RISCV_ISA_EXT_d),
+ __RISCV_ISA_EXT_SUPERSET(a, RISCV_ISA_EXT_a, riscv_a_exts),
+ __RISCV_ISA_EXT_DATA_VALIDATE(f, RISCV_ISA_EXT_f, riscv_ext_f_validate),
+ __RISCV_ISA_EXT_DATA_VALIDATE(d, RISCV_ISA_EXT_d, riscv_ext_d_validate),
__RISCV_ISA_EXT_DATA(q, RISCV_ISA_EXT_q),
__RISCV_ISA_EXT_SUPERSET(c, RISCV_ISA_EXT_c, riscv_c_exts),
- __RISCV_ISA_EXT_SUPERSET(v, RISCV_ISA_EXT_v, riscv_v_exts),
+ __RISCV_ISA_EXT_SUPERSET_VALIDATE(v, RISCV_ISA_EXT_v, riscv_v_exts, riscv_ext_vector_float_validate),
__RISCV_ISA_EXT_DATA(h, RISCV_ISA_EXT_h),
- __RISCV_ISA_EXT_SUPERSET_VALIDATE(zicbom, RISCV_ISA_EXT_ZICBOM, riscv_xlinuxenvcfg_exts,
- riscv_ext_zicbom_validate),
- __RISCV_ISA_EXT_SUPERSET_VALIDATE(zicboz, RISCV_ISA_EXT_ZICBOZ, riscv_xlinuxenvcfg_exts,
- riscv_ext_zicboz_validate),
+ __RISCV_ISA_EXT_SUPERSET_VALIDATE(zicbom, RISCV_ISA_EXT_ZICBOM, riscv_xlinuxenvcfg_exts, riscv_ext_zicbom_validate),
+ __RISCV_ISA_EXT_SUPERSET_VALIDATE(zicboz, RISCV_ISA_EXT_ZICBOZ, riscv_xlinuxenvcfg_exts, riscv_ext_zicboz_validate),
__RISCV_ISA_EXT_DATA(ziccrse, RISCV_ISA_EXT_ZICCRSE),
__RISCV_ISA_EXT_DATA(zicntr, RISCV_ISA_EXT_ZICNTR),
__RISCV_ISA_EXT_DATA(zicond, RISCV_ISA_EXT_ZICOND),
@@ -341,10 +452,13 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = {
__RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE),
__RISCV_ISA_EXT_DATA(zihpm, RISCV_ISA_EXT_ZIHPM),
__RISCV_ISA_EXT_DATA(zimop, RISCV_ISA_EXT_ZIMOP),
+ __RISCV_ISA_EXT_DATA(zaamo, RISCV_ISA_EXT_ZAAMO),
__RISCV_ISA_EXT_DATA(zabha, RISCV_ISA_EXT_ZABHA),
__RISCV_ISA_EXT_DATA(zacas, RISCV_ISA_EXT_ZACAS),
+ __RISCV_ISA_EXT_DATA(zalrsc, RISCV_ISA_EXT_ZALRSC),
__RISCV_ISA_EXT_DATA(zawrs, RISCV_ISA_EXT_ZAWRS),
__RISCV_ISA_EXT_DATA(zfa, RISCV_ISA_EXT_ZFA),
+ __RISCV_ISA_EXT_DATA_VALIDATE(zfbfmin, RISCV_ISA_EXT_ZFBFMIN, riscv_ext_f_depends),
__RISCV_ISA_EXT_DATA(zfh, RISCV_ISA_EXT_ZFH),
__RISCV_ISA_EXT_DATA(zfhmin, RISCV_ISA_EXT_ZFHMIN),
__RISCV_ISA_EXT_DATA(zca, RISCV_ISA_EXT_ZCA),
@@ -370,29 +484,31 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = {
__RISCV_ISA_EXT_DATA(zksed, RISCV_ISA_EXT_ZKSED),
__RISCV_ISA_EXT_DATA(zksh, RISCV_ISA_EXT_ZKSH),
__RISCV_ISA_EXT_DATA(ztso, RISCV_ISA_EXT_ZTSO),
- __RISCV_ISA_EXT_SUPERSET(zvbb, RISCV_ISA_EXT_ZVBB, riscv_zvbb_exts),
- __RISCV_ISA_EXT_DATA(zvbc, RISCV_ISA_EXT_ZVBC),
- __RISCV_ISA_EXT_SUPERSET(zve32f, RISCV_ISA_EXT_ZVE32F, riscv_zve32f_exts),
- __RISCV_ISA_EXT_DATA(zve32x, RISCV_ISA_EXT_ZVE32X),
- __RISCV_ISA_EXT_SUPERSET(zve64d, RISCV_ISA_EXT_ZVE64D, riscv_zve64d_exts),
- __RISCV_ISA_EXT_SUPERSET(zve64f, RISCV_ISA_EXT_ZVE64F, riscv_zve64f_exts),
- __RISCV_ISA_EXT_SUPERSET(zve64x, RISCV_ISA_EXT_ZVE64X, riscv_zve64x_exts),
+ __RISCV_ISA_EXT_SUPERSET_VALIDATE(zvbb, RISCV_ISA_EXT_ZVBB, riscv_zvbb_exts, riscv_ext_vector_crypto_validate),
+ __RISCV_ISA_EXT_DATA_VALIDATE(zvbc, RISCV_ISA_EXT_ZVBC, riscv_ext_vector_crypto_validate),
+ __RISCV_ISA_EXT_SUPERSET_VALIDATE(zve32f, RISCV_ISA_EXT_ZVE32F, riscv_zve32f_exts, riscv_ext_vector_float_validate),
+ __RISCV_ISA_EXT_DATA_VALIDATE(zve32x, RISCV_ISA_EXT_ZVE32X, riscv_ext_vector_x_validate),
+ __RISCV_ISA_EXT_SUPERSET_VALIDATE(zve64d, RISCV_ISA_EXT_ZVE64D, riscv_zve64d_exts, riscv_ext_vector_float_validate),
+ __RISCV_ISA_EXT_SUPERSET_VALIDATE(zve64f, RISCV_ISA_EXT_ZVE64F, riscv_zve64f_exts, riscv_ext_vector_float_validate),
+ __RISCV_ISA_EXT_SUPERSET_VALIDATE(zve64x, RISCV_ISA_EXT_ZVE64X, riscv_zve64x_exts, riscv_ext_vector_x_validate),
+ __RISCV_ISA_EXT_DATA_VALIDATE(zvfbfmin, RISCV_ISA_EXT_ZVFBFMIN, riscv_vector_f_validate),
+ __RISCV_ISA_EXT_DATA_VALIDATE(zvfbfwma, RISCV_ISA_EXT_ZVFBFWMA, riscv_ext_zvfbfwma_validate),
__RISCV_ISA_EXT_DATA(zvfh, RISCV_ISA_EXT_ZVFH),
__RISCV_ISA_EXT_DATA(zvfhmin, RISCV_ISA_EXT_ZVFHMIN),
- __RISCV_ISA_EXT_DATA(zvkb, RISCV_ISA_EXT_ZVKB),
- __RISCV_ISA_EXT_DATA(zvkg, RISCV_ISA_EXT_ZVKG),
- __RISCV_ISA_EXT_BUNDLE(zvkn, riscv_zvkn_bundled_exts),
- __RISCV_ISA_EXT_BUNDLE(zvknc, riscv_zvknc_bundled_exts),
- __RISCV_ISA_EXT_DATA(zvkned, RISCV_ISA_EXT_ZVKNED),
- __RISCV_ISA_EXT_BUNDLE(zvkng, riscv_zvkng_bundled_exts),
- __RISCV_ISA_EXT_DATA(zvknha, RISCV_ISA_EXT_ZVKNHA),
- __RISCV_ISA_EXT_DATA(zvknhb, RISCV_ISA_EXT_ZVKNHB),
- __RISCV_ISA_EXT_BUNDLE(zvks, riscv_zvks_bundled_exts),
- __RISCV_ISA_EXT_BUNDLE(zvksc, riscv_zvksc_bundled_exts),
- __RISCV_ISA_EXT_DATA(zvksed, RISCV_ISA_EXT_ZVKSED),
- __RISCV_ISA_EXT_DATA(zvksh, RISCV_ISA_EXT_ZVKSH),
- __RISCV_ISA_EXT_BUNDLE(zvksg, riscv_zvksg_bundled_exts),
- __RISCV_ISA_EXT_DATA(zvkt, RISCV_ISA_EXT_ZVKT),
+ __RISCV_ISA_EXT_DATA_VALIDATE(zvkb, RISCV_ISA_EXT_ZVKB, riscv_ext_vector_crypto_validate),
+ __RISCV_ISA_EXT_DATA_VALIDATE(zvkg, RISCV_ISA_EXT_ZVKG, riscv_ext_vector_crypto_validate),
+ __RISCV_ISA_EXT_BUNDLE_VALIDATE(zvkn, riscv_zvkn_bundled_exts, riscv_ext_vector_crypto_validate),
+ __RISCV_ISA_EXT_BUNDLE_VALIDATE(zvknc, riscv_zvknc_bundled_exts, riscv_ext_vector_crypto_validate),
+ __RISCV_ISA_EXT_DATA_VALIDATE(zvkned, RISCV_ISA_EXT_ZVKNED, riscv_ext_vector_crypto_validate),
+ __RISCV_ISA_EXT_BUNDLE_VALIDATE(zvkng, riscv_zvkng_bundled_exts, riscv_ext_vector_crypto_validate),
+ __RISCV_ISA_EXT_DATA_VALIDATE(zvknha, RISCV_ISA_EXT_ZVKNHA, riscv_ext_vector_crypto_validate),
+ __RISCV_ISA_EXT_DATA_VALIDATE(zvknhb, RISCV_ISA_EXT_ZVKNHB, riscv_ext_vector_crypto_validate),
+ __RISCV_ISA_EXT_BUNDLE_VALIDATE(zvks, riscv_zvks_bundled_exts, riscv_ext_vector_crypto_validate),
+ __RISCV_ISA_EXT_BUNDLE_VALIDATE(zvksc, riscv_zvksc_bundled_exts, riscv_ext_vector_crypto_validate),
+ __RISCV_ISA_EXT_DATA_VALIDATE(zvksed, RISCV_ISA_EXT_ZVKSED, riscv_ext_vector_crypto_validate),
+ __RISCV_ISA_EXT_DATA_VALIDATE(zvksh, RISCV_ISA_EXT_ZVKSH, riscv_ext_vector_crypto_validate),
+ __RISCV_ISA_EXT_BUNDLE_VALIDATE(zvksg, riscv_zvksg_bundled_exts, riscv_ext_vector_crypto_validate),
+ __RISCV_ISA_EXT_DATA_VALIDATE(zvkt, RISCV_ISA_EXT_ZVKT, riscv_ext_vector_crypto_validate),
__RISCV_ISA_EXT_DATA(smaia, RISCV_ISA_EXT_SMAIA),
__RISCV_ISA_EXT_DATA(smmpm, RISCV_ISA_EXT_SMMPM),
__RISCV_ISA_EXT_SUPERSET(smnpm, RISCV_ISA_EXT_SMNPM, riscv_xlinuxenvcfg_exts),
@@ -960,16 +1076,6 @@ void __init riscv_fill_hwcap(void)
riscv_v_setup_vsize();
}
- if (elf_hwcap & COMPAT_HWCAP_ISA_V) {
- /*
- * ISA string in device tree might have 'v' flag, but
- * CONFIG_RISCV_ISA_V is disabled in kernel.
- * Clear V flag in elf_hwcap if CONFIG_RISCV_ISA_V is disabled.
- */
- if (!IS_ENABLED(CONFIG_RISCV_ISA_V))
- elf_hwcap &= ~COMPAT_HWCAP_ISA_V;
- }
-
memset(print_str, 0, sizeof(print_str));
for (i = 0, j = 0; i < NUM_ALPHA_EXTS; i++)
if (riscv_isa[0] & BIT_MASK(i))
@@ -1001,6 +1107,11 @@ void __init riscv_user_isa_enable(void)
current->thread.envcfg |= ENVCFG_CBZE;
else if (any_cpu_has_zicboz)
pr_warn("Zicboz disabled as it is unavailable on some harts\n");
+
+ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_ZICBOM))
+ current->thread.envcfg |= ENVCFG_CBCFE;
+ else if (any_cpu_has_zicbom)
+ pr_warn("Zicbom disabled as it is unavailable on some harts\n");
}
#ifdef CONFIG_RISCV_ALTERNATIVE
diff --git a/arch/riscv/kernel/elf_kexec.c b/arch/riscv/kernel/elf_kexec.c
index 3c37661801f9..e783a72d051f 100644
--- a/arch/riscv/kernel/elf_kexec.c
+++ b/arch/riscv/kernel/elf_kexec.c
@@ -468,6 +468,9 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
case R_RISCV_ALIGN:
case R_RISCV_RELAX:
break;
+ case R_RISCV_64:
+ *(u64 *)loc = val;
+ break;
default:
pr_err("Unknown rela relocation: %d\n", r_type);
return -ENOEXEC;
diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
index 3524db5e4fa0..674dcdfae7a1 100644
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -36,7 +36,7 @@ static int ftrace_check_current_call(unsigned long hook_pos,
unsigned int *expected)
{
unsigned int replaced[2];
- unsigned int nops[2] = {NOP4, NOP4};
+ unsigned int nops[2] = {RISCV_INSN_NOP4, RISCV_INSN_NOP4};
/* we expect nops at the hook position */
if (!expected)
@@ -68,7 +68,7 @@ static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
bool enable, bool ra)
{
unsigned int call[2];
- unsigned int nops[2] = {NOP4, NOP4};
+ unsigned int nops[2] = {RISCV_INSN_NOP4, RISCV_INSN_NOP4};
if (ra)
make_call_ra(hook_pos, target, call);
@@ -97,7 +97,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long addr)
{
- unsigned int nops[2] = {NOP4, NOP4};
+ unsigned int nops[2] = {RISCV_INSN_NOP4, RISCV_INSN_NOP4};
if (patch_insn_write((void *)rec->ip, nops, MCOUNT_INSN_SIZE))
return -EPERM;
diff --git a/arch/riscv/kernel/jump_label.c b/arch/riscv/kernel/jump_label.c
index 654ed159c830..b4c1a6a3fbd2 100644
--- a/arch/riscv/kernel/jump_label.c
+++ b/arch/riscv/kernel/jump_label.c
@@ -11,8 +11,8 @@
#include <asm/bug.h>
#include <asm/cacheflush.h>
#include <asm/text-patching.h>
+#include <asm/insn-def.h>
-#define RISCV_INSN_NOP 0x00000013U
#define RISCV_INSN_JAL 0x0000006fU
bool arch_jump_label_transform_queue(struct jump_entry *entry,
@@ -33,7 +33,7 @@ bool arch_jump_label_transform_queue(struct jump_entry *entry,
(((u32)offset & GENMASK(10, 1)) << (21 - 1)) |
(((u32)offset & GENMASK(20, 20)) << (31 - 20));
} else {
- insn = RISCV_INSN_NOP;
+ insn = RISCV_INSN_NOP4;
}
if (early_boot_irqs_disabled) {
diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S
index 068168046e0e..da4a4000e57e 100644
--- a/arch/riscv/kernel/mcount.S
+++ b/arch/riscv/kernel/mcount.S
@@ -12,8 +12,6 @@
#include <asm/asm-offsets.h>
#include <asm/ftrace.h>
-#define ABI_SIZE_ON_STACK 80
-
.text
.macro SAVE_ABI_STATE
@@ -28,12 +26,12 @@
* register if a0 was not saved.
*/
.macro SAVE_RET_ABI_STATE
- addi sp, sp, -ABI_SIZE_ON_STACK
- REG_S ra, 1*SZREG(sp)
- REG_S s0, 8*SZREG(sp)
- REG_S a0, 10*SZREG(sp)
- REG_S a1, 11*SZREG(sp)
- addi s0, sp, ABI_SIZE_ON_STACK
+ addi sp, sp, -FREGS_SIZE_ON_STACK
+ REG_S ra, FREGS_RA(sp)
+ REG_S s0, FREGS_S0(sp)
+ REG_S a0, FREGS_A0(sp)
+ REG_S a1, FREGS_A1(sp)
+ addi s0, sp, FREGS_SIZE_ON_STACK
.endm
.macro RESTORE_ABI_STATE
@@ -43,11 +41,11 @@
.endm
.macro RESTORE_RET_ABI_STATE
- REG_L ra, 1*SZREG(sp)
- REG_L s0, 8*SZREG(sp)
- REG_L a0, 10*SZREG(sp)
- REG_L a1, 11*SZREG(sp)
- addi sp, sp, ABI_SIZE_ON_STACK
+ REG_L ra, FREGS_RA(sp)
+ REG_L s0, FREGS_S0(sp)
+ REG_L a0, FREGS_A0(sp)
+ REG_L a1, FREGS_A1(sp)
+ addi sp, sp, FREGS_SIZE_ON_STACK
.endm
SYM_TYPED_FUNC_START(ftrace_stub)
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 4fe45daa6281..c174544eefc8 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -235,11 +235,6 @@ static void __init parse_dtb(void)
} else {
pr_err("No DTB passed to the kernel\n");
}
-
-#ifdef CONFIG_CMDLINE_FORCE
- strscpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
- pr_info("Forcing kernel command line to: %s\n", boot_command_line);
-#endif
}
#if defined(CONFIG_RISCV_COMBO_SPINLOCKS)
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index d58b5e751286..e650dec44817 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -48,6 +48,8 @@ EXPORT_SYMBOL_GPL(__cpuid_to_hartid_map);
void __init smp_setup_processor_id(void)
{
cpuid_to_hartid_map(0) = boot_cpu_hartid;
+
+ pr_info("Booting Linux on hartid %lu\n", boot_cpu_hartid);
}
static DEFINE_PER_CPU_READ_MOSTLY(int, ipi_dummy_dev);
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index e36d20205bd7..601a321e0f17 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -231,6 +231,10 @@ asmlinkage __visible void smp_callin(void)
riscv_ipi_enable();
numa_add_cpu(curr_cpuid);
+
+ pr_debug("CPU%u: Booted secondary hartid %lu\n", curr_cpuid,
+ cpuid_to_hartid_map(curr_cpuid));
+
set_cpu_online(curr_cpuid, true);
/*
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index d4355c770c36..3fe9e6edef8f 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -74,7 +74,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
&frame->ra);
if (pc >= (unsigned long)handle_exception &&
pc < (unsigned long)&ret_from_exception_end) {
- if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc)))
+ if (unlikely(!fn(arg, pc)))
break;
pc = ((struct pt_regs *)sp)->epc;
diff --git a/arch/riscv/kernel/suspend.c b/arch/riscv/kernel/suspend.c
index 9a8a0dc035b2..24b3f57d467f 100644
--- a/arch/riscv/kernel/suspend.c
+++ b/arch/riscv/kernel/suspend.c
@@ -30,6 +30,13 @@ void suspend_save_csrs(struct suspend_context *context)
*/
#ifdef CONFIG_MMU
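+ /* With Sstc, preserve the supervisor timer compare CSRs across suspend. */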
+ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SSTC)) {
+ context->stimecmp = csr_read(CSR_STIMECMP);
+#if __riscv_xlen < 64
+ context->stimecmph = csr_read(CSR_STIMECMPH);
+#endif
+ }
+
context->satp = csr_read(CSR_SATP);
#endif
}
@@ -43,6 +50,13 @@ void suspend_restore_csrs(struct suspend_context *context)
csr_write(CSR_IE, context->ie);
#ifdef CONFIG_MMU
+ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SSTC)) {
+ csr_write(CSR_STIMECMP, context->stimecmp);
+#if __riscv_xlen < 64
+ csr_write(CSR_STIMECMPH, context->stimecmph);
+#endif
+ }
+
csr_write(CSR_SATP, context->satp);
#endif
}
diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c
index 04a4e5495512..249aec8594a9 100644
--- a/arch/riscv/kernel/sys_hwprobe.c
+++ b/arch/riscv/kernel/sys_hwprobe.c
@@ -95,7 +95,9 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
* regardless of the kernel's configuration, as no other checks, besides
* presence in the hart_isa bitmap, are made.
*/
+ EXT_KEY(ZAAMO);
EXT_KEY(ZACAS);
+ EXT_KEY(ZALRSC);
EXT_KEY(ZAWRS);
EXT_KEY(ZBA);
EXT_KEY(ZBB);
@@ -107,10 +109,13 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
EXT_KEY(ZCA);
EXT_KEY(ZCB);
EXT_KEY(ZCMOP);
+ EXT_KEY(ZICBOM);
EXT_KEY(ZICBOZ);
+ EXT_KEY(ZICNTR);
EXT_KEY(ZICOND);
EXT_KEY(ZIHINTNTL);
EXT_KEY(ZIHINTPAUSE);
+ EXT_KEY(ZIHPM);
EXT_KEY(ZIMOP);
EXT_KEY(ZKND);
EXT_KEY(ZKNE);
@@ -132,6 +137,8 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
EXT_KEY(ZVE64D);
EXT_KEY(ZVE64F);
EXT_KEY(ZVE64X);
+ EXT_KEY(ZVFBFMIN);
+ EXT_KEY(ZVFBFWMA);
EXT_KEY(ZVFH);
EXT_KEY(ZVFHMIN);
EXT_KEY(ZVKB);
@@ -148,6 +155,7 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
EXT_KEY(ZCD);
EXT_KEY(ZCF);
EXT_KEY(ZFA);
+ EXT_KEY(ZFBFMIN);
EXT_KEY(ZFH);
EXT_KEY(ZFHMIN);
}
@@ -161,7 +169,7 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
pair->value &= ~missing;
}
-static bool hwprobe_ext0_has(const struct cpumask *cpus, unsigned long ext)
+static bool hwprobe_ext0_has(const struct cpumask *cpus, u64 ext)
{
struct riscv_hwprobe pair;
@@ -279,6 +287,11 @@ static void hwprobe_one_pair(struct riscv_hwprobe *pair,
if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ))
pair->value = riscv_cboz_block_size;
break;
+ case RISCV_HWPROBE_KEY_ZICBOM_BLOCK_SIZE:
+ pair->value = 0;
+ if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOM))
+ pair->value = riscv_cbom_block_size;
+ break;
case RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS:
pair->value = user_max_virt_addr();
break;
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index 7cc108aed74e..4354c87c0376 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -605,16 +605,10 @@ void check_vector_unaligned_access_emulated(struct work_struct *work __always_un
kernel_vector_end();
}
-bool check_vector_unaligned_access_emulated_all_cpus(void)
+bool __init check_vector_unaligned_access_emulated_all_cpus(void)
{
int cpu;
- if (!has_vector()) {
- for_each_online_cpu(cpu)
- per_cpu(vector_misaligned_access, cpu) = RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED;
- return false;
- }
-
schedule_on_each_cpu(check_vector_unaligned_access_emulated);
for_each_online_cpu(cpu)
@@ -625,7 +619,7 @@ bool check_vector_unaligned_access_emulated_all_cpus(void)
return true;
}
#else
-bool check_vector_unaligned_access_emulated_all_cpus(void)
+bool __init check_vector_unaligned_access_emulated_all_cpus(void)
{
return false;
}
@@ -659,7 +653,7 @@ void check_unaligned_access_emulated(struct work_struct *work __always_unused)
}
}
-bool check_unaligned_access_emulated_all_cpus(void)
+bool __init check_unaligned_access_emulated_all_cpus(void)
{
int cpu;
@@ -684,7 +678,7 @@ bool unaligned_ctl_available(void)
return unaligned_ctl;
}
#else
-bool check_unaligned_access_emulated_all_cpus(void)
+bool __init check_unaligned_access_emulated_all_cpus(void)
{
return false;
}
diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c
index 91f189cf1611..585d2dcf2dab 100644
--- a/arch/riscv/kernel/unaligned_access_speed.c
+++ b/arch/riscv/kernel/unaligned_access_speed.c
@@ -24,8 +24,12 @@
DEFINE_PER_CPU(long, misaligned_access_speed) = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;
DEFINE_PER_CPU(long, vector_misaligned_access) = RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED;
-#ifdef CONFIG_RISCV_PROBE_UNALIGNED_ACCESS
+static long unaligned_scalar_speed_param = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;
+static long unaligned_vector_speed_param = RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
+
static cpumask_t fast_misaligned_access;
+
+#ifdef CONFIG_RISCV_PROBE_UNALIGNED_ACCESS
static int check_unaligned_access(void *param)
{
int cpu = smp_processor_id();
@@ -121,7 +125,7 @@ static int check_unaligned_access(void *param)
return 0;
}
-static void check_unaligned_access_nonboot_cpu(void *param)
+static void __init check_unaligned_access_nonboot_cpu(void *param)
{
unsigned int cpu = smp_processor_id();
struct page **pages = param;
@@ -130,6 +134,50 @@ static void check_unaligned_access_nonboot_cpu(void *param)
check_unaligned_access(pages[cpu]);
}
+/* Measure unaligned access speed on all CPUs present at boot in parallel. */
+static void __init check_unaligned_access_speed_all_cpus(void)
+{
+ unsigned int cpu;
+ unsigned int cpu_count = num_possible_cpus();
+ struct page **bufs = kcalloc(cpu_count, sizeof(*bufs), GFP_KERNEL);
+
+ if (!bufs) {
+ pr_warn("Allocation failure, not measuring misaligned performance\n");
+ return;
+ }
+
+ /*
+ * Allocate separate buffers for each CPU so there's no fighting over
+ * cache lines.
+ */
+ for_each_cpu(cpu, cpu_online_mask) {
+ bufs[cpu] = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
+ if (!bufs[cpu]) {
+ pr_warn("Allocation failure, not measuring misaligned performance\n");
+ goto out;
+ }
+ }
+
+ /* Check everybody except 0, who stays behind to tend jiffies. */
+ on_each_cpu(check_unaligned_access_nonboot_cpu, bufs, 1);
+
+ /* Check core 0. */
+ smp_call_on_cpu(0, check_unaligned_access, bufs[0], true);
+
+out:
+ for_each_cpu(cpu, cpu_online_mask) {
+ if (bufs[cpu])
+ __free_pages(bufs[cpu], MISALIGNED_BUFFER_ORDER);
+ }
+
+ kfree(bufs);
+}
+#else /* CONFIG_RISCV_PROBE_UNALIGNED_ACCESS */
+static void __init check_unaligned_access_speed_all_cpus(void)
+{
+}
+#endif
+
DEFINE_STATIC_KEY_FALSE(fast_unaligned_access_speed_key);
static void modify_unaligned_access_branches(cpumask_t *mask, int weight)
@@ -175,7 +223,7 @@ static void set_unaligned_access_static_branches(void)
modify_unaligned_access_branches(&fast_and_online, num_online_cpus());
}
-static int lock_and_set_unaligned_access_static_branch(void)
+static int __init lock_and_set_unaligned_access_static_branch(void)
{
cpus_read_lock();
set_unaligned_access_static_branches();
@@ -188,21 +236,29 @@ arch_initcall_sync(lock_and_set_unaligned_access_static_branch);
static int riscv_online_cpu(unsigned int cpu)
{
- static struct page *buf;
-
/* We are already set since the last check */
- if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN)
+ if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN) {
+ goto exit;
+ } else if (unaligned_scalar_speed_param != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN) {
+ per_cpu(misaligned_access_speed, cpu) = unaligned_scalar_speed_param;
goto exit;
-
- check_unaligned_access_emulated(NULL);
- buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
- if (!buf) {
- pr_warn("Allocation failure, not measuring misaligned performance\n");
- return -ENOMEM;
}
- check_unaligned_access(buf);
- __free_pages(buf, MISALIGNED_BUFFER_ORDER);
+#ifdef CONFIG_RISCV_PROBE_UNALIGNED_ACCESS
+ {
+ static struct page *buf;
+
+ check_unaligned_access_emulated(NULL);
+ buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
+ if (!buf) {
+ pr_warn("Allocation failure, not measuring misaligned performance\n");
+ return -ENOMEM;
+ }
+
+ check_unaligned_access(buf);
+ __free_pages(buf, MISALIGNED_BUFFER_ORDER);
+ }
+#endif
exit:
set_unaligned_access_static_branches();
@@ -217,59 +273,6 @@ static int riscv_offline_cpu(unsigned int cpu)
return 0;
}
-/* Measure unaligned access speed on all CPUs present at boot in parallel. */
-static int check_unaligned_access_speed_all_cpus(void)
-{
- unsigned int cpu;
- unsigned int cpu_count = num_possible_cpus();
- struct page **bufs = kcalloc(cpu_count, sizeof(*bufs), GFP_KERNEL);
-
- if (!bufs) {
- pr_warn("Allocation failure, not measuring misaligned performance\n");
- return 0;
- }
-
- /*
- * Allocate separate buffers for each CPU so there's no fighting over
- * cache lines.
- */
- for_each_cpu(cpu, cpu_online_mask) {
- bufs[cpu] = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
- if (!bufs[cpu]) {
- pr_warn("Allocation failure, not measuring misaligned performance\n");
- goto out;
- }
- }
-
- /* Check everybody except 0, who stays behind to tend jiffies. */
- on_each_cpu(check_unaligned_access_nonboot_cpu, bufs, 1);
-
- /* Check core 0. */
- smp_call_on_cpu(0, check_unaligned_access, bufs[0], true);
-
- /*
- * Setup hotplug callbacks for any new CPUs that come online or go
- * offline.
- */
- cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online",
- riscv_online_cpu, riscv_offline_cpu);
-
-out:
- for_each_cpu(cpu, cpu_online_mask) {
- if (bufs[cpu])
- __free_pages(bufs[cpu], MISALIGNED_BUFFER_ORDER);
- }
-
- kfree(bufs);
- return 0;
-}
-#else /* CONFIG_RISCV_PROBE_UNALIGNED_ACCESS */
-static int check_unaligned_access_speed_all_cpus(void)
-{
- return 0;
-}
-#endif
-
#ifdef CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS
static void check_vector_unaligned_access(struct work_struct *work __always_unused)
{
@@ -349,7 +352,7 @@ static void check_vector_unaligned_access(struct work_struct *work __always_unus
pr_warn("cpu%d: rdtime lacks granularity needed to measure unaligned vector access speed\n",
cpu);
- return;
+ goto free;
}
if (word_cycles < byte_cycles)
@@ -363,57 +366,112 @@ static void check_vector_unaligned_access(struct work_struct *work __always_unus
(speed == RISCV_HWPROBE_MISALIGNED_VECTOR_FAST) ? "fast" : "slow");
per_cpu(vector_misaligned_access, cpu) = speed;
+
+free:
+ __free_pages(page, MISALIGNED_BUFFER_ORDER);
+}
+
+/* Measure vector unaligned access speed on all CPUs present at boot in parallel. */
+static int __init vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused)
+{
+ schedule_on_each_cpu(check_vector_unaligned_access);
+
+ return 0;
}
+#else /* CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS */
+static int __init vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused)
+{
+ return 0;
+}
+#endif
static int riscv_online_cpu_vec(unsigned int cpu)
{
- if (!has_vector())
+ if (unaligned_vector_speed_param != RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN) {
+ per_cpu(vector_misaligned_access, cpu) = unaligned_vector_speed_param;
return 0;
+ }
- if (per_cpu(vector_misaligned_access, cpu) != RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED)
+#ifdef CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS
+ if (per_cpu(vector_misaligned_access, cpu) != RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN)
return 0;
check_vector_unaligned_access_emulated(NULL);
check_vector_unaligned_access(NULL);
+#endif
+
return 0;
}
-/* Measure unaligned access speed on all CPUs present at boot in parallel. */
-static int vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused)
-{
- schedule_on_each_cpu(check_vector_unaligned_access);
+static const char * const speed_str[] __initconst = { NULL, NULL, "slow", "fast", "unsupported" };
- /*
- * Setup hotplug callbacks for any new CPUs that come online or go
- * offline.
- */
- cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online",
- riscv_online_cpu_vec, NULL);
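+/* Parse the "unaligned_scalar_speed=" (slow|fast|unsupported) kernel parameter. */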
+static int __init set_unaligned_scalar_speed_param(char *str)
+{
+ if (!strcmp(str, speed_str[RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW]))
+ unaligned_scalar_speed_param = RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW;
+ else if (!strcmp(str, speed_str[RISCV_HWPROBE_MISALIGNED_SCALAR_FAST]))
+ unaligned_scalar_speed_param = RISCV_HWPROBE_MISALIGNED_SCALAR_FAST;
+ else if (!strcmp(str, speed_str[RISCV_HWPROBE_MISALIGNED_SCALAR_UNSUPPORTED]))
+ unaligned_scalar_speed_param = RISCV_HWPROBE_MISALIGNED_SCALAR_UNSUPPORTED;
+ else
+ return -EINVAL;
- return 0;
+ return 1;
}
-#else /* CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS */
-static int vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused)
+__setup("unaligned_scalar_speed=", set_unaligned_scalar_speed_param);
+
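+/* Parse the "unaligned_vector_speed=" (slow|fast|unsupported) kernel parameter. */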
+static int __init set_unaligned_vector_speed_param(char *str)
{
- return 0;
+ if (!strcmp(str, speed_str[RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW]))
+ unaligned_vector_speed_param = RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW;
+ else if (!strcmp(str, speed_str[RISCV_HWPROBE_MISALIGNED_VECTOR_FAST]))
+ unaligned_vector_speed_param = RISCV_HWPROBE_MISALIGNED_VECTOR_FAST;
+ else if (!strcmp(str, speed_str[RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED]))
+ unaligned_vector_speed_param = RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED;
+ else
+ return -EINVAL;
+
+ return 1;
}
-#endif
+__setup("unaligned_vector_speed=", set_unaligned_vector_speed_param);
-static int check_unaligned_access_all_cpus(void)
+static int __init check_unaligned_access_all_cpus(void)
{
- bool all_cpus_emulated, all_cpus_vec_unsupported;
+ int cpu;
+
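+ /*
+ * Measure scalar access speed only when no command line override was
+ * given and unaligned accesses are not emulated on all CPUs.
+ */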
+ if (unaligned_scalar_speed_param == RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN &&
+ !check_unaligned_access_emulated_all_cpus()) {
+ check_unaligned_access_speed_all_cpus();
+ } else {
+ pr_info("scalar unaligned access speed set to '%s' by command line\n",
+ speed_str[unaligned_scalar_speed_param]);
+ for_each_online_cpu(cpu)
+ per_cpu(misaligned_access_speed, cpu) = unaligned_scalar_speed_param;
+ }
- all_cpus_emulated = check_unaligned_access_emulated_all_cpus();
- all_cpus_vec_unsupported = check_vector_unaligned_access_emulated_all_cpus();
+ if (!has_vector())
+ unaligned_vector_speed_param = RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED;
- if (!all_cpus_vec_unsupported &&
+ if (unaligned_vector_speed_param == RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN &&
+ !check_vector_unaligned_access_emulated_all_cpus() &&
IS_ENABLED(CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS)) {
kthread_run(vec_check_unaligned_access_speed_all_cpus,
NULL, "vec_check_unaligned_access_speed_all_cpus");
+ } else {
+ pr_info("vector unaligned access speed set to '%s' by command line\n",
+ speed_str[unaligned_vector_speed_param]);
+ for_each_online_cpu(cpu)
+ per_cpu(vector_misaligned_access, cpu) = unaligned_vector_speed_param;
}
- if (!all_cpus_emulated)
- return check_unaligned_access_speed_all_cpus();
+ /*
+ * Set up hotplug callbacks for any new CPUs that come online or go
+ * offline.
+ */
+ cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online",
+ riscv_online_cpu, riscv_offline_cpu);
+ cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online",
+ riscv_online_cpu_vec, NULL);
return 0;
}
diff --git a/arch/riscv/kernel/vec-copy-unaligned.S b/arch/riscv/kernel/vec-copy-unaligned.S
index d16f19f1b3b6..7ce4de6f6e69 100644
--- a/arch/riscv/kernel/vec-copy-unaligned.S
+++ b/arch/riscv/kernel/vec-copy-unaligned.S
@@ -11,7 +11,7 @@
#define WORD_SEW CONCATENATE(e, WORD_EEW)
#define VEC_L CONCATENATE(vle, WORD_EEW).v
-#define VEC_S CONCATENATE(vle, WORD_EEW).v
+#define VEC_S CONCATENATE(vse, WORD_EEW).v
/* void __riscv_copy_vec_words_unaligned(void *, const void *, size_t) */
/* Performs a memcpy without aligning buffers, using word loads and stores. */
diff --git a/arch/riscv/kernel/vendor_extensions.c b/arch/riscv/kernel/vendor_extensions.c
index a31ff84740eb..9feb7f67a0a3 100644
--- a/arch/riscv/kernel/vendor_extensions.c
+++ b/arch/riscv/kernel/vendor_extensions.c
@@ -61,6 +61,6 @@ bool __riscv_isa_vendor_extension_available(int cpu, unsigned long vendor, unsig
if (bit >= RISCV_ISA_VENDOR_EXT_MAX)
return false;
- return test_bit(bit, bmap->isa) ? true : false;
+ return test_bit(bit, bmap->isa);
}
EXPORT_SYMBOL_GPL(__riscv_isa_vendor_extension_available);
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
index 002ca58dd998..61bd5ba6680a 100644
--- a/arch/riscv/kernel/vmlinux.lds.S
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -97,6 +97,9 @@ SECTIONS
{
EXIT_DATA
}
+
+ RUNTIME_CONST_VARIABLES
+
PERCPU_SECTION(L1_CACHE_BYTES)
.rel.dyn : {
diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c
index 43ee8e33ba23..2e1b646f0d61 100644
--- a/arch/riscv/kvm/vcpu_onereg.c
+++ b/arch/riscv/kvm/vcpu_onereg.c
@@ -47,8 +47,10 @@ static const unsigned long kvm_isa_ext_arr[] = {
KVM_ISA_EXT_ARR(SVNAPOT),
KVM_ISA_EXT_ARR(SVPBMT),
KVM_ISA_EXT_ARR(SVVPTC),
+ KVM_ISA_EXT_ARR(ZAAMO),
KVM_ISA_EXT_ARR(ZABHA),
KVM_ISA_EXT_ARR(ZACAS),
+ KVM_ISA_EXT_ARR(ZALRSC),
KVM_ISA_EXT_ARR(ZAWRS),
KVM_ISA_EXT_ARR(ZBA),
KVM_ISA_EXT_ARR(ZBB),
@@ -149,8 +151,10 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
case KVM_RISCV_ISA_EXT_SVINVAL:
case KVM_RISCV_ISA_EXT_SVNAPOT:
case KVM_RISCV_ISA_EXT_SVVPTC:
+ case KVM_RISCV_ISA_EXT_ZAAMO:
case KVM_RISCV_ISA_EXT_ZABHA:
case KVM_RISCV_ISA_EXT_ZACAS:
+ case KVM_RISCV_ISA_EXT_ZALRSC:
case KVM_RISCV_ISA_EXT_ZAWRS:
case KVM_RISCV_ISA_EXT_ZBA:
case KVM_RISCV_ISA_EXT_ZBB:
diff --git a/arch/riscv/lib/csum.c b/arch/riscv/lib/csum.c
index 7fb12c59e571..9408f50ca59a 100644
--- a/arch/riscv/lib/csum.c
+++ b/arch/riscv/lib/csum.c
@@ -40,12 +40,7 @@ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
uproto = (__force unsigned int)htonl(proto);
sum += uproto;
- /*
- * Zbb support saves 4 instructions, so not worth checking without
- * alternatives if supported
- */
- if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) &&
- IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
+ if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) && IS_ENABLED(CONFIG_TOOLCHAIN_HAS_ZBB)) {
unsigned long fold_temp;
/*
@@ -157,12 +152,7 @@ do_csum_with_alignment(const unsigned char *buff, int len)
csum = do_csum_common(ptr, end, data);
#ifdef CC_HAS_ASM_GOTO_TIED_OUTPUT
- /*
- * Zbb support saves 6 instructions, so not worth checking without
- * alternatives if supported
- */
- if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) &&
- IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
+ if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) && IS_ENABLED(CONFIG_TOOLCHAIN_HAS_ZBB)) {
unsigned long fold_temp;
/*
@@ -244,12 +234,7 @@ do_csum_no_alignment(const unsigned char *buff, int len)
end = (const unsigned long *)(buff + len);
csum = do_csum_common(ptr, end, data);
- /*
- * Zbb support saves 6 instructions, so not worth checking without
- * alternatives if supported
- */
- if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) &&
- IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
+ if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) && IS_ENABLED(CONFIG_TOOLCHAIN_HAS_ZBB)) {
unsigned long fold_temp;
/*
diff --git a/arch/riscv/lib/strcmp.S b/arch/riscv/lib/strcmp.S
index 57a5c0066231..65027e742af1 100644
--- a/arch/riscv/lib/strcmp.S
+++ b/arch/riscv/lib/strcmp.S
@@ -8,7 +8,8 @@
/* int strcmp(const char *cs, const char *ct) */
SYM_FUNC_START(strcmp)
- ALTERNATIVE("nop", "j strcmp_zbb", 0, RISCV_ISA_EXT_ZBB, CONFIG_RISCV_ISA_ZBB)
+ __ALTERNATIVE_CFG("nop", "j strcmp_zbb", 0, RISCV_ISA_EXT_ZBB,
+ IS_ENABLED(CONFIG_RISCV_ISA_ZBB) && IS_ENABLED(CONFIG_TOOLCHAIN_HAS_ZBB))
/*
* Returns
@@ -43,7 +44,7 @@ SYM_FUNC_START(strcmp)
* The code was published as part of the bitmanip manual
* in Appendix A.
*/
-#ifdef CONFIG_RISCV_ISA_ZBB
+#if defined(CONFIG_RISCV_ISA_ZBB) && defined(CONFIG_TOOLCHAIN_HAS_ZBB)
strcmp_zbb:
.option push
diff --git a/arch/riscv/lib/strlen.S b/arch/riscv/lib/strlen.S
index 962983b73251..eb4d2b7ed22b 100644
--- a/arch/riscv/lib/strlen.S
+++ b/arch/riscv/lib/strlen.S
@@ -8,7 +8,8 @@
/* int strlen(const char *s) */
SYM_FUNC_START(strlen)
- ALTERNATIVE("nop", "j strlen_zbb", 0, RISCV_ISA_EXT_ZBB, CONFIG_RISCV_ISA_ZBB)
+ __ALTERNATIVE_CFG("nop", "j strlen_zbb", 0, RISCV_ISA_EXT_ZBB,
+ IS_ENABLED(CONFIG_RISCV_ISA_ZBB) && IS_ENABLED(CONFIG_TOOLCHAIN_HAS_ZBB))
/*
* Returns
@@ -33,7 +34,7 @@ SYM_FUNC_START(strlen)
/*
* Variant of strlen using the ZBB extension if available
*/
-#ifdef CONFIG_RISCV_ISA_ZBB
+#if defined(CONFIG_RISCV_ISA_ZBB) && defined(CONFIG_TOOLCHAIN_HAS_ZBB)
strlen_zbb:
#ifdef CONFIG_CPU_BIG_ENDIAN
diff --git a/arch/riscv/lib/strncmp.S b/arch/riscv/lib/strncmp.S
index 7b2d0ff9ed6c..062000c468c8 100644
--- a/arch/riscv/lib/strncmp.S
+++ b/arch/riscv/lib/strncmp.S
@@ -8,7 +8,8 @@
/* int strncmp(const char *cs, const char *ct, size_t count) */
SYM_FUNC_START(strncmp)
- ALTERNATIVE("nop", "j strncmp_zbb", 0, RISCV_ISA_EXT_ZBB, CONFIG_RISCV_ISA_ZBB)
+ __ALTERNATIVE_CFG("nop", "j strncmp_zbb", 0, RISCV_ISA_EXT_ZBB,
+ IS_ENABLED(CONFIG_RISCV_ISA_ZBB) && IS_ENABLED(CONFIG_TOOLCHAIN_HAS_ZBB))
/*
* Returns
@@ -46,7 +47,7 @@ SYM_FUNC_START(strncmp)
/*
* Variant of strncmp using the ZBB extension if available
*/
-#ifdef CONFIG_RISCV_ISA_ZBB
+#if defined(CONFIG_RISCV_ISA_ZBB) && defined(CONFIG_TOOLCHAIN_HAS_ZBB)
strncmp_zbb:
.option push
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index 4abe3de23225..55c20ad1f744 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -158,7 +158,7 @@ static void set_mm_asid(struct mm_struct *mm, unsigned int cpu)
*
* - We get a zero back from the cmpxchg and end up waiting on the
* lock. Taking the lock synchronises with the rollover and so
- * we are forced to see the updated verion.
+ * we are forced to see the updated version.
*
* - We get a valid context back from the cmpxchg then we continue
* using old ASID because __flush_context() would have marked ASID
diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c
index b4a78a4b35cf..375dd96bb4a0 100644
--- a/arch/riscv/mm/hugetlbpage.c
+++ b/arch/riscv/mm/hugetlbpage.c
@@ -148,22 +148,25 @@ unsigned long hugetlb_mask_last_page(struct hstate *h)
static pte_t get_clear_contig(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep,
- unsigned long pte_num)
+ unsigned long ncontig)
{
- pte_t orig_pte = ptep_get(ptep);
- unsigned long i;
-
- for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++) {
- pte_t pte = ptep_get_and_clear(mm, addr, ptep);
-
- if (pte_dirty(pte))
- orig_pte = pte_mkdirty(orig_pte);
-
- if (pte_young(pte))
- orig_pte = pte_mkyoung(orig_pte);
+ pte_t pte, tmp_pte;
+ bool present;
+
+ pte = ptep_get_and_clear(mm, addr, ptep);
+ present = pte_present(pte);
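+ /* Fold dirty/young bits from the remaining PTEs only if the first PTE is present. */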
+ while (--ncontig) {
+ ptep++;
+ addr += PAGE_SIZE;
+ tmp_pte = ptep_get_and_clear(mm, addr, ptep);
+ if (present) {
+ if (pte_dirty(tmp_pte))
+ pte = pte_mkdirty(pte);
+ if (pte_young(tmp_pte))
+ pte = pte_mkyoung(pte);
+ }
}
-
- return orig_pte;
+ return pte;
}
static pte_t get_clear_contig_flush(struct mm_struct *mm,
@@ -212,6 +215,26 @@ static void clear_flush(struct mm_struct *mm,
flush_tlb_range(&vma, saddr, addr);
}
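+/*
+ * Number of page table entries covering a huge page of size 'sz'; the size
+ * mapped by each entry is returned through 'pgsize'. A 64 KiB NAPOT page,
+ * for example, is described by 16 entries of PAGE_SIZE each.
+ */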
+static int num_contig_ptes_from_size(unsigned long sz, size_t *pgsize)
+{
+ unsigned long hugepage_shift;
+
+ if (sz >= PGDIR_SIZE)
+ hugepage_shift = PGDIR_SHIFT;
+ else if (sz >= P4D_SIZE)
+ hugepage_shift = P4D_SHIFT;
+ else if (sz >= PUD_SIZE)
+ hugepage_shift = PUD_SHIFT;
+ else if (sz >= PMD_SIZE)
+ hugepage_shift = PMD_SHIFT;
+ else
+ hugepage_shift = PAGE_SHIFT;
+
+ *pgsize = 1 << hugepage_shift;
+
+ return sz >> hugepage_shift;
+}
+
/*
* When dealing with NAPOT mappings, the privileged specification indicates that
* "if an update needs to be made, the OS generally should first mark all of the
@@ -226,22 +249,10 @@ void set_huge_pte_at(struct mm_struct *mm,
pte_t pte,
unsigned long sz)
{
- unsigned long hugepage_shift, pgsize;
+ size_t pgsize;
int i, pte_num;
- if (sz >= PGDIR_SIZE)
- hugepage_shift = PGDIR_SHIFT;
- else if (sz >= P4D_SIZE)
- hugepage_shift = P4D_SHIFT;
- else if (sz >= PUD_SIZE)
- hugepage_shift = PUD_SHIFT;
- else if (sz >= PMD_SIZE)
- hugepage_shift = PMD_SHIFT;
- else
- hugepage_shift = PAGE_SHIFT;
-
- pte_num = sz >> hugepage_shift;
- pgsize = 1 << hugepage_shift;
+ pte_num = num_contig_ptes_from_size(sz, &pgsize);
if (!pte_present(pte)) {
for (i = 0; i < pte_num; i++, ptep++, addr += pgsize)
@@ -295,13 +306,14 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep, unsigned long sz)
{
+ size_t pgsize;
pte_t orig_pte = ptep_get(ptep);
int pte_num;
if (!pte_napot(orig_pte))
return ptep_get_and_clear(mm, addr, ptep);
- pte_num = napot_pte_num(napot_cont_order(orig_pte));
+ pte_num = num_contig_ptes_from_size(sz, &pgsize);
return get_clear_contig(mm, addr, ptep, pte_num);
}
@@ -351,6 +363,7 @@ void huge_pte_clear(struct mm_struct *mm,
pte_t *ptep,
unsigned long sz)
{
+ size_t pgsize;
pte_t pte = ptep_get(ptep);
int i, pte_num;
@@ -359,8 +372,9 @@ void huge_pte_clear(struct mm_struct *mm,
return;
}
- pte_num = napot_pte_num(napot_cont_order(pte));
- for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++)
+ pte_num = num_contig_ptes_from_size(sz, &pgsize);
+
+ for (i = 0; i < pte_num; i++, addr += pgsize, ptep++)
pte_clear(mm, addr, ptep);
}
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 66ee5ee42aa8..ab475ec6ca42 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -20,15 +20,13 @@
#include <linux/dma-map-ops.h>
#include <linux/crash_dump.h>
#include <linux/hugetlb.h>
-#ifdef CONFIG_RELOCATABLE
-#include <linux/elf.h>
-#endif
#include <linux/kfence.h>
#include <linux/execmem.h>
#include <asm/fixmap.h>
#include <asm/io.h>
#include <asm/kasan.h>
+#include <asm/module.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
@@ -320,6 +318,44 @@ static void __init setup_bootmem(void)
hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
}
+#ifdef CONFIG_RELOCATABLE
+extern unsigned long __rela_dyn_start, __rela_dyn_end;
+
+static void __init relocate_kernel(void)
+{
+ Elf_Rela *rela = (Elf_Rela *)&__rela_dyn_start;
+ /*
+ * This holds the offset between the linked virtual address and the
+ * relocated virtual address.
+ */
+ uintptr_t reloc_offset = kernel_map.virt_addr - KERNEL_LINK_ADDR;
+ /*
+ * This holds the offset between kernel linked virtual address and
+ * physical address.
+ */
+ uintptr_t va_kernel_link_pa_offset = KERNEL_LINK_ADDR - kernel_map.phys_addr;
+
+ for ( ; rela < (Elf_Rela *)&__rela_dyn_end; rela++) {
+ Elf_Addr addr = (rela->r_offset - va_kernel_link_pa_offset);
+ Elf_Addr relocated_addr = rela->r_addend;
+
+ if (rela->r_info != R_RISCV_RELATIVE)
+ continue;
+
+ /*
+ * Make sure to not relocate vdso symbols like rt_sigreturn
+ * which are linked from the address 0 in vmlinux since
+ * vdso symbol addresses are actually used as an offset from
+ * mm->context.vdso in VDSO_OFFSET macro.
+ */
+ if (relocated_addr >= KERNEL_LINK_ADDR)
+ relocated_addr += reloc_offset;
+
+ *(Elf_Addr *)addr = relocated_addr;
+ }
+}
+#endif /* CONFIG_RELOCATABLE */
+
#ifdef CONFIG_MMU
struct pt_alloc_ops pt_ops __meminitdata;
@@ -820,6 +856,8 @@ static __init void set_satp_mode(uintptr_t dtb_pa)
uintptr_t set_satp_mode_pmd = ((unsigned long)set_satp_mode) & PMD_MASK;
u64 satp_mode_cmdline = __pi_set_satp_mode_from_cmdline(dtb_pa);
+ kernel_map.page_offset = PAGE_OFFSET_L5;
+
if (satp_mode_cmdline == SATP_MODE_57) {
disable_pgtable_l5();
} else if (satp_mode_cmdline == SATP_MODE_48) {
@@ -890,44 +928,6 @@ retry:
#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
#endif
-#ifdef CONFIG_RELOCATABLE
-extern unsigned long __rela_dyn_start, __rela_dyn_end;
-
-static void __init relocate_kernel(void)
-{
- Elf64_Rela *rela = (Elf64_Rela *)&__rela_dyn_start;
- /*
- * This holds the offset between the linked virtual address and the
- * relocated virtual address.
- */
- uintptr_t reloc_offset = kernel_map.virt_addr - KERNEL_LINK_ADDR;
- /*
- * This holds the offset between kernel linked virtual address and
- * physical address.
- */
- uintptr_t va_kernel_link_pa_offset = KERNEL_LINK_ADDR - kernel_map.phys_addr;
-
- for ( ; rela < (Elf64_Rela *)&__rela_dyn_end; rela++) {
- Elf64_Addr addr = (rela->r_offset - va_kernel_link_pa_offset);
- Elf64_Addr relocated_addr = rela->r_addend;
-
- if (rela->r_info != R_RISCV_RELATIVE)
- continue;
-
- /*
- * Make sure to not relocate vdso symbols like rt_sigreturn
- * which are linked from the address 0 in vmlinux since
- * vdso symbol addresses are actually used as an offset from
- * mm->context.vdso in VDSO_OFFSET macro.
- */
- if (relocated_addr >= KERNEL_LINK_ADDR)
- relocated_addr += reloc_offset;
-
- *(Elf64_Addr *)addr = relocated_addr;
- }
-}
-#endif /* CONFIG_RELOCATABLE */
-
#ifdef CONFIG_XIP_KERNEL
static void __init create_kernel_page_table(pgd_t *pgdir,
__always_unused bool early)
@@ -1105,11 +1105,6 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
kernel_map.virt_addr = KERNEL_LINK_ADDR + kernel_map.virt_offset;
#ifdef CONFIG_XIP_KERNEL
-#ifdef CONFIG_64BIT
- kernel_map.page_offset = PAGE_OFFSET_L3;
-#else
- kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
-#endif
kernel_map.xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR;
kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);
@@ -1124,7 +1119,6 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
kernel_map.va_kernel_xip_data_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr
+ (uintptr_t)&_sdata - (uintptr_t)&_start;
#else
- kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
kernel_map.phys_addr = (uintptr_t)(&_start);
kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr;
kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr;
@@ -1171,7 +1165,8 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
* makes the kernel cross over a PUD_SIZE boundary, raise a bug
* since a part of the kernel would not get mapped.
*/
- BUG_ON(PUD_SIZE - (kernel_map.virt_addr & (PUD_SIZE - 1)) < kernel_map.size);
+ if (IS_ENABLED(CONFIG_64BIT))
+ BUG_ON(PUD_SIZE - (kernel_map.virt_addr & (PUD_SIZE - 1)) < kernel_map.size);
relocate_kernel();
#endif
@@ -1375,6 +1370,12 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
dtb_early_va = (void *)dtb_pa;
dtb_early_pa = dtb_pa;
+
+#ifdef CONFIG_RELOCATABLE
+ kernel_map.virt_addr = (uintptr_t)_start;
+ kernel_map.phys_addr = (uintptr_t)_start;
+ relocate_kernel();
+#endif
}
static inline void setup_vm_final(void)
diff --git a/arch/riscv/mm/physaddr.c b/arch/riscv/mm/physaddr.c
index 18706f457da7..559d291fac5c 100644
--- a/arch/riscv/mm/physaddr.c
+++ b/arch/riscv/mm/physaddr.c
@@ -12,7 +12,7 @@ phys_addr_t __virt_to_phys(unsigned long x)
* Boundary checking against the kernel linear mapping space.
*/
WARN(!is_linear_mapping(x) && !is_kernel_mapping(x),
- "virt_to_phys used for non-linear address: %pK (%pS)\n",
+ "virt_to_phys used for non-linear address: %p (%pS)\n",
(void *)x, (void *)x);
return __va_to_pa_nodebug(x);
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 74dd9307fbf1..f9e27ba1df99 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -4,6 +4,7 @@
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
+#include <linux/mmu_notifier.h>
#include <asm/sbi.h>
#include <asm/mmu_context.h>
@@ -78,10 +79,17 @@ static void __ipi_flush_tlb_range_asid(void *info)
local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
}
-static void __flush_tlb_range(const struct cpumask *cmask, unsigned long asid,
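+/* A NULL mm means no ASID is used and no MMU notifier invalidation is issued. */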
+static inline unsigned long get_mm_asid(struct mm_struct *mm)
+{
+ return mm ? cntx2asid(atomic_long_read(&mm->context.id)) : FLUSH_TLB_NO_ASID;
+}
+
+static void __flush_tlb_range(struct mm_struct *mm,
+ const struct cpumask *cmask,
unsigned long start, unsigned long size,
unsigned long stride)
{
+ unsigned long asid = get_mm_asid(mm);
unsigned int cpu;
if (cpumask_empty(cmask))
@@ -105,30 +113,26 @@ static void __flush_tlb_range(const struct cpumask *cmask, unsigned long asid,
}
put_cpu();
-}
-static inline unsigned long get_mm_asid(struct mm_struct *mm)
-{
- return cntx2asid(atomic_long_read(&mm->context.id));
+ if (mm)
+ mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, start + size);
}
void flush_tlb_mm(struct mm_struct *mm)
{
- __flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
- 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
+ __flush_tlb_range(mm, mm_cpumask(mm), 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
}
void flush_tlb_mm_range(struct mm_struct *mm,
unsigned long start, unsigned long end,
unsigned int page_size)
{
- __flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
- start, end - start, page_size);
+ __flush_tlb_range(mm, mm_cpumask(mm), start, end - start, page_size);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
- __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
+ __flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),
addr, PAGE_SIZE, PAGE_SIZE);
}
@@ -161,13 +165,13 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
}
}
- __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
+ __flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),
start, end - start, stride_size);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
- __flush_tlb_range(cpu_online_mask, FLUSH_TLB_NO_ASID,
+ __flush_tlb_range(NULL, cpu_online_mask,
start, end - start, PAGE_SIZE);
}
@@ -175,7 +179,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
- __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
+ __flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),
start, end - start, PMD_SIZE);
}
#endif
@@ -189,6 +193,7 @@ void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
struct mm_struct *mm, unsigned long start, unsigned long end)
{
cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
+ mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
}
void arch_flush_tlb_batched_pending(struct mm_struct *mm)
@@ -198,7 +203,7 @@ void arch_flush_tlb_batched_pending(struct mm_struct *mm)
void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
- __flush_tlb_range(&batch->cpumask, FLUSH_TLB_NO_ASID, 0,
- FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
+ __flush_tlb_range(NULL, &batch->cpumask,
+ 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
cpumask_clear(&batch->cpumask);
}
diff --git a/arch/riscv/purgatory/entry.S b/arch/riscv/purgatory/entry.S
index 0e6ca6d5ae4b..c5db2f072c34 100644
--- a/arch/riscv/purgatory/entry.S
+++ b/arch/riscv/purgatory/entry.S
@@ -12,6 +12,7 @@
.text
+.align 2
SYM_CODE_START(purgatory_start)
lla sp, .Lstack
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index c809c486d136..db8161ebb43c 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -137,6 +137,7 @@ config S390
select ARCH_SUPPORTS_DEBUG_PAGEALLOC
select ARCH_SUPPORTS_HUGETLBFS
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && CC_IS_CLANG
+ select ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS
select ARCH_SUPPORTS_NUMA_BALANCING
select ARCH_SUPPORTS_PER_VMA_LOCK
select ARCH_USE_BUILTIN_BSWAP
@@ -239,6 +240,7 @@ config S390
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_VIRT_CPU_ACCOUNTING
select HAVE_VIRT_CPU_ACCOUNTING_IDLE
+ select HOTPLUG_SMT
select IOMMU_HELPER if PCI
select IOMMU_SUPPORT if PCI
select KASAN_VMALLOC if KASAN
@@ -629,6 +631,7 @@ endchoice
config RELOCATABLE
def_bool y
+ select ARCH_VMLINUX_NEEDS_RELOCS
help
This builds a kernel image that retains relocation information
so it can be loaded at an arbitrary address.
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 5fae311203c2..b06dc53bfed5 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -15,7 +15,7 @@ KBUILD_CFLAGS_MODULE += -fPIC
KBUILD_AFLAGS += -m64
KBUILD_CFLAGS += -m64
KBUILD_CFLAGS += -fPIC
-LDFLAGS_vmlinux := -no-pie --emit-relocs --discard-none
+LDFLAGS_vmlinux := $(call ld-option,-no-pie)
extra_tools := relocs
aflags_dwarf := -Wa,-gdwarf-2
KBUILD_AFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -D__ASSEMBLY__
diff --git a/arch/s390/Makefile.postlink b/arch/s390/Makefile.postlink
index 1ae5478cd6ac..c2b737500a91 100644
--- a/arch/s390/Makefile.postlink
+++ b/arch/s390/Makefile.postlink
@@ -11,7 +11,6 @@ __archpost:
-include include/config/auto.conf
include $(srctree)/scripts/Kbuild.include
-include $(srctree)/scripts/Makefile.lib
CMD_RELOCS=arch/s390/tools/relocs
OUT_RELOCS = arch/s390/boot
@@ -20,9 +19,8 @@ quiet_cmd_relocs = RELOCS $(OUT_RELOCS)/relocs.S
mkdir -p $(OUT_RELOCS); \
$(CMD_RELOCS) $@ > $(OUT_RELOCS)/relocs.S
-vmlinux: FORCE
+vmlinux.unstripped: FORCE
$(call cmd,relocs)
- $(call cmd,strip_relocs)
clean:
@rm -f $(OUT_RELOCS)/relocs.S
diff --git a/arch/s390/hypfs/hypfs_diag_fs.c b/arch/s390/hypfs/hypfs_diag_fs.c
index 1e17e288cee4..ede951dc0085 100644
--- a/arch/s390/hypfs/hypfs_diag_fs.c
+++ b/arch/s390/hypfs/hypfs_diag_fs.c
@@ -209,6 +209,8 @@ static int hypfs_create_cpu_files(struct dentry *cpus_dir, void *cpu_info)
snprintf(buffer, TMP_SIZE, "%d", cpu_info__cpu_addr(diag204_get_info_type(),
cpu_info));
cpu_dir = hypfs_mkdir(cpus_dir, buffer);
+ if (IS_ERR(cpu_dir))
+ return PTR_ERR(cpu_dir);
rc = hypfs_create_u64(cpu_dir, "mgmtime",
cpu_info__acc_time(diag204_get_info_type(), cpu_info) -
cpu_info__lp_time(diag204_get_info_type(), cpu_info));
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 424f899d8163..cb89e54ada25 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -20,14 +20,13 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/mmu_notifier.h>
+#include <asm/kvm_host_types.h>
#include <asm/debug.h>
#include <asm/cpu.h>
#include <asm/fpu.h>
#include <asm/isc.h>
#include <asm/guarded_storage.h>
-#define KVM_S390_BSCA_CPU_SLOTS 64
-#define KVM_S390_ESCA_CPU_SLOTS 248
#define KVM_MAX_VCPUS 255
#define KVM_INTERNAL_MEM_SLOTS 1
@@ -51,342 +50,6 @@
#define KVM_REQ_REFRESH_GUEST_PREFIX \
KVM_ARCH_REQ_FLAGS(6, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
-#define SIGP_CTRL_C 0x80
-#define SIGP_CTRL_SCN_MASK 0x3f
-
-union bsca_sigp_ctrl {
- __u8 value;
- struct {
- __u8 c : 1;
- __u8 r : 1;
- __u8 scn : 6;
- };
-};
-
-union esca_sigp_ctrl {
- __u16 value;
- struct {
- __u8 c : 1;
- __u8 reserved: 7;
- __u8 scn;
- };
-};
-
-struct esca_entry {
- union esca_sigp_ctrl sigp_ctrl;
- __u16 reserved1[3];
- __u64 sda;
- __u64 reserved2[6];
-};
-
-struct bsca_entry {
- __u8 reserved0;
- union bsca_sigp_ctrl sigp_ctrl;
- __u16 reserved[3];
- __u64 sda;
- __u64 reserved2[2];
-};
-
-union ipte_control {
- unsigned long val;
- struct {
- unsigned long k : 1;
- unsigned long kh : 31;
- unsigned long kg : 32;
- };
-};
-
-/*
- * Utility is defined as two bytes but having it four bytes wide
- * generates more efficient code. Since the following bytes are
- * reserved this makes no functional difference.
- */
-union sca_utility {
- __u32 val;
- struct {
- __u32 mtcr : 1;
- __u32 : 31;
- };
-};
-
-struct bsca_block {
- union ipte_control ipte_control;
- __u64 reserved[5];
- __u64 mcn;
- union sca_utility utility;
- __u8 reserved2[4];
- struct bsca_entry cpu[KVM_S390_BSCA_CPU_SLOTS];
-};
-
-struct esca_block {
- union ipte_control ipte_control;
- __u64 reserved1[6];
- union sca_utility utility;
- __u8 reserved2[4];
- __u64 mcn[4];
- __u64 reserved3[20];
- struct esca_entry cpu[KVM_S390_ESCA_CPU_SLOTS];
-};
-
-/*
- * This struct is used to store some machine check info from lowcore
- * for machine checks that happen while the guest is running.
- * This info in host's lowcore might be overwritten by a second machine
- * check from host when host is in the machine check's high-level handling.
- * The size is 24 bytes.
- */
-struct mcck_volatile_info {
- __u64 mcic;
- __u64 failing_storage_address;
- __u32 ext_damage_code;
- __u32 reserved;
-};
-
-#define CR0_INITIAL_MASK (CR0_UNUSED_56 | CR0_INTERRUPT_KEY_SUBMASK | \
- CR0_MEASUREMENT_ALERT_SUBMASK)
-#define CR14_INITIAL_MASK (CR14_UNUSED_32 | CR14_UNUSED_33 | \
- CR14_EXTERNAL_DAMAGE_SUBMASK)
-
-#define SIDAD_SIZE_MASK 0xff
-#define sida_addr(sie_block) phys_to_virt((sie_block)->sidad & PAGE_MASK)
-#define sida_size(sie_block) \
- ((((sie_block)->sidad & SIDAD_SIZE_MASK) + 1) * PAGE_SIZE)
-
-#define CPUSTAT_STOPPED 0x80000000
-#define CPUSTAT_WAIT 0x10000000
-#define CPUSTAT_ECALL_PEND 0x08000000
-#define CPUSTAT_STOP_INT 0x04000000
-#define CPUSTAT_IO_INT 0x02000000
-#define CPUSTAT_EXT_INT 0x01000000
-#define CPUSTAT_RUNNING 0x00800000
-#define CPUSTAT_RETAINED 0x00400000
-#define CPUSTAT_TIMING_SUB 0x00020000
-#define CPUSTAT_SIE_SUB 0x00010000
-#define CPUSTAT_RRF 0x00008000
-#define CPUSTAT_SLSV 0x00004000
-#define CPUSTAT_SLSR 0x00002000
-#define CPUSTAT_ZARCH 0x00000800
-#define CPUSTAT_MCDS 0x00000100
-#define CPUSTAT_KSS 0x00000200
-#define CPUSTAT_SM 0x00000080
-#define CPUSTAT_IBS 0x00000040
-#define CPUSTAT_GED2 0x00000010
-#define CPUSTAT_G 0x00000008
-#define CPUSTAT_GED 0x00000004
-#define CPUSTAT_J 0x00000002
-#define CPUSTAT_P 0x00000001
-
-struct kvm_s390_sie_block {
- atomic_t cpuflags; /* 0x0000 */
- __u32 : 1; /* 0x0004 */
- __u32 prefix : 18;
- __u32 : 1;
- __u32 ibc : 12;
- __u8 reserved08[4]; /* 0x0008 */
-#define PROG_IN_SIE (1<<0)
- __u32 prog0c; /* 0x000c */
- union {
- __u8 reserved10[16]; /* 0x0010 */
- struct {
- __u64 pv_handle_cpu;
- __u64 pv_handle_config;
- };
- };
-#define PROG_BLOCK_SIE (1<<0)
-#define PROG_REQUEST (1<<1)
- atomic_t prog20; /* 0x0020 */
- __u8 reserved24[4]; /* 0x0024 */
- __u64 cputm; /* 0x0028 */
- __u64 ckc; /* 0x0030 */
- __u64 epoch; /* 0x0038 */
- __u32 svcc; /* 0x0040 */
-#define LCTL_CR0 0x8000
-#define LCTL_CR6 0x0200
-#define LCTL_CR9 0x0040
-#define LCTL_CR10 0x0020
-#define LCTL_CR11 0x0010
-#define LCTL_CR14 0x0002
- __u16 lctl; /* 0x0044 */
- __s16 icpua; /* 0x0046 */
-#define ICTL_OPEREXC 0x80000000
-#define ICTL_PINT 0x20000000
-#define ICTL_LPSW 0x00400000
-#define ICTL_STCTL 0x00040000
-#define ICTL_ISKE 0x00004000
-#define ICTL_SSKE 0x00002000
-#define ICTL_RRBE 0x00001000
-#define ICTL_TPROT 0x00000200
- __u32 ictl; /* 0x0048 */
-#define ECA_CEI 0x80000000
-#define ECA_IB 0x40000000
-#define ECA_SIGPI 0x10000000
-#define ECA_MVPGI 0x01000000
-#define ECA_AIV 0x00200000
-#define ECA_VX 0x00020000
-#define ECA_PROTEXCI 0x00002000
-#define ECA_APIE 0x00000008
-#define ECA_SII 0x00000001
- __u32 eca; /* 0x004c */
-#define ICPT_INST 0x04
-#define ICPT_PROGI 0x08
-#define ICPT_INSTPROGI 0x0C
-#define ICPT_EXTREQ 0x10
-#define ICPT_EXTINT 0x14
-#define ICPT_IOREQ 0x18
-#define ICPT_WAIT 0x1c
-#define ICPT_VALIDITY 0x20
-#define ICPT_STOP 0x28
-#define ICPT_OPEREXC 0x2C
-#define ICPT_PARTEXEC 0x38
-#define ICPT_IOINST 0x40
-#define ICPT_KSS 0x5c
-#define ICPT_MCHKREQ 0x60
-#define ICPT_INT_ENABLE 0x64
-#define ICPT_PV_INSTR 0x68
-#define ICPT_PV_NOTIFY 0x6c
-#define ICPT_PV_PREF 0x70
- __u8 icptcode; /* 0x0050 */
- __u8 icptstatus; /* 0x0051 */
- __u16 ihcpu; /* 0x0052 */
- __u8 reserved54; /* 0x0054 */
-#define IICTL_CODE_NONE 0x00
-#define IICTL_CODE_MCHK 0x01
-#define IICTL_CODE_EXT 0x02
-#define IICTL_CODE_IO 0x03
-#define IICTL_CODE_RESTART 0x04
-#define IICTL_CODE_SPECIFICATION 0x10
-#define IICTL_CODE_OPERAND 0x11
- __u8 iictl; /* 0x0055 */
- __u16 ipa; /* 0x0056 */
- __u32 ipb; /* 0x0058 */
- __u32 scaoh; /* 0x005c */
-#define FPF_BPBC 0x20
- __u8 fpf; /* 0x0060 */
-#define ECB_GS 0x40
-#define ECB_TE 0x10
-#define ECB_SPECI 0x08
-#define ECB_SRSI 0x04
-#define ECB_HOSTPROTINT 0x02
-#define ECB_PTF 0x01
- __u8 ecb; /* 0x0061 */
-#define ECB2_CMMA 0x80
-#define ECB2_IEP 0x20
-#define ECB2_PFMFI 0x08
-#define ECB2_ESCA 0x04
-#define ECB2_ZPCI_LSI 0x02
- __u8 ecb2; /* 0x0062 */
-#define ECB3_AISI 0x20
-#define ECB3_AISII 0x10
-#define ECB3_DEA 0x08
-#define ECB3_AES 0x04
-#define ECB3_RI 0x01
- __u8 ecb3; /* 0x0063 */
-#define ESCA_SCAOL_MASK ~0x3fU
- __u32 scaol; /* 0x0064 */
- __u8 sdf; /* 0x0068 */
- __u8 epdx; /* 0x0069 */
- __u8 cpnc; /* 0x006a */
- __u8 reserved6b; /* 0x006b */
- __u32 todpr; /* 0x006c */
-#define GISA_FORMAT1 0x00000001
- __u32 gd; /* 0x0070 */
- __u8 reserved74[12]; /* 0x0074 */
- __u64 mso; /* 0x0080 */
- __u64 msl; /* 0x0088 */
- psw_t gpsw; /* 0x0090 */
- __u64 gg14; /* 0x00a0 */
- __u64 gg15; /* 0x00a8 */
- __u8 reservedb0[8]; /* 0x00b0 */
-#define HPID_KVM 0x4
-#define HPID_VSIE 0x5
- __u8 hpid; /* 0x00b8 */
- __u8 reservedb9[7]; /* 0x00b9 */
- union {
- struct {
- __u32 eiparams; /* 0x00c0 */
- __u16 extcpuaddr; /* 0x00c4 */
- __u16 eic; /* 0x00c6 */
- };
- __u64 mcic; /* 0x00c0 */
- } __packed;
- __u32 reservedc8; /* 0x00c8 */
- union {
- struct {
- __u16 pgmilc; /* 0x00cc */
- __u16 iprcc; /* 0x00ce */
- };
- __u32 edc; /* 0x00cc */
- } __packed;
- union {
- struct {
- __u32 dxc; /* 0x00d0 */
- __u16 mcn; /* 0x00d4 */
- __u8 perc; /* 0x00d6 */
- __u8 peratmid; /* 0x00d7 */
- };
- __u64 faddr; /* 0x00d0 */
- } __packed;
- __u64 peraddr; /* 0x00d8 */
- __u8 eai; /* 0x00e0 */
- __u8 peraid; /* 0x00e1 */
- __u8 oai; /* 0x00e2 */
- __u8 armid; /* 0x00e3 */
- __u8 reservede4[4]; /* 0x00e4 */
- union {
- __u64 tecmc; /* 0x00e8 */
- struct {
- __u16 subchannel_id; /* 0x00e8 */
- __u16 subchannel_nr; /* 0x00ea */
- __u32 io_int_parm; /* 0x00ec */
- __u32 io_int_word; /* 0x00f0 */
- };
- } __packed;
- __u8 reservedf4[8]; /* 0x00f4 */
-#define CRYCB_FORMAT_MASK 0x00000003
-#define CRYCB_FORMAT0 0x00000000
-#define CRYCB_FORMAT1 0x00000001
-#define CRYCB_FORMAT2 0x00000003
- __u32 crycbd; /* 0x00fc */
- __u64 gcr[16]; /* 0x0100 */
- union {
- __u64 gbea; /* 0x0180 */
- __u64 sidad;
- };
- __u8 reserved188[8]; /* 0x0188 */
- __u64 sdnxo; /* 0x0190 */
- __u8 reserved198[8]; /* 0x0198 */
- __u32 fac; /* 0x01a0 */
- __u8 reserved1a4[20]; /* 0x01a4 */
- __u64 cbrlo; /* 0x01b8 */
- __u8 reserved1c0[8]; /* 0x01c0 */
-#define ECD_HOSTREGMGMT 0x20000000
-#define ECD_MEF 0x08000000
-#define ECD_ETOKENF 0x02000000
-#define ECD_ECC 0x00200000
-#define ECD_HMAC 0x00004000
- __u32 ecd; /* 0x01c8 */
- __u8 reserved1cc[18]; /* 0x01cc */
- __u64 pp; /* 0x01de */
- __u8 reserved1e6[2]; /* 0x01e6 */
- __u64 itdba; /* 0x01e8 */
- __u64 riccbd; /* 0x01f0 */
- __u64 gvrd; /* 0x01f8 */
-} __packed __aligned(512);
-
-struct kvm_s390_itdb {
- __u8 data[256];
-};
-
-struct sie_page {
- struct kvm_s390_sie_block sie_block;
- struct mcck_volatile_info mcck_info; /* 0x0200 */
- __u8 reserved218[360]; /* 0x0218 */
- __u64 pv_grregs[16]; /* 0x0380 */
- __u8 reserved400[512]; /* 0x0400 */
- struct kvm_s390_itdb itdb; /* 0x0600 */
- __u8 reserved700[2304]; /* 0x0700 */
-};
-
struct kvm_vcpu_stat {
struct kvm_vcpu_stat_generic generic;
u64 exit_userspace;
diff --git a/arch/s390/include/asm/kvm_host_types.h b/arch/s390/include/asm/kvm_host_types.h
new file mode 100644
index 000000000000..1394d3fb648f
--- /dev/null
+++ b/arch/s390/include/asm/kvm_host_types.h
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_KVM_HOST_TYPES_H
+#define _ASM_KVM_HOST_TYPES_H
+
+#include <linux/atomic.h>
+#include <linux/types.h>
+
+#define KVM_S390_BSCA_CPU_SLOTS 64
+#define KVM_S390_ESCA_CPU_SLOTS 248
+
+#define SIGP_CTRL_C 0x80
+#define SIGP_CTRL_SCN_MASK 0x3f
+
+union bsca_sigp_ctrl {
+ __u8 value;
+ struct {
+ __u8 c : 1;
+ __u8 r : 1;
+ __u8 scn : 6;
+ };
+};
+
+union esca_sigp_ctrl {
+ __u16 value;
+ struct {
+ __u8 c : 1;
+ __u8 reserved: 7;
+ __u8 scn;
+ };
+};
+
+struct esca_entry {
+ union esca_sigp_ctrl sigp_ctrl;
+ __u16 reserved1[3];
+ __u64 sda;
+ __u64 reserved2[6];
+};
+
+struct bsca_entry {
+ __u8 reserved0;
+ union bsca_sigp_ctrl sigp_ctrl;
+ __u16 reserved[3];
+ __u64 sda;
+ __u64 reserved2[2];
+};
+
+union ipte_control {
+ unsigned long val;
+ struct {
+ unsigned long k : 1;
+ unsigned long kh : 31;
+ unsigned long kg : 32;
+ };
+};
+
+/*
+ * Utility is defined as two bytes but having it four bytes wide
+ * generates more efficient code. Since the following bytes are
+ * reserved this makes no functional difference.
+ */
+union sca_utility {
+ __u32 val;
+ struct {
+ __u32 mtcr : 1;
+ __u32 : 31;
+ };
+};
+
+struct bsca_block {
+ union ipte_control ipte_control;
+ __u64 reserved[5];
+ __u64 mcn;
+ union sca_utility utility;
+ __u8 reserved2[4];
+ struct bsca_entry cpu[KVM_S390_BSCA_CPU_SLOTS];
+};
+
+struct esca_block {
+ union ipte_control ipte_control;
+ __u64 reserved1[6];
+ union sca_utility utility;
+ __u8 reserved2[4];
+ __u64 mcn[4];
+ __u64 reserved3[20];
+ struct esca_entry cpu[KVM_S390_ESCA_CPU_SLOTS];
+};
+
+/*
+ * This struct is used to store some machine check info from lowcore
+ * for machine checks that happen while the guest is running.
+ * This info in host's lowcore might be overwritten by a second machine
+ * check from host when host is in the machine check's high-level handling.
+ * The size is 24 bytes.
+ */
+struct mcck_volatile_info {
+ __u64 mcic;
+ __u64 failing_storage_address;
+ __u32 ext_damage_code;
+ __u32 reserved;
+};
+
+#define CR0_INITIAL_MASK (CR0_UNUSED_56 | CR0_INTERRUPT_KEY_SUBMASK | \
+ CR0_MEASUREMENT_ALERT_SUBMASK)
+#define CR14_INITIAL_MASK (CR14_UNUSED_32 | CR14_UNUSED_33 | \
+ CR14_EXTERNAL_DAMAGE_SUBMASK)
+
+#define SIDAD_SIZE_MASK 0xff
+#define sida_addr(sie_block) phys_to_virt((sie_block)->sidad & PAGE_MASK)
+#define sida_size(sie_block) \
+ ((((sie_block)->sidad & SIDAD_SIZE_MASK) + 1) * PAGE_SIZE)
+
+#define CPUSTAT_STOPPED 0x80000000
+#define CPUSTAT_WAIT 0x10000000
+#define CPUSTAT_ECALL_PEND 0x08000000
+#define CPUSTAT_STOP_INT 0x04000000
+#define CPUSTAT_IO_INT 0x02000000
+#define CPUSTAT_EXT_INT 0x01000000
+#define CPUSTAT_RUNNING 0x00800000
+#define CPUSTAT_RETAINED 0x00400000
+#define CPUSTAT_TIMING_SUB 0x00020000
+#define CPUSTAT_SIE_SUB 0x00010000
+#define CPUSTAT_RRF 0x00008000
+#define CPUSTAT_SLSV 0x00004000
+#define CPUSTAT_SLSR 0x00002000
+#define CPUSTAT_ZARCH 0x00000800
+#define CPUSTAT_MCDS 0x00000100
+#define CPUSTAT_KSS 0x00000200
+#define CPUSTAT_SM 0x00000080
+#define CPUSTAT_IBS 0x00000040
+#define CPUSTAT_GED2 0x00000010
+#define CPUSTAT_G 0x00000008
+#define CPUSTAT_GED 0x00000004
+#define CPUSTAT_J 0x00000002
+#define CPUSTAT_P 0x00000001
+
+struct kvm_s390_sie_block {
+ atomic_t cpuflags; /* 0x0000 */
+ __u32 : 1; /* 0x0004 */
+ __u32 prefix : 18;
+ __u32 : 1;
+ __u32 ibc : 12;
+ __u8 reserved08[4]; /* 0x0008 */
+#define PROG_IN_SIE (1<<0)
+ __u32 prog0c; /* 0x000c */
+ union {
+ __u8 reserved10[16]; /* 0x0010 */
+ struct {
+ __u64 pv_handle_cpu;
+ __u64 pv_handle_config;
+ };
+ };
+#define PROG_BLOCK_SIE (1<<0)
+#define PROG_REQUEST (1<<1)
+ atomic_t prog20; /* 0x0020 */
+ __u8 reserved24[4]; /* 0x0024 */
+ __u64 cputm; /* 0x0028 */
+ __u64 ckc; /* 0x0030 */
+ __u64 epoch; /* 0x0038 */
+ __u32 svcc; /* 0x0040 */
+#define LCTL_CR0 0x8000
+#define LCTL_CR6 0x0200
+#define LCTL_CR9 0x0040
+#define LCTL_CR10 0x0020
+#define LCTL_CR11 0x0010
+#define LCTL_CR14 0x0002
+ __u16 lctl; /* 0x0044 */
+ __s16 icpua; /* 0x0046 */
+#define ICTL_OPEREXC 0x80000000
+#define ICTL_PINT 0x20000000
+#define ICTL_LPSW 0x00400000
+#define ICTL_STCTL 0x00040000
+#define ICTL_ISKE 0x00004000
+#define ICTL_SSKE 0x00002000
+#define ICTL_RRBE 0x00001000
+#define ICTL_TPROT 0x00000200
+ __u32 ictl; /* 0x0048 */
+#define ECA_CEI 0x80000000
+#define ECA_IB 0x40000000
+#define ECA_SIGPI 0x10000000
+#define ECA_MVPGI 0x01000000
+#define ECA_AIV 0x00200000
+#define ECA_VX 0x00020000
+#define ECA_PROTEXCI 0x00002000
+#define ECA_APIE 0x00000008
+#define ECA_SII 0x00000001
+ __u32 eca; /* 0x004c */
+#define ICPT_INST 0x04
+#define ICPT_PROGI 0x08
+#define ICPT_INSTPROGI 0x0C
+#define ICPT_EXTREQ 0x10
+#define ICPT_EXTINT 0x14
+#define ICPT_IOREQ 0x18
+#define ICPT_WAIT 0x1c
+#define ICPT_VALIDITY 0x20
+#define ICPT_STOP 0x28
+#define ICPT_OPEREXC 0x2C
+#define ICPT_PARTEXEC 0x38
+#define ICPT_IOINST 0x40
+#define ICPT_KSS 0x5c
+#define ICPT_MCHKREQ 0x60
+#define ICPT_INT_ENABLE 0x64
+#define ICPT_PV_INSTR 0x68
+#define ICPT_PV_NOTIFY 0x6c
+#define ICPT_PV_PREF 0x70
+ __u8 icptcode; /* 0x0050 */
+ __u8 icptstatus; /* 0x0051 */
+ __u16 ihcpu; /* 0x0052 */
+ __u8 reserved54; /* 0x0054 */
+#define IICTL_CODE_NONE 0x00
+#define IICTL_CODE_MCHK 0x01
+#define IICTL_CODE_EXT 0x02
+#define IICTL_CODE_IO 0x03
+#define IICTL_CODE_RESTART 0x04
+#define IICTL_CODE_SPECIFICATION 0x10
+#define IICTL_CODE_OPERAND 0x11
+ __u8 iictl; /* 0x0055 */
+ __u16 ipa; /* 0x0056 */
+ __u32 ipb; /* 0x0058 */
+ __u32 scaoh; /* 0x005c */
+#define FPF_BPBC 0x20
+ __u8 fpf; /* 0x0060 */
+#define ECB_GS 0x40
+#define ECB_TE 0x10
+#define ECB_SPECI 0x08
+#define ECB_SRSI 0x04
+#define ECB_HOSTPROTINT 0x02
+#define ECB_PTF 0x01
+ __u8 ecb; /* 0x0061 */
+#define ECB2_CMMA 0x80
+#define ECB2_IEP 0x20
+#define ECB2_PFMFI 0x08
+#define ECB2_ESCA 0x04
+#define ECB2_ZPCI_LSI 0x02
+ __u8 ecb2; /* 0x0062 */
+#define ECB3_AISI 0x20
+#define ECB3_AISII 0x10
+#define ECB3_DEA 0x08
+#define ECB3_AES 0x04
+#define ECB3_RI 0x01
+ __u8 ecb3; /* 0x0063 */
+#define ESCA_SCAOL_MASK ~0x3fU
+ __u32 scaol; /* 0x0064 */
+ __u8 sdf; /* 0x0068 */
+ __u8 epdx; /* 0x0069 */
+ __u8 cpnc; /* 0x006a */
+ __u8 reserved6b; /* 0x006b */
+ __u32 todpr; /* 0x006c */
+#define GISA_FORMAT1 0x00000001
+ __u32 gd; /* 0x0070 */
+ __u8 reserved74[12]; /* 0x0074 */
+ __u64 mso; /* 0x0080 */
+ __u64 msl; /* 0x0088 */
+ psw_t gpsw; /* 0x0090 */
+ __u64 gg14; /* 0x00a0 */
+ __u64 gg15; /* 0x00a8 */
+ __u8 reservedb0[8]; /* 0x00b0 */
+#define HPID_KVM 0x4
+#define HPID_VSIE 0x5
+ __u8 hpid; /* 0x00b8 */
+ __u8 reservedb9[7]; /* 0x00b9 */
+ union {
+ struct {
+ __u32 eiparams; /* 0x00c0 */
+ __u16 extcpuaddr; /* 0x00c4 */
+ __u16 eic; /* 0x00c6 */
+ };
+ __u64 mcic; /* 0x00c0 */
+ } __packed;
+ __u32 reservedc8; /* 0x00c8 */
+ union {
+ struct {
+ __u16 pgmilc; /* 0x00cc */
+ __u16 iprcc; /* 0x00ce */
+ };
+ __u32 edc; /* 0x00cc */
+ } __packed;
+ union {
+ struct {
+ __u32 dxc; /* 0x00d0 */
+ __u16 mcn; /* 0x00d4 */
+ __u8 perc; /* 0x00d6 */
+ __u8 peratmid; /* 0x00d7 */
+ };
+ __u64 faddr; /* 0x00d0 */
+ } __packed;
+ __u64 peraddr; /* 0x00d8 */
+ __u8 eai; /* 0x00e0 */
+ __u8 peraid; /* 0x00e1 */
+ __u8 oai; /* 0x00e2 */
+ __u8 armid; /* 0x00e3 */
+ __u8 reservede4[4]; /* 0x00e4 */
+ union {
+ __u64 tecmc; /* 0x00e8 */
+ struct {
+ __u16 subchannel_id; /* 0x00e8 */
+ __u16 subchannel_nr; /* 0x00ea */
+ __u32 io_int_parm; /* 0x00ec */
+ __u32 io_int_word; /* 0x00f0 */
+ };
+ } __packed;
+ __u8 reservedf4[8]; /* 0x00f4 */
+#define CRYCB_FORMAT_MASK 0x00000003
+#define CRYCB_FORMAT0 0x00000000
+#define CRYCB_FORMAT1 0x00000001
+#define CRYCB_FORMAT2 0x00000003
+ __u32 crycbd; /* 0x00fc */
+ __u64 gcr[16]; /* 0x0100 */
+ union {
+ __u64 gbea; /* 0x0180 */
+ __u64 sidad;
+ };
+ __u8 reserved188[8]; /* 0x0188 */
+ __u64 sdnxo; /* 0x0190 */
+ __u8 reserved198[8]; /* 0x0198 */
+ __u32 fac; /* 0x01a0 */
+ __u8 reserved1a4[20]; /* 0x01a4 */
+ __u64 cbrlo; /* 0x01b8 */
+ __u8 reserved1c0[8]; /* 0x01c0 */
+#define ECD_HOSTREGMGMT 0x20000000
+#define ECD_MEF 0x08000000
+#define ECD_ETOKENF 0x02000000
+#define ECD_ECC 0x00200000
+#define ECD_HMAC 0x00004000
+ __u32 ecd; /* 0x01c8 */
+ __u8 reserved1cc[18]; /* 0x01cc */
+ __u64 pp; /* 0x01de */
+ __u8 reserved1e6[2]; /* 0x01e6 */
+ __u64 itdba; /* 0x01e8 */
+ __u64 riccbd; /* 0x01f0 */
+ __u64 gvrd; /* 0x01f8 */
+} __packed __aligned(512);
+
+struct kvm_s390_itdb {
+ __u8 data[256];
+};
+
+struct sie_page {
+ struct kvm_s390_sie_block sie_block;
+ struct mcck_volatile_info mcck_info; /* 0x0200 */
+ __u8 reserved218[360]; /* 0x0218 */
+ __u64 pv_grregs[16]; /* 0x0380 */
+ __u8 reserved400[512]; /* 0x0400 */
+ struct kvm_s390_itdb itdb; /* 0x0600 */
+ __u8 reserved700[2304]; /* 0x0700 */
+};
+
+#endif /* _ASM_KVM_HOST_TYPES_H */
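Splitting the SIE block and SCA layout into asm/kvm_host_types.h lets low-level consumers such as asm-offsets.c see the structure offsets without pulling in linux/kvm_host.h. A generic sketch of that header-split pattern, using made-up "foo" names rather than the real KVM types:

/* foo_types.h: layout-only definitions, no heavyweight includes */
#ifndef _FOO_TYPES_H
#define _FOO_TYPES_H
#include <linux/types.h>

struct foo_hw_block {
        __u32 flags;
        __u64 state;
};
#endif /* _FOO_TYPES_H */

/* foo.h: full API header, built on top of the layout header */
#ifndef _FOO_H
#define _FOO_H
#include <linux/mutex.h>
#include "foo_types.h"

struct foo_device {
        struct mutex lock;
        struct foo_hw_block hw;
};
#endif /* _FOO_H */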
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 1a31f1f93ed3..e99e9c87b1ce 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -164,9 +164,7 @@ struct lowcore {
__u32 spinlock_index; /* 0x03b0 */
__u8 pad_0x03b4[0x03b8-0x03b4]; /* 0x03b4 */
__u64 percpu_offset; /* 0x03b8 */
- __u8 pad_0x03c0[0x03c8-0x03c0]; /* 0x03c0 */
- __u64 machine_flags; /* 0x03c8 */
- __u8 pad_0x03d0[0x0400-0x03d0]; /* 0x03d0 */
+ __u8 pad_0x03c0[0x0400-0x03c0]; /* 0x03c0 */
__u32 return_lpswe; /* 0x0400 */
__u32 return_mcck_lpswe; /* 0x0404 */
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index d09a92db95f7..6c8063cb8fe7 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -31,6 +31,7 @@
#include <linux/cpumask.h>
#include <linux/linkage.h>
#include <linux/irqflags.h>
+#include <linux/bitops.h>
#include <asm/fpu-types.h>
#include <asm/cpu.h>
#include <asm/page.h>
@@ -62,33 +63,27 @@ static __always_inline struct pcpu *this_pcpu(void)
static __always_inline void set_cpu_flag(int flag)
{
- this_pcpu()->flags |= (1UL << flag);
+ set_bit(flag, &this_pcpu()->flags);
}
static __always_inline void clear_cpu_flag(int flag)
{
- this_pcpu()->flags &= ~(1UL << flag);
+ clear_bit(flag, &this_pcpu()->flags);
}
static __always_inline bool test_cpu_flag(int flag)
{
- return this_pcpu()->flags & (1UL << flag);
+ return test_bit(flag, &this_pcpu()->flags);
}
static __always_inline bool test_and_set_cpu_flag(int flag)
{
- if (test_cpu_flag(flag))
- return true;
- set_cpu_flag(flag);
- return false;
+ return test_and_set_bit(flag, &this_pcpu()->flags);
}
static __always_inline bool test_and_clear_cpu_flag(int flag)
{
- if (!test_cpu_flag(flag))
- return false;
- clear_cpu_flag(flag);
- return true;
+ return test_and_clear_bit(flag, &this_pcpu()->flags);
}
/*
@@ -97,7 +92,7 @@ static __always_inline bool test_and_clear_cpu_flag(int flag)
*/
static __always_inline bool test_cpu_flag_of(int flag, int cpu)
{
- return per_cpu(pcpu_devices, cpu).flags & (1UL << flag);
+ return test_bit(flag, &per_cpu(pcpu_devices, cpu).flags);
}
#define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY)
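The processor.h hunk above replaces open-coded read-modify-write sequences on the per-CPU flags word with the atomic bit operations from linux/bitops.h, closing the window in which a concurrent update could be lost. A small sketch of the difference, using a made-up flag word:

#include <linux/bitops.h>

static unsigned long demo_flags;        /* made-up flag word for illustration */
#define DEMO_PENDING    0

static bool demo_mark_pending(void)
{
        /*
         * Atomic test-and-set: two concurrent callers cannot both observe
         * "was not set", unlike the old "if (flags & bit) ... flags |= bit".
         */
        return test_and_set_bit(DEMO_PENDING, &demo_flags);
}

static void demo_clear_pending(void)
{
        clear_bit(DEMO_PENDING, &demo_flags);
}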
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 1ad5e82c2f65..91f569cae1ce 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -9,9 +9,6 @@
#define _ASM_THREAD_INFO_H
#include <linux/bits.h>
-#ifndef ASM_OFFSETS_C
-#include <asm/asm-offsets.h>
-#endif
/*
* General size of kernel stacks
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index cef06bffad80..44110847342a 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -61,6 +61,12 @@ static inline void topology_expect_change(void) { }
#endif /* CONFIG_SCHED_TOPOLOGY */
+static inline bool topology_is_primary_thread(unsigned int cpu)
+{
+ return smp_get_base_cpu(cpu) == cpu;
+}
+#define topology_is_primary_thread topology_is_primary_thread
+
#define POLARIZATION_UNKNOWN (-1)
#define POLARIZATION_HRZ (0)
#define POLARIZATION_VL (1)
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 49bb197c8c81..841e05f7fa7e 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -5,15 +5,14 @@
* and format the required data.
*/
-#define ASM_OFFSETS_C
-
#include <linux/kbuild.h>
-#include <linux/kvm_host.h>
#include <linux/sched.h>
#include <linux/purgatory.h>
#include <linux/pgtable.h>
-#include <linux/ftrace.h>
+#include <linux/ftrace_regs.h>
+#include <asm/kvm_host_types.h>
#include <asm/stacktrace.h>
+#include <asm/ptrace.h>
int main(void)
{
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index 911b95cd57e5..dd410962ecbe 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -17,6 +17,7 @@
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
+#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/debug.h>
#include <asm/dis.h>
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index b6d3c7a6209d..54cf0923050f 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -22,6 +22,7 @@
#include <asm/asm-extable.h>
#include <linux/memblock.h>
#include <asm/access-regs.h>
+#include <asm/asm-offsets.h>
#include <asm/machine.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index b0c2356697fd..dd291c9ad6a6 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -468,7 +468,7 @@ SYM_CODE_START(mcck_int_handler)
clgrjl %r9,%r14, 4f
larl %r14,.Lsie_leave
clgrjhe %r9,%r14, 4f
- lg %r10,__LC_PCPU
+ lg %r10,__LC_PCPU(%r13)
oi __PCPU_FLAGS+7(%r10), _CIF_MCCK_GUEST
4: BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
SIEEXIT __SF_SIE_CONTROL(%r15),%r13
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index b6686d63b754..f244c5560e7f 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -414,7 +414,6 @@ static void __init setup_lowcore(void)
lc->clock_comparator = clock_comparator_max;
lc->current_task = (unsigned long)&init_task;
lc->lpp = LPP_MAGIC;
- lc->machine_flags = get_lowcore()->machine_flags;
lc->preempt_count = get_lowcore()->preempt_count;
nmi_alloc_mcesa_early(&lc->mcesad);
lc->sys_enter_timer = get_lowcore()->sys_enter_timer;
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index f2f05c5277f4..63f41dfaba85 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -99,13 +99,6 @@ __vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
static unsigned int smp_max_threads __initdata = -1U;
cpumask_t cpu_setup_mask;
-static int __init early_nosmt(char *s)
-{
- smp_max_threads = 1;
- return 0;
-}
-early_param("nosmt", early_nosmt);
-
static int __init early_smt(char *s)
{
get_option(&s, &smp_max_threads);
@@ -265,7 +258,6 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
lc->percpu_offset = __per_cpu_offset[cpu];
lc->kernel_asce = get_lowcore()->kernel_asce;
lc->user_asce = s390_invalid_asce;
- lc->machine_flags = get_lowcore()->machine_flags;
lc->user_timer = lc->system_timer =
lc->steal_timer = lc->avg_steal_timer = 0;
abs_lc = get_abs_lowcore();
@@ -809,6 +801,7 @@ void __init smp_detect_cpus(void)
mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
pcpu_set_smt(mtid);
+ cpu_smt_set_num_threads(smp_cpu_mtid + 1, smp_cpu_mtid + 1);
/* Print number of CPUs */
c_cpus = s_cpus = 0;
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 40edfde25f5b..b153a395f46d 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -9,6 +9,7 @@
#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
+#include <asm/asm-offsets.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
#include <asm/kprobes.h>
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index c900deddd36d..fed17d407a44 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -680,7 +680,7 @@ static void stp_work_fn(struct work_struct *work)
if (!stp_online) {
chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000, NULL);
- del_timer_sync(&stp_timer);
+ timer_delete_sync(&stp_timer);
goto out_unlock;
}
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 70c8f9ad13cd..430feb1a5013 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -80,7 +80,7 @@ static int map_vdso(unsigned long addr, unsigned long vdso_mapping_len)
vdso_text_start = vvar_start + VDSO_NR_PAGES * PAGE_SIZE;
/* VM_MAYWRITE for COW so gdb can set breakpoints */
vma = _install_special_mapping(mm, vdso_text_start, vdso_text_len,
- VM_READ|VM_EXEC|
+ VM_READ|VM_EXEC|VM_SEALED_SYSMAP|
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
vdso_mapping);
if (IS_ERR(vma)) {
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 39f44b6256e0..e2a6eb92420f 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -201,7 +201,7 @@ static void cmm_set_timer(void)
{
if (cmm_timed_pages_target <= 0 || cmm_timeout_seconds <= 0) {
if (timer_pending(&cmm_timer))
- del_timer(&cmm_timer);
+ timer_delete(&cmm_timer);
return;
}
mod_timer(&cmm_timer, jiffies + secs_to_jiffies(cmm_timeout_seconds));
@@ -424,7 +424,7 @@ out_smsg:
#endif
unregister_sysctl_table(cmm_sysctl_header);
out_sysctl:
- del_timer_sync(&cmm_timer);
+ timer_delete_sync(&cmm_timer);
return rc;
}
module_init(cmm_init);
@@ -437,7 +437,7 @@ static void __exit cmm_exit(void)
#endif
unregister_oom_notifier(&cmm_oom_nb);
kthread_stop(cmm_thread_ptr);
- del_timer_sync(&cmm_timer);
+ timer_delete_sync(&cmm_timer);
cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
}
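The del_timer()/del_timer_sync() calls above are converted to the current names timer_delete()/timer_delete_sync(); behaviour is unchanged, with the _sync variant also waiting for a running callback to finish. A minimal usage sketch with a made-up timer:

#include <linux/timer.h>

static struct timer_list demo_timer;    /* made-up timer for illustration */

static void demo_shutdown(void)
{
        /*
         * Deactivate the timer; timer_delete_sync() additionally waits
         * until a concurrently executing callback has returned, so it
         * must not be called while holding a lock the callback takes.
         */
        timer_delete_sync(&demo_timer);
}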
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 31a763e05287..da84ff6770de 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -376,6 +376,7 @@ void do_protection_exception(struct pt_regs *regs)
if (unlikely(!teid.b61)) {
if (user_mode(regs)) {
/* Low-address protection in user mode: cannot happen */
+ dump_fault_info(regs);
die(regs, "Low-address protection");
}
/*
diff --git a/arch/s390/mm/pfault.c b/arch/s390/mm/pfault.c
index b01e73f5b9b8..e6175d75e4b0 100644
--- a/arch/s390/mm/pfault.c
+++ b/arch/s390/mm/pfault.c
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <asm/asm-extable.h>
+#include <asm/asm-offsets.h>
#include <asm/pfault.h>
#include <asm/diag.h>
diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c
index 0e725039861f..81bdb54ad5e3 100644
--- a/arch/s390/pci/pci_bus.c
+++ b/arch/s390/pci/pci_bus.c
@@ -287,23 +287,21 @@ static struct zpci_bus *zpci_bus_alloc(int topo, bool topo_is_tid)
static void pci_dma_range_setup(struct pci_dev *pdev)
{
struct zpci_dev *zdev = to_zpci(pdev);
- struct bus_dma_region *map;
- u64 aligned_end;
+ u64 aligned_end, size;
+ dma_addr_t dma_start;
+ int ret;
- map = kzalloc(sizeof(*map), GFP_KERNEL);
- if (!map)
- return;
-
- map->cpu_start = 0;
- map->dma_start = PAGE_ALIGN(zdev->start_dma);
+ dma_start = PAGE_ALIGN(zdev->start_dma);
aligned_end = PAGE_ALIGN_DOWN(zdev->end_dma + 1);
- if (aligned_end >= map->dma_start)
- map->size = aligned_end - map->dma_start;
+ if (aligned_end >= dma_start)
+ size = aligned_end - dma_start;
else
- map->size = 0;
- WARN_ON_ONCE(map->size == 0);
+ size = 0;
+ WARN_ON_ONCE(size == 0);
- pdev->dev.dma_range_map = map;
+ ret = dma_direct_set_offset(&pdev->dev, 0, dma_start, size);
+ if (ret)
+ pr_err("Failed to allocate DMA range map for %s\n", pci_name(pdev));
}
void pcibios_bus_add_device(struct pci_dev *pdev)
@@ -360,6 +358,9 @@ static bool zpci_bus_is_isolated_vf(struct zpci_bus *zbus, struct zpci_dev *zdev
{
struct pci_dev *pdev;
+ if (!zdev->vfn)
+ return false;
+
pdev = zpci_iov_find_parent_pf(zbus, zdev);
if (!pdev)
return true;
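In pci_dma_range_setup() above, the manually allocated single-entry bus_dma_region is replaced by dma_direct_set_offset(), which builds and installs dev->dma_range_map and returns an error the caller can report. A hedged usage sketch with placeholder values:

#include <linux/dma-direct.h>

static int demo_setup_dma_window(struct device *dev,
                                 dma_addr_t dma_start, u64 size)
{
        /*
         * Map CPU physical address 0 to bus address dma_start for
         * size bytes; returns a negative errno on failure (for
         * example when the range map cannot be allocated).
         */
        return dma_direct_set_offset(dev, 0, dma_start, size);
}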
diff --git a/arch/sh/configs/se7712_defconfig b/arch/sh/configs/se7712_defconfig
index 20f07aee5bde..49a4961889de 100644
--- a/arch/sh/configs/se7712_defconfig
+++ b/arch/sh/configs/se7712_defconfig
@@ -57,7 +57,6 @@ CONFIG_NET_SCH_TBF=y
CONFIG_NET_SCH_GRED=y
CONFIG_NET_SCH_DSMARK=y
CONFIG_NET_SCH_NETEM=y
-CONFIG_NET_CLS_TCINDEX=y
CONFIG_NET_CLS_ROUTE4=y
CONFIG_NET_CLS_FW=y
CONFIG_MTD=y
diff --git a/arch/sh/configs/se7721_defconfig b/arch/sh/configs/se7721_defconfig
index 00862d3c030d..de293792db84 100644
--- a/arch/sh/configs/se7721_defconfig
+++ b/arch/sh/configs/se7721_defconfig
@@ -56,7 +56,6 @@ CONFIG_NET_SCH_TBF=y
CONFIG_NET_SCH_GRED=y
CONFIG_NET_SCH_DSMARK=y
CONFIG_NET_SCH_NETEM=y
-CONFIG_NET_CLS_TCINDEX=y
CONFIG_NET_CLS_ROUTE4=y
CONFIG_NET_CLS_FW=y
CONFIG_MTD=y
diff --git a/arch/sh/configs/sh7710voipgw_defconfig b/arch/sh/configs/sh7710voipgw_defconfig
index 99a5d0760532..5b151bb2bc43 100644
--- a/arch/sh/configs/sh7710voipgw_defconfig
+++ b/arch/sh/configs/sh7710voipgw_defconfig
@@ -27,7 +27,6 @@ CONFIG_NETFILTER=y
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_CBQ=y
CONFIG_NET_CLS_BASIC=y
-CONFIG_NET_CLS_TCINDEX=y
CONFIG_NET_CLS_ROUTE4=y
CONFIG_NET_CLS_U32=y
CONFIG_MTD=y
diff --git a/arch/sh/configs/titan_defconfig b/arch/sh/configs/titan_defconfig
index 11ff5fd510de..8e85f205d8f5 100644
--- a/arch/sh/configs/titan_defconfig
+++ b/arch/sh/configs/titan_defconfig
@@ -119,7 +119,6 @@ CONFIG_NET_SCH_DSMARK=m
CONFIG_NET_SCH_NETEM=m
CONFIG_NET_SCH_INGRESS=m
CONFIG_NET_CLS_BASIC=m
-CONFIG_NET_CLS_TCINDEX=m
CONFIG_NET_CLS_ROUTE4=m
CONFIG_NET_CLS_FW=m
CONFIG_NET_CLS_U32=m
diff --git a/arch/sh/drivers/pci/common.c b/arch/sh/drivers/pci/common.c
index ab9e791070b4..5442475d132e 100644
--- a/arch/sh/drivers/pci/common.c
+++ b/arch/sh/drivers/pci/common.c
@@ -90,7 +90,7 @@ static void pcibios_enable_err(struct timer_list *t)
{
struct pci_channel *hose = from_timer(hose, t, err_timer);
- del_timer(&hose->err_timer);
+ timer_delete(&hose->err_timer);
printk(KERN_DEBUG "PCI: re-enabling error IRQ.\n");
enable_irq(hose->err_irq);
}
@@ -99,7 +99,7 @@ static void pcibios_enable_serr(struct timer_list *t)
{
struct pci_channel *hose = from_timer(hose, t, serr_timer);
- del_timer(&hose->serr_timer);
+ timer_delete(&hose->serr_timer);
printk(KERN_DEBUG "PCI: re-enabling system error IRQ.\n");
enable_irq(hose->serr_irq);
}
diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h
index 96d938fdf224..6fe7123d38fa 100644
--- a/arch/sh/include/asm/pgalloc.h
+++ b/arch/sh/include/asm/pgalloc.h
@@ -32,10 +32,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
}
-#define __pte_free_tlb(tlb, pte, addr) \
-do { \
- pagetable_dtor(page_ptdesc(pte)); \
- tlb_remove_page_ptdesc((tlb), (page_ptdesc(pte))); \
-} while (0)
+#define __pte_free_tlb(tlb, pte, addr) \
+ tlb_remove_ptdesc((tlb), page_ptdesc(pte))
#endif /* __ASM_SH_PGALLOC_H */
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index 9644fe187a3f..008c30289eaa 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -71,7 +71,20 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__init_end = .;
- BSS_SECTION(0, PAGE_SIZE, 4)
+ __bss_start = .;
+ SBSS(0)
+ . = ALIGN(PAGE_SIZE);
+ .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
+ BSS_FIRST_SECTIONS
+ . = ALIGN(PAGE_SIZE);
+ *(.bss..page_aligned)
+ . = ALIGN(PAGE_SIZE);
+ *(.dynbss)
+ *(BSS_MAIN)
+ *(COMMON)
+ . = ALIGN(8);
+ }
+ __bss_stop = .;
_end = . ;
STABS_DEBUG
diff --git a/arch/sparc/kernel/led.c b/arch/sparc/kernel/led.c
index ab657b359789..f4fb82b019bb 100644
--- a/arch/sparc/kernel/led.c
+++ b/arch/sparc/kernel/led.c
@@ -84,7 +84,7 @@ static ssize_t led_proc_write(struct file *file, const char __user *buffer,
/* before we change anything we want to stop any running timers,
* otherwise calls such as on will have no persistent effect
*/
- del_timer_sync(&led_blink_timer);
+ timer_delete_sync(&led_blink_timer);
if (!strcmp(buf, "on")) {
auxio_set_led(AUXIO_LED_ON);
@@ -134,7 +134,7 @@ static int __init led_init(void)
static void __exit led_exit(void)
{
remove_proc_entry("led", NULL);
- del_timer_sync(&led_blink_timer);
+ timer_delete_sync(&led_blink_timer);
}
module_init(led_init);
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index 18051b1cfce0..79509c7f39de 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -12,6 +12,7 @@ config UML
select ARCH_HAS_KCOV
select ARCH_HAS_STRNCPY_FROM_USER
select ARCH_HAS_STRNLEN_USER
+ select ARCH_HAS_STRICT_KERNEL_RWX
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_KASAN if X86_64
select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN
diff --git a/arch/um/drivers/Kconfig b/arch/um/drivers/Kconfig
index ede40a160c5e..9cb196070614 100644
--- a/arch/um/drivers/Kconfig
+++ b/arch/um/drivers/Kconfig
@@ -345,16 +345,20 @@ config UML_RTC
by providing a fake RTC clock that causes a wakeup at the right
time.
-config UML_PCI_OVER_VIRTIO
- bool "Enable PCI over VIRTIO device simulation"
- # in theory, just VIRTIO is enough, but that causes recursion
- depends on VIRTIO_UML
+config UML_PCI
+ bool
select FORCE_PCI
select UML_IOMEM_EMULATION
select UML_DMA_EMULATION
select PCI_MSI
select PCI_LOCKLESS_CONFIG
+config UML_PCI_OVER_VIRTIO
+ bool "Enable PCI over VIRTIO device simulation"
+ # in theory, just VIRTIO is enough, but that causes recursion
+ depends on VIRTIO_UML
+ select UML_PCI
+
config UML_PCI_OVER_VIRTIO_DEVICE_ID
int "set the virtio device ID for PCI emulation"
default -1
diff --git a/arch/um/drivers/Makefile b/arch/um/drivers/Makefile
index 57882e6bc215..0a5820343ad3 100644
--- a/arch/um/drivers/Makefile
+++ b/arch/um/drivers/Makefile
@@ -60,7 +60,8 @@ obj-$(CONFIG_BLK_DEV_COW_COMMON) += cow_user.o
obj-$(CONFIG_UML_RANDOM) += random.o
obj-$(CONFIG_VIRTIO_UML) += virtio_uml.o
obj-$(CONFIG_UML_RTC) += rtc.o
-obj-$(CONFIG_UML_PCI_OVER_VIRTIO) += virt-pci.o
+obj-$(CONFIG_UML_PCI) += virt-pci.o
+obj-$(CONFIG_UML_PCI_OVER_VIRTIO) += virtio_pcidev.o
# pcap_user.o must be added explicitly.
USER_OBJS := fd.o null.o pty.o tty.o xterm.o slip_common.o vde_user.o vector_user.o
diff --git a/arch/um/drivers/random.c b/arch/um/drivers/random.c
index da985e0dc69a..ca08c91f47a3 100644
--- a/arch/um/drivers/random.c
+++ b/arch/um/drivers/random.c
@@ -79,7 +79,7 @@ static int __init rng_init (void)
if (err < 0)
goto err_out_cleanup_hw;
- sigio_broken(random_fd);
+ sigio_broken();
hwrng.name = RNG_MODULE_NAME;
hwrng.read = rng_dev_read;
diff --git a/arch/um/drivers/rtc_user.c b/arch/um/drivers/rtc_user.c
index 7c3cec4c68cf..51e79f3148cd 100644
--- a/arch/um/drivers/rtc_user.c
+++ b/arch/um/drivers/rtc_user.c
@@ -39,7 +39,7 @@ int uml_rtc_start(bool timetravel)
}
/* apparently timerfd won't send SIGIO, use workaround */
- sigio_broken(uml_rtc_irq_fds[0]);
+ sigio_broken();
err = add_sigio_fd(uml_rtc_irq_fds[0]);
if (err < 0) {
close(uml_rtc_irq_fds[0]);
diff --git a/arch/um/drivers/ubd.h b/arch/um/drivers/ubd.h
index f016fe15499f..2985c14661f4 100644
--- a/arch/um/drivers/ubd.h
+++ b/arch/um/drivers/ubd.h
@@ -7,8 +7,10 @@
#ifndef __UM_UBD_USER_H
#define __UM_UBD_USER_H
-extern int start_io_thread(unsigned long sp, int *fds_out);
-extern int io_thread(void *arg);
+#include <os.h>
+
+int start_io_thread(struct os_helper_thread **td_out, int *fd_out);
+void *io_thread(void *arg);
extern int kernel_fd;
extern int ubd_read_poll(int timeout);
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 0b1e61f72fb3..4de6613e7468 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -474,12 +474,12 @@ static irqreturn_t ubd_intr(int irq, void *dev)
}
/* Only changed by ubd_init, which is an initcall. */
-static int io_pid = -1;
+static struct os_helper_thread *io_td;
static void kill_io_thread(void)
{
- if(io_pid != -1)
- os_kill_process(io_pid, 1);
+ if (io_td)
+ os_kill_helper_thread(io_td);
}
__uml_exitcall(kill_io_thread);
@@ -1104,8 +1104,8 @@ static int __init ubd_init(void)
late_initcall(ubd_init);
-static int __init ubd_driver_init(void){
- unsigned long stack;
+static int __init ubd_driver_init(void)
+{
int err;
/* Set by CONFIG_BLK_DEV_UBD_SYNC or ubd=sync.*/
@@ -1114,13 +1114,11 @@ static int __init ubd_driver_init(void){
/* Letting ubd=sync be like using ubd#s= instead of ubd#= is
* enough. So use anyway the io thread. */
}
- stack = alloc_stack(0, 0);
- io_pid = start_io_thread(stack + PAGE_SIZE, &thread_fd);
- if(io_pid < 0){
+ err = start_io_thread(&io_td, &thread_fd);
+ if (err < 0) {
printk(KERN_ERR
"ubd : Failed to start I/O thread (errno = %d) - "
- "falling back to synchronous I/O\n", -io_pid);
- io_pid = -1;
+ "falling back to synchronous I/O\n", -err);
return 0;
}
err = um_request_irq(UBD_IRQ, thread_fd, IRQ_READ, ubd_intr,
@@ -1496,12 +1494,11 @@ int kernel_fd = -1;
/* Only changed by the io thread. XXX: currently unused. */
static int io_count;
-int io_thread(void *arg)
+void *io_thread(void *arg)
{
int n, count, written, res;
- os_set_pdeathsig();
- os_fix_helper_signals();
+ os_fix_helper_thread_signals();
while(1){
n = bulk_req_safe_read(
@@ -1543,5 +1540,5 @@ int io_thread(void *arg)
} while (written < n);
}
- return 0;
+ return NULL;
}
diff --git a/arch/um/drivers/ubd_user.c b/arch/um/drivers/ubd_user.c
index b4f8b8e60564..c5e6545f6fcf 100644
--- a/arch/um/drivers/ubd_user.c
+++ b/arch/um/drivers/ubd_user.c
@@ -25,9 +25,9 @@
static struct pollfd kernel_pollfd;
-int start_io_thread(unsigned long sp, int *fd_out)
+int start_io_thread(struct os_helper_thread **td_out, int *fd_out)
{
- int pid, fds[2], err;
+ int fds[2], err;
err = os_pipe(fds, 1, 1);
if(err < 0){
@@ -47,14 +47,14 @@ int start_io_thread(unsigned long sp, int *fd_out)
goto out_close;
}
- pid = clone(io_thread, (void *) sp, CLONE_FILES | CLONE_VM, NULL);
- if(pid < 0){
- err = -errno;
- printk("start_io_thread - clone failed : errno = %d\n", errno);
+ err = os_run_helper_thread(td_out, io_thread, NULL);
+ if (err < 0) {
+ printk("%s - failed to run helper thread, err = %d\n",
+ __func__, -err);
goto out_close;
}
- return(pid);
+ return 0;
out_close:
os_close_file(fds[0]);
diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
index 85b129e2b70b..b97bb52dd562 100644
--- a/arch/um/drivers/vector_kern.c
+++ b/arch/um/drivers/vector_kern.c
@@ -1112,7 +1112,7 @@ static int vector_net_close(struct net_device *dev)
struct vector_private *vp = netdev_priv(dev);
netif_stop_queue(dev);
- del_timer(&vp->tl);
+ timer_delete(&vp->tl);
vp->opened = false;
diff --git a/arch/um/drivers/virt-pci.c b/arch/um/drivers/virt-pci.c
index dd5580f975cc..b83b5a765d4e 100644
--- a/arch/um/drivers/virt-pci.c
+++ b/arch/um/drivers/virt-pci.c
@@ -5,52 +5,19 @@
*/
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/virtio.h>
-#include <linux/virtio_config.h>
#include <linux/logic_iomem.h>
#include <linux/of_platform.h>
#include <linux/irqdomain.h>
-#include <linux/virtio_pcidev.h>
-#include <linux/virtio-uml.h>
-#include <linux/delay.h>
#include <linux/msi.h>
#include <linux/unaligned.h>
#include <irq_kern.h>
+#include "virt-pci.h"
+
#define MAX_DEVICES 8
#define MAX_MSI_VECTORS 32
#define CFG_SPACE_SIZE 4096
-/* for MSI-X we have a 32-bit payload */
-#define MAX_IRQ_MSG_SIZE (sizeof(struct virtio_pcidev_msg) + sizeof(u32))
-#define NUM_IRQ_MSGS 10
-
-struct um_pci_message_buffer {
- struct virtio_pcidev_msg hdr;
- u8 data[8];
-};
-
-struct um_pci_device {
- struct virtio_device *vdev;
-
- /* for now just standard BARs */
- u8 resptr[PCI_STD_NUM_BARS];
-
- struct virtqueue *cmd_vq, *irq_vq;
-
-#define UM_PCI_WRITE_BUFS 20
- struct um_pci_message_buffer bufs[UM_PCI_WRITE_BUFS + 1];
- void *extra_ptrs[UM_PCI_WRITE_BUFS + 1];
- DECLARE_BITMAP(used_bufs, UM_PCI_WRITE_BUFS);
-
-#define UM_PCI_STAT_WAITING 0
- unsigned long status;
-
- int irq;
-
- bool platform;
-};
-
struct um_pci_device_reg {
struct um_pci_device *dev;
void __iomem *iomem;
@@ -65,179 +32,15 @@ static struct irq_domain *um_pci_inner_domain;
static struct irq_domain *um_pci_msi_domain;
static unsigned long um_pci_msi_used[BITS_TO_LONGS(MAX_MSI_VECTORS)];
-static unsigned int um_pci_max_delay_us = 40000;
-module_param_named(max_delay_us, um_pci_max_delay_us, uint, 0644);
-
-static int um_pci_get_buf(struct um_pci_device *dev, bool *posted)
-{
- int i;
-
- for (i = 0; i < UM_PCI_WRITE_BUFS; i++) {
- if (!test_and_set_bit(i, dev->used_bufs))
- return i;
- }
-
- *posted = false;
- return UM_PCI_WRITE_BUFS;
-}
-
-static void um_pci_free_buf(struct um_pci_device *dev, void *buf)
-{
- int i;
-
- if (buf == &dev->bufs[UM_PCI_WRITE_BUFS]) {
- kfree(dev->extra_ptrs[UM_PCI_WRITE_BUFS]);
- dev->extra_ptrs[UM_PCI_WRITE_BUFS] = NULL;
- return;
- }
-
- for (i = 0; i < UM_PCI_WRITE_BUFS; i++) {
- if (buf == &dev->bufs[i]) {
- kfree(dev->extra_ptrs[i]);
- dev->extra_ptrs[i] = NULL;
- WARN_ON(!test_and_clear_bit(i, dev->used_bufs));
- return;
- }
- }
-
- WARN_ON(1);
-}
-
-static int um_pci_send_cmd(struct um_pci_device *dev,
- struct virtio_pcidev_msg *cmd,
- unsigned int cmd_size,
- const void *extra, unsigned int extra_size,
- void *out, unsigned int out_size)
-{
- struct scatterlist out_sg, extra_sg, in_sg;
- struct scatterlist *sgs_list[] = {
- [0] = &out_sg,
- [1] = extra ? &extra_sg : &in_sg,
- [2] = extra ? &in_sg : NULL,
- };
- struct um_pci_message_buffer *buf;
- int delay_count = 0;
- bool bounce_out;
- int ret, len;
- int buf_idx;
- bool posted;
-
- if (WARN_ON(cmd_size < sizeof(*cmd) || cmd_size > sizeof(*buf)))
- return -EINVAL;
-
- switch (cmd->op) {
- case VIRTIO_PCIDEV_OP_CFG_WRITE:
- case VIRTIO_PCIDEV_OP_MMIO_WRITE:
- case VIRTIO_PCIDEV_OP_MMIO_MEMSET:
- /* in PCI, writes are posted, so don't wait */
- posted = !out;
- WARN_ON(!posted);
- break;
- default:
- posted = false;
- break;
- }
-
- bounce_out = !posted && cmd_size <= sizeof(*cmd) &&
- out && out_size <= sizeof(buf->data);
-
- buf_idx = um_pci_get_buf(dev, &posted);
- buf = &dev->bufs[buf_idx];
- memcpy(buf, cmd, cmd_size);
-
- if (posted && extra && extra_size > sizeof(buf) - cmd_size) {
- dev->extra_ptrs[buf_idx] = kmemdup(extra, extra_size,
- GFP_ATOMIC);
-
- if (!dev->extra_ptrs[buf_idx]) {
- um_pci_free_buf(dev, buf);
- return -ENOMEM;
- }
- extra = dev->extra_ptrs[buf_idx];
- } else if (extra && extra_size <= sizeof(buf) - cmd_size) {
- memcpy((u8 *)buf + cmd_size, extra, extra_size);
- cmd_size += extra_size;
- extra_size = 0;
- extra = NULL;
- cmd = (void *)buf;
- } else {
- cmd = (void *)buf;
- }
-
- sg_init_one(&out_sg, cmd, cmd_size);
- if (extra)
- sg_init_one(&extra_sg, extra, extra_size);
- /* allow stack for small buffers */
- if (bounce_out)
- sg_init_one(&in_sg, buf->data, out_size);
- else if (out)
- sg_init_one(&in_sg, out, out_size);
-
- /* add to internal virtio queue */
- ret = virtqueue_add_sgs(dev->cmd_vq, sgs_list,
- extra ? 2 : 1,
- out ? 1 : 0,
- cmd, GFP_ATOMIC);
- if (ret) {
- um_pci_free_buf(dev, buf);
- return ret;
- }
-
- if (posted) {
- virtqueue_kick(dev->cmd_vq);
- return 0;
- }
-
- /* kick and poll for getting a response on the queue */
- set_bit(UM_PCI_STAT_WAITING, &dev->status);
- virtqueue_kick(dev->cmd_vq);
- ret = 0;
-
- while (1) {
- void *completed = virtqueue_get_buf(dev->cmd_vq, &len);
-
- if (completed == buf)
- break;
-
- if (completed)
- um_pci_free_buf(dev, completed);
-
- if (WARN_ONCE(virtqueue_is_broken(dev->cmd_vq) ||
- ++delay_count > um_pci_max_delay_us,
- "um virt-pci delay: %d", delay_count)) {
- ret = -EIO;
- break;
- }
- udelay(1);
- }
- clear_bit(UM_PCI_STAT_WAITING, &dev->status);
-
- if (bounce_out)
- memcpy(out, buf->data, out_size);
-
- um_pci_free_buf(dev, buf);
-
- return ret;
-}
-
static unsigned long um_pci_cfgspace_read(void *priv, unsigned int offset,
int size)
{
struct um_pci_device_reg *reg = priv;
struct um_pci_device *dev = reg->dev;
- struct virtio_pcidev_msg hdr = {
- .op = VIRTIO_PCIDEV_OP_CFG_READ,
- .size = size,
- .addr = offset,
- };
- /* max 8, we might not use it all */
- u8 data[8];
if (!dev)
return ULONG_MAX;
- memset(data, 0xff, sizeof(data));
-
switch (size) {
case 1:
case 2:
@@ -251,23 +54,7 @@ static unsigned long um_pci_cfgspace_read(void *priv, unsigned int offset,
return ULONG_MAX;
}
- if (um_pci_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, data, size))
- return ULONG_MAX;
-
- switch (size) {
- case 1:
- return data[0];
- case 2:
- return le16_to_cpup((void *)data);
- case 4:
- return le32_to_cpup((void *)data);
-#ifdef CONFIG_64BIT
- case 8:
- return le64_to_cpup((void *)data);
-#endif
- default:
- return ULONG_MAX;
- }
+ return dev->ops->cfgspace_read(dev, offset, size);
}
static void um_pci_cfgspace_write(void *priv, unsigned int offset, int size,
@@ -275,42 +62,24 @@ static void um_pci_cfgspace_write(void *priv, unsigned int offset, int size,
{
struct um_pci_device_reg *reg = priv;
struct um_pci_device *dev = reg->dev;
- struct {
- struct virtio_pcidev_msg hdr;
- /* maximum size - we may only use parts of it */
- u8 data[8];
- } msg = {
- .hdr = {
- .op = VIRTIO_PCIDEV_OP_CFG_WRITE,
- .size = size,
- .addr = offset,
- },
- };
if (!dev)
return;
switch (size) {
case 1:
- msg.data[0] = (u8)val;
- break;
case 2:
- put_unaligned_le16(val, (void *)msg.data);
- break;
case 4:
- put_unaligned_le32(val, (void *)msg.data);
- break;
#ifdef CONFIG_64BIT
case 8:
- put_unaligned_le64(val, (void *)msg.data);
- break;
#endif
+ break;
default:
WARN(1, "invalid config space write size %d\n", size);
return;
}
- WARN_ON(um_pci_send_cmd(dev, &msg.hdr, sizeof(msg), NULL, 0, NULL, 0));
+ dev->ops->cfgspace_write(dev, offset, size, val);
}
static const struct logic_iomem_ops um_pci_device_cfgspace_ops = {
@@ -318,30 +87,14 @@ static const struct logic_iomem_ops um_pci_device_cfgspace_ops = {
.write = um_pci_cfgspace_write,
};
-static void um_pci_bar_copy_from(void *priv, void *buffer,
- unsigned int offset, int size)
+static unsigned long um_pci_bar_read(void *priv, unsigned int offset,
+ int size)
{
u8 *resptr = priv;
struct um_pci_device *dev = container_of(resptr - *resptr,
struct um_pci_device,
resptr[0]);
- struct virtio_pcidev_msg hdr = {
- .op = VIRTIO_PCIDEV_OP_MMIO_READ,
- .bar = *resptr,
- .size = size,
- .addr = offset,
- };
-
- memset(buffer, 0xff, size);
-
- um_pci_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, buffer, size);
-}
-
-static unsigned long um_pci_bar_read(void *priv, unsigned int offset,
- int size)
-{
- /* 8 is maximum size - we may only use parts of it */
- u8 data[8];
+ u8 bar = *resptr;
switch (size) {
case 1:
@@ -352,72 +105,60 @@ static unsigned long um_pci_bar_read(void *priv, unsigned int offset,
#endif
break;
default:
- WARN(1, "invalid config space read size %d\n", size);
+ WARN(1, "invalid bar read size %d\n", size);
return ULONG_MAX;
}
- um_pci_bar_copy_from(priv, data, offset, size);
+ return dev->ops->bar_read(dev, bar, offset, size);
+}
+
+static void um_pci_bar_write(void *priv, unsigned int offset, int size,
+ unsigned long val)
+{
+ u8 *resptr = priv;
+ struct um_pci_device *dev = container_of(resptr - *resptr,
+ struct um_pci_device,
+ resptr[0]);
+ u8 bar = *resptr;
switch (size) {
case 1:
- return data[0];
case 2:
- return le16_to_cpup((void *)data);
case 4:
- return le32_to_cpup((void *)data);
#ifdef CONFIG_64BIT
case 8:
- return le64_to_cpup((void *)data);
#endif
+ break;
default:
- return ULONG_MAX;
+ WARN(1, "invalid bar write size %d\n", size);
+ return;
}
+
+ dev->ops->bar_write(dev, bar, offset, size, val);
}
-static void um_pci_bar_copy_to(void *priv, unsigned int offset,
- const void *buffer, int size)
+static void um_pci_bar_copy_from(void *priv, void *buffer,
+ unsigned int offset, int size)
{
u8 *resptr = priv;
struct um_pci_device *dev = container_of(resptr - *resptr,
struct um_pci_device,
resptr[0]);
- struct virtio_pcidev_msg hdr = {
- .op = VIRTIO_PCIDEV_OP_MMIO_WRITE,
- .bar = *resptr,
- .size = size,
- .addr = offset,
- };
+ u8 bar = *resptr;
- um_pci_send_cmd(dev, &hdr, sizeof(hdr), buffer, size, NULL, 0);
+ dev->ops->bar_copy_from(dev, bar, buffer, offset, size);
}
-static void um_pci_bar_write(void *priv, unsigned int offset, int size,
- unsigned long val)
+static void um_pci_bar_copy_to(void *priv, unsigned int offset,
+ const void *buffer, int size)
{
- /* maximum size - we may only use parts of it */
- u8 data[8];
-
- switch (size) {
- case 1:
- data[0] = (u8)val;
- break;
- case 2:
- put_unaligned_le16(val, (void *)data);
- break;
- case 4:
- put_unaligned_le32(val, (void *)data);
- break;
-#ifdef CONFIG_64BIT
- case 8:
- put_unaligned_le64(val, (void *)data);
- break;
-#endif
- default:
- WARN(1, "invalid config space write size %d\n", size);
- return;
- }
+ u8 *resptr = priv;
+ struct um_pci_device *dev = container_of(resptr - *resptr,
+ struct um_pci_device,
+ resptr[0]);
+ u8 bar = *resptr;
- um_pci_bar_copy_to(priv, offset, data, size);
+ dev->ops->bar_copy_to(dev, bar, offset, buffer, size);
}
static void um_pci_bar_set(void *priv, unsigned int offset, u8 value, int size)
@@ -426,20 +167,9 @@ static void um_pci_bar_set(void *priv, unsigned int offset, u8 value, int size)
struct um_pci_device *dev = container_of(resptr - *resptr,
struct um_pci_device,
resptr[0]);
- struct {
- struct virtio_pcidev_msg hdr;
- u8 data;
- } msg = {
- .hdr = {
- .op = VIRTIO_PCIDEV_OP_CFG_WRITE,
- .bar = *resptr,
- .size = size,
- .addr = offset,
- },
- .data = value,
- };
+ u8 bar = *resptr;
- um_pci_send_cmd(dev, &msg.hdr, sizeof(msg), NULL, 0, NULL, 0);
+ dev->ops->bar_set(dev, bar, offset, value, size);
}
static const struct logic_iomem_ops um_pci_device_bar_ops = {
@@ -486,76 +216,6 @@ static void um_pci_rescan(void)
pci_unlock_rescan_remove();
}
-static void um_pci_irq_vq_addbuf(struct virtqueue *vq, void *buf, bool kick)
-{
- struct scatterlist sg[1];
-
- sg_init_one(sg, buf, MAX_IRQ_MSG_SIZE);
- if (virtqueue_add_inbuf(vq, sg, 1, buf, GFP_ATOMIC))
- kfree(buf);
- else if (kick)
- virtqueue_kick(vq);
-}
-
-static void um_pci_handle_irq_message(struct virtqueue *vq,
- struct virtio_pcidev_msg *msg)
-{
- struct virtio_device *vdev = vq->vdev;
- struct um_pci_device *dev = vdev->priv;
-
- if (!dev->irq)
- return;
-
- /* we should properly chain interrupts, but on ARCH=um we don't care */
-
- switch (msg->op) {
- case VIRTIO_PCIDEV_OP_INT:
- generic_handle_irq(dev->irq);
- break;
- case VIRTIO_PCIDEV_OP_MSI:
- /* our MSI message is just the interrupt number */
- if (msg->size == sizeof(u32))
- generic_handle_irq(le32_to_cpup((void *)msg->data));
- else
- generic_handle_irq(le16_to_cpup((void *)msg->data));
- break;
- case VIRTIO_PCIDEV_OP_PME:
- /* nothing to do - we already woke up due to the message */
- break;
- default:
- dev_err(&vdev->dev, "unexpected virt-pci message %d\n", msg->op);
- break;
- }
-}
-
-static void um_pci_cmd_vq_cb(struct virtqueue *vq)
-{
- struct virtio_device *vdev = vq->vdev;
- struct um_pci_device *dev = vdev->priv;
- void *cmd;
- int len;
-
- if (test_bit(UM_PCI_STAT_WAITING, &dev->status))
- return;
-
- while ((cmd = virtqueue_get_buf(vq, &len)))
- um_pci_free_buf(dev, cmd);
-}
-
-static void um_pci_irq_vq_cb(struct virtqueue *vq)
-{
- struct virtio_pcidev_msg *msg;
- int len;
-
- while ((msg = virtqueue_get_buf(vq, &len))) {
- if (len >= sizeof(*msg))
- um_pci_handle_irq_message(vq, msg);
-
- /* recycle the message buffer */
- um_pci_irq_vq_addbuf(vq, msg, true);
- }
-}
-
#ifdef CONFIG_OF
/* Copied from arch/x86/kernel/devicetree.c */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
@@ -577,200 +237,6 @@ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
}
#endif
-static int um_pci_init_vqs(struct um_pci_device *dev)
-{
- struct virtqueue_info vqs_info[] = {
- { "cmd", um_pci_cmd_vq_cb },
- { "irq", um_pci_irq_vq_cb },
- };
- struct virtqueue *vqs[2];
- int err, i;
-
- err = virtio_find_vqs(dev->vdev, 2, vqs, vqs_info, NULL);
- if (err)
- return err;
-
- dev->cmd_vq = vqs[0];
- dev->irq_vq = vqs[1];
-
- virtio_device_ready(dev->vdev);
-
- for (i = 0; i < NUM_IRQ_MSGS; i++) {
- void *msg = kzalloc(MAX_IRQ_MSG_SIZE, GFP_KERNEL);
-
- if (msg)
- um_pci_irq_vq_addbuf(dev->irq_vq, msg, false);
- }
-
- virtqueue_kick(dev->irq_vq);
-
- return 0;
-}
-
-static void __um_pci_virtio_platform_remove(struct virtio_device *vdev,
- struct um_pci_device *dev)
-{
- virtio_reset_device(vdev);
- vdev->config->del_vqs(vdev);
-
- mutex_lock(&um_pci_mtx);
- um_pci_platform_device = NULL;
- mutex_unlock(&um_pci_mtx);
-
- kfree(dev);
-}
-
-static int um_pci_virtio_platform_probe(struct virtio_device *vdev,
- struct um_pci_device *dev)
-{
- int ret;
-
- dev->platform = true;
-
- mutex_lock(&um_pci_mtx);
-
- if (um_pci_platform_device) {
- mutex_unlock(&um_pci_mtx);
- ret = -EBUSY;
- goto out_free;
- }
-
- ret = um_pci_init_vqs(dev);
- if (ret) {
- mutex_unlock(&um_pci_mtx);
- goto out_free;
- }
-
- um_pci_platform_device = dev;
-
- mutex_unlock(&um_pci_mtx);
-
- ret = of_platform_default_populate(vdev->dev.of_node, NULL, &vdev->dev);
- if (ret)
- __um_pci_virtio_platform_remove(vdev, dev);
-
- return ret;
-
-out_free:
- kfree(dev);
- return ret;
-}
-
-static int um_pci_virtio_probe(struct virtio_device *vdev)
-{
- struct um_pci_device *dev;
- int i, free = -1;
- int err = -ENOSPC;
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
- dev->vdev = vdev;
- vdev->priv = dev;
-
- if (of_device_is_compatible(vdev->dev.of_node, "simple-bus"))
- return um_pci_virtio_platform_probe(vdev, dev);
-
- mutex_lock(&um_pci_mtx);
- for (i = 0; i < MAX_DEVICES; i++) {
- if (um_pci_devices[i].dev)
- continue;
- free = i;
- break;
- }
-
- if (free < 0)
- goto error;
-
- err = um_pci_init_vqs(dev);
- if (err)
- goto error;
-
- dev->irq = irq_alloc_desc(numa_node_id());
- if (dev->irq < 0) {
- err = dev->irq;
- goto err_reset;
- }
- um_pci_devices[free].dev = dev;
- vdev->priv = dev;
-
- mutex_unlock(&um_pci_mtx);
-
- device_set_wakeup_enable(&vdev->dev, true);
-
- /*
- * In order to do suspend-resume properly, don't allow VQs
- * to be suspended.
- */
- virtio_uml_set_no_vq_suspend(vdev, true);
-
- um_pci_rescan();
- return 0;
-err_reset:
- virtio_reset_device(vdev);
- vdev->config->del_vqs(vdev);
-error:
- mutex_unlock(&um_pci_mtx);
- kfree(dev);
- return err;
-}
-
-static void um_pci_virtio_remove(struct virtio_device *vdev)
-{
- struct um_pci_device *dev = vdev->priv;
- int i;
-
- if (dev->platform) {
- of_platform_depopulate(&vdev->dev);
- __um_pci_virtio_platform_remove(vdev, dev);
- return;
- }
-
- device_set_wakeup_enable(&vdev->dev, false);
-
- mutex_lock(&um_pci_mtx);
- for (i = 0; i < MAX_DEVICES; i++) {
- if (um_pci_devices[i].dev != dev)
- continue;
-
- um_pci_devices[i].dev = NULL;
- irq_free_desc(dev->irq);
-
- break;
- }
- mutex_unlock(&um_pci_mtx);
-
- if (i < MAX_DEVICES) {
- struct pci_dev *pci_dev;
-
- pci_dev = pci_get_slot(bridge->bus, i);
- if (pci_dev)
- pci_stop_and_remove_bus_device_locked(pci_dev);
- }
-
- /* Stop all virtqueues */
- virtio_reset_device(vdev);
- dev->cmd_vq = NULL;
- dev->irq_vq = NULL;
- vdev->config->del_vqs(vdev);
-
- kfree(dev);
-}
-
-static struct virtio_device_id id_table[] = {
- { CONFIG_UML_PCI_OVER_VIRTIO_DEVICE_ID, VIRTIO_DEV_ANY_ID },
- { 0 },
-};
-MODULE_DEVICE_TABLE(virtio, id_table);
-
-static struct virtio_driver um_pci_virtio_driver = {
- .driver.name = "virtio-pci",
- .id_table = id_table,
- .probe = um_pci_virtio_probe,
- .remove = um_pci_virtio_remove,
-};
-
static struct resource virt_cfgspace_resource = {
.name = "PCI config space",
.start = 0xf0000000 - MAX_DEVICES * CFG_SPACE_SIZE,
@@ -889,7 +355,7 @@ static void um_pci_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
}
static struct irq_chip um_pci_msi_bottom_irq_chip = {
- .name = "UM virtio MSI",
+ .name = "UM virtual MSI",
.irq_compose_msi_msg = um_pci_compose_msi_msg,
};
@@ -939,7 +405,7 @@ static const struct irq_domain_ops um_pci_inner_domain_ops = {
};
static struct irq_chip um_pci_msi_irq_chip = {
- .name = "UM virtio PCIe MSI",
+ .name = "UM virtual PCIe MSI",
.irq_mask = pci_msi_mask_irq,
.irq_unmask = pci_msi_unmask_irq,
};
@@ -998,6 +464,78 @@ static struct resource virt_platform_resource = {
.flags = IORESOURCE_MEM,
};
+int um_pci_device_register(struct um_pci_device *dev)
+{
+ int i, free = -1;
+ int err = 0;
+
+ mutex_lock(&um_pci_mtx);
+ for (i = 0; i < MAX_DEVICES; i++) {
+ if (um_pci_devices[i].dev)
+ continue;
+ free = i;
+ break;
+ }
+
+ if (free < 0) {
+ err = -ENOSPC;
+ goto out;
+ }
+
+ dev->irq = irq_alloc_desc(numa_node_id());
+ if (dev->irq < 0) {
+ err = dev->irq;
+ goto out;
+ }
+
+ um_pci_devices[free].dev = dev;
+
+out:
+ mutex_unlock(&um_pci_mtx);
+ if (!err)
+ um_pci_rescan();
+ return err;
+}
+
+void um_pci_device_unregister(struct um_pci_device *dev)
+{
+ int i;
+
+ mutex_lock(&um_pci_mtx);
+ for (i = 0; i < MAX_DEVICES; i++) {
+ if (um_pci_devices[i].dev != dev)
+ continue;
+ um_pci_devices[i].dev = NULL;
+ irq_free_desc(dev->irq);
+ break;
+ }
+ mutex_unlock(&um_pci_mtx);
+
+ if (i < MAX_DEVICES) {
+ struct pci_dev *pci_dev;
+
+ pci_dev = pci_get_slot(bridge->bus, i);
+ if (pci_dev)
+ pci_stop_and_remove_bus_device_locked(pci_dev);
+ }
+}
+
+int um_pci_platform_device_register(struct um_pci_device *dev)
+{
+ guard(mutex)(&um_pci_mtx);
+ if (um_pci_platform_device)
+ return -EBUSY;
+ um_pci_platform_device = dev;
+ return 0;
+}
+
+void um_pci_platform_device_unregister(struct um_pci_device *dev)
+{
+ guard(mutex)(&um_pci_mtx);
+ if (um_pci_platform_device == dev)
+ um_pci_platform_device = NULL;
+}
+
static int __init um_pci_init(void)
{
struct irq_domain_info inner_domain_info = {
@@ -1014,10 +552,6 @@ static int __init um_pci_init(void)
WARN_ON(logic_iomem_add_region(&virt_platform_resource,
&um_pci_platform_ops));
- if (WARN(CONFIG_UML_PCI_OVER_VIRTIO_DEVICE_ID < 0,
- "No virtio device ID configured for PCI - no PCI support\n"))
- return 0;
-
bridge = pci_alloc_host_bridge(0);
if (!bridge) {
err = -ENOMEM;
@@ -1065,10 +599,8 @@ static int __init um_pci_init(void)
if (err)
goto free;
- err = register_virtio_driver(&um_pci_virtio_driver);
- if (err)
- goto free;
return 0;
+
free:
if (!IS_ERR_OR_NULL(um_pci_inner_domain))
irq_domain_remove(um_pci_inner_domain);
@@ -1080,11 +612,10 @@ free:
}
return err;
}
-module_init(um_pci_init);
+device_initcall(um_pci_init);
static void __exit um_pci_exit(void)
{
- unregister_virtio_driver(&um_pci_virtio_driver);
irq_domain_remove(um_pci_msi_domain);
irq_domain_remove(um_pci_inner_domain);
pci_free_resource_list(&bridge->windows);
diff --git a/arch/um/drivers/virt-pci.h b/arch/um/drivers/virt-pci.h
new file mode 100644
index 000000000000..b20d1475d1eb
--- /dev/null
+++ b/arch/um/drivers/virt-pci.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __UM_VIRT_PCI_H
+#define __UM_VIRT_PCI_H
+
+#include <linux/pci.h>
+
+struct um_pci_device {
+ const struct um_pci_ops *ops;
+
+ /* for now just standard BARs */
+ u8 resptr[PCI_STD_NUM_BARS];
+
+ int irq;
+};
+
+struct um_pci_ops {
+ unsigned long (*cfgspace_read)(struct um_pci_device *dev,
+ unsigned int offset, int size);
+ void (*cfgspace_write)(struct um_pci_device *dev, unsigned int offset,
+ int size, unsigned long val);
+
+ unsigned long (*bar_read)(struct um_pci_device *dev, int bar,
+ unsigned int offset, int size);
+ void (*bar_write)(struct um_pci_device *dev, int bar,
+ unsigned int offset, int size, unsigned long val);
+
+ void (*bar_copy_from)(struct um_pci_device *dev, int bar, void *buffer,
+ unsigned int offset, int size);
+ void (*bar_copy_to)(struct um_pci_device *dev, int bar,
+ unsigned int offset, const void *buffer, int size);
+ void (*bar_set)(struct um_pci_device *dev, int bar,
+ unsigned int offset, u8 value, int size);
+};
+
+int um_pci_device_register(struct um_pci_device *dev);
+void um_pci_device_unregister(struct um_pci_device *dev);
+
+int um_pci_platform_device_register(struct um_pci_device *dev);
+void um_pci_platform_device_unregister(struct um_pci_device *dev);
+
+#endif /* __UM_VIRT_PCI_H */
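
The new header above splits the UML PCI core from its transports: a backend embeds struct um_pci_device, points .ops at its struct um_pci_ops, and hands the device to um_pci_device_register() (or the platform variant). A minimal sketch of what a hypothetical non-virtio backend's probe could look like under that assumption follows; the my_backend_* names are illustrative only and not part of this patch.

/*
 * Hedged sketch of a backend using the new split; not code from the patch.
 */
#include "virt-pci.h"

struct my_backend_device {
	struct um_pci_device pdev;		/* embedded, handed to the core */
	/* transport-private state goes here */
};

static unsigned long my_backend_cfgspace_read(struct um_pci_device *pdev,
					       unsigned int offset, int size)
{
	/* return 'size' bytes of config space at 'offset' from the transport */
	return ULONG_MAX;			/* placeholder */
}

static const struct um_pci_ops my_backend_ops = {
	.cfgspace_read	= my_backend_cfgspace_read,
	/* the remaining ops are wired up the same way */
};

static int my_backend_probe(struct my_backend_device *dev)
{
	dev->pdev.ops = &my_backend_ops;
	/* allocates the device IRQ and triggers a bus rescan on success */
	return um_pci_device_register(&dev->pdev);
}
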
diff --git a/arch/um/drivers/virtio_pcidev.c b/arch/um/drivers/virtio_pcidev.c
new file mode 100644
index 000000000000..3c4c4c928fdd
--- /dev/null
+++ b/arch/um/drivers/virtio_pcidev.c
@@ -0,0 +1,628 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Intel Corporation
+ * Author: Johannes Berg <johannes@sipsolutions.net>
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/logic_iomem.h>
+#include <linux/of_platform.h>
+#include <linux/irqdomain.h>
+#include <linux/virtio_pcidev.h>
+#include <linux/virtio-uml.h>
+#include <linux/delay.h>
+#include <linux/msi.h>
+#include <linux/unaligned.h>
+#include <irq_kern.h>
+
+#include "virt-pci.h"
+
+#define to_virtio_pcidev(_pdev) \
+ container_of(_pdev, struct virtio_pcidev_device, pdev)
+
+/* for MSI-X we have a 32-bit payload */
+#define MAX_IRQ_MSG_SIZE (sizeof(struct virtio_pcidev_msg) + sizeof(u32))
+#define NUM_IRQ_MSGS 10
+
+struct virtio_pcidev_message_buffer {
+ struct virtio_pcidev_msg hdr;
+ u8 data[8];
+};
+
+struct virtio_pcidev_device {
+ struct um_pci_device pdev;
+ struct virtio_device *vdev;
+
+ struct virtqueue *cmd_vq, *irq_vq;
+
+#define VIRTIO_PCIDEV_WRITE_BUFS 20
+ struct virtio_pcidev_message_buffer bufs[VIRTIO_PCIDEV_WRITE_BUFS + 1];
+ void *extra_ptrs[VIRTIO_PCIDEV_WRITE_BUFS + 1];
+ DECLARE_BITMAP(used_bufs, VIRTIO_PCIDEV_WRITE_BUFS);
+
+#define UM_PCI_STAT_WAITING 0
+ unsigned long status;
+
+ bool platform;
+};
+
+static unsigned int virtio_pcidev_max_delay_us = 40000;
+module_param_named(max_delay_us, virtio_pcidev_max_delay_us, uint, 0644);
+
+static int virtio_pcidev_get_buf(struct virtio_pcidev_device *dev, bool *posted)
+{
+ int i;
+
+ for (i = 0; i < VIRTIO_PCIDEV_WRITE_BUFS; i++) {
+ if (!test_and_set_bit(i, dev->used_bufs))
+ return i;
+ }
+
+ *posted = false;
+ return VIRTIO_PCIDEV_WRITE_BUFS;
+}
+
+static void virtio_pcidev_free_buf(struct virtio_pcidev_device *dev, void *buf)
+{
+ int i;
+
+ if (buf == &dev->bufs[VIRTIO_PCIDEV_WRITE_BUFS]) {
+ kfree(dev->extra_ptrs[VIRTIO_PCIDEV_WRITE_BUFS]);
+ dev->extra_ptrs[VIRTIO_PCIDEV_WRITE_BUFS] = NULL;
+ return;
+ }
+
+ for (i = 0; i < VIRTIO_PCIDEV_WRITE_BUFS; i++) {
+ if (buf == &dev->bufs[i]) {
+ kfree(dev->extra_ptrs[i]);
+ dev->extra_ptrs[i] = NULL;
+ WARN_ON(!test_and_clear_bit(i, dev->used_bufs));
+ return;
+ }
+ }
+
+ WARN_ON(1);
+}
+
+static int virtio_pcidev_send_cmd(struct virtio_pcidev_device *dev,
+ struct virtio_pcidev_msg *cmd,
+ unsigned int cmd_size,
+ const void *extra, unsigned int extra_size,
+ void *out, unsigned int out_size)
+{
+ struct scatterlist out_sg, extra_sg, in_sg;
+ struct scatterlist *sgs_list[] = {
+ [0] = &out_sg,
+ [1] = extra ? &extra_sg : &in_sg,
+ [2] = extra ? &in_sg : NULL,
+ };
+ struct virtio_pcidev_message_buffer *buf;
+ int delay_count = 0;
+ bool bounce_out;
+ int ret, len;
+ int buf_idx;
+ bool posted;
+
+ if (WARN_ON(cmd_size < sizeof(*cmd) || cmd_size > sizeof(*buf)))
+ return -EINVAL;
+
+ switch (cmd->op) {
+ case VIRTIO_PCIDEV_OP_CFG_WRITE:
+ case VIRTIO_PCIDEV_OP_MMIO_WRITE:
+ case VIRTIO_PCIDEV_OP_MMIO_MEMSET:
+ /* in PCI, writes are posted, so don't wait */
+ posted = !out;
+ WARN_ON(!posted);
+ break;
+ default:
+ posted = false;
+ break;
+ }
+
+ bounce_out = !posted && cmd_size <= sizeof(*cmd) &&
+ out && out_size <= sizeof(buf->data);
+
+ buf_idx = virtio_pcidev_get_buf(dev, &posted);
+ buf = &dev->bufs[buf_idx];
+ memcpy(buf, cmd, cmd_size);
+
+ if (posted && extra && extra_size > sizeof(buf) - cmd_size) {
+ dev->extra_ptrs[buf_idx] = kmemdup(extra, extra_size,
+ GFP_ATOMIC);
+
+ if (!dev->extra_ptrs[buf_idx]) {
+ virtio_pcidev_free_buf(dev, buf);
+ return -ENOMEM;
+ }
+ extra = dev->extra_ptrs[buf_idx];
+ } else if (extra && extra_size <= sizeof(buf) - cmd_size) {
+ memcpy((u8 *)buf + cmd_size, extra, extra_size);
+ cmd_size += extra_size;
+ extra_size = 0;
+ extra = NULL;
+ cmd = (void *)buf;
+ } else {
+ cmd = (void *)buf;
+ }
+
+ sg_init_one(&out_sg, cmd, cmd_size);
+ if (extra)
+ sg_init_one(&extra_sg, extra, extra_size);
+ /* allow stack for small buffers */
+ if (bounce_out)
+ sg_init_one(&in_sg, buf->data, out_size);
+ else if (out)
+ sg_init_one(&in_sg, out, out_size);
+
+ /* add to internal virtio queue */
+ ret = virtqueue_add_sgs(dev->cmd_vq, sgs_list,
+ extra ? 2 : 1,
+ out ? 1 : 0,
+ cmd, GFP_ATOMIC);
+ if (ret) {
+ virtio_pcidev_free_buf(dev, buf);
+ return ret;
+ }
+
+ if (posted) {
+ virtqueue_kick(dev->cmd_vq);
+ return 0;
+ }
+
+ /* kick and poll for getting a response on the queue */
+ set_bit(UM_PCI_STAT_WAITING, &dev->status);
+ virtqueue_kick(dev->cmd_vq);
+ ret = 0;
+
+ while (1) {
+ void *completed = virtqueue_get_buf(dev->cmd_vq, &len);
+
+ if (completed == buf)
+ break;
+
+ if (completed)
+ virtio_pcidev_free_buf(dev, completed);
+
+ if (WARN_ONCE(virtqueue_is_broken(dev->cmd_vq) ||
+ ++delay_count > virtio_pcidev_max_delay_us,
+ "um virt-pci delay: %d", delay_count)) {
+ ret = -EIO;
+ break;
+ }
+ udelay(1);
+ }
+ clear_bit(UM_PCI_STAT_WAITING, &dev->status);
+
+ if (bounce_out)
+ memcpy(out, buf->data, out_size);
+
+ virtio_pcidev_free_buf(dev, buf);
+
+ return ret;
+}
+
+static unsigned long virtio_pcidev_cfgspace_read(struct um_pci_device *pdev,
+ unsigned int offset, int size)
+{
+ struct virtio_pcidev_device *dev = to_virtio_pcidev(pdev);
+ struct virtio_pcidev_msg hdr = {
+ .op = VIRTIO_PCIDEV_OP_CFG_READ,
+ .size = size,
+ .addr = offset,
+ };
+ /* max 8, we might not use it all */
+ u8 data[8];
+
+ memset(data, 0xff, sizeof(data));
+
+ /* size has been checked in um_pci_cfgspace_read() */
+ if (virtio_pcidev_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, data, size))
+ return ULONG_MAX;
+
+ switch (size) {
+ case 1:
+ return data[0];
+ case 2:
+ return le16_to_cpup((void *)data);
+ case 4:
+ return le32_to_cpup((void *)data);
+#ifdef CONFIG_64BIT
+ case 8:
+ return le64_to_cpup((void *)data);
+#endif
+ default:
+ return ULONG_MAX;
+ }
+}
+
+static void virtio_pcidev_cfgspace_write(struct um_pci_device *pdev,
+ unsigned int offset, int size,
+ unsigned long val)
+{
+ struct virtio_pcidev_device *dev = to_virtio_pcidev(pdev);
+ struct {
+ struct virtio_pcidev_msg hdr;
+ /* maximum size - we may only use parts of it */
+ u8 data[8];
+ } msg = {
+ .hdr = {
+ .op = VIRTIO_PCIDEV_OP_CFG_WRITE,
+ .size = size,
+ .addr = offset,
+ },
+ };
+
+ /* size has been checked in um_pci_cfgspace_write() */
+ switch (size) {
+ case 1:
+ msg.data[0] = (u8)val;
+ break;
+ case 2:
+ put_unaligned_le16(val, (void *)msg.data);
+ break;
+ case 4:
+ put_unaligned_le32(val, (void *)msg.data);
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ put_unaligned_le64(val, (void *)msg.data);
+ break;
+#endif
+ }
+
+ WARN_ON(virtio_pcidev_send_cmd(dev, &msg.hdr, sizeof(msg), NULL, 0, NULL, 0));
+}
+
+static void virtio_pcidev_bar_copy_from(struct um_pci_device *pdev,
+ int bar, void *buffer,
+ unsigned int offset, int size)
+{
+ struct virtio_pcidev_device *dev = to_virtio_pcidev(pdev);
+ struct virtio_pcidev_msg hdr = {
+ .op = VIRTIO_PCIDEV_OP_MMIO_READ,
+ .bar = bar,
+ .size = size,
+ .addr = offset,
+ };
+
+ memset(buffer, 0xff, size);
+
+ virtio_pcidev_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, buffer, size);
+}
+
+static unsigned long virtio_pcidev_bar_read(struct um_pci_device *pdev, int bar,
+ unsigned int offset, int size)
+{
+ /* 8 is maximum size - we may only use parts of it */
+ u8 data[8];
+
+ /* size has been checked in um_pci_bar_read() */
+ virtio_pcidev_bar_copy_from(pdev, bar, data, offset, size);
+
+ switch (size) {
+ case 1:
+ return data[0];
+ case 2:
+ return le16_to_cpup((void *)data);
+ case 4:
+ return le32_to_cpup((void *)data);
+#ifdef CONFIG_64BIT
+ case 8:
+ return le64_to_cpup((void *)data);
+#endif
+ default:
+ return ULONG_MAX;
+ }
+}
+
+static void virtio_pcidev_bar_copy_to(struct um_pci_device *pdev,
+ int bar, unsigned int offset,
+ const void *buffer, int size)
+{
+ struct virtio_pcidev_device *dev = to_virtio_pcidev(pdev);
+ struct virtio_pcidev_msg hdr = {
+ .op = VIRTIO_PCIDEV_OP_MMIO_WRITE,
+ .bar = bar,
+ .size = size,
+ .addr = offset,
+ };
+
+ virtio_pcidev_send_cmd(dev, &hdr, sizeof(hdr), buffer, size, NULL, 0);
+}
+
+static void virtio_pcidev_bar_write(struct um_pci_device *pdev, int bar,
+ unsigned int offset, int size,
+ unsigned long val)
+{
+ /* maximum size - we may only use parts of it */
+ u8 data[8];
+
+ /* size has been checked in um_pci_bar_write() */
+ switch (size) {
+ case 1:
+ data[0] = (u8)val;
+ break;
+ case 2:
+ put_unaligned_le16(val, (void *)data);
+ break;
+ case 4:
+ put_unaligned_le32(val, (void *)data);
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ put_unaligned_le64(val, (void *)data);
+ break;
+#endif
+ }
+
+ virtio_pcidev_bar_copy_to(pdev, bar, offset, data, size);
+}
+
+static void virtio_pcidev_bar_set(struct um_pci_device *pdev, int bar,
+ unsigned int offset, u8 value, int size)
+{
+ struct virtio_pcidev_device *dev = to_virtio_pcidev(pdev);
+ struct {
+ struct virtio_pcidev_msg hdr;
+ u8 data;
+ } msg = {
+ .hdr = {
+ .op = VIRTIO_PCIDEV_OP_CFG_WRITE,
+ .bar = bar,
+ .size = size,
+ .addr = offset,
+ },
+ .data = value,
+ };
+
+ virtio_pcidev_send_cmd(dev, &msg.hdr, sizeof(msg), NULL, 0, NULL, 0);
+}
+
+static const struct um_pci_ops virtio_pcidev_um_pci_ops = {
+ .cfgspace_read = virtio_pcidev_cfgspace_read,
+ .cfgspace_write = virtio_pcidev_cfgspace_write,
+ .bar_read = virtio_pcidev_bar_read,
+ .bar_write = virtio_pcidev_bar_write,
+ .bar_copy_from = virtio_pcidev_bar_copy_from,
+ .bar_copy_to = virtio_pcidev_bar_copy_to,
+ .bar_set = virtio_pcidev_bar_set,
+};
+
+static void virtio_pcidev_irq_vq_addbuf(struct virtqueue *vq, void *buf, bool kick)
+{
+ struct scatterlist sg[1];
+
+ sg_init_one(sg, buf, MAX_IRQ_MSG_SIZE);
+ if (virtqueue_add_inbuf(vq, sg, 1, buf, GFP_ATOMIC))
+ kfree(buf);
+ else if (kick)
+ virtqueue_kick(vq);
+}
+
+static void virtio_pcidev_handle_irq_message(struct virtqueue *vq,
+ struct virtio_pcidev_msg *msg)
+{
+ struct virtio_device *vdev = vq->vdev;
+ struct virtio_pcidev_device *dev = vdev->priv;
+
+ if (!dev->pdev.irq)
+ return;
+
+ /* we should properly chain interrupts, but on ARCH=um we don't care */
+
+ switch (msg->op) {
+ case VIRTIO_PCIDEV_OP_INT:
+ generic_handle_irq(dev->pdev.irq);
+ break;
+ case VIRTIO_PCIDEV_OP_MSI:
+ /* our MSI message is just the interrupt number */
+ if (msg->size == sizeof(u32))
+ generic_handle_irq(le32_to_cpup((void *)msg->data));
+ else
+ generic_handle_irq(le16_to_cpup((void *)msg->data));
+ break;
+ case VIRTIO_PCIDEV_OP_PME:
+ /* nothing to do - we already woke up due to the message */
+ break;
+ default:
+ dev_err(&vdev->dev, "unexpected virt-pci message %d\n", msg->op);
+ break;
+ }
+}
+
+static void virtio_pcidev_cmd_vq_cb(struct virtqueue *vq)
+{
+ struct virtio_device *vdev = vq->vdev;
+ struct virtio_pcidev_device *dev = vdev->priv;
+ void *cmd;
+ int len;
+
+ if (test_bit(UM_PCI_STAT_WAITING, &dev->status))
+ return;
+
+ while ((cmd = virtqueue_get_buf(vq, &len)))
+ virtio_pcidev_free_buf(dev, cmd);
+}
+
+static void virtio_pcidev_irq_vq_cb(struct virtqueue *vq)
+{
+ struct virtio_pcidev_msg *msg;
+ int len;
+
+ while ((msg = virtqueue_get_buf(vq, &len))) {
+ if (len >= sizeof(*msg))
+ virtio_pcidev_handle_irq_message(vq, msg);
+
+ /* recycle the message buffer */
+ virtio_pcidev_irq_vq_addbuf(vq, msg, true);
+ }
+}
+
+static int virtio_pcidev_init_vqs(struct virtio_pcidev_device *dev)
+{
+ struct virtqueue_info vqs_info[] = {
+ { "cmd", virtio_pcidev_cmd_vq_cb },
+ { "irq", virtio_pcidev_irq_vq_cb },
+ };
+ struct virtqueue *vqs[2];
+ int err, i;
+
+ err = virtio_find_vqs(dev->vdev, 2, vqs, vqs_info, NULL);
+ if (err)
+ return err;
+
+ dev->cmd_vq = vqs[0];
+ dev->irq_vq = vqs[1];
+
+ virtio_device_ready(dev->vdev);
+
+ for (i = 0; i < NUM_IRQ_MSGS; i++) {
+ void *msg = kzalloc(MAX_IRQ_MSG_SIZE, GFP_KERNEL);
+
+ if (msg)
+ virtio_pcidev_irq_vq_addbuf(dev->irq_vq, msg, false);
+ }
+
+ virtqueue_kick(dev->irq_vq);
+
+ return 0;
+}
+
+static void __virtio_pcidev_virtio_platform_remove(struct virtio_device *vdev,
+ struct virtio_pcidev_device *dev)
+{
+ um_pci_platform_device_unregister(&dev->pdev);
+
+ virtio_reset_device(vdev);
+ vdev->config->del_vqs(vdev);
+
+ kfree(dev);
+}
+
+static int virtio_pcidev_virtio_platform_probe(struct virtio_device *vdev,
+ struct virtio_pcidev_device *dev)
+{
+ int err;
+
+ dev->platform = true;
+
+ err = virtio_pcidev_init_vqs(dev);
+ if (err)
+ goto err_free;
+
+ err = um_pci_platform_device_register(&dev->pdev);
+ if (err)
+ goto err_reset;
+
+ err = of_platform_default_populate(vdev->dev.of_node, NULL, &vdev->dev);
+ if (err)
+ goto err_unregister;
+
+ return 0;
+
+err_unregister:
+ um_pci_platform_device_unregister(&dev->pdev);
+err_reset:
+ virtio_reset_device(vdev);
+ vdev->config->del_vqs(vdev);
+err_free:
+ kfree(dev);
+ return err;
+}
+
+static int virtio_pcidev_virtio_probe(struct virtio_device *vdev)
+{
+ struct virtio_pcidev_device *dev;
+ int err;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->vdev = vdev;
+ vdev->priv = dev;
+
+ dev->pdev.ops = &virtio_pcidev_um_pci_ops;
+
+ if (of_device_is_compatible(vdev->dev.of_node, "simple-bus"))
+ return virtio_pcidev_virtio_platform_probe(vdev, dev);
+
+ err = virtio_pcidev_init_vqs(dev);
+ if (err)
+ goto err_free;
+
+ err = um_pci_device_register(&dev->pdev);
+ if (err)
+ goto err_reset;
+
+ device_set_wakeup_enable(&vdev->dev, true);
+
+ /*
+ * In order to do suspend-resume properly, don't allow VQs
+ * to be suspended.
+ */
+ virtio_uml_set_no_vq_suspend(vdev, true);
+
+ return 0;
+
+err_reset:
+ virtio_reset_device(vdev);
+ vdev->config->del_vqs(vdev);
+err_free:
+ kfree(dev);
+ return err;
+}
+
+static void virtio_pcidev_virtio_remove(struct virtio_device *vdev)
+{
+ struct virtio_pcidev_device *dev = vdev->priv;
+
+ if (dev->platform) {
+ of_platform_depopulate(&vdev->dev);
+ __virtio_pcidev_virtio_platform_remove(vdev, dev);
+ return;
+ }
+
+ device_set_wakeup_enable(&vdev->dev, false);
+
+ um_pci_device_unregister(&dev->pdev);
+
+ /* Stop all virtqueues */
+ virtio_reset_device(vdev);
+ dev->cmd_vq = NULL;
+ dev->irq_vq = NULL;
+ vdev->config->del_vqs(vdev);
+
+ kfree(dev);
+}
+
+static struct virtio_device_id id_table[] = {
+ { CONFIG_UML_PCI_OVER_VIRTIO_DEVICE_ID, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+MODULE_DEVICE_TABLE(virtio, id_table);
+
+static struct virtio_driver virtio_pcidev_virtio_driver = {
+ .driver.name = "virtio-pci",
+ .id_table = id_table,
+ .probe = virtio_pcidev_virtio_probe,
+ .remove = virtio_pcidev_virtio_remove,
+};
+
+static int __init virtio_pcidev_init(void)
+{
+ if (WARN(CONFIG_UML_PCI_OVER_VIRTIO_DEVICE_ID < 0,
+ "No virtio device ID configured for PCI - no PCI support\n"))
+ return 0;
+
+ return register_virtio_driver(&virtio_pcidev_virtio_driver);
+}
+late_initcall(virtio_pcidev_init);
+
+static void __exit virtio_pcidev_exit(void)
+{
+ unregister_virtio_driver(&virtio_pcidev_virtio_driver);
+}
+module_exit(virtio_pcidev_exit);
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index 428f2c5158c2..04ab3b653a48 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -13,6 +13,7 @@ generic-y += irq_work.h
generic-y += kdebug.h
generic-y += mcs_spinlock.h
generic-y += mmiowb.h
+generic-y += module.h
generic-y += module.lds.h
generic-y += param.h
generic-y += parport.h
diff --git a/arch/um/include/asm/pgalloc.h b/arch/um/include/asm/pgalloc.h
index f0af23c3aeb2..826ec44b58cd 100644
--- a/arch/um/include/asm/pgalloc.h
+++ b/arch/um/include/asm/pgalloc.h
@@ -25,27 +25,18 @@
*/
extern pgd_t *pgd_alloc(struct mm_struct *);
-#define __pte_free_tlb(tlb, pte, address) \
-do { \
- pagetable_dtor(page_ptdesc(pte)); \
- tlb_remove_page_ptdesc((tlb), (page_ptdesc(pte))); \
-} while (0)
+#define __pte_free_tlb(tlb, pte, address) \
+ tlb_remove_ptdesc((tlb), page_ptdesc(pte))
#if CONFIG_PGTABLE_LEVELS > 2
-#define __pmd_free_tlb(tlb, pmd, address) \
-do { \
- pagetable_dtor(virt_to_ptdesc(pmd)); \
- tlb_remove_page_ptdesc((tlb), virt_to_ptdesc(pmd)); \
-} while (0)
+#define __pmd_free_tlb(tlb, pmd, address) \
+ tlb_remove_ptdesc((tlb), virt_to_ptdesc(pmd))
#if CONFIG_PGTABLE_LEVELS > 3
-#define __pud_free_tlb(tlb, pud, address) \
-do { \
- pagetable_dtor(virt_to_ptdesc(pud)); \
- tlb_remove_page_ptdesc((tlb), virt_to_ptdesc(pud)); \
-} while (0)
+#define __pud_free_tlb(tlb, pud, address) \
+ tlb_remove_ptdesc((tlb), virt_to_ptdesc(pud))
#endif
#endif
diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h
index 5d6356eafffe..8a789c17acd8 100644
--- a/arch/um/include/asm/processor-generic.h
+++ b/arch/um/include/asm/processor-generic.h
@@ -31,6 +31,8 @@ struct thread_struct {
} thread;
} request;
+ void *segv_continue;
+
/* Contains variable sized FP registers */
struct pt_regs regs;
};
diff --git a/arch/um/include/asm/uaccess.h b/arch/um/include/asm/uaccess.h
index 1d4b6bbc1b65..3a08f9029a3f 100644
--- a/arch/um/include/asm/uaccess.h
+++ b/arch/um/include/asm/uaccess.h
@@ -9,6 +9,7 @@
#include <asm/elf.h>
#include <linux/unaligned.h>
+#include <sysdep/faultinfo.h>
#define __under_task_size(addr, size) \
(((unsigned long) (addr) < TASK_SIZE) && \
@@ -44,19 +45,28 @@ static inline int __access_ok(const void __user *ptr, unsigned long size)
__access_ok_vsyscall(addr, size));
}
-/* no pagefaults for kernel addresses in um */
#define __get_kernel_nofault(dst, src, type, err_label) \
do { \
- *((type *)dst) = get_unaligned((type *)(src)); \
- if (0) /* make sure the label looks used to the compiler */ \
+ int __faulted; \
+ \
+ ___backtrack_faulted(__faulted); \
+ if (__faulted) { \
+ *((type *)dst) = (type) 0; \
goto err_label; \
+ } \
+ *((type *)dst) = get_unaligned((type *)(src)); \
+ current->thread.segv_continue = NULL; \
} while (0)
#define __put_kernel_nofault(dst, src, type, err_label) \
do { \
- put_unaligned(*((type *)src), (type *)(dst)); \
- if (0) /* make sure the label looks used to the compiler */ \
+ int __faulted; \
+ \
+ ___backtrack_faulted(__faulted); \
+ if (__faulted) \
goto err_label; \
+ put_unaligned(*((type *)src), (type *)(dst)); \
+ current->thread.segv_continue = NULL; \
} while (0)
#endif
diff --git a/arch/um/include/linux/time-internal.h b/arch/um/include/linux/time-internal.h
index b22226634ff6..138908b999d7 100644
--- a/arch/um/include/linux/time-internal.h
+++ b/arch/um/include/linux/time-internal.h
@@ -83,6 +83,8 @@ extern void time_travel_not_configured(void);
#define time_travel_del_event(...) time_travel_not_configured()
#endif /* CONFIG_UML_TIME_TRAVEL_SUPPORT */
+extern unsigned long tt_extra_sched_jiffies;
+
/*
* Without CONFIG_UML_TIME_TRAVEL_SUPPORT this is a linker error if used,
* which is intentional since we really shouldn't link it in that case.
diff --git a/arch/um/include/shared/arch.h b/arch/um/include/shared/arch.h
index 880ee42a3329..cc398a21ad96 100644
--- a/arch/um/include/shared/arch.h
+++ b/arch/um/include/shared/arch.h
@@ -12,4 +12,6 @@ extern void arch_check_bugs(void);
extern int arch_fixup(unsigned long address, struct uml_pt_regs *regs);
extern void arch_examine_signal(int sig, struct uml_pt_regs *regs);
+void mc_set_rip(void *_mc, void *target);
+
#endif
diff --git a/arch/um/include/shared/as-layout.h b/arch/um/include/shared/as-layout.h
index ea65f151bf48..4f44dcce8a7c 100644
--- a/arch/um/include/shared/as-layout.h
+++ b/arch/um/include/shared/as-layout.h
@@ -50,7 +50,7 @@ extern int linux_main(int argc, char **argv, char **envp);
extern void uml_finishsetup(void);
struct siginfo;
-extern void (*sig_info[])(int, struct siginfo *si, struct uml_pt_regs *);
+extern void (*sig_info[])(int, struct siginfo *si, struct uml_pt_regs *, void *);
#endif
diff --git a/arch/um/include/shared/irq_user.h b/arch/um/include/shared/irq_user.h
index da0f6eea30d0..88835b52ae2b 100644
--- a/arch/um/include/shared/irq_user.h
+++ b/arch/um/include/shared/irq_user.h
@@ -15,7 +15,8 @@ enum um_irq_type {
};
struct siginfo;
-extern void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs);
+extern void sigio_handler(int sig, struct siginfo *unused_si,
+ struct uml_pt_regs *regs, void *mc);
void sigio_run_timetravel_handlers(void);
extern void free_irq_by_fd(int fd);
extern void deactivate_fd(int fd, int irqnum);
diff --git a/arch/um/include/shared/kern_util.h b/arch/um/include/shared/kern_util.h
index f21dc8517538..00ca3e12fd9a 100644
--- a/arch/um/include/shared/kern_util.h
+++ b/arch/um/include/shared/kern_util.h
@@ -24,10 +24,12 @@ extern void free_stack(unsigned long stack, int order);
struct pt_regs;
extern void do_signal(struct pt_regs *regs);
extern void interrupt_end(void);
-extern void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs);
+extern void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs,
+ void *mc);
extern unsigned long segv(struct faultinfo fi, unsigned long ip,
- int is_user, struct uml_pt_regs *regs);
+ int is_user, struct uml_pt_regs *regs,
+ void *mc);
extern int handle_page_fault(unsigned long address, unsigned long ip,
int is_write, int is_user, int *code_out);
@@ -59,8 +61,10 @@ extern unsigned long from_irq_stack(int nested);
extern int singlestepping(void);
-extern void segv_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs);
-extern void winch(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs);
+extern void segv_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs,
+ void *mc);
+extern void winch(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs,
+ void *mc);
extern void fatal_sigsegv(void) __attribute__ ((noreturn));
void um_idle_sleep(void);
diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h
index 5babad8c5f75..152a60080d5b 100644
--- a/arch/um/include/shared/os.h
+++ b/arch/um/include/shared/os.h
@@ -213,7 +213,6 @@ extern int os_protect_memory(void *addr, unsigned long len,
extern int os_unmap_memory(void *addr, int len);
extern int os_drop_memory(void *addr, int length);
extern int can_drop_memory(void);
-extern int os_mincore(void *addr, unsigned long len);
void os_set_pdeathsig(void);
@@ -225,6 +224,11 @@ extern int run_helper_thread(int (*proc)(void *), void *arg,
unsigned int flags, unsigned long *stack_out);
extern int helper_wait(int pid);
+struct os_helper_thread;
+int os_run_helper_thread(struct os_helper_thread **td_out,
+ void *(*routine)(void *), void *arg);
+void os_kill_helper_thread(struct os_helper_thread *td);
+void os_fix_helper_thread_signals(void);
/* umid.c */
extern int umid_file_name(char *name, char *buf, int len);
@@ -310,7 +314,7 @@ extern void um_irqs_resume(void);
extern int add_sigio_fd(int fd);
extern int ignore_sigio_fd(int fd);
extern void maybe_sigio_broken(int fd);
-extern void sigio_broken(int fd);
+extern void sigio_broken(void);
/*
* unlocked versions for IRQ controller code.
*
diff --git a/arch/um/include/shared/sigio.h b/arch/um/include/shared/sigio.h
index e60c8b227844..c6c2edce1f6d 100644
--- a/arch/um/include/shared/sigio.h
+++ b/arch/um/include/shared/sigio.h
@@ -6,7 +6,6 @@
#ifndef __SIGIO_H__
#define __SIGIO_H__
-extern int write_sigio_irq(int fd);
extern void sigio_lock(void);
extern void sigio_unlock(void);
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
index f8567b933ffa..4df1cd0d2017 100644
--- a/arch/um/kernel/Makefile
+++ b/arch/um/kernel/Makefile
@@ -17,7 +17,7 @@ extra-y := vmlinux.lds
obj-y = config.o exec.o exitcode.o irq.o ksyms.o mem.o \
physmem.o process.o ptrace.o reboot.o sigio.o \
signal.o sysrq.o time.o tlb.o trap.o \
- um_arch.o umid.o maccess.o kmsg_dump.o capflags.o skas/
+ um_arch.o umid.o kmsg_dump.o capflags.o skas/
obj-y += load_file.o
obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index a4991746f5ea..abe8f30a521c 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -236,7 +236,8 @@ static void _sigio_handler(struct uml_pt_regs *regs,
free_irqs();
}
-void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
+void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs,
+ void *mc)
{
preempt_disable();
_sigio_handler(regs, irqs_suspended);
diff --git a/arch/um/kernel/maccess.c b/arch/um/kernel/maccess.c
deleted file mode 100644
index 8ccd56813f68..000000000000
--- a/arch/um/kernel/maccess.c
+++ /dev/null
@@ -1,19 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2013 Richard Weinberger <richrd@nod.at>
- */
-
-#include <linux/uaccess.h>
-#include <linux/kernel.h>
-#include <os.h>
-
-bool copy_from_kernel_nofault_allowed(const void *src, size_t size)
-{
- void *psrc = (void *)rounddown((unsigned long)src, PAGE_SIZE);
-
- if ((unsigned long)src < PAGE_SIZE || size <= 0)
- return false;
- if (os_mincore(psrc, size + src - psrc) <= 0)
- return false;
- return true;
-}
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 379f33a1babf..76bec7de81b5 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -9,6 +9,8 @@
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
+#include <linux/init.h>
+#include <asm/sections.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <as-layout.h>
@@ -66,6 +68,7 @@ void __init arch_mm_preinit(void)
map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
memblock_free((void *)brk_end, uml_reserved - brk_end);
uml_reserved = brk_end;
+ min_low_pfn = PFN_UP(__pa(uml_reserved));
max_pfn = max_low_pfn;
}
@@ -242,3 +245,11 @@ static const pgprot_t protection_map[16] = {
[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED
};
DECLARE_VM_GET_PAGE_PROT
+
+void mark_rodata_ro(void)
+{
+ unsigned long rodata_start = PFN_ALIGN(__start_rodata);
+ unsigned long rodata_end = PFN_ALIGN(__end_rodata);
+
+ os_protect_memory((void *)rodata_start, rodata_end - rodata_start, 1, 0, 0);
+}
diff --git a/arch/um/kernel/sigio.c b/arch/um/kernel/sigio.c
index 5085a50c3b8c..4fc04742048a 100644
--- a/arch/um/kernel/sigio.c
+++ b/arch/um/kernel/sigio.c
@@ -8,32 +8,6 @@
#include <os.h>
#include <sigio.h>
-/* Protected by sigio_lock() called from write_sigio_workaround */
-static int sigio_irq_fd = -1;
-
-static irqreturn_t sigio_interrupt(int irq, void *data)
-{
- char c;
-
- os_read_file(sigio_irq_fd, &c, sizeof(c));
- return IRQ_HANDLED;
-}
-
-int write_sigio_irq(int fd)
-{
- int err;
-
- err = um_request_irq(SIGIO_WRITE_IRQ, fd, IRQ_READ, sigio_interrupt,
- 0, "write sigio", NULL);
- if (err < 0) {
- printk(KERN_ERR "write_sigio_irq : um_request_irq failed, "
- "err = %d\n", err);
- return -1;
- }
- sigio_irq_fd = fd;
- return 0;
-}
-
/* These are called from os-Linux/sigio.c to protect its pollfds arrays. */
static DEFINE_MUTEX(sigio_mutex);
diff --git a/arch/um/kernel/skas/syscall.c b/arch/um/kernel/skas/syscall.c
index b09e85279d2b..a5beaea2967e 100644
--- a/arch/um/kernel/skas/syscall.c
+++ b/arch/um/kernel/skas/syscall.c
@@ -31,6 +31,17 @@ void handle_syscall(struct uml_pt_regs *r)
goto out;
syscall = UPT_SYSCALL_NR(r);
+
+ /*
+ * If no time passes, then sched_yield may not actually yield, causing
+ * broken spinlock implementations in userspace (ASAN) to hang for long
+ * periods of time.
+ */
+ if ((time_travel_mode == TT_MODE_INFCPU ||
+ time_travel_mode == TT_MODE_EXTERNAL) &&
+ syscall == __NR_sched_yield)
+ tt_extra_sched_jiffies += 1;
+
if (syscall >= 0 && syscall < __NR_syscalls) {
unsigned long ret = EXECUTE_SYSCALL(syscall, regs);
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index cdaee3e94273..ce073150dc20 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -16,6 +16,7 @@
#include <kern_util.h>
#include <os.h>
#include <skas.h>
+#include <arch.h>
/*
* Note this is constrained to return 0, -EFAULT, -EACCES, -ENOMEM by
@@ -175,12 +176,14 @@ void fatal_sigsegv(void)
* @sig: the signal number
* @unused_si: the signal info struct; unused in this handler
* @regs: the ptrace register information
+ * @mc: the mcontext of the signal
*
* The handler first extracts the faultinfo from the UML ptrace regs struct.
* If the userfault did not happen in an UML userspace process, bad_segv is called.
* Otherwise the signal did happen in a cloned userspace process, handle it.
*/
-void segv_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
+void segv_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs,
+ void *mc)
{
struct faultinfo * fi = UPT_FAULTINFO(regs);
@@ -189,7 +192,7 @@ void segv_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
bad_segv(*fi, UPT_IP(regs));
return;
}
- segv(*fi, UPT_IP(regs), UPT_IS_USER(regs), regs);
+ segv(*fi, UPT_IP(regs), UPT_IS_USER(regs), regs, mc);
}
/*
@@ -199,7 +202,7 @@ void segv_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
* give us bad data!
*/
unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
- struct uml_pt_regs *regs)
+ struct uml_pt_regs *regs, void *mc)
{
int si_code;
int err;
@@ -223,6 +226,19 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
goto out;
}
else if (current->mm == NULL) {
+ if (current->pagefault_disabled) {
+ if (!mc) {
+ show_regs(container_of(regs, struct pt_regs, regs));
+ panic("Segfault with pagefaults disabled but no mcontext");
+ }
+ if (!current->thread.segv_continue) {
+ show_regs(container_of(regs, struct pt_regs, regs));
+ panic("Segfault without recovery target");
+ }
+ mc_set_rip(mc, current->thread.segv_continue);
+ current->thread.segv_continue = NULL;
+ goto out;
+ }
show_regs(container_of(regs, struct pt_regs, regs));
panic("Segfault with no mm");
}
@@ -274,7 +290,8 @@ out:
return 0;
}
-void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs)
+void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs,
+ void *mc)
{
int code, err;
if (!UPT_IS_USER(regs)) {
@@ -302,7 +319,8 @@ void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs)
}
}
-void winch(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
+void winch(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs,
+ void *mc)
{
do_IRQ(WINCH_IRQ, regs);
}
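
Taken together with the uaccess.h hunk earlier, this trap.c change gives UML genuine fault recovery for the kernel-nofault accessors instead of the removed mincore()-based check: ___backtrack_faulted() records a continuation point in current->thread.segv_continue, and when a fault hits a context with pagefaults disabled (and no user mm to satisfy it), segv() rewrites the faulting mcontext with mc_set_rip() so execution resumes there. A hedged illustration of what a caller of these accessors now gets; probe_kernel_long() below is an example, not part of the patch.

/* Hedged illustration only; probe_kernel_long() is not in the patch. */
static int probe_kernel_long(const void *addr, long *val)
{
	long tmp;

	pagefault_disable();
	/*
	 * If the read faults, segv() redirects the signal context to the
	 * recorded continuation point, the macro sees __faulted set and
	 * jumps to the error label instead of panicking.
	 */
	__get_kernel_nofault(&tmp, addr, long, efault);
	pagefault_enable();

	*val = tmp;
	return 0;

efault:
	pagefault_enable();
	return -EFAULT;
}
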
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 050279814eac..d4b3b6742ec8 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -12,6 +12,7 @@
#include <linux/panic_notifier.h>
#include <linux/seq_file.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
@@ -78,7 +79,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "model name\t: UML\n");
seq_printf(m, "mode\t\t: skas\n");
seq_printf(m, "host\t\t: %s\n", host_info);
- seq_printf(m, "fpu\t\t: %s\n", cpu_has(&boot_cpu_data, X86_FEATURE_FPU) ? "yes" : "no");
+ seq_printf(m, "fpu\t\t: %s\n", str_yes_no(cpu_has(&boot_cpu_data, X86_FEATURE_FPU)));
seq_printf(m, "flags\t\t:");
for (i = 0; i < 32*NCAPINTS; i++)
if (cpu_has(&boot_cpu_data, i) && (x86_cap_flags[i] != NULL))
diff --git a/arch/um/os-Linux/helper.c b/arch/um/os-Linux/helper.c
index 3cb8ac63be6e..89c2ad2a4e3a 100644
--- a/arch/um/os-Linux/helper.c
+++ b/arch/um/os-Linux/helper.c
@@ -8,6 +8,7 @@
#include <unistd.h>
#include <errno.h>
#include <sched.h>
+#include <pthread.h>
#include <linux/limits.h>
#include <sys/socket.h>
#include <sys/wait.h>
@@ -121,6 +122,10 @@ int run_helper_thread(int (*proc)(void *), void *arg, unsigned int flags,
unsigned long stack, sp;
int pid, status, err;
+ /* To share memory space, use os_run_helper_thread() instead. */
+ if (flags & CLONE_VM)
+ return -EINVAL;
+
stack = alloc_stack(0, __uml_cant_sleep());
if (stack == 0)
return -ENOMEM;
@@ -167,3 +172,65 @@ int helper_wait(int pid)
} else
return 0;
}
+
+struct os_helper_thread {
+ pthread_t handle;
+};
+
+int os_run_helper_thread(struct os_helper_thread **td_out,
+ void *(*routine)(void *), void *arg)
+{
+ struct os_helper_thread *td;
+ sigset_t sigset, oset;
+ int err, flags;
+
+ flags = __uml_cant_sleep() ? UM_GFP_ATOMIC : UM_GFP_KERNEL;
+ td = uml_kmalloc(sizeof(*td), flags);
+ if (!td)
+ return -ENOMEM;
+
+ sigfillset(&sigset);
+ if (sigprocmask(SIG_SETMASK, &sigset, &oset) < 0) {
+ err = -errno;
+ kfree(td);
+ return err;
+ }
+
+ err = pthread_create(&td->handle, NULL, routine, arg);
+
+ if (sigprocmask(SIG_SETMASK, &oset, NULL) < 0)
+ panic("Failed to restore the signal mask: %d", errno);
+
+ if (err != 0)
+ kfree(td);
+ else
+ *td_out = td;
+
+ return -err;
+}
+
+void os_kill_helper_thread(struct os_helper_thread *td)
+{
+ pthread_cancel(td->handle);
+ pthread_join(td->handle, NULL);
+ kfree(td);
+}
+
+void os_fix_helper_thread_signals(void)
+{
+ sigset_t sigset;
+
+ sigemptyset(&sigset);
+
+ sigaddset(&sigset, SIGWINCH);
+ sigaddset(&sigset, SIGPIPE);
+ sigaddset(&sigset, SIGPROF);
+ sigaddset(&sigset, SIGINT);
+ sigaddset(&sigset, SIGTERM);
+ sigaddset(&sigset, SIGCHLD);
+ sigaddset(&sigset, SIGALRM);
+ sigaddset(&sigset, SIGIO);
+ sigaddset(&sigset, SIGUSR1);
+
+ pthread_sigmask(SIG_SETMASK, &sigset, NULL);
+}
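
helper.c now distinguishes two kinds of helpers: run_helper_thread() keeps spawning clone()-based children (and rejects CLONE_VM), while address-space-sharing helpers move to the new pthread-backed API. Threads created by os_run_helper_thread() start with every signal blocked; a long-running helper then typically calls os_fix_helper_thread_signals() to narrow the mask to UML's own signals. A hedged usage sketch follows; the worker_* names are illustrative, not from the patch.

/* Hedged usage sketch; the worker_* symbols are not part of the patch. */
static struct os_helper_thread *worker_td;

static void *worker_thread(void *arg)
{
	/* keep UML's internal signals (SIGIO, SIGALRM, ...) blocked here */
	os_fix_helper_thread_signals();

	for (;;) {
		/* wait for work (e.g. on an fd), then notify the UML kernel */
	}
	return NULL;
}

static int worker_start(void)
{
	return os_run_helper_thread(&worker_td, worker_thread, NULL);
}

static void worker_stop(void)
{
	/* pthread_cancel() + pthread_join() + free, per the helper above */
	os_kill_helper_thread(worker_td);
	worker_td = NULL;
}
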
diff --git a/arch/um/os-Linux/process.c b/arch/um/os-Linux/process.c
index 9f086f939420..184566edeee9 100644
--- a/arch/um/os-Linux/process.c
+++ b/arch/um/os-Linux/process.c
@@ -142,57 +142,6 @@ out:
return ok;
}
-static int os_page_mincore(void *addr)
-{
- char vec[2];
- int ret;
-
- ret = mincore(addr, UM_KERN_PAGE_SIZE, vec);
- if (ret < 0) {
- if (errno == ENOMEM || errno == EINVAL)
- return 0;
- else
- return -errno;
- }
-
- return vec[0] & 1;
-}
-
-int os_mincore(void *addr, unsigned long len)
-{
- char *vec;
- int ret, i;
-
- if (len <= UM_KERN_PAGE_SIZE)
- return os_page_mincore(addr);
-
- vec = calloc(1, (len + UM_KERN_PAGE_SIZE - 1) / UM_KERN_PAGE_SIZE);
- if (!vec)
- return -ENOMEM;
-
- ret = mincore(addr, UM_KERN_PAGE_SIZE, vec);
- if (ret < 0) {
- if (errno == ENOMEM || errno == EINVAL)
- ret = 0;
- else
- ret = -errno;
-
- goto out;
- }
-
- for (i = 0; i < ((len + UM_KERN_PAGE_SIZE - 1) / UM_KERN_PAGE_SIZE); i++) {
- if (!(vec[i] & 1)) {
- ret = 0;
- goto out;
- }
- }
-
- ret = 1;
-out:
- free(vec);
- return ret;
-}
-
void init_new_thread_signals(void)
{
set_handler(SIGSEGV);
diff --git a/arch/um/os-Linux/sigio.c b/arch/um/os-Linux/sigio.c
index 9aac8def4d63..a05a6ecee756 100644
--- a/arch/um/os-Linux/sigio.c
+++ b/arch/um/os-Linux/sigio.c
@@ -11,6 +11,7 @@
#include <sched.h>
#include <signal.h>
#include <string.h>
+#include <sys/epoll.h>
#include <kern_util.h>
#include <init.h>
#include <os.h>
@@ -21,184 +22,51 @@
* Protected by sigio_lock(), also used by sigio_cleanup, which is an
* exitcall.
*/
-static int write_sigio_pid = -1;
-static unsigned long write_sigio_stack;
+static struct os_helper_thread *write_sigio_td;
-/*
- * These arrays are initialized before the sigio thread is started, and
- * the descriptors closed after it is killed. So, it can't see them change.
- * On the UML side, they are changed under the sigio_lock.
- */
-#define SIGIO_FDS_INIT {-1, -1}
-
-static int write_sigio_fds[2] = SIGIO_FDS_INIT;
-static int sigio_private[2] = SIGIO_FDS_INIT;
+static int epollfd = -1;
-struct pollfds {
- struct pollfd *poll;
- int size;
- int used;
-};
+#define MAX_EPOLL_EVENTS 64
-/*
- * Protected by sigio_lock(). Used by the sigio thread, but the UML thread
- * synchronizes with it.
- */
-static struct pollfds current_poll;
-static struct pollfds next_poll;
-static struct pollfds all_sigio_fds;
+static struct epoll_event epoll_events[MAX_EPOLL_EVENTS];
-static int write_sigio_thread(void *unused)
+static void *write_sigio_thread(void *unused)
{
- struct pollfds *fds, tmp;
- struct pollfd *p;
- int i, n, respond_fd;
- char c;
-
- os_set_pdeathsig();
- os_fix_helper_signals();
- fds = &current_poll;
+ int pid = getpid();
+ int r;
+
+ os_fix_helper_thread_signals();
+
while (1) {
- n = poll(fds->poll, fds->used, -1);
- if (n < 0) {
+ r = epoll_wait(epollfd, epoll_events, MAX_EPOLL_EVENTS, -1);
+ if (r < 0) {
if (errno == EINTR)
continue;
- printk(UM_KERN_ERR "write_sigio_thread : poll returned "
- "%d, errno = %d\n", n, errno);
+ printk(UM_KERN_ERR "%s: epoll_wait failed, errno = %d\n",
+ __func__, errno);
}
- for (i = 0; i < fds->used; i++) {
- p = &fds->poll[i];
- if (p->revents == 0)
- continue;
- if (p->fd == sigio_private[1]) {
- CATCH_EINTR(n = read(sigio_private[1], &c,
- sizeof(c)));
- if (n != sizeof(c))
- printk(UM_KERN_ERR
- "write_sigio_thread : "
- "read on socket failed, "
- "err = %d\n", errno);
- tmp = current_poll;
- current_poll = next_poll;
- next_poll = tmp;
- respond_fd = sigio_private[1];
- }
- else {
- respond_fd = write_sigio_fds[1];
- fds->used--;
- memmove(&fds->poll[i], &fds->poll[i + 1],
- (fds->used - i) * sizeof(*fds->poll));
- }
-
- CATCH_EINTR(n = write(respond_fd, &c, sizeof(c)));
- if (n != sizeof(c))
- printk(UM_KERN_ERR "write_sigio_thread : "
- "write on socket failed, err = %d\n",
- errno);
- }
- }
- return 0;
-}
-
-static int need_poll(struct pollfds *polls, int n)
-{
- struct pollfd *new;
-
- if (n <= polls->size)
- return 0;
-
- new = uml_kmalloc(n * sizeof(struct pollfd), UM_GFP_ATOMIC);
- if (new == NULL) {
- printk(UM_KERN_ERR "need_poll : failed to allocate new "
- "pollfds\n");
- return -ENOMEM;
+ CATCH_EINTR(r = tgkill(pid, pid, SIGIO));
+ if (r < 0)
+ printk(UM_KERN_ERR "%s: tgkill failed, errno = %d\n",
+ __func__, errno);
}
- memcpy(new, polls->poll, polls->used * sizeof(struct pollfd));
- kfree(polls->poll);
-
- polls->poll = new;
- polls->size = n;
- return 0;
-}
-
-/*
- * Must be called with sigio_lock held, because it's needed by the marked
- * critical section.
- */
-static void update_thread(void)
-{
- unsigned long flags;
- int n;
- char c;
-
- flags = um_set_signals_trace(0);
- CATCH_EINTR(n = write(sigio_private[0], &c, sizeof(c)));
- if (n != sizeof(c)) {
- printk(UM_KERN_ERR "update_thread : write failed, err = %d\n",
- errno);
- goto fail;
- }
-
- CATCH_EINTR(n = read(sigio_private[0], &c, sizeof(c)));
- if (n != sizeof(c)) {
- printk(UM_KERN_ERR "update_thread : read failed, err = %d\n",
- errno);
- goto fail;
- }
-
- um_set_signals_trace(flags);
- return;
- fail:
- /* Critical section start */
- if (write_sigio_pid != -1) {
- os_kill_process(write_sigio_pid, 1);
- free_stack(write_sigio_stack, 0);
- }
- write_sigio_pid = -1;
- close(sigio_private[0]);
- close(sigio_private[1]);
- close(write_sigio_fds[0]);
- close(write_sigio_fds[1]);
- /* Critical section end */
- um_set_signals_trace(flags);
+ return NULL;
}
int __add_sigio_fd(int fd)
{
- struct pollfd *p;
- int err, i, n;
-
- for (i = 0; i < all_sigio_fds.used; i++) {
- if (all_sigio_fds.poll[i].fd == fd)
- break;
- }
- if (i == all_sigio_fds.used)
- return -ENOSPC;
-
- p = &all_sigio_fds.poll[i];
-
- for (i = 0; i < current_poll.used; i++) {
- if (current_poll.poll[i].fd == fd)
- return 0;
- }
-
- n = current_poll.used;
- err = need_poll(&next_poll, n + 1);
- if (err)
- return err;
-
- memcpy(next_poll.poll, current_poll.poll,
- current_poll.used * sizeof(struct pollfd));
- next_poll.poll[n] = *p;
- next_poll.used = n + 1;
- update_thread();
-
- return 0;
+ struct epoll_event event = {
+ .data.fd = fd,
+ .events = EPOLLIN | EPOLLET,
+ };
+ int r;
+
+ CATCH_EINTR(r = epoll_ctl(epollfd, EPOLL_CTL_ADD, fd, &event));
+ return r < 0 ? -errno : 0;
}
-
int add_sigio_fd(int fd)
{
int err;
@@ -212,38 +80,11 @@ int add_sigio_fd(int fd)
int __ignore_sigio_fd(int fd)
{
- struct pollfd *p;
- int err, i, n = 0;
-
- /*
- * This is called from exitcalls elsewhere in UML - if
- * sigio_cleanup has already run, then update_thread will hang
- * or fail because the thread is no longer running.
- */
- if (write_sigio_pid == -1)
- return -EIO;
-
- for (i = 0; i < current_poll.used; i++) {
- if (current_poll.poll[i].fd == fd)
- break;
- }
- if (i == current_poll.used)
- return -ENOENT;
-
- err = need_poll(&next_poll, current_poll.used - 1);
- if (err)
- return err;
-
- for (i = 0; i < current_poll.used; i++) {
- p = &current_poll.poll[i];
- if (p->fd != fd)
- next_poll.poll[n++] = *p;
- }
- next_poll.used = current_poll.used - 1;
-
- update_thread();
+ struct epoll_event event;
+ int r;
- return 0;
+ CATCH_EINTR(r = epoll_ctl(epollfd, EPOLL_CTL_DEL, fd, &event));
+ return r < 0 ? -errno : 0;
}
int ignore_sigio_fd(int fd)
@@ -257,125 +98,37 @@ int ignore_sigio_fd(int fd)
return err;
}
-static struct pollfd *setup_initial_poll(int fd)
-{
- struct pollfd *p;
-
- p = uml_kmalloc(sizeof(struct pollfd), UM_GFP_KERNEL);
- if (p == NULL) {
- printk(UM_KERN_ERR "setup_initial_poll : failed to allocate "
- "poll\n");
- return NULL;
- }
- *p = ((struct pollfd) { .fd = fd,
- .events = POLLIN,
- .revents = 0 });
- return p;
-}
-
static void write_sigio_workaround(void)
{
- struct pollfd *p;
int err;
- int l_write_sigio_fds[2];
- int l_sigio_private[2];
- int l_write_sigio_pid;
- /* We call this *tons* of times - and most ones we must just fail. */
sigio_lock();
- l_write_sigio_pid = write_sigio_pid;
- sigio_unlock();
-
- if (l_write_sigio_pid != -1)
- return;
+ if (write_sigio_td)
+ goto out;
- err = os_pipe(l_write_sigio_fds, 1, 1);
- if (err < 0) {
- printk(UM_KERN_ERR "write_sigio_workaround - os_pipe 1 failed, "
- "err = %d\n", -err);
- return;
+ epollfd = epoll_create(MAX_EPOLL_EVENTS);
+ if (epollfd < 0) {
+ printk(UM_KERN_ERR "%s: epoll_create failed, errno = %d\n",
+ __func__, errno);
+ goto out;
}
- err = os_pipe(l_sigio_private, 1, 1);
+
+ err = os_run_helper_thread(&write_sigio_td, write_sigio_thread, NULL);
if (err < 0) {
- printk(UM_KERN_ERR "write_sigio_workaround - os_pipe 2 failed, "
- "err = %d\n", -err);
- goto out_close1;
+ printk(UM_KERN_ERR "%s: os_run_helper_thread failed, errno = %d\n",
+ __func__, -err);
+ close(epollfd);
+ epollfd = -1;
+ goto out;
}
- p = setup_initial_poll(l_sigio_private[1]);
- if (!p)
- goto out_close2;
-
- sigio_lock();
-
- /*
- * Did we race? Don't try to optimize this, please, it's not so likely
- * to happen, and no more than once at the boot.
- */
- if (write_sigio_pid != -1)
- goto out_free;
-
- current_poll = ((struct pollfds) { .poll = p,
- .used = 1,
- .size = 1 });
-
- if (write_sigio_irq(l_write_sigio_fds[0]))
- goto out_clear_poll;
-
- memcpy(write_sigio_fds, l_write_sigio_fds, sizeof(l_write_sigio_fds));
- memcpy(sigio_private, l_sigio_private, sizeof(l_sigio_private));
-
- write_sigio_pid = run_helper_thread(write_sigio_thread, NULL,
- CLONE_FILES | CLONE_VM,
- &write_sigio_stack);
-
- if (write_sigio_pid < 0)
- goto out_clear;
-
- sigio_unlock();
- return;
-
-out_clear:
- write_sigio_pid = -1;
- write_sigio_fds[0] = -1;
- write_sigio_fds[1] = -1;
- sigio_private[0] = -1;
- sigio_private[1] = -1;
-out_clear_poll:
- current_poll = ((struct pollfds) { .poll = NULL,
- .size = 0,
- .used = 0 });
-out_free:
+out:
sigio_unlock();
- kfree(p);
-out_close2:
- close(l_sigio_private[0]);
- close(l_sigio_private[1]);
-out_close1:
- close(l_write_sigio_fds[0]);
- close(l_write_sigio_fds[1]);
}
-void sigio_broken(int fd)
+void sigio_broken(void)
{
- int err;
-
write_sigio_workaround();
-
- sigio_lock();
- err = need_poll(&all_sigio_fds, all_sigio_fds.used + 1);
- if (err) {
- printk(UM_KERN_ERR "maybe_sigio_broken - failed to add pollfd "
- "for descriptor %d\n", fd);
- goto out;
- }
-
- all_sigio_fds.poll[all_sigio_fds.used++] =
- ((struct pollfd) { .fd = fd,
- .events = POLLIN,
- .revents = 0 });
-out:
- sigio_unlock();
}
/* Changed during early boot */
@@ -389,17 +142,16 @@ void maybe_sigio_broken(int fd)
if (pty_output_sigio)
return;
- sigio_broken(fd);
+ sigio_broken();
}
static void sigio_cleanup(void)
{
- if (write_sigio_pid == -1)
+ if (!write_sigio_td)
return;
- os_kill_process(write_sigio_pid, 1);
- free_stack(write_sigio_stack, 0);
- write_sigio_pid = -1;
+ os_kill_helper_thread(write_sigio_td);
+ write_sigio_td = NULL;
}
__uml_exitcall(sigio_cleanup);
diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
index 9ea7269ffb77..e71e5b4878d1 100644
--- a/arch/um/os-Linux/signal.c
+++ b/arch/um/os-Linux/signal.c
@@ -21,7 +21,7 @@
#include <sys/ucontext.h>
#include <timetravel.h>
-void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = {
+void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *, void *mc) = {
[SIGTRAP] = relay_signal,
[SIGFPE] = relay_signal,
[SIGILL] = relay_signal,
@@ -47,7 +47,7 @@ static void sig_handler_common(int sig, struct siginfo *si, mcontext_t *mc)
if ((sig != SIGIO) && (sig != SIGWINCH))
unblock_signals_trace();
- (*sig_info[sig])(sig, si, &r);
+ (*sig_info[sig])(sig, si, &r, mc);
errno = save_errno;
}
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index e2f8f156402f..ae2aea062f06 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -166,7 +166,7 @@ static void get_skas_faultinfo(int pid, struct faultinfo *fi)
static void handle_segv(int pid, struct uml_pt_regs *regs)
{
get_skas_faultinfo(pid, &regs->faultinfo);
- segv(regs->faultinfo, 0, 1, NULL);
+ segv(regs->faultinfo, 0, 1, NULL, NULL);
}
static void handle_trap(int pid, struct uml_pt_regs *regs)
@@ -525,7 +525,7 @@ void userspace(struct uml_pt_regs *regs)
get_skas_faultinfo(pid,
&regs->faultinfo);
(*sig_info[SIGSEGV])(SIGSEGV, (struct siginfo *)&si,
- regs);
+ regs, NULL);
}
else handle_segv(pid, regs);
break;
@@ -533,7 +533,7 @@ void userspace(struct uml_pt_regs *regs)
handle_trap(pid, regs);
break;
case SIGTRAP:
- relay_signal(SIGTRAP, (struct siginfo *)&si, regs);
+ relay_signal(SIGTRAP, (struct siginfo *)&si, regs, NULL);
break;
case SIGALRM:
break;
@@ -543,7 +543,7 @@ void userspace(struct uml_pt_regs *regs)
case SIGFPE:
case SIGWINCH:
block_signals_trace();
- (*sig_info[sig])(sig, (struct siginfo *)&si, regs);
+ (*sig_info[sig])(sig, (struct siginfo *)&si, regs, NULL);
unblock_signals_trace();
break;
default:
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 9395ec37bb64..4b9f378e05f6 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -27,6 +27,7 @@ config X86_64
# Options that are inherently 64-bit kernel only:
select ARCH_HAS_GIGANTIC_PAGE
select ARCH_HAS_PTDUMP
+ select ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
select ARCH_SUPPORTS_PER_VMA_LOCK
select ARCH_SUPPORTS_HUGE_PFNMAP if TRANSPARENT_HUGEPAGE
@@ -889,6 +890,7 @@ config INTEL_TDX_GUEST
depends on X86_64 && CPU_SUP_INTEL
depends on X86_X2APIC
depends on EFI_STUB
+ depends on PARAVIRT
select ARCH_HAS_CC_PLATFORM
select X86_MEM_ENCRYPT
select X86_MCE
@@ -2136,6 +2138,7 @@ config RANDOMIZE_BASE
config X86_NEED_RELOCS
def_bool y
depends on RANDOMIZE_BASE || (X86_32 && RELOCATABLE)
+ select ARCH_VMLINUX_NEEDS_RELOCS
config PHYSICAL_ALIGN
hex "Alignment value to which kernel should be aligned"
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 27efe2dc2aa8..594723005d95 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -245,12 +245,6 @@ endif
KBUILD_LDFLAGS += -m elf_$(UTS_MACHINE)
-ifdef CONFIG_X86_NEED_RELOCS
-LDFLAGS_vmlinux := --emit-relocs --discard-none
-else
-LDFLAGS_vmlinux :=
-endif
-
#
# The 64-bit kernel must be aligned to 2MB. Pass -z max-page-size=0x200000 to
# the linker to force 2MB page size regardless of the default page size used
diff --git a/arch/x86/Makefile.postlink b/arch/x86/Makefile.postlink
deleted file mode 100644
index 8b8a68162c94..000000000000
--- a/arch/x86/Makefile.postlink
+++ /dev/null
@@ -1,40 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# ===========================================================================
-# Post-link x86 pass
-# ===========================================================================
-#
-# 1. Separate relocations from vmlinux into vmlinux.relocs.
-# 2. Strip relocations from vmlinux.
-
-PHONY := __archpost
-__archpost:
-
--include include/config/auto.conf
-include $(srctree)/scripts/Kbuild.include
-include $(srctree)/scripts/Makefile.lib
-
-CMD_RELOCS = arch/x86/tools/relocs
-OUT_RELOCS = arch/x86/boot/compressed
-quiet_cmd_relocs = RELOCS $(OUT_RELOCS)/$@.relocs
- cmd_relocs = \
- mkdir -p $(OUT_RELOCS); \
- $(CMD_RELOCS) $@ > $(OUT_RELOCS)/$@.relocs; \
- $(CMD_RELOCS) --abs-relocs $@
-
-# `@true` prevents complaint when there is nothing to be done
-
-vmlinux: FORCE
- @true
-ifeq ($(CONFIG_X86_NEED_RELOCS),y)
- $(call cmd,relocs)
- $(call cmd,strip_relocs)
-endif
-
-clean:
- @rm -f $(OUT_RELOCS)/vmlinux.relocs
-
-PHONY += FORCE clean
-
-FORCE:
-
-.PHONY: $(PHONY)
diff --git a/arch/x86/Makefile.um b/arch/x86/Makefile.um
index a46b1397ad01..c86cbd9cbba3 100644
--- a/arch/x86/Makefile.um
+++ b/arch/x86/Makefile.um
@@ -7,12 +7,13 @@ core-y += arch/x86/crypto/
# GCC versions < 11. See:
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99652
#
-ifeq ($(CONFIG_CC_IS_CLANG),y)
-KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx
-KBUILD_RUSTFLAGS += --target=$(objtree)/scripts/target.json
+ifeq ($(call gcc-min-version, 110000)$(CONFIG_CC_IS_CLANG),y)
+KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx
KBUILD_RUSTFLAGS += -Ctarget-feature=-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-avx,-avx2
endif
+KBUILD_RUSTFLAGS += --target=$(objtree)/scripts/target.json
+
ifeq ($(CONFIG_X86_32),y)
START := 0x8048000
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 8589471b65a1..81f55da81967 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -53,7 +53,6 @@ targets += cpustr.h
KBUILD_CFLAGS := $(REALMODE_CFLAGS) -D_SETUP
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
-KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
KBUILD_CFLAGS += $(CONFIG_CC_IMPLICIT_FALLTHROUGH)
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 2eb63536c5d0..fdbce022db55 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -38,7 +38,6 @@ KBUILD_CFLAGS += -fno-stack-protector
KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
KBUILD_CFLAGS += -Wno-pointer-sign
-KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
KBUILD_CFLAGS += -D__DISABLE_EXPORTS
# Disable relocation relaxation in case the link is not PIE.
@@ -117,9 +116,12 @@ $(obj)/vmlinux.bin: vmlinux FORCE
targets += $(patsubst $(obj)/%,%,$(vmlinux-objs-y)) vmlinux.bin.all vmlinux.relocs
-# vmlinux.relocs is created by the vmlinux postlink step.
-$(obj)/vmlinux.relocs: vmlinux
- @true
+CMD_RELOCS = arch/x86/tools/relocs
+quiet_cmd_relocs = RELOCS $@
+ cmd_relocs = $(CMD_RELOCS) $< > $@;$(CMD_RELOCS) --abs-relocs $<
+
+$(obj)/vmlinux.relocs: vmlinux.unstripped FORCE
+ $(call if_changed,relocs)
vmlinux.bin.all-y := $(obj)/vmlinux.bin
vmlinux.bin.all-$(CONFIG_X86_NEED_RELOCS) += $(obj)/vmlinux.relocs
diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c
index 7772b01ab738..edab6d6049be 100644
--- a/arch/x86/coco/tdx/tdx.c
+++ b/arch/x86/coco/tdx/tdx.c
@@ -14,6 +14,7 @@
#include <asm/ia32.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>
+#include <asm/paravirt_types.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/traps.h>
@@ -392,13 +393,21 @@ static int handle_halt(struct ve_info *ve)
{
const bool irq_disabled = irqs_disabled();
+ /*
+ * HLT with IRQs enabled is unsafe, as an IRQ that is intended to be a
+ * wake event may be consumed before requesting HLT emulation, leaving
+ * the vCPU blocking indefinitely.
+ */
+ if (WARN_ONCE(!irq_disabled, "HLT emulation with IRQs enabled"))
+ return -EIO;
+
if (__halt(irq_disabled))
return -EIO;
return ve_instr_len(ve);
}
-void __cpuidle tdx_safe_halt(void)
+void __cpuidle tdx_halt(void)
{
const bool irq_disabled = false;
@@ -409,6 +418,16 @@ void __cpuidle tdx_safe_halt(void)
WARN_ONCE(1, "HLT instruction emulation failed\n");
}
+static void __cpuidle tdx_safe_halt(void)
+{
+ tdx_halt();
+ /*
+ * "__cpuidle" section doesn't support instrumentation, so stick
+ * with raw_* variant that avoids tracing hooks.
+ */
+ raw_local_irq_enable();
+}
+
static int read_msr(struct pt_regs *regs, struct ve_info *ve)
{
struct tdx_module_args args = {
@@ -1110,6 +1129,19 @@ void __init tdx_early_init(void)
x86_platform.guest.enc_kexec_finish = tdx_kexec_finish;
/*
+ * Avoid "sti;hlt" execution in TDX guests as HLT induces a #VE that
+ * will enable interrupts before HLT TDCALL invocation if executed
+ * in STI-shadow, possibly resulting in missed wakeup events.
+ *
+ * Modify all possible HLT execution paths to use TDX specific routines
+ * that directly execute TDCALL and toggle the interrupt state as
+ * needed after TDCALL completion. This also reduces HLT related #VEs
+ * in addition to having a reliable halt logic execution.
+ */
+ pv_ops.irq.safe_halt = tdx_safe_halt;
+ pv_ops.irq.halt = tdx_halt;
+
+ /*
* TDX intercepts the RDMSR to read the X2APIC ID in the parallel
* bringup low level code. That raises #VE which cannot be handled
* there.
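
To see how the pieces above fit together at run time: arch_safe_halt() dispatches through pv_ops.irq.safe_halt, which tdx_early_init() now points at tdx_safe_halt(), so the HLT TDCALL replaces the problematic "sti;hlt" sequence and interrupts come back on only after the TDCALL returns. A rough idle-loop shaped sketch (the function name and control flow here are illustrative, not taken from the patch):

static void tdx_idle_sketch(void)
{
	raw_local_irq_disable();
	if (!need_resched())
		arch_safe_halt();	/* TDX guest: tdx_halt() + raw_local_irq_enable() */
	else
		raw_local_irq_enable();
	/* Either way, IRQs are enabled again by the time we get here. */
}
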
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 9518bf1ddf35..adb299d3b6a1 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -162,7 +162,8 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
text_start,
image->size,
VM_READ|VM_EXEC|
- VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
+ VM_SEALED_SYSMAP,
&vdso_mapping);
if (IS_ERR(vma)) {
@@ -181,7 +182,7 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
VDSO_VCLOCK_PAGES_START(addr),
VDSO_NR_VCLOCK_PAGES * PAGE_SIZE,
VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
- VM_PFNMAP,
+ VM_PFNMAP|VM_SEALED_SYSMAP,
&vvar_vclock_mapping);
if (IS_ERR(vma)) {
diff --git a/arch/x86/include/asm/arch_hweight.h b/arch/x86/include/asm/arch_hweight.h
index b5982b94bdba..cbc6157f0b4b 100644
--- a/arch/x86/include/asm/arch_hweight.h
+++ b/arch/x86/include/asm/arch_hweight.h
@@ -16,7 +16,8 @@ static __always_inline unsigned int __arch_hweight32(unsigned int w)
{
unsigned int res;
- asm_inline (ALTERNATIVE("call __sw_hweight32",
+ asm_inline (ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE
+ "call __sw_hweight32",
"popcntl %[val], %[cnt]", X86_FEATURE_POPCNT)
: [cnt] "=" REG_OUT (res), ASM_CALL_CONSTRAINT
: [val] REG_IN (w));
@@ -45,7 +46,8 @@ static __always_inline unsigned long __arch_hweight64(__u64 w)
{
unsigned long res;
- asm_inline (ALTERNATIVE("call __sw_hweight64",
+ asm_inline (ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE
+ "call __sw_hweight64",
"popcntq %[val], %[cnt]", X86_FEATURE_POPCNT)
: [cnt] "=" REG_OUT (res), ASM_CALL_CONSTRAINT
: [val] REG_IN (w));
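
When X86_FEATURE_POPCNT is present, the alternative patches the call site into a single POPCNT instruction; otherwise the call to the out-of-line software fallback remains. That fallback counts bits with the usual parallel-summing trick, roughly like this (a sketch of the algorithm, not the exact library routine):

static inline unsigned int sw_hweight32_sketch(unsigned int w)
{
	w -= (w >> 1) & 0x55555555;				/* per-2-bit sums */
	w  = (w & 0x33333333) + ((w >> 2) & 0x33333333);	/* per-4-bit sums */
	w  = (w + (w >> 4)) & 0x0f0f0f0f;			/* per-byte sums */
	return (w * 0x01010101) >> 24;				/* add the four bytes */
}
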
diff --git a/arch/x86/include/asm/iosf_mbi.h b/arch/x86/include/asm/iosf_mbi.h
index af7541c11821..8ace6559d399 100644
--- a/arch/x86/include/asm/iosf_mbi.h
+++ b/arch/x86/include/asm/iosf_mbi.h
@@ -168,13 +168,6 @@ void iosf_mbi_unblock_punit_i2c_access(void);
int iosf_mbi_register_pmic_bus_access_notifier(struct notifier_block *nb);
/**
- * iosf_mbi_register_pmic_bus_access_notifier - Unregister PMIC bus notifier
- *
- * @nb: notifier_block to unregister
- */
-int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb);
-
-/**
* iosf_mbi_unregister_pmic_bus_access_notifier_unlocked - Unregister PMIC bus
* notifier, unlocked
*
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index abb8374c9ff7..9a9b21b78905 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -76,6 +76,28 @@ static __always_inline void native_local_irq_restore(unsigned long flags)
#endif
+#ifndef CONFIG_PARAVIRT
+#ifndef __ASSEMBLY__
+/*
+ * Used in the idle loop; sti takes one instruction cycle
+ * to complete:
+ */
+static __always_inline void arch_safe_halt(void)
+{
+ native_safe_halt();
+}
+
+/*
+ * Used when interrupts are already enabled or to
+ * shutdown the processor:
+ */
+static __always_inline void halt(void)
+{
+ native_halt();
+}
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_PARAVIRT */
+
#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
@@ -98,24 +120,6 @@ static __always_inline void arch_local_irq_enable(void)
}
/*
- * Used in the idle loop; sti takes one instruction cycle
- * to complete:
- */
-static __always_inline void arch_safe_halt(void)
-{
- native_safe_halt();
-}
-
-/*
- * Used when interrupts are already enabled or to
- * shutdown the processor:
- */
-static __always_inline void halt(void)
-{
- native_halt();
-}
-
-/*
* For spinlocks, etc:
*/
static __always_inline unsigned long arch_local_irq_save(void)
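
For reference, a sketch of the native primitives these now-unconditional wrappers forward to; the real definitions live elsewhere in this header, so treat the bodies below as an approximation:

static __always_inline void native_safe_halt_sketch(void)
{
	/*
	 * STI keeps interrupts masked for one more instruction (the "STI
	 * shadow"), so "sti; hlt" arms the wakeup and halts atomically: a
	 * pending IRQ is delivered while in HLT rather than before it.
	 */
	asm volatile("sti; hlt" ::: "memory");
}

static __always_inline void native_halt_sketch(void)
{
	/* Caller has IRQs enabled already, or is shutting the CPU down. */
	asm volatile("hlt" ::: "memory");
}
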
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index bed346bfac89..c4c23190925c 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -102,6 +102,16 @@ static inline void notify_page_enc_status_changed(unsigned long pfn,
PVOP_VCALL3(mmu.notify_page_enc_status_changed, pfn, npages, enc);
}
+static __always_inline void arch_safe_halt(void)
+{
+ PVOP_VCALL0(irq.safe_halt);
+}
+
+static inline void halt(void)
+{
+ PVOP_VCALL0(irq.halt);
+}
+
#ifdef CONFIG_PARAVIRT_XXL
static inline void load_sp0(unsigned long sp0)
{
@@ -165,16 +175,6 @@ static inline void __write_cr4(unsigned long x)
PVOP_VCALL1(cpu.write_cr4, x);
}
-static __always_inline void arch_safe_halt(void)
-{
- PVOP_VCALL0(irq.safe_halt);
-}
-
-static inline void halt(void)
-{
- PVOP_VCALL0(irq.halt);
-}
-
static inline u64 paravirt_read_msr(unsigned msr)
{
return PVOP_CALL1(u64, cpu.read_msr, msr);
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 62912023b46f..631c306ce1ff 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -120,10 +120,9 @@ struct pv_irq_ops {
struct paravirt_callee_save save_fl;
struct paravirt_callee_save irq_disable;
struct paravirt_callee_save irq_enable;
-
+#endif
void (*safe_halt)(void);
void (*halt)(void);
-#endif
} __no_randomize_layout;
struct pv_mmu_ops {
diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
index daea94c2993c..55a5e656e4b9 100644
--- a/arch/x86/include/asm/smap.h
+++ b/arch/x86/include/asm/smap.h
@@ -16,23 +16,23 @@
#ifdef __ASSEMBLER__
#define ASM_CLAC \
- ALTERNATIVE "", "clac", X86_FEATURE_SMAP
+ ALTERNATIVE __stringify(ANNOTATE_IGNORE_ALTERNATIVE), "clac", X86_FEATURE_SMAP
#define ASM_STAC \
- ALTERNATIVE "", "stac", X86_FEATURE_SMAP
+ ALTERNATIVE __stringify(ANNOTATE_IGNORE_ALTERNATIVE), "stac", X86_FEATURE_SMAP
#else /* __ASSEMBLER__ */
static __always_inline void clac(void)
{
/* Note: a barrier is implicit in alternative() */
- alternative("", "clac", X86_FEATURE_SMAP);
+ alternative(ANNOTATE_IGNORE_ALTERNATIVE "", "clac", X86_FEATURE_SMAP);
}
static __always_inline void stac(void)
{
/* Note: a barrier is implicit in alternative() */
- alternative("", "stac", X86_FEATURE_SMAP);
+ alternative(ANNOTATE_IGNORE_ALTERNATIVE "", "stac", X86_FEATURE_SMAP);
}
static __always_inline unsigned long smap_save(void)
@@ -40,7 +40,8 @@ static __always_inline unsigned long smap_save(void)
unsigned long flags;
asm volatile ("# smap_save\n\t"
- ALTERNATIVE("", "pushf; pop %0; " "clac" "\n\t",
+ ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE
+ "", "pushf; pop %0; clac",
X86_FEATURE_SMAP)
: "=rm" (flags) : : "memory", "cc");
@@ -50,16 +51,22 @@ static __always_inline unsigned long smap_save(void)
static __always_inline void smap_restore(unsigned long flags)
{
asm volatile ("# smap_restore\n\t"
- ALTERNATIVE("", "push %0; popf\n\t",
+ ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE
+ "", "push %0; popf",
X86_FEATURE_SMAP)
: : "g" (flags) : "memory", "cc");
}
/* These macros can be used in asm() statements */
#define ASM_CLAC \
- ALTERNATIVE("", "clac", X86_FEATURE_SMAP)
+ ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE "", "clac", X86_FEATURE_SMAP)
#define ASM_STAC \
- ALTERNATIVE("", "stac", X86_FEATURE_SMAP)
+ ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE "", "stac", X86_FEATURE_SMAP)
+
+#define ASM_CLAC_UNSAFE \
+ ALTERNATIVE("", ANNOTATE_IGNORE_ALTERNATIVE "clac", X86_FEATURE_SMAP)
+#define ASM_STAC_UNSAFE \
+ ALTERNATIVE("", ANNOTATE_IGNORE_ALTERNATIVE "stac", X86_FEATURE_SMAP)
#endif /* __ASSEMBLER__ */
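
For orientation, STAC/CLAC bracket kernel accesses to user memory so that, with SMAP enabled, any other user-pointer dereference faults. A hedged sketch of a typical call site built on the existing uaccess helpers (the function name and error handling are illustrative):

static int read_user_word_sketch(const unsigned long __user *uaddr,
				 unsigned long *val)
{
	if (!user_access_begin(uaddr, sizeof(*uaddr)))	/* emits STAC */
		return -EFAULT;
	unsafe_get_user(*val, uaddr, Efault);		/* raw access, window open */
	user_access_end();				/* emits CLAC */
	return 0;
Efault:
	user_access_end();
	return -EFAULT;
}

The new ASM_STAC_UNSAFE/ASM_CLAC_UNSAFE variants exist for callers such as the Xen hypercall wrappers below, which deliberately suppress objtool's AC-state tracking around the alternative.
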
diff --git a/arch/x86/include/asm/tdx.h b/arch/x86/include/asm/tdx.h
index 65394aa9b49f..4a1922ec80cf 100644
--- a/arch/x86/include/asm/tdx.h
+++ b/arch/x86/include/asm/tdx.h
@@ -58,7 +58,7 @@ void tdx_get_ve_info(struct ve_info *ve);
bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve);
-void tdx_safe_halt(void);
+void tdx_halt(void);
bool tdx_early_handle_ve(struct pt_regs *regs);
@@ -72,7 +72,7 @@ void __init tdx_dump_td_ctls(u64 td_ctls);
#else
static inline void tdx_early_init(void) { };
-static inline void tdx_safe_halt(void) { };
+static inline void tdx_halt(void) { };
static inline bool tdx_early_handle_ve(struct pt_regs *regs) { return false; }
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 97771b9d33af..59a62c3780a2 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -231,14 +231,12 @@ static __always_inline void __xen_stac(void)
* Suppress objtool seeing the STAC/CLAC and getting confused about it
* calling random code with AC=1.
*/
- asm volatile(ANNOTATE_IGNORE_ALTERNATIVE
- ASM_STAC ::: "memory", "flags");
+ asm volatile(ASM_STAC_UNSAFE ::: "memory", "flags");
}
static __always_inline void __xen_clac(void)
{
- asm volatile(ANNOTATE_IGNORE_ALTERNATIVE
- ASM_CLAC ::: "memory", "flags");
+ asm volatile(ASM_CLAC_UNSAFE ::: "memory", "flags");
}
static inline long
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 72fa4bb78f0a..fee42a73d64a 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -799,7 +799,7 @@ int __init arch_early_irq_init(void)
x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops,
NULL);
BUG_ON(x86_vector_domain == NULL);
- irq_set_default_host(x86_vector_domain);
+ irq_set_default_domain(x86_vector_domain);
BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index 1f14c3308b6b..f6fd71b64b66 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -1786,13 +1786,13 @@ void mce_timer_kick(bool storm)
__this_cpu_write(mce_next_interval, check_interval * HZ);
}
-/* Must not be called in IRQ context where del_timer_sync() can deadlock */
+/* Must not be called in IRQ context where timer_delete_sync() can deadlock */
static void mce_timer_delete_all(void)
{
int cpu;
for_each_online_cpu(cpu)
- del_timer_sync(&per_cpu(mce_timer, cpu));
+ timer_delete_sync(&per_cpu(mce_timer, cpu));
}
static void __mcheck_cpu_mce_banks_init(void)
@@ -2820,7 +2820,7 @@ static int mce_cpu_pre_down(unsigned int cpu)
struct timer_list *t = this_cpu_ptr(&mce_timer);
mce_disable_cpu();
- del_timer_sync(t);
+ timer_delete_sync(t);
mce_threshold_remove_device(cpu);
mce_device_remove(cpu);
return 0;
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 97925632c28e..1ccd05d8999f 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -75,6 +75,11 @@ void paravirt_set_sched_clock(u64 (*func)(void))
static_call_update(pv_sched_clock, func);
}
+static noinstr void pv_native_safe_halt(void)
+{
+ native_safe_halt();
+}
+
#ifdef CONFIG_PARAVIRT_XXL
static noinstr void pv_native_write_cr2(unsigned long val)
{
@@ -100,11 +105,6 @@ static noinstr void pv_native_set_debugreg(int regno, unsigned long val)
{
native_set_debugreg(regno, val);
}
-
-static noinstr void pv_native_safe_halt(void)
-{
- native_safe_halt();
-}
#endif
struct pv_info pv_info = {
@@ -161,9 +161,11 @@ struct paravirt_patch_template pv_ops = {
.irq.save_fl = __PV_IS_CALLEE_SAVE(pv_native_save_fl),
.irq.irq_disable = __PV_IS_CALLEE_SAVE(pv_native_irq_disable),
.irq.irq_enable = __PV_IS_CALLEE_SAVE(pv_native_irq_enable),
+#endif /* CONFIG_PARAVIRT_XXL */
+
+ /* Irq HLT ops. */
.irq.safe_halt = pv_native_safe_halt,
.irq.halt = native_halt,
-#endif /* CONFIG_PARAVIRT_XXL */
/* Mmu ops. */
.mmu.flush_tlb_user = native_flush_tlb_local,
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 91f6ff618852..962c3ce39323 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -939,7 +939,7 @@ void __init select_idle_routine(void)
static_call_update(x86_idle, mwait_idle);
} else if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST)) {
pr_info("using TDX aware idle routine\n");
- static_call_update(x86_idle, tdx_safe_halt);
+ static_call_update(x86_idle, tdx_halt);
} else {
static_call_update(x86_idle, default_idle);
}
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index bd21e9c335ad..38b33cdd4232 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -1553,7 +1553,7 @@ static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
kvm_vcpu_halt(vcpu);
if (sched_poll.timeout)
- del_timer(&vcpu->arch.xen.poll_timer);
+ timer_delete(&vcpu->arch.xen.poll_timer);
kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
}
@@ -2308,7 +2308,7 @@ void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache);
kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_time_info_cache);
- del_timer_sync(&vcpu->arch.xen.poll_timer);
+ timer_delete_sync(&vcpu->arch.xen.poll_timer);
}
void kvm_xen_init_vm(struct kvm *kvm)
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index aa8c341b2441..06296eb69fd4 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -77,6 +77,24 @@ SYM_FUNC_START(rep_movs_alternative)
_ASM_EXTABLE_UA( 0b, 1b)
.Llarge_movsq:
+ /* Do the first possibly unaligned word */
+0: movq (%rsi),%rax
+1: movq %rax,(%rdi)
+
+ _ASM_EXTABLE_UA( 0b, .Lcopy_user_tail)
+ _ASM_EXTABLE_UA( 1b, .Lcopy_user_tail)
+
+ /* What would be the offset to the aligned destination? */
+ leaq 8(%rdi),%rax
+ andq $-8,%rax
+ subq %rdi,%rax
+
+ /* .. and update pointers and count to match */
+ addq %rax,%rdi
+ addq %rax,%rsi
+ subq %rax,%rcx
+
+ /* make %rcx contain the number of words, %rax the remainder */
movq %rcx,%rax
shrq $3,%rcx
andl $7,%eax
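
In C terms, the new prologue copies one word at the original (possibly unaligned) destination, then rounds the destination up to the next 8-byte boundary and adjusts the source pointer and byte count to match before the word-at-a-time loop. A rough model, assuming at least one full word remains (the helper name and types are illustrative; the real code is the assembly above):

static inline size_t align_dst_after_head_sketch(unsigned char **dst,
						 const unsigned char **src,
						 size_t len)
{
	/* 1..8: distance from *dst up to the next 8-byte boundary */
	size_t delta = (((unsigned long)*dst + 8) & ~7UL) - (unsigned long)*dst;

	/* The word already stored at *dst covers these delta bytes. */
	*dst += delta;
	*src += delta;
	return len - delta;
}
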
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 36a017b4a30d..7c4f6f591f2b 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -959,9 +959,18 @@ int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
ret = __add_pages(nid, start_pfn, nr_pages, params);
WARN_ON_ONCE(ret);
- /* update max_pfn, max_low_pfn and high_memory */
- update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
- nr_pages << PAGE_SHIFT);
+ /*
+ * Special case: add_pages() is called by memremap_pages() for adding device
+ * private pages. Do not bump up max_pfn in the device private path,
+ * because max_pfn changes affect dma_addressing_limited().
+ *
+ * dma_addressing_limited() returning true when max_pfn is the device's
+ * addressable memory can force device drivers to use bounce buffers
+ * and impact their performance negatively:
+ */
+ if (!params->pgmap)
+ /* update max_pfn, max_low_pfn and high_memory */
+ update_end_of_memory_vars(start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT);
return ret;
}
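
The reasoning above leans on how the DMA layer uses max_pfn: a device whose mask cannot cover the highest populated page frame is treated as addressing-limited and may be forced through bounce buffers. An illustrative model of that comparison (not the actual dma_addressing_limited() implementation):

static bool addressing_limited_sketch(u64 dev_dma_mask, unsigned long max_pfn)
{
	u64 highest_addr = ((u64)max_pfn << PAGE_SHIFT) - 1;

	/* Device-private pages are never DMA targets for other devices,
	 * so letting them raise max_pfn only creates false positives here. */
	return dev_dma_mask < highest_addr;
}
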
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 72405d315b41..def3d9284254 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -2274,6 +2274,7 @@ int set_mce_nospec(unsigned long pfn)
pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
return rc;
}
+EXPORT_SYMBOL_GPL(set_mce_nospec);
/* Restore full speculative operation to the pfn. */
int clear_mce_nospec(unsigned long pfn)
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index cec321fb74f2..a05fcddfc811 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -20,7 +20,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm)
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
paravirt_release_pte(page_to_pfn(pte));
- tlb_remove_table(tlb, page_ptdesc(pte));
+ tlb_remove_ptdesc(tlb, page_ptdesc(pte));
}
#if CONFIG_PGTABLE_LEVELS > 2
@@ -34,21 +34,21 @@ void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
#ifdef CONFIG_X86_PAE
tlb->need_flush_all = 1;
#endif
- tlb_remove_table(tlb, virt_to_ptdesc(pmd));
+ tlb_remove_ptdesc(tlb, virt_to_ptdesc(pmd));
}
#if CONFIG_PGTABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
- tlb_remove_table(tlb, virt_to_ptdesc(pud));
+ tlb_remove_ptdesc(tlb, virt_to_ptdesc(pud));
}
#if CONFIG_PGTABLE_LEVELS > 4
void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d)
{
paravirt_release_p4d(__pa(p4d) >> PAGE_SHIFT);
- tlb_remove_table(tlb, virt_to_ptdesc(p4d));
+ tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
}
#endif /* CONFIG_PGTABLE_LEVELS > 4 */
#endif /* CONFIG_PGTABLE_LEVELS > 3 */
diff --git a/arch/x86/platform/intel/iosf_mbi.c b/arch/x86/platform/intel/iosf_mbi.c
index c81cea208c2c..40ae94db20d8 100644
--- a/arch/x86/platform/intel/iosf_mbi.c
+++ b/arch/x86/platform/intel/iosf_mbi.c
@@ -422,19 +422,6 @@ int iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
}
EXPORT_SYMBOL(iosf_mbi_unregister_pmic_bus_access_notifier_unlocked);
-int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb)
-{
- int ret;
-
- /* Wait for the bus to go inactive before unregistering */
- iosf_mbi_punit_acquire();
- ret = iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(nb);
- iosf_mbi_punit_release();
-
- return ret;
-}
-EXPORT_SYMBOL(iosf_mbi_unregister_pmic_bus_access_notifier);
-
void iosf_mbi_assert_punit_acquired(void)
{
WARN_ON(iosf_mbi_pmic_punit_access_count == 0);
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 63230ff8cf4f..08e76a5ca155 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -27,6 +27,7 @@
#include <asm/mmu_context.h>
#include <asm/cpu_device_id.h>
#include <asm/microcode.h>
+#include <asm/fred.h>
#ifdef CONFIG_X86_32
__visible unsigned long saved_context_ebx;
@@ -231,6 +232,19 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
*/
#ifdef CONFIG_X86_64
wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
+
+ /*
+ * Reinitialize FRED to ensure the FRED MSRs contain the same values
+ * as before hibernation.
+ *
+ * Note, the setup of FRED RSPs requires access to percpu data
+ * structures. Therefore, FRED reinitialization can only occur after
+ * the percpu access pointer (i.e., MSR_GS_BASE) is restored.
+ */
+ if (ctxt->cr4 & X86_CR4_FRED) {
+ cpu_init_fred_exceptions();
+ cpu_init_fred_rsps();
+ }
#else
loadsegment(fs, __KERNEL_PERCPU);
#endif
diff --git a/arch/x86/tools/insn_decoder_test.c b/arch/x86/tools/insn_decoder_test.c
index 6c2986d2ad11..08cd913cbd4e 100644
--- a/arch/x86/tools/insn_decoder_test.c
+++ b/arch/x86/tools/insn_decoder_test.c
@@ -12,8 +12,6 @@
#include <stdarg.h>
#include <linux/kallsyms.h>
-#define unlikely(cond) (cond)
-
#include <asm/insn.h>
#include <inat.c>
#include <insn.c>
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
index 4da336965698..b51aefd6ec2b 100644
--- a/arch/x86/um/asm/barrier.h
+++ b/arch/x86/um/asm/barrier.h
@@ -12,9 +12,9 @@
*/
#ifdef CONFIG_X86_32
-#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
-#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
-#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+#define mb() alternative("lock addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
+#define rmb() alternative("lock addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
+#define wmb() alternative("lock addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else /* CONFIG_X86_32 */
diff --git a/arch/x86/um/asm/module.h b/arch/x86/um/asm/module.h
deleted file mode 100644
index a3b061d66082..000000000000
--- a/arch/x86/um/asm/module.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __UM_MODULE_H
-#define __UM_MODULE_H
-
-/* UML is simple */
-struct mod_arch_specific
-{
-};
-
-#ifdef CONFIG_X86_32
-
-#define Elf_Shdr Elf32_Shdr
-#define Elf_Sym Elf32_Sym
-#define Elf_Ehdr Elf32_Ehdr
-
-#else
-
-#define Elf_Shdr Elf64_Shdr
-#define Elf_Sym Elf64_Sym
-#define Elf_Ehdr Elf64_Ehdr
-
-#endif
-
-#endif
diff --git a/arch/x86/um/os-Linux/mcontext.c b/arch/x86/um/os-Linux/mcontext.c
index e80ab7d28117..37decaa74761 100644
--- a/arch/x86/um/os-Linux/mcontext.c
+++ b/arch/x86/um/os-Linux/mcontext.c
@@ -4,6 +4,7 @@
#include <asm/ptrace.h>
#include <sysdep/ptrace.h>
#include <sysdep/mcontext.h>
+#include <arch.h>
void get_regs_from_mc(struct uml_pt_regs *regs, mcontext_t *mc)
{
@@ -27,7 +28,17 @@ void get_regs_from_mc(struct uml_pt_regs *regs, mcontext_t *mc)
COPY(RIP);
COPY2(EFLAGS, EFL);
COPY2(CS, CSGSFS);
- regs->gp[CS / sizeof(unsigned long)] &= 0xffff;
- regs->gp[CS / sizeof(unsigned long)] |= 3;
+ regs->gp[SS / sizeof(unsigned long)] = mc->gregs[REG_CSGSFS] >> 48;
+#endif
+}
+
+void mc_set_rip(void *_mc, void *target)
+{
+ mcontext_t *mc = _mc;
+
+#ifdef __i386__
+ mc->gregs[REG_EIP] = (unsigned long)target;
+#else
+ mc->gregs[REG_RIP] = (unsigned long)target;
#endif
}
diff --git a/arch/x86/um/shared/sysdep/faultinfo_32.h b/arch/x86/um/shared/sysdep/faultinfo_32.h
index b6f2437ec29c..ab5c8e47049c 100644
--- a/arch/x86/um/shared/sysdep/faultinfo_32.h
+++ b/arch/x86/um/shared/sysdep/faultinfo_32.h
@@ -29,4 +29,16 @@ struct faultinfo {
#define PTRACE_FULL_FAULTINFO 0
+#define ___backtrack_faulted(_faulted) \
+ asm volatile ( \
+ "mov $0, %0\n" \
+ "movl $__get_kernel_nofault_faulted_%=,%1\n" \
+ "jmp _end_%=\n" \
+ "__get_kernel_nofault_faulted_%=:\n" \
+ "mov $1, %0;" \
+ "_end_%=:" \
+ : "=r" (_faulted), \
+ "=m" (current->thread.segv_continue) :: \
+ )
+
#endif
diff --git a/arch/x86/um/shared/sysdep/faultinfo_64.h b/arch/x86/um/shared/sysdep/faultinfo_64.h
index ee88f88974ea..26fb4835d3e9 100644
--- a/arch/x86/um/shared/sysdep/faultinfo_64.h
+++ b/arch/x86/um/shared/sysdep/faultinfo_64.h
@@ -29,4 +29,16 @@ struct faultinfo {
#define PTRACE_FULL_FAULTINFO 1
+#define ___backtrack_faulted(_faulted) \
+ asm volatile ( \
+ "mov $0, %0\n" \
+ "movq $__get_kernel_nofault_faulted_%=,%1\n" \
+ "jmp _end_%=\n" \
+ "__get_kernel_nofault_faulted_%=:\n" \
+ "mov $1, %0;" \
+ "_end_%=:" \
+ : "=r" (_faulted), \
+ "=m" (current->thread.segv_continue) :: \
+ )
+
#endif
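
Taken together with mc_set_rip() added above, the intended flow seems to be: ___backtrack_faulted() records a recovery label in current->thread.segv_continue before a nofault access, and the UML SIGSEGV handler, on a fault in that window, rewrites the saved instruction pointer so execution resumes at the label instead of dying. Only mc_set_rip() and thread.segv_continue come from this patch; the handler shape below is an illustrative sketch:

static void segv_recover_sketch(int sig, siginfo_t *si, void *uc)
{
	mcontext_t *mc = &((ucontext_t *)uc)->uc_mcontext;

	if (current->thread.segv_continue) {
		/* Jump to the __get_kernel_nofault_faulted_* label. */
		mc_set_rip(mc, current->thread.segv_continue);
		current->thread.segv_continue = NULL;
		return;
	}
	/* ... otherwise fall through to normal fault handling ... */
}
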
diff --git a/arch/x86/um/vdso/vma.c b/arch/x86/um/vdso/vma.c
index f238f7b33cdd..dc8dfb2abd80 100644
--- a/arch/x86/um/vdso/vma.c
+++ b/arch/x86/um/vdso/vma.c
@@ -12,33 +12,22 @@
static unsigned int __read_mostly vdso_enabled = 1;
unsigned long um_vdso_addr;
+static struct page *um_vdso;
extern unsigned long task_size;
extern char vdso_start[], vdso_end[];
-static struct page **vdsop;
-
static int __init init_vdso(void)
{
- struct page *um_vdso;
-
BUG_ON(vdso_end - vdso_start > PAGE_SIZE);
um_vdso_addr = task_size - PAGE_SIZE;
- vdsop = kmalloc(sizeof(struct page *), GFP_KERNEL);
- if (!vdsop)
- goto oom;
-
um_vdso = alloc_page(GFP_KERNEL);
- if (!um_vdso) {
- kfree(vdsop);
-
+ if (!um_vdso)
goto oom;
- }
copy_page(page_address(um_vdso), vdso_start);
- *vdsop = um_vdso;
return 0;
@@ -56,6 +45,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
struct mm_struct *mm = current->mm;
static struct vm_special_mapping vdso_mapping = {
.name = "[vdso]",
+ .pages = &um_vdso,
};
if (!vdso_enabled)
@@ -64,7 +54,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
if (mmap_write_lock_killable(mm))
return -EINTR;
- vdso_mapping.pages = vdsop;
vma = _install_special_mapping(mm, um_vdso_addr, PAGE_SIZE,
VM_READ|VM_EXEC|
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
index abec44b687df..8b95221375a8 100644
--- a/arch/xtensa/platforms/iss/console.c
+++ b/arch/xtensa/platforms/iss/console.c
@@ -48,7 +48,7 @@ static int rs_open(struct tty_struct *tty, struct file * filp)
static void rs_close(struct tty_struct *tty, struct file * filp)
{
if (tty->count == 1)
- del_timer_sync(&serial_timer);
+ timer_delete_sync(&serial_timer);
}
diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
index e89f27f2bb18..c6d8c62695e1 100644
--- a/arch/xtensa/platforms/iss/network.c
+++ b/arch/xtensa/platforms/iss/network.c
@@ -375,7 +375,7 @@ static int iss_net_close(struct net_device *dev)
struct iss_net_private *lp = netdev_priv(dev);
netif_stop_queue(dev);
- del_timer_sync(&lp->timer);
+ timer_delete_sync(&lp->timer);
lp->tp.net_ops->close(lp);
return 0;