Diffstat (limited to 'include/asm-generic')
36 files changed, 597 insertions, 1011 deletions
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild index 8675b7b4ad23..2c53a1e0b760 100644 --- a/include/asm-generic/Kbuild +++ b/include/asm-generic/Kbuild @@ -32,6 +32,7 @@ mandatory-y += irq_work.h mandatory-y += kdebug.h mandatory-y += kmap_size.h mandatory-y += kprobes.h +mandatory-y += kvm_types.h mandatory-y += linkage.h mandatory-y += local.h mandatory-y += local64.h @@ -59,8 +60,8 @@ mandatory-y += tlbflush.h mandatory-y += topology.h mandatory-y += trace_clock.h mandatory-y += uaccess.h +mandatory-y += unwind_user.h mandatory-y += vermagic.h mandatory-y += vga.h mandatory-y += video.h mandatory-y += word-at-a-time.h -mandatory-y += xor.h diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h index 100d24b02e52..f22ccfc0df98 100644 --- a/include/asm-generic/atomic64.h +++ b/include/asm-generic/atomic64.h @@ -10,7 +10,7 @@ #include <linux/types.h> typedef struct { - s64 counter; + s64 __aligned(sizeof(s64)) counter; } atomic64_t; #define ATOMIC64_INIT(i) { (i) } diff --git a/include/asm-generic/audit_change_attr.h b/include/asm-generic/audit_change_attr.h index cc840537885f..ddd90bbe40df 100644 --- a/include/asm-generic/audit_change_attr.h +++ b/include/asm-generic/audit_change_attr.h @@ -26,6 +26,9 @@ __NR_fremovexattr, __NR_fchownat, __NR_fchmodat, #endif +#ifdef __NR_fchmodat2 +__NR_fchmodat2, +#endif #ifdef __NR_chown32 __NR_chown32, __NR_fchown32, diff --git a/include/asm-generic/audit_read.h b/include/asm-generic/audit_read.h index 7bb7b5a83ae2..fb9991f53fb6 100644 --- a/include/asm-generic/audit_read.h +++ b/include/asm-generic/audit_read.h @@ -4,9 +4,15 @@ __NR_readlink, #endif __NR_quotactl, __NR_listxattr, +#ifdef __NR_listxattrat +__NR_listxattrat, +#endif __NR_llistxattr, __NR_flistxattr, __NR_getxattr, +#ifdef __NR_getxattrat +__NR_getxattrat, +#endif __NR_lgetxattr, __NR_fgetxattr, #ifdef __NR_readlinkat diff --git a/include/asm-generic/bitops/__ffs.h b/include/asm-generic/bitops/__ffs.h index 2d08c750c8a7..3a899c626fdc 100644 --- a/include/asm-generic/bitops/__ffs.h +++ b/include/asm-generic/bitops/__ffs.h @@ -10,7 +10,7 @@ * * Undefined if no bit exists, so code should check against 0 first. */ -static __always_inline unsigned int generic___ffs(unsigned long word) +static __always_inline __attribute_const__ unsigned int generic___ffs(unsigned long word) { unsigned int num = 0; diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h index e974ec932ec1..35f33780ca6c 100644 --- a/include/asm-generic/bitops/__fls.h +++ b/include/asm-generic/bitops/__fls.h @@ -10,7 +10,7 @@ * * Undefined if no set bit exists, so code should check against 0 first. */ -static __always_inline unsigned int generic___fls(unsigned long word) +static __always_inline __attribute_const__ unsigned int generic___fls(unsigned long word) { unsigned int num = BITS_PER_LONG - 1; diff --git a/include/asm-generic/bitops/builtin-__ffs.h b/include/asm-generic/bitops/builtin-__ffs.h index cf4b3d33bf96..d3c3f567045d 100644 --- a/include/asm-generic/bitops/builtin-__ffs.h +++ b/include/asm-generic/bitops/builtin-__ffs.h @@ -8,7 +8,7 @@ * * Undefined if no bit exists, so code should check against 0 first. 
*/ -static __always_inline unsigned int __ffs(unsigned long word) +static __always_inline __attribute_const__ unsigned int __ffs(unsigned long word) { return __builtin_ctzl(word); } diff --git a/include/asm-generic/bitops/builtin-__fls.h b/include/asm-generic/bitops/builtin-__fls.h index 6d72fc8a5259..7770c4f1bfcd 100644 --- a/include/asm-generic/bitops/builtin-__fls.h +++ b/include/asm-generic/bitops/builtin-__fls.h @@ -8,7 +8,7 @@ * * Undefined if no set bit exists, so code should check against 0 first. */ -static __always_inline unsigned int __fls(unsigned long word) +static __always_inline __attribute_const__ unsigned int __fls(unsigned long word) { return (sizeof(word) * 8) - 1 - __builtin_clzl(word); } diff --git a/include/asm-generic/bitops/builtin-fls.h b/include/asm-generic/bitops/builtin-fls.h index c8455cc28841..be707da8c7cd 100644 --- a/include/asm-generic/bitops/builtin-fls.h +++ b/include/asm-generic/bitops/builtin-fls.h @@ -9,7 +9,7 @@ * This is defined the same way as ffs. * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. */ -static __always_inline int fls(unsigned int x) +static __always_inline __attribute_const__ int fls(unsigned int x) { return x ? sizeof(x) * 8 - __builtin_clz(x) : 0; } diff --git a/include/asm-generic/bitops/ffs.h b/include/asm-generic/bitops/ffs.h index 4c43f242daeb..5ff2b7fbda6d 100644 --- a/include/asm-generic/bitops/ffs.h +++ b/include/asm-generic/bitops/ffs.h @@ -10,7 +10,7 @@ * the libc and compiler builtin ffs routines, therefore * differs in spirit from ffz (man ffs). */ -static inline int generic_ffs(int x) +static inline __attribute_const__ int generic_ffs(int x) { int r = 1; diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h index 26f3ce1dd6e4..8eed3437edb9 100644 --- a/include/asm-generic/bitops/fls.h +++ b/include/asm-generic/bitops/fls.h @@ -10,7 +10,7 @@ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. */ -static __always_inline int generic_fls(unsigned int x) +static __always_inline __attribute_const__ int generic_fls(unsigned int x) { int r = 32; diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h index 866f2b2304ff..b5f58dd261a3 100644 --- a/include/asm-generic/bitops/fls64.h +++ b/include/asm-generic/bitops/fls64.h @@ -16,7 +16,7 @@ * at position 64. */ #if BITS_PER_LONG == 32 -static __always_inline int fls64(__u64 x) +static __always_inline __attribute_const__ int fls64(__u64 x) { __u32 h = x >> 32; if (h) @@ -24,7 +24,7 @@ static __always_inline int fls64(__u64 x) return fls(x); } #elif BITS_PER_LONG == 64 -static __always_inline int fls64(__u64 x) +static __always_inline __attribute_const__ int fls64(__u64 x) { if (x == 0) return 0; diff --git a/include/asm-generic/bitops/instrumented-atomic.h b/include/asm-generic/bitops/instrumented-atomic.h index 4225a8ca9c1a..c010d54275e4 100644 --- a/include/asm-generic/bitops/instrumented-atomic.h +++ b/include/asm-generic/bitops/instrumented-atomic.h @@ -100,4 +100,4 @@ static __always_inline bool test_and_change_bit(long nr, volatile unsigned long return arch_test_and_change_bit(nr, addr); } -#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */ +#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H */ diff --git a/include/asm-generic/bitsperlong.h b/include/asm-generic/bitsperlong.h index 1023e2a4bd37..90e8aeebfd2f 100644 --- a/include/asm-generic/bitsperlong.h +++ b/include/asm-generic/bitsperlong.h @@ -19,6 +19,15 @@ #error Inconsistent word size. 
Check asm/bitsperlong.h #endif +#if __CHAR_BIT__ * __SIZEOF_LONG__ != __BITS_PER_LONG +#error Inconsistent word size. Check asm/bitsperlong.h +#endif + +#ifndef __ASSEMBLER__ +_Static_assert(sizeof(long) * 8 == __BITS_PER_LONG, + "Inconsistent word size. Check asm/bitsperlong.h"); +#endif + #ifndef BITS_PER_LONG_LONG #define BITS_PER_LONG_LONG 64 #endif diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index 387720933973..09e8eccee8ed 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h @@ -13,10 +13,19 @@ #define BUGFLAG_ONCE (1 << 1) #define BUGFLAG_DONE (1 << 2) #define BUGFLAG_NO_CUT_HERE (1 << 3) /* CUT_HERE already sent */ +#define BUGFLAG_ARGS (1 << 4) #define BUGFLAG_TAINT(taint) ((taint) << 8) #define BUG_GET_TAINT(bug) ((bug)->flags >> 8) #endif +#ifndef WARN_CONDITION_STR +#ifdef CONFIG_DEBUG_BUGVERBOSE_DETAILED +# define WARN_CONDITION_STR(cond_str) "[" cond_str "] " +#else +# define WARN_CONDITION_STR(cond_str) +#endif +#endif /* WARN_CONDITION_STR */ + #ifndef __ASSEMBLY__ #include <linux/panic.h> #include <linux/printk.h> @@ -29,19 +38,20 @@ void __warn(const char *file, int line, void *caller, unsigned taint, #ifdef CONFIG_BUG -#ifdef CONFIG_GENERIC_BUG -struct bug_entry { #ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS - unsigned long bug_addr; +#define BUG_REL(type, name) type name #else - signed int bug_addr_disp; +#define BUG_REL(type, name) signed int name##_disp #endif -#ifdef CONFIG_DEBUG_BUGVERBOSE -#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS - const char *file; -#else - signed int file_disp; + +#ifdef CONFIG_GENERIC_BUG +struct bug_entry { + BUG_REL(unsigned long, bug_addr); +#ifdef HAVE_ARCH_BUG_FORMAT + BUG_REL(const char *, format); #endif +#ifdef CONFIG_DEBUG_BUGVERBOSE + BUG_REL(const char *, file); unsigned short line; #endif unsigned short flags; @@ -92,28 +102,50 @@ void warn_slowpath_fmt(const char *file, const int line, unsigned taint, const char *fmt, ...); extern __printf(1, 2) void __warn_printk(const char *fmt, ...); -#ifndef __WARN_FLAGS -#define __WARN() __WARN_printf(TAINT_WARN, NULL) +#ifdef __WARN_FLAGS +#define __WARN() __WARN_FLAGS("", BUGFLAG_TAINT(TAINT_WARN)) + +#ifndef WARN_ON +#define WARN_ON(condition) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN_FLAGS(#condition, \ + BUGFLAG_TAINT(TAINT_WARN)); \ + unlikely(__ret_warn_on); \ +}) +#endif + +#ifndef WARN_ON_ONCE +#define WARN_ON_ONCE(condition) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN_FLAGS(#condition, \ + BUGFLAG_ONCE | \ + BUGFLAG_TAINT(TAINT_WARN)); \ + unlikely(__ret_warn_on); \ +}) +#endif +#endif /* __WARN_FLAGS */ + +#if defined(__WARN_FLAGS) && !defined(__WARN_printf) #define __WARN_printf(taint, arg...) do { \ instrumentation_begin(); \ - warn_slowpath_fmt(__FILE__, __LINE__, taint, arg); \ + __warn_printk(arg); \ + __WARN_FLAGS("", BUGFLAG_NO_CUT_HERE | BUGFLAG_TAINT(taint));\ instrumentation_end(); \ } while (0) -#else -#define __WARN() __WARN_FLAGS(BUGFLAG_TAINT(TAINT_WARN)) +#endif + +#ifndef __WARN_printf #define __WARN_printf(taint, arg...) 
do { \ instrumentation_begin(); \ - __warn_printk(arg); \ - __WARN_FLAGS(BUGFLAG_NO_CUT_HERE | BUGFLAG_TAINT(taint));\ + warn_slowpath_fmt(__FILE__, __LINE__, taint, arg); \ instrumentation_end(); \ } while (0) -#define WARN_ON_ONCE(condition) ({ \ - int __ret_warn_on = !!(condition); \ - if (unlikely(__ret_warn_on)) \ - __WARN_FLAGS(BUGFLAG_ONCE | \ - BUGFLAG_TAINT(TAINT_WARN)); \ - unlikely(__ret_warn_on); \ -}) +#endif + +#ifndef __WARN +#define __WARN() __WARN_printf(TAINT_WARN, NULL) #endif /* used internally by panic.c */ @@ -148,8 +180,10 @@ extern __printf(1, 2) void __warn_printk(const char *fmt, ...); DO_ONCE_LITE_IF(condition, WARN_ON, 1) #endif +#ifndef WARN_ONCE #define WARN_ONCE(condition, format...) \ DO_ONCE_LITE_IF(condition, WARN, 1, format) +#endif #define WARN_TAINT_ONCE(condition, taint, format...) \ DO_ONCE_LITE_IF(condition, WARN_TAINT, 1, taint, format) diff --git a/include/asm-generic/codetag.lds.h b/include/asm-generic/codetag.lds.h index 372c320c5043..4948e5d4e9d9 100644 --- a/include/asm-generic/codetag.lds.h +++ b/include/asm-generic/codetag.lds.h @@ -2,6 +2,12 @@ #ifndef __ASM_GENERIC_CODETAG_LDS_H #define __ASM_GENERIC_CODETAG_LDS_H +#ifdef CONFIG_MEM_ALLOC_PROFILING +#define IF_MEM_ALLOC_PROFILING(...) __VA_ARGS__ +#else +#define IF_MEM_ALLOC_PROFILING(...) +#endif + #define SECTION_WITH_BOUNDARIES(_name) \ . = ALIGN(8); \ __start_##_name = .; \ @@ -9,16 +15,10 @@ __stop_##_name = .; #define CODETAG_SECTIONS() \ - SECTION_WITH_BOUNDARIES(alloc_tags) - -/* - * Module codetags which aren't used after module unload, therefore have the - * same lifespan as the module and can be safely unloaded with the module. - */ -#define MOD_CODETAG_SECTIONS() + IF_MEM_ALLOC_PROFILING(SECTION_WITH_BOUNDARIES(alloc_tags)) #define MOD_SEPARATE_CODETAG_SECTION(_name) \ - .codetag.##_name : { \ + .codetag.##_name 0 : { \ SECTION_WITH_BOUNDARIES(_name) \ } @@ -28,6 +28,6 @@ * unload them individually once unused. */ #define MOD_SEPARATE_CODETAG_SECTIONS() \ - MOD_SEPARATE_CODETAG_SECTION(alloc_tags) + IF_MEM_ALLOC_PROFILING(MOD_SEPARATE_CODETAG_SECTION(alloc_tags)) #endif /* __ASM_GENERIC_CODETAG_LDS_H */ diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h index 2a19215baae5..fbbcfd801cd0 100644 --- a/include/asm-generic/futex.h +++ b/include/asm-generic/futex.h @@ -25,7 +25,9 @@ * argument and comparison of the previous * futex value with another constant. 
* - * @encoded_op: encoded operation to execute + * @op: operation to execute + * @oparg: argument of the operation + * @oval: previous value at @uaddr on successful return * @uaddr: pointer to user space address * * Return: diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h index 2afc95bf1655..e1a2e1b7c8e7 100644 --- a/include/asm-generic/hugetlb.h +++ b/include/asm-generic/hugetlb.h @@ -5,11 +5,6 @@ #include <linux/swap.h> #include <linux/swapops.h> -static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot) -{ - return mk_pte(page, pgprot); -} - static inline unsigned long huge_pte_write(pte_t pte) { return pte_write(pte); @@ -71,15 +66,6 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, } #endif -#ifndef __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE -static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb, - unsigned long addr, unsigned long end, - unsigned long floor, unsigned long ceiling) -{ - free_pgd_range(tlb, addr, end, floor, ceiling); -} -#endif - #ifndef __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte, unsigned long sz) @@ -111,22 +97,6 @@ static inline int huge_pte_none(pte_t pte) } #endif -/* Please refer to comments above pte_none_mostly() for the usage */ -#ifndef __HAVE_ARCH_HUGE_PTE_NONE_MOSTLY -static inline int huge_pte_none_mostly(pte_t pte) -{ - return huge_pte_none(pte) || is_pte_marker(pte); -} -#endif - -#ifndef __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE -static inline int prepare_hugepage_range(struct file *file, - unsigned long addr, unsigned long len) -{ - return 0; -} -#endif - #ifndef __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index 11abad6c87e1..ca5a1ce6f0f8 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -75,6 +75,7 @@ #if IS_ENABLED(CONFIG_TRACE_MMIO_ACCESS) && !(defined(__DISABLE_TRACE_MMIO__)) #include <linux/tracepoint-defs.h> +#define rwmmio_tracepoint_enabled(tracepoint) tracepoint_enabled(tracepoint) DECLARE_TRACEPOINT(rwmmio_write); DECLARE_TRACEPOINT(rwmmio_post_write); DECLARE_TRACEPOINT(rwmmio_read); @@ -91,6 +92,7 @@ void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr, #else +#define rwmmio_tracepoint_enabled(tracepoint) false static inline void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr, unsigned long caller_addr, unsigned long caller_addr0) {} static inline void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr, @@ -189,11 +191,13 @@ static inline u8 readb(const volatile void __iomem *addr) { u8 val; - log_read_mmio(8, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_read)) + log_read_mmio(8, addr, _THIS_IP_, _RET_IP_); __io_br(); val = __raw_readb(addr); __io_ar(val); - log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_read)) + log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_); return val; } #endif @@ -204,11 +208,13 @@ static inline u16 readw(const volatile void __iomem *addr) { u16 val; - log_read_mmio(16, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_read)) + log_read_mmio(16, addr, _THIS_IP_, _RET_IP_); __io_br(); val = __le16_to_cpu((__le16 __force)__raw_readw(addr)); __io_ar(val); - log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_); + if 
(rwmmio_tracepoint_enabled(rwmmio_post_read)) + log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_); return val; } #endif @@ -219,11 +225,13 @@ static inline u32 readl(const volatile void __iomem *addr) { u32 val; - log_read_mmio(32, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_read)) + log_read_mmio(32, addr, _THIS_IP_, _RET_IP_); __io_br(); val = __le32_to_cpu((__le32 __force)__raw_readl(addr)); __io_ar(val); - log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_read)) + log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_); return val; } #endif @@ -235,11 +243,13 @@ static inline u64 readq(const volatile void __iomem *addr) { u64 val; - log_read_mmio(64, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_read)) + log_read_mmio(64, addr, _THIS_IP_, _RET_IP_); __io_br(); val = __le64_to_cpu((__le64 __force)__raw_readq(addr)); __io_ar(val); - log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_read)) + log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_); return val; } #endif @@ -249,11 +259,13 @@ static inline u64 readq(const volatile void __iomem *addr) #define writeb writeb static inline void writeb(u8 value, volatile void __iomem *addr) { - log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_write)) + log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_); __io_bw(); __raw_writeb(value, addr); __io_aw(); - log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_write)) + log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_); } #endif @@ -261,11 +273,13 @@ static inline void writeb(u8 value, volatile void __iomem *addr) #define writew writew static inline void writew(u16 value, volatile void __iomem *addr) { - log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_write)) + log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_); __io_bw(); __raw_writew((u16 __force)cpu_to_le16(value), addr); __io_aw(); - log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_write)) + log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_); } #endif @@ -273,11 +287,13 @@ static inline void writew(u16 value, volatile void __iomem *addr) #define writel writel static inline void writel(u32 value, volatile void __iomem *addr) { - log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_write)) + log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_); __io_bw(); __raw_writel((u32 __force)__cpu_to_le32(value), addr); __io_aw(); - log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_write)) + log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_); } #endif @@ -286,11 +302,13 @@ static inline void writel(u32 value, volatile void __iomem *addr) #define writeq writeq static inline void writeq(u64 value, volatile void __iomem *addr) { - log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_write)) + log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_); __io_bw(); __raw_writeq((u64 __force)__cpu_to_le64(value), addr); __io_aw(); - log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_write)) + log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_); } #endif #endif /* CONFIG_64BIT */ @@ -306,9 +324,11 @@ static inline u8 
readb_relaxed(const volatile void __iomem *addr) { u8 val; - log_read_mmio(8, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_read)) + log_read_mmio(8, addr, _THIS_IP_, _RET_IP_); val = __raw_readb(addr); - log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_read)) + log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_); return val; } #endif @@ -319,9 +339,11 @@ static inline u16 readw_relaxed(const volatile void __iomem *addr) { u16 val; - log_read_mmio(16, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_read)) + log_read_mmio(16, addr, _THIS_IP_, _RET_IP_); val = __le16_to_cpu((__le16 __force)__raw_readw(addr)); - log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_read)) + log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_); return val; } #endif @@ -332,9 +354,11 @@ static inline u32 readl_relaxed(const volatile void __iomem *addr) { u32 val; - log_read_mmio(32, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_read)) + log_read_mmio(32, addr, _THIS_IP_, _RET_IP_); val = __le32_to_cpu((__le32 __force)__raw_readl(addr)); - log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_read)) + log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_); return val; } #endif @@ -345,9 +369,11 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr) { u64 val; - log_read_mmio(64, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_read)) + log_read_mmio(64, addr, _THIS_IP_, _RET_IP_); val = __le64_to_cpu((__le64 __force)__raw_readq(addr)); - log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_read)) + log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_); return val; } #endif @@ -356,9 +382,11 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr) #define writeb_relaxed writeb_relaxed static inline void writeb_relaxed(u8 value, volatile void __iomem *addr) { - log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_write)) + log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_); __raw_writeb(value, addr); - log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_write)) + log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_); } #endif @@ -366,9 +394,11 @@ static inline void writeb_relaxed(u8 value, volatile void __iomem *addr) #define writew_relaxed writew_relaxed static inline void writew_relaxed(u16 value, volatile void __iomem *addr) { - log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_write)) + log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_); __raw_writew((u16 __force)cpu_to_le16(value), addr); - log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_write)) + log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_); } #endif @@ -376,9 +406,11 @@ static inline void writew_relaxed(u16 value, volatile void __iomem *addr) #define writel_relaxed writel_relaxed static inline void writel_relaxed(u32 value, volatile void __iomem *addr) { - log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_write)) + log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_); __raw_writel((u32 __force)__cpu_to_le32(value), addr); - log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_); + if 
(rwmmio_tracepoint_enabled(rwmmio_post_write)) + log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_); } #endif @@ -386,9 +418,11 @@ static inline void writel_relaxed(u32 value, volatile void __iomem *addr) #define writeq_relaxed writeq_relaxed static inline void writeq_relaxed(u64 value, volatile void __iomem *addr) { - log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_write)) + log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_); __raw_writeq((u64 __force)__cpu_to_le64(value), addr); - log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_); + if (rwmmio_tracepoint_enabled(rwmmio_post_write)) + log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_); } #endif diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h index a3b5029aebbd..efa6610acbc7 100644 --- a/include/asm-generic/memory_model.h +++ b/include/asm-generic/memory_model.h @@ -30,7 +30,15 @@ static inline int pfn_valid(unsigned long pfn) return pfn >= pfn_offset && (pfn - pfn_offset) < max_mapnr; } #define pfn_valid pfn_valid -#endif + +#ifndef for_each_valid_pfn +#define for_each_valid_pfn(pfn, start_pfn, end_pfn) \ + for ((pfn) = max_t(unsigned long, (start_pfn), ARCH_PFN_OFFSET); \ + (pfn) < min_t(unsigned long, (end_pfn), \ + ARCH_PFN_OFFSET + max_mapnr); \ + (pfn)++) +#endif /* for_each_valid_pfn */ +#endif /* valid_pfn */ #elif defined(CONFIG_SPARSEMEM_VMEMMAP) @@ -45,7 +53,7 @@ static inline int pfn_valid(unsigned long pfn) */ #define __page_to_pfn(pg) \ ({ const struct page *__pg = (pg); \ - int __sec = page_to_section(__pg); \ + int __sec = memdesc_section(__pg->flags); \ (unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec))); \ }) diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h index ccccb1cbf7df..d37b68238c97 100644 --- a/include/asm-generic/mshyperv.h +++ b/include/asm-generic/mshyperv.h @@ -31,6 +31,7 @@ enum hv_partition_type { HV_PARTITION_TYPE_GUEST, HV_PARTITION_TYPE_ROOT, + HV_PARTITION_TYPE_L1VH, }; struct ms_hyperv_info { @@ -61,6 +62,8 @@ struct ms_hyperv_info { }; }; u64 shared_gpa_boundary; + bool msi_ext_dest_id; + bool confidential_vmbus_available; }; extern struct ms_hyperv_info ms_hyperv; extern bool hv_nested; @@ -123,10 +126,12 @@ static inline unsigned int hv_repcomp(u64 status) /* * Rep hypercalls. Callers of this functions are supposed to ensure that - * rep_count and varhead_size comply with Hyper-V hypercall definition. + * rep_count, varhead_size, and rep_start comply with Hyper-V hypercall + * definition. 
*/ -static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size, - void *input, void *output) +static inline u64 hv_do_rep_hypercall_ex(u16 code, u16 rep_count, + u16 varhead_size, u16 rep_start, + void *input, void *output) { u64 control = code; u64 status; @@ -134,6 +139,7 @@ static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size, control |= (u64)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET; control |= (u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET; + control |= (u64)rep_start << HV_HYPERCALL_REP_START_OFFSET; do { status = hv_do_hypercall(control, input, output); @@ -151,6 +157,14 @@ static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size, return status; } +/* For the typical case where rep_start is 0 */ +static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size, + void *input, void *output) +{ + return hv_do_rep_hypercall_ex(code, rep_count, varhead_size, 0, + input, output); +} + /* Generate the guest OS identifier as described in the Hyper-V TLFS */ static inline u64 hv_generate_guest_id(u64 kernel_version) { @@ -162,41 +176,6 @@ static inline u64 hv_generate_guest_id(u64 kernel_version) return guest_id; } -/* Free the message slot and signal end-of-message if required */ -static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type) -{ - /* - * On crash we're reading some other CPU's message page and we need - * to be careful: this other CPU may already had cleared the header - * and the host may already had delivered some other message there. - * In case we blindly write msg->header.message_type we're going - * to lose it. We can still lose a message of the same type but - * we count on the fact that there can only be one - * CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages - * on crash. - */ - if (cmpxchg(&msg->header.message_type, old_msg_type, - HVMSG_NONE) != old_msg_type) - return; - - /* - * The cmxchg() above does an implicit memory barrier to - * ensure the write to MessageType (ie set to - * HVMSG_NONE) happens before we read the - * MessagePending and EOMing. Otherwise, the EOMing - * will not deliver any more messages since there is - * no empty slot - */ - if (msg->header.message_flags.msg_pending) { - /* - * This will cause message queue rescan to - * possibly deliver another msg from the - * hypervisor - */ - hv_set_msr(HV_MSR_EOM, 0); - } -} - int hv_get_hypervisor_version(union hv_hypervisor_version_info *info); void hv_setup_vmbus_handler(void (*handler)(void)); @@ -210,9 +189,6 @@ void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs)); void hv_remove_crash_handler(void); void hv_setup_mshv_handler(void (*handler)(void)); -extern int vmbus_interrupt; -extern int vmbus_irq; - #if IS_ENABLED(CONFIG_HYPERV) /* * Hypervisor's notion of virtual processor ID is different from @@ -236,10 +212,6 @@ int hv_common_cpu_init(unsigned int cpu); int hv_common_cpu_die(unsigned int cpu); void hv_identify_partition_type(void); -void *hv_alloc_hyperv_page(void); -void *hv_alloc_hyperv_zeroed_page(void); -void hv_free_hyperv_page(void *addr); - /** * hv_cpu_number_to_vp_number() - Map CPU to VP. 
* @cpu_number: CPU number in Linux terms @@ -337,6 +309,10 @@ bool hv_is_isolation_supported(void); bool hv_isolation_type_snp(void); u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size); u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2); +void hv_enable_coco_interrupt(unsigned int cpu, unsigned int vector, bool set); +void hv_para_set_sint_proxy(bool enable); +u64 hv_para_get_synic_register(unsigned int reg); +void hv_para_set_synic_register(unsigned int reg, u64 val); void hyperv_cleanup(void); bool hv_query_ext_cap(u64 cap_query); void hv_setup_dma_ops(struct device *dev, bool coherent); @@ -358,12 +334,30 @@ static inline bool hv_root_partition(void) { return hv_curr_partition_type == HV_PARTITION_TYPE_ROOT; } +static inline bool hv_l1vh_partition(void) +{ + return hv_curr_partition_type == HV_PARTITION_TYPE_L1VH; +} +static inline bool hv_parent_partition(void) +{ + return hv_root_partition() || hv_l1vh_partition(); +} + +bool hv_result_needs_memory(u64 status); +int hv_deposit_memory_node(int node, u64 partition_id, u64 status); int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages); int hv_call_add_logical_proc(int node, u32 lp_index, u32 acpi_id); int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags); #else /* CONFIG_MSHV_ROOT */ static inline bool hv_root_partition(void) { return false; } +static inline bool hv_l1vh_partition(void) { return false; } +static inline bool hv_parent_partition(void) { return false; } +static inline bool hv_result_needs_memory(u64 status) { return false; } +static inline int hv_deposit_memory_node(int node, u64 partition_id, u64 status) +{ + return -EOPNOTSUPP; +} static inline int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages) { return -EOPNOTSUPP; @@ -378,4 +372,15 @@ static inline int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u3 } #endif /* CONFIG_MSHV_ROOT */ +static inline int hv_deposit_memory(u64 partition_id, u64 status) +{ + return hv_deposit_memory_node(NUMA_NO_NODE, partition_id, status); +} + +#if IS_ENABLED(CONFIG_HYPERV_VTL_MODE) +u8 __init get_vtl(void); +#else +static inline u8 get_vtl(void) { return 0; } +#endif + #endif diff --git a/include/asm-generic/msi.h b/include/asm-generic/msi.h index 124c734ca5d9..92cca4b23f13 100644 --- a/include/asm-generic/msi.h +++ b/include/asm-generic/msi.h @@ -33,6 +33,7 @@ typedef struct msi_alloc_info { /* Device generating MSIs is proxying for another device */ #define MSI_ALLOC_FLAGS_PROXY_DEVICE (1UL << 0) +#define MSI_ALLOC_FLAGS_FIXED_MSG_DATA (1UL << 1) #define GENERIC_MSI_DOMAIN_OPS 1 diff --git a/include/asm-generic/param.h b/include/asm-generic/param.h index 8d3009dd28ff..8348c116aa3b 100644 --- a/include/asm-generic/param.h +++ b/include/asm-generic/param.h @@ -6,6 +6,6 @@ # undef HZ # define HZ CONFIG_HZ /* Internal kernel timer frequency */ -# define USER_HZ 100 /* some user interfaces are */ +# define USER_HZ __USER_HZ /* some user interfaces are */ # define CLOCKS_PER_SEC (USER_HZ) /* in "ticks" like times() */ #endif /* __ASM_GENERIC_PARAM_H */ diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h index 02aeca21479a..6628670bcb90 100644 --- a/include/asm-generic/percpu.h +++ b/include/asm-generic/percpu.h @@ -2,6 +2,8 @@ #ifndef _ASM_GENERIC_PERCPU_H_ #define _ASM_GENERIC_PERCPU_H_ +#ifndef __ASSEMBLER__ + #include <linux/compiler.h> #include <linux/threads.h> #include <linux/percpu-defs.h> @@ -557,4 +559,5 @@ do { \ this_cpu_generic_cmpxchg(pcp, oval, nval) #endif 
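The percpu.h change here wraps the header's C declarations in #ifndef __ASSEMBLER__ so that assembly files can include the header for its macro constants without the assembler seeing C code. A minimal sketch of the pattern, using a hypothetical header and symbols (EXAMPLE_SHIFT and example_scale() are illustrative, not from the patch):

/*
 * Dual-use header sketch: EXAMPLE_SHIFT is visible to both C and .S
 * files, while the C-only parts are hidden from the assembler.
 */
#ifndef _ASM_GENERIC_EXAMPLE_H_
#define _ASM_GENERIC_EXAMPLE_H_

#define EXAMPLE_SHIFT	3	/* usable from assembly too */

#ifndef __ASSEMBLER__
#include <linux/types.h>

static inline u64 example_scale(u64 x)
{
	return x << EXAMPLE_SHIFT;	/* C-only helper */
}
#endif /* __ASSEMBLER__ */

#endif /* _ASM_GENERIC_EXAMPLE_H_ */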
+#endif /* __ASSEMBLER__ */ #endif /* _ASM_GENERIC_PERCPU_H_ */ diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h index 892ece4558a2..57137d3ac159 100644 --- a/include/asm-generic/pgalloc.h +++ b/include/asm-generic/pgalloc.h @@ -18,11 +18,17 @@ */ static inline pte_t *__pte_alloc_one_kernel_noprof(struct mm_struct *mm) { - struct ptdesc *ptdesc = pagetable_alloc_noprof(GFP_PGTABLE_KERNEL & - ~__GFP_HIGHMEM, 0); + struct ptdesc *ptdesc = pagetable_alloc_noprof(GFP_PGTABLE_KERNEL, 0); if (!ptdesc) return NULL; + if (!pagetable_pte_ctor(mm, ptdesc)) { + pagetable_free(ptdesc); + return NULL; + } + + ptdesc_set_kernel(ptdesc); + return ptdesc_address(ptdesc); } #define __pte_alloc_one_kernel(...) alloc_hooks(__pte_alloc_one_kernel_noprof(__VA_ARGS__)) @@ -48,7 +54,7 @@ static inline pte_t *pte_alloc_one_kernel_noprof(struct mm_struct *mm) */ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) { - pagetable_free(virt_to_ptdesc(pte)); + pagetable_dtor_free(virt_to_ptdesc(pte)); } /** @@ -70,7 +76,7 @@ static inline pgtable_t __pte_alloc_one_noprof(struct mm_struct *mm, gfp_t gfp) ptdesc = pagetable_alloc_noprof(gfp, 0); if (!ptdesc) return NULL; - if (!pagetable_pte_ctor(ptdesc)) { + if (!pagetable_pte_ctor(mm, ptdesc)) { pagetable_free(ptdesc); return NULL; } @@ -137,10 +143,14 @@ static inline pmd_t *pmd_alloc_one_noprof(struct mm_struct *mm, unsigned long ad ptdesc = pagetable_alloc_noprof(gfp, 0); if (!ptdesc) return NULL; - if (!pagetable_pmd_ctor(ptdesc)) { + if (!pagetable_pmd_ctor(mm, ptdesc)) { pagetable_free(ptdesc); return NULL; } + + if (mm == &init_mm) + ptdesc_set_kernel(ptdesc); + return ptdesc_address(ptdesc); } #define pmd_alloc_one(...) alloc_hooks(pmd_alloc_one_noprof(__VA_ARGS__)) @@ -167,13 +177,16 @@ static inline pud_t *__pud_alloc_one_noprof(struct mm_struct *mm, unsigned long if (mm == &init_mm) gfp = GFP_PGTABLE_KERNEL; - gfp &= ~__GFP_HIGHMEM; ptdesc = pagetable_alloc_noprof(gfp, 0); if (!ptdesc) return NULL; pagetable_pud_ctor(ptdesc); + + if (mm == &init_mm) + ptdesc_set_kernel(ptdesc); + return ptdesc_address(ptdesc); } #define __pud_alloc_one(...) alloc_hooks(__pud_alloc_one_noprof(__VA_ARGS__)) @@ -221,13 +234,16 @@ static inline p4d_t *__p4d_alloc_one_noprof(struct mm_struct *mm, unsigned long if (mm == &init_mm) gfp = GFP_PGTABLE_KERNEL; - gfp &= ~__GFP_HIGHMEM; ptdesc = pagetable_alloc_noprof(gfp, 0); if (!ptdesc) return NULL; pagetable_p4d_ctor(ptdesc); + + if (mm == &init_mm) + ptdesc_set_kernel(ptdesc); + return ptdesc_address(ptdesc); } #define __p4d_alloc_one(...) alloc_hooks(__p4d_alloc_one_noprof(__VA_ARGS__)) @@ -265,13 +281,16 @@ static inline pgd_t *__pgd_alloc_noprof(struct mm_struct *mm, unsigned int order if (mm == &init_mm) gfp = GFP_PGTABLE_KERNEL; - gfp &= ~__GFP_HIGHMEM; ptdesc = pagetable_alloc_noprof(gfp, order); if (!ptdesc) return NULL; pagetable_pgd_ctor(ptdesc); + + if (mm == &init_mm) + ptdesc_set_kernel(ptdesc); + return ptdesc_address(ptdesc); } #define __pgd_alloc(...) alloc_hooks(__pgd_alloc_noprof(__VA_ARGS__)) diff --git a/include/asm-generic/pgtable_uffd.h b/include/asm-generic/pgtable_uffd.h index 828966d4c281..0d85791efdf7 100644 --- a/include/asm-generic/pgtable_uffd.h +++ b/include/asm-generic/pgtable_uffd.h @@ -1,6 +1,23 @@ #ifndef _ASM_GENERIC_PGTABLE_UFFD_H #define _ASM_GENERIC_PGTABLE_UFFD_H +/* + * Some platforms can customize the uffd-wp bit, making it unavailable + * even if the architecture provides the resource. 
+ * Adding this API allows architectures to add their own checks for the + * devices on which the kernel is running. + * Note: When overriding it, please make sure the + * CONFIG_HAVE_ARCH_USERFAULTFD_WP is part of this macro. + */ +#ifndef pgtable_supports_uffd_wp +#define pgtable_supports_uffd_wp() IS_ENABLED(CONFIG_HAVE_ARCH_USERFAULTFD_WP) +#endif + +static inline bool uffd_supports_wp_marker(void) +{ + return pgtable_supports_uffd_wp() && IS_ENABLED(CONFIG_PTE_MARKER_UFFD_WP); +} + #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP static __always_inline int pte_uffd_wp(pte_t pte) { diff --git a/include/asm-generic/rqspinlock.h b/include/asm-generic/rqspinlock.h index 6d4244d643df..151d267a496b 100644 --- a/include/asm-generic/rqspinlock.h +++ b/include/asm-generic/rqspinlock.h @@ -28,7 +28,7 @@ struct rqspinlock { */ struct bpf_res_spin_lock { u32 val; -}; +} __aligned(__alignof__(struct rqspinlock)); struct qspinlock; #ifdef CONFIG_QUEUED_SPINLOCKS @@ -129,8 +129,8 @@ dec: * <error> for lock B * release_held_lock_entry * - * try_cmpxchg_acquire for lock A * grab_held_lock_entry + * try_cmpxchg_acquire for lock A * * Lack of any ordering means reordering may occur such that dec, inc * are done before entry is overwritten. This permits a remote lock @@ -139,13 +139,8 @@ dec: * CPU holds a lock it is attempting to acquire, leading to false ABBA * diagnosis). * - * In case of unlock, we will always do a release on the lock word after - * releasing the entry, ensuring that other CPUs cannot hold the lock - * (and make conclusions about deadlocks) until the entry has been - * cleared on the local CPU, preventing any anomalies. Reordering is - * still possible there, but a remote CPU cannot observe a lock in our - * table which it is already holding, since visibility entails our - * release store for the said lock has not retired. + * The case of unlock is treated differently due to NMI reentrancy, see + * comments in res_spin_unlock. * * In theory we don't have a problem if the dec and WRITE_ONCE above get * reordered with each other, we either notice an empty NULL entry on @@ -175,16 +170,28 @@ static __always_inline int res_spin_lock(rqspinlock_t *lock) { int val = 0; - if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL))) { - grab_held_lock_entry(lock); + /* + * Grab the deadlock detection entry before doing the cmpxchg, so that + * reentrancy due to NMIs between the succeeding cmpxchg and creation of + * held lock entry can correctly detect an acquisition attempt in the + * interrupted context. + * + * cmpxchg lock A + * <NMI> + * res_spin_lock(A) --> missed AA, leads to timeout + * </NMI> + * grab_held_lock_entry(A) + */ + grab_held_lock_entry(lock); + + if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL))) return 0; - } return resilient_queued_spin_lock_slowpath(lock, val); } #else -#define res_spin_lock(lock) resilient_tas_spin_lock(lock) +#define res_spin_lock(lock) ({ grab_held_lock_entry(lock); resilient_tas_spin_lock(lock); }) #endif /* CONFIG_QUEUED_SPINLOCKS */ @@ -192,28 +199,25 @@ static __always_inline void res_spin_unlock(rqspinlock_t *lock) { struct rqspinlock_held *rqh = this_cpu_ptr(&rqspinlock_held_locks); - if (unlikely(rqh->cnt > RES_NR_HELD)) - goto unlock; - WRITE_ONCE(rqh->locks[rqh->cnt - 1], NULL); -unlock: /* - * Release barrier, ensures correct ordering. See release_held_lock_entry - * for details. Perform release store instead of queued_spin_unlock, - * since we use this function for test-and-set fallback as well. 
When we - * have CONFIG_QUEUED_SPINLOCKS=n, we clear the full 4-byte lockword. + * Release barrier, ensures correct ordering. Perform release store + * instead of queued_spin_unlock, since we use this function for the TAS + * fallback as well. When we have CONFIG_QUEUED_SPINLOCKS=n, we clear + * the full 4-byte lockword. * - * Like release_held_lock_entry, we can do the release before the dec. - * We simply care about not seeing the 'lock' in our table from a remote - * CPU once the lock has been released, which doesn't rely on the dec. + * Perform the smp_store_release before clearing the lock entry so that + * NMIs landing in the unlock path can correctly detect AA issues. The + * opposite order shown below may lead to missed AA checks: * - * Unlike smp_wmb(), release is not a two way fence, hence it is - * possible for a inc to move up and reorder with our clearing of the - * entry. This isn't a problem however, as for a misdiagnosis of ABBA, - * the remote CPU needs to hold this lock, which won't be released until - * the store below is done, which would ensure the entry is overwritten - * to NULL, etc. + * WRITE_ONCE(rqh->locks[rqh->cnt - 1], NULL) + * <NMI> + * res_spin_lock(A) --> missed AA, leads to timeout + * </NMI> + * smp_store_release(A->locked, 0) */ smp_store_release(&lock->locked, 0); + if (likely(rqh->cnt <= RES_NR_HELD)) + WRITE_ONCE(rqh->locks[rqh->cnt - 1], NULL); this_cpu_dec(rqspinlock_held_locks.cnt); } diff --git a/include/asm-generic/simd.h b/include/asm-generic/simd.h index d0343d58a74a..70c8716ad32a 100644 --- a/include/asm-generic/simd.h +++ b/include/asm-generic/simd.h @@ -1,6 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_SIMD_H +#define _ASM_GENERIC_SIMD_H -#include <linux/hardirq.h> +#include <linux/compiler_attributes.h> +#include <linux/preempt.h> +#include <linux/sched.h> +#include <linux/types.h> /* * may_use_simd - whether it is allowable at this time to issue SIMD @@ -13,3 +18,5 @@ static __must_check inline bool may_use_simd(void) { return !in_interrupt(); } + +#endif /* _ASM_GENERIC_SIMD_H */ diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h index 182b039ce5fa..c5a3ad53beec 100644 --- a/include/asm-generic/syscall.h +++ b/include/asm-generic/syscall.h @@ -38,6 +38,20 @@ struct pt_regs; int syscall_get_nr(struct task_struct *task, struct pt_regs *regs); /** + * syscall_set_nr - change the system call a task is executing + * @task: task of interest, must be blocked + * @regs: task_pt_regs() of @task + * @nr: system call number + * + * Changes the system call number @task is about to execute. + * + * It's only valid to call this when @task is stopped for tracing on + * entry to a system call, due to %SYSCALL_WORK_SYSCALL_TRACE or + * %SYSCALL_WORK_SYSCALL_AUDIT. + */ +void syscall_set_nr(struct task_struct *task, struct pt_regs *regs, int nr); + +/** * syscall_rollback - roll back registers after an aborted system call * @task: task of interest, must be in system call exit tracing * @regs: task_pt_regs() of @task @@ -118,6 +132,22 @@ void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, unsigned long *args); /** + * syscall_set_arguments - change system call parameter value + * @task: task of interest, must be in system call entry tracing + * @regs: task_pt_regs() of @task + * @args: array of argument values to store + * + * Changes 6 arguments to the system call. + * The first argument gets value @args[0], and so on. 
+ * + * It's only valid to call this when @task is stopped for tracing on + * entry to a system call, due to %SYSCALL_WORK_SYSCALL_TRACE or + * %SYSCALL_WORK_SYSCALL_AUDIT. + */ +void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, + const unsigned long *args); + +/** * syscall_get_arch - return the AUDIT_ARCH for the current system call * @task: task of interest, must be blocked * diff --git a/include/asm-generic/thread_info_tif.h b/include/asm-generic/thread_info_tif.h new file mode 100644 index 000000000000..528e6fc7efe9 --- /dev/null +++ b/include/asm-generic/thread_info_tif.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_THREAD_INFO_TIF_H_ +#define _ASM_GENERIC_THREAD_INFO_TIF_H_ + +#include <vdso/bits.h> + +/* Bits 16-31 are reserved for architecture specific purposes */ + +#define TIF_NOTIFY_RESUME 0 // callback before returning to user +#define _TIF_NOTIFY_RESUME BIT(TIF_NOTIFY_RESUME) + +#define TIF_SIGPENDING 1 // signal pending +#define _TIF_SIGPENDING BIT(TIF_SIGPENDING) + +#define TIF_NOTIFY_SIGNAL 2 // signal notifications exist +#define _TIF_NOTIFY_SIGNAL BIT(TIF_NOTIFY_SIGNAL) + +#define TIF_MEMDIE 3 // is terminating due to OOM killer +#define _TIF_MEMDIE BIT(TIF_MEMDIE) + +#define TIF_NEED_RESCHED 4 // rescheduling necessary +#define _TIF_NEED_RESCHED BIT(TIF_NEED_RESCHED) + +#ifdef HAVE_TIF_NEED_RESCHED_LAZY +# define TIF_NEED_RESCHED_LAZY 5 // Lazy rescheduling needed +# define _TIF_NEED_RESCHED_LAZY BIT(TIF_NEED_RESCHED_LAZY) +#endif + +#ifdef HAVE_TIF_POLLING_NRFLAG +# define TIF_POLLING_NRFLAG 6 // idle is polling for TIF_NEED_RESCHED +# define _TIF_POLLING_NRFLAG BIT(TIF_POLLING_NRFLAG) +#endif + +#define TIF_USER_RETURN_NOTIFY 7 // notify kernel of userspace return +#define _TIF_USER_RETURN_NOTIFY BIT(TIF_USER_RETURN_NOTIFY) + +#define TIF_UPROBE 8 // breakpointed or singlestepping +#define _TIF_UPROBE BIT(TIF_UPROBE) + +#define TIF_PATCH_PENDING 9 // pending live patching update +#define _TIF_PATCH_PENDING BIT(TIF_PATCH_PENDING) + +#ifdef HAVE_TIF_RESTORE_SIGMASK +# define TIF_RESTORE_SIGMASK 10 // Restore signal mask in do_signal() +# define _TIF_RESTORE_SIGMASK BIT(TIF_RESTORE_SIGMASK) +#endif + +#define TIF_RSEQ 11 // Run RSEQ fast path +#define _TIF_RSEQ BIT(TIF_RSEQ) + +#define TIF_HRTIMER_REARM 12 // re-arm the timer +#define _TIF_HRTIMER_REARM BIT(TIF_HRTIMER_REARM) + +#endif /* _ASM_GENERIC_THREAD_INFO_TIF_H_ */ diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 88a42973fa47..bdcc2778ac64 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -46,7 +46,8 @@ * * The mmu_gather API consists of: * - * - tlb_gather_mmu() / tlb_gather_mmu_fullmm() / tlb_finish_mmu() + * - tlb_gather_mmu() / tlb_gather_mmu_fullmm() / tlb_gather_mmu_vma() / + * tlb_finish_mmu() * * start and finish a mmu_gather * @@ -58,6 +59,11 @@ * Defaults to flushing at tlb_end_vma() to reset the range; helps when * there's large holes between the VMAs. * + * - tlb_free_vmas() + * + * tlb_free_vmas() marks the start of unlinking of one or more vmas + * and freeing page-tables. 
+ * * - tlb_remove_table() * * tlb_remove_table() is the basic primitive to free page-table directories @@ -207,7 +213,7 @@ struct mmu_table_batch { #define MAX_TABLE_BATCH \ ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *)) -#ifndef __HAVE_ARCH_TLB_REMOVE_TABLE +#ifndef CONFIG_HAVE_ARCH_TLB_REMOVE_TABLE static inline void __tlb_remove_table(void *table) { struct ptdesc *ptdesc = (struct ptdesc *)table; @@ -245,6 +251,8 @@ static inline void tlb_remove_table(struct mmu_gather *tlb, void *table) void tlb_remove_table_sync_one(void); +void tlb_remove_table_sync_rcu(void); + #else #ifdef tlb_needs_table_invalidate @@ -253,6 +261,8 @@ void tlb_remove_table_sync_one(void); static inline void tlb_remove_table_sync_one(void) { } +static inline void tlb_remove_table_sync_rcu(void) { } + #endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */ @@ -281,8 +291,7 @@ struct mmu_gather_batch { */ #define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH) -extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, - bool delay_rmap, int page_size); +extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size); bool __tlb_remove_folio_pages(struct mmu_gather *tlb, struct page *page, unsigned int nr_pages, bool delay_rmap); @@ -359,6 +368,20 @@ struct mmu_gather { unsigned int vma_huge : 1; unsigned int vma_pfn : 1; + /* + * Did we unshare (unmap) any shared page tables? For now only + * used for hugetlb PMD table sharing. + */ + unsigned int unshared_tables : 1; + + /* + * Did we unshare any page tables such that they are now exclusive + * and could get reused+modified by the new owner? When setting this + * flag, "unshared_tables" will be set as well. For now only used + * for hugetlb PMD table sharing. + */ + unsigned int fully_unshared_tables : 1; + unsigned int batch_count; #ifndef CONFIG_MMU_GATHER_NO_GATHER @@ -395,6 +418,7 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb) tlb->cleared_pmds = 0; tlb->cleared_puds = 0; tlb->cleared_p4ds = 0; + tlb->unshared_tables = 0; /* * Do not reset mmu_gather::vma_* fields here, we do not * call into tlb_start_vma() again to set them if there is an @@ -464,7 +488,12 @@ tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) */ tlb->vma_huge = is_vm_hugetlb_page(vma); tlb->vma_exec = !!(vma->vm_flags & VM_EXEC); - tlb->vma_pfn = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)); + + /* + * Track if there's at least one VM_PFNMAP/VM_MIXEDMAP vma + * in the tracked range, see tlb_free_vmas(). + */ + tlb->vma_pfn |= !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)); } static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) @@ -474,7 +503,7 @@ static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) * these bits. 
*/ if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds || - tlb->cleared_puds || tlb->cleared_p4ds)) + tlb->cleared_puds || tlb->cleared_p4ds || tlb->unshared_tables)) return; tlb_flush(tlb); @@ -484,7 +513,7 @@ static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) static inline void tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size) { - if (__tlb_remove_page_size(tlb, page, false, page_size)) + if (__tlb_remove_page_size(tlb, page, page_size)) tlb_flush_mmu(tlb); } @@ -548,22 +577,38 @@ static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct * static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { + if (tlb->fullmm || IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) + return; + + /* + * Do a TLB flush and reset the range at VMA boundaries; this avoids + * the ranges growing with the unused space between consecutive VMAs, + * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on + * this. + */ + tlb_flush_mmu_tlbonly(tlb); +} + +static inline void tlb_free_vmas(struct mmu_gather *tlb) +{ if (tlb->fullmm) return; /* * VM_PFNMAP is more fragile because the core mm will not track the - * page mapcount -- there might not be page-frames for these PFNs after - * all. Force flush TLBs for such ranges to avoid munmap() vs - * unmap_mapping_range() races. + * page mapcount -- there might not be page-frames for these PFNs + * after all. + * + * Specifically() there is a race between munmap() and + * unmap_mapping_range(), where munmap() will unlink the VMA, such + * that unmap_mapping_range() will no longer observe the VMA and + * no-op, without observing the TLBI, returning prematurely. + * + * So if we're about to unlink such a VMA, and we have pending + * TLBI for such a vma, flush things now. */ - if (tlb->vma_pfn || !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) { - /* - * Do a TLB flush and reset the range at VMA boundaries; this avoids - * the ranges growing with the unused space between consecutive VMAs. - */ + if (tlb->vma_pfn) tlb_flush_mmu_tlbonly(tlb); - } } /* @@ -747,6 +792,63 @@ static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd) } #endif +#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING +static inline void tlb_unshare_pmd_ptdesc(struct mmu_gather *tlb, struct ptdesc *pt, + unsigned long addr) +{ + /* + * The caller must make sure that concurrent unsharing + exclusive + * reuse is impossible until tlb_flush_unshared_tables() was called. + */ + VM_WARN_ON_ONCE(!ptdesc_pmd_is_shared(pt)); + ptdesc_pmd_pts_dec(pt); + + /* Clearing a PUD pointing at a PMD table with PMD leaves. */ + tlb_flush_pmd_range(tlb, addr & PUD_MASK, PUD_SIZE); + + /* + * If the page table is now exclusively owned, we fully unshared + * a page table. + */ + if (!ptdesc_pmd_is_shared(pt)) + tlb->fully_unshared_tables = true; + tlb->unshared_tables = true; +} + +static inline void tlb_flush_unshared_tables(struct mmu_gather *tlb) +{ + /* + * As soon as the caller drops locks to allow for reuse of + * previously-shared tables, these tables could get modified and + * even reused outside of hugetlb context, so we have to make sure that + * any page table walkers (incl. TLB, GUP-fast) are aware of that + * change. + * + * Even if we are not fully unsharing a PMD table, we must + * flush the TLB for the unsharer now. 
+ */ + if (tlb->unshared_tables) + tlb_flush_mmu_tlbonly(tlb); + + /* + * Similarly, we must make sure that concurrent GUP-fast will not + * walk previously-shared page tables that are getting modified+reused + * elsewhere. So broadcast an IPI to wait for any concurrent GUP-fast. + * + * We only perform this when we are the last sharer of a page table, + * as the IPI will reach all CPUs: any GUP-fast. + * + * Note that on configs where tlb_remove_table_sync_one() is a NOP, + * the expectation is that the tlb_flush_mmu_tlbonly() would have issued + * required IPIs already for us. + */ + if (tlb->fully_unshared_tables) { + tlb_remove_table_sync_one(); + tlb->fully_unshared_tables = false; + } +} +#endif /* CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */ + #endif /* CONFIG_MMU */ #endif /* _ASM_GENERIC__TLB_H */ diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h index 4dbe715be65b..9865ba48c5b1 100644 --- a/include/asm-generic/topology.h +++ b/include/asm-generic/topology.h @@ -45,11 +45,7 @@ #endif #ifndef cpumask_of_node - #ifdef CONFIG_NUMA - #define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask) - #else - #define cpumask_of_node(node) ((void)(node), cpu_online_mask) - #endif +#define cpumask_of_node(node) ((void)(node), cpu_online_mask) #endif #ifndef pcibus_to_node #define pcibus_to_node(bus) ((void)(bus), -1) @@ -61,7 +57,7 @@ cpumask_of_node(pcibus_to_node(bus))) #endif -#endif /* CONFIG_NUMA */ +#endif /* !CONFIG_NUMA */ #if !defined(CONFIG_NUMA) || !defined(CONFIG_HAVE_MEMORYLESS_NODES) diff --git a/include/asm-generic/unwind_user.h b/include/asm-generic/unwind_user.h new file mode 100644 index 000000000000..b8882b909944 --- /dev/null +++ b/include/asm-generic/unwind_user.h @@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_UNWIND_USER_H +#define _ASM_GENERIC_UNWIND_USER_H + +#endif /* _ASM_GENERIC_UNWIND_USER_H */ diff --git a/include/asm-generic/vdso/vsyscall.h b/include/asm-generic/vdso/vsyscall.h index b550afa15ecd..5c6d9799f4e7 100644 --- a/include/asm-generic/vdso/vsyscall.h +++ b/include/asm-generic/vdso/vsyscall.h @@ -4,8 +4,6 @@ #ifndef __ASSEMBLY__ -#ifdef CONFIG_GENERIC_VDSO_DATA_STORE - #ifndef __arch_get_vdso_u_time_data static __always_inline const struct vdso_time_data *__arch_get_vdso_u_time_data(void) { @@ -20,13 +18,11 @@ static __always_inline const struct vdso_rng_data *__arch_get_vdso_u_rng_data(vo } #endif -#endif /* CONFIG_GENERIC_VDSO_DATA_STORE */ - -#ifndef __arch_update_vsyscall -static __always_inline void __arch_update_vsyscall(struct vdso_time_data *vdata) +#ifndef __arch_update_vdso_clock +static __always_inline void __arch_update_vdso_clock(struct vdso_clock *vc) { } -#endif /* __arch_update_vsyscall */ +#endif /* __arch_update_vdso_clock */ #ifndef __arch_sync_vdso_time_data static __always_inline void __arch_sync_vdso_time_data(struct vdso_time_data *vdata) diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 58a635a6d5bd..60c8c22fd3e4 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -87,39 +87,56 @@ #define ALIGN_FUNCTION() . = ALIGN(CONFIG_FUNCTION_ALIGNMENT) /* - * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which - * generates .data.identifier sections, which need to be pulled in with - * .data. We don't want to pull in .data..other sections, which Linux - * has defined. Same for text and bss. 
+ * Support -ffunction-sections by matching .text and .text.*, + * but exclude '.text..*', .text.startup[.*], and .text.exit[.*]. * - * With LTO_CLANG, the linker also splits sections by default, so we need - * these macros to combine the sections during the final link. + * .text.startup and .text.startup.* are matched later by INIT_TEXT, and + * .text.exit and .text.exit.* are matched later by EXIT_TEXT, so they must be + * explicitly excluded here. * - * With AUTOFDO_CLANG and PROPELLER_CLANG, by default, the linker splits - * text sections and regroups functions into subsections. + * Other .text.* sections that are typically grouped separately, such as + * .text.unlikely or .text.hot, must be matched explicitly before using + * TEXT_MAIN. * - * RODATA_MAIN is not used because existing code already defines .rodata.x - * sections to be brought in with rodata. + * NOTE: builds *with* and *without* -ffunction-sections are both supported by + * this single macro. Even with -ffunction-sections, there may be some objects + * NOT compiled with the flag due to the use of a specific Makefile override + * like cflags-y or AUTOFDO_PROFILE_foo.o. So this single catchall rule is + * needed to support mixed object builds. + * + * One implication is that functions named startup(), exit(), split(), + * unlikely(), hot(), and unknown() are not allowed in the kernel due to the + * ambiguity of their section names with -ffunction-sections. For example, + * .text.startup could be __attribute__((constructor)) code in a *non* + * ffunction-sections object, which should be placed in .init.text; or it could + * be an actual function named startup() in an ffunction-sections object, which + * should be placed in .text. The build will detect and complain about any such + * ambiguously named functions. */ -#if defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || defined(CONFIG_LTO_CLANG) || \ -defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG) -#define TEXT_MAIN .text .text.[0-9a-zA-Z_]* -#else -#define TEXT_MAIN .text -#endif -#if defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || defined(CONFIG_LTO_CLANG) -#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$L* +#define TEXT_MAIN \ + .text \ + .text.[_0-9A-Za-df-rt-z]* \ + .text.s[_0-9A-Za-su-z]* .text.s .text.s.* \ + .text.st[_0-9A-Zb-z]* .text.st .text.st.* \ + .text.sta[_0-9A-Za-qs-z]* .text.sta .text.sta.* \ + .text.star[_0-9A-Za-su-z]* .text.star .text.star.* \ + .text.start[_0-9A-Za-tv-z]* .text.start .text.start.* \ + .text.startu[_0-9A-Za-oq-z]* .text.startu .text.startu.* \ + .text.startup[_0-9A-Za-z]* \ + .text.e[_0-9A-Za-wy-z]* .text.e .text.e.* \ + .text.ex[_0-9A-Za-hj-z]* .text.ex .text.ex.* \ + .text.exi[_0-9A-Za-su-z]* .text.exi .text.exi.* \ + .text.exit[_0-9A-Za-z]* + +/* + * Support -fdata-sections by matching .data, .data.*, and others, + * but exclude '.data..*'. + */ +#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data.rel.* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$L* #define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]* #define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* .rodata..L* #define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..L* .bss..compoundliteral* #define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]* -#else -#define DATA_MAIN .data -#define SDATA_MAIN .sdata -#define RODATA_MAIN .rodata -#define BSS_MAIN .bss -#define SBSS_MAIN .sbss -#endif /* * GCC 4.5 and later have a 32 bytes section alignment for structures. 
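The section-name ambiguity described in the TEXT_MAIN comment above is easy to reproduce. A sketch, assuming both files are built with -O2 (compare the section assignments with objdump -h):

/* foo.c, built WITHOUT -ffunction-sections: with -O2, GCC typically
 * classifies constructor code as startup code and emits it in
 * .text.startup, which the linker script must route into init text. */
__attribute__((constructor)) static void init_hook(void) { }

/* bar.c, built WITH -ffunction-sections: a function that merely
 * happens to be named startup() also lands in a .text.startup
 * section, yet belongs in regular .text. The two cases are
 * indistinguishable by section name alone, which is why the build
 * rejects such function names. */
void startup(void) { }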
@@ -157,7 +174,7 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG) #define PATCHABLE_DISCARDS *(__patchable_function_entries) #endif -#ifndef CONFIG_ARCH_SUPPORTS_CFI_CLANG +#ifndef CONFIG_ARCH_SUPPORTS_CFI /* * Simply points to ftrace_stub, but with the proper protocol. * Defined by the linker script in linux/vmlinux.lds.h @@ -167,7 +184,7 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG) #define FTRACE_STUB_HACK #endif -#ifdef CONFIG_FTRACE_MCOUNT_RECORD +#ifdef CONFIG_DYNAMIC_FTRACE /* * The ftrace call sites are logged to a section whose name depends on the * compiler option used. A given kernel image will only use one, AKA @@ -361,6 +378,7 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG) __start_once = .; \ *(.data..once) \ __end_once = .; \ + *(.data..do_once) \ STRUCT_ALIGN(); \ *(__tracepoints) \ /* implement dynamic printk debug */ \ @@ -490,32 +508,25 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG) \ PRINTK_INDEX \ \ - /* Kernel symbol table: Normal symbols */ \ + /* Kernel symbol table */ \ __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \ __start___ksymtab = .; \ KEEP(*(SORT(___ksymtab+*))) \ __stop___ksymtab = .; \ } \ \ - /* Kernel symbol table: GPL-only symbols */ \ - __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \ - __start___ksymtab_gpl = .; \ - KEEP(*(SORT(___ksymtab_gpl+*))) \ - __stop___ksymtab_gpl = .; \ - } \ - \ - /* Kernel symbol table: Normal symbols */ \ + /* Kernel symbol CRC table */ \ __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \ __start___kcrctab = .; \ KEEP(*(SORT(___kcrctab+*))) \ __stop___kcrctab = .; \ } \ \ - /* Kernel symbol table: GPL-only symbols */ \ - __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \ - __start___kcrctab_gpl = .; \ - KEEP(*(SORT(___kcrctab_gpl+*))) \ - __stop___kcrctab_gpl = .; \ + /* Kernel symbol flags table */ \ + __kflagstab : AT(ADDR(__kflagstab) - LOAD_OFFSET) { \ + __start___kflagstab = .; \ + KEEP(*(SORT(___kflagstab+*))) \ + __stop___kflagstab = .; \ } \ \ /* Kernel symbol table: strings */ \ @@ -580,9 +591,8 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG) * during second ld run in second ld pass when generating System.map * * TEXT_MAIN here will match symbols with a fixed pattern (for example, - * .text.hot or .text.unlikely) if dead code elimination or - * function-section is enabled. Match these symbols first before - * TEXT_MAIN to ensure they are grouped together. + * .text.hot or .text.unlikely). Match those before TEXT_MAIN to ensure + * they get grouped together. * * Also placing .text.hot section at the beginning of a page, this * would help the TLB performance. @@ -667,10 +677,11 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG) */ #ifdef CONFIG_DEBUG_INFO_BTF #define BTF \ + . = ALIGN(PAGE_SIZE); \ .BTF : AT(ADDR(.BTF) - LOAD_OFFSET) { \ BOUNDED_SECTION_BY(.BTF, _BTF) \ } \ - . = ALIGN(4); \ + . 
= ALIGN(PAGE_SIZE); \ .BTF_ids : AT(ADDR(.BTF_ids) - LOAD_OFFSET) { \ *(.BTF_ids) \ } @@ -727,16 +738,16 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG) #define INIT_TEXT \ *(.init.text .init.text.*) \ - *(.text.startup) + *(.text.startup .text.startup.*) #define EXIT_DATA \ *(.exit.data .exit.data.*) \ *(.fini_array .fini_array.*) \ - *(.dtors .dtors.*) \ + *(.dtors .dtors.*) #define EXIT_TEXT \ *(.exit.text) \ - *(.text.exit) \ + *(.text.exit .text.exit.*) #define EXIT_CALL \ *(.exitcall.exit) @@ -835,6 +846,9 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG) .strtab 0 : { *(.strtab) } \ .shstrtab 0 : { *(.shstrtab) } +#define MODINFO \ + .modinfo : { *(.modinfo) . = ALIGN(8); } + #ifdef CONFIG_GENERIC_BUG #define BUG_TABLE \ . = ALIGN(8); \ @@ -952,7 +966,11 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG) #define RUNTIME_CONST_VARIABLES \ RUNTIME_CONST(shift, d_hash_shift) \ - RUNTIME_CONST(ptr, dentry_hashtable) + RUNTIME_CONST(ptr, dentry_hashtable) \ + RUNTIME_CONST(ptr, __dentry_cache) \ + RUNTIME_CONST(ptr, __names_cache) \ + RUNTIME_CONST(ptr, __filp_cache) \ + RUNTIME_CONST(ptr, __bfilp_cache) /* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */ #define KUNIT_TABLE() \ @@ -1043,9 +1061,9 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG) *(.discard.*) \ *(.export_symbol) \ *(.no_trim_symbol) \ - *(.modinfo) \ /* ld.bfd warns about .gnu.version* even when not emitted */ \ *(.gnu.version*) \ + *(__tracepoint_check) \ #define DISCARDS \ /DISCARD/ : { \ diff --git a/include/asm-generic/xor.h b/include/asm-generic/xor.h deleted file mode 100644 index 44509d48fca2..000000000000 --- a/include/asm-generic/xor.h +++ /dev/null @@ -1,738 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * include/asm-generic/xor.h - * - * Generic optimized RAID-5 checksumming functions. 
- */ - -#include <linux/prefetch.h> - -static void -xor_8regs_2(unsigned long bytes, unsigned long * __restrict p1, - const unsigned long * __restrict p2) -{ - long lines = bytes / (sizeof (long)) / 8; - - do { - p1[0] ^= p2[0]; - p1[1] ^= p2[1]; - p1[2] ^= p2[2]; - p1[3] ^= p2[3]; - p1[4] ^= p2[4]; - p1[5] ^= p2[5]; - p1[6] ^= p2[6]; - p1[7] ^= p2[7]; - p1 += 8; - p2 += 8; - } while (--lines > 0); -} - -static void -xor_8regs_3(unsigned long bytes, unsigned long * __restrict p1, - const unsigned long * __restrict p2, - const unsigned long * __restrict p3) -{ - long lines = bytes / (sizeof (long)) / 8; - - do { - p1[0] ^= p2[0] ^ p3[0]; - p1[1] ^= p2[1] ^ p3[1]; - p1[2] ^= p2[2] ^ p3[2]; - p1[3] ^= p2[3] ^ p3[3]; - p1[4] ^= p2[4] ^ p3[4]; - p1[5] ^= p2[5] ^ p3[5]; - p1[6] ^= p2[6] ^ p3[6]; - p1[7] ^= p2[7] ^ p3[7]; - p1 += 8; - p2 += 8; - p3 += 8; - } while (--lines > 0); -} - -static void -xor_8regs_4(unsigned long bytes, unsigned long * __restrict p1, - const unsigned long * __restrict p2, - const unsigned long * __restrict p3, - const unsigned long * __restrict p4) -{ - long lines = bytes / (sizeof (long)) / 8; - - do { - p1[0] ^= p2[0] ^ p3[0] ^ p4[0]; - p1[1] ^= p2[1] ^ p3[1] ^ p4[1]; - p1[2] ^= p2[2] ^ p3[2] ^ p4[2]; - p1[3] ^= p2[3] ^ p3[3] ^ p4[3]; - p1[4] ^= p2[4] ^ p3[4] ^ p4[4]; - p1[5] ^= p2[5] ^ p3[5] ^ p4[5]; - p1[6] ^= p2[6] ^ p3[6] ^ p4[6]; - p1[7] ^= p2[7] ^ p3[7] ^ p4[7]; - p1 += 8; - p2 += 8; - p3 += 8; - p4 += 8; - } while (--lines > 0); -} - -static void -xor_8regs_5(unsigned long bytes, unsigned long * __restrict p1, - const unsigned long * __restrict p2, - const unsigned long * __restrict p3, - const unsigned long * __restrict p4, - const unsigned long * __restrict p5) -{ - long lines = bytes / (sizeof (long)) / 8; - - do { - p1[0] ^= p2[0] ^ p3[0] ^ p4[0] ^ p5[0]; - p1[1] ^= p2[1] ^ p3[1] ^ p4[1] ^ p5[1]; - p1[2] ^= p2[2] ^ p3[2] ^ p4[2] ^ p5[2]; - p1[3] ^= p2[3] ^ p3[3] ^ p4[3] ^ p5[3]; - p1[4] ^= p2[4] ^ p3[4] ^ p4[4] ^ p5[4]; - p1[5] ^= p2[5] ^ p3[5] ^ p4[5] ^ p5[5]; - p1[6] ^= p2[6] ^ p3[6] ^ p4[6] ^ p5[6]; - p1[7] ^= p2[7] ^ p3[7] ^ p4[7] ^ p5[7]; - p1 += 8; - p2 += 8; - p3 += 8; - p4 += 8; - p5 += 8; - } while (--lines > 0); -} - -static void -xor_32regs_2(unsigned long bytes, unsigned long * __restrict p1, - const unsigned long * __restrict p2) -{ - long lines = bytes / (sizeof (long)) / 8; - - do { - register long d0, d1, d2, d3, d4, d5, d6, d7; - d0 = p1[0]; /* Pull the stuff into registers */ - d1 = p1[1]; /* ... in bursts, if possible. */ - d2 = p1[2]; - d3 = p1[3]; - d4 = p1[4]; - d5 = p1[5]; - d6 = p1[6]; - d7 = p1[7]; - d0 ^= p2[0]; - d1 ^= p2[1]; - d2 ^= p2[2]; - d3 ^= p2[3]; - d4 ^= p2[4]; - d5 ^= p2[5]; - d6 ^= p2[6]; - d7 ^= p2[7]; - p1[0] = d0; /* Store the result (in bursts) */ - p1[1] = d1; - p1[2] = d2; - p1[3] = d3; - p1[4] = d4; - p1[5] = d5; - p1[6] = d6; - p1[7] = d7; - p1 += 8; - p2 += 8; - } while (--lines > 0); -} - -static void -xor_32regs_3(unsigned long bytes, unsigned long * __restrict p1, - const unsigned long * __restrict p2, - const unsigned long * __restrict p3) -{ - long lines = bytes / (sizeof (long)) / 8; - - do { - register long d0, d1, d2, d3, d4, d5, d6, d7; - d0 = p1[0]; /* Pull the stuff into registers */ - d1 = p1[1]; /* ... in bursts, if possible. 
*/ - d2 = p1[2]; - d3 = p1[3]; - d4 = p1[4]; - d5 = p1[5]; - d6 = p1[6]; - d7 = p1[7]; - d0 ^= p2[0]; - d1 ^= p2[1]; - d2 ^= p2[2]; - d3 ^= p2[3]; - d4 ^= p2[4]; - d5 ^= p2[5]; - d6 ^= p2[6]; - d7 ^= p2[7]; - d0 ^= p3[0]; - d1 ^= p3[1]; - d2 ^= p3[2]; - d3 ^= p3[3]; - d4 ^= p3[4]; - d5 ^= p3[5]; - d6 ^= p3[6]; - d7 ^= p3[7]; - p1[0] = d0; /* Store the result (in bursts) */ - p1[1] = d1; - p1[2] = d2; - p1[3] = d3; - p1[4] = d4; - p1[5] = d5; - p1[6] = d6; - p1[7] = d7; - p1 += 8; - p2 += 8; - p3 += 8; - } while (--lines > 0); -} - -static void -xor_32regs_4(unsigned long bytes, unsigned long * __restrict p1, - const unsigned long * __restrict p2, - const unsigned long * __restrict p3, - const unsigned long * __restrict p4) -{ - long lines = bytes / (sizeof (long)) / 8; - - do { - register long d0, d1, d2, d3, d4, d5, d6, d7; - d0 = p1[0]; /* Pull the stuff into registers */ - d1 = p1[1]; /* ... in bursts, if possible. */ - d2 = p1[2]; - d3 = p1[3]; - d4 = p1[4]; - d5 = p1[5]; - d6 = p1[6]; - d7 = p1[7]; - d0 ^= p2[0]; - d1 ^= p2[1]; - d2 ^= p2[2]; - d3 ^= p2[3]; - d4 ^= p2[4]; - d5 ^= p2[5]; - d6 ^= p2[6]; - d7 ^= p2[7]; - d0 ^= p3[0]; - d1 ^= p3[1]; - d2 ^= p3[2]; - d3 ^= p3[3]; - d4 ^= p3[4]; - d5 ^= p3[5]; - d6 ^= p3[6]; - d7 ^= p3[7]; - d0 ^= p4[0]; - d1 ^= p4[1]; - d2 ^= p4[2]; - d3 ^= p4[3]; - d4 ^= p4[4]; - d5 ^= p4[5]; - d6 ^= p4[6]; - d7 ^= p4[7]; - p1[0] = d0; /* Store the result (in bursts) */ - p1[1] = d1; - p1[2] = d2; - p1[3] = d3; - p1[4] = d4; - p1[5] = d5; - p1[6] = d6; - p1[7] = d7; - p1 += 8; - p2 += 8; - p3 += 8; - p4 += 8; - } while (--lines > 0); -} - -static void -xor_32regs_5(unsigned long bytes, unsigned long * __restrict p1, - const unsigned long * __restrict p2, - const unsigned long * __restrict p3, - const unsigned long * __restrict p4, - const unsigned long * __restrict p5) -{ - long lines = bytes / (sizeof (long)) / 8; - - do { - register long d0, d1, d2, d3, d4, d5, d6, d7; - d0 = p1[0]; /* Pull the stuff into registers */ - d1 = p1[1]; /* ... in bursts, if possible. 
*/ - d2 = p1[2]; - d3 = p1[3]; - d4 = p1[4]; - d5 = p1[5]; - d6 = p1[6]; - d7 = p1[7]; - d0 ^= p2[0]; - d1 ^= p2[1]; - d2 ^= p2[2]; - d3 ^= p2[3]; - d4 ^= p2[4]; - d5 ^= p2[5]; - d6 ^= p2[6]; - d7 ^= p2[7]; - d0 ^= p3[0]; - d1 ^= p3[1]; - d2 ^= p3[2]; - d3 ^= p3[3]; - d4 ^= p3[4]; - d5 ^= p3[5]; - d6 ^= p3[6]; - d7 ^= p3[7]; - d0 ^= p4[0]; - d1 ^= p4[1]; - d2 ^= p4[2]; - d3 ^= p4[3]; - d4 ^= p4[4]; - d5 ^= p4[5]; - d6 ^= p4[6]; - d7 ^= p4[7]; - d0 ^= p5[0]; - d1 ^= p5[1]; - d2 ^= p5[2]; - d3 ^= p5[3]; - d4 ^= p5[4]; - d5 ^= p5[5]; - d6 ^= p5[6]; - d7 ^= p5[7]; - p1[0] = d0; /* Store the result (in bursts) */ - p1[1] = d1; - p1[2] = d2; - p1[3] = d3; - p1[4] = d4; - p1[5] = d5; - p1[6] = d6; - p1[7] = d7; - p1 += 8; - p2 += 8; - p3 += 8; - p4 += 8; - p5 += 8; - } while (--lines > 0); -} - -static void -xor_8regs_p_2(unsigned long bytes, unsigned long * __restrict p1, - const unsigned long * __restrict p2) -{ - long lines = bytes / (sizeof (long)) / 8 - 1; - prefetchw(p1); - prefetch(p2); - - do { - prefetchw(p1+8); - prefetch(p2+8); - once_more: - p1[0] ^= p2[0]; - p1[1] ^= p2[1]; - p1[2] ^= p2[2]; - p1[3] ^= p2[3]; - p1[4] ^= p2[4]; - p1[5] ^= p2[5]; - p1[6] ^= p2[6]; - p1[7] ^= p2[7]; - p1 += 8; - p2 += 8; - } while (--lines > 0); - if (lines == 0) - goto once_more; -} - -static void -xor_8regs_p_3(unsigned long bytes, unsigned long * __restrict p1, - const unsigned long * __restrict p2, - const unsigned long * __restrict p3) -{ - long lines = bytes / (sizeof (long)) / 8 - 1; - prefetchw(p1); - prefetch(p2); - prefetch(p3); - - do { - prefetchw(p1+8); - prefetch(p2+8); - prefetch(p3+8); - once_more: - p1[0] ^= p2[0] ^ p3[0]; - p1[1] ^= p2[1] ^ p3[1]; - p1[2] ^= p2[2] ^ p3[2]; - p1[3] ^= p2[3] ^ p3[3]; - p1[4] ^= p2[4] ^ p3[4]; - p1[5] ^= p2[5] ^ p3[5]; - p1[6] ^= p2[6] ^ p3[6]; - p1[7] ^= p2[7] ^ p3[7]; - p1 += 8; - p2 += 8; - p3 += 8; - } while (--lines > 0); - if (lines == 0) - goto once_more; -} - -static void -xor_8regs_p_4(unsigned long bytes, unsigned long * __restrict p1, - const unsigned long * __restrict p2, - const unsigned long * __restrict p3, - const unsigned long * __restrict p4) -{ - long lines = bytes / (sizeof (long)) / 8 - 1; - - prefetchw(p1); - prefetch(p2); - prefetch(p3); - prefetch(p4); - - do { - prefetchw(p1+8); - prefetch(p2+8); - prefetch(p3+8); - prefetch(p4+8); - once_more: - p1[0] ^= p2[0] ^ p3[0] ^ p4[0]; - p1[1] ^= p2[1] ^ p3[1] ^ p4[1]; - p1[2] ^= p2[2] ^ p3[2] ^ p4[2]; - p1[3] ^= p2[3] ^ p3[3] ^ p4[3]; - p1[4] ^= p2[4] ^ p3[4] ^ p4[4]; - p1[5] ^= p2[5] ^ p3[5] ^ p4[5]; - p1[6] ^= p2[6] ^ p3[6] ^ p4[6]; - p1[7] ^= p2[7] ^ p3[7] ^ p4[7]; - p1 += 8; - p2 += 8; - p3 += 8; - p4 += 8; - } while (--lines > 0); - if (lines == 0) - goto once_more; -} - -static void -xor_8regs_p_5(unsigned long bytes, unsigned long * __restrict p1, - const unsigned long * __restrict p2, - const unsigned long * __restrict p3, - const unsigned long * __restrict p4, - const unsigned long * __restrict p5) -{ - long lines = bytes / (sizeof (long)) / 8 - 1; - - prefetchw(p1); - prefetch(p2); - prefetch(p3); - prefetch(p4); - prefetch(p5); - - do { - prefetchw(p1+8); - prefetch(p2+8); - prefetch(p3+8); - prefetch(p4+8); - prefetch(p5+8); - once_more: - p1[0] ^= p2[0] ^ p3[0] ^ p4[0] ^ p5[0]; - p1[1] ^= p2[1] ^ p3[1] ^ p4[1] ^ p5[1]; - p1[2] ^= p2[2] ^ p3[2] ^ p4[2] ^ p5[2]; - p1[3] ^= p2[3] ^ p3[3] ^ p4[3] ^ p5[3]; - p1[4] ^= p2[4] ^ p3[4] ^ p4[4] ^ p5[4]; - p1[5] ^= p2[5] ^ p3[5] ^ p4[5] ^ p5[5]; - p1[6] ^= p2[6] ^ p3[6] ^ p4[6] ^ p5[6]; - p1[7] ^= p2[7] ^ p3[7] ^ p4[7] ^ p5[7]; - p1 += 
8; - p2 += 8; - p3 += 8; - p4 += 8; - p5 += 8; - } while (--lines > 0); - if (lines == 0) - goto once_more; -} - -static void -xor_32regs_p_2(unsigned long bytes, unsigned long * __restrict p1, - const unsigned long * __restrict p2) -{ - long lines = bytes / (sizeof (long)) / 8 - 1; - - prefetchw(p1); - prefetch(p2); - - do { - register long d0, d1, d2, d3, d4, d5, d6, d7; - - prefetchw(p1+8); - prefetch(p2+8); - once_more: - d0 = p1[0]; /* Pull the stuff into registers */ - d1 = p1[1]; /* ... in bursts, if possible. */ - d2 = p1[2]; - d3 = p1[3]; - d4 = p1[4]; - d5 = p1[5]; - d6 = p1[6]; - d7 = p1[7]; - d0 ^= p2[0]; - d1 ^= p2[1]; - d2 ^= p2[2]; - d3 ^= p2[3]; - d4 ^= p2[4]; - d5 ^= p2[5]; - d6 ^= p2[6]; - d7 ^= p2[7]; - p1[0] = d0; /* Store the result (in bursts) */ - p1[1] = d1; - p1[2] = d2; - p1[3] = d3; - p1[4] = d4; - p1[5] = d5; - p1[6] = d6; - p1[7] = d7; - p1 += 8; - p2 += 8; - } while (--lines > 0); - if (lines == 0) - goto once_more; -} - -static void -xor_32regs_p_3(unsigned long bytes, unsigned long * __restrict p1, - const unsigned long * __restrict p2, - const unsigned long * __restrict p3) -{ - long lines = bytes / (sizeof (long)) / 8 - 1; - - prefetchw(p1); - prefetch(p2); - prefetch(p3); - - do { - register long d0, d1, d2, d3, d4, d5, d6, d7; - - prefetchw(p1+8); - prefetch(p2+8); - prefetch(p3+8); - once_more: - d0 = p1[0]; /* Pull the stuff into registers */ - d1 = p1[1]; /* ... in bursts, if possible. */ - d2 = p1[2]; - d3 = p1[3]; - d4 = p1[4]; - d5 = p1[5]; - d6 = p1[6]; - d7 = p1[7]; - d0 ^= p2[0]; - d1 ^= p2[1]; - d2 ^= p2[2]; - d3 ^= p2[3]; - d4 ^= p2[4]; - d5 ^= p2[5]; - d6 ^= p2[6]; - d7 ^= p2[7]; - d0 ^= p3[0]; - d1 ^= p3[1]; - d2 ^= p3[2]; - d3 ^= p3[3]; - d4 ^= p3[4]; - d5 ^= p3[5]; - d6 ^= p3[6]; - d7 ^= p3[7]; - p1[0] = d0; /* Store the result (in bursts) */ - p1[1] = d1; - p1[2] = d2; - p1[3] = d3; - p1[4] = d4; - p1[5] = d5; - p1[6] = d6; - p1[7] = d7; - p1 += 8; - p2 += 8; - p3 += 8; - } while (--lines > 0); - if (lines == 0) - goto once_more; -} - -static void -xor_32regs_p_4(unsigned long bytes, unsigned long * __restrict p1, - const unsigned long * __restrict p2, - const unsigned long * __restrict p3, - const unsigned long * __restrict p4) -{ - long lines = bytes / (sizeof (long)) / 8 - 1; - - prefetchw(p1); - prefetch(p2); - prefetch(p3); - prefetch(p4); - - do { - register long d0, d1, d2, d3, d4, d5, d6, d7; - - prefetchw(p1+8); - prefetch(p2+8); - prefetch(p3+8); - prefetch(p4+8); - once_more: - d0 = p1[0]; /* Pull the stuff into registers */ - d1 = p1[1]; /* ... in bursts, if possible. 
*/ - d2 = p1[2]; - d3 = p1[3]; - d4 = p1[4]; - d5 = p1[5]; - d6 = p1[6]; - d7 = p1[7]; - d0 ^= p2[0]; - d1 ^= p2[1]; - d2 ^= p2[2]; - d3 ^= p2[3]; - d4 ^= p2[4]; - d5 ^= p2[5]; - d6 ^= p2[6]; - d7 ^= p2[7]; - d0 ^= p3[0]; - d1 ^= p3[1]; - d2 ^= p3[2]; - d3 ^= p3[3]; - d4 ^= p3[4]; - d5 ^= p3[5]; - d6 ^= p3[6]; - d7 ^= p3[7]; - d0 ^= p4[0]; - d1 ^= p4[1]; - d2 ^= p4[2]; - d3 ^= p4[3]; - d4 ^= p4[4]; - d5 ^= p4[5]; - d6 ^= p4[6]; - d7 ^= p4[7]; - p1[0] = d0; /* Store the result (in bursts) */ - p1[1] = d1; - p1[2] = d2; - p1[3] = d3; - p1[4] = d4; - p1[5] = d5; - p1[6] = d6; - p1[7] = d7; - p1 += 8; - p2 += 8; - p3 += 8; - p4 += 8; - } while (--lines > 0); - if (lines == 0) - goto once_more; -} - -static void -xor_32regs_p_5(unsigned long bytes, unsigned long * __restrict p1, - const unsigned long * __restrict p2, - const unsigned long * __restrict p3, - const unsigned long * __restrict p4, - const unsigned long * __restrict p5) -{ - long lines = bytes / (sizeof (long)) / 8 - 1; - - prefetchw(p1); - prefetch(p2); - prefetch(p3); - prefetch(p4); - prefetch(p5); - - do { - register long d0, d1, d2, d3, d4, d5, d6, d7; - - prefetchw(p1+8); - prefetch(p2+8); - prefetch(p3+8); - prefetch(p4+8); - prefetch(p5+8); - once_more: - d0 = p1[0]; /* Pull the stuff into registers */ - d1 = p1[1]; /* ... in bursts, if possible. */ - d2 = p1[2]; - d3 = p1[3]; - d4 = p1[4]; - d5 = p1[5]; - d6 = p1[6]; - d7 = p1[7]; - d0 ^= p2[0]; - d1 ^= p2[1]; - d2 ^= p2[2]; - d3 ^= p2[3]; - d4 ^= p2[4]; - d5 ^= p2[5]; - d6 ^= p2[6]; - d7 ^= p2[7]; - d0 ^= p3[0]; - d1 ^= p3[1]; - d2 ^= p3[2]; - d3 ^= p3[3]; - d4 ^= p3[4]; - d5 ^= p3[5]; - d6 ^= p3[6]; - d7 ^= p3[7]; - d0 ^= p4[0]; - d1 ^= p4[1]; - d2 ^= p4[2]; - d3 ^= p4[3]; - d4 ^= p4[4]; - d5 ^= p4[5]; - d6 ^= p4[6]; - d7 ^= p4[7]; - d0 ^= p5[0]; - d1 ^= p5[1]; - d2 ^= p5[2]; - d3 ^= p5[3]; - d4 ^= p5[4]; - d5 ^= p5[5]; - d6 ^= p5[6]; - d7 ^= p5[7]; - p1[0] = d0; /* Store the result (in bursts) */ - p1[1] = d1; - p1[2] = d2; - p1[3] = d3; - p1[4] = d4; - p1[5] = d5; - p1[6] = d6; - p1[7] = d7; - p1 += 8; - p2 += 8; - p3 += 8; - p4 += 8; - p5 += 8; - } while (--lines > 0); - if (lines == 0) - goto once_more; -} - -static struct xor_block_template xor_block_8regs = { - .name = "8regs", - .do_2 = xor_8regs_2, - .do_3 = xor_8regs_3, - .do_4 = xor_8regs_4, - .do_5 = xor_8regs_5, -}; - -static struct xor_block_template xor_block_32regs = { - .name = "32regs", - .do_2 = xor_32regs_2, - .do_3 = xor_32regs_3, - .do_4 = xor_32regs_4, - .do_5 = xor_32regs_5, -}; - -static struct xor_block_template xor_block_8regs_p __maybe_unused = { - .name = "8regs_prefetch", - .do_2 = xor_8regs_p_2, - .do_3 = xor_8regs_p_3, - .do_4 = xor_8regs_p_4, - .do_5 = xor_8regs_p_5, -}; - -static struct xor_block_template xor_block_32regs_p __maybe_unused = { - .name = "32regs_prefetch", - .do_2 = xor_32regs_p_2, - .do_3 = xor_32regs_p_3, - .do_4 = xor_32regs_p_4, - .do_5 = xor_32regs_p_5, -}; - -#define XOR_TRY_TEMPLATES \ - do { \ - xor_speed(&xor_block_8regs); \ - xor_speed(&xor_block_8regs_p); \ - xor_speed(&xor_block_32regs); \ - xor_speed(&xor_block_32regs_p); \ - } while (0)
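
A note on the tlb.h hunk at the top of this section: it closes two distinct races when hugetlb PMD page tables stop being shared. Any unshare can leave stale TLB entries behind, hence the tlb_flush_mmu_tlbonly() call; only the last unshare can be followed by the table being freed and reused, which is what concurrent GUP-fast walkers must be protected against, hence the IPI broadcast via tlb_remove_table_sync_one() (GUP-fast runs with interrupts disabled, so surviving an IPI on every CPU means no walker is still inside the old table). The control flow is easier to read outside diff form. In the standalone mock below, the helper bodies are printf stand-ins for the real kernel primitives and the wrapper name is invented; only the two-flag ordering comes from the hunk itself.

#include <stdbool.h>
#include <stdio.h>

struct mmu_gather {
	bool unshared_tables;		/* some table stopped being shared */
	bool fully_unshared_tables;	/* we were its last sharer */
};

/* Stand-in for the real kernel primitive of the same name. */
static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	printf("flush TLBs: stale entries may still reference the table\n");
}

/* Stand-in: in the kernel this broadcasts an IPI and waits for it. */
static void tlb_remove_table_sync_one(void)
{
	printf("IPI all CPUs: outlasts any concurrent GUP-fast walker\n");
}

/* Invented wrapper name; the two-flag ordering is from the hunk. */
static void tlb_sync_unshared_tables(struct mmu_gather *tlb)
{
	if (tlb->unshared_tables)
		tlb_flush_mmu_tlbonly(tlb);

	if (tlb->fully_unshared_tables) {
		tlb_remove_table_sync_one();
		tlb->fully_unshared_tables = false;
	}
}

int main(void)
{
	struct mmu_gather tlb = {
		.unshared_tables = true,
		.fully_unshared_tables = true,
	};

	tlb_sync_unshared_tables(&tlb);
	return 0;
}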
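
The new TEXT_MAIN definition in the vmlinux.lds.h hunk builds its exclusions by enumerating character classes one prefix letter at a time, which is hard to read by eye. The effect is easier to see by running the globs against sample section names. Below is a small standalone C probe; it uses fnmatch(3) as a stand-in for the linker's wildcard matcher (an assumption: ld's globbing agrees with fnmatch for these simple patterns), the pattern list is copied from the macro, and the test section names are made up. Compiled with cc -O2, it reports the .text.startup and .text.exit names as excluded while ordinary per-function sections match.

#include <fnmatch.h>
#include <stddef.h>
#include <stdio.h>

/* The globs from the new TEXT_MAIN definition, verbatim. */
static const char *text_main[] = {
	".text", ".text.[_0-9A-Za-df-rt-z]*",
	".text.s[_0-9A-Za-su-z]*", ".text.s", ".text.s.*",
	".text.st[_0-9A-Zb-z]*", ".text.st", ".text.st.*",
	".text.sta[_0-9A-Za-qs-z]*", ".text.sta", ".text.sta.*",
	".text.star[_0-9A-Za-su-z]*", ".text.star", ".text.star.*",
	".text.start[_0-9A-Za-tv-z]*", ".text.start", ".text.start.*",
	".text.startu[_0-9A-Za-oq-z]*", ".text.startu", ".text.startu.*",
	".text.startup[_0-9A-Za-z]*",
	".text.e[_0-9A-Za-wy-z]*", ".text.e", ".text.e.*",
	".text.ex[_0-9A-Za-hj-z]*", ".text.ex", ".text.ex.*",
	".text.exi[_0-9A-Za-su-z]*", ".text.exi", ".text.exi.*",
	".text.exit[_0-9A-Za-z]*",
};

int main(void)
{
	/* .text.unlikely *does* match: that is why such sections must be
	 * listed explicitly before TEXT_MAIN in the linker script. */
	const char *names[] = {
		".text", ".text.my_func", ".text.unlikely",
		".text..tmp_section",			/* '.text..*': excluded */
		".text.startup", ".text.startup.foo",	/* left for INIT_TEXT */
		".text.exit", ".text.exit.bar",		/* left for EXIT_TEXT */
	};

	for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		int hit = 0;

		for (size_t j = 0; j < sizeof(text_main) / sizeof(text_main[0]); j++)
			hit |= !fnmatch(text_main[j], names[i], 0);
		printf("%-22s %s\n", names[i], hit ? "TEXT_MAIN" : "excluded");
	}
	return 0;
}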
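
The *_p_* ("prefetch") variants in the deleted xor.h share one non-obvious idiom: `lines' is the block count minus one, each loop pass prefetches block k+1 while XOR-ing block k, and the final block is handled by jumping back into the loop body below the prefetch hints, so the steady state never prefetches past the arrays. Here is a standalone sketch of that shape, substituting GCC's __builtin_prefetch for the kernel's prefetch()/prefetchw() and a compact inner loop for the hand-unrolled statements:

#include <stdio.h>

static void xor_prefetch_2(unsigned long bytes, unsigned long *p1,
			   const unsigned long *p2)
{
	long lines = bytes / sizeof(long) / 8 - 1;	/* all but the last block */

	__builtin_prefetch(p1, 1);	/* prefetchw(): write intent */
	__builtin_prefetch(p2, 0);	/* prefetch(): read only */

	do {
		__builtin_prefetch(p1 + 8, 1);	/* warm up block k+1 ... */
		__builtin_prefetch(p2 + 8, 0);
once_more:					/* ... while XOR-ing block k */
		for (int i = 0; i < 8; i++)
			p1[i] ^= p2[i];
		p1 += 8;
		p2 += 8;
	} while (--lines > 0);
	if (lines == 0)		/* final block: body only, no further hints */
		goto once_more;
}

int main(void)
{
	static unsigned long a[64], b[64];	/* 8 blocks of 8 longs */

	for (int i = 0; i < 64; i++) {
		a[i] = i;
		b[i] = ~0UL - i;
	}
	xor_prefetch_2(sizeof(a), a, b);
	printf("a[0] ^ a[63] = %lx\n", a[0] ^ a[63]);
	return 0;
}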
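
Finally, the file being removed registered its routines in xor_block_template structures, and XOR_TRY_TEMPLATES handed each one to xor_speed(), which in the kernel benchmarks the candidates so the fastest can be kept. The following userspace sketch shows that select-by-measurement pattern: the xor_block_template shape and the 8regs inner loop follow the deleted file, while bench(), the timing constants, and the selection loop are invented for the demo (and only do_2 is retained).

#include <stdio.h>
#include <time.h>

struct xor_block_template {
	const char *name;
	void (*do_2)(unsigned long bytes, unsigned long *p1,
		     const unsigned long *p2);
};

static void xor_naive_2(unsigned long bytes, unsigned long *p1,
			const unsigned long *p2)
{
	for (unsigned long i = 0; i < bytes / sizeof(long); i++)
		p1[i] ^= p2[i];
}

static void xor_8regs_2(unsigned long bytes, unsigned long *p1,
			const unsigned long *p2)
{
	long lines = bytes / sizeof(long) / 8;

	do {		/* unrolled: 8 longs per iteration, as in the file */
		p1[0] ^= p2[0]; p1[1] ^= p2[1];
		p1[2] ^= p2[2]; p1[3] ^= p2[3];
		p1[4] ^= p2[4]; p1[5] ^= p2[5];
		p1[6] ^= p2[6]; p1[7] ^= p2[7];
		p1 += 8; p2 += 8;
	} while (--lines > 0);
}

static double bench(const struct xor_block_template *t,
		    unsigned long *a, const unsigned long *b,
		    unsigned long bytes)
{
	struct timespec t0, t1;

	clock_gettime(CLOCK_MONOTONIC, &t0);
	for (int i = 0; i < 1000; i++)
		t->do_2(bytes, a, b);
	clock_gettime(CLOCK_MONOTONIC, &t1);
	return (t1.tv_sec - t0.tv_sec) * 1e9 + (t1.tv_nsec - t0.tv_nsec);
}

int main(void)
{
	static unsigned long a[4096], b[4096];
	struct xor_block_template tmpl[] = {
		{ "naive", xor_naive_2 },
		{ "8regs", xor_8regs_2 },
	};
	const struct xor_block_template *best = NULL;
	double best_ns = 0;

	for (unsigned int i = 0; i < 2; i++) {	/* xor_speed() analogue */
		double ns = bench(&tmpl[i], a, b, sizeof(a));

		printf("%-8s %.0f ns\n", tmpl[i].name, ns);
		if (!best || ns < best_ns) {
			best = &tmpl[i];
			best_ns = ns;
		}
	}
	printf("using '%s'\n", best->name);
	return 0;
}

The real xor_speed() measures throughput more carefully, but the overall shape, registering each template and keeping the fastest after timing, is the same.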
