Diffstat (limited to 'include')
81 files changed, 1136 insertions, 590 deletions
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 0300374101cd..2a462cf4eaa9 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -91,8 +91,8 @@ acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev, bool acpi_dev_found(const char *hid); bool acpi_dev_present(const char *hid, const char *uid, s64 hrv); -const char * -acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv); +struct acpi_device * +acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv); #ifdef CONFIG_ACPI @@ -687,6 +687,10 @@ static inline bool acpi_device_can_poweroff(struct acpi_device *adev) adev->power.states[ACPI_STATE_D3_HOT].flags.explicit_set); } +static inline void acpi_dev_put(struct acpi_device *adev) +{ + put_device(&adev->dev); +} #else /* CONFIG_ACPI */ static inline int register_acpi_bus_type(void *bus) { return 0; } diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 24dbb4e742a6..3b1b1d0e4c33 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -12,7 +12,7 @@ /* Current ACPICA subsystem version in YYYYMMDD format */ -#define ACPI_CA_VERSION 0x20190215 +#define ACPI_CA_VERSION 0x20190405 #include <acpi/acconfig.h> #include <acpi/actypes.h> diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h index 65cc9cbf1141..d568128025df 100644 --- a/include/acpi/actbl.h +++ b/include/acpi/actbl.h @@ -66,14 +66,14 @@ ******************************************************************************/ struct acpi_table_header { - char signature[ACPI_NAME_SIZE]; /* ASCII table signature */ + char signature[ACPI_NAMESEG_SIZE]; /* ASCII table signature */ u32 length; /* Length of table in bytes, including this header */ u8 revision; /* ACPI Specification minor version number */ u8 checksum; /* To make sum of entire table == 0 */ char oem_id[ACPI_OEM_ID_SIZE]; /* ASCII OEM identification */ char oem_table_id[ACPI_OEM_TABLE_ID_SIZE]; /* ASCII OEM table identification */ u32 oem_revision; /* OEM revision number */ - char asl_compiler_id[ACPI_NAME_SIZE]; /* ASCII ASL compiler vendor ID */ + char asl_compiler_id[ACPI_NAMESEG_SIZE]; /* ASCII ASL compiler vendor ID */ u32 asl_compiler_revision; /* ASL compiler version */ }; diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index f73382e82c26..ad6892a24015 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -375,7 +375,7 @@ typedef u64 acpi_physical_address; /* Names within the namespace are 4 bytes long */ -#define ACPI_NAME_SIZE 4 +#define ACPI_NAMESEG_SIZE 4 /* Fixed by ACPI spec */ #define ACPI_PATH_SEGMENT_LENGTH 5 /* 4 chars for name + 1 char for separator */ #define ACPI_PATH_SEPARATOR '.' 
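The acpi_bus.h hunk above replaces acpi_dev_get_first_match_name() with acpi_dev_get_first_match_dev(), which returns a reference-counted struct acpi_device * that the caller must release with the new acpi_dev_put() helper. A minimal usage sketch; the HID string and the probe function are illustrative, not from this patch:

	#include <linux/acpi.h>

	static int example_probe(void)
	{
		struct acpi_device *adev;

		/* First device matching HID "MAGIC00" (illustrative), any UID, any HRV */
		adev = acpi_dev_get_first_match_dev("MAGIC00", NULL, -1);
		if (!adev)
			return -ENODEV;

		/* ... use adev: the full device is available, not just its name ... */

		acpi_dev_put(adev);	/* drop the reference taken by the lookup */
		return 0;
	}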
@@ -515,11 +515,11 @@ typedef u64 acpi_integer; /* Optimizations for 4-character (32-bit) acpi_name manipulation */ #ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED -#define ACPI_COMPARE_NAME(a,b) (*ACPI_CAST_PTR (u32, (a)) == *ACPI_CAST_PTR (u32, (b))) -#define ACPI_MOVE_NAME(dest,src) (*ACPI_CAST_PTR (u32, (dest)) = *ACPI_CAST_PTR (u32, (src))) +#define ACPI_COMPARE_NAMESEG(a,b) (*ACPI_CAST_PTR (u32, (a)) == *ACPI_CAST_PTR (u32, (b))) +#define ACPI_COPY_NAMESEG(dest,src) (*ACPI_CAST_PTR (u32, (dest)) = *ACPI_CAST_PTR (u32, (src))) #else -#define ACPI_COMPARE_NAME(a,b) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_CAST_PTR (char, (b)), ACPI_NAME_SIZE)) -#define ACPI_MOVE_NAME(dest,src) (strncpy (ACPI_CAST_PTR (char, (dest)), ACPI_CAST_PTR (char, (src)), ACPI_NAME_SIZE)) +#define ACPI_COMPARE_NAMESEG(a,b) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_CAST_PTR (char, (b)), ACPI_NAMESEG_SIZE)) +#define ACPI_COPY_NAMESEG(dest,src) (strncpy (ACPI_CAST_PTR (char, (dest)), ACPI_CAST_PTR (char, (src)), ACPI_NAMESEG_SIZE)) #endif /* Support for the special RSDP signature (8 characters) */ @@ -529,7 +529,7 @@ typedef u64 acpi_integer; /* Support for OEMx signature (x can be any character) */ #define ACPI_IS_OEM_SIG(a) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_OEM_NAME, 3) &&\ - strnlen (a, ACPI_NAME_SIZE) == ACPI_NAME_SIZE) + strnlen (a, ACPI_NAMESEG_SIZE) == ACPI_NAMESEG_SIZE) /* * Algorithm to obtain access bit width. diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h index fcb61b4659b3..8666fe7f35d7 100644 --- a/include/asm-generic/futex.h +++ b/include/asm-generic/futex.h @@ -23,7 +23,9 @@ * * Return: * 0 - On success - * <0 - On error + * -EFAULT - User access resulted in a page fault + * -EAGAIN - Atomic operation was unable to complete due to contention + * -ENOSYS - Operation not supported */ static inline int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr) @@ -85,7 +87,9 @@ out_pagefault_enable: * * Return: * 0 - On success - * <0 - On error + * -EFAULT - User access resulted in a page fault + * -EAGAIN - Atomic operation was unable to complete due to contention + * -ENOSYS - Function not implemented (only if !HAVE_FUTEX_CMPXCHG) */ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index 303871651f8a..8f3bf95a36d1 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -19,12 +19,9 @@ #include <asm-generic/iomap.h> #endif +#include <asm/mmiowb.h> #include <asm-generic/pci_iomap.h> -#ifndef mmiowb -#define mmiowb() do {} while (0) -#endif - #ifndef __io_br #define __io_br() barrier() #endif @@ -49,7 +46,7 @@ /* serialize device access against a spin_unlock, usually handled there. */ #ifndef __io_aw -#define __io_aw() barrier() +#define __io_aw() mmiowb_set_pending() #endif #ifndef __io_pbw diff --git a/include/asm-generic/mmiowb.h b/include/asm-generic/mmiowb.h new file mode 100644 index 000000000000..9439ff037b2d --- /dev/null +++ b/include/asm-generic/mmiowb.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_MMIOWB_H +#define __ASM_GENERIC_MMIOWB_H + +/* + * Generic implementation of mmiowb() tracking for spinlocks. + * + * If your architecture doesn't ensure that writes to an I/O peripheral + * within two spinlocked sections on two different CPUs are seen by the + * peripheral in the order corresponding to the lock handover, then you + * need to follow these FIVE easy steps: + * + * 1. 
Implement mmiowb() (and arch_mmiowb_state() if you're fancy) + * in asm/mmiowb.h, then #include this file + * 2. Ensure your I/O write accessors call mmiowb_set_pending() + * 3. Select ARCH_HAS_MMIOWB + * 4. Untangle the resulting mess of header files + * 5. Complain to your architects + */ +#ifdef CONFIG_MMIOWB + +#include <linux/compiler.h> +#include <asm-generic/mmiowb_types.h> + +#ifndef arch_mmiowb_state +#include <asm/percpu.h> +#include <asm/smp.h> + +DECLARE_PER_CPU(struct mmiowb_state, __mmiowb_state); +#define __mmiowb_state() this_cpu_ptr(&__mmiowb_state) +#else +#define __mmiowb_state() arch_mmiowb_state() +#endif /* arch_mmiowb_state */ + +static inline void mmiowb_set_pending(void) +{ + struct mmiowb_state *ms = __mmiowb_state(); + ms->mmiowb_pending = ms->nesting_count; +} + +static inline void mmiowb_spin_lock(void) +{ + struct mmiowb_state *ms = __mmiowb_state(); + ms->nesting_count++; +} + +static inline void mmiowb_spin_unlock(void) +{ + struct mmiowb_state *ms = __mmiowb_state(); + + if (unlikely(ms->mmiowb_pending)) { + ms->mmiowb_pending = 0; + mmiowb(); + } + + ms->nesting_count--; +} +#else +#define mmiowb_set_pending() do { } while (0) +#define mmiowb_spin_lock() do { } while (0) +#define mmiowb_spin_unlock() do { } while (0) +#endif /* CONFIG_MMIOWB */ +#endif /* __ASM_GENERIC_MMIOWB_H */ diff --git a/include/asm-generic/mmiowb_types.h b/include/asm-generic/mmiowb_types.h new file mode 100644 index 000000000000..8eb0095655e7 --- /dev/null +++ b/include/asm-generic/mmiowb_types.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_MMIOWB_TYPES_H +#define __ASM_GENERIC_MMIOWB_TYPES_H + +#include <linux/types.h> + +struct mmiowb_state { + u16 nesting_count; + u16 mmiowb_pending; +}; + +#endif /* __ASM_GENERIC_MMIOWB_TYPES_H */ diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index fa782fba51ee..75d9d68a6de7 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -1126,6 +1126,8 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, static inline void init_espfix_bsp(void) { } #endif +extern void __init pgd_cache_init(void); + #ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot) { diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h deleted file mode 100644 index 93e67a055a4d..000000000000 --- a/include/asm-generic/rwsem.h +++ /dev/null @@ -1,140 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_GENERIC_RWSEM_H -#define _ASM_GENERIC_RWSEM_H - -#ifndef _LINUX_RWSEM_H -#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead." -#endif - -#ifdef __KERNEL__ - -/* - * R/W semaphores originally for PPC using the stuff in lib/rwsem.c. - * Adapted largely from include/asm-i386/rwsem.h - * by Paul Mackerras <paulus@samba.org>. 
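The net effect of the mmiowb machinery above: on architectures that select ARCH_HAS_MMIOWB (enabling CONFIG_MMIOWB), the generic I/O write accessors mark a pending barrier via __io_aw(), and the spinlock release path issues mmiowb() only if such a write actually happened inside the critical section. Drivers therefore no longer need explicit mmiowb() calls. A sketch of the driver-visible behaviour; the doorbell device is hypothetical:

	static DEFINE_SPINLOCK(hw_lock);
	static void __iomem *doorbell;	/* hypothetical device register */

	static void ring_doorbell(u32 val)
	{
		spin_lock(&hw_lock);
		writel(val, doorbell);	/* generic __io_aw() -> mmiowb_set_pending() */
		spin_unlock(&hw_lock);	/* mmiowb_spin_unlock() sees the pending write
					 * and issues mmiowb(); no explicit barrier is
					 * needed in the driver anymore */
	}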
- */ - -/* - * the semaphore definition - */ -#ifdef CONFIG_64BIT -# define RWSEM_ACTIVE_MASK 0xffffffffL -#else -# define RWSEM_ACTIVE_MASK 0x0000ffffL -#endif - -#define RWSEM_UNLOCKED_VALUE 0x00000000L -#define RWSEM_ACTIVE_BIAS 0x00000001L -#define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1) -#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS -#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) - -/* - * lock for reading - */ -static inline void __down_read(struct rw_semaphore *sem) -{ - if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) - rwsem_down_read_failed(sem); -} - -static inline int __down_read_killable(struct rw_semaphore *sem) -{ - if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) { - if (IS_ERR(rwsem_down_read_failed_killable(sem))) - return -EINTR; - } - - return 0; -} - -static inline int __down_read_trylock(struct rw_semaphore *sem) -{ - long tmp; - - while ((tmp = atomic_long_read(&sem->count)) >= 0) { - if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp, - tmp + RWSEM_ACTIVE_READ_BIAS)) { - return 1; - } - } - return 0; -} - -/* - * lock for writing - */ -static inline void __down_write(struct rw_semaphore *sem) -{ - long tmp; - - tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS, - &sem->count); - if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS)) - rwsem_down_write_failed(sem); -} - -static inline int __down_write_killable(struct rw_semaphore *sem) -{ - long tmp; - - tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS, - &sem->count); - if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS)) - if (IS_ERR(rwsem_down_write_failed_killable(sem))) - return -EINTR; - return 0; -} - -static inline int __down_write_trylock(struct rw_semaphore *sem) -{ - long tmp; - - tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE, - RWSEM_ACTIVE_WRITE_BIAS); - return tmp == RWSEM_UNLOCKED_VALUE; -} - -/* - * unlock after reading - */ -static inline void __up_read(struct rw_semaphore *sem) -{ - long tmp; - - tmp = atomic_long_dec_return_release(&sem->count); - if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)) - rwsem_wake(sem); -} - -/* - * unlock after writing - */ -static inline void __up_write(struct rw_semaphore *sem) -{ - if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS, - &sem->count) < 0)) - rwsem_wake(sem); -} - -/* - * downgrade write lock to read lock - */ -static inline void __downgrade_write(struct rw_semaphore *sem) -{ - long tmp; - - /* - * When downgrading from exclusive to shared ownership, - * anything inside the write-locked region cannot leak - * into the read side. In contrast, anything in the - * read-locked region is ok to be re-ordered into the - * write side. As such, rely on RELEASE semantics. - */ - tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, &sem->count); - if (tmp < 0) - rwsem_downgrade_wake(sem); -} - -#endif /* __KERNEL__ */ -#endif /* _ASM_GENERIC_RWSEM_H */ diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h index d79abca81a52..d1779d442aa5 100644 --- a/include/asm-generic/sections.h +++ b/include/asm-generic/sections.h @@ -77,6 +77,20 @@ static inline int arch_is_kernel_data(unsigned long addr) } #endif +/* + * Check if an address is part of freed initmem. This is needed on architectures + * with virt == phys kernel mapping, for code that wants to check if an address + * is part of a static object within [_stext, _end]. 
After initmem is freed, + * memory can be allocated from it, and such allocations would then have + * addresses within the range [_stext, _end]. + */ +#ifndef arch_is_kernel_initmem_freed +static inline int arch_is_kernel_initmem_freed(unsigned long addr) +{ + return 0; +} +#endif + /** * memory_contains - checks if an object is contained within a memory region * @begin: virtual address of the beginning of the memory region diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 6be86c1c5c58..480e5b2a5748 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -19,9 +19,140 @@ #include <linux/swap.h> #include <asm/pgalloc.h> #include <asm/tlbflush.h> +#include <asm/cacheflush.h> + +/* + * Blindly accessing user memory from NMI context can be dangerous + * if we're in the middle of switching the current user task or switching + * the loaded mm. + */ +#ifndef nmi_uaccess_okay +# define nmi_uaccess_okay() true +#endif #ifdef CONFIG_MMU +/* + * Generic MMU-gather implementation. + * + * The mmu_gather data structure is used by the mm code to implement the + * correct and efficient ordering of freeing pages and TLB invalidations. + * + * This correct ordering is: + * + * 1) unhook page + * 2) TLB invalidate page + * 3) free page + * + * That is, we must never free a page before we have ensured there are no live + * translations left to it. Otherwise it might be possible to observe (or + * worse, change) the page content after it has been reused. + * + * The mmu_gather API consists of: + * + * - tlb_gather_mmu() / tlb_finish_mmu(); start and finish a mmu_gather + * + * Finish in particular will issue a (final) TLB invalidate and free + * all (remaining) queued pages. + * + * - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA + * + * Defaults to flushing at tlb_end_vma() to reset the range; helps when + * there's large holes between the VMAs. + * + * - tlb_remove_page() / __tlb_remove_page() + * - tlb_remove_page_size() / __tlb_remove_page_size() + * + * __tlb_remove_page_size() is the basic primitive that queues a page for + * freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a + * boolean indicating if the queue is (now) full and a call to + * tlb_flush_mmu() is required. + * + * tlb_remove_page() and tlb_remove_page_size() imply the call to + * tlb_flush_mmu() when required and has no return value. + * + * - tlb_change_page_size() + * + * call before __tlb_remove_page*() to set the current page-size; implies a + * possible tlb_flush_mmu() call. + * + * - tlb_flush_mmu() / tlb_flush_mmu_tlbonly() + * + * tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets + * related state, like the range) + * + * tlb_flush_mmu() - in addition to the above TLB invalidate, also frees + * whatever pages are still batched. + * + * - mmu_gather::fullmm + * + * A flag set by tlb_gather_mmu() to indicate we're going to free + * the entire mm; this allows a number of optimizations. + * + * - We can ignore tlb_{start,end}_vma(); because we don't + * care about ranges. Everything will be shot down. + * + * - (RISC) architectures that use ASIDs can cycle to a new ASID + * and delay the invalidation until ASID space runs out. + * + * - mmu_gather::need_flush_all + * + * A flag that can be set by the arch code if it wants to force + * flush the entire TLB irrespective of the range. For instance + * x86-PAE needs this when changing top-level entries. 
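The mm core consumes the API walked through above in exactly the order the comment prescribes (unhook, invalidate, free). A condensed sketch of the canonical call sequence, with the per-PTE work elided; this is illustrative, not a real unmap path:

	/* Context: mm code, <asm/tlb.h> already pulled in */
	void example_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
			   unsigned long start, unsigned long end)
	{
		struct mmu_gather tlb;

		tlb_gather_mmu(&tlb, mm, start, end);
		tlb_start_vma(&tlb, vma);
		/*
		 * For each present pte in [start, end):
		 *  1) unhook the page (ptep_get_and_clear()),
		 *  2) record the invalidate (tlb_remove_tlb_entry(&tlb, pte, addr)),
		 *  3) queue the page for freeing (tlb_remove_page(&tlb, page)).
		 */
		tlb_end_vma(&tlb, vma);
		tlb_finish_mmu(&tlb, start, end);	/* final invalidate + free */
	}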
+ * + * And allows the architecture to provide and implement tlb_flush(): + * + * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make + * use of: + * + * - mmu_gather::start / mmu_gather::end + * + * which provides the range that needs to be flushed to cover the pages to + * be freed. + * + * - mmu_gather::freed_tables + * + * set when we freed page table pages + * + * - tlb_get_unmap_shift() / tlb_get_unmap_size() + * + * returns the smallest TLB entry size unmapped in this range. + * + * If an architecture does not provide tlb_flush() a default implementation + * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is + * specified, in which case we'll default to flush_tlb_mm(). + * + * Additionally there are a few opt-in features: + * + * HAVE_MMU_GATHER_PAGE_SIZE + * + * This ensures we call tlb_flush() every time tlb_change_page_size() actually + * changes the size and provides mmu_gather::page_size to tlb_flush(). + * + * HAVE_RCU_TABLE_FREE + * + * This provides tlb_remove_table(), to be used instead of tlb_remove_page() + * for page directores (__p*_free_tlb()). This provides separate freeing of + * the page-table pages themselves in a semi-RCU fashion (see comment below). + * Useful if your architecture doesn't use IPIs for remote TLB invalidates + * and therefore doesn't naturally serialize with software page-table walkers. + * + * When used, an architecture is expected to provide __tlb_remove_table() + * which does the actual freeing of these pages. + * + * HAVE_RCU_TABLE_NO_INVALIDATE + * + * This makes HAVE_RCU_TABLE_FREE avoid calling tlb_flush_mmu_tlbonly() before + * freeing the page-table pages. This can be avoided if you use + * HAVE_RCU_TABLE_FREE and your architecture does _NOT_ use the Linux + * page-tables natively. + * + * MMU_GATHER_NO_RANGE + * + * Use this if your architecture lacks an efficient flush_tlb_range(). + */ + #ifdef CONFIG_HAVE_RCU_TABLE_FREE /* * Semi RCU freeing of the page directories. @@ -60,11 +191,11 @@ struct mmu_table_batch { #define MAX_TABLE_BATCH \ ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *)) -extern void tlb_table_flush(struct mmu_gather *tlb); extern void tlb_remove_table(struct mmu_gather *tlb, void *table); #endif +#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER /* * If we can't allocate a page to make a big batch of page pointers * to work on, then just handle a few from the on-stack structure. @@ -89,14 +220,21 @@ struct mmu_gather_batch { */ #define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH) -/* struct mmu_gather is an opaque type used by the mm code for passing around +extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, + int page_size); +#endif + +/* + * struct mmu_gather is an opaque type used by the mm code for passing around * any data needed by arch specific code for tlb_remove_page. 
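For an architecture opting into HAVE_RCU_TABLE_FREE, the page-directory freeing hooks route through tlb_remove_table() rather than tlb_remove_page(), and the architecture supplies the deferred free routine. A sketch of the glue, assuming pgtable_t is a struct page *; the shape is modeled on existing users, not taken from this patch:

	/* asm/tlb.h of a hypothetical HAVE_RCU_TABLE_FREE architecture */
	#define __pte_free_tlb(tlb, ptepage, address)	\
		tlb_remove_table((tlb), (ptepage))

	/* Invoked after the semi-RCU grace period described below has expired */
	void __tlb_remove_table(void *table)
	{
		struct page *page = table;

		pgtable_page_dtor(page);
		__free_page(page);
	}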
*/ struct mmu_gather { struct mm_struct *mm; + #ifdef CONFIG_HAVE_RCU_TABLE_FREE struct mmu_table_batch *batch; #endif + unsigned long start; unsigned long end; /* @@ -124,23 +262,30 @@ struct mmu_gather { unsigned int cleared_puds : 1; unsigned int cleared_p4ds : 1; + /* + * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma + */ + unsigned int vma_exec : 1; + unsigned int vma_huge : 1; + + unsigned int batch_count; + +#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER struct mmu_gather_batch *active; struct mmu_gather_batch local; struct page *__pages[MMU_GATHER_BUNDLE]; - unsigned int batch_count; - int page_size; -}; -#define HAVE_GENERIC_MMU_GATHER +#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE + unsigned int page_size; +#endif +#endif +}; void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end); void tlb_flush_mmu(struct mmu_gather *tlb); void arch_tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end, bool force); -void tlb_flush_mmu_free(struct mmu_gather *tlb); -extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, - int page_size); static inline void __tlb_adjust_range(struct mmu_gather *tlb, unsigned long address, @@ -163,8 +308,94 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb) tlb->cleared_pmds = 0; tlb->cleared_puds = 0; tlb->cleared_p4ds = 0; + /* + * Do not reset mmu_gather::vma_* fields here, we do not + * call into tlb_start_vma() again to set them if there is an + * intermediate flush. + */ +} + +#ifdef CONFIG_MMU_GATHER_NO_RANGE + +#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma) +#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma() +#endif + +/* + * When an architecture does not have efficient means of range flushing TLBs + * there is no point in doing intermediate flushes on tlb_end_vma() to keep the + * range small. We equally don't have to worry about page granularity or other + * things. + * + * All we need to do is issue a full flush for any !0 range. + */ +static inline void tlb_flush(struct mmu_gather *tlb) +{ + if (tlb->end) + flush_tlb_mm(tlb->mm); +} + +static inline void +tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { } + +#define tlb_end_vma tlb_end_vma +static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { } + +#else /* CONFIG_MMU_GATHER_NO_RANGE */ + +#ifndef tlb_flush + +#if defined(tlb_start_vma) || defined(tlb_end_vma) +#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma() +#endif + +/* + * When an architecture does not provide its own tlb_flush() implementation + * but does have a reasonably efficient flush_vma_range() implementation + * use that. + */ +static inline void tlb_flush(struct mmu_gather *tlb) +{ + if (tlb->fullmm || tlb->need_flush_all) { + flush_tlb_mm(tlb->mm); + } else if (tlb->end) { + struct vm_area_struct vma = { + .vm_mm = tlb->mm, + .vm_flags = (tlb->vma_exec ? VM_EXEC : 0) | + (tlb->vma_huge ? VM_HUGETLB : 0), + }; + + flush_tlb_range(&vma, tlb->start, tlb->end); + } +} + +static inline void +tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) +{ + /* + * flush_tlb_range() implementations that look at VM_HUGETLB (tile, + * mips-4k) flush only large pages. + * + * flush_tlb_range() implementations that flush I-TLB also flush D-TLB + * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing + * range. 
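An architecture that can flush smarter than the flush_tlb_range()-based default above provides its own tlb_flush(), consulting whichever mmu_gather fields it cares about. A sketch, where the arch_flush_* primitives are hypothetical:

	/* asm/tlb.h -- sketch only; arch_flush_* are hypothetical primitives */
	#define tlb_flush tlb_flush

	static inline void tlb_flush(struct mmu_gather *tlb)
	{
		if (tlb->fullmm || tlb->need_flush_all)
			arch_flush_tlb_mm(tlb->mm);
		else if (tlb->end)
			arch_flush_tlb_range(tlb->mm, tlb->start, tlb->end,
					     tlb->freed_tables);
	}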
+ * + * We rely on tlb_end_vma() to issue a flush, such that when we reset + * these values the batch is empty. + */ + tlb->vma_huge = !!(vma->vm_flags & VM_HUGETLB); + tlb->vma_exec = !!(vma->vm_flags & VM_EXEC); } +#else + +static inline void +tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { } + +#endif + +#endif /* CONFIG_MMU_GATHER_NO_RANGE */ + static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) { if (!tlb->end) @@ -196,21 +427,18 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) return tlb_remove_page_size(tlb, page, PAGE_SIZE); } -#ifndef tlb_remove_check_page_size_change -#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change -static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb, +static inline void tlb_change_page_size(struct mmu_gather *tlb, unsigned int page_size) { - /* - * We don't care about page size change, just update - * mmu_gather page size here so that debug checks - * doesn't throw false warning. - */ -#ifdef CONFIG_DEBUG_VM +#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE + if (tlb->page_size && tlb->page_size != page_size) { + if (!tlb->fullmm) + tlb_flush_mmu(tlb); + } + tlb->page_size = page_size; #endif } -#endif static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb) { @@ -237,17 +465,30 @@ static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb) * the vmas are adjusted to only cover the region to be torn down. */ #ifndef tlb_start_vma -#define tlb_start_vma(tlb, vma) do { } while (0) -#endif +static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) +{ + if (tlb->fullmm) + return; -#define __tlb_end_vma(tlb, vma) \ - do { \ - if (!tlb->fullmm) \ - tlb_flush_mmu_tlbonly(tlb); \ - } while (0) + tlb_update_vma_flags(tlb, vma); + flush_cache_range(vma, vma->vm_start, vma->vm_end); +} +#endif #ifndef tlb_end_vma -#define tlb_end_vma __tlb_end_vma +static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) +{ + if (tlb->fullmm) + return; + + /* + * Do a TLB flush and reset the range at VMA boundaries; this avoids + * the ranges growing with the unused space between consecutive VMAs, + * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on + * this. 
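tlb_change_page_size(), the renaming of tlb_remove_check_page_size_change() above, must be called before queueing pages of a different size; with HAVE_MMU_GATHER_PAGE_SIZE it forces an intermediate flush whenever the size changes. A sketch of a caller, in the style of the huge-page zap paths; illustrative only:

	static void example_zap_huge_page(struct mmu_gather *tlb, struct page *page)
	{
		tlb_change_page_size(tlb, HPAGE_PMD_SIZE);	/* may tlb_flush_mmu() */
		/* ... clear the pmd and adjust rmap/counters ... */
		tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
	}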
+ */ + tlb_flush_mmu_tlbonly(tlb); +} #endif #ifndef __tlb_remove_tlb_entry @@ -372,6 +613,4 @@ static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb) #endif /* CONFIG_MMU */ -#define tlb_migrate_finish(mm) do {} while (0) - #endif /* _ASM_GENERIC__TLB_H */ diff --git a/include/crypto/aes.h b/include/crypto/aes.h index 852eaa9cd4db..0fdb542c70cd 100644 --- a/include/crypto/aes.h +++ b/include/crypto/aes.h @@ -28,10 +28,10 @@ struct crypto_aes_ctx { u32 key_length; }; -extern const u32 crypto_ft_tab[4][256]; -extern const u32 crypto_fl_tab[4][256]; -extern const u32 crypto_it_tab[4][256]; -extern const u32 crypto_il_tab[4][256]; +extern const u32 crypto_ft_tab[4][256] ____cacheline_aligned; +extern const u32 crypto_fl_tab[4][256] ____cacheline_aligned; +extern const u32 crypto_it_tab[4][256] ____cacheline_aligned; +extern const u32 crypto_il_tab[4][256] ____cacheline_aligned; int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len); diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h index 2d690494568c..8884046659a0 100644 --- a/include/crypto/akcipher.h +++ b/include/crypto/akcipher.h @@ -19,14 +19,20 @@ * * @base: Common attributes for async crypto requests * @src: Source data - * @dst: Destination data + * For verify op this is signature + digest, in that case + * total size of @src is @src_len + @dst_len. + * @dst: Destination data (Should be NULL for verify op) * @src_len: Size of the input buffer - * @dst_len: Size of the output buffer. It needs to be at least - * as big as the expected result depending on the operation + * For verify op it's size of signature part of @src, this part + * is supposed to be operated by cipher. + * @dst_len: Size of @dst buffer (for all ops except verify). + * It needs to be at least as big as the expected result + * depending on the operation. * After operation it will be updated with the actual size of the * result. * In case of error where the dst sgl size was insufficient, * it will be updated to the size required for the operation. + * For verify op this is size of digest part in @src. * @__ctx: Start of private context data */ struct akcipher_request { @@ -55,10 +61,9 @@ struct crypto_akcipher { * algorithm. In case of error, where the dst_len was insufficient, * the req->dst_len will be updated to the size required for the * operation - * @verify: Function performs a sign operation as defined by public key - * algorithm. In case of error, where the dst_len was insufficient, - * the req->dst_len will be updated to the size required for the - * operation + * @verify: Function performs a complete verify operation as defined by + * public key algorithm, returning verification status. Requires + * digest value as input parameter. * @encrypt: Function performs an encrypt operation as defined by public key * algorithm. In case of error, where the dst_len was insufficient, * the req->dst_len will be updated to the size required for the @@ -69,10 +74,10 @@ struct crypto_akcipher { * operation * @set_pub_key: Function invokes the algorithm specific set public key * function, which knows how to decode and interpret - * the BER encoded public key + * the BER encoded public key and parameters * @set_priv_key: Function invokes the algorithm specific set private key * function, which knows how to decode and interpret - * the BER encoded private key + * the BER encoded private key and parameters * @max_size: Function returns dest buffer size required for a given key. 
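With the reworked verify op documented above, callers no longer recover the digest from the signature themselves: @src carries the signature followed by the expected digest, @dst is NULL, and a zero return means the signature checked out. A condensed sketch; request allocation and async completion are elided (real callers route the return value through crypto_wait_req()):

	static int example_verify(struct akcipher_request *req,
				  void *sig, unsigned int sig_len,
				  void *digest, unsigned int digest_len)
	{
		struct scatterlist src_sg[2];

		sg_init_table(src_sg, 2);
		sg_set_buf(&src_sg[0], sig, sig_len);		/* signature part */
		sg_set_buf(&src_sg[1], digest, digest_len);	/* digest part */

		akcipher_request_set_crypt(req, src_sg, NULL, sig_len, digest_len);
		return crypto_akcipher_verify(req);	/* 0 == signature valid */
	}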
* @init: Initialize the cryptographic transformation object. * This function is used to initialize the cryptographic @@ -238,9 +243,10 @@ static inline void akcipher_request_set_callback(struct akcipher_request *req, * * @req: public key request * @src: ptr to input scatter list - * @dst: ptr to output scatter list + * @dst: ptr to output scatter list or NULL for verify op * @src_len: size of the src input scatter list to be processed - * @dst_len: size of the dst output scatter list + * @dst_len: size of the dst output scatter list or size of signature + * portion in @src for verify op */ static inline void akcipher_request_set_crypt(struct akcipher_request *req, struct scatterlist *src, @@ -343,14 +349,18 @@ static inline int crypto_akcipher_sign(struct akcipher_request *req) } /** - * crypto_akcipher_verify() - Invoke public key verify operation + * crypto_akcipher_verify() - Invoke public key signature verification * - * Function invokes the specific public key verify operation for a given - * public key algorithm + * Function invokes the specific public key signature verification operation + * for a given public key algorithm. * * @req: asymmetric key request * - * Return: zero on success; error code in case of error + * Note: req->dst should be NULL, req->src should point to SG of size + * (req->src_size + req->dst_size), containing signature (of req->src_size + * length) with appended digest (of req->dst_size length). + * + * Return: zero on verification success; error code in case of error. */ static inline int crypto_akcipher_verify(struct akcipher_request *req) { @@ -369,11 +379,12 @@ static inline int crypto_akcipher_verify(struct akcipher_request *req) * crypto_akcipher_set_pub_key() - Invoke set public key operation * * Function invokes the algorithm specific set key function, which knows - * how to decode and interpret the encoded key + * how to decode and interpret the encoded key and parameters * * @tfm: tfm handle - * @key: BER encoded public key - * @keylen: length of the key + * @key: BER encoded public key, algo OID, paramlen, BER encoded + * parameters + * @keylen: length of the key (not including other data) * * Return: zero on success; error code in case of error */ @@ -390,11 +401,12 @@ static inline int crypto_akcipher_set_pub_key(struct crypto_akcipher *tfm, * crypto_akcipher_set_priv_key() - Invoke set private key operation * * Function invokes the algorithm specific set key function, which knows - * how to decode and interpret the encoded key + * how to decode and interpret the encoded key and parameters * * @tfm: tfm handle - * @key: BER encoded private key - * @keylen: length of the key + * @key: BER encoded private key, algo OID, paramlen, BER encoded + * parameters + * @keylen: length of the key (not including other data) * * Return: zero on success; error code in case of error */ diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h index 1e64f354c2b8..23169f4d87e6 100644 --- a/include/crypto/cryptd.h +++ b/include/crypto/cryptd.h @@ -18,27 +18,11 @@ #include <crypto/hash.h> #include <crypto/skcipher.h> -struct cryptd_ablkcipher { - struct crypto_ablkcipher base; -}; - -static inline struct cryptd_ablkcipher *__cryptd_ablkcipher_cast( - struct crypto_ablkcipher *tfm) -{ - return (struct cryptd_ablkcipher *)tfm; -} - -/* alg_name should be algorithm to be cryptd-ed */ -struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name, - u32 type, u32 mask); -struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm); -bool 
cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm); -void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm); - struct cryptd_skcipher { struct crypto_skcipher base; }; +/* alg_name should be algorithm to be cryptd-ed */ struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name, u32 type, u32 mask); struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm); diff --git a/include/crypto/des.h b/include/crypto/des.h index d4094d58ac54..72c7c8e5a5a7 100644 --- a/include/crypto/des.h +++ b/include/crypto/des.h @@ -6,6 +6,11 @@ #ifndef __CRYPTO_DES_H #define __CRYPTO_DES_H +#include <crypto/skcipher.h> +#include <linux/compiler.h> +#include <linux/fips.h> +#include <linux/string.h> + #define DES_KEY_SIZE 8 #define DES_EXPKEY_WORDS 32 #define DES_BLOCK_SIZE 8 @@ -14,6 +19,44 @@ #define DES3_EDE_EXPKEY_WORDS (3 * DES_EXPKEY_WORDS) #define DES3_EDE_BLOCK_SIZE DES_BLOCK_SIZE +static inline int __des3_verify_key(u32 *flags, const u8 *key) +{ + int err = -EINVAL; + u32 K[6]; + + memcpy(K, key, DES3_EDE_KEY_SIZE); + + if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || + !((K[2] ^ K[4]) | (K[3] ^ K[5]))) && + (fips_enabled || + (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS))) + goto bad; + + if (unlikely(!((K[0] ^ K[4]) | (K[1] ^ K[5]))) && fips_enabled) + goto bad; + + err = 0; + +out: + memzero_explicit(K, DES3_EDE_KEY_SIZE); + + return err; + +bad: + *flags |= CRYPTO_TFM_RES_WEAK_KEY; + goto out; +} + +static inline int des3_verify_key(struct crypto_skcipher *tfm, const u8 *key) +{ + u32 flags; + int err; + + flags = crypto_skcipher_get_flags(tfm); + err = __des3_verify_key(&flags, key); + crypto_skcipher_set_flags(tfm, flags); + return err; +} extern unsigned long des_ekey(u32 *pe, const u8 *k); diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 3b31c1b349ae..d21bea2c4382 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -146,8 +146,6 @@ struct ahash_alg { struct shash_desc { struct crypto_shash *tfm; - u32 flags; - void *__ctx[] CRYPTO_MINALIGN_ATTR; }; @@ -819,6 +817,7 @@ static inline void *shash_desc_ctx(struct shash_desc *desc) * cipher handle must point to a keyed message digest cipher in order for this * function to succeed. * + * Context: Any context. * Return: 0 if the setting of the key was successful; < 0 if an error occurred */ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, @@ -835,6 +834,7 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, * crypto_shash_update and crypto_shash_final. The parameters have the same * meaning as discussed for those separate three functions. * + * Context: Any context. * Return: 0 if the message digest creation was successful; < 0 if an error * occurred */ @@ -850,6 +850,7 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data, * caller-allocated output buffer out which must have sufficient size (e.g. by * calling crypto_shash_descsize). * + * Context: Any context. * Return: 0 if the export creation was successful; < 0 if an error occurred */ static inline int crypto_shash_export(struct shash_desc *desc, void *out) @@ -866,6 +867,7 @@ static inline int crypto_shash_export(struct shash_desc *desc, void *out) * the input buffer. That buffer should have been generated with the * crypto_ahash_export function. * + * Context: Any context. 
* Return: 0 if the import was successful; < 0 if an error occurred */ static inline int crypto_shash_import(struct shash_desc *desc, const void *in) @@ -886,6 +888,7 @@ static inline int crypto_shash_import(struct shash_desc *desc, const void *in) * operational state handle. Any potentially existing state created by * previous operations is discarded. * + * Context: Any context. * Return: 0 if the message digest initialization was successful; < 0 if an * error occurred */ @@ -907,6 +910,7 @@ static inline int crypto_shash_init(struct shash_desc *desc) * * Updates the message digest state of the operational state handle. * + * Context: Any context. * Return: 0 if the message digest update was successful; < 0 if an error * occurred */ @@ -923,6 +927,7 @@ int crypto_shash_update(struct shash_desc *desc, const u8 *data, * into the output buffer. The caller must ensure that the output buffer is * large enough by using crypto_shash_digestsize. * + * Context: Any context. * Return: 0 if the message digest creation was successful; < 0 if an error * occurred */ @@ -939,6 +944,7 @@ int crypto_shash_final(struct shash_desc *desc, u8 *out); * crypto_shash_update and crypto_shash_final. The parameters have the same * meaning as discussed for those separate functions. * + * Context: Any context. * Return: 0 if the message digest creation was successful; < 0 if an error * occurred */ diff --git a/include/crypto/internal/simd.h b/include/crypto/internal/simd.h index f18344518e32..d2316242a988 100644 --- a/include/crypto/internal/simd.h +++ b/include/crypto/internal/simd.h @@ -6,6 +6,11 @@ #ifndef _CRYPTO_INTERNAL_SIMD_H #define _CRYPTO_INTERNAL_SIMD_H +#include <linux/percpu.h> +#include <linux/types.h> + +/* skcipher support */ + struct simd_skcipher_alg; struct skcipher_alg; @@ -22,4 +27,43 @@ int simd_register_skciphers_compat(struct skcipher_alg *algs, int count, void simd_unregister_skciphers(struct skcipher_alg *algs, int count, struct simd_skcipher_alg **simd_algs); +/* AEAD support */ + +struct simd_aead_alg; +struct aead_alg; + +struct simd_aead_alg *simd_aead_create_compat(const char *algname, + const char *drvname, + const char *basename); +struct simd_aead_alg *simd_aead_create(const char *algname, + const char *basename); +void simd_aead_free(struct simd_aead_alg *alg); + +int simd_register_aeads_compat(struct aead_alg *algs, int count, + struct simd_aead_alg **simd_algs); + +void simd_unregister_aeads(struct aead_alg *algs, int count, + struct simd_aead_alg **simd_algs); + +/* + * crypto_simd_usable() - is it allowed at this time to use SIMD instructions or + * access the SIMD register file? + * + * This delegates to may_use_simd(), except that this also returns false if SIMD + * in crypto code has been temporarily disabled on this CPU by the crypto + * self-tests, in order to test the no-SIMD fallback code. This override is + * currently limited to configurations where the extra self-tests are enabled, + * because it might be a bit too invasive to be part of the regular self-tests. + * + * This is a macro so that <asm/simd.h>, which some architectures don't have, + * doesn't have to be included directly here. 
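In practice crypto_simd_usable() gates the fast path of SIMD-accelerated drivers. A sketch of the usual pattern; the example_* helpers are hypothetical, and kernel_fpu_begin()/kernel_fpu_end() is the x86 spelling of SIMD-context entry (other architectures differ):

	static void example_crypt_block(u8 *dst, const u8 *src)
	{
		if (!crypto_simd_usable()) {
			example_crypt_generic(dst, src);	/* plain C fallback */
			return;
		}

		kernel_fpu_begin();
		example_crypt_simd(dst, src);
		kernel_fpu_end();
	}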
+ */ +#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS +DECLARE_PER_CPU(bool, crypto_simd_disabled_for_test); +#define crypto_simd_usable() \ + (may_use_simd() && !this_cpu_read(crypto_simd_disabled_for_test)) +#else +#define crypto_simd_usable() may_use_simd() +#endif + #endif /* _CRYPTO_INTERNAL_SIMD_H */ diff --git a/include/crypto/morus1280_glue.h b/include/crypto/morus1280_glue.h index ad2aa743dd99..5cefddb1991f 100644 --- a/include/crypto/morus1280_glue.h +++ b/include/crypto/morus1280_glue.h @@ -47,16 +47,7 @@ int crypto_morus1280_glue_setauthsize(struct crypto_aead *tfm, int crypto_morus1280_glue_encrypt(struct aead_request *req); int crypto_morus1280_glue_decrypt(struct aead_request *req); -int cryptd_morus1280_glue_setkey(struct crypto_aead *aead, const u8 *key, - unsigned int keylen); -int cryptd_morus1280_glue_setauthsize(struct crypto_aead *aead, - unsigned int authsize); -int cryptd_morus1280_glue_encrypt(struct aead_request *req); -int cryptd_morus1280_glue_decrypt(struct aead_request *req); -int cryptd_morus1280_glue_init_tfm(struct crypto_aead *aead); -void cryptd_morus1280_glue_exit_tfm(struct crypto_aead *aead); - -#define MORUS1280_DECLARE_ALGS(id, driver_name, priority) \ +#define MORUS1280_DECLARE_ALG(id, driver_name, priority) \ static const struct morus1280_glue_ops crypto_morus1280_##id##_ops = {\ .init = crypto_morus1280_##id##_init, \ .ad = crypto_morus1280_##id##_ad, \ @@ -77,55 +68,29 @@ void cryptd_morus1280_glue_exit_tfm(struct crypto_aead *aead); { \ } \ \ - static struct aead_alg crypto_morus1280_##id##_algs[] = {\ - { \ - .setkey = crypto_morus1280_glue_setkey, \ - .setauthsize = crypto_morus1280_glue_setauthsize, \ - .encrypt = crypto_morus1280_glue_encrypt, \ - .decrypt = crypto_morus1280_glue_decrypt, \ - .init = crypto_morus1280_##id##_init_tfm, \ - .exit = crypto_morus1280_##id##_exit_tfm, \ - \ - .ivsize = MORUS_NONCE_SIZE, \ - .maxauthsize = MORUS_MAX_AUTH_SIZE, \ - .chunksize = MORUS1280_BLOCK_SIZE, \ - \ - .base = { \ - .cra_flags = CRYPTO_ALG_INTERNAL, \ - .cra_blocksize = 1, \ - .cra_ctxsize = sizeof(struct morus1280_ctx), \ - .cra_alignmask = 0, \ - \ - .cra_name = "__morus1280", \ - .cra_driver_name = "__"driver_name, \ - \ - .cra_module = THIS_MODULE, \ - } \ - }, { \ - .setkey = cryptd_morus1280_glue_setkey, \ - .setauthsize = cryptd_morus1280_glue_setauthsize, \ - .encrypt = cryptd_morus1280_glue_encrypt, \ - .decrypt = cryptd_morus1280_glue_decrypt, \ - .init = cryptd_morus1280_glue_init_tfm, \ - .exit = cryptd_morus1280_glue_exit_tfm, \ + static struct aead_alg crypto_morus1280_##id##_alg = { \ + .setkey = crypto_morus1280_glue_setkey, \ + .setauthsize = crypto_morus1280_glue_setauthsize, \ + .encrypt = crypto_morus1280_glue_encrypt, \ + .decrypt = crypto_morus1280_glue_decrypt, \ + .init = crypto_morus1280_##id##_init_tfm, \ + .exit = crypto_morus1280_##id##_exit_tfm, \ + \ + .ivsize = MORUS_NONCE_SIZE, \ + .maxauthsize = MORUS_MAX_AUTH_SIZE, \ + .chunksize = MORUS1280_BLOCK_SIZE, \ + \ + .base = { \ + .cra_flags = CRYPTO_ALG_INTERNAL, \ + .cra_blocksize = 1, \ + .cra_ctxsize = sizeof(struct morus1280_ctx), \ + .cra_alignmask = 0, \ + .cra_priority = priority, \ \ - .ivsize = MORUS_NONCE_SIZE, \ - .maxauthsize = MORUS_MAX_AUTH_SIZE, \ - .chunksize = MORUS1280_BLOCK_SIZE, \ + .cra_name = "__morus1280", \ + .cra_driver_name = "__"driver_name, \ \ - .base = { \ - .cra_flags = CRYPTO_ALG_ASYNC, \ - .cra_blocksize = 1, \ - .cra_ctxsize = sizeof(struct crypto_aead *), \ - .cra_alignmask = 0, \ - \ - .cra_priority = priority, \ - \ - .cra_name = 
"morus1280", \ - .cra_driver_name = driver_name, \ - \ - .cra_module = THIS_MODULE, \ - } \ + .cra_module = THIS_MODULE, \ } \ } diff --git a/include/crypto/morus640_glue.h b/include/crypto/morus640_glue.h index df8e1103ff94..0ee6266cb26c 100644 --- a/include/crypto/morus640_glue.h +++ b/include/crypto/morus640_glue.h @@ -47,16 +47,7 @@ int crypto_morus640_glue_setauthsize(struct crypto_aead *tfm, int crypto_morus640_glue_encrypt(struct aead_request *req); int crypto_morus640_glue_decrypt(struct aead_request *req); -int cryptd_morus640_glue_setkey(struct crypto_aead *aead, const u8 *key, - unsigned int keylen); -int cryptd_morus640_glue_setauthsize(struct crypto_aead *aead, - unsigned int authsize); -int cryptd_morus640_glue_encrypt(struct aead_request *req); -int cryptd_morus640_glue_decrypt(struct aead_request *req); -int cryptd_morus640_glue_init_tfm(struct crypto_aead *aead); -void cryptd_morus640_glue_exit_tfm(struct crypto_aead *aead); - -#define MORUS640_DECLARE_ALGS(id, driver_name, priority) \ +#define MORUS640_DECLARE_ALG(id, driver_name, priority) \ static const struct morus640_glue_ops crypto_morus640_##id##_ops = {\ .init = crypto_morus640_##id##_init, \ .ad = crypto_morus640_##id##_ad, \ @@ -77,55 +68,29 @@ void cryptd_morus640_glue_exit_tfm(struct crypto_aead *aead); { \ } \ \ - static struct aead_alg crypto_morus640_##id##_algs[] = {\ - { \ - .setkey = crypto_morus640_glue_setkey, \ - .setauthsize = crypto_morus640_glue_setauthsize, \ - .encrypt = crypto_morus640_glue_encrypt, \ - .decrypt = crypto_morus640_glue_decrypt, \ - .init = crypto_morus640_##id##_init_tfm, \ - .exit = crypto_morus640_##id##_exit_tfm, \ - \ - .ivsize = MORUS_NONCE_SIZE, \ - .maxauthsize = MORUS_MAX_AUTH_SIZE, \ - .chunksize = MORUS640_BLOCK_SIZE, \ - \ - .base = { \ - .cra_flags = CRYPTO_ALG_INTERNAL, \ - .cra_blocksize = 1, \ - .cra_ctxsize = sizeof(struct morus640_ctx), \ - .cra_alignmask = 0, \ - \ - .cra_name = "__morus640", \ - .cra_driver_name = "__"driver_name, \ - \ - .cra_module = THIS_MODULE, \ - } \ - }, { \ - .setkey = cryptd_morus640_glue_setkey, \ - .setauthsize = cryptd_morus640_glue_setauthsize, \ - .encrypt = cryptd_morus640_glue_encrypt, \ - .decrypt = cryptd_morus640_glue_decrypt, \ - .init = cryptd_morus640_glue_init_tfm, \ - .exit = cryptd_morus640_glue_exit_tfm, \ + static struct aead_alg crypto_morus640_##id##_alg = {\ + .setkey = crypto_morus640_glue_setkey, \ + .setauthsize = crypto_morus640_glue_setauthsize, \ + .encrypt = crypto_morus640_glue_encrypt, \ + .decrypt = crypto_morus640_glue_decrypt, \ + .init = crypto_morus640_##id##_init_tfm, \ + .exit = crypto_morus640_##id##_exit_tfm, \ + \ + .ivsize = MORUS_NONCE_SIZE, \ + .maxauthsize = MORUS_MAX_AUTH_SIZE, \ + .chunksize = MORUS640_BLOCK_SIZE, \ + \ + .base = { \ + .cra_flags = CRYPTO_ALG_INTERNAL, \ + .cra_blocksize = 1, \ + .cra_ctxsize = sizeof(struct morus640_ctx), \ + .cra_alignmask = 0, \ + .cra_priority = priority, \ \ - .ivsize = MORUS_NONCE_SIZE, \ - .maxauthsize = MORUS_MAX_AUTH_SIZE, \ - .chunksize = MORUS640_BLOCK_SIZE, \ + .cra_name = "__morus640", \ + .cra_driver_name = "__"driver_name, \ \ - .base = { \ - .cra_flags = CRYPTO_ALG_ASYNC, \ - .cra_blocksize = 1, \ - .cra_ctxsize = sizeof(struct crypto_aead *), \ - .cra_alignmask = 0, \ - \ - .cra_priority = priority, \ - \ - .cra_name = "morus640", \ - .cra_driver_name = driver_name, \ - \ - .cra_module = THIS_MODULE, \ - } \ + .cra_module = THIS_MODULE, \ } \ } diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h index 
be626eac9113..712fe1214b5f 100644 --- a/include/crypto/public_key.h +++ b/include/crypto/public_key.h @@ -15,6 +15,7 @@ #define _LINUX_PUBLIC_KEY_H #include <linux/keyctl.h> +#include <linux/oid_registry.h> /* * Cryptographic data for the public-key subtype of the asymmetric key type. @@ -25,6 +26,9 @@ struct public_key { void *key; u32 keylen; + enum OID algo; + void *params; + u32 paramlen; bool key_is_private; const char *id_type; const char *pkey_algo; diff --git a/include/crypto/streebog.h b/include/crypto/streebog.h index 856e32af8657..cae1b4a01971 100644 --- a/include/crypto/streebog.h +++ b/include/crypto/streebog.h @@ -23,7 +23,10 @@ struct streebog_uint512 { }; struct streebog_state { - u8 buffer[STREEBOG_BLOCK_SIZE]; + union { + u8 buffer[STREEBOG_BLOCK_SIZE]; + struct streebog_uint512 m; + }; struct streebog_uint512 hash; struct streebog_uint512 h; struct streebog_uint512 N; diff --git a/include/linux/acpi.h b/include/linux/acpi.h index d5dcebd7aad3..ca55ae00f8c9 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -669,12 +669,14 @@ static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv) return false; } -static inline const char * -acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv) +static inline struct acpi_device * +acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv) { return NULL; } +static inline void acpi_dev_put(struct acpi_device *adev) {} + static inline bool is_acpi_node(struct fwnode_handle *fwnode) { return false; diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h index 38cd77b39a64..723e4dfa1c14 100644 --- a/include/linux/acpi_iort.h +++ b/include/linux/acpi_iort.h @@ -26,6 +26,14 @@ #define IORT_IRQ_MASK(irq) (irq & 0xffffffffULL) #define IORT_IRQ_TRIGGER_MASK(irq) ((irq >> 32) & 0xffffffffULL) +/* + * PMCG model identifiers for use in smmu pmu driver. Please note + * that this is purely for the use of software and has nothing to + * do with hardware or with IORT specification. + */ +#define IORT_SMMU_V3_PMCG_GENERIC 0x00000000 /* Generic SMMUv3 PMCG */ +#define IORT_SMMU_V3_PMCG_HISI_HIP08 0x00000001 /* HiSilicon HIP08 PMCG */ + int iort_register_domain_token(int trans_id, phys_addr_t base, struct fwnode_handle *fw_node); void iort_deregister_domain_token(int trans_id); diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 445348facea9..d58aa0db05f9 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -67,7 +67,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, .line = __LINE__, \ }; \ ______r = !!(cond); \ - ______f.miss_hit[______r]++; \ + ______r ? 
______f.miss_hit[1]++ : ______f.miss_hit[0]++;\ ______r; \ })) #endif /* CONFIG_PROFILE_ALL_BRANCHES */ diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 5041357d0297..732745f865b7 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -137,9 +137,26 @@ static inline int disable_nonboot_cpus(void) return freeze_secondary_cpus(0); } extern void enable_nonboot_cpus(void); + +static inline int suspend_disable_secondary_cpus(void) +{ + int cpu = 0; + + if (IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU)) + cpu = -1; + + return freeze_secondary_cpus(cpu); +} +static inline void suspend_enable_secondary_cpus(void) +{ + return enable_nonboot_cpus(); +} + #else /* !CONFIG_PM_SLEEP_SMP */ static inline int disable_nonboot_cpus(void) { return 0; } static inline void enable_nonboot_cpus(void) {} +static inline int suspend_disable_secondary_cpus(void) { return 0; } +static inline void suspend_enable_secondary_cpus(void) { } #endif /* !CONFIG_PM_SLEEP_SMP */ void cpu_startup_entry(enum cpuhp_state state); @@ -175,6 +192,7 @@ enum cpuhp_smt_control { CPU_SMT_DISABLED, CPU_SMT_FORCE_DISABLED, CPU_SMT_NOT_SUPPORTED, + CPU_SMT_NOT_IMPLEMENTED, }; #if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT) @@ -182,9 +200,33 @@ extern enum cpuhp_smt_control cpu_smt_control; extern void cpu_smt_disable(bool force); extern void cpu_smt_check_topology(void); #else -# define cpu_smt_control (CPU_SMT_ENABLED) +# define cpu_smt_control (CPU_SMT_NOT_IMPLEMENTED) static inline void cpu_smt_disable(bool force) { } static inline void cpu_smt_check_topology(void) { } #endif +/* + * These are used for a global "mitigations=" cmdline option for toggling + * optional CPU mitigations. + */ +enum cpu_mitigations { + CPU_MITIGATIONS_OFF, + CPU_MITIGATIONS_AUTO, + CPU_MITIGATIONS_AUTO_NOSMT, +}; + +extern enum cpu_mitigations cpu_mitigations; + +/* mitigations=off */ +static inline bool cpu_mitigations_off(void) +{ + return cpu_mitigations == CPU_MITIGATIONS_OFF; +} + +/* mitigations=auto,nosmt */ +static inline bool cpu_mitigations_auto_nosmt(void) +{ + return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT; +} + #endif /* _LINUX_CPU_H_ */ diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index b160e98076e3..684caf067003 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -178,6 +178,11 @@ static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { } #endif +static inline bool policy_is_inactive(struct cpufreq_policy *policy) +{ + return cpumask_empty(policy->cpus); +} + static inline bool policy_is_shared(struct cpufreq_policy *policy) { return cpumask_weight(policy->cpus) > 1; @@ -193,8 +198,14 @@ unsigned int cpufreq_quick_get_max(unsigned int cpu); void disable_cpufreq(void); u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy); + +struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu); +void cpufreq_cpu_release(struct cpufreq_policy *policy); int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); +int cpufreq_set_policy(struct cpufreq_policy *policy, + struct cpufreq_policy *new_policy); void cpufreq_update_policy(unsigned int cpu); +void cpufreq_update_limits(unsigned int cpu); bool have_governor_per_policy(void); struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy); void cpufreq_enable_fast_switch(struct cpufreq_policy *policy); @@ -322,6 +333,9 @@ struct cpufreq_driver { /* should be defined, if possible */ unsigned int (*get)(unsigned int cpu); + 
/* Called to update policy limits on firmware notifications. */ + void (*update_limits)(unsigned int cpu); + /* optional */ int (*bios_limit)(int cpu, unsigned int *limit); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index e78281d07b70..dbfdd0fadbef 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -147,6 +147,7 @@ enum cpuhp_state { CPUHP_AP_X86_VDSO_VMA_ONLINE, CPUHP_AP_IRQ_AFFINITY_ONLINE, CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS, + CPUHP_AP_X86_INTEL_EPB_ONLINE, CPUHP_AP_PERF_ONLINE, CPUHP_AP_PERF_X86_ONLINE, CPUHP_AP_PERF_X86_UNCORE_ONLINE, diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 3b39472324a3..bb9a0db89f1a 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -83,6 +83,7 @@ struct cpuidle_device { unsigned int use_deepest_state:1; unsigned int poll_time_limit:1; unsigned int cpu; + ktime_t next_hrtimer; int last_residency; struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX]; diff --git a/include/linux/dmi.h b/include/linux/dmi.h index c46fdb36700b..8de8c4f15163 100644 --- a/include/linux/dmi.h +++ b/include/linux/dmi.h @@ -102,9 +102,7 @@ const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list); extern const char * dmi_get_system_info(int field); extern const struct dmi_device * dmi_find_device(int type, const char *name, const struct dmi_device *from); -extern void dmi_scan_machine(void); -extern void dmi_memdev_walk(void); -extern void dmi_set_dump_stack_arch_desc(void); +extern void dmi_setup(void); extern bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp); extern int dmi_get_bios_year(void); extern int dmi_name_in_vendors(const char *str); @@ -122,9 +120,7 @@ static inline int dmi_check_system(const struct dmi_system_id *list) { return 0; static inline const char * dmi_get_system_info(int field) { return NULL; } static inline const struct dmi_device * dmi_find_device(int type, const char *name, const struct dmi_device *from) { return NULL; } -static inline void dmi_scan_machine(void) { return; } -static inline void dmi_memdev_walk(void) { } -static inline void dmi_set_dump_stack_arch_desc(void) { } +static inline void dmi_setup(void) { } static inline bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp) { if (yearp) diff --git a/include/linux/filter.h b/include/linux/filter.h index 6074aa064b54..7d3abde3f183 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -20,6 +20,7 @@ #include <linux/set_memory.h> #include <linux/kallsyms.h> #include <linux/if_vlan.h> +#include <linux/vmalloc.h> #include <net/sch_generic.h> @@ -503,7 +504,6 @@ struct bpf_prog { u16 pages; /* Number of allocated pages */ u16 jited:1, /* Is our filter JIT'ed? */ jit_requested:1,/* archs need to JIT the prog */ - undo_set_mem:1, /* Passed set_memory_ro() checkpoint */ gpl_compatible:1, /* Is filter GPL compatible? */ cb_access:1, /* Is control block accessed? */ dst_needed:1, /* Do we need dst entry? 
*/ @@ -733,24 +733,15 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default) static inline void bpf_prog_lock_ro(struct bpf_prog *fp) { - fp->undo_set_mem = 1; + set_vm_flush_reset_perms(fp); set_memory_ro((unsigned long)fp, fp->pages); } -static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) -{ - if (fp->undo_set_mem) - set_memory_rw((unsigned long)fp, fp->pages); -} - static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) { + set_vm_flush_reset_perms(hdr); set_memory_ro((unsigned long)hdr, hdr->pages); -} - -static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr) -{ - set_memory_rw((unsigned long)hdr, hdr->pages); + set_memory_x((unsigned long)hdr, hdr->pages); } static inline struct bpf_binary_header * @@ -788,7 +779,6 @@ void __bpf_prog_free(struct bpf_prog *fp); static inline void bpf_prog_unlock_free(struct bpf_prog *fp) { - bpf_prog_unlock_ro(fp); __bpf_prog_free(fp); } diff --git a/include/linux/firmware/intel/stratix10-smc.h b/include/linux/firmware/intel/stratix10-smc.h index 5be5dab50b13..01684d935580 100644 --- a/include/linux/firmware/intel/stratix10-smc.h +++ b/include/linux/firmware/intel/stratix10-smc.h @@ -309,4 +309,23 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE) #define INTEL_SIP_SMC_FUNCID_RSU_UPDATE 12 #define INTEL_SIP_SMC_RSU_UPDATE \ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_UPDATE) + +/* + * Request INTEL_SIP_SMC_ECC_DBE + * + * Sync call used by service driver at EL1 to alert EL3 that a Double + * Bit ECC error has occurred. + * + * Call register usage: + * a0 INTEL_SIP_SMC_ECC_DBE + * a1 SysManager Double Bit Error value + * a2-7 not used + * + * Return status + * a0 INTEL_SIP_SMC_STATUS_OK + */ +#define INTEL_SIP_SMC_FUNCID_ECC_DBE 13 +#define INTEL_SIP_SMC_ECC_DBE \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_ECC_DBE) + #endif diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 730876187344..20899919ead8 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -241,21 +241,11 @@ static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { #ifdef CONFIG_STACK_TRACER -#define STACK_TRACE_ENTRIES 500 - -struct stack_trace; - -extern unsigned stack_trace_index[]; -extern struct stack_trace stack_trace_max; -extern unsigned long stack_trace_max_size; -extern arch_spinlock_t stack_trace_max_lock; - extern int stack_tracer_enabled; -void stack_trace_print(void); -int -stack_trace_sysctl(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); + +int stack_trace_sysctl(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); /* DO NOT MODIFY THIS VARIABLE DIRECTLY! 
*/ DECLARE_PER_CPU(int, disable_stack_tracer); diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h index 99e0c1b0b5fb..2b949fa501e1 100644 --- a/include/linux/hwmon.h +++ b/include/linux/hwmon.h @@ -40,6 +40,11 @@ enum hwmon_chip_attributes { hwmon_chip_register_tz, hwmon_chip_update_interval, hwmon_chip_alarms, + hwmon_chip_samples, + hwmon_chip_curr_samples, + hwmon_chip_in_samples, + hwmon_chip_power_samples, + hwmon_chip_temp_samples, }; #define HWMON_C_TEMP_RESET_HISTORY BIT(hwmon_chip_temp_reset_history) @@ -49,6 +54,11 @@ enum hwmon_chip_attributes { #define HWMON_C_REGISTER_TZ BIT(hwmon_chip_register_tz) #define HWMON_C_UPDATE_INTERVAL BIT(hwmon_chip_update_interval) #define HWMON_C_ALARMS BIT(hwmon_chip_alarms) +#define HWMON_C_SAMPLES BIT(hwmon_chip_samples) +#define HWMON_C_CURR_SAMPLES BIT(hwmon_chip_curr_samples) +#define HWMON_C_IN_SAMPLES BIT(hwmon_chip_in_samples) +#define HWMON_C_POWER_SAMPLES BIT(hwmon_chip_power_samples) +#define HWMON_C_TEMP_SAMPLES BIT(hwmon_chip_temp_samples) enum hwmon_temp_attributes { hwmon_temp_input = 0, @@ -365,6 +375,14 @@ struct hwmon_channel_info { const u32 *config; }; +#define HWMON_CHANNEL_INFO(stype, ...) \ + (&(struct hwmon_channel_info) { \ + .type = hwmon_##stype, \ + .config = (u32 []) { \ + __VA_ARGS__, 0 \ + } \ + }) + /** * Chip configuration * @ops: Pointer to hwmon operations. diff --git a/include/linux/ima.h b/include/linux/ima.h index dc12fbcf484c..fd9f7cf4cdf5 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h @@ -31,7 +31,7 @@ extern void ima_post_path_mknod(struct dentry *dentry); extern void ima_add_kexec_buffer(struct kimage *image); #endif -#if defined(CONFIG_X86) && defined(CONFIG_EFI) +#if (defined(CONFIG_X86) && defined(CONFIG_EFI)) || defined(CONFIG_S390) extern bool arch_ima_get_secureboot(void); extern const char * const *arch_get_ima_policy(void); #else diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 690b238a44d5..c7eef32e7739 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -668,31 +668,6 @@ extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); extern void tasklet_init(struct tasklet_struct *t, void (*func)(unsigned long), unsigned long data); -struct tasklet_hrtimer { - struct hrtimer timer; - struct tasklet_struct tasklet; - enum hrtimer_restart (*function)(struct hrtimer *); -}; - -extern void -tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer, - enum hrtimer_restart (*function)(struct hrtimer *), - clockid_t which_clock, enum hrtimer_mode mode); - -static inline -void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time, - const enum hrtimer_mode mode) -{ - hrtimer_start(&ttimer->timer, time, mode); -} - -static inline -void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer) -{ - hrtimer_cancel(&ttimer->timer); - tasklet_kill(&ttimer->tasklet); -} - /* * Autoprobing for irqs: * diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 0f919d5fe84f..c2ffff5f9ae2 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -1606,7 +1606,6 @@ static inline u32 jbd2_chksum(journal_t *journal, u32 crc, JBD_MAX_CHECKSUM_SIZE); desc.shash.tfm = journal->j_chksum_driver; - desc.shash.flags = 0; *(u32 *)desc.ctx = crc; err = crypto_shash_update(&desc.shash, address, length); diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h index a49f2b45b3f0..42710d5949ba 100644 --- a/include/linux/jump_label_ratelimit.h +++ b/include/linux/jump_label_ratelimit.h @@ -12,21 +12,79 
@@ struct static_key_deferred { struct delayed_work work; }; -extern void static_key_slow_dec_deferred(struct static_key_deferred *key); -extern void static_key_deferred_flush(struct static_key_deferred *key); +struct static_key_true_deferred { + struct static_key_true key; + unsigned long timeout; + struct delayed_work work; +}; + +struct static_key_false_deferred { + struct static_key_false key; + unsigned long timeout; + struct delayed_work work; +}; + +#define static_key_slow_dec_deferred(x) \ + __static_key_slow_dec_deferred(&(x)->key, &(x)->work, (x)->timeout) +#define static_branch_slow_dec_deferred(x) \ + __static_key_slow_dec_deferred(&(x)->key.key, &(x)->work, (x)->timeout) + +#define static_key_deferred_flush(x) \ + __static_key_deferred_flush((x), &(x)->work) + +extern void +__static_key_slow_dec_deferred(struct static_key *key, + struct delayed_work *work, + unsigned long timeout); +extern void __static_key_deferred_flush(void *key, struct delayed_work *work); extern void jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl); +extern void jump_label_update_timeout(struct work_struct *work); + +#define DEFINE_STATIC_KEY_DEFERRED_TRUE(name, rl) \ + struct static_key_true_deferred name = { \ + .key = { STATIC_KEY_INIT_TRUE }, \ + .timeout = (rl), \ + .work = __DELAYED_WORK_INITIALIZER((name).work, \ + jump_label_update_timeout, \ + 0), \ + } + +#define DEFINE_STATIC_KEY_DEFERRED_FALSE(name, rl) \ + struct static_key_false_deferred name = { \ + .key = { STATIC_KEY_INIT_FALSE }, \ + .timeout = (rl), \ + .work = __DELAYED_WORK_INITIALIZER((name).work, \ + jump_label_update_timeout, \ + 0), \ + } + +#define static_branch_deferred_inc(x) static_branch_inc(&(x)->key) + #else /* !CONFIG_JUMP_LABEL */ struct static_key_deferred { struct static_key key; }; +struct static_key_true_deferred { + struct static_key_true key; +}; +struct static_key_false_deferred { + struct static_key_false key; +}; +#define DEFINE_STATIC_KEY_DEFERRED_TRUE(name, rl) \ + struct static_key_true_deferred name = { STATIC_KEY_TRUE_INIT } +#define DEFINE_STATIC_KEY_DEFERRED_FALSE(name, rl) \ + struct static_key_false_deferred name = { STATIC_KEY_FALSE_INIT } + +#define static_branch_slow_dec_deferred(x) static_branch_dec(&(x)->key) + static inline void static_key_slow_dec_deferred(struct static_key_deferred *key) { STATIC_KEY_CHECK_USE(key); static_key_slow_dec(&key->key); } -static inline void static_key_deferred_flush(struct static_key_deferred *key) +static inline void static_key_deferred_flush(void *key) { STATIC_KEY_CHECK_USE(key); } diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 79c3873d58ac..6e2377e6c1d6 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -66,6 +66,11 @@ struct lock_class_key { extern struct lock_class_key __lockdep_no_validate__; +struct lock_trace { + unsigned int nr_entries; + unsigned int offset; +}; + #define LOCKSTAT_POINTS 4 /* @@ -100,7 +105,7 @@ struct lock_class { * IRQ/softirq usage tracking bits: */ unsigned long usage_mask; - struct stack_trace usage_traces[XXX_LOCK_USAGE_STATES]; + struct lock_trace usage_traces[XXX_LOCK_USAGE_STATES]; /* * Generation counter, when doing certain classes of graph walking, @@ -188,7 +193,7 @@ struct lock_list { struct list_head entry; struct lock_class *class; struct lock_class *links_to; - struct stack_trace trace; + struct lock_trace trace; int distance; /* @@ -471,7 +476,7 @@ struct pin_cookie { }; #define NIL_COOKIE (struct pin_cookie){ } -#define lockdep_pin_lock(l) ({ struct 
pin_cookie cookie; cookie; }) +#define lockdep_pin_lock(l) ({ struct pin_cookie cookie = { }; cookie; }) #define lockdep_repin_lock(l, c) do { (void)(l); (void)(c); } while (0) #define lockdep_unpin_lock(l, c) do { (void)(l); (void)(c); } while (0) diff --git a/include/linux/mm.h b/include/linux/mm.h index 6b10c21630f5..083d7b4863ed 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2610,37 +2610,31 @@ static inline void kernel_poison_pages(struct page *page, int numpages, int enable) { } #endif -#ifdef CONFIG_DEBUG_PAGEALLOC extern bool _debug_pagealloc_enabled; -extern void __kernel_map_pages(struct page *page, int numpages, int enable); static inline bool debug_pagealloc_enabled(void) { - return _debug_pagealloc_enabled; + return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) && _debug_pagealloc_enabled; } +#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP) +extern void __kernel_map_pages(struct page *page, int numpages, int enable); + static inline void kernel_map_pages(struct page *page, int numpages, int enable) { - if (!debug_pagealloc_enabled()) - return; - __kernel_map_pages(page, numpages, enable); } #ifdef CONFIG_HIBERNATION extern bool kernel_page_present(struct page *page); #endif /* CONFIG_HIBERNATION */ -#else /* CONFIG_DEBUG_PAGEALLOC */ +#else /* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */ static inline void kernel_map_pages(struct page *page, int numpages, int enable) {} #ifdef CONFIG_HIBERNATION static inline bool kernel_page_present(struct page *page) { return true; } #endif /* CONFIG_HIBERNATION */ -static inline bool debug_pagealloc_enabled(void) -{ - return false; -} -#endif /* CONFIG_DEBUG_PAGEALLOC */ +#endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */ #ifdef __HAVE_ARCH_GATE_AREA extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm); diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h index d2fa9ca42e9a..7f30446348c4 100644 --- a/include/linux/oid_registry.h +++ b/include/linux/oid_registry.h @@ -93,6 +93,24 @@ enum OID { OID_authorityKeyIdentifier, /* 2.5.29.35 */ OID_extKeyUsage, /* 2.5.29.37 */ + /* EC-RDSA */ + OID_gostCPSignA, /* 1.2.643.2.2.35.1 */ + OID_gostCPSignB, /* 1.2.643.2.2.35.2 */ + OID_gostCPSignC, /* 1.2.643.2.2.35.3 */ + OID_gost2012PKey256, /* 1.2.643.7.1.1.1.1 */ + OID_gost2012PKey512, /* 1.2.643.7.1.1.1.2 */ + OID_gost2012Digest256, /* 1.2.643.7.1.1.2.2 */ + OID_gost2012Digest512, /* 1.2.643.7.1.1.2.3 */ + OID_gost2012Signature256, /* 1.2.643.7.1.1.3.2 */ + OID_gost2012Signature512, /* 1.2.643.7.1.1.3.3 */ + OID_gostTC26Sign256A, /* 1.2.643.7.1.2.1.1.1 */ + OID_gostTC26Sign256B, /* 1.2.643.7.1.2.1.1.2 */ + OID_gostTC26Sign256C, /* 1.2.643.7.1.2.1.1.3 */ + OID_gostTC26Sign256D, /* 1.2.643.7.1.2.1.1.4 */ + OID_gostTC26Sign512A, /* 1.2.643.7.1.2.1.2.1 */ + OID_gostTC26Sign512B, /* 1.2.643.7.1.2.1.2.2 */ + OID_gostTC26Sign512C, /* 1.2.643.7.1.2.1.2.3 */ + OID__NR }; diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 1f678f023850..15a82ff0aefe 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -463,7 +463,7 @@ enum perf_addr_filter_action_t { /** * struct perf_addr_filter - address range filter definition * @entry: event's filter list linkage - * @inode: object file's inode for file-based filters + * @path: object file's path for file-based filters * @offset: filter range offset * @size: filter range size (size==0 means single address trigger) * @action: filter/start/stop @@ -887,6 +887,9 @@ extern void 
perf_sched_cb_dec(struct pmu *pmu); extern void perf_sched_cb_inc(struct pmu *pmu); extern int perf_event_task_disable(void); extern int perf_event_task_enable(void); + +extern void perf_pmu_resched(struct pmu *pmu); + extern int perf_event_refresh(struct perf_event *event, int refresh); extern void perf_event_update_userpage(struct perf_event *event); extern int perf_event_release_kernel(struct perf_event *event); @@ -1054,12 +1057,18 @@ static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned lo #endif /* - * Take a snapshot of the regs. Skip ip and frame pointer to - * the nth caller. We only need a few of the regs: + * When generating a perf sample in-line, instead of from an interrupt / + * exception, we lack a pt_regs. This is typically used from software events + * like: SW_CONTEXT_SWITCHES, SW_MIGRATIONS and the tie-in with tracepoints. + * + * We typically don't need a full set, but (for x86) do require: * - ip for PERF_SAMPLE_IP * - cs for user_mode() tests - * - bp for callchains - * - eflags, for future purposes, just in case + * - sp for PERF_SAMPLE_CALLCHAIN + * - eflags for MISC bits and CALLCHAIN (see: perf_hw_regs()) + * + * NOTE: assumes @regs is otherwise already 0 filled; this is important for + * things like PERF_SAMPLE_REGS_INTR. */ static inline void perf_fetch_caller_regs(struct pt_regs *regs) { diff --git a/include/linux/platform_data/ads7828.h b/include/linux/platform_data/ads7828.h index 3245f45f9d77..a3370a007702 100644 --- a/include/linux/platform_data/ads7828.h +++ b/include/linux/platform_data/ads7828.h @@ -4,7 +4,7 @@ * Copyright (c) 2012 Savoir-faire Linux Inc. * Vivien Didelot <vivien.didelot@savoirfairelinux.com> * - * For further information, see the Documentation/hwmon/ads7828 file. + * For further information, see the Documentation/hwmon/ads7828.rst file. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as diff --git a/include/linux/platform_data/ds620.h b/include/linux/platform_data/ds620.h index 6ef58bb77e46..f0ce22a78bb8 100644 --- a/include/linux/platform_data/ds620.h +++ b/include/linux/platform_data/ds620.h @@ -14,7 +14,7 @@ struct ds620_platform_data { * 1 = PO_LOW * 2 = PO_HIGH * - * (see Documentation/hwmon/ds620) + * (see Documentation/hwmon/ds620.rst) */ int pomode; }; diff --git a/include/linux/platform_data/ina2xx.h b/include/linux/platform_data/ina2xx.h index 9f0aa1b48c78..dde59fd3590f 100644 --- a/include/linux/platform_data/ina2xx.h +++ b/include/linux/platform_data/ina2xx.h @@ -7,7 +7,7 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * - * For further information, see the Documentation/hwmon/ina2xx file. + * For further information, see the Documentation/hwmon/ina2xx.rst file. */ /** diff --git a/include/linux/platform_data/max197.h b/include/linux/platform_data/max197.h index 8da8f94ee15c..2bbd0919bc89 100644 --- a/include/linux/platform_data/max197.h +++ b/include/linux/platform_data/max197.h @@ -8,7 +8,7 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * - * For further information, see the Documentation/hwmon/max197 file. + * For further information, see the Documentation/hwmon/max197.rst file. 
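/*
 * Editor's note: a hedged usage sketch for the HWMON_CHANNEL_INFO() helper
 * added in the hwmon.h hunk above, for a hypothetical chip with one chip
 * node and two temperature channels. my_chip_info would then be referenced
 * from a struct hwmon_chip_info and registered with
 * [devm_]hwmon_device_register_with_info() as before.
 */
#include <linux/hwmon.h>

static const struct hwmon_channel_info *my_chip_info[] = {
	HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ | HWMON_C_TEMP_SAMPLES),
	HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT, HWMON_T_INPUT),
	NULL
};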
*/ #ifndef _PDATA_MAX197_H diff --git a/include/linux/platform_data/ntc_thermistor.h b/include/linux/platform_data/ntc_thermistor.h index ee03d429742b..5fa115d3ea4b 100644 --- a/include/linux/platform_data/ntc_thermistor.h +++ b/include/linux/platform_data/ntc_thermistor.h @@ -42,7 +42,7 @@ struct ntc_thermistor_platform_data { * read_uV() * * How to set up pullup_ohm, pulldown_ohm, and connect is - * described at Documentation/hwmon/ntc_thermistor + * described at Documentation/hwmon/ntc_thermistor.rst * * pullup/down_ohm: 0 for infinite / not-connected * diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 1ed5874bcee0..0e8e356bed6a 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -16,6 +16,7 @@ #include <linux/of.h> #include <linux/notifier.h> #include <linux/spinlock.h> +#include <linux/cpumask.h> /* * Flags to control the behaviour of a genpd. @@ -42,11 +43,22 @@ * GENPD_FLAG_ACTIVE_WAKEUP: Instructs genpd to keep the PM domain powered * on, in case any of its attached devices is used * in the wakeup path to serve system wakeups. + * + * GENPD_FLAG_CPU_DOMAIN: Instructs genpd that it should expect to get + * devices attached, which may belong to CPUs or + * possibly have subdomains with CPUs attached. + * This flag enables the genpd backend driver to + * deploy idle power management support for CPUs + * and groups of CPUs. Note that the backend + * driver must then comply with the so-called + * last-man-standing algorithm for the CPUs in the + * PM domain. */ #define GENPD_FLAG_PM_CLK (1U << 0) #define GENPD_FLAG_IRQ_SAFE (1U << 1) #define GENPD_FLAG_ALWAYS_ON (1U << 2) #define GENPD_FLAG_ACTIVE_WAKEUP (1U << 3) +#define GENPD_FLAG_CPU_DOMAIN (1U << 4) enum gpd_status { GPD_STATE_ACTIVE = 0, /* PM domain is active */ @@ -69,6 +81,7 @@ struct genpd_power_state { s64 residency_ns; struct fwnode_handle *fwnode; ktime_t idle_time; + void *data; }; struct genpd_lock_ops; @@ -93,6 +106,7 @@ struct generic_pm_domain { unsigned int suspended_count; /* System suspend device counter */ unsigned int prepared_count; /* Suspend counter of prepared devices */ unsigned int performance_state; /* Aggregated max performance state */ + cpumask_var_t cpus; /* A cpumask of the attached CPUs */ int (*power_off)(struct generic_pm_domain *domain); int (*power_on)(struct generic_pm_domain *domain); struct opp_table *opp_table; /* OPP table of the genpd */ @@ -104,15 +118,17 @@ struct generic_pm_domain { s64 max_off_time_ns; /* Maximum allowed "suspended" time. 
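/*
 * Editor's note: a hedged sketch of a genpd provider using the new
 * GENPD_FLAG_CPU_DOMAIN flag from this hunk together with the
 * pm_domain_cpu_gov governor declared a little further down (under
 * CONFIG_CPU_IDLE). All my_* names are hypothetical; the idle-state table
 * and error handling are elided.
 */
#include <linux/pm_domain.h>

static int my_cluster_power_off(struct generic_pm_domain *pd) { return 0; }
static int my_cluster_power_on(struct generic_pm_domain *pd) { return 0; }

static struct generic_pm_domain my_cluster_pd = {
	.name		= "my-cluster",
	.flags		= GENPD_FLAG_CPU_DOMAIN,
	.power_off	= my_cluster_power_off,
	.power_on	= my_cluster_power_on,
};

static int my_cluster_pd_init(void)
{
	/* CPU devices attached later populate ->cpus; the governor keeps the
	 * domain on until the last CPU in it has entered idle. */
	return pm_genpd_init(&my_cluster_pd, &pm_domain_cpu_gov, false);
}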
*/ bool max_off_time_changed; bool cached_power_down_ok; + bool cached_power_down_state_idx; int (*attach_dev)(struct generic_pm_domain *domain, struct device *dev); void (*detach_dev)(struct generic_pm_domain *domain, struct device *dev); unsigned int flags; /* Bit field of configs for genpd */ struct genpd_power_state *states; + void (*free_states)(struct genpd_power_state *states, + unsigned int state_count); unsigned int state_count; /* number of states */ unsigned int state_idx; /* state that genpd will go to when off */ - void *free; /* Free the state that was allocated for default */ ktime_t on_time; ktime_t accounting_time; const struct genpd_lock_ops *lock_ops; @@ -159,6 +175,7 @@ struct generic_pm_domain_data { struct pm_domain_data base; struct gpd_timing_data td; struct notifier_block nb; + int cpu; unsigned int performance_state; void *data; }; @@ -187,6 +204,9 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state); extern struct dev_power_governor simple_qos_governor; extern struct dev_power_governor pm_domain_always_on_gov; +#ifdef CONFIG_CPU_IDLE +extern struct dev_power_governor pm_domain_cpu_gov; +#endif #else static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev) diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 24c757a32a7b..b150fe97ce5a 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -102,6 +102,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, unsigned long *freq); +struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev, + unsigned long u_volt); struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, unsigned long *freq); @@ -207,6 +209,12 @@ static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, return ERR_PTR(-ENOTSUPP); } +static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev, + unsigned long u_volt) +{ + return ERR_PTR(-ENOTSUPP); +} + static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, unsigned long *freq) { diff --git a/include/linux/property.h b/include/linux/property.h index 65d3420dd5d1..a29369c89e6e 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -13,6 +13,7 @@ #ifndef _LINUX_PROPERTY_H_ #define _LINUX_PROPERTY_H_ +#include <linux/bits.h> #include <linux/fwnode.h> #include <linux/types.h> @@ -304,6 +305,23 @@ struct fwnode_handle * fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port, u32 endpoint); +/* + * Fwnode lookup flags + * + * @FWNODE_GRAPH_ENDPOINT_NEXT: In the case of no exact match, look for the + * closest endpoint ID greater than the specified + * one. + * @FWNODE_GRAPH_DEVICE_DISABLED: The device to which the remote + * endpoint of the given endpoint belongs + * may be disabled. 
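/*
 * Editor's note: a short usage sketch for dev_pm_opp_find_freq_ceil_by_volt()
 * added in the pm_opp.h hunk above, which looks an OPP up by voltage instead
 * of frequency. The 900000 uV budget is a made-up example value.
 */
#include <linux/err.h>
#include <linux/pm_opp.h>

static unsigned long my_freq_for_volt(struct device *dev)
{
	struct dev_pm_opp *opp;
	unsigned long freq;

	opp = dev_pm_opp_find_freq_ceil_by_volt(dev, 900000);
	if (IS_ERR(opp))
		return 0;

	freq = dev_pm_opp_get_freq(opp);
	dev_pm_opp_put(opp);	/* lookups take a reference, as usual for OPPs */
	return freq;
}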
+ */ +#define FWNODE_GRAPH_ENDPOINT_NEXT BIT(0) +#define FWNODE_GRAPH_DEVICE_DISABLED BIT(1) + +struct fwnode_handle * +fwnode_graph_get_endpoint_by_id(const struct fwnode_handle *fwnode, + u32 port, u32 endpoint, unsigned long flags); + #define fwnode_graph_for_each_endpoint(fwnode, child) \ for (child = NULL; \ (child = fwnode_graph_get_next_endpoint(fwnode, child)); ) diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 827c601841c4..6f89fc8d4b8e 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -5,8 +5,7 @@ * * Author: Brijesh Singh <brijesh.singh@amd.com> * - * SEV spec 0.14 is available at: - * http://support.amd.com/TechDocs/55766_SEV-KM API_Specification.pdf + * SEV API spec is available at https://developer.amd.com/sev * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index f6165d304b4d..48841e5dab90 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -1338,7 +1338,6 @@ static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info) } /* Let SB update */ - mmiowb(); return rc; } @@ -1374,7 +1373,6 @@ static inline void qed_sb_ack(struct qed_sb_info *sb_info, /* Both segments (interrupts & acks) are written to same place address; * Need to guarantee all commands will be received (in-order) by HW. */ - mmiowb(); barrier(); } diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 6cdb1db776cf..922bb6848813 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -878,9 +878,11 @@ static inline void rcu_head_init(struct rcu_head *rhp) static inline bool rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f) { - if (READ_ONCE(rhp->func) == f) + rcu_callback_t func = READ_ONCE(rhp->func); + + if (func == f) return true; - WARN_ON_ONCE(READ_ONCE(rhp->func) != (rcu_callback_t)~0L); + WARN_ON_ONCE(func != (rcu_callback_t)~0L); return false; } diff --git a/include/linux/rcuwait.h b/include/linux/rcuwait.h index 90bfa3279a01..563290fc194f 100644 --- a/include/linux/rcuwait.h +++ b/include/linux/rcuwait.h @@ -18,7 +18,7 @@ * awoken. */ struct rcuwait { - struct task_struct *task; + struct task_struct __rcu *task; }; #define __RCUWAIT_INITIALIZER(name) \ diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h deleted file mode 100644 index e47568363e5e..000000000000 --- a/include/linux/rwsem-spinlock.h +++ /dev/null @@ -1,47 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* rwsem-spinlock.h: fallback C implementation - * - * Copyright (c) 2001 David Howells (dhowells@redhat.com). 
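/*
 * Editor's note: a hedged usage sketch for fwnode_graph_get_endpoint_by_id()
 * declared in the property.h hunk above: look up port 0, endpoint 0, fall
 * back to the next higher endpoint ID, and accept a remote device that is
 * disabled.
 */
#include <linux/property.h>

static void my_parse_endpoint(struct device *dev)
{
	struct fwnode_handle *ep;

	ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), 0, 0,
					     FWNODE_GRAPH_ENDPOINT_NEXT |
					     FWNODE_GRAPH_DEVICE_DISABLED);
	if (!ep)
		return;
	/* ... parse the endpoint properties here ... */
	fwnode_handle_put(ep);
}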
- * - Derived partially from ideas by Andrea Arcangeli <andrea@suse.de> - * - Derived also from comments by Linus - */ - -#ifndef _LINUX_RWSEM_SPINLOCK_H -#define _LINUX_RWSEM_SPINLOCK_H - -#ifndef _LINUX_RWSEM_H -#error "please don't include linux/rwsem-spinlock.h directly, use linux/rwsem.h instead" -#endif - -#ifdef __KERNEL__ -/* - * the rw-semaphore definition - * - if count is 0 then there are no active readers or writers - * - if count is +ve then that is the number of active readers - * - if count is -1 then there is one active writer - * - if wait_list is not empty, then there are processes waiting for the semaphore - */ -struct rw_semaphore { - __s32 count; - raw_spinlock_t wait_lock; - struct list_head wait_list; -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -}; - -#define RWSEM_UNLOCKED_VALUE 0x00000000 - -extern void __down_read(struct rw_semaphore *sem); -extern int __must_check __down_read_killable(struct rw_semaphore *sem); -extern int __down_read_trylock(struct rw_semaphore *sem); -extern void __down_write(struct rw_semaphore *sem); -extern int __must_check __down_write_killable(struct rw_semaphore *sem); -extern int __down_write_trylock(struct rw_semaphore *sem); -extern void __up_read(struct rw_semaphore *sem); -extern void __up_write(struct rw_semaphore *sem); -extern void __downgrade_write(struct rw_semaphore *sem); -extern int rwsem_is_locked(struct rw_semaphore *sem); - -#endif /* __KERNEL__ */ -#endif /* _LINUX_RWSEM_SPINLOCK_H */ diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index 67dbb57508b1..2ea18a3def04 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -20,25 +20,30 @@ #include <linux/osq_lock.h> #endif -struct rw_semaphore; - -#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK -#include <linux/rwsem-spinlock.h> /* use a generic implementation */ -#define __RWSEM_INIT_COUNT(name) .count = RWSEM_UNLOCKED_VALUE -#else -/* All arch specific implementations share the same struct */ +/* + * For an uncontended rwsem, count and owner are the only fields a task + * needs to touch when acquiring the rwsem. So they are put next to each + * other to increase the chance that they will share the same cacheline. + * + * In a contended rwsem, the owner is likely the most frequently accessed + * field in the structure as the optimistic waiter that holds the osq lock + * will spin on owner. For an embedded rwsem, other hot fields in the + * containing structure should be moved further away from the rwsem to + * reduce the chance that they will share the same cacheline, causing + * a cacheline bouncing problem. + */ struct rw_semaphore { atomic_long_t count; #ifdef CONFIG_RWSEM_SPIN_ON_OWNER - struct optimistic_spin_queue osq; /* spinner MCS lock */ /* * Write owner. Used as a speculative check to see * if the owner is running on the cpu. 
*/ struct task_struct *owner; + struct optimistic_spin_queue osq; /* spinner MCS lock */ #endif + raw_spinlock_t wait_lock; + struct list_head wait_list; #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif @@ -50,24 +55,14 @@ struct rw_semaphore { */ #define RWSEM_OWNER_UNKNOWN ((struct task_struct *)-2L) -extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_down_write_failed_killable(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *); -extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); - -/* Include the arch specific part */ -#include <asm/rwsem.h> - /* In all implementations count != 0 means locked */ static inline int rwsem_is_locked(struct rw_semaphore *sem) { return atomic_long_read(&sem->count) != 0; } +#define RWSEM_UNLOCKED_VALUE 0L #define __RWSEM_INIT_COUNT(name) .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE) -#endif /* Common initializer macros and functions */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 1549584a1538..50606a6e73d6 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1057,7 +1057,6 @@ struct task_struct { #ifdef CONFIG_RSEQ struct rseq __user *rseq; - u32 rseq_len; u32 rseq_sig; /* * RmW on rseq_event_mask must be performed atomically @@ -1855,12 +1854,10 @@ static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags) { if (clone_flags & CLONE_THREAD) { t->rseq = NULL; - t->rseq_len = 0; t->rseq_sig = 0; t->rseq_event_mask = 0; } else { t->rseq = current->rseq; - t->rseq_len = current->rseq_len; t->rseq_sig = current->rseq_sig; t->rseq_event_mask = current->rseq_event_mask; } @@ -1869,7 +1866,6 @@ static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags) static inline void rseq_execve(struct task_struct *t) { t->rseq = NULL; - t->rseq_len = 0; t->rseq_sig = 0; t->rseq_event_mask = 0; } diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index 2e97a2227045..f1227f2c38a4 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -76,6 +76,7 @@ extern void exit_itimers(struct signal_struct *); extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long); extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); struct task_struct *fork_idle(int); +struct mm_struct *copy_init_mm(void); extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); extern long kernel_wait4(pid_t, int __user *, int, struct rusage *); diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index 57c7ed3fe465..cfc0a89a7159 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -76,8 +76,8 @@ struct sched_domain_shared { struct sched_domain { /* These fields must be setup */ - struct sched_domain *parent; /* top domain must be null terminated */ - struct sched_domain *child; /* bottom domain must be null terminated */ + struct sched_domain __rcu *parent; /* top domain must be null terminated */ + struct sched_domain __rcu *child; /* bottom domain must be null terminated */ struct sched_group *groups; /* the balancing groups of the domain */ unsigned long min_interval; /* Minimum balance interval ms */ unsigned long max_interval; /* 
Maximum balance interval ms */ diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h index 2a986d282a97..b5071497b8cb 100644 --- a/include/linux/set_memory.h +++ b/include/linux/set_memory.h @@ -17,6 +17,17 @@ static inline int set_memory_x(unsigned long addr, int numpages) { return 0; } static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; } #endif +#ifndef CONFIG_ARCH_HAS_SET_DIRECT_MAP +static inline int set_direct_map_invalid_noflush(struct page *page) +{ + return 0; +} +static inline int set_direct_map_default_noflush(struct page *page) +{ + return 0; +} +#endif + #ifndef set_mce_nospec static inline int set_mce_nospec(unsigned long pfn) { diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h index d0884b525001..9d1bc65d226c 100644 --- a/include/linux/smpboot.h +++ b/include/linux/smpboot.h @@ -29,7 +29,7 @@ struct smpboot_thread_data; * @thread_comm: The base name of the thread */ struct smp_hotplug_thread { - struct task_struct __percpu **store; + struct task_struct * __percpu *store; struct list_head list; int (*thread_should_run)(unsigned int cpu); void (*thread_fn)(unsigned int cpu); diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index e089157dcf97..ed7c4d6b8235 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -57,6 +57,7 @@ #include <linux/stringify.h> #include <linux/bottom_half.h> #include <asm/barrier.h> +#include <asm/mmiowb.h> /* @@ -178,6 +179,7 @@ static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock) { __acquire(lock); arch_spin_lock(&lock->raw_lock); + mmiowb_spin_lock(); } #ifndef arch_spin_lock_flags @@ -189,15 +191,22 @@ do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lo { __acquire(lock); arch_spin_lock_flags(&lock->raw_lock, *flags); + mmiowb_spin_lock(); } static inline int do_raw_spin_trylock(raw_spinlock_t *lock) { - return arch_spin_trylock(&(lock)->raw_lock); + int ret = arch_spin_trylock(&(lock)->raw_lock); + + if (ret) + mmiowb_spin_lock(); + + return ret; } static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) { + mmiowb_spin_unlock(); arch_spin_unlock(&lock->raw_lock); __release(lock); } diff --git a/include/linux/srcu.h b/include/linux/srcu.h index c495b2d51569..e432cc92c73d 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -56,45 +56,11 @@ struct srcu_struct { }; void call_srcu(struct srcu_struct *ssp, struct rcu_head *head, void (*func)(struct rcu_head *head)); -void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced); +void cleanup_srcu_struct(struct srcu_struct *ssp); int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp); void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp); void synchronize_srcu(struct srcu_struct *ssp); -/** - * cleanup_srcu_struct - deconstruct a sleep-RCU structure - * @ssp: structure to clean up. - * - * Must invoke this after you are finished using a given srcu_struct that - * was initialized via init_srcu_struct(), else you leak memory. - */ -static inline void cleanup_srcu_struct(struct srcu_struct *ssp) -{ - _cleanup_srcu_struct(ssp, false); -} - -/** - * cleanup_srcu_struct_quiesced - deconstruct a quiesced sleep-RCU structure - * @ssp: structure to clean up. - * - * Must invoke this after you are finished using a given srcu_struct that - * was initialized via init_srcu_struct(), else you leak memory. Also, - * all grace-period processing must have completed. 
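/*
 * Editor's note: with mmiowb() tracking folded into the spinlock core by the
 * spinlock.h hunk above, a driver sequence like the one below no longer needs
 * an explicit mmiowb() before unlock, which is why the qed_if.h hunk earlier
 * in this diff deletes those calls. struct my_dev and MY_DOORBELL are
 * hypothetical.
 */
#include <linux/io.h>
#include <linux/spinlock.h>

#define MY_DOORBELL	0x0

struct my_dev {
	spinlock_t lock;
	void __iomem *regs;
};

static void my_ring_doorbell(struct my_dev *md, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&md->lock, flags);
	writel(val, md->regs + MY_DOORBELL);
	/* mmiowb_spin_unlock() in do_raw_spin_unlock() now orders this write
	 * against MMIO issued by the next holder of the lock. */
	spin_unlock_irqrestore(&md->lock, flags);
}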
- * - * "Completed" means that the last synchronize_srcu() and - * synchronize_srcu_expedited() calls must have returned before the call - * to cleanup_srcu_struct_quiesced(). It also means that the callback - * from the last call_srcu() must have been invoked before the call to - * cleanup_srcu_struct_quiesced(), but you can use srcu_barrier() to help - * with this last. Violating these rules will get you a WARN_ON() splat - * (with high probability, anyway), and will also cause the srcu_struct - * to be leaked. - */ -static inline void cleanup_srcu_struct_quiesced(struct srcu_struct *ssp) -{ - _cleanup_srcu_struct(ssp, true); -} - #ifdef CONFIG_DEBUG_LOCK_ALLOC /** diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h index 7978b3e2c1e1..0805dee1b6b8 100644 --- a/include/linux/stackdepot.h +++ b/include/linux/stackdepot.h @@ -23,10 +23,10 @@ typedef u32 depot_stack_handle_t; -struct stack_trace; +depot_stack_handle_t stack_depot_save(unsigned long *entries, + unsigned int nr_entries, gfp_t gfp_flags); -depot_stack_handle_t depot_save_stack(struct stack_trace *trace, gfp_t flags); - -void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace); +unsigned int stack_depot_fetch(depot_stack_handle_t handle, + unsigned long **entries); #endif diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index ba29a0613e66..f0cfd12cb45e 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h @@ -3,11 +3,64 @@ #define __LINUX_STACKTRACE_H #include <linux/types.h> +#include <asm/errno.h> struct task_struct; struct pt_regs; #ifdef CONFIG_STACKTRACE +void stack_trace_print(unsigned long *trace, unsigned int nr_entries, + int spaces); +int stack_trace_snprint(char *buf, size_t size, unsigned long *entries, + unsigned int nr_entries, int spaces); +unsigned int stack_trace_save(unsigned long *store, unsigned int size, + unsigned int skipnr); +unsigned int stack_trace_save_tsk(struct task_struct *task, + unsigned long *store, unsigned int size, + unsigned int skipnr); +unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store, + unsigned int size, unsigned int skipnr); +unsigned int stack_trace_save_user(unsigned long *store, unsigned int size); + +/* Internal interfaces. Do not use in generic code */ +#ifdef CONFIG_ARCH_STACKWALK + +/** + * stack_trace_consume_fn - Callback for arch_stack_walk() + * @cookie: Caller supplied pointer handed back by arch_stack_walk() + * @addr: The stack entry address to consume + * @reliable: True when the stack entry is reliable. Required by + * some printk based consumers. + * + * Return: True, if the entry was consumed or skipped + * False, if there is no space left to store + */ +typedef bool (*stack_trace_consume_fn)(void *cookie, unsigned long addr, + bool reliable); +/** + * arch_stack_walk - Architecture specific function to walk the stack + * @consume_entry: Callback which is invoked by the architecture code for + * each entry. 
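/*
 * Editor's note: the stackdepot.h and stacktrace.h hunks in this region move
 * from struct stack_trace to plain entry arrays; a hedged sketch of the new
 * calling convention, combining save, print and depot storage:
 */
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static void my_record_stack(void)
{
	unsigned long entries[16], *saved;
	depot_stack_handle_t handle;
	unsigned int nr;

	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0 /* skipnr */);
	stack_trace_print(entries, nr, 0 /* spaces */);

	/* Deduplicate and store the trace; fetch returns the depot's copy. */
	handle = stack_depot_save(entries, nr, GFP_KERNEL);
	if (handle) {
		nr = stack_depot_fetch(handle, &saved);
		stack_trace_print(saved, nr, 0);
	}
}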
+ * @cookie: Caller supplied pointer which is handed back to + * @consume_entry + * @task: Pointer to a task struct, can be NULL + * @regs: Pointer to registers, can be NULL + * + * ============ ======= ============================================ + * task regs + * ============ ======= ============================================ + * task NULL Stack trace from task (can be current) + * current regs Stack trace starting on regs->stackpointer + * ============ ======= ============================================ + */ +void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, + struct task_struct *task, struct pt_regs *regs); +int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, void *cookie, + struct task_struct *task); +void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie, + const struct pt_regs *regs); + +#else /* CONFIG_ARCH_STACKWALK */ struct stack_trace { unsigned int nr_entries, max_entries; unsigned long *entries; @@ -21,24 +74,20 @@ extern void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace); extern int save_stack_trace_tsk_reliable(struct task_struct *tsk, struct stack_trace *trace); - -extern void print_stack_trace(struct stack_trace *trace, int spaces); -extern int snprint_stack_trace(char *buf, size_t size, - struct stack_trace *trace, int spaces); - -#ifdef CONFIG_USER_STACKTRACE_SUPPORT extern void save_stack_trace_user(struct stack_trace *trace); +#endif /* !CONFIG_ARCH_STACKWALK */ +#endif /* CONFIG_STACKTRACE */ + +#if defined(CONFIG_STACKTRACE) && defined(CONFIG_HAVE_RELIABLE_STACKTRACE) +int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store, + unsigned int size); #else -# define save_stack_trace_user(trace) do { } while (0) +static inline int stack_trace_save_tsk_reliable(struct task_struct *tsk, + unsigned long *store, + unsigned int size) +{ + return -ENOSYS; +} #endif -#else /* !CONFIG_STACKTRACE */ -# define save_stack_trace(trace) do { } while (0) -# define save_stack_trace_tsk(tsk, trace) do { } while (0) -# define save_stack_trace_user(trace) do { } while (0) -# define print_stack_trace(trace, spaces) do { } while (0) -# define snprint_stack_trace(buf, size, trace, spaces) do { } while (0) -# define save_stack_trace_tsk_reliable(tsk, trace) ({ -ENOSYS; }) -#endif /* CONFIG_STACKTRACE */ - #endif /* __LINUX_STACKTRACE_H */ diff --git a/include/linux/string.h b/include/linux/string.h index 6ab0a6fa512e..4deb11f7976b 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -31,6 +31,10 @@ size_t strlcpy(char *, const char *, size_t); #ifndef __HAVE_ARCH_STRSCPY ssize_t strscpy(char *, const char *, size_t); #endif + +/* Wraps calls to strscpy()/memset(), no arch specific code required */ +ssize_t strscpy_pad(char *dest, const char *src, size_t count); + #ifndef __HAVE_ARCH_STRCAT extern char * strcat(char *, const char *); #endif diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 3f529ad9a9d2..6b3ea9ea6a9e 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -425,6 +425,7 @@ void restore_processor_state(void); /* kernel/power/main.c */ extern int register_pm_notifier(struct notifier_block *nb); extern int unregister_pm_notifier(struct notifier_block *nb); +extern void ksys_sync_helper(void); #define pm_notifier(fn, pri) { \ static struct notifier_block fn##_nb = \ @@ -462,6 +463,8 @@ static inline int unregister_pm_notifier(struct notifier_block *nb) return 0; } +static inline void ksys_sync_helper(void) {} + #define 
pm_notifier(fn, pri) do { (void)(fn); } while (0) static inline bool pm_wakeup_pending(void) { return false; } diff --git a/include/linux/tick.h b/include/linux/tick.h index 55388ab45fd4..f92a10b5e112 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -68,6 +68,12 @@ extern void tick_broadcast_control(enum tick_broadcast_mode mode); static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { } #endif /* BROADCAST */ +#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_HOTPLUG_CPU) +extern void tick_offline_cpu(unsigned int cpu); +#else +static inline void tick_offline_cpu(unsigned int cpu) { } +#endif + #ifdef CONFIG_GENERIC_CLOCKEVENTS extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state); #else @@ -122,6 +128,7 @@ extern void tick_nohz_idle_enter(void); extern void tick_nohz_idle_exit(void); extern void tick_nohz_irq_exit(void); extern bool tick_nohz_idle_got_tick(void); +extern ktime_t tick_nohz_get_next_hrtimer(void); extern ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next); extern unsigned long tick_nohz_get_idle_calls(void); extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu); @@ -145,7 +152,11 @@ static inline void tick_nohz_idle_restart_tick(void) { } static inline void tick_nohz_idle_enter(void) { } static inline void tick_nohz_idle_exit(void) { } static inline bool tick_nohz_idle_got_tick(void) { return false; } - +static inline ktime_t tick_nohz_get_next_hrtimer(void) +{ + /* Next wake up is the tick period, assume it starts now */ + return ktime_add(ktime_get(), TICK_NSEC); +} static inline ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next) { *delta_next = TICK_NSEC; diff --git a/include/linux/time64.h b/include/linux/time64.h index f38d382ffec1..a620ee610b9f 100644 --- a/include/linux/time64.h +++ b/include/linux/time64.h @@ -33,6 +33,17 @@ struct itimerspec64 { #define KTIME_MAX ((s64)~((u64)1 << 63)) #define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC) +/* + * Limits for settimeofday(): + * + * To prevent setting the time close to the wraparound point, time setting + * is limited so that a reasonable uptime can be accommodated. An uptime of + * 30 years should really be sufficient, which means the cutoff is the year + * 2232. At that point the cutoff is just a small part of the larger problem. + */ +#define TIME_UPTIME_SEC_MAX (30LL * 365 * 24 * 3600) +#define TIME_SETTOD_SEC_MAX (KTIME_SEC_MAX - TIME_UPTIME_SEC_MAX) + static inline int timespec64_equal(const struct timespec64 *a, const struct timespec64 *b) { @@ -100,6 +111,16 @@ static inline bool timespec64_valid_strict(const struct timespec64 *ts) return true; } +static inline bool timespec64_valid_settod(const struct timespec64 *ts) +{ + if (!timespec64_valid(ts)) + return false; + /* Disallow values which cause overflow issues vs. 
CLOCK_REALTIME */ + if ((unsigned long long)ts->tv_sec >= TIME_SETTOD_SEC_MAX) + return false; + return true; +} + /** * timespec64_to_ns - Convert timespec64 to nanoseconds * @ts: pointer to the timespec64 variable to be converted diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 37b226e8df13..2b70130af585 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -268,6 +268,8 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); #define user_access_end() do { } while (0) #define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0) #define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0) +static inline unsigned long user_access_save(void) { return 0UL; } +static inline void user_access_restore(unsigned long flags) { } #endif #ifdef CONFIG_HARDENED_USERCOPY diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index 103a48a48872..12bf0b68ed92 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -115,6 +115,7 @@ struct uprobes_state { struct xol_area *xol_area; }; +extern void __init uprobes_init(void); extern int set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr); extern int set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr); extern bool is_swbp_insn(uprobe_opcode_t *insn); @@ -154,6 +155,10 @@ extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, struct uprobes_state { }; +static inline void uprobes_init(void) +{ +} + #define uprobe_get_trap_addr(regs) instruction_pointer(regs) static inline int diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 398e9c95cd61..c6eebb839552 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -21,6 +21,11 @@ struct notifier_block; /* in notifier.h */ #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */ #define VM_NO_GUARD 0x00000040 /* don't add guard page */ #define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */ +/* + * Memory with VM_FLUSH_RESET_PERMS cannot be freed in an interrupt or with + * vfree_atomic(). + */ +#define VM_FLUSH_RESET_PERMS 0x00000100 /* Reset direct map and flush TLB on unmap */ /* bits [20..32] reserved for arch specific ioremap internals */ /* @@ -142,6 +147,13 @@ extern int map_kernel_range_noflush(unsigned long start, unsigned long size, pgprot_t prot, struct page **pages); extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size); extern void unmap_kernel_range(unsigned long addr, unsigned long size); +static inline void set_vm_flush_reset_perms(void *addr) +{ + struct vm_struct *vm = find_vm_area(addr); + + if (vm) + vm->flags |= VM_FLUSH_RESET_PERMS; +} #else static inline int map_kernel_range_noflush(unsigned long start, unsigned long size, @@ -157,6 +169,9 @@ static inline void unmap_kernel_range(unsigned long addr, unsigned long size) { } +static inline void set_vm_flush_reset_perms(void *addr) +{ +} #endif /* Allocate/destroy a 'vmalloc' VM area. 
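/*
 * Editor's note: a small usage sketch for strscpy_pad() from the string.h
 * hunk earlier in this diff. Unlike bare strscpy(), the tail of the
 * destination buffer is zero-filled, which matters when the buffer is later
 * copied to userspace.
 */
#include <linux/string.h>

static void my_fill_name(void)
{
	char buf[8];
	ssize_t n;

	n = strscpy_pad(buf, "hi", sizeof(buf));
	/* n == 2; buf holds "hi" followed by six NUL bytes, so no stale
	 * stack data can leak out of the buffer. */
}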
*/ diff --git a/include/net/xfrm.h b/include/net/xfrm.h index c9b0b2b5d672..99f722c4d804 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -219,7 +219,7 @@ struct xfrm_state { struct xfrm_stats stats; struct xfrm_lifetime_cur curlft; - struct tasklet_hrtimer mtimer; + struct hrtimer mtimer; struct xfrm_state_offload xso; diff --git a/include/soc/rockchip/rk3399_grf.h b/include/soc/rockchip/rk3399_grf.h new file mode 100644 index 000000000000..3eebabcb2812 --- /dev/null +++ b/include/soc/rockchip/rk3399_grf.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Rockchip General Register Files definitions + * + * Copyright (c) 2018, Collabora Ltd. + * Author: Enric Balletbo i Serra <enric.balletbo@collabora.com> + */ + +#ifndef __SOC_RK3399_GRF_H +#define __SOC_RK3399_GRF_H + +/* PMU GRF Registers */ +#define RK3399_PMUGRF_OS_REG2 0x308 +#define RK3399_PMUGRF_DDRTYPE_SHIFT 13 +#define RK3399_PMUGRF_DDRTYPE_MASK 7 +#define RK3399_PMUGRF_DDRTYPE_DDR3 3 +#define RK3399_PMUGRF_DDRTYPE_LPDDR2 5 +#define RK3399_PMUGRF_DDRTYPE_LPDDR3 6 +#define RK3399_PMUGRF_DDRTYPE_LPDDR4 7 + +#endif diff --git a/include/soc/rockchip/rockchip_sip.h b/include/soc/rockchip/rockchip_sip.h index 7e28092c4d3d..ad9482c56797 100644 --- a/include/soc/rockchip/rockchip_sip.h +++ b/include/soc/rockchip/rockchip_sip.h @@ -23,5 +23,6 @@ #define ROCKCHIP_SIP_CONFIG_DRAM_GET_RATE 0x05 #define ROCKCHIP_SIP_CONFIG_DRAM_CLR_IRQ 0x06 #define ROCKCHIP_SIP_CONFIG_DRAM_SET_PARAM 0x07 +#define ROCKCHIP_SIP_CONFIG_DRAM_SET_ODT_PD 0x08 #endif diff --git a/include/trace/events/devfreq.h b/include/trace/events/devfreq.h new file mode 100644 index 000000000000..cf5b8772175d --- /dev/null +++ b/include/trace/events/devfreq.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM devfreq + +#if !defined(_TRACE_DEVFREQ_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_DEVFREQ_H + +#include <linux/devfreq.h> +#include <linux/tracepoint.h> + +TRACE_EVENT(devfreq_monitor, + TP_PROTO(struct devfreq *devfreq), + + TP_ARGS(devfreq), + + TP_STRUCT__entry( + __field(unsigned long, freq) + __field(unsigned long, busy_time) + __field(unsigned long, total_time) + __field(unsigned int, polling_ms) + __string(dev_name, dev_name(&devfreq->dev)) + ), + + TP_fast_assign( + __entry->freq = devfreq->previous_freq; + __entry->busy_time = devfreq->last_status.busy_time; + __entry->total_time = devfreq->last_status.total_time; + __entry->polling_ms = devfreq->profile->polling_ms; + __assign_str(dev_name, dev_name(&devfreq->dev)); + ), + + TP_printk("dev_name=%s freq=%lu polling_ms=%u load=%lu", + __get_str(dev_name), __entry->freq, __entry->polling_ms, + __entry->total_time == 0 ? 
0 : + (100 * __entry->busy_time) / __entry->total_time) +); +#endif /* _TRACE_DEVFREQ_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h index a57e4ee989d6..b7a904825e7d 100644 --- a/include/trace/events/timer.h +++ b/include/trace/events/timer.h @@ -73,7 +73,7 @@ TRACE_EVENT(timer_start, __entry->flags = flags; ), - TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld] cpu=%u idx=%u flags=%s", + TP_printk("timer=%p function=%ps expires=%lu [timeout=%ld] cpu=%u idx=%u flags=%s", __entry->timer, __entry->function, __entry->expires, (long)__entry->expires - __entry->now, __entry->flags & TIMER_CPUMASK, @@ -89,23 +89,27 @@ TRACE_EVENT(timer_start, */ TRACE_EVENT(timer_expire_entry, - TP_PROTO(struct timer_list *timer), + TP_PROTO(struct timer_list *timer, unsigned long baseclk), - TP_ARGS(timer), + TP_ARGS(timer, baseclk), TP_STRUCT__entry( __field( void *, timer ) __field( unsigned long, now ) __field( void *, function) + __field( unsigned long, baseclk ) ), TP_fast_assign( __entry->timer = timer; __entry->now = jiffies; __entry->function = timer->function; + __entry->baseclk = baseclk; ), - TP_printk("timer=%p function=%pf now=%lu", __entry->timer, __entry->function,__entry->now) + TP_printk("timer=%p function=%ps now=%lu baseclk=%lu", + __entry->timer, __entry->function, __entry->now, + __entry->baseclk) ); /** @@ -210,7 +214,7 @@ TRACE_EVENT(hrtimer_start, __entry->mode = mode; ), - TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu " + TP_printk("hrtimer=%p function=%ps expires=%llu softexpires=%llu " "mode=%s", __entry->hrtimer, __entry->function, (unsigned long long) __entry->expires, (unsigned long long) __entry->softexpires, @@ -243,7 +247,8 @@ TRACE_EVENT(hrtimer_expire_entry, __entry->function = hrtimer->function; ), - TP_printk("hrtimer=%p function=%pf now=%llu", __entry->hrtimer, __entry->function, + TP_printk("hrtimer=%p function=%ps now=%llu", + __entry->hrtimer, __entry->function, (unsigned long long) __entry->now) ); diff --git a/include/uapi/linux/psci.h b/include/uapi/linux/psci.h index b3bcabe380da..2fcad1dd0b0e 100644 --- a/include/uapi/linux/psci.h +++ b/include/uapi/linux/psci.h @@ -49,8 +49,11 @@ #define PSCI_1_0_FN_PSCI_FEATURES PSCI_0_2_FN(10) #define PSCI_1_0_FN_SYSTEM_SUSPEND PSCI_0_2_FN(14) +#define PSCI_1_0_FN_SET_SUSPEND_MODE PSCI_0_2_FN(15) +#define PSCI_1_1_FN_SYSTEM_RESET2 PSCI_0_2_FN(18) #define PSCI_1_0_FN64_SYSTEM_SUSPEND PSCI_0_2_FN64(14) +#define PSCI_1_1_FN64_SYSTEM_RESET2 PSCI_0_2_FN64(18) /* PSCI v0.2 power state encoding for CPU_SUSPEND function */ #define PSCI_0_2_POWER_STATE_ID_MASK 0xffff @@ -97,6 +100,10 @@ #define PSCI_1_0_FEATURES_CPU_SUSPEND_PF_MASK \ (0x1 << PSCI_1_0_FEATURES_CPU_SUSPEND_PF_SHIFT) +#define PSCI_1_0_OS_INITIATED BIT(0) +#define PSCI_1_0_SUSPEND_MODE_PC 0 +#define PSCI_1_0_SUSPEND_MODE_OSI 1 + /* PSCI return values (inclusive of all PSCI versions) */ #define PSCI_RET_SUCCESS 0 #define PSCI_RET_NOT_SUPPORTED -1 diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h index ac8c60bcc83b..43521d500c2b 100644 --- a/include/uapi/linux/psp-sev.h +++ b/include/uapi/linux/psp-sev.h @@ -6,8 +6,7 @@ * * Author: Brijesh Singh <brijesh.singh@amd.com> * - * SEV spec 0.14 is available at: - * http://support.amd.com/TechDocs/55766_SEV-KM%20API_Specification.pdf + * SEV API specification is available at: https://developer.amd.com/sev/ * * This program is free software; you can redistribute it and/or 
modify * it under the terms of the GNU General Public License version 2 as @@ -30,7 +29,8 @@ enum { SEV_PDH_GEN, SEV_PDH_CERT_EXPORT, SEV_PEK_CERT_IMPORT, - SEV_GET_ID, + SEV_GET_ID, /* This command is deprecated, use SEV_GET_ID2 */ + SEV_GET_ID2, SEV_MAX, }; @@ -125,7 +125,7 @@ struct sev_user_data_pdh_cert_export { } __packed; /** - * struct sev_user_data_get_id - GET_ID command parameters + * struct sev_user_data_get_id - GET_ID command parameters (deprecated) * * @socket1: Buffer to pass unique ID of first socket * @socket2: Buffer to pass unique ID of second socket @@ -136,6 +136,16 @@ struct sev_user_data_get_id { } __packed; /** + * struct sev_user_data_get_id2 - GET_ID command parameters + * @address: Buffer to store unique ID + * @length: length of the unique ID + */ +struct sev_user_data_get_id2 { + __u64 address; /* In */ + __u32 length; /* In/Out */ +} __packed; + +/** * struct sev_issue_cmd - SEV ioctl parameters * * @cmd: SEV commands to execute diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index 02bb7ad6e986..8f10748dac79 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -353,6 +353,10 @@ struct vfio_region_gfx_edid { #define VFIO_DEVICE_GFX_LINK_STATE_DOWN 2 }; +#define VFIO_REGION_TYPE_CCW (2) +/* ccw sub-types */ +#define VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD (1) + /* * 10de vendor sub-type * diff --git a/include/uapi/linux/vfio_ccw.h b/include/uapi/linux/vfio_ccw.h index 2ec5f367ff78..cbecbf0cd54f 100644 --- a/include/uapi/linux/vfio_ccw.h +++ b/include/uapi/linux/vfio_ccw.h @@ -12,6 +12,7 @@ #include <linux/types.h> +/* used for START SUBCHANNEL, always present */ struct ccw_io_region { #define ORB_AREA_SIZE 12 __u8 orb_area[ORB_AREA_SIZE]; @@ -22,4 +23,15 @@ struct ccw_io_region { __u32 ret_code; } __packed; +/* + * used for processing commands that trigger asynchronous actions + * Note: this is controlled by a capability + */ +#define VFIO_CCW_ASYNC_CMD_HSCH (1 << 0) +#define VFIO_CCW_ASYNC_CMD_CSCH (1 << 1) +struct ccw_cmd_region { + __u32 command; + __u32 ret_code; +} __packed; + #endif
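/*
 * Editor's note: a hedged userspace sketch of the new SEV_GET_ID2 command
 * from the psp-sev.h hunk above. The 64-byte caller buffer is a guess; the
 * assumption here is that on failure the firmware reports the required
 * length back through id2.length, and that SEV_ISSUE_CMD on /dev/sev is
 * used just as for the other SEV commands.
 */
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/psp-sev.h>

static int my_get_sev_id(unsigned char *buf, uint32_t len)
{
	struct sev_user_data_get_id2 id2 = {
		.address = (uint64_t)(uintptr_t)buf,
		.length  = len,
	};
	struct sev_issue_cmd arg = {
		.cmd  = SEV_GET_ID2,
		.data = (uint64_t)(uintptr_t)&id2,
	};
	int fd, ret;

	fd = open("/dev/sev", O_RDWR | O_CLOEXEC);
	if (fd < 0)
		return -1;
	ret = ioctl(fd, SEV_ISSUE_CMD, &arg);
	close(fd);
	return ret;	/* on failure, id2.length may hold the needed size */
}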