summaryrefslogtreecommitdiff
path: root/arch/powerpc/include/asm
diff options
context:
space:
mode:
Diffstat (limited to 'arch/powerpc/include/asm')
-rw-r--r--arch/powerpc/include/asm/book3s/32/pgtable.h6
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h79
-rw-r--r--arch/powerpc/include/asm/book3s/64/tlbflush-radix.h1
-rw-r--r--arch/powerpc/include/asm/checksum.h22
-rw-r--r--arch/powerpc/include/asm/futex.h6
-rw-r--r--arch/powerpc/include/asm/kexec.h14
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgtable.h2
-rw-r--r--arch/powerpc/include/asm/nohash/pgtable.h6
-rw-r--r--arch/powerpc/include/asm/page.h4
-rw-r--r--arch/powerpc/include/asm/page_32.h2
-rw-r--r--arch/powerpc/include/asm/page_64.h12
-rw-r--r--arch/powerpc/include/asm/pgtable.h24
-rw-r--r--arch/powerpc/include/asm/pkeys.h8
-rw-r--r--arch/powerpc/include/asm/ps3.h15
-rw-r--r--arch/powerpc/include/asm/uaccess.h3
-rw-r--r--arch/powerpc/include/asm/unistd32.h7
-rw-r--r--arch/powerpc/include/asm/vdso/gettimeofday.h1
-rw-r--r--arch/powerpc/include/asm/vdso/processor.h3
-rw-r--r--arch/powerpc/include/asm/xor.h47
-rw-r--r--arch/powerpc/include/asm/xor_altivec.h22
20 files changed, 145 insertions, 139 deletions
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 001e28f9eabc..e18a4fa282a1 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -295,8 +295,8 @@ static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, p
* for our hash-based implementation, we fix that up here.
*/
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
+static inline bool __ptep_test_and_clear_young(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
{
unsigned long old;
old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
@@ -438,7 +438,7 @@ static inline bool pte_access_permitted(pte_t pte, bool write)
return true;
}
-static inline bool pte_user_accessible_page(pte_t pte, unsigned long addr)
+static inline bool pte_user_accessible_page(struct mm_struct *mm, unsigned long addr, pte_t pte)
{
return pte_present(pte) && !is_kernel_addr(addr);
}
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 1a91762b455d..e67e64ac6e8c 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -107,8 +107,8 @@
* in here, on radix we expect them to be zero.
*/
#define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
- _PAGE_ACCESSED | H_PAGE_THP_HUGE | _PAGE_PTE | \
- _PAGE_SOFT_DIRTY)
+ _PAGE_ACCESSED | H_PAGE_THP_HUGE | _PAGE_SPECIAL | \
+ _PAGE_PTE | _PAGE_SOFT_DIRTY)
/*
* user access blocked by key
*/
@@ -349,13 +349,13 @@ static inline unsigned long pte_update(struct mm_struct *mm, unsigned long addr,
* For radix: H_PAGE_HASHPTE should be zero. Hence we can use the same
* function for both hash and radix.
*/
-static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
+static inline bool __ptep_test_and_clear_young(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
{
unsigned long old;
if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_ACCESSED | H_PAGE_HASHPTE)) == 0)
- return 0;
+ return false;
old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
return (old & _PAGE_ACCESSED) != 0;
}
@@ -549,7 +549,7 @@ static inline bool pte_access_permitted(pte_t pte, bool write)
return arch_pte_access_permitted(pte_val(pte), write, 0);
}
-static inline bool pte_user_accessible_page(pte_t pte, unsigned long addr)
+static inline bool pte_user_accessible_page(struct mm_struct *mm, unsigned long addr, pte_t pte)
{
return pte_present(pte) && pte_user(pte);
}
@@ -925,9 +925,9 @@ static inline bool pud_access_permitted(pud_t pud, bool write)
}
#define pud_user_accessible_page pud_user_accessible_page
-static inline bool pud_user_accessible_page(pud_t pud, unsigned long addr)
+static inline bool pud_user_accessible_page(struct mm_struct *mm, unsigned long addr, pud_t pud)
{
- return pud_leaf(pud) && pte_user_accessible_page(pud_pte(pud), addr);
+ return pud_leaf(pud) && pte_user_accessible_page(mm, addr, pud_pte(pud));
}
#define __p4d_raw(x) ((p4d_t) { __pgd_raw(x) })
@@ -1096,9 +1096,9 @@ static inline bool pmd_access_permitted(pmd_t pmd, bool write)
}
#define pmd_user_accessible_page pmd_user_accessible_page
-static inline bool pmd_user_accessible_page(pmd_t pmd, unsigned long addr)
+static inline bool pmd_user_accessible_page(struct mm_struct *mm, unsigned long addr, pmd_t pmd)
{
- return pmd_leaf(pmd) && pte_user_accessible_page(pmd_pte(pmd), addr);
+ return pmd_leaf(pmd) && pte_user_accessible_page(mm, addr, pmd_pte(pmd));
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -1161,24 +1161,24 @@ pud_hugepage_update(struct mm_struct *mm, unsigned long addr, pud_t *pudp,
* For radix we should always find H_PAGE_HASHPTE zero. Hence
* the below will work for radix too
*/
-static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
- unsigned long addr, pmd_t *pmdp)
+static inline bool __pmdp_test_and_clear_young(struct mm_struct *mm,
+ unsigned long addr, pmd_t *pmdp)
{
unsigned long old;
if ((pmd_raw(*pmdp) & cpu_to_be64(_PAGE_ACCESSED | H_PAGE_HASHPTE)) == 0)
- return 0;
+ return false;
old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0);
return ((old & _PAGE_ACCESSED) != 0);
}
-static inline int __pudp_test_and_clear_young(struct mm_struct *mm,
- unsigned long addr, pud_t *pudp)
+static inline bool __pudp_test_and_clear_young(struct mm_struct *mm,
+ unsigned long addr, pud_t *pudp)
{
unsigned long old;
if ((pud_raw(*pudp) & cpu_to_be64(_PAGE_ACCESSED | H_PAGE_HASHPTE)) == 0)
- return 0;
+ return false;
old = pud_hugepage_update(mm, addr, pudp, _PAGE_ACCESSED, 0);
return ((old & _PAGE_ACCESSED) != 0);
}
@@ -1289,6 +1289,29 @@ static inline pud_t pud_mkhuge(pud_t pud)
return pud;
}
+#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
+static inline bool pmd_special(pmd_t pmd)
+{
+ return pte_special(pmd_pte(pmd));
+}
+
+static inline pmd_t pmd_mkspecial(pmd_t pmd)
+{
+ return pte_pmd(pte_mkspecial(pmd_pte(pmd)));
+}
+#endif
+
+#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
+static inline bool pud_special(pud_t pud)
+{
+ return pte_special(pud_pte(pud));
+}
+
+static inline pud_t pud_mkspecial(pud_t pud)
+{
+ return pte_pud(pte_mkspecial(pud_pte(pud)));
+}
+#endif
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
@@ -1300,11 +1323,11 @@ extern int pudp_set_access_flags(struct vm_area_struct *vma,
pud_t entry, int dirty);
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
-extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmdp);
+bool pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
#define __HAVE_ARCH_PUDP_TEST_AND_CLEAR_YOUNG
-extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
- unsigned long address, pud_t *pudp);
+bool pudp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address, pud_t *pudp);
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
@@ -1313,12 +1336,27 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
{
pmd_t old_pmd;
+ /*
+ * Non-present PMDs can be migration entries or device-private THP
+ * entries. This can happen at 2 places:
+ * - When the address space is being unmapped via zap_huge_pmd(), and we
+ * encounter non-present pmds.
+ * - migrate_vma_collect_huge_pmd() could call this during migration
+ * of device-private pmd entries.
+ */
+ if (!pmd_present(*pmdp)) {
+ old_pmd = READ_ONCE(*pmdp);
+ pmd_clear(pmdp);
+ goto out;
+ }
+
if (radix_enabled()) {
old_pmd = radix__pmdp_huge_get_and_clear(mm, addr, pmdp);
} else {
old_pmd = hash__pmdp_huge_get_and_clear(mm, addr, pmdp);
}
+out:
page_table_check_pmd_clear(mm, addr, old_pmd);
return old_pmd;
@@ -1400,7 +1438,6 @@ static inline bool arch_needs_pgtable_deposit(void)
return false;
return true;
}
-extern void serialize_against_pte_lookup(struct mm_struct *mm);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index a38542259fab..de9b96660582 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -92,7 +92,6 @@ extern void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmad
#define radix__flush_tlb_page(vma,addr) radix__local_flush_tlb_page(vma,addr)
#define radix__flush_tlb_page_psize(mm,addr,p) radix__local_flush_tlb_page_psize(mm,addr,p)
#endif
-extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
extern void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr);
extern void radix__flush_tlb_all(void);
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index 4b573a3b7e17..52921ea2494a 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -8,6 +8,7 @@
#include <linux/bitops.h>
#include <linux/in6.h>
+#include <linux/uaccess.h>
/*
* Computes the checksum of a memory block at src, length len,
* and adds in "sum" (32-bit), while copying the block to dst.
@@ -21,11 +22,24 @@
extern __wsum csum_partial_copy_generic(const void *src, void *dst, int len);
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
-extern __wsum csum_and_copy_from_user(const void __user *src, void *dst,
- int len);
+static inline __wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
+{
+ scoped_user_read_access_size(src, len, efault)
+ return csum_partial_copy_generic((void __force *)src, dst, len);
+
+efault:
+ return 0;
+}
+
#define HAVE_CSUM_COPY_USER
-extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
- int len);
+static inline __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len)
+{
+ scoped_user_write_access_size(dst, len, efault)
+ return csum_partial_copy_generic(src, (void __force *)dst, len);
+
+efault:
+ return 0;
+}
#define _HAVE_ARCH_CSUM_AND_COPY
#define csum_partial_copy_nocheck(src, dst, len) \
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index b3001f8b2c1e..8cf3b2e97e17 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -33,8 +33,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
{
int oldval = 0, ret;
- if (!user_access_begin(uaddr, sizeof(u32)))
- return -EFAULT;
+ uaddr = masked_user_access_begin(uaddr);
switch (op) {
case FUTEX_OP_SET:
@@ -69,8 +68,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
int ret = 0;
u32 prev;
- if (!user_access_begin(uaddr, sizeof(u32)))
- return -EFAULT;
+ uaddr = masked_user_access_begin(uaddr);
__asm__ __volatile__ (
PPC_ATOMIC_ENTRY_BARRIER
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index bd4a6c42a5f3..e02710d6a2e1 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -66,11 +66,9 @@ void relocate_new_kernel(unsigned long indirection_page, unsigned long reboot_co
unsigned long start_address) __noreturn;
void kexec_copy_flush(struct kimage *image);
-#ifdef CONFIG_KEXEC_FILE
-extern const struct kexec_file_ops kexec_elf64_ops;
+#if defined(CONFIG_KEXEC_FILE) || defined(CONFIG_CRASH_DUMP)
#define ARCH_HAS_KIMAGE_ARCH
-
struct kimage_arch {
struct crash_mem *exclude_ranges;
@@ -78,6 +76,10 @@ struct kimage_arch {
void *backup_buf;
void *fdt;
};
+#endif
+
+#ifdef CONFIG_KEXEC_FILE
+extern const struct kexec_file_ops kexec_elf64_ops;
char *setup_kdump_cmdline(struct kimage *image, char *cmdline,
unsigned long cmdline_len);
@@ -145,6 +147,10 @@ int arch_crash_hotplug_support(struct kimage *image, unsigned long kexec_flags);
unsigned int arch_crash_get_elfcorehdr_size(void);
#define crash_get_elfcorehdr_size arch_crash_get_elfcorehdr_size
+
+int machine_kexec_post_load(struct kimage *image);
+#define machine_kexec_post_load machine_kexec_post_load
+
#endif /* CONFIG_CRASH_HOTPLUG */
extern int crashing_cpu;
@@ -159,6 +165,8 @@ extern void default_machine_crash_shutdown(struct pt_regs *regs);
extern void crash_kexec_prepare(void);
extern void crash_kexec_secondary(struct pt_regs *regs);
+extern void sync_backup_region_phdr(struct kimage *image, Elf64_Ehdr *ehdr,
+ bool phdr_to_kimage);
static inline bool kdump_in_progress(void)
{
return crashing_cpu >= 0;
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 2deb955b7bc8..661eb3820d12 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -155,7 +155,7 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep) \
({ \
- int __young = ptep_test_and_clear_young(__vma, __address, __ptep);\
+ bool __young = ptep_test_and_clear_young(__vma, __address, __ptep);\
__young; \
})
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index e6da5eaccff6..59375f354f3b 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -101,8 +101,8 @@ static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, p
}
#endif
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
+static inline bool ptep_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
{
unsigned long old;
@@ -249,7 +249,7 @@ static inline bool pte_access_permitted(pte_t pte, bool write)
return true;
}
-static inline bool pte_user_accessible_page(pte_t pte, unsigned long addr)
+static inline bool pte_user_accessible_page(struct mm_struct *mm, unsigned long addr, pte_t pte)
{
return pte_present(pte) && !is_kernel_addr(addr);
}
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index f2bb1f98eebe..281f25e071a3 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -240,8 +240,8 @@ static inline const void *pfn_to_kaddr(unsigned long pfn)
* and needs to be executable. This means the whole heap ends
* up being executable.
*/
-#define VM_DATA_DEFAULT_FLAGS32 VM_DATA_FLAGS_TSK_EXEC
-#define VM_DATA_DEFAULT_FLAGS64 VM_DATA_FLAGS_NON_EXEC
+#define VMA_DATA_DEFAULT_FLAGS32 VMA_DATA_FLAGS_TSK_EXEC
+#define VMA_DATA_DEFAULT_FLAGS64 VMA_DATA_FLAGS_NON_EXEC
#ifdef __powerpc64__
#include <asm/page_64.h>
diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h
index 25482405a811..1fd8c21f0a42 100644
--- a/arch/powerpc/include/asm/page_32.h
+++ b/arch/powerpc/include/asm/page_32.h
@@ -10,7 +10,7 @@
#endif
#endif
-#define VM_DATA_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS32
+#define VMA_DATA_DEFAULT_FLAGS VMA_DATA_DEFAULT_FLAGS32
#if defined(CONFIG_PPC_256K_PAGES) || \
(defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES))
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index 0f564a06bf68..d96c984d023b 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -84,9 +84,9 @@ extern u64 ppc64_pft_size;
#endif /* __ASSEMBLER__ */
-#define VM_DATA_DEFAULT_FLAGS \
+#define VMA_DATA_DEFAULT_FLAGS \
(is_32bit_task() ? \
- VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)
+ VMA_DATA_DEFAULT_FLAGS32 : VMA_DATA_DEFAULT_FLAGS64)
/*
* This is the default if a program doesn't have a PT_GNU_STACK
@@ -94,12 +94,12 @@ extern u64 ppc64_pft_size;
* stack by default, so in the absence of a PT_GNU_STACK program header
* we turn execute permission off.
*/
-#define VM_STACK_DEFAULT_FLAGS32 VM_DATA_FLAGS_EXEC
-#define VM_STACK_DEFAULT_FLAGS64 VM_DATA_FLAGS_NON_EXEC
+#define VMA_STACK_DEFAULT_FLAGS32 VMA_DATA_FLAGS_EXEC
+#define VMA_STACK_DEFAULT_FLAGS64 VMA_DATA_FLAGS_NON_EXEC
-#define VM_STACK_DEFAULT_FLAGS \
+#define VMA_STACK_DEFAULT_FLAGS \
(is_32bit_task() ? \
- VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
+ VMA_STACK_DEFAULT_FLAGS32 : VMA_STACK_DEFAULT_FLAGS64)
#include <asm-generic/getorder.h>
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index dcd3a88caaf6..d20ff2ae02f5 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -63,6 +63,20 @@ static inline pgprot_t pte_pgprot(pte_t pte)
return __pgprot(pte_flags);
}
+#ifdef CONFIG_PPC64
+#define pmd_pgprot pmd_pgprot
+static inline pgprot_t pmd_pgprot(pmd_t pmd)
+{
+ return pte_pgprot(pmd_pte(pmd));
+}
+
+#define pud_pgprot pud_pgprot
+static inline pgprot_t pud_pgprot(pud_t pud)
+{
+ return pte_pgprot(pud_pte(pud));
+}
+#endif /* CONFIG_PPC64 */
+
static inline pgprot_t pgprot_nx(pgprot_t prot)
{
return pte_pgprot(pte_exprotect(__pte(pgprot_val(prot))));
@@ -76,12 +90,6 @@ static inline const void *pmd_page_vaddr(pmd_t pmd)
}
#define pmd_page_vaddr pmd_page_vaddr
#endif
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern unsigned long empty_zero_page[];
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
extern pgd_t swapper_pg_dir[];
@@ -205,11 +213,11 @@ static inline bool arch_supports_memmap_on_memory(unsigned long vmemmap_size)
#endif /* CONFIG_PPC64 */
#ifndef pmd_user_accessible_page
-#define pmd_user_accessible_page(pmd, addr) false
+#define pmd_user_accessible_page(mm, addr, pmd) false
#endif
#ifndef pud_user_accessible_page
-#define pud_user_accessible_page(pud, addr) false
+#define pud_user_accessible_page(mm, addr, pud) false
#endif
#endif /* __ASSEMBLER__ */
diff --git a/arch/powerpc/include/asm/pkeys.h b/arch/powerpc/include/asm/pkeys.h
index 28e752138996..bd20d4106471 100644
--- a/arch/powerpc/include/asm/pkeys.h
+++ b/arch/powerpc/include/asm/pkeys.h
@@ -143,10 +143,8 @@ static inline int arch_override_mprotect_pkey(struct vm_area_struct *vma,
return __arch_override_mprotect_pkey(vma, prot, pkey);
}
-extern int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
- unsigned long init_val);
-static inline int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
- unsigned long init_val)
+extern int __arch_set_user_pkey_access(int pkey, unsigned long init_val);
+static inline int arch_set_user_pkey_access(int pkey, unsigned long init_val)
{
if (!mmu_has_feature(MMU_FTR_PKEY))
return -EINVAL;
@@ -160,7 +158,7 @@ static inline int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
if (pkey == 0)
return init_val ? -EINVAL : 0;
- return __arch_set_user_pkey_access(tsk, pkey, init_val);
+ return __arch_set_user_pkey_access(pkey, init_val);
}
static inline bool arch_pkeys_enabled(void)
diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h
index 987e23a2bd28..b090ceb32a69 100644
--- a/arch/powerpc/include/asm/ps3.h
+++ b/arch/powerpc/include/asm/ps3.h
@@ -65,6 +65,7 @@ struct ps3_dma_region_ops;
/**
* struct ps3_dma_region - A per device dma state variables structure
+ * @dev: device structure
* @did: The HV device id.
* @page_size: The ioc pagesize.
* @region_type: The HV region type.
@@ -108,15 +109,15 @@ struct ps3_dma_region_ops {
dma_addr_t bus_addr,
unsigned long len);
};
+
+struct ps3_system_bus_device;
+
/**
* struct ps3_dma_region_init - Helper to initialize structure variables
*
* Helper to properly initialize variables prior to calling
* ps3_system_bus_device_register.
*/
-
-struct ps3_system_bus_device;
-
int ps3_dma_region_init(struct ps3_system_bus_device *dev,
struct ps3_dma_region *r, enum ps3_dma_page_size page_size,
enum ps3_dma_region_type region_type, void *addr, unsigned long len);
@@ -156,10 +157,12 @@ struct ps3_mmio_region_ops {
int (*free)(struct ps3_mmio_region *);
};
/**
- * struct ps3_mmio_region_init - Helper to initialize structure variables
+ * ps3_mmio_region_init - Helper to initialize structure variables
*
* Helper to properly initialize variables prior to calling
* ps3_system_bus_device_register.
+ *
+ * Returns: %0 on success, %-errno on error (or BUG())
*/
int ps3_mmio_region_init(struct ps3_system_bus_device *dev,
@@ -405,7 +408,7 @@ static inline struct ps3_system_bus_driver *
}
/**
- * ps3_system_bus_set_drvdata -
+ * ps3_system_bus_set_drvdata - set driver's private data for this device
* @dev: device structure
* @data: Data to set
*/
@@ -464,7 +467,7 @@ enum ps3_lpm_rights {
* enum ps3_lpm_tb_type - Type of trace buffer lv1 should use.
*
* @PS3_LPM_TB_TYPE_NONE: Do not use a trace buffer.
- * @PS3_LPM_RIGHTS_USE_TB: Use the lv1 internal trace buffer. Must have
+ * @PS3_LPM_TB_TYPE_INTERNAL: Use the lv1 internal trace buffer. Must have
* rights @PS3_LPM_RIGHTS_USE_TB.
*/
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 17e63244e885..e98c628e3899 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -434,8 +434,7 @@ copy_mc_to_user(void __user *to, const void *from, unsigned long n)
}
#endif
-extern long __copy_from_user_flushcache(void *dst, const void __user *src,
- unsigned size);
+extern size_t copy_from_user_flushcache(void *dst, const void __user *src, size_t size);
static __must_check __always_inline bool __user_access_begin(const void __user *ptr, size_t len,
unsigned long dir)
diff --git a/arch/powerpc/include/asm/unistd32.h b/arch/powerpc/include/asm/unistd32.h
new file mode 100644
index 000000000000..07689897d206
--- /dev/null
+++ b/arch/powerpc/include/asm/unistd32.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_UNISTD32_H_
+#define _ASM_POWERPC_UNISTD32_H_
+
+#include <asm/unistd_32.h>
+
+#endif /* _ASM_POWERPC_UNISTD32_H_ */
diff --git a/arch/powerpc/include/asm/vdso/gettimeofday.h b/arch/powerpc/include/asm/vdso/gettimeofday.h
index 8ea397e26ad0..a853f853da6c 100644
--- a/arch/powerpc/include/asm/vdso/gettimeofday.h
+++ b/arch/powerpc/include/asm/vdso/gettimeofday.h
@@ -8,6 +8,7 @@
#include <asm/barrier.h>
#include <asm/unistd.h>
#include <uapi/linux/time.h>
+#include <vdso/time32.h>
#define VDSO_HAS_CLOCK_GETRES 1
diff --git a/arch/powerpc/include/asm/vdso/processor.h b/arch/powerpc/include/asm/vdso/processor.h
index c1f3d7aaf3ee..4c6802c3a580 100644
--- a/arch/powerpc/include/asm/vdso/processor.h
+++ b/arch/powerpc/include/asm/vdso/processor.h
@@ -4,6 +4,9 @@
#ifndef __ASSEMBLER__
+#include <asm/cputable.h>
+#include <asm/feature-fixups.h>
+
/* Macros for adjusting thread priority (hardware multi-threading) */
#ifdef CONFIG_PPC64
#define HMT_very_low() asm volatile("or 31, 31, 31 # very low priority")
diff --git a/arch/powerpc/include/asm/xor.h b/arch/powerpc/include/asm/xor.h
deleted file mode 100644
index 37d05c11d09c..000000000000
--- a/arch/powerpc/include/asm/xor.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- *
- * Copyright (C) IBM Corporation, 2012
- *
- * Author: Anton Blanchard <anton@au.ibm.com>
- */
-#ifndef _ASM_POWERPC_XOR_H
-#define _ASM_POWERPC_XOR_H
-
-#ifdef CONFIG_ALTIVEC
-
-#include <asm/cputable.h>
-#include <asm/cpu_has_feature.h>
-#include <asm/xor_altivec.h>
-
-static struct xor_block_template xor_block_altivec = {
- .name = "altivec",
- .do_2 = xor_altivec_2,
- .do_3 = xor_altivec_3,
- .do_4 = xor_altivec_4,
- .do_5 = xor_altivec_5,
-};
-
-#define XOR_SPEED_ALTIVEC() \
- do { \
- if (cpu_has_feature(CPU_FTR_ALTIVEC)) \
- xor_speed(&xor_block_altivec); \
- } while (0)
-#else
-#define XOR_SPEED_ALTIVEC()
-#endif
-
-/* Also try the generic routines. */
-#include <asm-generic/xor.h>
-
-#undef XOR_TRY_TEMPLATES
-#define XOR_TRY_TEMPLATES \
-do { \
- xor_speed(&xor_block_8regs); \
- xor_speed(&xor_block_8regs_p); \
- xor_speed(&xor_block_32regs); \
- xor_speed(&xor_block_32regs_p); \
- XOR_SPEED_ALTIVEC(); \
-} while (0)
-
-#endif /* _ASM_POWERPC_XOR_H */
diff --git a/arch/powerpc/include/asm/xor_altivec.h b/arch/powerpc/include/asm/xor_altivec.h
deleted file mode 100644
index 294620a25f80..000000000000
--- a/arch/powerpc/include/asm/xor_altivec.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_XOR_ALTIVEC_H
-#define _ASM_POWERPC_XOR_ALTIVEC_H
-
-#ifdef CONFIG_ALTIVEC
-void xor_altivec_2(unsigned long bytes, unsigned long * __restrict p1,
- const unsigned long * __restrict p2);
-void xor_altivec_3(unsigned long bytes, unsigned long * __restrict p1,
- const unsigned long * __restrict p2,
- const unsigned long * __restrict p3);
-void xor_altivec_4(unsigned long bytes, unsigned long * __restrict p1,
- const unsigned long * __restrict p2,
- const unsigned long * __restrict p3,
- const unsigned long * __restrict p4);
-void xor_altivec_5(unsigned long bytes, unsigned long * __restrict p1,
- const unsigned long * __restrict p2,
- const unsigned long * __restrict p3,
- const unsigned long * __restrict p4,
- const unsigned long * __restrict p5);
-
-#endif
-#endif /* _ASM_POWERPC_XOR_ALTIVEC_H */