Diffstat (limited to 'arch/powerpc/lib')
-rw-r--r-- | arch/powerpc/lib/Makefile          |  15
-rw-r--r-- | arch/powerpc/lib/code-patching.c   | 171
-rw-r--r-- | arch/powerpc/lib/copyuser_power7.S |   4
-rw-r--r-- | arch/powerpc/lib/crtsavres.S       |   6
-rw-r--r-- | arch/powerpc/lib/feature-fixups.c  | 180
-rw-r--r-- | arch/powerpc/lib/sstep.c           |  19
-rw-r--r-- | arch/powerpc/lib/xor_vmx.c         |  53
-rw-r--r-- | arch/powerpc/lib/xor_vmx.h         |  20
-rw-r--r-- | arch/powerpc/lib/xor_vmx_glue.c    |  62
9 files changed, 382 insertions, 148 deletions
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index ed7dfce331e0..3c3146ba62da 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -9,10 +9,17 @@ ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
 CFLAGS_REMOVE_code-patching.o = $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE)
 
-obj-y += string.o alloc.o crtsavres.o code-patching.o \
-	 feature-fixups.o
+obj-y += string.o alloc.o code-patching.o feature-fixups.o
 
-obj-$(CONFIG_PPC32)	+= div64.o copy_32.o
+obj-$(CONFIG_PPC32)	+= div64.o copy_32.o crtsavres.o
+
+# See corresponding test in arch/powerpc/Makefile
+# 64-bit linker creates .sfpr on demand for final link (vmlinux),
+# so it is only needed for modules, and only for older linkers which
+# do not support --save-restore-funcs
+ifeq ($(call ld-ifversion, -lt, 225000000, y),y)
+extra-$(CONFIG_PPC64)	+= crtsavres.o
+endif
 
 obj64-y	+= copypage_64.o copyuser_64.o mem_64.o hweight_64.o \
 	   copyuser_power7.o string_64.o copypage_power7.o memcpy_power7.o \
@@ -30,7 +37,7 @@ obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o
 
 obj-$(CONFIG_FTR_FIXUP_SELFTEST) += feature-fixups-test.o
 
-obj-$(CONFIG_ALTIVEC)	+= xor_vmx.o
+obj-$(CONFIG_ALTIVEC)	+= xor_vmx.o xor_vmx_glue.o
 CFLAGS_xor_vmx.o += -maltivec $(call cc-option,-mabi=altivec)
 
 obj-$(CONFIG_PPC64) += $(obj64-y)
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 500b0f6a0b64..c9de03e0c1f1 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -12,23 +12,186 @@
 #include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/mm.h>
-#include <asm/page.h>
-#include <asm/code-patching.h>
+#include <linux/cpuhotplug.h>
+#include <linux/slab.h>
 #include <linux/uaccess.h>
 #include <linux/kprobes.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+#include <asm/page.h>
+#include <asm/code-patching.h>
 
-int patch_instruction(unsigned int *addr, unsigned int instr)
+static int __patch_instruction(unsigned int *addr, unsigned int instr)
 {
 	int err;
 
 	__put_user_size(instr, addr, 4, err);
 	if (err)
 		return err;
-	asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (addr));
+
+	asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" :: "r" (addr));
+
+	return 0;
+}
+
+#ifdef CONFIG_STRICT_KERNEL_RWX
+static DEFINE_PER_CPU(struct vm_struct *, text_poke_area);
+
+static int text_area_cpu_up(unsigned int cpu)
+{
+	struct vm_struct *area;
+
+	area = get_vm_area(PAGE_SIZE, VM_ALLOC);
+	if (!area) {
+		WARN_ONCE(1, "Failed to create text area for cpu %d\n",
+			cpu);
+		return -1;
+	}
+	this_cpu_write(text_poke_area, area);
+
+	return 0;
+}
+
+static int text_area_cpu_down(unsigned int cpu)
+{
+	free_vm_area(this_cpu_read(text_poke_area));
+	return 0;
+}
+
+/*
+ * Run as a late init call. This allows all the boot time patching to be done
+ * simply by patching the code, and then we're called here prior to
+ * mark_rodata_ro(), which happens after all init calls are run. Although
+ * BUG_ON() is rude, in this case it should only happen if ENOMEM, and we judge
+ * it as being preferable to a kernel that will crash later when someone tries
+ * to use patch_instruction().
+ */
+static int __init setup_text_poke_area(void)
+{
+	BUG_ON(!cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+		"powerpc/text_poke:online", text_area_cpu_up,
+		text_area_cpu_down));
+
+	return 0;
+}
+late_initcall(setup_text_poke_area);
+
+/*
+ * This can be called for kernel text or a module.
+ */
+static int map_patch_area(void *addr, unsigned long text_poke_addr)
+{
+	unsigned long pfn;
+	int err;
+
+	if (is_vmalloc_addr(addr))
+		pfn = vmalloc_to_pfn(addr);
+	else
+		pfn = __pa_symbol(addr) >> PAGE_SHIFT;
+
+	err = map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT),
+				pgprot_val(PAGE_KERNEL));
+
+	pr_devel("Mapped addr %lx with pfn %lx:%d\n", text_poke_addr, pfn, err);
+	if (err)
+		return -1;
 
 	return 0;
 }
 
+static inline int unmap_patch_area(unsigned long addr)
+{
+	pte_t *ptep;
+	pmd_t *pmdp;
+	pud_t *pudp;
+	pgd_t *pgdp;
+
+	pgdp = pgd_offset_k(addr);
+	if (unlikely(!pgdp))
+		return -EINVAL;
+
+	pudp = pud_offset(pgdp, addr);
+	if (unlikely(!pudp))
+		return -EINVAL;
+
+	pmdp = pmd_offset(pudp, addr);
+	if (unlikely(!pmdp))
+		return -EINVAL;
+
+	ptep = pte_offset_kernel(pmdp, addr);
+	if (unlikely(!ptep))
+		return -EINVAL;
+
+	pr_devel("clearing mm %p, pte %p, addr %lx\n", &init_mm, ptep, addr);
+
+	/*
+	 * In hash, pte_clear flushes the tlb, in radix, we have to
+	 */
+	pte_clear(&init_mm, addr, ptep);
+	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+
+	return 0;
+}
+
+int patch_instruction(unsigned int *addr, unsigned int instr)
+{
+	int err;
+	unsigned int *dest = NULL;
+	unsigned long flags;
+	unsigned long text_poke_addr;
+	unsigned long kaddr = (unsigned long)addr;
+
+	/*
+	 * During early early boot patch_instruction is called
+	 * when text_poke_area is not ready, but we still need
+	 * to allow patching. We just do the plain old patching
+	 * We use slab_is_available and per cpu read * via this_cpu_read
+	 * of text_poke_area. Per-CPU areas might not be up early
+	 * this can create problems with just using this_cpu_read()
+	 */
+	if (!slab_is_available() || !this_cpu_read(text_poke_area))
+		return __patch_instruction(addr, instr);
+
+	local_irq_save(flags);
+
+	text_poke_addr = (unsigned long)__this_cpu_read(text_poke_area)->addr;
+	if (map_patch_area(addr, text_poke_addr)) {
+		err = -1;
+		goto out;
+	}
+
+	dest = (unsigned int *)(text_poke_addr) +
+			((kaddr & ~PAGE_MASK) / sizeof(unsigned int));
+
+	/*
+	 * We use __put_user_size so that we can handle faults while
+	 * writing to dest and return err to handle faults gracefully
+	 */
+	__put_user_size(instr, dest, 4, err);
+	if (!err)
+		asm ("dcbst 0, %0; sync; icbi 0,%0; icbi 0,%1; sync; isync"
+			::"r" (dest), "r"(addr));
+
+	err = unmap_patch_area(text_poke_addr);
+	if (err)
+		pr_warn("failed to unmap %lx\n", text_poke_addr);
+
+out:
+	local_irq_restore(flags);
+
+	return err;
+}
+#else /* !CONFIG_STRICT_KERNEL_RWX */
+
+int patch_instruction(unsigned int *addr, unsigned int instr)
+{
+	return __patch_instruction(addr, instr);
+}
+
+#endif /* CONFIG_STRICT_KERNEL_RWX */
+NOKPROBE_SYMBOL(patch_instruction);
+
 int patch_branch(unsigned int *addr, unsigned long target, int flags)
 {
 	return patch_instruction(addr, create_branch(addr, target, flags));
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
index a24b4039352c..706b7cc19846 100644
--- a/arch/powerpc/lib/copyuser_power7.S
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -82,14 +82,14 @@ _GLOBAL(__copy_tofrom_user_power7)
 #ifdef CONFIG_ALTIVEC
 	cmpldi	r5,16
-	cmpldi	cr1,r5,4096
+	cmpldi	cr1,r5,3328
 
 	std	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
 	std	r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
 	std	r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
 
 	blt	.Lshort_copy
-	bgt	cr1,.Lvmx_copy
+	bge	cr1,.Lvmx_copy
 #else
 	cmpldi	r5,16
diff --git a/arch/powerpc/lib/crtsavres.S b/arch/powerpc/lib/crtsavres.S
index 18af0b3d3eb2..7e5e1c28e56a 100644
--- a/arch/powerpc/lib/crtsavres.S
+++ b/arch/powerpc/lib/crtsavres.S
@@ -44,10 +44,10 @@
 
 #ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
 
-#ifndef CONFIG_PPC64
-
 	.section ".text"
 
+#ifndef CONFIG_PPC64
+
 /* Routines for saving integer registers, called by the compiler. */
 /* Called with r11 pointing to the stack header word of the caller of the */
 /* function, just beyond the end of the integer save area. */
@@ -314,8 +314,6 @@ _GLOBAL(_restvr_31)
 
 #else /* CONFIG_PPC64 */
 
-	.section ".text.save.restore","ax",@progbits
-
 	.globl	_savegpr0_14
 _savegpr0_14:
 	std	r14,-144(r1)
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index f3917705c686..41cf5ae273cf 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -233,192 +233,192 @@ static long calc_offset(struct fixup_entry *entry, unsigned int *p)
 
 static void test_basic_patching(void)
 {
-	extern unsigned int ftr_fixup_test1;
-	extern unsigned int end_ftr_fixup_test1;
-	extern unsigned int ftr_fixup_test1_orig;
-	extern unsigned int ftr_fixup_test1_expected;
-	int size = &end_ftr_fixup_test1 - &ftr_fixup_test1;
+	extern unsigned int ftr_fixup_test1[];
+	extern unsigned int end_ftr_fixup_test1[];
+	extern unsigned int ftr_fixup_test1_orig[];
+	extern unsigned int ftr_fixup_test1_expected[];
+	int size = end_ftr_fixup_test1 - ftr_fixup_test1;
 
 	fixup.value = fixup.mask = 8;
-	fixup.start_off = calc_offset(&fixup, &ftr_fixup_test1 + 1);
-	fixup.end_off = calc_offset(&fixup, &ftr_fixup_test1 + 2);
+	fixup.start_off = calc_offset(&fixup, ftr_fixup_test1 + 1);
+	fixup.end_off = calc_offset(&fixup, ftr_fixup_test1 + 2);
 	fixup.alt_start_off = fixup.alt_end_off = 0;
 
 	/* Sanity check */
-	check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);
+	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);
 
 	/* Check we don't patch if the value matches */
 	patch_feature_section(8, &fixup);
-	check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);
+	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);
 
 	/* Check we do patch if the value doesn't match */
 	patch_feature_section(0, &fixup);
-	check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_expected, size) == 0);
+	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0);
 
 	/* Check we do patch if the mask doesn't match */
-	memcpy(&ftr_fixup_test1, &ftr_fixup_test1_orig, size);
-	check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);
+	memcpy(ftr_fixup_test1, ftr_fixup_test1_orig, size);
+	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);
 	patch_feature_section(~8, &fixup);
-	check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_expected, size) == 0);
+	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0);
 }
 
 static void test_alternative_patching(void)
 {
-	extern unsigned int ftr_fixup_test2;
-	extern unsigned int end_ftr_fixup_test2;
-	extern unsigned int ftr_fixup_test2_orig;
-	extern unsigned int ftr_fixup_test2_alt;
-	extern unsigned int ftr_fixup_test2_expected;
-	int size = &end_ftr_fixup_test2 - &ftr_fixup_test2;
+	extern unsigned int ftr_fixup_test2[];
+	extern unsigned int end_ftr_fixup_test2[];
+	extern unsigned int ftr_fixup_test2_orig[];
+	extern unsigned int ftr_fixup_test2_alt[];
+	extern unsigned int ftr_fixup_test2_expected[];
+	int size = end_ftr_fixup_test2 - ftr_fixup_test2;
 
 	fixup.value = fixup.mask = 0xF;
-	fixup.start_off = calc_offset(&fixup, &ftr_fixup_test2 + 1);
-	fixup.end_off = calc_offset(&fixup, &ftr_fixup_test2 + 2);
-	fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test2_alt);
-	fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test2_alt + 1);
+	fixup.start_off = calc_offset(&fixup, ftr_fixup_test2 + 1);
+	fixup.end_off = calc_offset(&fixup, ftr_fixup_test2 + 2);
+	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test2_alt);
+	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test2_alt + 1);
 
 	/* Sanity check */
-	check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);
+	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);
 
 	/* Check we don't patch if the value matches */
 	patch_feature_section(0xF, &fixup);
-	check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);
+	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);
 
 	/* Check we do patch if the value doesn't match */
 	patch_feature_section(0, &fixup);
-	check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_expected, size) == 0);
+	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0);
 
 	/* Check we do patch if the mask doesn't match */
-	memcpy(&ftr_fixup_test2, &ftr_fixup_test2_orig, size);
-	check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);
+	memcpy(ftr_fixup_test2, ftr_fixup_test2_orig, size);
+	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);
 	patch_feature_section(~0xF, &fixup);
-	check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_expected, size) == 0);
+	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0);
 }
 
 static void test_alternative_case_too_big(void)
 {
-	extern unsigned int ftr_fixup_test3;
-	extern unsigned int end_ftr_fixup_test3;
-	extern unsigned int ftr_fixup_test3_orig;
-	extern unsigned int ftr_fixup_test3_alt;
-	int size = &end_ftr_fixup_test3 - &ftr_fixup_test3;
+	extern unsigned int ftr_fixup_test3[];
+	extern unsigned int end_ftr_fixup_test3[];
+	extern unsigned int ftr_fixup_test3_orig[];
+	extern unsigned int ftr_fixup_test3_alt[];
+	int size = end_ftr_fixup_test3 - ftr_fixup_test3;
 
 	fixup.value = fixup.mask = 0xC;
-	fixup.start_off = calc_offset(&fixup, &ftr_fixup_test3 + 1);
-	fixup.end_off = calc_offset(&fixup, &ftr_fixup_test3 + 2);
-	fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test3_alt);
-	fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test3_alt + 2);
+	fixup.start_off = calc_offset(&fixup, ftr_fixup_test3 + 1);
+	fixup.end_off = calc_offset(&fixup, ftr_fixup_test3 + 2);
+	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test3_alt);
+	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test3_alt + 2);
 
 	/* Sanity check */
-	check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
+	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
 
 	/* Expect nothing to be patched, and the error returned to us */
 	check(patch_feature_section(0xF, &fixup) == 1);
-	check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
+	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
 	check(patch_feature_section(0, &fixup) == 1);
-	check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
+	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
 	check(patch_feature_section(~0xF, &fixup) == 1);
-	check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
+	check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
 }
 
 static void test_alternative_case_too_small(void)
 {
-	extern unsigned int ftr_fixup_test4;
-	extern unsigned int end_ftr_fixup_test4;
-	extern unsigned int ftr_fixup_test4_orig;
-	extern unsigned int ftr_fixup_test4_alt;
-	extern unsigned int ftr_fixup_test4_expected;
-	int size = &end_ftr_fixup_test4 - &ftr_fixup_test4;
+	extern unsigned int ftr_fixup_test4[];
+	extern unsigned int end_ftr_fixup_test4[];
+	extern unsigned int ftr_fixup_test4_orig[];
+	extern unsigned int ftr_fixup_test4_alt[];
+	extern unsigned int ftr_fixup_test4_expected[];
+	int size = end_ftr_fixup_test4 - ftr_fixup_test4;
 	unsigned long flag;
 
 	/* Check a high-bit flag */
 	flag = 1UL << ((sizeof(unsigned long) - 1) * 8);
 	fixup.value = fixup.mask = flag;
-	fixup.start_off = calc_offset(&fixup, &ftr_fixup_test4 + 1);
-	fixup.end_off = calc_offset(&fixup, &ftr_fixup_test4 + 5);
-	fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test4_alt);
-	fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test4_alt + 2);
+	fixup.start_off = calc_offset(&fixup, ftr_fixup_test4 + 1);
+	fixup.end_off = calc_offset(&fixup, ftr_fixup_test4 + 5);
+	fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test4_alt);
+	fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test4_alt + 2);
 
 	/* Sanity check */
-	check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);
+	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);
 
 	/* Check we don't patch if the value matches */
 	patch_feature_section(flag, &fixup);
-	check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);
+	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);
 
 	/* Check we do patch if the value doesn't match */
 	patch_feature_section(0, &fixup);
-	check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_expected, size) == 0);
+	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_expected, size) == 0);
 
 	/* Check we do patch if the mask doesn't match */
-	memcpy(&ftr_fixup_test4, &ftr_fixup_test4_orig, size);
-	check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);
+	memcpy(ftr_fixup_test4, ftr_fixup_test4_orig, size);
+	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);
 	patch_feature_section(~flag, &fixup);
-	check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_expected, size) == 0);
+	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_expected, size) == 0);
 }
 
 static void test_alternative_case_with_branch(void)
 {
-	extern unsigned int ftr_fixup_test5;
-	extern unsigned int end_ftr_fixup_test5;
-	extern unsigned int ftr_fixup_test5_expected;
-	int size = &end_ftr_fixup_test5 - &ftr_fixup_test5;
+	extern unsigned int ftr_fixup_test5[];
+	extern unsigned int end_ftr_fixup_test5[];
+	extern unsigned int ftr_fixup_test5_expected[];
+	int size = end_ftr_fixup_test5 - ftr_fixup_test5;
 
-	check(memcmp(&ftr_fixup_test5, &ftr_fixup_test5_expected, size) == 0);
+	check(memcmp(ftr_fixup_test5, ftr_fixup_test5_expected, size) == 0);
 }
 
 static void test_alternative_case_with_external_branch(void)
 {
-	extern unsigned int ftr_fixup_test6;
-	extern unsigned int end_ftr_fixup_test6;
-	extern unsigned int ftr_fixup_test6_expected;
-	int size = &end_ftr_fixup_test6 - &ftr_fixup_test6;
+	extern unsigned int ftr_fixup_test6[];
+	extern unsigned int end_ftr_fixup_test6[];
+	extern unsigned int ftr_fixup_test6_expected[];
+	int size = end_ftr_fixup_test6 - ftr_fixup_test6;
 
-	check(memcmp(&ftr_fixup_test6, &ftr_fixup_test6_expected, size) == 0);
+	check(memcmp(ftr_fixup_test6, ftr_fixup_test6_expected, size) == 0);
 }
 
 static void test_cpu_macros(void)
 {
-	extern u8 ftr_fixup_test_FTR_macros;
-	extern u8 ftr_fixup_test_FTR_macros_expected;
-	unsigned long size = &ftr_fixup_test_FTR_macros_expected -
-			     &ftr_fixup_test_FTR_macros;
+	extern u8 ftr_fixup_test_FTR_macros[];
+	extern u8 ftr_fixup_test_FTR_macros_expected[];
+	unsigned long size = ftr_fixup_test_FTR_macros_expected -
+			     ftr_fixup_test_FTR_macros;
 
 	/* The fixups have already been done for us during boot */
-	check(memcmp(&ftr_fixup_test_FTR_macros,
-		     &ftr_fixup_test_FTR_macros_expected, size) == 0);
+	check(memcmp(ftr_fixup_test_FTR_macros,
+		     ftr_fixup_test_FTR_macros_expected, size) == 0);
 }
 
 static void test_fw_macros(void)
 {
 #ifdef CONFIG_PPC64
-	extern u8 ftr_fixup_test_FW_FTR_macros;
-	extern u8 ftr_fixup_test_FW_FTR_macros_expected;
-	unsigned long size = &ftr_fixup_test_FW_FTR_macros_expected -
-			     &ftr_fixup_test_FW_FTR_macros;
+	extern u8 ftr_fixup_test_FW_FTR_macros[];
+	extern u8 ftr_fixup_test_FW_FTR_macros_expected[];
+	unsigned long size = ftr_fixup_test_FW_FTR_macros_expected -
+			     ftr_fixup_test_FW_FTR_macros;
 
 	/* The fixups have already been done for us during boot */
-	check(memcmp(&ftr_fixup_test_FW_FTR_macros,
-		     &ftr_fixup_test_FW_FTR_macros_expected, size) == 0);
+	check(memcmp(ftr_fixup_test_FW_FTR_macros,
+		     ftr_fixup_test_FW_FTR_macros_expected, size) == 0);
 #endif
 }
 
 static void test_lwsync_macros(void)
 {
-	extern u8 lwsync_fixup_test;
-	extern u8 end_lwsync_fixup_test;
-	extern u8 lwsync_fixup_test_expected_LWSYNC;
-	extern u8 lwsync_fixup_test_expected_SYNC;
-	unsigned long size = &end_lwsync_fixup_test -
-			     &lwsync_fixup_test;
+	extern u8 lwsync_fixup_test[];
+	extern u8 end_lwsync_fixup_test[];
+	extern u8 lwsync_fixup_test_expected_LWSYNC[];
+	extern u8 lwsync_fixup_test_expected_SYNC[];
+	unsigned long size = end_lwsync_fixup_test -
+			     lwsync_fixup_test;
 
 	/* The fixups have already been done for us during boot */
 	if (cur_cpu_spec->cpu_features & CPU_FTR_LWSYNC) {
-		check(memcmp(&lwsync_fixup_test,
-			     &lwsync_fixup_test_expected_LWSYNC, size) == 0);
+		check(memcmp(lwsync_fixup_test,
+			     lwsync_fixup_test_expected_LWSYNC, size) == 0);
 	} else {
-		check(memcmp(&lwsync_fixup_test,
-			     &lwsync_fixup_test_expected_SYNC, size) == 0);
+		check(memcmp(lwsync_fixup_test,
+			     lwsync_fixup_test_expected_SYNC, size) == 0);
 	}
 }
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 33117f8a0882..ee33327686ae 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -683,8 +683,10 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs,
 	case 19:
 		switch ((instr >> 1) & 0x3ff) {
 		case 0:		/* mcrf */
-			rd = (instr >> 21) & 0x1c;
-			ra = (instr >> 16) & 0x1c;
+			rd = 7 - ((instr >> 23) & 0x7);
+			ra = 7 - ((instr >> 18) & 0x7);
+			rd *= 4;
+			ra *= 4;
 			val = (regs->ccr >> ra) & 0xf;
 			regs->ccr = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
 			goto instr_done;
@@ -964,6 +966,19 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs,
 #endif
 
 	case 19:	/* mfcr */
+		if ((instr >> 20) & 1) {
+			imm = 0xf0000000UL;
+			for (sh = 0; sh < 8; ++sh) {
+				if (instr & (0x80000 >> sh)) {
+					regs->gpr[rd] = regs->ccr & imm;
+					break;
+				}
+				imm >>= 4;
+			}
+
+			goto instr_done;
+		}
+
 		regs->gpr[rd] = regs->ccr;
 		regs->gpr[rd] &= 0xffffffffUL;
 		goto instr_done;
diff --git a/arch/powerpc/lib/xor_vmx.c b/arch/powerpc/lib/xor_vmx.c
index f9de69a04e88..4df240aa5f81 100644
--- a/arch/powerpc/lib/xor_vmx.c
+++ b/arch/powerpc/lib/xor_vmx.c
@@ -29,10 +29,7 @@
 #define vector __attribute__((vector_size(16)))
 #endif
 
-#include <linux/preempt.h>
-#include <linux/export.h>
-#include <linux/sched.h>
-#include <asm/switch_to.h>
+#include "xor_vmx.h"
 
 typedef vector signed char unative_t;
 
@@ -64,16 +61,13 @@ typedef vector signed char unative_t;
 		V1##_3 = vec_xor(V1##_3, V2##_3);	\
 	} while (0)
 
-void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
-		   unsigned long *v2_in)
+void __xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
+		     unsigned long *v2_in)
 {
 	DEFINE(v1);
 	DEFINE(v2);
 	unsigned long lines = bytes / (sizeof(unative_t)) / 4;
 
-	preempt_disable();
-	enable_kernel_altivec();
-
 	do {
 		LOAD(v1);
 		LOAD(v2);
@@ -83,23 +77,16 @@ void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
 		v1 += 4;
 		v2 += 4;
 	} while (--lines > 0);
-
-	disable_kernel_altivec();
-	preempt_enable();
 }
-EXPORT_SYMBOL(xor_altivec_2);
 
-void xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
-		   unsigned long *v2_in, unsigned long *v3_in)
+void __xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
+		     unsigned long *v2_in, unsigned long *v3_in)
 {
 	DEFINE(v1);
 	DEFINE(v2);
 	DEFINE(v3);
 	unsigned long lines = bytes / (sizeof(unative_t)) / 4;
 
-	preempt_disable();
-	enable_kernel_altivec();
-
 	do {
 		LOAD(v1);
 		LOAD(v2);
@@ -112,15 +99,11 @@ void xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
 		v2 += 4;
 		v3 += 4;
 	} while (--lines > 0);
-
-	disable_kernel_altivec();
-	preempt_enable();
 }
-EXPORT_SYMBOL(xor_altivec_3);
 
-void xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
-		   unsigned long *v2_in, unsigned long *v3_in,
-		   unsigned long *v4_in)
+void __xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
+		     unsigned long *v2_in, unsigned long *v3_in,
+		     unsigned long *v4_in)
 {
 	DEFINE(v1);
 	DEFINE(v2);
@@ -128,9 +111,6 @@ void xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
 	DEFINE(v4);
 	unsigned long lines = bytes / (sizeof(unative_t)) / 4;
 
-	preempt_disable();
-	enable_kernel_altivec();
-
 	do {
 		LOAD(v1);
 		LOAD(v2);
@@ -146,15 +126,11 @@ void xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
 		v3 += 4;
 		v4 += 4;
 	} while (--lines > 0);
-
-	disable_kernel_altivec();
-	preempt_enable();
 }
-EXPORT_SYMBOL(xor_altivec_4);
 
-void xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
-		   unsigned long *v2_in, unsigned long *v3_in,
-		   unsigned long *v4_in, unsigned long *v5_in)
+void __xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
+		     unsigned long *v2_in, unsigned long *v3_in,
+		     unsigned long *v4_in, unsigned long *v5_in)
 {
 	DEFINE(v1);
 	DEFINE(v2);
@@ -163,9 +139,6 @@ void xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
 	DEFINE(v5);
 	unsigned long lines = bytes / (sizeof(unative_t)) / 4;
 
-	preempt_disable();
-	enable_kernel_altivec();
-
 	do {
 		LOAD(v1);
 		LOAD(v2);
@@ -184,8 +157,4 @@ void xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
 		v4 += 4;
 		v5 += 4;
 	} while (--lines > 0);
-
-	disable_kernel_altivec();
-	preempt_enable();
 }
-EXPORT_SYMBOL(xor_altivec_5);
diff --git a/arch/powerpc/lib/xor_vmx.h b/arch/powerpc/lib/xor_vmx.h
new file mode 100644
index 000000000000..4746708451ae
--- /dev/null
+++ b/arch/powerpc/lib/xor_vmx.h
@@ -0,0 +1,20 @@
+/*
+ * Simple interface to link xor_vmx.c and xor_vmx_glue.c
+ *
+ * Separating these file ensures that no altivec instructions are run
+ * outside of the enable/disable altivec block.
+ */
+
+void __xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
+		     unsigned long *v2_in);
+
+void __xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
+		     unsigned long *v2_in, unsigned long *v3_in);
+
+void __xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
+		     unsigned long *v2_in, unsigned long *v3_in,
+		     unsigned long *v4_in);
+
+void __xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
+		     unsigned long *v2_in, unsigned long *v3_in,
+		     unsigned long *v4_in, unsigned long *v5_in);
diff --git a/arch/powerpc/lib/xor_vmx_glue.c b/arch/powerpc/lib/xor_vmx_glue.c
new file mode 100644
index 000000000000..6521fe5e8cef
--- /dev/null
+++ b/arch/powerpc/lib/xor_vmx_glue.c
@@ -0,0 +1,62 @@
+/*
+ * Altivec XOR operations
+ *
+ * Copyright 2017 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/preempt.h>
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <asm/switch_to.h>
+#include "xor_vmx.h"
+
+void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
+		   unsigned long *v2_in)
+{
+	preempt_disable();
+	enable_kernel_altivec();
+	__xor_altivec_2(bytes, v1_in, v2_in);
+	disable_kernel_altivec();
+	preempt_enable();
+}
+EXPORT_SYMBOL(xor_altivec_2);
+
+void xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
+		   unsigned long *v2_in, unsigned long *v3_in)
+{
+	preempt_disable();
+	enable_kernel_altivec();
+	__xor_altivec_3(bytes, v1_in, v2_in, v3_in);
+	disable_kernel_altivec();
+	preempt_enable();
+}
+EXPORT_SYMBOL(xor_altivec_3);
+
+void xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
+		   unsigned long *v2_in, unsigned long *v3_in,
+		   unsigned long *v4_in)
+{
+	preempt_disable();
+	enable_kernel_altivec();
+	__xor_altivec_4(bytes, v1_in, v2_in, v3_in, v4_in);
+	disable_kernel_altivec();
+	preempt_enable();
+}
+EXPORT_SYMBOL(xor_altivec_4);
+
+void xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
+		   unsigned long *v2_in, unsigned long *v3_in,
+		   unsigned long *v4_in, unsigned long *v5_in)
+{
+	preempt_disable();
+	enable_kernel_altivec();
+	__xor_altivec_5(bytes, v1_in, v2_in, v3_in, v4_in, v5_in);
+	disable_kernel_altivec();
+	preempt_enable();
+}
+EXPORT_SYMBOL(xor_altivec_5);
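For context, a minimal sketch (not part of the commit above) of how a caller would drive the patch_instruction()/patch_branch() interfaces reworked in code-patching.c. The call sites example_nop_out() and example_redirect() are hypothetical names; patch_instruction(), patch_branch() and PPC_INST_NOP are the real interfaces. Under CONFIG_STRICT_KERNEL_RWX these writes go through the per-CPU text_poke_area mapping added above; otherwise they fall back to the direct __patch_instruction() path.

/* Hypothetical illustration only -- not part of the patch above. */
#include <asm/code-patching.h>
#include <asm/ppc-opcode.h>

/* Replace the instruction at @site with a nop (e.g. to disable a call site). */
static int example_nop_out(unsigned int *site)
{
	return patch_instruction(site, PPC_INST_NOP);
}

/* Redirect @site to @target with a relative, non-linking branch. */
static int example_redirect(unsigned int *site, unsigned long target)
{
	return patch_branch(site, target, 0);
}

Both helpers return 0 on success, mirroring the error handling that patch_instruction() itself gains in this series.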