Diffstat (limited to 'arch/arm')
60 files changed, 489 insertions, 6171 deletions
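For reference, several of the board files below drop the ISP1761 controller's board-specific port1-otg flag in favour of the generic dr_mode USB binding; a minimal sketch of the resulting node shape, assuming the addresses from the arm-realview-eb hunk and omitting the surrounding bus properties:

usb: usb@4f000000 {
	compatible = "nxp,usb-isp1761";
	reg = <0x4f000000 0x20000>;
	/* replaces the former port1-otg flag; runs the controller in device (peripheral) mode */
	dr_mode = "peripheral";
};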
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 819f0d08401e..3ea1c417339f 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -218,9 +218,6 @@ config GENERIC_CALIBRATE_DELAY config ARCH_MAY_HAVE_PC_FDC bool -config ZONE_DMA - bool - config ARCH_SUPPORTS_UPROBES def_bool y @@ -353,7 +350,6 @@ config ARCH_EP93XX select ARM_VIC select GENERIC_IRQ_MULTI_HANDLER select AUTO_ZRELADDR - select CLKDEV_LOOKUP select CLKSRC_MMIO select CPU_ARM920T select GPIOLIB @@ -505,7 +501,6 @@ config ARCH_OMAP1 bool "TI OMAP1" depends on MMU select ARCH_OMAP - select CLKDEV_LOOKUP select CLKSRC_MMIO select GENERIC_IRQ_CHIP select GENERIC_IRQ_MULTI_HANDLER diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index ba8514468df8..1c4384db223d 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug @@ -66,6 +66,8 @@ config UNWINDER_FRAME_POINTER config UNWINDER_ARM bool "ARM EABI stack unwinder" depends on AEABI && !FUNCTION_GRAPH_TRACER + # https://github.com/ClangBuiltLinux/linux/issues/732 + depends on !LD_IS_LLD || LLD_VERSION >= 110000 select ARM_UNWIND help This option enables stack unwinding support in the kernel diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 8eb70c1febce..9d91ae1091b0 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -100,7 +100,7 @@ targets := vmlinux vmlinux.lds piggy_data piggy.o \ lib1funcs.o ashldi3.o bswapsdi2.o \ head.o $(OBJS) -clean-files += piggy_data lib1funcs.S ashldi3.S bswapsdi2.S hyp-stub.S +clean-files += lib1funcs.S ashldi3.S bswapsdi2.S hyp-stub.S KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING diff --git a/arch/arm/boot/compressed/atags_to_fdt.c b/arch/arm/boot/compressed/atags_to_fdt.c index 31927d2fe297..1feb6b0f7a1f 100644 --- a/arch/arm/boot/compressed/atags_to_fdt.c +++ b/arch/arm/boot/compressed/atags_to_fdt.c @@ -121,7 +121,7 @@ static void hex_str(char *out, uint32_t value) /* * Convert and fold provided ATAGs into the provided FDT. 
* - * REturn values: + * Return values: * = 0 -> pretend success * = 1 -> bad ATAG (may retry with another possible ATAG pointer) * < 0 -> error from libfdt diff --git a/arch/arm/boot/dts/am33xx-clocks.dtsi b/arch/arm/boot/dts/am33xx-clocks.dtsi index dced92a8970e..b7b7106f2dee 100644 --- a/arch/arm/boot/dts/am33xx-clocks.dtsi +++ b/arch/arm/boot/dts/am33xx-clocks.dtsi @@ -164,7 +164,7 @@ #clock-cells = <0>; compatible = "ti,am3-dpll-core-clock"; clocks = <&sys_clkin_ck>, <&sys_clkin_ck>; - reg = <0x0490>, <0x045c>, <0x0468>; + reg = <0x0490>, <0x045c>, <0x0468>, <0x0460>, <0x0464>; }; dpll_core_x2_ck: dpll_core_x2_ck { @@ -204,7 +204,7 @@ #clock-cells = <0>; compatible = "ti,am3-dpll-clock"; clocks = <&sys_clkin_ck>, <&sys_clkin_ck>; - reg = <0x0488>, <0x0420>, <0x042c>; + reg = <0x0488>, <0x0420>, <0x042c>, <0x0424>, <0x0428>; }; dpll_mpu_m2_ck: dpll_mpu_m2_ck@4a8 { @@ -220,7 +220,7 @@ #clock-cells = <0>; compatible = "ti,am3-dpll-no-gate-clock"; clocks = <&sys_clkin_ck>, <&sys_clkin_ck>; - reg = <0x0494>, <0x0434>, <0x0440>; + reg = <0x0494>, <0x0434>, <0x0440>, <0x0438>, <0x043c>; }; dpll_ddr_m2_ck: dpll_ddr_m2_ck@4a0 { @@ -244,7 +244,7 @@ #clock-cells = <0>; compatible = "ti,am3-dpll-no-gate-clock"; clocks = <&sys_clkin_ck>, <&sys_clkin_ck>; - reg = <0x0498>, <0x0448>, <0x0454>; + reg = <0x0498>, <0x0448>, <0x0454>, <0x044c>, <0x0450>; }; dpll_disp_m2_ck: dpll_disp_m2_ck@4a4 { @@ -261,7 +261,7 @@ #clock-cells = <0>; compatible = "ti,am3-dpll-no-gate-j-type-clock"; clocks = <&sys_clkin_ck>, <&sys_clkin_ck>; - reg = <0x048c>, <0x0470>, <0x049c>; + reg = <0x048c>, <0x0470>, <0x049c>, <0x0474>, <0x0478>; }; dpll_per_m2_ck: dpll_per_m2_ck@4ac { diff --git a/arch/arm/boot/dts/am43xx-clocks.dtsi b/arch/arm/boot/dts/am43xx-clocks.dtsi index c726cd8dbdf1..314fc5975acb 100644 --- a/arch/arm/boot/dts/am43xx-clocks.dtsi +++ b/arch/arm/boot/dts/am43xx-clocks.dtsi @@ -204,7 +204,7 @@ #clock-cells = <0>; compatible = "ti,am3-dpll-core-clock"; clocks = <&sys_clkin_ck>, <&sys_clkin_ck>; - reg = <0x2d20>, <0x2d24>, <0x2d2c>; + reg = <0x2d20>, <0x2d24>, <0x2d2c>, <0x2d48>, <0x2d4c>; }; dpll_core_x2_ck: dpll_core_x2_ck { @@ -250,7 +250,7 @@ #clock-cells = <0>; compatible = "ti,am3-dpll-clock"; clocks = <&sys_clkin_ck>, <&sys_clkin_ck>; - reg = <0x2d60>, <0x2d64>, <0x2d6c>; + reg = <0x2d60>, <0x2d64>, <0x2d6c>, <0x2d88>, <0x2d8c>; }; dpll_mpu_m2_ck: dpll_mpu_m2_ck@2d70 { @@ -276,7 +276,7 @@ #clock-cells = <0>; compatible = "ti,am3-dpll-clock"; clocks = <&sys_clkin_ck>, <&sys_clkin_ck>; - reg = <0x2da0>, <0x2da4>, <0x2dac>; + reg = <0x2da0>, <0x2da4>, <0x2dac>, <0x2dc8>, <0x2dcc>; }; dpll_ddr_m2_ck: dpll_ddr_m2_ck@2db0 { @@ -294,7 +294,7 @@ #clock-cells = <0>; compatible = "ti,am3-dpll-clock"; clocks = <&sys_clkin_ck>, <&sys_clkin_ck>; - reg = <0x2e20>, <0x2e24>, <0x2e2c>; + reg = <0x2e20>, <0x2e24>, <0x2e2c>, <0x2e48>, <0x2e4c>; }; dpll_disp_m2_ck: dpll_disp_m2_ck@2e30 { @@ -313,7 +313,7 @@ #clock-cells = <0>; compatible = "ti,am3-dpll-j-type-clock"; clocks = <&sys_clkin_ck>, <&sys_clkin_ck>; - reg = <0x2de0>, <0x2de4>, <0x2dec>; + reg = <0x2de0>, <0x2de4>, <0x2dec>, <0x2e08>, <0x2e0c>; }; dpll_per_m2_ck: dpll_per_m2_ck@2df0 { @@ -557,7 +557,7 @@ #clock-cells = <0>; compatible = "ti,am3-dpll-clock"; clocks = <&sys_clkin_ck>, <&sys_clkin_ck>; - reg = <0x2e60>, <0x2e64>, <0x2e6c>; + reg = <0x2e60>, <0x2e64>, <0x2e6c>, <0x2e88>, <0x2e8c>; }; dpll_extdev_m2_ck: dpll_extdev_m2_ck@2e70 { diff --git a/arch/arm/boot/dts/arm-realview-eb.dtsi b/arch/arm/boot/dts/arm-realview-eb.dtsi index a534a8e444d9..04e8a27ba1eb 
100644 --- a/arch/arm/boot/dts/arm-realview-eb.dtsi +++ b/arch/arm/boot/dts/arm-realview-eb.dtsi @@ -148,7 +148,7 @@ usb: usb@4f000000 { compatible = "nxp,usb-isp1761"; reg = <0x4f000000 0x20000>; - port1-otg; + dr_mode = "peripheral"; }; bridge { diff --git a/arch/arm/boot/dts/arm-realview-pb1176.dts b/arch/arm/boot/dts/arm-realview-pb1176.dts index f925782f8560..366687fb1ee3 100644 --- a/arch/arm/boot/dts/arm-realview-pb1176.dts +++ b/arch/arm/boot/dts/arm-realview-pb1176.dts @@ -166,7 +166,7 @@ reg = <0x3b000000 0x20000>; interrupt-parent = <&intc_fpga1176>; interrupts = <0 11 IRQ_TYPE_LEVEL_HIGH>; - port1-otg; + dr_mode = "peripheral"; }; bridge { diff --git a/arch/arm/boot/dts/arm-realview-pb11mp.dts b/arch/arm/boot/dts/arm-realview-pb11mp.dts index 0c7dabef4a5f..228a51a38f95 100644 --- a/arch/arm/boot/dts/arm-realview-pb11mp.dts +++ b/arch/arm/boot/dts/arm-realview-pb11mp.dts @@ -712,7 +712,7 @@ reg = <0x4f000000 0x20000>; interrupt-parent = <&intc_tc11mp>; interrupts = <0 3 IRQ_TYPE_LEVEL_HIGH>; - port1-otg; + dr_mode = "peripheral"; }; }; }; diff --git a/arch/arm/boot/dts/arm-realview-pbx.dtsi b/arch/arm/boot/dts/arm-realview-pbx.dtsi index ac95667ed781..ccf6f756b6ed 100644 --- a/arch/arm/boot/dts/arm-realview-pbx.dtsi +++ b/arch/arm/boot/dts/arm-realview-pbx.dtsi @@ -164,7 +164,7 @@ usb: usb@4f000000 { compatible = "nxp,usb-isp1761"; reg = <0x4f000000 0x20000>; - port1-otg; + dr_mode = "peripheral"; }; bridge { diff --git a/arch/arm/boot/dts/imx53-ppd.dts b/arch/arm/boot/dts/imx53-ppd.dts index be040b6a02fa..5a5fa6190a52 100644 --- a/arch/arm/boot/dts/imx53-ppd.dts +++ b/arch/arm/boot/dts/imx53-ppd.dts @@ -651,6 +651,7 @@ &uart2 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_uart2>; + fsl,dma-info = <24 20>; status = "okay"; }; @@ -670,6 +671,7 @@ &uart5 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_uart5>; + fsl,dma-info = <4096 4>; status = "okay"; }; diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi index 05c55875835d..e47e1ca63043 100644 --- a/arch/arm/boot/dts/sama5d4.dtsi +++ b/arch/arm/boot/dts/sama5d4.dtsi @@ -101,6 +101,13 @@ ranges = <0 0x100000 0x2400>; }; + vdec0: vdec@300000 { + compatible = "microchip,sama5d4-vdec"; + reg = <0x00300000 0x100000>; + interrupts = <19 IRQ_TYPE_LEVEL_HIGH 4>; + clocks = <&pmc PMC_TYPE_PERIPHERAL 19>; + }; + usb0: gadget@400000 { compatible = "atmel,sama5d3-udc"; reg = <0x00400000 0x100000 diff --git a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi index 4f7220b11f2d..2ad9fd7c94ec 100644 --- a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi +++ b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi @@ -144,7 +144,7 @@ compatible = "nxp,usb-isp1761"; reg = <2 0x03000000 0x20000>; interrupts = <16>; - port1-otg; + dr_mode = "peripheral"; }; iofpga-bus@300000000 { diff --git a/arch/arm/boot/dts/vexpress-v2m.dtsi b/arch/arm/boot/dts/vexpress-v2m.dtsi index 2ac41ed3a57c..ec13ceb9ed36 100644 --- a/arch/arm/boot/dts/vexpress-v2m.dtsi +++ b/arch/arm/boot/dts/vexpress-v2m.dtsi @@ -62,7 +62,7 @@ compatible = "nxp,usb-isp1761"; reg = <3 0x03000000 0x20000>; interrupts = <16>; - port1-otg; + dr_mode = "peripheral"; }; iofpga@7,00000000 { diff --git a/arch/arm/configs/footbridge_defconfig b/arch/arm/configs/footbridge_defconfig index 2aa3ebeb89d7..7a32de51f0fa 100644 --- a/arch/arm/configs/footbridge_defconfig +++ b/arch/arm/configs/footbridge_defconfig @@ -64,7 +64,6 @@ CONFIG_PARIDE_ON26=m CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y -CONFIG_IDE=y CONFIG_NETDEVICES=y 
CONFIG_NET_ETHERNET=y CONFIG_NET_VENDOR_3COM=y diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig index 875a3c28a267..363f1b1b08e3 100644 --- a/arch/arm/configs/pxa_defconfig +++ b/arch/arm/configs/pxa_defconfig @@ -215,8 +215,6 @@ CONFIG_IIO=m CONFIG_AD5446=m CONFIG_EEPROM_AT24=m CONFIG_SENSORS_LIS3_SPI=m -CONFIG_IDE=m -CONFIG_BLK_DEV_IDECS=m CONFIG_SCSI=y CONFIG_BLK_DEV_SD=m CONFIG_CHR_DEV_ST=m diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile index 8f26c454ea12..eafa898ba6a7 100644 --- a/arch/arm/crypto/Makefile +++ b/arch/arm/crypto/Makefile @@ -45,20 +45,12 @@ poly1305-arm-y := poly1305-core.o poly1305-glue.o nhpoly1305-neon-y := nh-neon-core.o nhpoly1305-neon-glue.o curve25519-neon-y := curve25519-core.o curve25519-glue.o -ifdef REGENERATE_ARM_CRYPTO quiet_cmd_perl = PERL $@ cmd_perl = $(PERL) $(<) > $(@) -$(src)/poly1305-core.S_shipped: $(src)/poly1305-armv4.pl +$(obj)/%-core.S: $(src)/%-armv4.pl $(call cmd,perl) -$(src)/sha256-core.S_shipped: $(src)/sha256-armv4.pl - $(call cmd,perl) - -$(src)/sha512-core.S_shipped: $(src)/sha512-armv4.pl - $(call cmd,perl) -endif - clean-files += poly1305-core.S sha256-core.S sha512-core.S # massage the perlasm code a bit so we only get the NEON routine if we need it diff --git a/arch/arm/crypto/poly1305-core.S_shipped b/arch/arm/crypto/poly1305-core.S_shipped deleted file mode 100644 index 37b71d990293..000000000000 --- a/arch/arm/crypto/poly1305-core.S_shipped +++ /dev/null @@ -1,1158 +0,0 @@ -#ifndef __KERNEL__ -# include "arm_arch.h" -#else -# define __ARM_ARCH__ __LINUX_ARM_ARCH__ -# define __ARM_MAX_ARCH__ __LINUX_ARM_ARCH__ -# define poly1305_init poly1305_init_arm -# define poly1305_blocks poly1305_blocks_arm -# define poly1305_emit poly1305_emit_arm -.globl poly1305_blocks_neon -#endif - -#if defined(__thumb2__) -.syntax unified -.thumb -#else -.code 32 -#endif - -.text - -.globl poly1305_emit -.globl poly1305_blocks -.globl poly1305_init -.type poly1305_init,%function -.align 5 -poly1305_init: -.Lpoly1305_init: - stmdb sp!,{r4-r11} - - eor r3,r3,r3 - cmp r1,#0 - str r3,[r0,#0] @ zero hash value - str r3,[r0,#4] - str r3,[r0,#8] - str r3,[r0,#12] - str r3,[r0,#16] - str r3,[r0,#36] @ clear is_base2_26 - add r0,r0,#20 - -#ifdef __thumb2__ - it eq -#endif - moveq r0,#0 - beq .Lno_key - -#if __ARM_MAX_ARCH__>=7 - mov r3,#-1 - str r3,[r0,#28] @ impossible key power value -# ifndef __KERNEL__ - adr r11,.Lpoly1305_init - ldr r12,.LOPENSSL_armcap -# endif -#endif - ldrb r4,[r1,#0] - mov r10,#0x0fffffff - ldrb r5,[r1,#1] - and r3,r10,#-4 @ 0x0ffffffc - ldrb r6,[r1,#2] - ldrb r7,[r1,#3] - orr r4,r4,r5,lsl#8 - ldrb r5,[r1,#4] - orr r4,r4,r6,lsl#16 - ldrb r6,[r1,#5] - orr r4,r4,r7,lsl#24 - ldrb r7,[r1,#6] - and r4,r4,r10 - -#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) -# if !defined(_WIN32) - ldr r12,[r11,r12] @ OPENSSL_armcap_P -# endif -# if defined(__APPLE__) || defined(_WIN32) - ldr r12,[r12] -# endif -#endif - ldrb r8,[r1,#7] - orr r5,r5,r6,lsl#8 - ldrb r6,[r1,#8] - orr r5,r5,r7,lsl#16 - ldrb r7,[r1,#9] - orr r5,r5,r8,lsl#24 - ldrb r8,[r1,#10] - and r5,r5,r3 - -#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) - tst r12,#ARMV7_NEON @ check for NEON -# ifdef __thumb2__ - adr r9,.Lpoly1305_blocks_neon - adr r11,.Lpoly1305_blocks - it ne - movne r11,r9 - adr r12,.Lpoly1305_emit - orr r11,r11,#1 @ thumb-ify addresses - orr r12,r12,#1 -# else - add r12,r11,#(.Lpoly1305_emit-.Lpoly1305_init) - ite eq - addeq r11,r11,#(.Lpoly1305_blocks-.Lpoly1305_init) - addne r11,r11,#(.Lpoly1305_blocks_neon-.Lpoly1305_init) 
-# endif -#endif - ldrb r9,[r1,#11] - orr r6,r6,r7,lsl#8 - ldrb r7,[r1,#12] - orr r6,r6,r8,lsl#16 - ldrb r8,[r1,#13] - orr r6,r6,r9,lsl#24 - ldrb r9,[r1,#14] - and r6,r6,r3 - - ldrb r10,[r1,#15] - orr r7,r7,r8,lsl#8 - str r4,[r0,#0] - orr r7,r7,r9,lsl#16 - str r5,[r0,#4] - orr r7,r7,r10,lsl#24 - str r6,[r0,#8] - and r7,r7,r3 - str r7,[r0,#12] -#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) - stmia r2,{r11,r12} @ fill functions table - mov r0,#1 -#else - mov r0,#0 -#endif -.Lno_key: - ldmia sp!,{r4-r11} -#if __ARM_ARCH__>=5 - bx lr @ bx lr -#else - tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet - .word 0xe12fff1e @ interoperable with Thumb ISA:-) -#endif -.size poly1305_init,.-poly1305_init -.type poly1305_blocks,%function -.align 5 -poly1305_blocks: -.Lpoly1305_blocks: - stmdb sp!,{r3-r11,lr} - - ands r2,r2,#-16 - beq .Lno_data - - add r2,r2,r1 @ end pointer - sub sp,sp,#32 - -#if __ARM_ARCH__<7 - ldmia r0,{r4-r12} @ load context - add r0,r0,#20 - str r2,[sp,#16] @ offload stuff - str r0,[sp,#12] -#else - ldr lr,[r0,#36] @ is_base2_26 - ldmia r0!,{r4-r8} @ load hash value - str r2,[sp,#16] @ offload stuff - str r0,[sp,#12] - - adds r9,r4,r5,lsl#26 @ base 2^26 -> base 2^32 - mov r10,r5,lsr#6 - adcs r10,r10,r6,lsl#20 - mov r11,r6,lsr#12 - adcs r11,r11,r7,lsl#14 - mov r12,r7,lsr#18 - adcs r12,r12,r8,lsl#8 - mov r2,#0 - teq lr,#0 - str r2,[r0,#16] @ clear is_base2_26 - adc r2,r2,r8,lsr#24 - - itttt ne - movne r4,r9 @ choose between radixes - movne r5,r10 - movne r6,r11 - movne r7,r12 - ldmia r0,{r9-r12} @ load key - it ne - movne r8,r2 -#endif - - mov lr,r1 - cmp r3,#0 - str r10,[sp,#20] - str r11,[sp,#24] - str r12,[sp,#28] - b .Loop - -.align 4 -.Loop: -#if __ARM_ARCH__<7 - ldrb r0,[lr],#16 @ load input -# ifdef __thumb2__ - it hi -# endif - addhi r8,r8,#1 @ 1<<128 - ldrb r1,[lr,#-15] - ldrb r2,[lr,#-14] - ldrb r3,[lr,#-13] - orr r1,r0,r1,lsl#8 - ldrb r0,[lr,#-12] - orr r2,r1,r2,lsl#16 - ldrb r1,[lr,#-11] - orr r3,r2,r3,lsl#24 - ldrb r2,[lr,#-10] - adds r4,r4,r3 @ accumulate input - - ldrb r3,[lr,#-9] - orr r1,r0,r1,lsl#8 - ldrb r0,[lr,#-8] - orr r2,r1,r2,lsl#16 - ldrb r1,[lr,#-7] - orr r3,r2,r3,lsl#24 - ldrb r2,[lr,#-6] - adcs r5,r5,r3 - - ldrb r3,[lr,#-5] - orr r1,r0,r1,lsl#8 - ldrb r0,[lr,#-4] - orr r2,r1,r2,lsl#16 - ldrb r1,[lr,#-3] - orr r3,r2,r3,lsl#24 - ldrb r2,[lr,#-2] - adcs r6,r6,r3 - - ldrb r3,[lr,#-1] - orr r1,r0,r1,lsl#8 - str lr,[sp,#8] @ offload input pointer - orr r2,r1,r2,lsl#16 - add r10,r10,r10,lsr#2 - orr r3,r2,r3,lsl#24 -#else - ldr r0,[lr],#16 @ load input - it hi - addhi r8,r8,#1 @ padbit - ldr r1,[lr,#-12] - ldr r2,[lr,#-8] - ldr r3,[lr,#-4] -# ifdef __ARMEB__ - rev r0,r0 - rev r1,r1 - rev r2,r2 - rev r3,r3 -# endif - adds r4,r4,r0 @ accumulate input - str lr,[sp,#8] @ offload input pointer - adcs r5,r5,r1 - add r10,r10,r10,lsr#2 - adcs r6,r6,r2 -#endif - add r11,r11,r11,lsr#2 - adcs r7,r7,r3 - add r12,r12,r12,lsr#2 - - umull r2,r3,r5,r9 - adc r8,r8,#0 - umull r0,r1,r4,r9 - umlal r2,r3,r8,r10 - umlal r0,r1,r7,r10 - ldr r10,[sp,#20] @ reload r10 - umlal r2,r3,r6,r12 - umlal r0,r1,r5,r12 - umlal r2,r3,r7,r11 - umlal r0,r1,r6,r11 - umlal r2,r3,r4,r10 - str r0,[sp,#0] @ future r4 - mul r0,r11,r8 - ldr r11,[sp,#24] @ reload r11 - adds r2,r2,r1 @ d1+=d0>>32 - eor r1,r1,r1 - adc lr,r3,#0 @ future r6 - str r2,[sp,#4] @ future r5 - - mul r2,r12,r8 - eor r3,r3,r3 - umlal r0,r1,r7,r12 - ldr r12,[sp,#28] @ reload r12 - umlal r2,r3,r7,r9 - umlal r0,r1,r6,r9 - umlal r2,r3,r6,r10 - umlal r0,r1,r5,r10 - umlal r2,r3,r5,r11 - umlal r0,r1,r4,r11 - umlal r2,r3,r4,r12 - ldr 
r4,[sp,#0] - mul r8,r9,r8 - ldr r5,[sp,#4] - - adds r6,lr,r0 @ d2+=d1>>32 - ldr lr,[sp,#8] @ reload input pointer - adc r1,r1,#0 - adds r7,r2,r1 @ d3+=d2>>32 - ldr r0,[sp,#16] @ reload end pointer - adc r3,r3,#0 - add r8,r8,r3 @ h4+=d3>>32 - - and r1,r8,#-4 - and r8,r8,#3 - add r1,r1,r1,lsr#2 @ *=5 - adds r4,r4,r1 - adcs r5,r5,#0 - adcs r6,r6,#0 - adcs r7,r7,#0 - adc r8,r8,#0 - - cmp r0,lr @ done yet? - bhi .Loop - - ldr r0,[sp,#12] - add sp,sp,#32 - stmdb r0,{r4-r8} @ store the result - -.Lno_data: -#if __ARM_ARCH__>=5 - ldmia sp!,{r3-r11,pc} -#else - ldmia sp!,{r3-r11,lr} - tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet - .word 0xe12fff1e @ interoperable with Thumb ISA:-) -#endif -.size poly1305_blocks,.-poly1305_blocks -.type poly1305_emit,%function -.align 5 -poly1305_emit: -.Lpoly1305_emit: - stmdb sp!,{r4-r11} - - ldmia r0,{r3-r7} - -#if __ARM_ARCH__>=7 - ldr ip,[r0,#36] @ is_base2_26 - - adds r8,r3,r4,lsl#26 @ base 2^26 -> base 2^32 - mov r9,r4,lsr#6 - adcs r9,r9,r5,lsl#20 - mov r10,r5,lsr#12 - adcs r10,r10,r6,lsl#14 - mov r11,r6,lsr#18 - adcs r11,r11,r7,lsl#8 - mov r0,#0 - adc r0,r0,r7,lsr#24 - - tst ip,ip - itttt ne - movne r3,r8 - movne r4,r9 - movne r5,r10 - movne r6,r11 - it ne - movne r7,r0 -#endif - - adds r8,r3,#5 @ compare to modulus - adcs r9,r4,#0 - adcs r10,r5,#0 - adcs r11,r6,#0 - adc r0,r7,#0 - tst r0,#4 @ did it carry/borrow? - -#ifdef __thumb2__ - it ne -#endif - movne r3,r8 - ldr r8,[r2,#0] -#ifdef __thumb2__ - it ne -#endif - movne r4,r9 - ldr r9,[r2,#4] -#ifdef __thumb2__ - it ne -#endif - movne r5,r10 - ldr r10,[r2,#8] -#ifdef __thumb2__ - it ne -#endif - movne r6,r11 - ldr r11,[r2,#12] - - adds r3,r3,r8 - adcs r4,r4,r9 - adcs r5,r5,r10 - adc r6,r6,r11 - -#if __ARM_ARCH__>=7 -# ifdef __ARMEB__ - rev r3,r3 - rev r4,r4 - rev r5,r5 - rev r6,r6 -# endif - str r3,[r1,#0] - str r4,[r1,#4] - str r5,[r1,#8] - str r6,[r1,#12] -#else - strb r3,[r1,#0] - mov r3,r3,lsr#8 - strb r4,[r1,#4] - mov r4,r4,lsr#8 - strb r5,[r1,#8] - mov r5,r5,lsr#8 - strb r6,[r1,#12] - mov r6,r6,lsr#8 - - strb r3,[r1,#1] - mov r3,r3,lsr#8 - strb r4,[r1,#5] - mov r4,r4,lsr#8 - strb r5,[r1,#9] - mov r5,r5,lsr#8 - strb r6,[r1,#13] - mov r6,r6,lsr#8 - - strb r3,[r1,#2] - mov r3,r3,lsr#8 - strb r4,[r1,#6] - mov r4,r4,lsr#8 - strb r5,[r1,#10] - mov r5,r5,lsr#8 - strb r6,[r1,#14] - mov r6,r6,lsr#8 - - strb r3,[r1,#3] - strb r4,[r1,#7] - strb r5,[r1,#11] - strb r6,[r1,#15] -#endif - ldmia sp!,{r4-r11} -#if __ARM_ARCH__>=5 - bx lr @ bx lr -#else - tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet - .word 0xe12fff1e @ interoperable with Thumb ISA:-) -#endif -.size poly1305_emit,.-poly1305_emit -#if __ARM_MAX_ARCH__>=7 -.fpu neon - -.type poly1305_init_neon,%function -.align 5 -poly1305_init_neon: -.Lpoly1305_init_neon: - ldr r3,[r0,#48] @ first table element - cmp r3,#-1 @ is value impossible? 
- bne .Lno_init_neon - - ldr r4,[r0,#20] @ load key base 2^32 - ldr r5,[r0,#24] - ldr r6,[r0,#28] - ldr r7,[r0,#32] - - and r2,r4,#0x03ffffff @ base 2^32 -> base 2^26 - mov r3,r4,lsr#26 - mov r4,r5,lsr#20 - orr r3,r3,r5,lsl#6 - mov r5,r6,lsr#14 - orr r4,r4,r6,lsl#12 - mov r6,r7,lsr#8 - orr r5,r5,r7,lsl#18 - and r3,r3,#0x03ffffff - and r4,r4,#0x03ffffff - and r5,r5,#0x03ffffff - - vdup.32 d0,r2 @ r^1 in both lanes - add r2,r3,r3,lsl#2 @ *5 - vdup.32 d1,r3 - add r3,r4,r4,lsl#2 - vdup.32 d2,r2 - vdup.32 d3,r4 - add r4,r5,r5,lsl#2 - vdup.32 d4,r3 - vdup.32 d5,r5 - add r5,r6,r6,lsl#2 - vdup.32 d6,r4 - vdup.32 d7,r6 - vdup.32 d8,r5 - - mov r5,#2 @ counter - -.Lsquare_neon: - @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ - @ d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4 - @ d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4 - @ d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4 - @ d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4 - @ d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4 - - vmull.u32 q5,d0,d0[1] - vmull.u32 q6,d1,d0[1] - vmull.u32 q7,d3,d0[1] - vmull.u32 q8,d5,d0[1] - vmull.u32 q9,d7,d0[1] - - vmlal.u32 q5,d7,d2[1] - vmlal.u32 q6,d0,d1[1] - vmlal.u32 q7,d1,d1[1] - vmlal.u32 q8,d3,d1[1] - vmlal.u32 q9,d5,d1[1] - - vmlal.u32 q5,d5,d4[1] - vmlal.u32 q6,d7,d4[1] - vmlal.u32 q8,d1,d3[1] - vmlal.u32 q7,d0,d3[1] - vmlal.u32 q9,d3,d3[1] - - vmlal.u32 q5,d3,d6[1] - vmlal.u32 q8,d0,d5[1] - vmlal.u32 q6,d5,d6[1] - vmlal.u32 q7,d7,d6[1] - vmlal.u32 q9,d1,d5[1] - - vmlal.u32 q8,d7,d8[1] - vmlal.u32 q5,d1,d8[1] - vmlal.u32 q6,d3,d8[1] - vmlal.u32 q7,d5,d8[1] - vmlal.u32 q9,d0,d7[1] - - @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ - @ lazy reduction as discussed in "NEON crypto" by D.J. Bernstein - @ and P. Schwabe - @ - @ H0>>+H1>>+H2>>+H3>>+H4 - @ H3>>+H4>>*5+H0>>+H1 - @ - @ Trivia. - @ - @ Result of multiplication of n-bit number by m-bit number is - @ n+m bits wide. However! Even though 2^n is a n+1-bit number, - @ m-bit number multiplied by 2^n is still n+m bits wide. - @ - @ Sum of two n-bit numbers is n+1 bits wide, sum of three - n+2, - @ and so is sum of four. Sum of 2^m n-m-bit numbers and n-bit - @ one is n+1 bits wide. - @ - @ >>+ denotes Hnext += Hn>>26, Hn &= 0x3ffffff. This means that - @ H0, H2, H3 are guaranteed to be 26 bits wide, while H1 and H4 - @ can be 27. However! In cases when their width exceeds 26 bits - @ they are limited by 2^26+2^6. This in turn means that *sum* - @ of the products with these values can still be viewed as sum - @ of 52-bit numbers as long as the amount of addends is not a - @ power of 2. For example, - @ - @ H4 = H4*R0 + H3*R1 + H2*R2 + H1*R3 + H0 * R4, - @ - @ which can't be larger than 5 * (2^26 + 2^6) * (2^26 + 2^6), or - @ 5 * (2^52 + 2*2^32 + 2^12), which in turn is smaller than - @ 8 * (2^52) or 2^55. However, the value is then multiplied by - @ by 5, so we should be looking at 5 * 5 * (2^52 + 2^33 + 2^12), - @ which is less than 32 * (2^52) or 2^57. And when processing - @ data we are looking at triple as many addends... - @ - @ In key setup procedure pre-reduced H0 is limited by 5*4+1 and - @ 5*H4 - by 5*5 52-bit addends, or 57 bits. But when hashing the - @ input H0 is limited by (5*4+1)*3 addends, or 58 bits, while - @ 5*H4 by 5*5*3, or 59[!] bits. How is this relevant? vmlal.u32 - @ instruction accepts 2x32-bit input and writes 2x64-bit result. - @ This means that result of reduction have to be compressed upon - @ loop wrap-around. 
This can be done in the process of reduction - @ to minimize amount of instructions [as well as amount of - @ 128-bit instructions, which benefits low-end processors], but - @ one has to watch for H2 (which is narrower than H0) and 5*H4 - @ not being wider than 58 bits, so that result of right shift - @ by 26 bits fits in 32 bits. This is also useful on x86, - @ because it allows to use paddd in place for paddq, which - @ benefits Atom, where paddq is ridiculously slow. - - vshr.u64 q15,q8,#26 - vmovn.i64 d16,q8 - vshr.u64 q4,q5,#26 - vmovn.i64 d10,q5 - vadd.i64 q9,q9,q15 @ h3 -> h4 - vbic.i32 d16,#0xfc000000 @ &=0x03ffffff - vadd.i64 q6,q6,q4 @ h0 -> h1 - vbic.i32 d10,#0xfc000000 - - vshrn.u64 d30,q9,#26 - vmovn.i64 d18,q9 - vshr.u64 q4,q6,#26 - vmovn.i64 d12,q6 - vadd.i64 q7,q7,q4 @ h1 -> h2 - vbic.i32 d18,#0xfc000000 - vbic.i32 d12,#0xfc000000 - - vadd.i32 d10,d10,d30 - vshl.u32 d30,d30,#2 - vshrn.u64 d8,q7,#26 - vmovn.i64 d14,q7 - vadd.i32 d10,d10,d30 @ h4 -> h0 - vadd.i32 d16,d16,d8 @ h2 -> h3 - vbic.i32 d14,#0xfc000000 - - vshr.u32 d30,d10,#26 - vbic.i32 d10,#0xfc000000 - vshr.u32 d8,d16,#26 - vbic.i32 d16,#0xfc000000 - vadd.i32 d12,d12,d30 @ h0 -> h1 - vadd.i32 d18,d18,d8 @ h3 -> h4 - - subs r5,r5,#1 - beq .Lsquare_break_neon - - add r6,r0,#(48+0*9*4) - add r7,r0,#(48+1*9*4) - - vtrn.32 d0,d10 @ r^2:r^1 - vtrn.32 d3,d14 - vtrn.32 d5,d16 - vtrn.32 d1,d12 - vtrn.32 d7,d18 - - vshl.u32 d4,d3,#2 @ *5 - vshl.u32 d6,d5,#2 - vshl.u32 d2,d1,#2 - vshl.u32 d8,d7,#2 - vadd.i32 d4,d4,d3 - vadd.i32 d2,d2,d1 - vadd.i32 d6,d6,d5 - vadd.i32 d8,d8,d7 - - vst4.32 {d0[0],d1[0],d2[0],d3[0]},[r6]! - vst4.32 {d0[1],d1[1],d2[1],d3[1]},[r7]! - vst4.32 {d4[0],d5[0],d6[0],d7[0]},[r6]! - vst4.32 {d4[1],d5[1],d6[1],d7[1]},[r7]! - vst1.32 {d8[0]},[r6,:32] - vst1.32 {d8[1]},[r7,:32] - - b .Lsquare_neon - -.align 4 -.Lsquare_break_neon: - add r6,r0,#(48+2*4*9) - add r7,r0,#(48+3*4*9) - - vmov d0,d10 @ r^4:r^3 - vshl.u32 d2,d12,#2 @ *5 - vmov d1,d12 - vshl.u32 d4,d14,#2 - vmov d3,d14 - vshl.u32 d6,d16,#2 - vmov d5,d16 - vshl.u32 d8,d18,#2 - vmov d7,d18 - vadd.i32 d2,d2,d12 - vadd.i32 d4,d4,d14 - vadd.i32 d6,d6,d16 - vadd.i32 d8,d8,d18 - - vst4.32 {d0[0],d1[0],d2[0],d3[0]},[r6]! - vst4.32 {d0[1],d1[1],d2[1],d3[1]},[r7]! - vst4.32 {d4[0],d5[0],d6[0],d7[0]},[r6]! - vst4.32 {d4[1],d5[1],d6[1],d7[1]},[r7]! - vst1.32 {d8[0]},[r6] - vst1.32 {d8[1]},[r7] - -.Lno_init_neon: - bx lr @ bx lr -.size poly1305_init_neon,.-poly1305_init_neon - -.type poly1305_blocks_neon,%function -.align 5 -poly1305_blocks_neon: -.Lpoly1305_blocks_neon: - ldr ip,[r0,#36] @ is_base2_26 - - cmp r2,#64 - blo .Lpoly1305_blocks - - stmdb sp!,{r4-r7} - vstmdb sp!,{d8-d15} @ ABI specification says so - - tst ip,ip @ is_base2_26? 
- bne .Lbase2_26_neon - - stmdb sp!,{r1-r3,lr} - bl .Lpoly1305_init_neon - - ldr r4,[r0,#0] @ load hash value base 2^32 - ldr r5,[r0,#4] - ldr r6,[r0,#8] - ldr r7,[r0,#12] - ldr ip,[r0,#16] - - and r2,r4,#0x03ffffff @ base 2^32 -> base 2^26 - mov r3,r4,lsr#26 - veor d10,d10,d10 - mov r4,r5,lsr#20 - orr r3,r3,r5,lsl#6 - veor d12,d12,d12 - mov r5,r6,lsr#14 - orr r4,r4,r6,lsl#12 - veor d14,d14,d14 - mov r6,r7,lsr#8 - orr r5,r5,r7,lsl#18 - veor d16,d16,d16 - and r3,r3,#0x03ffffff - orr r6,r6,ip,lsl#24 - veor d18,d18,d18 - and r4,r4,#0x03ffffff - mov r1,#1 - and r5,r5,#0x03ffffff - str r1,[r0,#36] @ set is_base2_26 - - vmov.32 d10[0],r2 - vmov.32 d12[0],r3 - vmov.32 d14[0],r4 - vmov.32 d16[0],r5 - vmov.32 d18[0],r6 - adr r5,.Lzeros - - ldmia sp!,{r1-r3,lr} - b .Lhash_loaded - -.align 4 -.Lbase2_26_neon: - @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ - @ load hash value - - veor d10,d10,d10 - veor d12,d12,d12 - veor d14,d14,d14 - veor d16,d16,d16 - veor d18,d18,d18 - vld4.32 {d10[0],d12[0],d14[0],d16[0]},[r0]! - adr r5,.Lzeros - vld1.32 {d18[0]},[r0] - sub r0,r0,#16 @ rewind - -.Lhash_loaded: - add r4,r1,#32 - mov r3,r3,lsl#24 - tst r2,#31 - beq .Leven - - vld4.32 {d20[0],d22[0],d24[0],d26[0]},[r1]! - vmov.32 d28[0],r3 - sub r2,r2,#16 - add r4,r1,#32 - -# ifdef __ARMEB__ - vrev32.8 q10,q10 - vrev32.8 q13,q13 - vrev32.8 q11,q11 - vrev32.8 q12,q12 -# endif - vsri.u32 d28,d26,#8 @ base 2^32 -> base 2^26 - vshl.u32 d26,d26,#18 - - vsri.u32 d26,d24,#14 - vshl.u32 d24,d24,#12 - vadd.i32 d29,d28,d18 @ add hash value and move to #hi - - vbic.i32 d26,#0xfc000000 - vsri.u32 d24,d22,#20 - vshl.u32 d22,d22,#6 - - vbic.i32 d24,#0xfc000000 - vsri.u32 d22,d20,#26 - vadd.i32 d27,d26,d16 - - vbic.i32 d20,#0xfc000000 - vbic.i32 d22,#0xfc000000 - vadd.i32 d25,d24,d14 - - vadd.i32 d21,d20,d10 - vadd.i32 d23,d22,d12 - - mov r7,r5 - add r6,r0,#48 - - cmp r2,r2 - b .Long_tail - -.align 4 -.Leven: - subs r2,r2,#64 - it lo - movlo r4,r5 - - vmov.i32 q14,#1<<24 @ padbit, yes, always - vld4.32 {d20,d22,d24,d26},[r1] @ inp[0:1] - add r1,r1,#64 - vld4.32 {d21,d23,d25,d27},[r4] @ inp[2:3] (or 0) - add r4,r4,#64 - itt hi - addhi r7,r0,#(48+1*9*4) - addhi r6,r0,#(48+3*9*4) - -# ifdef __ARMEB__ - vrev32.8 q10,q10 - vrev32.8 q13,q13 - vrev32.8 q11,q11 - vrev32.8 q12,q12 -# endif - vsri.u32 q14,q13,#8 @ base 2^32 -> base 2^26 - vshl.u32 q13,q13,#18 - - vsri.u32 q13,q12,#14 - vshl.u32 q12,q12,#12 - - vbic.i32 q13,#0xfc000000 - vsri.u32 q12,q11,#20 - vshl.u32 q11,q11,#6 - - vbic.i32 q12,#0xfc000000 - vsri.u32 q11,q10,#26 - - vbic.i32 q10,#0xfc000000 - vbic.i32 q11,#0xfc000000 - - bls .Lskip_loop - - vld4.32 {d0[1],d1[1],d2[1],d3[1]},[r7]! @ load r^2 - vld4.32 {d0[0],d1[0],d2[0],d3[0]},[r6]! @ load r^4 - vld4.32 {d4[1],d5[1],d6[1],d7[1]},[r7]! - vld4.32 {d4[0],d5[0],d6[0],d7[0]},[r6]! - b .Loop_neon - -.align 5 -.Loop_neon: - @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ - @ ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2 - @ ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r - @ ___________________/ - @ ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2 - @ ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r - @ ___________________/ ____________________/ - @ - @ Note that we start with inp[2:3]*r^2. This is because it - @ doesn't depend on reduction in previous iteration. 
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ - @ d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4 - @ d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4 - @ d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4 - @ d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4 - @ d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4 - - @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ - @ inp[2:3]*r^2 - - vadd.i32 d24,d24,d14 @ accumulate inp[0:1] - vmull.u32 q7,d25,d0[1] - vadd.i32 d20,d20,d10 - vmull.u32 q5,d21,d0[1] - vadd.i32 d26,d26,d16 - vmull.u32 q8,d27,d0[1] - vmlal.u32 q7,d23,d1[1] - vadd.i32 d22,d22,d12 - vmull.u32 q6,d23,d0[1] - - vadd.i32 d28,d28,d18 - vmull.u32 q9,d29,d0[1] - subs r2,r2,#64 - vmlal.u32 q5,d29,d2[1] - it lo - movlo r4,r5 - vmlal.u32 q8,d25,d1[1] - vld1.32 d8[1],[r7,:32] - vmlal.u32 q6,d21,d1[1] - vmlal.u32 q9,d27,d1[1] - - vmlal.u32 q5,d27,d4[1] - vmlal.u32 q8,d23,d3[1] - vmlal.u32 q9,d25,d3[1] - vmlal.u32 q6,d29,d4[1] - vmlal.u32 q7,d21,d3[1] - - vmlal.u32 q8,d21,d5[1] - vmlal.u32 q5,d25,d6[1] - vmlal.u32 q9,d23,d5[1] - vmlal.u32 q6,d27,d6[1] - vmlal.u32 q7,d29,d6[1] - - vmlal.u32 q8,d29,d8[1] - vmlal.u32 q5,d23,d8[1] - vmlal.u32 q9,d21,d7[1] - vmlal.u32 q6,d25,d8[1] - vmlal.u32 q7,d27,d8[1] - - vld4.32 {d21,d23,d25,d27},[r4] @ inp[2:3] (or 0) - add r4,r4,#64 - - @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ - @ (hash+inp[0:1])*r^4 and accumulate - - vmlal.u32 q8,d26,d0[0] - vmlal.u32 q5,d20,d0[0] - vmlal.u32 q9,d28,d0[0] - vmlal.u32 q6,d22,d0[0] - vmlal.u32 q7,d24,d0[0] - vld1.32 d8[0],[r6,:32] - - vmlal.u32 q8,d24,d1[0] - vmlal.u32 q5,d28,d2[0] - vmlal.u32 q9,d26,d1[0] - vmlal.u32 q6,d20,d1[0] - vmlal.u32 q7,d22,d1[0] - - vmlal.u32 q8,d22,d3[0] - vmlal.u32 q5,d26,d4[0] - vmlal.u32 q9,d24,d3[0] - vmlal.u32 q6,d28,d4[0] - vmlal.u32 q7,d20,d3[0] - - vmlal.u32 q8,d20,d5[0] - vmlal.u32 q5,d24,d6[0] - vmlal.u32 q9,d22,d5[0] - vmlal.u32 q6,d26,d6[0] - vmlal.u32 q8,d28,d8[0] - - vmlal.u32 q7,d28,d6[0] - vmlal.u32 q5,d22,d8[0] - vmlal.u32 q9,d20,d7[0] - vmov.i32 q14,#1<<24 @ padbit, yes, always - vmlal.u32 q6,d24,d8[0] - vmlal.u32 q7,d26,d8[0] - - vld4.32 {d20,d22,d24,d26},[r1] @ inp[0:1] - add r1,r1,#64 -# ifdef __ARMEB__ - vrev32.8 q10,q10 - vrev32.8 q11,q11 - vrev32.8 q12,q12 - vrev32.8 q13,q13 -# endif - - @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ - @ lazy reduction interleaved with base 2^32 -> base 2^26 of - @ inp[0:3] previously loaded to q10-q13 and smashed to q10-q14. 
- - vshr.u64 q15,q8,#26 - vmovn.i64 d16,q8 - vshr.u64 q4,q5,#26 - vmovn.i64 d10,q5 - vadd.i64 q9,q9,q15 @ h3 -> h4 - vbic.i32 d16,#0xfc000000 - vsri.u32 q14,q13,#8 @ base 2^32 -> base 2^26 - vadd.i64 q6,q6,q4 @ h0 -> h1 - vshl.u32 q13,q13,#18 - vbic.i32 d10,#0xfc000000 - - vshrn.u64 d30,q9,#26 - vmovn.i64 d18,q9 - vshr.u64 q4,q6,#26 - vmovn.i64 d12,q6 - vadd.i64 q7,q7,q4 @ h1 -> h2 - vsri.u32 q13,q12,#14 - vbic.i32 d18,#0xfc000000 - vshl.u32 q12,q12,#12 - vbic.i32 d12,#0xfc000000 - - vadd.i32 d10,d10,d30 - vshl.u32 d30,d30,#2 - vbic.i32 q13,#0xfc000000 - vshrn.u64 d8,q7,#26 - vmovn.i64 d14,q7 - vaddl.u32 q5,d10,d30 @ h4 -> h0 [widen for a sec] - vsri.u32 q12,q11,#20 - vadd.i32 d16,d16,d8 @ h2 -> h3 - vshl.u32 q11,q11,#6 - vbic.i32 d14,#0xfc000000 - vbic.i32 q12,#0xfc000000 - - vshrn.u64 d30,q5,#26 @ re-narrow - vmovn.i64 d10,q5 - vsri.u32 q11,q10,#26 - vbic.i32 q10,#0xfc000000 - vshr.u32 d8,d16,#26 - vbic.i32 d16,#0xfc000000 - vbic.i32 d10,#0xfc000000 - vadd.i32 d12,d12,d30 @ h0 -> h1 - vadd.i32 d18,d18,d8 @ h3 -> h4 - vbic.i32 q11,#0xfc000000 - - bhi .Loop_neon - -.Lskip_loop: - @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ - @ multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1 - - add r7,r0,#(48+0*9*4) - add r6,r0,#(48+1*9*4) - adds r2,r2,#32 - it ne - movne r2,#0 - bne .Long_tail - - vadd.i32 d25,d24,d14 @ add hash value and move to #hi - vadd.i32 d21,d20,d10 - vadd.i32 d27,d26,d16 - vadd.i32 d23,d22,d12 - vadd.i32 d29,d28,d18 - -.Long_tail: - vld4.32 {d0[1],d1[1],d2[1],d3[1]},[r7]! @ load r^1 - vld4.32 {d0[0],d1[0],d2[0],d3[0]},[r6]! @ load r^2 - - vadd.i32 d24,d24,d14 @ can be redundant - vmull.u32 q7,d25,d0 - vadd.i32 d20,d20,d10 - vmull.u32 q5,d21,d0 - vadd.i32 d26,d26,d16 - vmull.u32 q8,d27,d0 - vadd.i32 d22,d22,d12 - vmull.u32 q6,d23,d0 - vadd.i32 d28,d28,d18 - vmull.u32 q9,d29,d0 - - vmlal.u32 q5,d29,d2 - vld4.32 {d4[1],d5[1],d6[1],d7[1]},[r7]! - vmlal.u32 q8,d25,d1 - vld4.32 {d4[0],d5[0],d6[0],d7[0]},[r6]! - vmlal.u32 q6,d21,d1 - vmlal.u32 q9,d27,d1 - vmlal.u32 q7,d23,d1 - - vmlal.u32 q8,d23,d3 - vld1.32 d8[1],[r7,:32] - vmlal.u32 q5,d27,d4 - vld1.32 d8[0],[r6,:32] - vmlal.u32 q9,d25,d3 - vmlal.u32 q6,d29,d4 - vmlal.u32 q7,d21,d3 - - vmlal.u32 q8,d21,d5 - it ne - addne r7,r0,#(48+2*9*4) - vmlal.u32 q5,d25,d6 - it ne - addne r6,r0,#(48+3*9*4) - vmlal.u32 q9,d23,d5 - vmlal.u32 q6,d27,d6 - vmlal.u32 q7,d29,d6 - - vmlal.u32 q8,d29,d8 - vorn q0,q0,q0 @ all-ones, can be redundant - vmlal.u32 q5,d23,d8 - vshr.u64 q0,q0,#38 - vmlal.u32 q9,d21,d7 - vmlal.u32 q6,d25,d8 - vmlal.u32 q7,d27,d8 - - beq .Lshort_tail - - @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ - @ (hash+inp[0:1])*r^4:r^3 and accumulate - - vld4.32 {d0[1],d1[1],d2[1],d3[1]},[r7]! @ load r^3 - vld4.32 {d0[0],d1[0],d2[0],d3[0]},[r6]! @ load r^4 - - vmlal.u32 q7,d24,d0 - vmlal.u32 q5,d20,d0 - vmlal.u32 q8,d26,d0 - vmlal.u32 q6,d22,d0 - vmlal.u32 q9,d28,d0 - - vmlal.u32 q5,d28,d2 - vld4.32 {d4[1],d5[1],d6[1],d7[1]},[r7]! - vmlal.u32 q8,d24,d1 - vld4.32 {d4[0],d5[0],d6[0],d7[0]},[r6]! 
- vmlal.u32 q6,d20,d1 - vmlal.u32 q9,d26,d1 - vmlal.u32 q7,d22,d1 - - vmlal.u32 q8,d22,d3 - vld1.32 d8[1],[r7,:32] - vmlal.u32 q5,d26,d4 - vld1.32 d8[0],[r6,:32] - vmlal.u32 q9,d24,d3 - vmlal.u32 q6,d28,d4 - vmlal.u32 q7,d20,d3 - - vmlal.u32 q8,d20,d5 - vmlal.u32 q5,d24,d6 - vmlal.u32 q9,d22,d5 - vmlal.u32 q6,d26,d6 - vmlal.u32 q7,d28,d6 - - vmlal.u32 q8,d28,d8 - vorn q0,q0,q0 @ all-ones - vmlal.u32 q5,d22,d8 - vshr.u64 q0,q0,#38 - vmlal.u32 q9,d20,d7 - vmlal.u32 q6,d24,d8 - vmlal.u32 q7,d26,d8 - -.Lshort_tail: - @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ - @ horizontal addition - - vadd.i64 d16,d16,d17 - vadd.i64 d10,d10,d11 - vadd.i64 d18,d18,d19 - vadd.i64 d12,d12,d13 - vadd.i64 d14,d14,d15 - - @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ - @ lazy reduction, but without narrowing - - vshr.u64 q15,q8,#26 - vand.i64 q8,q8,q0 - vshr.u64 q4,q5,#26 - vand.i64 q5,q5,q0 - vadd.i64 q9,q9,q15 @ h3 -> h4 - vadd.i64 q6,q6,q4 @ h0 -> h1 - - vshr.u64 q15,q9,#26 - vand.i64 q9,q9,q0 - vshr.u64 q4,q6,#26 - vand.i64 q6,q6,q0 - vadd.i64 q7,q7,q4 @ h1 -> h2 - - vadd.i64 q5,q5,q15 - vshl.u64 q15,q15,#2 - vshr.u64 q4,q7,#26 - vand.i64 q7,q7,q0 - vadd.i64 q5,q5,q15 @ h4 -> h0 - vadd.i64 q8,q8,q4 @ h2 -> h3 - - vshr.u64 q15,q5,#26 - vand.i64 q5,q5,q0 - vshr.u64 q4,q8,#26 - vand.i64 q8,q8,q0 - vadd.i64 q6,q6,q15 @ h0 -> h1 - vadd.i64 q9,q9,q4 @ h3 -> h4 - - cmp r2,#0 - bne .Leven - - @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ - @ store hash value - - vst4.32 {d10[0],d12[0],d14[0],d16[0]},[r0]! - vst1.32 {d18[0]},[r0] - - vldmia sp!,{d8-d15} @ epilogue - ldmia sp!,{r4-r7} - bx lr @ bx lr -.size poly1305_blocks_neon,.-poly1305_blocks_neon - -.align 5 -.Lzeros: -.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 -#ifndef __KERNEL__ -.LOPENSSL_armcap: -# ifdef _WIN32 -.word OPENSSL_armcap_P -# else -.word OPENSSL_armcap_P-.Lpoly1305_init -# endif -.comm OPENSSL_armcap_P,4,4 -.hidden OPENSSL_armcap_P -#endif -#endif -.asciz "Poly1305 for ARMv4/NEON, CRYPTOGAMS by @dot-asm" -.align 2 diff --git a/arch/arm/crypto/sha256-core.S_shipped b/arch/arm/crypto/sha256-core.S_shipped deleted file mode 100644 index 6363014a50d7..000000000000 --- a/arch/arm/crypto/sha256-core.S_shipped +++ /dev/null @@ -1,2816 +0,0 @@ -@ SPDX-License-Identifier: GPL-2.0 - -@ This code is taken from the OpenSSL project but the author (Andy Polyakov) -@ has relicensed it under the GPLv2. Therefore this program is free software; -@ you can redistribute it and/or modify it under the terms of the GNU General -@ Public License version 2 as published by the Free Software Foundation. -@ -@ The original headers, including the original license headers, are -@ included below for completeness. - -@ ==================================================================== -@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL -@ project. The module is, however, dual licensed under OpenSSL and -@ CRYPTOGAMS licenses depending on where you obtain it. For further -@ details see https://www.openssl.org/~appro/cryptogams/. -@ ==================================================================== - -@ SHA256 block procedure for ARMv4. May 2007. - -@ Performance is ~2x better than gcc 3.4 generated code and in "abso- -@ lute" terms is ~2250 cycles per 64-byte block or ~35 cycles per -@ byte [on single-issue Xscale PXA250 core]. - -@ July 2010. -@ -@ Rescheduling for dual-issue pipeline resulted in 22% improvement on -@ Cortex A8 core and ~20 cycles per processed byte. - -@ February 2011. 
-@ -@ Profiler-assisted and platform-specific optimization resulted in 16% -@ improvement on Cortex A8 core and ~15.4 cycles per processed byte. - -@ September 2013. -@ -@ Add NEON implementation. On Cortex A8 it was measured to process one -@ byte in 12.5 cycles or 23% faster than integer-only code. Snapdragon -@ S4 does it in 12.5 cycles too, but it's 50% faster than integer-only -@ code (meaning that latter performs sub-optimally, nothing was done -@ about it). - -@ May 2014. -@ -@ Add ARMv8 code path performing at 2.0 cpb on Apple A7. - -#ifndef __KERNEL__ -# include "arm_arch.h" -#else -# define __ARM_ARCH__ __LINUX_ARM_ARCH__ -# define __ARM_MAX_ARCH__ 7 -#endif - -.text -#if __ARM_ARCH__<7 -.code 32 -#else -.syntax unified -# ifdef __thumb2__ -.thumb -# else -.code 32 -# endif -#endif - -.type K256,%object -.align 5 -K256: -.word 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 -.word 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 -.word 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 -.word 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 -.word 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc -.word 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da -.word 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 -.word 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 -.word 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 -.word 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 -.word 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 -.word 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 -.word 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 -.word 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 -.word 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 -.word 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 -.size K256,.-K256 -.word 0 @ terminator -#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) -.LOPENSSL_armcap: -.word OPENSSL_armcap_P-sha256_block_data_order -#endif -.align 5 - -.global sha256_block_data_order -.type sha256_block_data_order,%function -sha256_block_data_order: -.Lsha256_block_data_order: -#if __ARM_ARCH__<7 - sub r3,pc,#8 @ sha256_block_data_order -#else - adr r3,.Lsha256_block_data_order -#endif -#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) - ldr r12,.LOPENSSL_armcap - ldr r12,[r3,r12] @ OPENSSL_armcap_P - tst r12,#ARMV8_SHA256 - bne .LARMv8 - tst r12,#ARMV7_NEON - bne .LNEON -#endif - add r2,r1,r2,lsl#6 @ len to point at the end of inp - stmdb sp!,{r0,r1,r2,r4-r11,lr} - ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11} - sub r14,r3,#256+32 @ K256 - sub sp,sp,#16*4 @ alloca(X[16]) -.Loop: -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 -# else - ldrb r2,[r1,#3] -# endif - eor r3,r5,r6 @ magic - eor r12,r12,r12 -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 0 -# if 0==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r8,r8,ror#5 - add r4,r4,r12 @ h+=Maj(a,b,c) from the past - eor r0,r0,r8,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 0 - add r4,r4,r12 @ h+=Maj(a,b,c) from the past - ldrb r12,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r12,lsl#8 - ldrb r12,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 0==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r8,r8,ror#5 - orr r2,r2,r12,lsl#24 - eor r0,r0,r8,ror#19 @ Sigma1(e) -#endif - ldr r12,[r14],#4 @ *K256++ - add r11,r11,r2 @ h+=X[i] - str r2,[sp,#0*4] - eor r2,r9,r10 - add r11,r11,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r8 - add r11,r11,r12 @ h+=K256[i] - eor r2,r2,r10 @ Ch(e,f,g) - eor r0,r4,r4,ror#11 - add r11,r11,r2 @ h+=Ch(e,f,g) -#if 0==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 0<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r4,r5 @ a^b, b^c in next round -#else - ldr r2,[sp,#2*4] @ from future BODY_16_xx - eor r12,r4,r5 @ a^b, b^c in next round - ldr r1,[sp,#15*4] @ from future BODY_16_xx -#endif - eor r0,r0,r4,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r7,r7,r11 @ d+=h - eor r3,r3,r5 @ Maj(a,b,c) - add r11,r11,r0,ror#2 @ h+=Sigma0(a) - @ add r11,r11,r3 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 1 -# if 1==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r7,r7,ror#5 - add r11,r11,r3 @ h+=Maj(a,b,c) from the past - eor r0,r0,r7,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 1 - add r11,r11,r3 @ h+=Maj(a,b,c) from the past - ldrb r3,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r3,lsl#8 - ldrb r3,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 1==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r7,r7,ror#5 - orr r2,r2,r3,lsl#24 - eor r0,r0,r7,ror#19 @ Sigma1(e) -#endif - ldr r3,[r14],#4 @ *K256++ - add r10,r10,r2 @ h+=X[i] - str r2,[sp,#1*4] - eor r2,r8,r9 - add r10,r10,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r7 - add r10,r10,r3 @ h+=K256[i] - eor r2,r2,r9 @ Ch(e,f,g) - eor r0,r11,r11,ror#11 - add r10,r10,r2 @ h+=Ch(e,f,g) -#if 1==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? -#endif -#if 1<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r11,r4 @ a^b, b^c in next round -#else - ldr r2,[sp,#3*4] @ from future BODY_16_xx - eor r3,r11,r4 @ a^b, b^c in next round - ldr r1,[sp,#0*4] @ from future BODY_16_xx -#endif - eor r0,r0,r11,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r6,r6,r10 @ d+=h - eor r12,r12,r4 @ Maj(a,b,c) - add r10,r10,r0,ror#2 @ h+=Sigma0(a) - @ add r10,r10,r12 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 2 -# if 2==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r6,r6,ror#5 - add r10,r10,r12 @ h+=Maj(a,b,c) from the past - eor r0,r0,r6,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 2 - add r10,r10,r12 @ h+=Maj(a,b,c) from the past - ldrb r12,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r12,lsl#8 - ldrb r12,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 2==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r6,r6,ror#5 - orr r2,r2,r12,lsl#24 - eor r0,r0,r6,ror#19 @ Sigma1(e) -#endif - ldr r12,[r14],#4 @ *K256++ - add r9,r9,r2 @ h+=X[i] - str r2,[sp,#2*4] - eor r2,r7,r8 - add r9,r9,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r6 - add r9,r9,r12 @ h+=K256[i] - eor r2,r2,r8 @ Ch(e,f,g) - eor r0,r10,r10,ror#11 - add r9,r9,r2 @ h+=Ch(e,f,g) -#if 2==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 2<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r10,r11 @ a^b, b^c in next round -#else - ldr r2,[sp,#4*4] @ from future BODY_16_xx - eor r12,r10,r11 @ a^b, b^c in next round - ldr r1,[sp,#1*4] @ from future BODY_16_xx -#endif - eor r0,r0,r10,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r5,r5,r9 @ d+=h - eor r3,r3,r11 @ Maj(a,b,c) - add r9,r9,r0,ror#2 @ h+=Sigma0(a) - @ add r9,r9,r3 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 3 -# if 3==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r5,r5,ror#5 - add r9,r9,r3 @ h+=Maj(a,b,c) from the past - eor r0,r0,r5,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 3 - add r9,r9,r3 @ h+=Maj(a,b,c) from the past - ldrb r3,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r3,lsl#8 - ldrb r3,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 3==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r5,r5,ror#5 - orr r2,r2,r3,lsl#24 - eor r0,r0,r5,ror#19 @ Sigma1(e) -#endif - ldr r3,[r14],#4 @ *K256++ - add r8,r8,r2 @ h+=X[i] - str r2,[sp,#3*4] - eor r2,r6,r7 - add r8,r8,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r5 - add r8,r8,r3 @ h+=K256[i] - eor r2,r2,r7 @ Ch(e,f,g) - eor r0,r9,r9,ror#11 - add r8,r8,r2 @ h+=Ch(e,f,g) -#if 3==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? -#endif -#if 3<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r9,r10 @ a^b, b^c in next round -#else - ldr r2,[sp,#5*4] @ from future BODY_16_xx - eor r3,r9,r10 @ a^b, b^c in next round - ldr r1,[sp,#2*4] @ from future BODY_16_xx -#endif - eor r0,r0,r9,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r4,r4,r8 @ d+=h - eor r12,r12,r10 @ Maj(a,b,c) - add r8,r8,r0,ror#2 @ h+=Sigma0(a) - @ add r8,r8,r12 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 4 -# if 4==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r4,r4,ror#5 - add r8,r8,r12 @ h+=Maj(a,b,c) from the past - eor r0,r0,r4,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 4 - add r8,r8,r12 @ h+=Maj(a,b,c) from the past - ldrb r12,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r12,lsl#8 - ldrb r12,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 4==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r4,r4,ror#5 - orr r2,r2,r12,lsl#24 - eor r0,r0,r4,ror#19 @ Sigma1(e) -#endif - ldr r12,[r14],#4 @ *K256++ - add r7,r7,r2 @ h+=X[i] - str r2,[sp,#4*4] - eor r2,r5,r6 - add r7,r7,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r4 - add r7,r7,r12 @ h+=K256[i] - eor r2,r2,r6 @ Ch(e,f,g) - eor r0,r8,r8,ror#11 - add r7,r7,r2 @ h+=Ch(e,f,g) -#if 4==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 4<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r8,r9 @ a^b, b^c in next round -#else - ldr r2,[sp,#6*4] @ from future BODY_16_xx - eor r12,r8,r9 @ a^b, b^c in next round - ldr r1,[sp,#3*4] @ from future BODY_16_xx -#endif - eor r0,r0,r8,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r11,r11,r7 @ d+=h - eor r3,r3,r9 @ Maj(a,b,c) - add r7,r7,r0,ror#2 @ h+=Sigma0(a) - @ add r7,r7,r3 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 5 -# if 5==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r11,r11,ror#5 - add r7,r7,r3 @ h+=Maj(a,b,c) from the past - eor r0,r0,r11,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 5 - add r7,r7,r3 @ h+=Maj(a,b,c) from the past - ldrb r3,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r3,lsl#8 - ldrb r3,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 5==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r11,r11,ror#5 - orr r2,r2,r3,lsl#24 - eor r0,r0,r11,ror#19 @ Sigma1(e) -#endif - ldr r3,[r14],#4 @ *K256++ - add r6,r6,r2 @ h+=X[i] - str r2,[sp,#5*4] - eor r2,r4,r5 - add r6,r6,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r11 - add r6,r6,r3 @ h+=K256[i] - eor r2,r2,r5 @ Ch(e,f,g) - eor r0,r7,r7,ror#11 - add r6,r6,r2 @ h+=Ch(e,f,g) -#if 5==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? -#endif -#if 5<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r7,r8 @ a^b, b^c in next round -#else - ldr r2,[sp,#7*4] @ from future BODY_16_xx - eor r3,r7,r8 @ a^b, b^c in next round - ldr r1,[sp,#4*4] @ from future BODY_16_xx -#endif - eor r0,r0,r7,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r10,r10,r6 @ d+=h - eor r12,r12,r8 @ Maj(a,b,c) - add r6,r6,r0,ror#2 @ h+=Sigma0(a) - @ add r6,r6,r12 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 6 -# if 6==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r10,r10,ror#5 - add r6,r6,r12 @ h+=Maj(a,b,c) from the past - eor r0,r0,r10,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 6 - add r6,r6,r12 @ h+=Maj(a,b,c) from the past - ldrb r12,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r12,lsl#8 - ldrb r12,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 6==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r10,r10,ror#5 - orr r2,r2,r12,lsl#24 - eor r0,r0,r10,ror#19 @ Sigma1(e) -#endif - ldr r12,[r14],#4 @ *K256++ - add r5,r5,r2 @ h+=X[i] - str r2,[sp,#6*4] - eor r2,r11,r4 - add r5,r5,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r10 - add r5,r5,r12 @ h+=K256[i] - eor r2,r2,r4 @ Ch(e,f,g) - eor r0,r6,r6,ror#11 - add r5,r5,r2 @ h+=Ch(e,f,g) -#if 6==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 6<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r6,r7 @ a^b, b^c in next round -#else - ldr r2,[sp,#8*4] @ from future BODY_16_xx - eor r12,r6,r7 @ a^b, b^c in next round - ldr r1,[sp,#5*4] @ from future BODY_16_xx -#endif - eor r0,r0,r6,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r9,r9,r5 @ d+=h - eor r3,r3,r7 @ Maj(a,b,c) - add r5,r5,r0,ror#2 @ h+=Sigma0(a) - @ add r5,r5,r3 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 7 -# if 7==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r9,r9,ror#5 - add r5,r5,r3 @ h+=Maj(a,b,c) from the past - eor r0,r0,r9,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 7 - add r5,r5,r3 @ h+=Maj(a,b,c) from the past - ldrb r3,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r3,lsl#8 - ldrb r3,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 7==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r9,r9,ror#5 - orr r2,r2,r3,lsl#24 - eor r0,r0,r9,ror#19 @ Sigma1(e) -#endif - ldr r3,[r14],#4 @ *K256++ - add r4,r4,r2 @ h+=X[i] - str r2,[sp,#7*4] - eor r2,r10,r11 - add r4,r4,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r9 - add r4,r4,r3 @ h+=K256[i] - eor r2,r2,r11 @ Ch(e,f,g) - eor r0,r5,r5,ror#11 - add r4,r4,r2 @ h+=Ch(e,f,g) -#if 7==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? -#endif -#if 7<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r5,r6 @ a^b, b^c in next round -#else - ldr r2,[sp,#9*4] @ from future BODY_16_xx - eor r3,r5,r6 @ a^b, b^c in next round - ldr r1,[sp,#6*4] @ from future BODY_16_xx -#endif - eor r0,r0,r5,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r8,r8,r4 @ d+=h - eor r12,r12,r6 @ Maj(a,b,c) - add r4,r4,r0,ror#2 @ h+=Sigma0(a) - @ add r4,r4,r12 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 8 -# if 8==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r8,r8,ror#5 - add r4,r4,r12 @ h+=Maj(a,b,c) from the past - eor r0,r0,r8,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 8 - add r4,r4,r12 @ h+=Maj(a,b,c) from the past - ldrb r12,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r12,lsl#8 - ldrb r12,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 8==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r8,r8,ror#5 - orr r2,r2,r12,lsl#24 - eor r0,r0,r8,ror#19 @ Sigma1(e) -#endif - ldr r12,[r14],#4 @ *K256++ - add r11,r11,r2 @ h+=X[i] - str r2,[sp,#8*4] - eor r2,r9,r10 - add r11,r11,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r8 - add r11,r11,r12 @ h+=K256[i] - eor r2,r2,r10 @ Ch(e,f,g) - eor r0,r4,r4,ror#11 - add r11,r11,r2 @ h+=Ch(e,f,g) -#if 8==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 8<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r4,r5 @ a^b, b^c in next round -#else - ldr r2,[sp,#10*4] @ from future BODY_16_xx - eor r12,r4,r5 @ a^b, b^c in next round - ldr r1,[sp,#7*4] @ from future BODY_16_xx -#endif - eor r0,r0,r4,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r7,r7,r11 @ d+=h - eor r3,r3,r5 @ Maj(a,b,c) - add r11,r11,r0,ror#2 @ h+=Sigma0(a) - @ add r11,r11,r3 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 9 -# if 9==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r7,r7,ror#5 - add r11,r11,r3 @ h+=Maj(a,b,c) from the past - eor r0,r0,r7,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 9 - add r11,r11,r3 @ h+=Maj(a,b,c) from the past - ldrb r3,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r3,lsl#8 - ldrb r3,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 9==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r7,r7,ror#5 - orr r2,r2,r3,lsl#24 - eor r0,r0,r7,ror#19 @ Sigma1(e) -#endif - ldr r3,[r14],#4 @ *K256++ - add r10,r10,r2 @ h+=X[i] - str r2,[sp,#9*4] - eor r2,r8,r9 - add r10,r10,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r7 - add r10,r10,r3 @ h+=K256[i] - eor r2,r2,r9 @ Ch(e,f,g) - eor r0,r11,r11,ror#11 - add r10,r10,r2 @ h+=Ch(e,f,g) -#if 9==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? -#endif -#if 9<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r11,r4 @ a^b, b^c in next round -#else - ldr r2,[sp,#11*4] @ from future BODY_16_xx - eor r3,r11,r4 @ a^b, b^c in next round - ldr r1,[sp,#8*4] @ from future BODY_16_xx -#endif - eor r0,r0,r11,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r6,r6,r10 @ d+=h - eor r12,r12,r4 @ Maj(a,b,c) - add r10,r10,r0,ror#2 @ h+=Sigma0(a) - @ add r10,r10,r12 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 10 -# if 10==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r6,r6,ror#5 - add r10,r10,r12 @ h+=Maj(a,b,c) from the past - eor r0,r0,r6,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 10 - add r10,r10,r12 @ h+=Maj(a,b,c) from the past - ldrb r12,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r12,lsl#8 - ldrb r12,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 10==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r6,r6,ror#5 - orr r2,r2,r12,lsl#24 - eor r0,r0,r6,ror#19 @ Sigma1(e) -#endif - ldr r12,[r14],#4 @ *K256++ - add r9,r9,r2 @ h+=X[i] - str r2,[sp,#10*4] - eor r2,r7,r8 - add r9,r9,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r6 - add r9,r9,r12 @ h+=K256[i] - eor r2,r2,r8 @ Ch(e,f,g) - eor r0,r10,r10,ror#11 - add r9,r9,r2 @ h+=Ch(e,f,g) -#if 10==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 10<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r10,r11 @ a^b, b^c in next round -#else - ldr r2,[sp,#12*4] @ from future BODY_16_xx - eor r12,r10,r11 @ a^b, b^c in next round - ldr r1,[sp,#9*4] @ from future BODY_16_xx -#endif - eor r0,r0,r10,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r5,r5,r9 @ d+=h - eor r3,r3,r11 @ Maj(a,b,c) - add r9,r9,r0,ror#2 @ h+=Sigma0(a) - @ add r9,r9,r3 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 11 -# if 11==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r5,r5,ror#5 - add r9,r9,r3 @ h+=Maj(a,b,c) from the past - eor r0,r0,r5,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 11 - add r9,r9,r3 @ h+=Maj(a,b,c) from the past - ldrb r3,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r3,lsl#8 - ldrb r3,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 11==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r5,r5,ror#5 - orr r2,r2,r3,lsl#24 - eor r0,r0,r5,ror#19 @ Sigma1(e) -#endif - ldr r3,[r14],#4 @ *K256++ - add r8,r8,r2 @ h+=X[i] - str r2,[sp,#11*4] - eor r2,r6,r7 - add r8,r8,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r5 - add r8,r8,r3 @ h+=K256[i] - eor r2,r2,r7 @ Ch(e,f,g) - eor r0,r9,r9,ror#11 - add r8,r8,r2 @ h+=Ch(e,f,g) -#if 11==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? -#endif -#if 11<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r9,r10 @ a^b, b^c in next round -#else - ldr r2,[sp,#13*4] @ from future BODY_16_xx - eor r3,r9,r10 @ a^b, b^c in next round - ldr r1,[sp,#10*4] @ from future BODY_16_xx -#endif - eor r0,r0,r9,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r4,r4,r8 @ d+=h - eor r12,r12,r10 @ Maj(a,b,c) - add r8,r8,r0,ror#2 @ h+=Sigma0(a) - @ add r8,r8,r12 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 12 -# if 12==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r4,r4,ror#5 - add r8,r8,r12 @ h+=Maj(a,b,c) from the past - eor r0,r0,r4,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 12 - add r8,r8,r12 @ h+=Maj(a,b,c) from the past - ldrb r12,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r12,lsl#8 - ldrb r12,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 12==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r4,r4,ror#5 - orr r2,r2,r12,lsl#24 - eor r0,r0,r4,ror#19 @ Sigma1(e) -#endif - ldr r12,[r14],#4 @ *K256++ - add r7,r7,r2 @ h+=X[i] - str r2,[sp,#12*4] - eor r2,r5,r6 - add r7,r7,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r4 - add r7,r7,r12 @ h+=K256[i] - eor r2,r2,r6 @ Ch(e,f,g) - eor r0,r8,r8,ror#11 - add r7,r7,r2 @ h+=Ch(e,f,g) -#if 12==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 12<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r8,r9 @ a^b, b^c in next round -#else - ldr r2,[sp,#14*4] @ from future BODY_16_xx - eor r12,r8,r9 @ a^b, b^c in next round - ldr r1,[sp,#11*4] @ from future BODY_16_xx -#endif - eor r0,r0,r8,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r11,r11,r7 @ d+=h - eor r3,r3,r9 @ Maj(a,b,c) - add r7,r7,r0,ror#2 @ h+=Sigma0(a) - @ add r7,r7,r3 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 13 -# if 13==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r11,r11,ror#5 - add r7,r7,r3 @ h+=Maj(a,b,c) from the past - eor r0,r0,r11,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 13 - add r7,r7,r3 @ h+=Maj(a,b,c) from the past - ldrb r3,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r3,lsl#8 - ldrb r3,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 13==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r11,r11,ror#5 - orr r2,r2,r3,lsl#24 - eor r0,r0,r11,ror#19 @ Sigma1(e) -#endif - ldr r3,[r14],#4 @ *K256++ - add r6,r6,r2 @ h+=X[i] - str r2,[sp,#13*4] - eor r2,r4,r5 - add r6,r6,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r11 - add r6,r6,r3 @ h+=K256[i] - eor r2,r2,r5 @ Ch(e,f,g) - eor r0,r7,r7,ror#11 - add r6,r6,r2 @ h+=Ch(e,f,g) -#if 13==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? -#endif -#if 13<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r7,r8 @ a^b, b^c in next round -#else - ldr r2,[sp,#15*4] @ from future BODY_16_xx - eor r3,r7,r8 @ a^b, b^c in next round - ldr r1,[sp,#12*4] @ from future BODY_16_xx -#endif - eor r0,r0,r7,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r10,r10,r6 @ d+=h - eor r12,r12,r8 @ Maj(a,b,c) - add r6,r6,r0,ror#2 @ h+=Sigma0(a) - @ add r6,r6,r12 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 14 -# if 14==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r10,r10,ror#5 - add r6,r6,r12 @ h+=Maj(a,b,c) from the past - eor r0,r0,r10,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 14 - add r6,r6,r12 @ h+=Maj(a,b,c) from the past - ldrb r12,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r12,lsl#8 - ldrb r12,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 14==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r10,r10,ror#5 - orr r2,r2,r12,lsl#24 - eor r0,r0,r10,ror#19 @ Sigma1(e) -#endif - ldr r12,[r14],#4 @ *K256++ - add r5,r5,r2 @ h+=X[i] - str r2,[sp,#14*4] - eor r2,r11,r4 - add r5,r5,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r10 - add r5,r5,r12 @ h+=K256[i] - eor r2,r2,r4 @ Ch(e,f,g) - eor r0,r6,r6,ror#11 - add r5,r5,r2 @ h+=Ch(e,f,g) -#if 14==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 14<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r6,r7 @ a^b, b^c in next round -#else - ldr r2,[sp,#0*4] @ from future BODY_16_xx - eor r12,r6,r7 @ a^b, b^c in next round - ldr r1,[sp,#13*4] @ from future BODY_16_xx -#endif - eor r0,r0,r6,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r9,r9,r5 @ d+=h - eor r3,r3,r7 @ Maj(a,b,c) - add r5,r5,r0,ror#2 @ h+=Sigma0(a) - @ add r5,r5,r3 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - @ ldr r2,[r1],#4 @ 15 -# if 15==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r9,r9,ror#5 - add r5,r5,r3 @ h+=Maj(a,b,c) from the past - eor r0,r0,r9,ror#19 @ Sigma1(e) -# ifndef __ARMEB__ - rev r2,r2 -# endif -#else - @ ldrb r2,[r1,#3] @ 15 - add r5,r5,r3 @ h+=Maj(a,b,c) from the past - ldrb r3,[r1,#2] - ldrb r0,[r1,#1] - orr r2,r2,r3,lsl#8 - ldrb r3,[r1],#4 - orr r2,r2,r0,lsl#16 -# if 15==15 - str r1,[sp,#17*4] @ make room for r1 -# endif - eor r0,r9,r9,ror#5 - orr r2,r2,r3,lsl#24 - eor r0,r0,r9,ror#19 @ Sigma1(e) -#endif - ldr r3,[r14],#4 @ *K256++ - add r4,r4,r2 @ h+=X[i] - str r2,[sp,#15*4] - eor r2,r10,r11 - add r4,r4,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r9 - add r4,r4,r3 @ h+=K256[i] - eor r2,r2,r11 @ Ch(e,f,g) - eor r0,r5,r5,ror#11 - add r4,r4,r2 @ h+=Ch(e,f,g) -#if 15==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? -#endif -#if 15<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r5,r6 @ a^b, b^c in next round -#else - ldr r2,[sp,#1*4] @ from future BODY_16_xx - eor r3,r5,r6 @ a^b, b^c in next round - ldr r1,[sp,#14*4] @ from future BODY_16_xx -#endif - eor r0,r0,r5,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r8,r8,r4 @ d+=h - eor r12,r12,r6 @ Maj(a,b,c) - add r4,r4,r0,ror#2 @ h+=Sigma0(a) - @ add r4,r4,r12 @ h+=Maj(a,b,c) -.Lrounds_16_xx: - @ ldr r2,[sp,#1*4] @ 16 - @ ldr r1,[sp,#14*4] - mov r0,r2,ror#7 - add r4,r4,r12 @ h+=Maj(a,b,c) from the past - mov r12,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r12,r12,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#0*4] - eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#9*4] - - add r12,r12,r0 - eor r0,r8,r8,ror#5 @ from BODY_00_15 - add r2,r2,r12 - eor r0,r0,r8,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r12,[r14],#4 @ *K256++ - add r11,r11,r2 @ h+=X[i] - str r2,[sp,#0*4] - eor r2,r9,r10 - add r11,r11,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r8 - add r11,r11,r12 @ h+=K256[i] - eor r2,r2,r10 @ Ch(e,f,g) - eor r0,r4,r4,ror#11 - add r11,r11,r2 @ h+=Ch(e,f,g) -#if 16==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 16<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r4,r5 @ a^b, b^c in next round -#else - ldr r2,[sp,#2*4] @ from future BODY_16_xx - eor r12,r4,r5 @ a^b, b^c in next round - ldr r1,[sp,#15*4] @ from future BODY_16_xx -#endif - eor r0,r0,r4,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r7,r7,r11 @ d+=h - eor r3,r3,r5 @ Maj(a,b,c) - add r11,r11,r0,ror#2 @ h+=Sigma0(a) - @ add r11,r11,r3 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#2*4] @ 17 - @ ldr r1,[sp,#15*4] - mov r0,r2,ror#7 - add r11,r11,r3 @ h+=Maj(a,b,c) from the past - mov r3,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r3,r3,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#1*4] - eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#10*4] - - add r3,r3,r0 - eor r0,r7,r7,ror#5 @ from BODY_00_15 - add r2,r2,r3 - eor r0,r0,r7,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r3,[r14],#4 @ *K256++ - add r10,r10,r2 @ h+=X[i] - str r2,[sp,#1*4] - eor r2,r8,r9 - add r10,r10,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r7 - add r10,r10,r3 @ h+=K256[i] - eor r2,r2,r9 @ Ch(e,f,g) - eor r0,r11,r11,ror#11 - add r10,r10,r2 @ h+=Ch(e,f,g) -#if 17==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? -#endif -#if 17<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r11,r4 @ a^b, b^c in next round -#else - ldr r2,[sp,#3*4] @ from future BODY_16_xx - eor r3,r11,r4 @ a^b, b^c in next round - ldr r1,[sp,#0*4] @ from future BODY_16_xx -#endif - eor r0,r0,r11,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r6,r6,r10 @ d+=h - eor r12,r12,r4 @ Maj(a,b,c) - add r10,r10,r0,ror#2 @ h+=Sigma0(a) - @ add r10,r10,r12 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#3*4] @ 18 - @ ldr r1,[sp,#0*4] - mov r0,r2,ror#7 - add r10,r10,r12 @ h+=Maj(a,b,c) from the past - mov r12,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r12,r12,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#2*4] - eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#11*4] - - add r12,r12,r0 - eor r0,r6,r6,ror#5 @ from BODY_00_15 - add r2,r2,r12 - eor r0,r0,r6,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r12,[r14],#4 @ *K256++ - add r9,r9,r2 @ h+=X[i] - str r2,[sp,#2*4] - eor r2,r7,r8 - add r9,r9,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r6 - add r9,r9,r12 @ h+=K256[i] - eor r2,r2,r8 @ Ch(e,f,g) - eor r0,r10,r10,ror#11 - add r9,r9,r2 @ h+=Ch(e,f,g) -#if 18==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 18<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r10,r11 @ a^b, b^c in next round -#else - ldr r2,[sp,#4*4] @ from future BODY_16_xx - eor r12,r10,r11 @ a^b, b^c in next round - ldr r1,[sp,#1*4] @ from future BODY_16_xx -#endif - eor r0,r0,r10,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r5,r5,r9 @ d+=h - eor r3,r3,r11 @ Maj(a,b,c) - add r9,r9,r0,ror#2 @ h+=Sigma0(a) - @ add r9,r9,r3 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#4*4] @ 19 - @ ldr r1,[sp,#1*4] - mov r0,r2,ror#7 - add r9,r9,r3 @ h+=Maj(a,b,c) from the past - mov r3,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r3,r3,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#3*4] - eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#12*4] - - add r3,r3,r0 - eor r0,r5,r5,ror#5 @ from BODY_00_15 - add r2,r2,r3 - eor r0,r0,r5,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r3,[r14],#4 @ *K256++ - add r8,r8,r2 @ h+=X[i] - str r2,[sp,#3*4] - eor r2,r6,r7 - add r8,r8,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r5 - add r8,r8,r3 @ h+=K256[i] - eor r2,r2,r7 @ Ch(e,f,g) - eor r0,r9,r9,ror#11 - add r8,r8,r2 @ h+=Ch(e,f,g) -#if 19==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? -#endif -#if 19<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r9,r10 @ a^b, b^c in next round -#else - ldr r2,[sp,#5*4] @ from future BODY_16_xx - eor r3,r9,r10 @ a^b, b^c in next round - ldr r1,[sp,#2*4] @ from future BODY_16_xx -#endif - eor r0,r0,r9,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r4,r4,r8 @ d+=h - eor r12,r12,r10 @ Maj(a,b,c) - add r8,r8,r0,ror#2 @ h+=Sigma0(a) - @ add r8,r8,r12 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#5*4] @ 20 - @ ldr r1,[sp,#2*4] - mov r0,r2,ror#7 - add r8,r8,r12 @ h+=Maj(a,b,c) from the past - mov r12,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r12,r12,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#4*4] - eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#13*4] - - add r12,r12,r0 - eor r0,r4,r4,ror#5 @ from BODY_00_15 - add r2,r2,r12 - eor r0,r0,r4,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r12,[r14],#4 @ *K256++ - add r7,r7,r2 @ h+=X[i] - str r2,[sp,#4*4] - eor r2,r5,r6 - add r7,r7,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r4 - add r7,r7,r12 @ h+=K256[i] - eor r2,r2,r6 @ Ch(e,f,g) - eor r0,r8,r8,ror#11 - add r7,r7,r2 @ h+=Ch(e,f,g) -#if 20==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 20<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r8,r9 @ a^b, b^c in next round -#else - ldr r2,[sp,#6*4] @ from future BODY_16_xx - eor r12,r8,r9 @ a^b, b^c in next round - ldr r1,[sp,#3*4] @ from future BODY_16_xx -#endif - eor r0,r0,r8,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r11,r11,r7 @ d+=h - eor r3,r3,r9 @ Maj(a,b,c) - add r7,r7,r0,ror#2 @ h+=Sigma0(a) - @ add r7,r7,r3 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#6*4] @ 21 - @ ldr r1,[sp,#3*4] - mov r0,r2,ror#7 - add r7,r7,r3 @ h+=Maj(a,b,c) from the past - mov r3,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r3,r3,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#5*4] - eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#14*4] - - add r3,r3,r0 - eor r0,r11,r11,ror#5 @ from BODY_00_15 - add r2,r2,r3 - eor r0,r0,r11,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r3,[r14],#4 @ *K256++ - add r6,r6,r2 @ h+=X[i] - str r2,[sp,#5*4] - eor r2,r4,r5 - add r6,r6,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r11 - add r6,r6,r3 @ h+=K256[i] - eor r2,r2,r5 @ Ch(e,f,g) - eor r0,r7,r7,ror#11 - add r6,r6,r2 @ h+=Ch(e,f,g) -#if 21==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? -#endif -#if 21<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r7,r8 @ a^b, b^c in next round -#else - ldr r2,[sp,#7*4] @ from future BODY_16_xx - eor r3,r7,r8 @ a^b, b^c in next round - ldr r1,[sp,#4*4] @ from future BODY_16_xx -#endif - eor r0,r0,r7,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r10,r10,r6 @ d+=h - eor r12,r12,r8 @ Maj(a,b,c) - add r6,r6,r0,ror#2 @ h+=Sigma0(a) - @ add r6,r6,r12 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#7*4] @ 22 - @ ldr r1,[sp,#4*4] - mov r0,r2,ror#7 - add r6,r6,r12 @ h+=Maj(a,b,c) from the past - mov r12,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r12,r12,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#6*4] - eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#15*4] - - add r12,r12,r0 - eor r0,r10,r10,ror#5 @ from BODY_00_15 - add r2,r2,r12 - eor r0,r0,r10,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r12,[r14],#4 @ *K256++ - add r5,r5,r2 @ h+=X[i] - str r2,[sp,#6*4] - eor r2,r11,r4 - add r5,r5,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r10 - add r5,r5,r12 @ h+=K256[i] - eor r2,r2,r4 @ Ch(e,f,g) - eor r0,r6,r6,ror#11 - add r5,r5,r2 @ h+=Ch(e,f,g) -#if 22==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 22<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r6,r7 @ a^b, b^c in next round -#else - ldr r2,[sp,#8*4] @ from future BODY_16_xx - eor r12,r6,r7 @ a^b, b^c in next round - ldr r1,[sp,#5*4] @ from future BODY_16_xx -#endif - eor r0,r0,r6,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r9,r9,r5 @ d+=h - eor r3,r3,r7 @ Maj(a,b,c) - add r5,r5,r0,ror#2 @ h+=Sigma0(a) - @ add r5,r5,r3 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#8*4] @ 23 - @ ldr r1,[sp,#5*4] - mov r0,r2,ror#7 - add r5,r5,r3 @ h+=Maj(a,b,c) from the past - mov r3,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r3,r3,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#7*4] - eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#0*4] - - add r3,r3,r0 - eor r0,r9,r9,ror#5 @ from BODY_00_15 - add r2,r2,r3 - eor r0,r0,r9,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r3,[r14],#4 @ *K256++ - add r4,r4,r2 @ h+=X[i] - str r2,[sp,#7*4] - eor r2,r10,r11 - add r4,r4,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r9 - add r4,r4,r3 @ h+=K256[i] - eor r2,r2,r11 @ Ch(e,f,g) - eor r0,r5,r5,ror#11 - add r4,r4,r2 @ h+=Ch(e,f,g) -#if 23==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? -#endif -#if 23<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r5,r6 @ a^b, b^c in next round -#else - ldr r2,[sp,#9*4] @ from future BODY_16_xx - eor r3,r5,r6 @ a^b, b^c in next round - ldr r1,[sp,#6*4] @ from future BODY_16_xx -#endif - eor r0,r0,r5,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r8,r8,r4 @ d+=h - eor r12,r12,r6 @ Maj(a,b,c) - add r4,r4,r0,ror#2 @ h+=Sigma0(a) - @ add r4,r4,r12 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#9*4] @ 24 - @ ldr r1,[sp,#6*4] - mov r0,r2,ror#7 - add r4,r4,r12 @ h+=Maj(a,b,c) from the past - mov r12,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r12,r12,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#8*4] - eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#1*4] - - add r12,r12,r0 - eor r0,r8,r8,ror#5 @ from BODY_00_15 - add r2,r2,r12 - eor r0,r0,r8,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r12,[r14],#4 @ *K256++ - add r11,r11,r2 @ h+=X[i] - str r2,[sp,#8*4] - eor r2,r9,r10 - add r11,r11,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r8 - add r11,r11,r12 @ h+=K256[i] - eor r2,r2,r10 @ Ch(e,f,g) - eor r0,r4,r4,ror#11 - add r11,r11,r2 @ h+=Ch(e,f,g) -#if 24==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 24<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r4,r5 @ a^b, b^c in next round -#else - ldr r2,[sp,#10*4] @ from future BODY_16_xx - eor r12,r4,r5 @ a^b, b^c in next round - ldr r1,[sp,#7*4] @ from future BODY_16_xx -#endif - eor r0,r0,r4,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r7,r7,r11 @ d+=h - eor r3,r3,r5 @ Maj(a,b,c) - add r11,r11,r0,ror#2 @ h+=Sigma0(a) - @ add r11,r11,r3 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#10*4] @ 25 - @ ldr r1,[sp,#7*4] - mov r0,r2,ror#7 - add r11,r11,r3 @ h+=Maj(a,b,c) from the past - mov r3,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r3,r3,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#9*4] - eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#2*4] - - add r3,r3,r0 - eor r0,r7,r7,ror#5 @ from BODY_00_15 - add r2,r2,r3 - eor r0,r0,r7,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r3,[r14],#4 @ *K256++ - add r10,r10,r2 @ h+=X[i] - str r2,[sp,#9*4] - eor r2,r8,r9 - add r10,r10,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r7 - add r10,r10,r3 @ h+=K256[i] - eor r2,r2,r9 @ Ch(e,f,g) - eor r0,r11,r11,ror#11 - add r10,r10,r2 @ h+=Ch(e,f,g) -#if 25==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? -#endif -#if 25<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r11,r4 @ a^b, b^c in next round -#else - ldr r2,[sp,#11*4] @ from future BODY_16_xx - eor r3,r11,r4 @ a^b, b^c in next round - ldr r1,[sp,#8*4] @ from future BODY_16_xx -#endif - eor r0,r0,r11,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r6,r6,r10 @ d+=h - eor r12,r12,r4 @ Maj(a,b,c) - add r10,r10,r0,ror#2 @ h+=Sigma0(a) - @ add r10,r10,r12 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#11*4] @ 26 - @ ldr r1,[sp,#8*4] - mov r0,r2,ror#7 - add r10,r10,r12 @ h+=Maj(a,b,c) from the past - mov r12,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r12,r12,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#10*4] - eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#3*4] - - add r12,r12,r0 - eor r0,r6,r6,ror#5 @ from BODY_00_15 - add r2,r2,r12 - eor r0,r0,r6,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r12,[r14],#4 @ *K256++ - add r9,r9,r2 @ h+=X[i] - str r2,[sp,#10*4] - eor r2,r7,r8 - add r9,r9,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r6 - add r9,r9,r12 @ h+=K256[i] - eor r2,r2,r8 @ Ch(e,f,g) - eor r0,r10,r10,ror#11 - add r9,r9,r2 @ h+=Ch(e,f,g) -#if 26==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 26<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r10,r11 @ a^b, b^c in next round -#else - ldr r2,[sp,#12*4] @ from future BODY_16_xx - eor r12,r10,r11 @ a^b, b^c in next round - ldr r1,[sp,#9*4] @ from future BODY_16_xx -#endif - eor r0,r0,r10,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r5,r5,r9 @ d+=h - eor r3,r3,r11 @ Maj(a,b,c) - add r9,r9,r0,ror#2 @ h+=Sigma0(a) - @ add r9,r9,r3 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#12*4] @ 27 - @ ldr r1,[sp,#9*4] - mov r0,r2,ror#7 - add r9,r9,r3 @ h+=Maj(a,b,c) from the past - mov r3,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r3,r3,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#11*4] - eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#4*4] - - add r3,r3,r0 - eor r0,r5,r5,ror#5 @ from BODY_00_15 - add r2,r2,r3 - eor r0,r0,r5,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r3,[r14],#4 @ *K256++ - add r8,r8,r2 @ h+=X[i] - str r2,[sp,#11*4] - eor r2,r6,r7 - add r8,r8,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r5 - add r8,r8,r3 @ h+=K256[i] - eor r2,r2,r7 @ Ch(e,f,g) - eor r0,r9,r9,ror#11 - add r8,r8,r2 @ h+=Ch(e,f,g) -#if 27==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? -#endif -#if 27<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r9,r10 @ a^b, b^c in next round -#else - ldr r2,[sp,#13*4] @ from future BODY_16_xx - eor r3,r9,r10 @ a^b, b^c in next round - ldr r1,[sp,#10*4] @ from future BODY_16_xx -#endif - eor r0,r0,r9,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r4,r4,r8 @ d+=h - eor r12,r12,r10 @ Maj(a,b,c) - add r8,r8,r0,ror#2 @ h+=Sigma0(a) - @ add r8,r8,r12 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#13*4] @ 28 - @ ldr r1,[sp,#10*4] - mov r0,r2,ror#7 - add r8,r8,r12 @ h+=Maj(a,b,c) from the past - mov r12,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r12,r12,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#12*4] - eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#5*4] - - add r12,r12,r0 - eor r0,r4,r4,ror#5 @ from BODY_00_15 - add r2,r2,r12 - eor r0,r0,r4,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r12,[r14],#4 @ *K256++ - add r7,r7,r2 @ h+=X[i] - str r2,[sp,#12*4] - eor r2,r5,r6 - add r7,r7,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r4 - add r7,r7,r12 @ h+=K256[i] - eor r2,r2,r6 @ Ch(e,f,g) - eor r0,r8,r8,ror#11 - add r7,r7,r2 @ h+=Ch(e,f,g) -#if 28==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 28<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r8,r9 @ a^b, b^c in next round -#else - ldr r2,[sp,#14*4] @ from future BODY_16_xx - eor r12,r8,r9 @ a^b, b^c in next round - ldr r1,[sp,#11*4] @ from future BODY_16_xx -#endif - eor r0,r0,r8,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r11,r11,r7 @ d+=h - eor r3,r3,r9 @ Maj(a,b,c) - add r7,r7,r0,ror#2 @ h+=Sigma0(a) - @ add r7,r7,r3 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#14*4] @ 29 - @ ldr r1,[sp,#11*4] - mov r0,r2,ror#7 - add r7,r7,r3 @ h+=Maj(a,b,c) from the past - mov r3,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r3,r3,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#13*4] - eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#6*4] - - add r3,r3,r0 - eor r0,r11,r11,ror#5 @ from BODY_00_15 - add r2,r2,r3 - eor r0,r0,r11,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r3,[r14],#4 @ *K256++ - add r6,r6,r2 @ h+=X[i] - str r2,[sp,#13*4] - eor r2,r4,r5 - add r6,r6,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r11 - add r6,r6,r3 @ h+=K256[i] - eor r2,r2,r5 @ Ch(e,f,g) - eor r0,r7,r7,ror#11 - add r6,r6,r2 @ h+=Ch(e,f,g) -#if 29==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? -#endif -#if 29<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r7,r8 @ a^b, b^c in next round -#else - ldr r2,[sp,#15*4] @ from future BODY_16_xx - eor r3,r7,r8 @ a^b, b^c in next round - ldr r1,[sp,#12*4] @ from future BODY_16_xx -#endif - eor r0,r0,r7,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r10,r10,r6 @ d+=h - eor r12,r12,r8 @ Maj(a,b,c) - add r6,r6,r0,ror#2 @ h+=Sigma0(a) - @ add r6,r6,r12 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#15*4] @ 30 - @ ldr r1,[sp,#12*4] - mov r0,r2,ror#7 - add r6,r6,r12 @ h+=Maj(a,b,c) from the past - mov r12,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r12,r12,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#14*4] - eor r12,r12,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#7*4] - - add r12,r12,r0 - eor r0,r10,r10,ror#5 @ from BODY_00_15 - add r2,r2,r12 - eor r0,r0,r10,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r12,[r14],#4 @ *K256++ - add r5,r5,r2 @ h+=X[i] - str r2,[sp,#14*4] - eor r2,r11,r4 - add r5,r5,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r10 - add r5,r5,r12 @ h+=K256[i] - eor r2,r2,r4 @ Ch(e,f,g) - eor r0,r6,r6,ror#11 - add r5,r5,r2 @ h+=Ch(e,f,g) -#if 30==31 - and r12,r12,#0xff - cmp r12,#0xf2 @ done? 
-#endif -#if 30<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r12,r6,r7 @ a^b, b^c in next round -#else - ldr r2,[sp,#0*4] @ from future BODY_16_xx - eor r12,r6,r7 @ a^b, b^c in next round - ldr r1,[sp,#13*4] @ from future BODY_16_xx -#endif - eor r0,r0,r6,ror#20 @ Sigma0(a) - and r3,r3,r12 @ (b^c)&=(a^b) - add r9,r9,r5 @ d+=h - eor r3,r3,r7 @ Maj(a,b,c) - add r5,r5,r0,ror#2 @ h+=Sigma0(a) - @ add r5,r5,r3 @ h+=Maj(a,b,c) - @ ldr r2,[sp,#0*4] @ 31 - @ ldr r1,[sp,#13*4] - mov r0,r2,ror#7 - add r5,r5,r3 @ h+=Maj(a,b,c) from the past - mov r3,r1,ror#17 - eor r0,r0,r2,ror#18 - eor r3,r3,r1,ror#19 - eor r0,r0,r2,lsr#3 @ sigma0(X[i+1]) - ldr r2,[sp,#15*4] - eor r3,r3,r1,lsr#10 @ sigma1(X[i+14]) - ldr r1,[sp,#8*4] - - add r3,r3,r0 - eor r0,r9,r9,ror#5 @ from BODY_00_15 - add r2,r2,r3 - eor r0,r0,r9,ror#19 @ Sigma1(e) - add r2,r2,r1 @ X[i] - ldr r3,[r14],#4 @ *K256++ - add r4,r4,r2 @ h+=X[i] - str r2,[sp,#15*4] - eor r2,r10,r11 - add r4,r4,r0,ror#6 @ h+=Sigma1(e) - and r2,r2,r9 - add r4,r4,r3 @ h+=K256[i] - eor r2,r2,r11 @ Ch(e,f,g) - eor r0,r5,r5,ror#11 - add r4,r4,r2 @ h+=Ch(e,f,g) -#if 31==31 - and r3,r3,#0xff - cmp r3,#0xf2 @ done? -#endif -#if 31<15 -# if __ARM_ARCH__>=7 - ldr r2,[r1],#4 @ prefetch -# else - ldrb r2,[r1,#3] -# endif - eor r3,r5,r6 @ a^b, b^c in next round -#else - ldr r2,[sp,#1*4] @ from future BODY_16_xx - eor r3,r5,r6 @ a^b, b^c in next round - ldr r1,[sp,#14*4] @ from future BODY_16_xx -#endif - eor r0,r0,r5,ror#20 @ Sigma0(a) - and r12,r12,r3 @ (b^c)&=(a^b) - add r8,r8,r4 @ d+=h - eor r12,r12,r6 @ Maj(a,b,c) - add r4,r4,r0,ror#2 @ h+=Sigma0(a) - @ add r4,r4,r12 @ h+=Maj(a,b,c) -#if __ARM_ARCH__>=7 - ite eq @ Thumb2 thing, sanity check in ARM -#endif - ldreq r3,[sp,#16*4] @ pull ctx - bne .Lrounds_16_xx - - add r4,r4,r12 @ h+=Maj(a,b,c) from the past - ldr r0,[r3,#0] - ldr r2,[r3,#4] - ldr r12,[r3,#8] - add r4,r4,r0 - ldr r0,[r3,#12] - add r5,r5,r2 - ldr r2,[r3,#16] - add r6,r6,r12 - ldr r12,[r3,#20] - add r7,r7,r0 - ldr r0,[r3,#24] - add r8,r8,r2 - ldr r2,[r3,#28] - add r9,r9,r12 - ldr r1,[sp,#17*4] @ pull inp - ldr r12,[sp,#18*4] @ pull inp+len - add r10,r10,r0 - add r11,r11,r2 - stmia r3,{r4,r5,r6,r7,r8,r9,r10,r11} - cmp r1,r12 - sub r14,r14,#256 @ rewind Ktbl - bne .Loop - - add sp,sp,#19*4 @ destroy frame -#if __ARM_ARCH__>=5 - ldmia sp!,{r4-r11,pc} -#else - ldmia sp!,{r4-r11,lr} - tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet - .word 0xe12fff1e @ interoperable with Thumb ISA:-) -#endif -.size sha256_block_data_order,.-sha256_block_data_order -#if __ARM_MAX_ARCH__>=7 -.arch armv7-a -.fpu neon - -.global sha256_block_data_order_neon -.type sha256_block_data_order_neon,%function -.align 4 -sha256_block_data_order_neon: -.LNEON: - stmdb sp!,{r4-r12,lr} - - sub r11,sp,#16*4+16 - adr r14,.Lsha256_block_data_order - sub r14,r14,#.Lsha256_block_data_order-K256 - bic r11,r11,#15 @ align for 128-bit stores - mov r12,sp - mov sp,r11 @ alloca - add r2,r1,r2,lsl#6 @ len to point at the end of inp - - vld1.8 {q0},[r1]! - vld1.8 {q1},[r1]! - vld1.8 {q2},[r1]! - vld1.8 {q3},[r1]! - vld1.32 {q8},[r14,:128]! - vld1.32 {q9},[r14,:128]! - vld1.32 {q10},[r14,:128]! - vld1.32 {q11},[r14,:128]! - vrev32.8 q0,q0 @ yes, even on - str r0,[sp,#64] - vrev32.8 q1,q1 @ big-endian - str r1,[sp,#68] - mov r1,sp - vrev32.8 q2,q2 - str r2,[sp,#72] - vrev32.8 q3,q3 - str r12,[sp,#76] @ save original sp - vadd.i32 q8,q8,q0 - vadd.i32 q9,q9,q1 - vst1.32 {q8},[r1,:128]! - vadd.i32 q10,q10,q2 - vst1.32 {q9},[r1,:128]! 
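
The unrolled rounds in the deleted integer path above all perform the same FIPS 180-4 SHA-256 step; only the register assignment of a..h rotates from round to round, and the Maj() addition is deferred into the following round (the "from the past" comments). The assembly also factors out a common rotate, e.g. computing (e ^ e ror 5 ^ e ror 19) ror 6 rather than three separate rotations for Sigma1(e). A minimal C sketch of one round, with illustrative helper names that are not taken from this file:

#include <stdint.h>

static inline uint32_t rotr32(uint32_t x, int n) { return (x >> n) | (x << (32 - n)); }

static inline uint32_t Sigma0(uint32_t x) { return rotr32(x, 2) ^ rotr32(x, 13) ^ rotr32(x, 22); }
static inline uint32_t Sigma1(uint32_t x) { return rotr32(x, 6) ^ rotr32(x, 11) ^ rotr32(x, 25); }
/* single-AND forms, equivalent to the textbook Ch/Maj and to the "(b^c)&=(a^b)" comments above */
static inline uint32_t Ch(uint32_t e, uint32_t f, uint32_t g)  { return ((f ^ g) & e) ^ g; }
static inline uint32_t Maj(uint32_t a, uint32_t b, uint32_t c) { return ((a ^ b) & (b ^ c)) ^ b; }

/* One SHA-256 round; the assembly folds the same terms in a slightly different order. */
static void sha256_round(uint32_t s[8], uint32_t Ki, uint32_t Wi)
{
	uint32_t a = s[0], b = s[1], c = s[2], d = s[3];
	uint32_t e = s[4], f = s[5], g = s[6], h = s[7];
	uint32_t T1 = h + Sigma1(e) + Ch(e, f, g) + Ki + Wi;   /* h+=X[i], h+=K256[i], h+=Sigma1(e), h+=Ch */
	uint32_t T2 = Sigma0(a) + Maj(a, b, c);

	s[7] = g; s[6] = f; s[5] = e;
	s[4] = d + T1;          /* "d+=h" in the comments, since h already holds T1 there */
	s[3] = c; s[2] = b; s[1] = a;
	s[0] = T1 + T2;         /* "h+=Sigma0(a)", with Maj added at the start of the next round */
}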
- vadd.i32 q11,q11,q3 - vst1.32 {q10},[r1,:128]! - vst1.32 {q11},[r1,:128]! - - ldmia r0,{r4-r11} - sub r1,r1,#64 - ldr r2,[sp,#0] - eor r12,r12,r12 - eor r3,r5,r6 - b .L_00_48 - -.align 4 -.L_00_48: - vext.8 q8,q0,q1,#4 - add r11,r11,r2 - eor r2,r9,r10 - eor r0,r8,r8,ror#5 - vext.8 q9,q2,q3,#4 - add r4,r4,r12 - and r2,r2,r8 - eor r12,r0,r8,ror#19 - vshr.u32 q10,q8,#7 - eor r0,r4,r4,ror#11 - eor r2,r2,r10 - vadd.i32 q0,q0,q9 - add r11,r11,r12,ror#6 - eor r12,r4,r5 - vshr.u32 q9,q8,#3 - eor r0,r0,r4,ror#20 - add r11,r11,r2 - vsli.32 q10,q8,#25 - ldr r2,[sp,#4] - and r3,r3,r12 - vshr.u32 q11,q8,#18 - add r7,r7,r11 - add r11,r11,r0,ror#2 - eor r3,r3,r5 - veor q9,q9,q10 - add r10,r10,r2 - vsli.32 q11,q8,#14 - eor r2,r8,r9 - eor r0,r7,r7,ror#5 - vshr.u32 d24,d7,#17 - add r11,r11,r3 - and r2,r2,r7 - veor q9,q9,q11 - eor r3,r0,r7,ror#19 - eor r0,r11,r11,ror#11 - vsli.32 d24,d7,#15 - eor r2,r2,r9 - add r10,r10,r3,ror#6 - vshr.u32 d25,d7,#10 - eor r3,r11,r4 - eor r0,r0,r11,ror#20 - vadd.i32 q0,q0,q9 - add r10,r10,r2 - ldr r2,[sp,#8] - veor d25,d25,d24 - and r12,r12,r3 - add r6,r6,r10 - vshr.u32 d24,d7,#19 - add r10,r10,r0,ror#2 - eor r12,r12,r4 - vsli.32 d24,d7,#13 - add r9,r9,r2 - eor r2,r7,r8 - veor d25,d25,d24 - eor r0,r6,r6,ror#5 - add r10,r10,r12 - vadd.i32 d0,d0,d25 - and r2,r2,r6 - eor r12,r0,r6,ror#19 - vshr.u32 d24,d0,#17 - eor r0,r10,r10,ror#11 - eor r2,r2,r8 - vsli.32 d24,d0,#15 - add r9,r9,r12,ror#6 - eor r12,r10,r11 - vshr.u32 d25,d0,#10 - eor r0,r0,r10,ror#20 - add r9,r9,r2 - veor d25,d25,d24 - ldr r2,[sp,#12] - and r3,r3,r12 - vshr.u32 d24,d0,#19 - add r5,r5,r9 - add r9,r9,r0,ror#2 - eor r3,r3,r11 - vld1.32 {q8},[r14,:128]! - add r8,r8,r2 - vsli.32 d24,d0,#13 - eor r2,r6,r7 - eor r0,r5,r5,ror#5 - veor d25,d25,d24 - add r9,r9,r3 - and r2,r2,r5 - vadd.i32 d1,d1,d25 - eor r3,r0,r5,ror#19 - eor r0,r9,r9,ror#11 - vadd.i32 q8,q8,q0 - eor r2,r2,r7 - add r8,r8,r3,ror#6 - eor r3,r9,r10 - eor r0,r0,r9,ror#20 - add r8,r8,r2 - ldr r2,[sp,#16] - and r12,r12,r3 - add r4,r4,r8 - vst1.32 {q8},[r1,:128]! - add r8,r8,r0,ror#2 - eor r12,r12,r10 - vext.8 q8,q1,q2,#4 - add r7,r7,r2 - eor r2,r5,r6 - eor r0,r4,r4,ror#5 - vext.8 q9,q3,q0,#4 - add r8,r8,r12 - and r2,r2,r4 - eor r12,r0,r4,ror#19 - vshr.u32 q10,q8,#7 - eor r0,r8,r8,ror#11 - eor r2,r2,r6 - vadd.i32 q1,q1,q9 - add r7,r7,r12,ror#6 - eor r12,r8,r9 - vshr.u32 q9,q8,#3 - eor r0,r0,r8,ror#20 - add r7,r7,r2 - vsli.32 q10,q8,#25 - ldr r2,[sp,#20] - and r3,r3,r12 - vshr.u32 q11,q8,#18 - add r11,r11,r7 - add r7,r7,r0,ror#2 - eor r3,r3,r9 - veor q9,q9,q10 - add r6,r6,r2 - vsli.32 q11,q8,#14 - eor r2,r4,r5 - eor r0,r11,r11,ror#5 - vshr.u32 d24,d1,#17 - add r7,r7,r3 - and r2,r2,r11 - veor q9,q9,q11 - eor r3,r0,r11,ror#19 - eor r0,r7,r7,ror#11 - vsli.32 d24,d1,#15 - eor r2,r2,r5 - add r6,r6,r3,ror#6 - vshr.u32 d25,d1,#10 - eor r3,r7,r8 - eor r0,r0,r7,ror#20 - vadd.i32 q1,q1,q9 - add r6,r6,r2 - ldr r2,[sp,#24] - veor d25,d25,d24 - and r12,r12,r3 - add r10,r10,r6 - vshr.u32 d24,d1,#19 - add r6,r6,r0,ror#2 - eor r12,r12,r8 - vsli.32 d24,d1,#13 - add r5,r5,r2 - eor r2,r11,r4 - veor d25,d25,d24 - eor r0,r10,r10,ror#5 - add r6,r6,r12 - vadd.i32 d2,d2,d25 - and r2,r2,r10 - eor r12,r0,r10,ror#19 - vshr.u32 d24,d2,#17 - eor r0,r6,r6,ror#11 - eor r2,r2,r4 - vsli.32 d24,d2,#15 - add r5,r5,r12,ror#6 - eor r12,r6,r7 - vshr.u32 d25,d2,#10 - eor r0,r0,r6,ror#20 - add r5,r5,r2 - veor d25,d25,d24 - ldr r2,[sp,#28] - and r3,r3,r12 - vshr.u32 d24,d2,#19 - add r9,r9,r5 - add r5,r5,r0,ror#2 - eor r3,r3,r7 - vld1.32 {q8},[r14,:128]! 
- add r4,r4,r2 - vsli.32 d24,d2,#13 - eor r2,r10,r11 - eor r0,r9,r9,ror#5 - veor d25,d25,d24 - add r5,r5,r3 - and r2,r2,r9 - vadd.i32 d3,d3,d25 - eor r3,r0,r9,ror#19 - eor r0,r5,r5,ror#11 - vadd.i32 q8,q8,q1 - eor r2,r2,r11 - add r4,r4,r3,ror#6 - eor r3,r5,r6 - eor r0,r0,r5,ror#20 - add r4,r4,r2 - ldr r2,[sp,#32] - and r12,r12,r3 - add r8,r8,r4 - vst1.32 {q8},[r1,:128]! - add r4,r4,r0,ror#2 - eor r12,r12,r6 - vext.8 q8,q2,q3,#4 - add r11,r11,r2 - eor r2,r9,r10 - eor r0,r8,r8,ror#5 - vext.8 q9,q0,q1,#4 - add r4,r4,r12 - and r2,r2,r8 - eor r12,r0,r8,ror#19 - vshr.u32 q10,q8,#7 - eor r0,r4,r4,ror#11 - eor r2,r2,r10 - vadd.i32 q2,q2,q9 - add r11,r11,r12,ror#6 - eor r12,r4,r5 - vshr.u32 q9,q8,#3 - eor r0,r0,r4,ror#20 - add r11,r11,r2 - vsli.32 q10,q8,#25 - ldr r2,[sp,#36] - and r3,r3,r12 - vshr.u32 q11,q8,#18 - add r7,r7,r11 - add r11,r11,r0,ror#2 - eor r3,r3,r5 - veor q9,q9,q10 - add r10,r10,r2 - vsli.32 q11,q8,#14 - eor r2,r8,r9 - eor r0,r7,r7,ror#5 - vshr.u32 d24,d3,#17 - add r11,r11,r3 - and r2,r2,r7 - veor q9,q9,q11 - eor r3,r0,r7,ror#19 - eor r0,r11,r11,ror#11 - vsli.32 d24,d3,#15 - eor r2,r2,r9 - add r10,r10,r3,ror#6 - vshr.u32 d25,d3,#10 - eor r3,r11,r4 - eor r0,r0,r11,ror#20 - vadd.i32 q2,q2,q9 - add r10,r10,r2 - ldr r2,[sp,#40] - veor d25,d25,d24 - and r12,r12,r3 - add r6,r6,r10 - vshr.u32 d24,d3,#19 - add r10,r10,r0,ror#2 - eor r12,r12,r4 - vsli.32 d24,d3,#13 - add r9,r9,r2 - eor r2,r7,r8 - veor d25,d25,d24 - eor r0,r6,r6,ror#5 - add r10,r10,r12 - vadd.i32 d4,d4,d25 - and r2,r2,r6 - eor r12,r0,r6,ror#19 - vshr.u32 d24,d4,#17 - eor r0,r10,r10,ror#11 - eor r2,r2,r8 - vsli.32 d24,d4,#15 - add r9,r9,r12,ror#6 - eor r12,r10,r11 - vshr.u32 d25,d4,#10 - eor r0,r0,r10,ror#20 - add r9,r9,r2 - veor d25,d25,d24 - ldr r2,[sp,#44] - and r3,r3,r12 - vshr.u32 d24,d4,#19 - add r5,r5,r9 - add r9,r9,r0,ror#2 - eor r3,r3,r11 - vld1.32 {q8},[r14,:128]! - add r8,r8,r2 - vsli.32 d24,d4,#13 - eor r2,r6,r7 - eor r0,r5,r5,ror#5 - veor d25,d25,d24 - add r9,r9,r3 - and r2,r2,r5 - vadd.i32 d5,d5,d25 - eor r3,r0,r5,ror#19 - eor r0,r9,r9,ror#11 - vadd.i32 q8,q8,q2 - eor r2,r2,r7 - add r8,r8,r3,ror#6 - eor r3,r9,r10 - eor r0,r0,r9,ror#20 - add r8,r8,r2 - ldr r2,[sp,#48] - and r12,r12,r3 - add r4,r4,r8 - vst1.32 {q8},[r1,:128]! 
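
The vext/vshr/vsli sequences in the NEON .L_00_48 loop above implement the SHA-256 message-schedule expansion four words at a time: vext builds the shifted word windows, each vshr+vsli pair forms one rotation, and the bare vshr supplies the logical shift of sigma0/sigma1. A scalar C sketch of the recurrence being computed (helper names are illustrative):

#include <stdint.h>

static inline uint32_t rotr32(uint32_t x, int n) { return (x >> n) | (x << (32 - n)); }

static inline uint32_t sigma0(uint32_t x) { return rotr32(x, 7)  ^ rotr32(x, 18) ^ (x >> 3);  }
static inline uint32_t sigma1(uint32_t x) { return rotr32(x, 17) ^ rotr32(x, 19) ^ (x >> 10); }

/* Expand the 16 input words into the 64 schedule words consumed by rounds 16..63. */
static void sha256_schedule(uint32_t W[64])
{
	for (int t = 16; t < 64; t++)
		W[t] = sigma1(W[t - 2]) + W[t - 7] + sigma0(W[t - 15]) + W[t - 16];
}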
- add r8,r8,r0,ror#2 - eor r12,r12,r10 - vext.8 q8,q3,q0,#4 - add r7,r7,r2 - eor r2,r5,r6 - eor r0,r4,r4,ror#5 - vext.8 q9,q1,q2,#4 - add r8,r8,r12 - and r2,r2,r4 - eor r12,r0,r4,ror#19 - vshr.u32 q10,q8,#7 - eor r0,r8,r8,ror#11 - eor r2,r2,r6 - vadd.i32 q3,q3,q9 - add r7,r7,r12,ror#6 - eor r12,r8,r9 - vshr.u32 q9,q8,#3 - eor r0,r0,r8,ror#20 - add r7,r7,r2 - vsli.32 q10,q8,#25 - ldr r2,[sp,#52] - and r3,r3,r12 - vshr.u32 q11,q8,#18 - add r11,r11,r7 - add r7,r7,r0,ror#2 - eor r3,r3,r9 - veor q9,q9,q10 - add r6,r6,r2 - vsli.32 q11,q8,#14 - eor r2,r4,r5 - eor r0,r11,r11,ror#5 - vshr.u32 d24,d5,#17 - add r7,r7,r3 - and r2,r2,r11 - veor q9,q9,q11 - eor r3,r0,r11,ror#19 - eor r0,r7,r7,ror#11 - vsli.32 d24,d5,#15 - eor r2,r2,r5 - add r6,r6,r3,ror#6 - vshr.u32 d25,d5,#10 - eor r3,r7,r8 - eor r0,r0,r7,ror#20 - vadd.i32 q3,q3,q9 - add r6,r6,r2 - ldr r2,[sp,#56] - veor d25,d25,d24 - and r12,r12,r3 - add r10,r10,r6 - vshr.u32 d24,d5,#19 - add r6,r6,r0,ror#2 - eor r12,r12,r8 - vsli.32 d24,d5,#13 - add r5,r5,r2 - eor r2,r11,r4 - veor d25,d25,d24 - eor r0,r10,r10,ror#5 - add r6,r6,r12 - vadd.i32 d6,d6,d25 - and r2,r2,r10 - eor r12,r0,r10,ror#19 - vshr.u32 d24,d6,#17 - eor r0,r6,r6,ror#11 - eor r2,r2,r4 - vsli.32 d24,d6,#15 - add r5,r5,r12,ror#6 - eor r12,r6,r7 - vshr.u32 d25,d6,#10 - eor r0,r0,r6,ror#20 - add r5,r5,r2 - veor d25,d25,d24 - ldr r2,[sp,#60] - and r3,r3,r12 - vshr.u32 d24,d6,#19 - add r9,r9,r5 - add r5,r5,r0,ror#2 - eor r3,r3,r7 - vld1.32 {q8},[r14,:128]! - add r4,r4,r2 - vsli.32 d24,d6,#13 - eor r2,r10,r11 - eor r0,r9,r9,ror#5 - veor d25,d25,d24 - add r5,r5,r3 - and r2,r2,r9 - vadd.i32 d7,d7,d25 - eor r3,r0,r9,ror#19 - eor r0,r5,r5,ror#11 - vadd.i32 q8,q8,q3 - eor r2,r2,r11 - add r4,r4,r3,ror#6 - eor r3,r5,r6 - eor r0,r0,r5,ror#20 - add r4,r4,r2 - ldr r2,[r14] - and r12,r12,r3 - add r8,r8,r4 - vst1.32 {q8},[r1,:128]! - add r4,r4,r0,ror#2 - eor r12,r12,r6 - teq r2,#0 @ check for K256 terminator - ldr r2,[sp,#0] - sub r1,r1,#64 - bne .L_00_48 - - ldr r1,[sp,#68] - ldr r0,[sp,#72] - sub r14,r14,#256 @ rewind r14 - teq r1,r0 - it eq - subeq r1,r1,#64 @ avoid SEGV - vld1.8 {q0},[r1]! @ load next input block - vld1.8 {q1},[r1]! - vld1.8 {q2},[r1]! - vld1.8 {q3},[r1]! - it ne - strne r1,[sp,#68] - mov r1,sp - add r11,r11,r2 - eor r2,r9,r10 - eor r0,r8,r8,ror#5 - add r4,r4,r12 - vld1.32 {q8},[r14,:128]! 
- and r2,r2,r8 - eor r12,r0,r8,ror#19 - eor r0,r4,r4,ror#11 - eor r2,r2,r10 - vrev32.8 q0,q0 - add r11,r11,r12,ror#6 - eor r12,r4,r5 - eor r0,r0,r4,ror#20 - add r11,r11,r2 - vadd.i32 q8,q8,q0 - ldr r2,[sp,#4] - and r3,r3,r12 - add r7,r7,r11 - add r11,r11,r0,ror#2 - eor r3,r3,r5 - add r10,r10,r2 - eor r2,r8,r9 - eor r0,r7,r7,ror#5 - add r11,r11,r3 - and r2,r2,r7 - eor r3,r0,r7,ror#19 - eor r0,r11,r11,ror#11 - eor r2,r2,r9 - add r10,r10,r3,ror#6 - eor r3,r11,r4 - eor r0,r0,r11,ror#20 - add r10,r10,r2 - ldr r2,[sp,#8] - and r12,r12,r3 - add r6,r6,r10 - add r10,r10,r0,ror#2 - eor r12,r12,r4 - add r9,r9,r2 - eor r2,r7,r8 - eor r0,r6,r6,ror#5 - add r10,r10,r12 - and r2,r2,r6 - eor r12,r0,r6,ror#19 - eor r0,r10,r10,ror#11 - eor r2,r2,r8 - add r9,r9,r12,ror#6 - eor r12,r10,r11 - eor r0,r0,r10,ror#20 - add r9,r9,r2 - ldr r2,[sp,#12] - and r3,r3,r12 - add r5,r5,r9 - add r9,r9,r0,ror#2 - eor r3,r3,r11 - add r8,r8,r2 - eor r2,r6,r7 - eor r0,r5,r5,ror#5 - add r9,r9,r3 - and r2,r2,r5 - eor r3,r0,r5,ror#19 - eor r0,r9,r9,ror#11 - eor r2,r2,r7 - add r8,r8,r3,ror#6 - eor r3,r9,r10 - eor r0,r0,r9,ror#20 - add r8,r8,r2 - ldr r2,[sp,#16] - and r12,r12,r3 - add r4,r4,r8 - add r8,r8,r0,ror#2 - eor r12,r12,r10 - vst1.32 {q8},[r1,:128]! - add r7,r7,r2 - eor r2,r5,r6 - eor r0,r4,r4,ror#5 - add r8,r8,r12 - vld1.32 {q8},[r14,:128]! - and r2,r2,r4 - eor r12,r0,r4,ror#19 - eor r0,r8,r8,ror#11 - eor r2,r2,r6 - vrev32.8 q1,q1 - add r7,r7,r12,ror#6 - eor r12,r8,r9 - eor r0,r0,r8,ror#20 - add r7,r7,r2 - vadd.i32 q8,q8,q1 - ldr r2,[sp,#20] - and r3,r3,r12 - add r11,r11,r7 - add r7,r7,r0,ror#2 - eor r3,r3,r9 - add r6,r6,r2 - eor r2,r4,r5 - eor r0,r11,r11,ror#5 - add r7,r7,r3 - and r2,r2,r11 - eor r3,r0,r11,ror#19 - eor r0,r7,r7,ror#11 - eor r2,r2,r5 - add r6,r6,r3,ror#6 - eor r3,r7,r8 - eor r0,r0,r7,ror#20 - add r6,r6,r2 - ldr r2,[sp,#24] - and r12,r12,r3 - add r10,r10,r6 - add r6,r6,r0,ror#2 - eor r12,r12,r8 - add r5,r5,r2 - eor r2,r11,r4 - eor r0,r10,r10,ror#5 - add r6,r6,r12 - and r2,r2,r10 - eor r12,r0,r10,ror#19 - eor r0,r6,r6,ror#11 - eor r2,r2,r4 - add r5,r5,r12,ror#6 - eor r12,r6,r7 - eor r0,r0,r6,ror#20 - add r5,r5,r2 - ldr r2,[sp,#28] - and r3,r3,r12 - add r9,r9,r5 - add r5,r5,r0,ror#2 - eor r3,r3,r7 - add r4,r4,r2 - eor r2,r10,r11 - eor r0,r9,r9,ror#5 - add r5,r5,r3 - and r2,r2,r9 - eor r3,r0,r9,ror#19 - eor r0,r5,r5,ror#11 - eor r2,r2,r11 - add r4,r4,r3,ror#6 - eor r3,r5,r6 - eor r0,r0,r5,ror#20 - add r4,r4,r2 - ldr r2,[sp,#32] - and r12,r12,r3 - add r8,r8,r4 - add r4,r4,r0,ror#2 - eor r12,r12,r6 - vst1.32 {q8},[r1,:128]! - add r11,r11,r2 - eor r2,r9,r10 - eor r0,r8,r8,ror#5 - add r4,r4,r12 - vld1.32 {q8},[r14,:128]! 
- and r2,r2,r8 - eor r12,r0,r8,ror#19 - eor r0,r4,r4,ror#11 - eor r2,r2,r10 - vrev32.8 q2,q2 - add r11,r11,r12,ror#6 - eor r12,r4,r5 - eor r0,r0,r4,ror#20 - add r11,r11,r2 - vadd.i32 q8,q8,q2 - ldr r2,[sp,#36] - and r3,r3,r12 - add r7,r7,r11 - add r11,r11,r0,ror#2 - eor r3,r3,r5 - add r10,r10,r2 - eor r2,r8,r9 - eor r0,r7,r7,ror#5 - add r11,r11,r3 - and r2,r2,r7 - eor r3,r0,r7,ror#19 - eor r0,r11,r11,ror#11 - eor r2,r2,r9 - add r10,r10,r3,ror#6 - eor r3,r11,r4 - eor r0,r0,r11,ror#20 - add r10,r10,r2 - ldr r2,[sp,#40] - and r12,r12,r3 - add r6,r6,r10 - add r10,r10,r0,ror#2 - eor r12,r12,r4 - add r9,r9,r2 - eor r2,r7,r8 - eor r0,r6,r6,ror#5 - add r10,r10,r12 - and r2,r2,r6 - eor r12,r0,r6,ror#19 - eor r0,r10,r10,ror#11 - eor r2,r2,r8 - add r9,r9,r12,ror#6 - eor r12,r10,r11 - eor r0,r0,r10,ror#20 - add r9,r9,r2 - ldr r2,[sp,#44] - and r3,r3,r12 - add r5,r5,r9 - add r9,r9,r0,ror#2 - eor r3,r3,r11 - add r8,r8,r2 - eor r2,r6,r7 - eor r0,r5,r5,ror#5 - add r9,r9,r3 - and r2,r2,r5 - eor r3,r0,r5,ror#19 - eor r0,r9,r9,ror#11 - eor r2,r2,r7 - add r8,r8,r3,ror#6 - eor r3,r9,r10 - eor r0,r0,r9,ror#20 - add r8,r8,r2 - ldr r2,[sp,#48] - and r12,r12,r3 - add r4,r4,r8 - add r8,r8,r0,ror#2 - eor r12,r12,r10 - vst1.32 {q8},[r1,:128]! - add r7,r7,r2 - eor r2,r5,r6 - eor r0,r4,r4,ror#5 - add r8,r8,r12 - vld1.32 {q8},[r14,:128]! - and r2,r2,r4 - eor r12,r0,r4,ror#19 - eor r0,r8,r8,ror#11 - eor r2,r2,r6 - vrev32.8 q3,q3 - add r7,r7,r12,ror#6 - eor r12,r8,r9 - eor r0,r0,r8,ror#20 - add r7,r7,r2 - vadd.i32 q8,q8,q3 - ldr r2,[sp,#52] - and r3,r3,r12 - add r11,r11,r7 - add r7,r7,r0,ror#2 - eor r3,r3,r9 - add r6,r6,r2 - eor r2,r4,r5 - eor r0,r11,r11,ror#5 - add r7,r7,r3 - and r2,r2,r11 - eor r3,r0,r11,ror#19 - eor r0,r7,r7,ror#11 - eor r2,r2,r5 - add r6,r6,r3,ror#6 - eor r3,r7,r8 - eor r0,r0,r7,ror#20 - add r6,r6,r2 - ldr r2,[sp,#56] - and r12,r12,r3 - add r10,r10,r6 - add r6,r6,r0,ror#2 - eor r12,r12,r8 - add r5,r5,r2 - eor r2,r11,r4 - eor r0,r10,r10,ror#5 - add r6,r6,r12 - and r2,r2,r10 - eor r12,r0,r10,ror#19 - eor r0,r6,r6,ror#11 - eor r2,r2,r4 - add r5,r5,r12,ror#6 - eor r12,r6,r7 - eor r0,r0,r6,ror#20 - add r5,r5,r2 - ldr r2,[sp,#60] - and r3,r3,r12 - add r9,r9,r5 - add r5,r5,r0,ror#2 - eor r3,r3,r7 - add r4,r4,r2 - eor r2,r10,r11 - eor r0,r9,r9,ror#5 - add r5,r5,r3 - and r2,r2,r9 - eor r3,r0,r9,ror#19 - eor r0,r5,r5,ror#11 - eor r2,r2,r11 - add r4,r4,r3,ror#6 - eor r3,r5,r6 - eor r0,r0,r5,ror#20 - add r4,r4,r2 - ldr r2,[sp,#64] - and r12,r12,r3 - add r8,r8,r4 - add r4,r4,r0,ror#2 - eor r12,r12,r6 - vst1.32 {q8},[r1,:128]! 
- ldr r0,[r2,#0] - add r4,r4,r12 @ h+=Maj(a,b,c) from the past - ldr r12,[r2,#4] - ldr r3,[r2,#8] - ldr r1,[r2,#12] - add r4,r4,r0 @ accumulate - ldr r0,[r2,#16] - add r5,r5,r12 - ldr r12,[r2,#20] - add r6,r6,r3 - ldr r3,[r2,#24] - add r7,r7,r1 - ldr r1,[r2,#28] - add r8,r8,r0 - str r4,[r2],#4 - add r9,r9,r12 - str r5,[r2],#4 - add r10,r10,r3 - str r6,[r2],#4 - add r11,r11,r1 - str r7,[r2],#4 - stmia r2,{r8-r11} - - ittte ne - movne r1,sp - ldrne r2,[sp,#0] - eorne r12,r12,r12 - ldreq sp,[sp,#76] @ restore original sp - itt ne - eorne r3,r5,r6 - bne .L_00_48 - - ldmia sp!,{r4-r12,pc} -.size sha256_block_data_order_neon,.-sha256_block_data_order_neon -#endif -#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) - -# ifdef __thumb2__ -# define INST(a,b,c,d) .byte c,d|0xc,a,b -# else -# define INST(a,b,c,d) .byte a,b,c,d -# endif - -.type sha256_block_data_order_armv8,%function -.align 5 -sha256_block_data_order_armv8: -.LARMv8: - vld1.32 {q0,q1},[r0] -# ifdef __thumb2__ - adr r3,.LARMv8 - sub r3,r3,#.LARMv8-K256 -# else - adrl r3,K256 -# endif - add r2,r1,r2,lsl#6 @ len to point at the end of inp - -.Loop_v8: - vld1.8 {q8-q9},[r1]! - vld1.8 {q10-q11},[r1]! - vld1.32 {q12},[r3]! - vrev32.8 q8,q8 - vrev32.8 q9,q9 - vrev32.8 q10,q10 - vrev32.8 q11,q11 - vmov q14,q0 @ offload - vmov q15,q1 - teq r1,r2 - vld1.32 {q13},[r3]! - vadd.i32 q12,q12,q8 - INST(0xe2,0x03,0xfa,0xf3) @ sha256su0 q8,q9 - vmov q2,q0 - INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 - INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 - INST(0xe6,0x0c,0x64,0xf3) @ sha256su1 q8,q10,q11 - vld1.32 {q12},[r3]! - vadd.i32 q13,q13,q9 - INST(0xe4,0x23,0xfa,0xf3) @ sha256su0 q9,q10 - vmov q2,q0 - INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 - INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 - INST(0xe0,0x2c,0x66,0xf3) @ sha256su1 q9,q11,q8 - vld1.32 {q13},[r3]! - vadd.i32 q12,q12,q10 - INST(0xe6,0x43,0xfa,0xf3) @ sha256su0 q10,q11 - vmov q2,q0 - INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 - INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 - INST(0xe2,0x4c,0x60,0xf3) @ sha256su1 q10,q8,q9 - vld1.32 {q12},[r3]! - vadd.i32 q13,q13,q11 - INST(0xe0,0x63,0xfa,0xf3) @ sha256su0 q11,q8 - vmov q2,q0 - INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 - INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 - INST(0xe4,0x6c,0x62,0xf3) @ sha256su1 q11,q9,q10 - vld1.32 {q13},[r3]! - vadd.i32 q12,q12,q8 - INST(0xe2,0x03,0xfa,0xf3) @ sha256su0 q8,q9 - vmov q2,q0 - INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 - INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 - INST(0xe6,0x0c,0x64,0xf3) @ sha256su1 q8,q10,q11 - vld1.32 {q12},[r3]! - vadd.i32 q13,q13,q9 - INST(0xe4,0x23,0xfa,0xf3) @ sha256su0 q9,q10 - vmov q2,q0 - INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 - INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 - INST(0xe0,0x2c,0x66,0xf3) @ sha256su1 q9,q11,q8 - vld1.32 {q13},[r3]! - vadd.i32 q12,q12,q10 - INST(0xe6,0x43,0xfa,0xf3) @ sha256su0 q10,q11 - vmov q2,q0 - INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 - INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 - INST(0xe2,0x4c,0x60,0xf3) @ sha256su1 q10,q8,q9 - vld1.32 {q12},[r3]! - vadd.i32 q13,q13,q11 - INST(0xe0,0x63,0xfa,0xf3) @ sha256su0 q11,q8 - vmov q2,q0 - INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 - INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 - INST(0xe4,0x6c,0x62,0xf3) @ sha256su1 q11,q9,q10 - vld1.32 {q13},[r3]! 
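
The INST() byte sequences above hand-encode the ARMv8 instructions sha256h, sha256h2, sha256su0 and sha256su1 so that the file still assembles with toolchains that do not know those mnemonics; the byte order is permuted when assembling for Thumb-2. On compilers that expose the ARMv8 Crypto Extensions, the same operations are available as ACLE intrinsics; a rough C sketch of one quad-round step under that assumption (variable names are illustrative, and the exact scheduling of the K256 loads differs from the assembly):

#include <arm_neon.h>   /* requires a target with the ARMv8 SHA-2 extensions */

/* One quad-round step: abcd/efgh hold the hash state, w0..w3 hold 16 schedule words. */
static void sha256_quad_round(uint32x4_t *abcd, uint32x4_t *efgh,
			      uint32x4_t *w0, uint32x4_t w1, uint32x4_t w2, uint32x4_t w3,
			      const uint32_t *k)
{
	uint32x4_t wk   = vaddq_u32(*w0, vld1q_u32(k));   /* W[i..i+3] + K256[i..i+3] */
	uint32x4_t save = *abcd;                          /* the "vmov q2,q0 @ offload" above */

	*abcd = vsha256hq_u32(*abcd, *efgh, wk);
	*efgh = vsha256h2q_u32(*efgh, save, wk);
	/* schedule update for a later iteration: sha256su0 followed by sha256su1 */
	*w0 = vsha256su1q_u32(vsha256su0q_u32(*w0, w1), w2, w3);
}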
- vadd.i32 q12,q12,q8 - INST(0xe2,0x03,0xfa,0xf3) @ sha256su0 q8,q9 - vmov q2,q0 - INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 - INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 - INST(0xe6,0x0c,0x64,0xf3) @ sha256su1 q8,q10,q11 - vld1.32 {q12},[r3]! - vadd.i32 q13,q13,q9 - INST(0xe4,0x23,0xfa,0xf3) @ sha256su0 q9,q10 - vmov q2,q0 - INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 - INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 - INST(0xe0,0x2c,0x66,0xf3) @ sha256su1 q9,q11,q8 - vld1.32 {q13},[r3]! - vadd.i32 q12,q12,q10 - INST(0xe6,0x43,0xfa,0xf3) @ sha256su0 q10,q11 - vmov q2,q0 - INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 - INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 - INST(0xe2,0x4c,0x60,0xf3) @ sha256su1 q10,q8,q9 - vld1.32 {q12},[r3]! - vadd.i32 q13,q13,q11 - INST(0xe0,0x63,0xfa,0xf3) @ sha256su0 q11,q8 - vmov q2,q0 - INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 - INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 - INST(0xe4,0x6c,0x62,0xf3) @ sha256su1 q11,q9,q10 - vld1.32 {q13},[r3]! - vadd.i32 q12,q12,q8 - vmov q2,q0 - INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 - INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 - - vld1.32 {q12},[r3]! - vadd.i32 q13,q13,q9 - vmov q2,q0 - INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 - INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 - - vld1.32 {q13},[r3] - vadd.i32 q12,q12,q10 - sub r3,r3,#256-16 @ rewind - vmov q2,q0 - INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12 - INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12 - - vadd.i32 q13,q13,q11 - vmov q2,q0 - INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13 - INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13 - - vadd.i32 q0,q0,q14 - vadd.i32 q1,q1,q15 - it ne - bne .Loop_v8 - - vst1.32 {q0,q1},[r0] - - bx lr @ bx lr -.size sha256_block_data_order_armv8,.-sha256_block_data_order_armv8 -#endif -.asciz "SHA256 block transform for ARMv4/NEON/ARMv8, CRYPTOGAMS by <appro@openssl.org>" -.align 2 -#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) -.comm OPENSSL_armcap_P,4,4 -#endif diff --git a/arch/arm/crypto/sha512-core.S_shipped b/arch/arm/crypto/sha512-core.S_shipped deleted file mode 100644 index 03014624f2ab..000000000000 --- a/arch/arm/crypto/sha512-core.S_shipped +++ /dev/null @@ -1,1869 +0,0 @@ -@ SPDX-License-Identifier: GPL-2.0 - -@ This code is taken from the OpenSSL project but the author (Andy Polyakov) -@ has relicensed it under the GPLv2. Therefore this program is free software; -@ you can redistribute it and/or modify it under the terms of the GNU General -@ Public License version 2 as published by the Free Software Foundation. -@ -@ The original headers, including the original license headers, are -@ included below for completeness. - -@ ==================================================================== -@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL -@ project. The module is, however, dual licensed under OpenSSL and -@ CRYPTOGAMS licenses depending on where you obtain it. For further -@ details see https://www.openssl.org/~appro/cryptogams/. -@ ==================================================================== - -@ SHA512 block procedure for ARMv4. September 2007. - -@ This code is ~4.5 (four and a half) times faster than code generated -@ by gcc 3.4 and it spends ~72 clock cycles per byte [on single-issue -@ Xscale PXA250 core]. -@ -@ July 2010. -@ -@ Rescheduling for dual-issue pipeline resulted in 6% improvement on -@ Cortex A8 core and ~40 cycles per processed byte. - -@ February 2011. 
-@ -@ Profiler-assisted and platform-specific optimization resulted in 7% -@ improvement on Coxtex A8 core and ~38 cycles per byte. - -@ March 2011. -@ -@ Add NEON implementation. On Cortex A8 it was measured to process -@ one byte in 23.3 cycles or ~60% faster than integer-only code. - -@ August 2012. -@ -@ Improve NEON performance by 12% on Snapdragon S4. In absolute -@ terms it's 22.6 cycles per byte, which is disappointing result. -@ Technical writers asserted that 3-way S4 pipeline can sustain -@ multiple NEON instructions per cycle, but dual NEON issue could -@ not be observed, see https://www.openssl.org/~appro/Snapdragon-S4.html -@ for further details. On side note Cortex-A15 processes one byte in -@ 16 cycles. - -@ Byte order [in]dependence. ========================================= -@ -@ Originally caller was expected to maintain specific *dword* order in -@ h[0-7], namely with most significant dword at *lower* address, which -@ was reflected in below two parameters as 0 and 4. Now caller is -@ expected to maintain native byte order for whole 64-bit values. -#ifndef __KERNEL__ -# include "arm_arch.h" -# define VFP_ABI_PUSH vstmdb sp!,{d8-d15} -# define VFP_ABI_POP vldmia sp!,{d8-d15} -#else -# define __ARM_ARCH__ __LINUX_ARM_ARCH__ -# define __ARM_MAX_ARCH__ 7 -# define VFP_ABI_PUSH -# define VFP_ABI_POP -#endif - -#ifdef __ARMEL__ -# define LO 0 -# define HI 4 -# define WORD64(hi0,lo0,hi1,lo1) .word lo0,hi0, lo1,hi1 -#else -# define HI 0 -# define LO 4 -# define WORD64(hi0,lo0,hi1,lo1) .word hi0,lo0, hi1,lo1 -#endif - -.text -#if __ARM_ARCH__<7 -.code 32 -#else -.syntax unified -# ifdef __thumb2__ -.thumb -# else -.code 32 -# endif -#endif - -.type K512,%object -.align 5 -K512: -WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd) -WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc) -WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019) -WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118) -WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe) -WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2) -WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1) -WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694) -WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3) -WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65) -WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483) -WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5) -WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210) -WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4) -WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725) -WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70) -WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926) -WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df) -WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8) -WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b) -WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001) -WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30) -WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910) -WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8) -WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53) -WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8) -WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb) -WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3) -WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60) -WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec) -WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9) -WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b) -WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207) -WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178) 
-WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6) -WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b) -WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493) -WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c) -WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a) -WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817) -.size K512,.-K512 -#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) -.LOPENSSL_armcap: -.word OPENSSL_armcap_P-sha512_block_data_order -.skip 32-4 -#else -.skip 32 -#endif - -.global sha512_block_data_order -.type sha512_block_data_order,%function -sha512_block_data_order: -.Lsha512_block_data_order: -#if __ARM_ARCH__<7 - sub r3,pc,#8 @ sha512_block_data_order -#else - adr r3,.Lsha512_block_data_order -#endif -#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) - ldr r12,.LOPENSSL_armcap - ldr r12,[r3,r12] @ OPENSSL_armcap_P - tst r12,#1 - bne .LNEON -#endif - add r2,r1,r2,lsl#7 @ len to point at the end of inp - stmdb sp!,{r4-r12,lr} - sub r14,r3,#672 @ K512 - sub sp,sp,#9*8 - - ldr r7,[r0,#32+LO] - ldr r8,[r0,#32+HI] - ldr r9, [r0,#48+LO] - ldr r10, [r0,#48+HI] - ldr r11, [r0,#56+LO] - ldr r12, [r0,#56+HI] -.Loop: - str r9, [sp,#48+0] - str r10, [sp,#48+4] - str r11, [sp,#56+0] - str r12, [sp,#56+4] - ldr r5,[r0,#0+LO] - ldr r6,[r0,#0+HI] - ldr r3,[r0,#8+LO] - ldr r4,[r0,#8+HI] - ldr r9, [r0,#16+LO] - ldr r10, [r0,#16+HI] - ldr r11, [r0,#24+LO] - ldr r12, [r0,#24+HI] - str r3,[sp,#8+0] - str r4,[sp,#8+4] - str r9, [sp,#16+0] - str r10, [sp,#16+4] - str r11, [sp,#24+0] - str r12, [sp,#24+4] - ldr r3,[r0,#40+LO] - ldr r4,[r0,#40+HI] - str r3,[sp,#40+0] - str r4,[sp,#40+4] - -.L00_15: -#if __ARM_ARCH__<7 - ldrb r3,[r1,#7] - ldrb r9, [r1,#6] - ldrb r10, [r1,#5] - ldrb r11, [r1,#4] - ldrb r4,[r1,#3] - ldrb r12, [r1,#2] - orr r3,r3,r9,lsl#8 - ldrb r9, [r1,#1] - orr r3,r3,r10,lsl#16 - ldrb r10, [r1],#8 - orr r3,r3,r11,lsl#24 - orr r4,r4,r12,lsl#8 - orr r4,r4,r9,lsl#16 - orr r4,r4,r10,lsl#24 -#else - ldr r3,[r1,#4] - ldr r4,[r1],#8 -#ifdef __ARMEL__ - rev r3,r3 - rev r4,r4 -#endif -#endif - @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41)) - @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23 - @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23 - mov r9,r7,lsr#14 - str r3,[sp,#64+0] - mov r10,r8,lsr#14 - str r4,[sp,#64+4] - eor r9,r9,r8,lsl#18 - ldr r11,[sp,#56+0] @ h.lo - eor r10,r10,r7,lsl#18 - ldr r12,[sp,#56+4] @ h.hi - eor r9,r9,r7,lsr#18 - eor r10,r10,r8,lsr#18 - eor r9,r9,r8,lsl#14 - eor r10,r10,r7,lsl#14 - eor r9,r9,r8,lsr#9 - eor r10,r10,r7,lsr#9 - eor r9,r9,r7,lsl#23 - eor r10,r10,r8,lsl#23 @ Sigma1(e) - adds r3,r3,r9 - ldr r9,[sp,#40+0] @ f.lo - adc r4,r4,r10 @ T += Sigma1(e) - ldr r10,[sp,#40+4] @ f.hi - adds r3,r3,r11 - ldr r11,[sp,#48+0] @ g.lo - adc r4,r4,r12 @ T += h - ldr r12,[sp,#48+4] @ g.hi - - eor r9,r9,r11 - str r7,[sp,#32+0] - eor r10,r10,r12 - str r8,[sp,#32+4] - and r9,r9,r7 - str r5,[sp,#0+0] - and r10,r10,r8 - str r6,[sp,#0+4] - eor r9,r9,r11 - ldr r11,[r14,#LO] @ K[i].lo - eor r10,r10,r12 @ Ch(e,f,g) - ldr r12,[r14,#HI] @ K[i].hi - - adds r3,r3,r9 - ldr r7,[sp,#24+0] @ d.lo - adc r4,r4,r10 @ T += Ch(e,f,g) - ldr r8,[sp,#24+4] @ d.hi - adds r3,r3,r11 - and r9,r11,#0xff - adc r4,r4,r12 @ T += K[i] - adds r7,r7,r3 - ldr r11,[sp,#8+0] @ b.lo - adc r8,r8,r4 @ d += T - teq r9,#148 - - ldr r12,[sp,#16+0] @ c.lo -#if __ARM_ARCH__>=7 - it eq @ Thumb2 thing, sanity check in ARM -#endif - orreq r14,r14,#1 - @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39)) - @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25 - @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25 
- mov r9,r5,lsr#28 - mov r10,r6,lsr#28 - eor r9,r9,r6,lsl#4 - eor r10,r10,r5,lsl#4 - eor r9,r9,r6,lsr#2 - eor r10,r10,r5,lsr#2 - eor r9,r9,r5,lsl#30 - eor r10,r10,r6,lsl#30 - eor r9,r9,r6,lsr#7 - eor r10,r10,r5,lsr#7 - eor r9,r9,r5,lsl#25 - eor r10,r10,r6,lsl#25 @ Sigma0(a) - adds r3,r3,r9 - and r9,r5,r11 - adc r4,r4,r10 @ T += Sigma0(a) - - ldr r10,[sp,#8+4] @ b.hi - orr r5,r5,r11 - ldr r11,[sp,#16+4] @ c.hi - and r5,r5,r12 - and r12,r6,r10 - orr r6,r6,r10 - orr r5,r5,r9 @ Maj(a,b,c).lo - and r6,r6,r11 - adds r5,r5,r3 - orr r6,r6,r12 @ Maj(a,b,c).hi - sub sp,sp,#8 - adc r6,r6,r4 @ h += T - tst r14,#1 - add r14,r14,#8 - tst r14,#1 - beq .L00_15 - ldr r9,[sp,#184+0] - ldr r10,[sp,#184+4] - bic r14,r14,#1 -.L16_79: - @ sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7)) - @ LO lo>>1^hi<<31 ^ lo>>8^hi<<24 ^ lo>>7^hi<<25 - @ HI hi>>1^lo<<31 ^ hi>>8^lo<<24 ^ hi>>7 - mov r3,r9,lsr#1 - ldr r11,[sp,#80+0] - mov r4,r10,lsr#1 - ldr r12,[sp,#80+4] - eor r3,r3,r10,lsl#31 - eor r4,r4,r9,lsl#31 - eor r3,r3,r9,lsr#8 - eor r4,r4,r10,lsr#8 - eor r3,r3,r10,lsl#24 - eor r4,r4,r9,lsl#24 - eor r3,r3,r9,lsr#7 - eor r4,r4,r10,lsr#7 - eor r3,r3,r10,lsl#25 - - @ sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6)) - @ LO lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26 - @ HI hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6 - mov r9,r11,lsr#19 - mov r10,r12,lsr#19 - eor r9,r9,r12,lsl#13 - eor r10,r10,r11,lsl#13 - eor r9,r9,r12,lsr#29 - eor r10,r10,r11,lsr#29 - eor r9,r9,r11,lsl#3 - eor r10,r10,r12,lsl#3 - eor r9,r9,r11,lsr#6 - eor r10,r10,r12,lsr#6 - ldr r11,[sp,#120+0] - eor r9,r9,r12,lsl#26 - - ldr r12,[sp,#120+4] - adds r3,r3,r9 - ldr r9,[sp,#192+0] - adc r4,r4,r10 - - ldr r10,[sp,#192+4] - adds r3,r3,r11 - adc r4,r4,r12 - adds r3,r3,r9 - adc r4,r4,r10 - @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41)) - @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23 - @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23 - mov r9,r7,lsr#14 - str r3,[sp,#64+0] - mov r10,r8,lsr#14 - str r4,[sp,#64+4] - eor r9,r9,r8,lsl#18 - ldr r11,[sp,#56+0] @ h.lo - eor r10,r10,r7,lsl#18 - ldr r12,[sp,#56+4] @ h.hi - eor r9,r9,r7,lsr#18 - eor r10,r10,r8,lsr#18 - eor r9,r9,r8,lsl#14 - eor r10,r10,r7,lsl#14 - eor r9,r9,r8,lsr#9 - eor r10,r10,r7,lsr#9 - eor r9,r9,r7,lsl#23 - eor r10,r10,r8,lsl#23 @ Sigma1(e) - adds r3,r3,r9 - ldr r9,[sp,#40+0] @ f.lo - adc r4,r4,r10 @ T += Sigma1(e) - ldr r10,[sp,#40+4] @ f.hi - adds r3,r3,r11 - ldr r11,[sp,#48+0] @ g.lo - adc r4,r4,r12 @ T += h - ldr r12,[sp,#48+4] @ g.hi - - eor r9,r9,r11 - str r7,[sp,#32+0] - eor r10,r10,r12 - str r8,[sp,#32+4] - and r9,r9,r7 - str r5,[sp,#0+0] - and r10,r10,r8 - str r6,[sp,#0+4] - eor r9,r9,r11 - ldr r11,[r14,#LO] @ K[i].lo - eor r10,r10,r12 @ Ch(e,f,g) - ldr r12,[r14,#HI] @ K[i].hi - - adds r3,r3,r9 - ldr r7,[sp,#24+0] @ d.lo - adc r4,r4,r10 @ T += Ch(e,f,g) - ldr r8,[sp,#24+4] @ d.hi - adds r3,r3,r11 - and r9,r11,#0xff - adc r4,r4,r12 @ T += K[i] - adds r7,r7,r3 - ldr r11,[sp,#8+0] @ b.lo - adc r8,r8,r4 @ d += T - teq r9,#23 - - ldr r12,[sp,#16+0] @ c.lo -#if __ARM_ARCH__>=7 - it eq @ Thumb2 thing, sanity check in ARM -#endif - orreq r14,r14,#1 - @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39)) - @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25 - @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25 - mov r9,r5,lsr#28 - mov r10,r6,lsr#28 - eor r9,r9,r6,lsl#4 - eor r10,r10,r5,lsl#4 - eor r9,r9,r6,lsr#2 - eor r10,r10,r5,lsr#2 - eor r9,r9,r5,lsl#30 - eor r10,r10,r6,lsl#30 - eor r9,r9,r6,lsr#7 - eor r10,r10,r5,lsr#7 - eor r9,r9,r5,lsl#25 - eor r10,r10,r6,lsl#25 @ Sigma0(a) - adds r3,r3,r9 - 
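
On pre-ARMv7 integer code there are no 64-bit registers, so the deleted SHA-512 rounds above keep every 64-bit quantity as a (hi, lo) pair of 32-bit registers and build each 64-bit rotation from four 32-bit shifts, exactly as the LO/HI comments spell out. A small C sketch of that decomposition for Sigma1 (helper names are illustrative, not from this file):

#include <stdint.h>

struct u64pair { uint32_t lo, hi; };   /* one 64-bit word held as two 32-bit halves */

/* ROTR64 by n (0 < n < 32) on a split value: each half picks up bits from the other. */
static struct u64pair rotr64(struct u64pair x, int n)
{
	struct u64pair r;
	r.lo = (x.lo >> n) | (x.hi << (32 - n));
	r.hi = (x.hi >> n) | (x.lo << (32 - n));
	return r;
}

/* Rotations by 32..63 swap the halves first, e.g. ROTR64(x,41) == ROTR64(swap(x),9). */
static struct u64pair rotr64_big(struct u64pair x, int n)
{
	struct u64pair s = { x.hi, x.lo };
	return rotr64(s, n - 32);
}

/* Sigma1(x) = ROTR(x,14) ^ ROTR(x,18) ^ ROTR(x,41), matching the comment above:
 * LO = lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23, and symmetrically for HI. */
static struct u64pair Sigma1(struct u64pair x)
{
	struct u64pair a = rotr64(x, 14), b = rotr64(x, 18), c = rotr64_big(x, 41);
	struct u64pair r = { a.lo ^ b.lo ^ c.lo, a.hi ^ b.hi ^ c.hi };
	return r;
}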
and r9,r5,r11 - adc r4,r4,r10 @ T += Sigma0(a) - - ldr r10,[sp,#8+4] @ b.hi - orr r5,r5,r11 - ldr r11,[sp,#16+4] @ c.hi - and r5,r5,r12 - and r12,r6,r10 - orr r6,r6,r10 - orr r5,r5,r9 @ Maj(a,b,c).lo - and r6,r6,r11 - adds r5,r5,r3 - orr r6,r6,r12 @ Maj(a,b,c).hi - sub sp,sp,#8 - adc r6,r6,r4 @ h += T - tst r14,#1 - add r14,r14,#8 -#if __ARM_ARCH__>=7 - ittt eq @ Thumb2 thing, sanity check in ARM -#endif - ldreq r9,[sp,#184+0] - ldreq r10,[sp,#184+4] - beq .L16_79 - bic r14,r14,#1 - - ldr r3,[sp,#8+0] - ldr r4,[sp,#8+4] - ldr r9, [r0,#0+LO] - ldr r10, [r0,#0+HI] - ldr r11, [r0,#8+LO] - ldr r12, [r0,#8+HI] - adds r9,r5,r9 - str r9, [r0,#0+LO] - adc r10,r6,r10 - str r10, [r0,#0+HI] - adds r11,r3,r11 - str r11, [r0,#8+LO] - adc r12,r4,r12 - str r12, [r0,#8+HI] - - ldr r5,[sp,#16+0] - ldr r6,[sp,#16+4] - ldr r3,[sp,#24+0] - ldr r4,[sp,#24+4] - ldr r9, [r0,#16+LO] - ldr r10, [r0,#16+HI] - ldr r11, [r0,#24+LO] - ldr r12, [r0,#24+HI] - adds r9,r5,r9 - str r9, [r0,#16+LO] - adc r10,r6,r10 - str r10, [r0,#16+HI] - adds r11,r3,r11 - str r11, [r0,#24+LO] - adc r12,r4,r12 - str r12, [r0,#24+HI] - - ldr r3,[sp,#40+0] - ldr r4,[sp,#40+4] - ldr r9, [r0,#32+LO] - ldr r10, [r0,#32+HI] - ldr r11, [r0,#40+LO] - ldr r12, [r0,#40+HI] - adds r7,r7,r9 - str r7,[r0,#32+LO] - adc r8,r8,r10 - str r8,[r0,#32+HI] - adds r11,r3,r11 - str r11, [r0,#40+LO] - adc r12,r4,r12 - str r12, [r0,#40+HI] - - ldr r5,[sp,#48+0] - ldr r6,[sp,#48+4] - ldr r3,[sp,#56+0] - ldr r4,[sp,#56+4] - ldr r9, [r0,#48+LO] - ldr r10, [r0,#48+HI] - ldr r11, [r0,#56+LO] - ldr r12, [r0,#56+HI] - adds r9,r5,r9 - str r9, [r0,#48+LO] - adc r10,r6,r10 - str r10, [r0,#48+HI] - adds r11,r3,r11 - str r11, [r0,#56+LO] - adc r12,r4,r12 - str r12, [r0,#56+HI] - - add sp,sp,#640 - sub r14,r14,#640 - - teq r1,r2 - bne .Loop - - add sp,sp,#8*9 @ destroy frame -#if __ARM_ARCH__>=5 - ldmia sp!,{r4-r12,pc} -#else - ldmia sp!,{r4-r12,lr} - tst lr,#1 - moveq pc,lr @ be binary compatible with V4, yet - .word 0xe12fff1e @ interoperable with Thumb ISA:-) -#endif -.size sha512_block_data_order,.-sha512_block_data_order -#if __ARM_MAX_ARCH__>=7 -.arch armv7-a -.fpu neon - -.global sha512_block_data_order_neon -.type sha512_block_data_order_neon,%function -.align 4 -sha512_block_data_order_neon: -.LNEON: - dmb @ errata #451034 on early Cortex A8 - add r2,r1,r2,lsl#7 @ len to point at the end of inp - VFP_ABI_PUSH - adr r3,.Lsha512_block_data_order - sub r3,r3,.Lsha512_block_data_order-K512 - vldmia r0,{d16-d23} @ load context -.Loop_neon: - vshr.u64 d24,d20,#14 @ 0 -#if 0<16 - vld1.64 {d0},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d20,#18 -#if 0>0 - vadd.i64 d16,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d20,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d20,#50 - vsli.64 d25,d20,#46 - vmov d29,d20 - vsli.64 d26,d20,#23 -#if 0<16 && defined(__ARMEL__) - vrev64.8 d0,d0 -#endif - veor d25,d24 - vbsl d29,d21,d22 @ Ch(e,f,g) - vshr.u64 d24,d16,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d23 - vshr.u64 d25,d16,#34 - vsli.64 d24,d16,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d16,#39 - vadd.i64 d28,d0 - vsli.64 d25,d16,#30 - veor d30,d16,d17 - vsli.64 d26,d16,#25 - veor d23,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d18,d17 @ Maj(a,b,c) - veor d23,d26 @ Sigma0(a) - vadd.i64 d19,d27 - vadd.i64 d30,d27 - @ vadd.i64 d23,d30 - vshr.u64 d24,d19,#14 @ 1 -#if 1<16 - vld1.64 {d1},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d19,#18 -#if 1>0 - vadd.i64 d23,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d19,#41 - vld1.64 {d28},[r3,:64]! 
@ K[i++] - vsli.64 d24,d19,#50 - vsli.64 d25,d19,#46 - vmov d29,d19 - vsli.64 d26,d19,#23 -#if 1<16 && defined(__ARMEL__) - vrev64.8 d1,d1 -#endif - veor d25,d24 - vbsl d29,d20,d21 @ Ch(e,f,g) - vshr.u64 d24,d23,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d22 - vshr.u64 d25,d23,#34 - vsli.64 d24,d23,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d23,#39 - vadd.i64 d28,d1 - vsli.64 d25,d23,#30 - veor d30,d23,d16 - vsli.64 d26,d23,#25 - veor d22,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d17,d16 @ Maj(a,b,c) - veor d22,d26 @ Sigma0(a) - vadd.i64 d18,d27 - vadd.i64 d30,d27 - @ vadd.i64 d22,d30 - vshr.u64 d24,d18,#14 @ 2 -#if 2<16 - vld1.64 {d2},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d18,#18 -#if 2>0 - vadd.i64 d22,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d18,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d18,#50 - vsli.64 d25,d18,#46 - vmov d29,d18 - vsli.64 d26,d18,#23 -#if 2<16 && defined(__ARMEL__) - vrev64.8 d2,d2 -#endif - veor d25,d24 - vbsl d29,d19,d20 @ Ch(e,f,g) - vshr.u64 d24,d22,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d21 - vshr.u64 d25,d22,#34 - vsli.64 d24,d22,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d22,#39 - vadd.i64 d28,d2 - vsli.64 d25,d22,#30 - veor d30,d22,d23 - vsli.64 d26,d22,#25 - veor d21,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d16,d23 @ Maj(a,b,c) - veor d21,d26 @ Sigma0(a) - vadd.i64 d17,d27 - vadd.i64 d30,d27 - @ vadd.i64 d21,d30 - vshr.u64 d24,d17,#14 @ 3 -#if 3<16 - vld1.64 {d3},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d17,#18 -#if 3>0 - vadd.i64 d21,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d17,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d17,#50 - vsli.64 d25,d17,#46 - vmov d29,d17 - vsli.64 d26,d17,#23 -#if 3<16 && defined(__ARMEL__) - vrev64.8 d3,d3 -#endif - veor d25,d24 - vbsl d29,d18,d19 @ Ch(e,f,g) - vshr.u64 d24,d21,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d20 - vshr.u64 d25,d21,#34 - vsli.64 d24,d21,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d21,#39 - vadd.i64 d28,d3 - vsli.64 d25,d21,#30 - veor d30,d21,d22 - vsli.64 d26,d21,#25 - veor d20,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d23,d22 @ Maj(a,b,c) - veor d20,d26 @ Sigma0(a) - vadd.i64 d16,d27 - vadd.i64 d30,d27 - @ vadd.i64 d20,d30 - vshr.u64 d24,d16,#14 @ 4 -#if 4<16 - vld1.64 {d4},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d16,#18 -#if 4>0 - vadd.i64 d20,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d16,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d16,#50 - vsli.64 d25,d16,#46 - vmov d29,d16 - vsli.64 d26,d16,#23 -#if 4<16 && defined(__ARMEL__) - vrev64.8 d4,d4 -#endif - veor d25,d24 - vbsl d29,d17,d18 @ Ch(e,f,g) - vshr.u64 d24,d20,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d19 - vshr.u64 d25,d20,#34 - vsli.64 d24,d20,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d20,#39 - vadd.i64 d28,d4 - vsli.64 d25,d20,#30 - veor d30,d20,d21 - vsli.64 d26,d20,#25 - veor d19,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d22,d21 @ Maj(a,b,c) - veor d19,d26 @ Sigma0(a) - vadd.i64 d23,d27 - vadd.i64 d30,d27 - @ vadd.i64 d19,d30 - vshr.u64 d24,d23,#14 @ 5 -#if 5<16 - vld1.64 {d5},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d23,#18 -#if 5>0 - vadd.i64 d19,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d23,#41 - vld1.64 {d28},[r3,:64]! 
@ K[i++] - vsli.64 d24,d23,#50 - vsli.64 d25,d23,#46 - vmov d29,d23 - vsli.64 d26,d23,#23 -#if 5<16 && defined(__ARMEL__) - vrev64.8 d5,d5 -#endif - veor d25,d24 - vbsl d29,d16,d17 @ Ch(e,f,g) - vshr.u64 d24,d19,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d18 - vshr.u64 d25,d19,#34 - vsli.64 d24,d19,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d19,#39 - vadd.i64 d28,d5 - vsli.64 d25,d19,#30 - veor d30,d19,d20 - vsli.64 d26,d19,#25 - veor d18,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d21,d20 @ Maj(a,b,c) - veor d18,d26 @ Sigma0(a) - vadd.i64 d22,d27 - vadd.i64 d30,d27 - @ vadd.i64 d18,d30 - vshr.u64 d24,d22,#14 @ 6 -#if 6<16 - vld1.64 {d6},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d22,#18 -#if 6>0 - vadd.i64 d18,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d22,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d22,#50 - vsli.64 d25,d22,#46 - vmov d29,d22 - vsli.64 d26,d22,#23 -#if 6<16 && defined(__ARMEL__) - vrev64.8 d6,d6 -#endif - veor d25,d24 - vbsl d29,d23,d16 @ Ch(e,f,g) - vshr.u64 d24,d18,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d17 - vshr.u64 d25,d18,#34 - vsli.64 d24,d18,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d18,#39 - vadd.i64 d28,d6 - vsli.64 d25,d18,#30 - veor d30,d18,d19 - vsli.64 d26,d18,#25 - veor d17,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d20,d19 @ Maj(a,b,c) - veor d17,d26 @ Sigma0(a) - vadd.i64 d21,d27 - vadd.i64 d30,d27 - @ vadd.i64 d17,d30 - vshr.u64 d24,d21,#14 @ 7 -#if 7<16 - vld1.64 {d7},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d21,#18 -#if 7>0 - vadd.i64 d17,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d21,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d21,#50 - vsli.64 d25,d21,#46 - vmov d29,d21 - vsli.64 d26,d21,#23 -#if 7<16 && defined(__ARMEL__) - vrev64.8 d7,d7 -#endif - veor d25,d24 - vbsl d29,d22,d23 @ Ch(e,f,g) - vshr.u64 d24,d17,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d16 - vshr.u64 d25,d17,#34 - vsli.64 d24,d17,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d17,#39 - vadd.i64 d28,d7 - vsli.64 d25,d17,#30 - veor d30,d17,d18 - vsli.64 d26,d17,#25 - veor d16,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d19,d18 @ Maj(a,b,c) - veor d16,d26 @ Sigma0(a) - vadd.i64 d20,d27 - vadd.i64 d30,d27 - @ vadd.i64 d16,d30 - vshr.u64 d24,d20,#14 @ 8 -#if 8<16 - vld1.64 {d8},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d20,#18 -#if 8>0 - vadd.i64 d16,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d20,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d20,#50 - vsli.64 d25,d20,#46 - vmov d29,d20 - vsli.64 d26,d20,#23 -#if 8<16 && defined(__ARMEL__) - vrev64.8 d8,d8 -#endif - veor d25,d24 - vbsl d29,d21,d22 @ Ch(e,f,g) - vshr.u64 d24,d16,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d23 - vshr.u64 d25,d16,#34 - vsli.64 d24,d16,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d16,#39 - vadd.i64 d28,d8 - vsli.64 d25,d16,#30 - veor d30,d16,d17 - vsli.64 d26,d16,#25 - veor d23,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d18,d17 @ Maj(a,b,c) - veor d23,d26 @ Sigma0(a) - vadd.i64 d19,d27 - vadd.i64 d30,d27 - @ vadd.i64 d23,d30 - vshr.u64 d24,d19,#14 @ 9 -#if 9<16 - vld1.64 {d9},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d19,#18 -#if 9>0 - vadd.i64 d23,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d19,#41 - vld1.64 {d28},[r3,:64]! 
@ K[i++] - vsli.64 d24,d19,#50 - vsli.64 d25,d19,#46 - vmov d29,d19 - vsli.64 d26,d19,#23 -#if 9<16 && defined(__ARMEL__) - vrev64.8 d9,d9 -#endif - veor d25,d24 - vbsl d29,d20,d21 @ Ch(e,f,g) - vshr.u64 d24,d23,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d22 - vshr.u64 d25,d23,#34 - vsli.64 d24,d23,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d23,#39 - vadd.i64 d28,d9 - vsli.64 d25,d23,#30 - veor d30,d23,d16 - vsli.64 d26,d23,#25 - veor d22,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d17,d16 @ Maj(a,b,c) - veor d22,d26 @ Sigma0(a) - vadd.i64 d18,d27 - vadd.i64 d30,d27 - @ vadd.i64 d22,d30 - vshr.u64 d24,d18,#14 @ 10 -#if 10<16 - vld1.64 {d10},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d18,#18 -#if 10>0 - vadd.i64 d22,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d18,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d18,#50 - vsli.64 d25,d18,#46 - vmov d29,d18 - vsli.64 d26,d18,#23 -#if 10<16 && defined(__ARMEL__) - vrev64.8 d10,d10 -#endif - veor d25,d24 - vbsl d29,d19,d20 @ Ch(e,f,g) - vshr.u64 d24,d22,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d21 - vshr.u64 d25,d22,#34 - vsli.64 d24,d22,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d22,#39 - vadd.i64 d28,d10 - vsli.64 d25,d22,#30 - veor d30,d22,d23 - vsli.64 d26,d22,#25 - veor d21,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d16,d23 @ Maj(a,b,c) - veor d21,d26 @ Sigma0(a) - vadd.i64 d17,d27 - vadd.i64 d30,d27 - @ vadd.i64 d21,d30 - vshr.u64 d24,d17,#14 @ 11 -#if 11<16 - vld1.64 {d11},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d17,#18 -#if 11>0 - vadd.i64 d21,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d17,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d17,#50 - vsli.64 d25,d17,#46 - vmov d29,d17 - vsli.64 d26,d17,#23 -#if 11<16 && defined(__ARMEL__) - vrev64.8 d11,d11 -#endif - veor d25,d24 - vbsl d29,d18,d19 @ Ch(e,f,g) - vshr.u64 d24,d21,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d20 - vshr.u64 d25,d21,#34 - vsli.64 d24,d21,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d21,#39 - vadd.i64 d28,d11 - vsli.64 d25,d21,#30 - veor d30,d21,d22 - vsli.64 d26,d21,#25 - veor d20,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d23,d22 @ Maj(a,b,c) - veor d20,d26 @ Sigma0(a) - vadd.i64 d16,d27 - vadd.i64 d30,d27 - @ vadd.i64 d20,d30 - vshr.u64 d24,d16,#14 @ 12 -#if 12<16 - vld1.64 {d12},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d16,#18 -#if 12>0 - vadd.i64 d20,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d16,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d16,#50 - vsli.64 d25,d16,#46 - vmov d29,d16 - vsli.64 d26,d16,#23 -#if 12<16 && defined(__ARMEL__) - vrev64.8 d12,d12 -#endif - veor d25,d24 - vbsl d29,d17,d18 @ Ch(e,f,g) - vshr.u64 d24,d20,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d19 - vshr.u64 d25,d20,#34 - vsli.64 d24,d20,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d20,#39 - vadd.i64 d28,d12 - vsli.64 d25,d20,#30 - veor d30,d20,d21 - vsli.64 d26,d20,#25 - veor d19,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d22,d21 @ Maj(a,b,c) - veor d19,d26 @ Sigma0(a) - vadd.i64 d23,d27 - vadd.i64 d30,d27 - @ vadd.i64 d19,d30 - vshr.u64 d24,d23,#14 @ 13 -#if 13<16 - vld1.64 {d13},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d23,#18 -#if 13>0 - vadd.i64 d19,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d23,#41 - vld1.64 {d28},[r3,:64]! 
@ K[i++] - vsli.64 d24,d23,#50 - vsli.64 d25,d23,#46 - vmov d29,d23 - vsli.64 d26,d23,#23 -#if 13<16 && defined(__ARMEL__) - vrev64.8 d13,d13 -#endif - veor d25,d24 - vbsl d29,d16,d17 @ Ch(e,f,g) - vshr.u64 d24,d19,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d18 - vshr.u64 d25,d19,#34 - vsli.64 d24,d19,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d19,#39 - vadd.i64 d28,d13 - vsli.64 d25,d19,#30 - veor d30,d19,d20 - vsli.64 d26,d19,#25 - veor d18,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d21,d20 @ Maj(a,b,c) - veor d18,d26 @ Sigma0(a) - vadd.i64 d22,d27 - vadd.i64 d30,d27 - @ vadd.i64 d18,d30 - vshr.u64 d24,d22,#14 @ 14 -#if 14<16 - vld1.64 {d14},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d22,#18 -#if 14>0 - vadd.i64 d18,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d22,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d22,#50 - vsli.64 d25,d22,#46 - vmov d29,d22 - vsli.64 d26,d22,#23 -#if 14<16 && defined(__ARMEL__) - vrev64.8 d14,d14 -#endif - veor d25,d24 - vbsl d29,d23,d16 @ Ch(e,f,g) - vshr.u64 d24,d18,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d17 - vshr.u64 d25,d18,#34 - vsli.64 d24,d18,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d18,#39 - vadd.i64 d28,d14 - vsli.64 d25,d18,#30 - veor d30,d18,d19 - vsli.64 d26,d18,#25 - veor d17,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d20,d19 @ Maj(a,b,c) - veor d17,d26 @ Sigma0(a) - vadd.i64 d21,d27 - vadd.i64 d30,d27 - @ vadd.i64 d17,d30 - vshr.u64 d24,d21,#14 @ 15 -#if 15<16 - vld1.64 {d15},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d21,#18 -#if 15>0 - vadd.i64 d17,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d21,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d21,#50 - vsli.64 d25,d21,#46 - vmov d29,d21 - vsli.64 d26,d21,#23 -#if 15<16 && defined(__ARMEL__) - vrev64.8 d15,d15 -#endif - veor d25,d24 - vbsl d29,d22,d23 @ Ch(e,f,g) - vshr.u64 d24,d17,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d16 - vshr.u64 d25,d17,#34 - vsli.64 d24,d17,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d17,#39 - vadd.i64 d28,d15 - vsli.64 d25,d17,#30 - veor d30,d17,d18 - vsli.64 d26,d17,#25 - veor d16,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d19,d18 @ Maj(a,b,c) - veor d16,d26 @ Sigma0(a) - vadd.i64 d20,d27 - vadd.i64 d30,d27 - @ vadd.i64 d16,d30 - mov r12,#4 -.L16_79_neon: - subs r12,#1 - vshr.u64 q12,q7,#19 - vshr.u64 q13,q7,#61 - vadd.i64 d16,d30 @ h+=Maj from the past - vshr.u64 q15,q7,#6 - vsli.64 q12,q7,#45 - vext.8 q14,q0,q1,#8 @ X[i+1] - vsli.64 q13,q7,#3 - veor q15,q12 - vshr.u64 q12,q14,#1 - veor q15,q13 @ sigma1(X[i+14]) - vshr.u64 q13,q14,#8 - vadd.i64 q0,q15 - vshr.u64 q15,q14,#7 - vsli.64 q12,q14,#63 - vsli.64 q13,q14,#56 - vext.8 q14,q4,q5,#8 @ X[i+9] - veor q15,q12 - vshr.u64 d24,d20,#14 @ from NEON_00_15 - vadd.i64 q0,q14 - vshr.u64 d25,d20,#18 @ from NEON_00_15 - veor q15,q13 @ sigma0(X[i+1]) - vshr.u64 d26,d20,#41 @ from NEON_00_15 - vadd.i64 q0,q15 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d20,#50 - vsli.64 d25,d20,#46 - vmov d29,d20 - vsli.64 d26,d20,#23 -#if 16<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d21,d22 @ Ch(e,f,g) - vshr.u64 d24,d16,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d23 - vshr.u64 d25,d16,#34 - vsli.64 d24,d16,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d16,#39 - vadd.i64 d28,d0 - vsli.64 d25,d16,#30 - veor d30,d16,d17 - vsli.64 d26,d16,#25 - veor d23,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d18,d17 @ Maj(a,b,c) - veor d23,d26 @ Sigma0(a) - vadd.i64 d19,d27 - vadd.i64 d30,d27 - @ vadd.i64 d23,d30 - vshr.u64 d24,d19,#14 @ 17 -#if 17<16 - vld1.64 {d1},[r1]! 
@ handles unaligned -#endif - vshr.u64 d25,d19,#18 -#if 17>0 - vadd.i64 d23,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d19,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d19,#50 - vsli.64 d25,d19,#46 - vmov d29,d19 - vsli.64 d26,d19,#23 -#if 17<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d20,d21 @ Ch(e,f,g) - vshr.u64 d24,d23,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d22 - vshr.u64 d25,d23,#34 - vsli.64 d24,d23,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d23,#39 - vadd.i64 d28,d1 - vsli.64 d25,d23,#30 - veor d30,d23,d16 - vsli.64 d26,d23,#25 - veor d22,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d17,d16 @ Maj(a,b,c) - veor d22,d26 @ Sigma0(a) - vadd.i64 d18,d27 - vadd.i64 d30,d27 - @ vadd.i64 d22,d30 - vshr.u64 q12,q0,#19 - vshr.u64 q13,q0,#61 - vadd.i64 d22,d30 @ h+=Maj from the past - vshr.u64 q15,q0,#6 - vsli.64 q12,q0,#45 - vext.8 q14,q1,q2,#8 @ X[i+1] - vsli.64 q13,q0,#3 - veor q15,q12 - vshr.u64 q12,q14,#1 - veor q15,q13 @ sigma1(X[i+14]) - vshr.u64 q13,q14,#8 - vadd.i64 q1,q15 - vshr.u64 q15,q14,#7 - vsli.64 q12,q14,#63 - vsli.64 q13,q14,#56 - vext.8 q14,q5,q6,#8 @ X[i+9] - veor q15,q12 - vshr.u64 d24,d18,#14 @ from NEON_00_15 - vadd.i64 q1,q14 - vshr.u64 d25,d18,#18 @ from NEON_00_15 - veor q15,q13 @ sigma0(X[i+1]) - vshr.u64 d26,d18,#41 @ from NEON_00_15 - vadd.i64 q1,q15 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d18,#50 - vsli.64 d25,d18,#46 - vmov d29,d18 - vsli.64 d26,d18,#23 -#if 18<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d19,d20 @ Ch(e,f,g) - vshr.u64 d24,d22,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d21 - vshr.u64 d25,d22,#34 - vsli.64 d24,d22,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d22,#39 - vadd.i64 d28,d2 - vsli.64 d25,d22,#30 - veor d30,d22,d23 - vsli.64 d26,d22,#25 - veor d21,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d16,d23 @ Maj(a,b,c) - veor d21,d26 @ Sigma0(a) - vadd.i64 d17,d27 - vadd.i64 d30,d27 - @ vadd.i64 d21,d30 - vshr.u64 d24,d17,#14 @ 19 -#if 19<16 - vld1.64 {d3},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d17,#18 -#if 19>0 - vadd.i64 d21,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d17,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d17,#50 - vsli.64 d25,d17,#46 - vmov d29,d17 - vsli.64 d26,d17,#23 -#if 19<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d18,d19 @ Ch(e,f,g) - vshr.u64 d24,d21,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d20 - vshr.u64 d25,d21,#34 - vsli.64 d24,d21,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d21,#39 - vadd.i64 d28,d3 - vsli.64 d25,d21,#30 - veor d30,d21,d22 - vsli.64 d26,d21,#25 - veor d20,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d23,d22 @ Maj(a,b,c) - veor d20,d26 @ Sigma0(a) - vadd.i64 d16,d27 - vadd.i64 d30,d27 - @ vadd.i64 d20,d30 - vshr.u64 q12,q1,#19 - vshr.u64 q13,q1,#61 - vadd.i64 d20,d30 @ h+=Maj from the past - vshr.u64 q15,q1,#6 - vsli.64 q12,q1,#45 - vext.8 q14,q2,q3,#8 @ X[i+1] - vsli.64 q13,q1,#3 - veor q15,q12 - vshr.u64 q12,q14,#1 - veor q15,q13 @ sigma1(X[i+14]) - vshr.u64 q13,q14,#8 - vadd.i64 q2,q15 - vshr.u64 q15,q14,#7 - vsli.64 q12,q14,#63 - vsli.64 q13,q14,#56 - vext.8 q14,q6,q7,#8 @ X[i+9] - veor q15,q12 - vshr.u64 d24,d16,#14 @ from NEON_00_15 - vadd.i64 q2,q14 - vshr.u64 d25,d16,#18 @ from NEON_00_15 - veor q15,q13 @ sigma0(X[i+1]) - vshr.u64 d26,d16,#41 @ from NEON_00_15 - vadd.i64 q2,q15 - vld1.64 {d28},[r3,:64]! 
@ K[i++] - vsli.64 d24,d16,#50 - vsli.64 d25,d16,#46 - vmov d29,d16 - vsli.64 d26,d16,#23 -#if 20<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d17,d18 @ Ch(e,f,g) - vshr.u64 d24,d20,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d19 - vshr.u64 d25,d20,#34 - vsli.64 d24,d20,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d20,#39 - vadd.i64 d28,d4 - vsli.64 d25,d20,#30 - veor d30,d20,d21 - vsli.64 d26,d20,#25 - veor d19,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d22,d21 @ Maj(a,b,c) - veor d19,d26 @ Sigma0(a) - vadd.i64 d23,d27 - vadd.i64 d30,d27 - @ vadd.i64 d19,d30 - vshr.u64 d24,d23,#14 @ 21 -#if 21<16 - vld1.64 {d5},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d23,#18 -#if 21>0 - vadd.i64 d19,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d23,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d23,#50 - vsli.64 d25,d23,#46 - vmov d29,d23 - vsli.64 d26,d23,#23 -#if 21<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d16,d17 @ Ch(e,f,g) - vshr.u64 d24,d19,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d18 - vshr.u64 d25,d19,#34 - vsli.64 d24,d19,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d19,#39 - vadd.i64 d28,d5 - vsli.64 d25,d19,#30 - veor d30,d19,d20 - vsli.64 d26,d19,#25 - veor d18,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d21,d20 @ Maj(a,b,c) - veor d18,d26 @ Sigma0(a) - vadd.i64 d22,d27 - vadd.i64 d30,d27 - @ vadd.i64 d18,d30 - vshr.u64 q12,q2,#19 - vshr.u64 q13,q2,#61 - vadd.i64 d18,d30 @ h+=Maj from the past - vshr.u64 q15,q2,#6 - vsli.64 q12,q2,#45 - vext.8 q14,q3,q4,#8 @ X[i+1] - vsli.64 q13,q2,#3 - veor q15,q12 - vshr.u64 q12,q14,#1 - veor q15,q13 @ sigma1(X[i+14]) - vshr.u64 q13,q14,#8 - vadd.i64 q3,q15 - vshr.u64 q15,q14,#7 - vsli.64 q12,q14,#63 - vsli.64 q13,q14,#56 - vext.8 q14,q7,q0,#8 @ X[i+9] - veor q15,q12 - vshr.u64 d24,d22,#14 @ from NEON_00_15 - vadd.i64 q3,q14 - vshr.u64 d25,d22,#18 @ from NEON_00_15 - veor q15,q13 @ sigma0(X[i+1]) - vshr.u64 d26,d22,#41 @ from NEON_00_15 - vadd.i64 q3,q15 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d22,#50 - vsli.64 d25,d22,#46 - vmov d29,d22 - vsli.64 d26,d22,#23 -#if 22<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d23,d16 @ Ch(e,f,g) - vshr.u64 d24,d18,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d17 - vshr.u64 d25,d18,#34 - vsli.64 d24,d18,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d18,#39 - vadd.i64 d28,d6 - vsli.64 d25,d18,#30 - veor d30,d18,d19 - vsli.64 d26,d18,#25 - veor d17,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d20,d19 @ Maj(a,b,c) - veor d17,d26 @ Sigma0(a) - vadd.i64 d21,d27 - vadd.i64 d30,d27 - @ vadd.i64 d17,d30 - vshr.u64 d24,d21,#14 @ 23 -#if 23<16 - vld1.64 {d7},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d21,#18 -#if 23>0 - vadd.i64 d17,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d21,#41 - vld1.64 {d28},[r3,:64]! 
@ K[i++] - vsli.64 d24,d21,#50 - vsli.64 d25,d21,#46 - vmov d29,d21 - vsli.64 d26,d21,#23 -#if 23<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d22,d23 @ Ch(e,f,g) - vshr.u64 d24,d17,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d16 - vshr.u64 d25,d17,#34 - vsli.64 d24,d17,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d17,#39 - vadd.i64 d28,d7 - vsli.64 d25,d17,#30 - veor d30,d17,d18 - vsli.64 d26,d17,#25 - veor d16,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d19,d18 @ Maj(a,b,c) - veor d16,d26 @ Sigma0(a) - vadd.i64 d20,d27 - vadd.i64 d30,d27 - @ vadd.i64 d16,d30 - vshr.u64 q12,q3,#19 - vshr.u64 q13,q3,#61 - vadd.i64 d16,d30 @ h+=Maj from the past - vshr.u64 q15,q3,#6 - vsli.64 q12,q3,#45 - vext.8 q14,q4,q5,#8 @ X[i+1] - vsli.64 q13,q3,#3 - veor q15,q12 - vshr.u64 q12,q14,#1 - veor q15,q13 @ sigma1(X[i+14]) - vshr.u64 q13,q14,#8 - vadd.i64 q4,q15 - vshr.u64 q15,q14,#7 - vsli.64 q12,q14,#63 - vsli.64 q13,q14,#56 - vext.8 q14,q0,q1,#8 @ X[i+9] - veor q15,q12 - vshr.u64 d24,d20,#14 @ from NEON_00_15 - vadd.i64 q4,q14 - vshr.u64 d25,d20,#18 @ from NEON_00_15 - veor q15,q13 @ sigma0(X[i+1]) - vshr.u64 d26,d20,#41 @ from NEON_00_15 - vadd.i64 q4,q15 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d20,#50 - vsli.64 d25,d20,#46 - vmov d29,d20 - vsli.64 d26,d20,#23 -#if 24<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d21,d22 @ Ch(e,f,g) - vshr.u64 d24,d16,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d23 - vshr.u64 d25,d16,#34 - vsli.64 d24,d16,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d16,#39 - vadd.i64 d28,d8 - vsli.64 d25,d16,#30 - veor d30,d16,d17 - vsli.64 d26,d16,#25 - veor d23,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d18,d17 @ Maj(a,b,c) - veor d23,d26 @ Sigma0(a) - vadd.i64 d19,d27 - vadd.i64 d30,d27 - @ vadd.i64 d23,d30 - vshr.u64 d24,d19,#14 @ 25 -#if 25<16 - vld1.64 {d9},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d19,#18 -#if 25>0 - vadd.i64 d23,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d19,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d19,#50 - vsli.64 d25,d19,#46 - vmov d29,d19 - vsli.64 d26,d19,#23 -#if 25<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d20,d21 @ Ch(e,f,g) - vshr.u64 d24,d23,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d22 - vshr.u64 d25,d23,#34 - vsli.64 d24,d23,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d23,#39 - vadd.i64 d28,d9 - vsli.64 d25,d23,#30 - veor d30,d23,d16 - vsli.64 d26,d23,#25 - veor d22,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d17,d16 @ Maj(a,b,c) - veor d22,d26 @ Sigma0(a) - vadd.i64 d18,d27 - vadd.i64 d30,d27 - @ vadd.i64 d22,d30 - vshr.u64 q12,q4,#19 - vshr.u64 q13,q4,#61 - vadd.i64 d22,d30 @ h+=Maj from the past - vshr.u64 q15,q4,#6 - vsli.64 q12,q4,#45 - vext.8 q14,q5,q6,#8 @ X[i+1] - vsli.64 q13,q4,#3 - veor q15,q12 - vshr.u64 q12,q14,#1 - veor q15,q13 @ sigma1(X[i+14]) - vshr.u64 q13,q14,#8 - vadd.i64 q5,q15 - vshr.u64 q15,q14,#7 - vsli.64 q12,q14,#63 - vsli.64 q13,q14,#56 - vext.8 q14,q1,q2,#8 @ X[i+9] - veor q15,q12 - vshr.u64 d24,d18,#14 @ from NEON_00_15 - vadd.i64 q5,q14 - vshr.u64 d25,d18,#18 @ from NEON_00_15 - veor q15,q13 @ sigma0(X[i+1]) - vshr.u64 d26,d18,#41 @ from NEON_00_15 - vadd.i64 q5,q15 - vld1.64 {d28},[r3,:64]! 
@ K[i++] - vsli.64 d24,d18,#50 - vsli.64 d25,d18,#46 - vmov d29,d18 - vsli.64 d26,d18,#23 -#if 26<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d19,d20 @ Ch(e,f,g) - vshr.u64 d24,d22,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d21 - vshr.u64 d25,d22,#34 - vsli.64 d24,d22,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d22,#39 - vadd.i64 d28,d10 - vsli.64 d25,d22,#30 - veor d30,d22,d23 - vsli.64 d26,d22,#25 - veor d21,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d16,d23 @ Maj(a,b,c) - veor d21,d26 @ Sigma0(a) - vadd.i64 d17,d27 - vadd.i64 d30,d27 - @ vadd.i64 d21,d30 - vshr.u64 d24,d17,#14 @ 27 -#if 27<16 - vld1.64 {d11},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d17,#18 -#if 27>0 - vadd.i64 d21,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d17,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d17,#50 - vsli.64 d25,d17,#46 - vmov d29,d17 - vsli.64 d26,d17,#23 -#if 27<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d18,d19 @ Ch(e,f,g) - vshr.u64 d24,d21,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d20 - vshr.u64 d25,d21,#34 - vsli.64 d24,d21,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d21,#39 - vadd.i64 d28,d11 - vsli.64 d25,d21,#30 - veor d30,d21,d22 - vsli.64 d26,d21,#25 - veor d20,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d23,d22 @ Maj(a,b,c) - veor d20,d26 @ Sigma0(a) - vadd.i64 d16,d27 - vadd.i64 d30,d27 - @ vadd.i64 d20,d30 - vshr.u64 q12,q5,#19 - vshr.u64 q13,q5,#61 - vadd.i64 d20,d30 @ h+=Maj from the past - vshr.u64 q15,q5,#6 - vsli.64 q12,q5,#45 - vext.8 q14,q6,q7,#8 @ X[i+1] - vsli.64 q13,q5,#3 - veor q15,q12 - vshr.u64 q12,q14,#1 - veor q15,q13 @ sigma1(X[i+14]) - vshr.u64 q13,q14,#8 - vadd.i64 q6,q15 - vshr.u64 q15,q14,#7 - vsli.64 q12,q14,#63 - vsli.64 q13,q14,#56 - vext.8 q14,q2,q3,#8 @ X[i+9] - veor q15,q12 - vshr.u64 d24,d16,#14 @ from NEON_00_15 - vadd.i64 q6,q14 - vshr.u64 d25,d16,#18 @ from NEON_00_15 - veor q15,q13 @ sigma0(X[i+1]) - vshr.u64 d26,d16,#41 @ from NEON_00_15 - vadd.i64 q6,q15 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d16,#50 - vsli.64 d25,d16,#46 - vmov d29,d16 - vsli.64 d26,d16,#23 -#if 28<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d17,d18 @ Ch(e,f,g) - vshr.u64 d24,d20,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d19 - vshr.u64 d25,d20,#34 - vsli.64 d24,d20,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d20,#39 - vadd.i64 d28,d12 - vsli.64 d25,d20,#30 - veor d30,d20,d21 - vsli.64 d26,d20,#25 - veor d19,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d22,d21 @ Maj(a,b,c) - veor d19,d26 @ Sigma0(a) - vadd.i64 d23,d27 - vadd.i64 d30,d27 - @ vadd.i64 d19,d30 - vshr.u64 d24,d23,#14 @ 29 -#if 29<16 - vld1.64 {d13},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d23,#18 -#if 29>0 - vadd.i64 d19,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d23,#41 - vld1.64 {d28},[r3,:64]! 
@ K[i++] - vsli.64 d24,d23,#50 - vsli.64 d25,d23,#46 - vmov d29,d23 - vsli.64 d26,d23,#23 -#if 29<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d16,d17 @ Ch(e,f,g) - vshr.u64 d24,d19,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d18 - vshr.u64 d25,d19,#34 - vsli.64 d24,d19,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d19,#39 - vadd.i64 d28,d13 - vsli.64 d25,d19,#30 - veor d30,d19,d20 - vsli.64 d26,d19,#25 - veor d18,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d21,d20 @ Maj(a,b,c) - veor d18,d26 @ Sigma0(a) - vadd.i64 d22,d27 - vadd.i64 d30,d27 - @ vadd.i64 d18,d30 - vshr.u64 q12,q6,#19 - vshr.u64 q13,q6,#61 - vadd.i64 d18,d30 @ h+=Maj from the past - vshr.u64 q15,q6,#6 - vsli.64 q12,q6,#45 - vext.8 q14,q7,q0,#8 @ X[i+1] - vsli.64 q13,q6,#3 - veor q15,q12 - vshr.u64 q12,q14,#1 - veor q15,q13 @ sigma1(X[i+14]) - vshr.u64 q13,q14,#8 - vadd.i64 q7,q15 - vshr.u64 q15,q14,#7 - vsli.64 q12,q14,#63 - vsli.64 q13,q14,#56 - vext.8 q14,q3,q4,#8 @ X[i+9] - veor q15,q12 - vshr.u64 d24,d22,#14 @ from NEON_00_15 - vadd.i64 q7,q14 - vshr.u64 d25,d22,#18 @ from NEON_00_15 - veor q15,q13 @ sigma0(X[i+1]) - vshr.u64 d26,d22,#41 @ from NEON_00_15 - vadd.i64 q7,q15 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d22,#50 - vsli.64 d25,d22,#46 - vmov d29,d22 - vsli.64 d26,d22,#23 -#if 30<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d23,d16 @ Ch(e,f,g) - vshr.u64 d24,d18,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d17 - vshr.u64 d25,d18,#34 - vsli.64 d24,d18,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d18,#39 - vadd.i64 d28,d14 - vsli.64 d25,d18,#30 - veor d30,d18,d19 - vsli.64 d26,d18,#25 - veor d17,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d20,d19 @ Maj(a,b,c) - veor d17,d26 @ Sigma0(a) - vadd.i64 d21,d27 - vadd.i64 d30,d27 - @ vadd.i64 d17,d30 - vshr.u64 d24,d21,#14 @ 31 -#if 31<16 - vld1.64 {d15},[r1]! @ handles unaligned -#endif - vshr.u64 d25,d21,#18 -#if 31>0 - vadd.i64 d17,d30 @ h+=Maj from the past -#endif - vshr.u64 d26,d21,#41 - vld1.64 {d28},[r3,:64]! @ K[i++] - vsli.64 d24,d21,#50 - vsli.64 d25,d21,#46 - vmov d29,d21 - vsli.64 d26,d21,#23 -#if 31<16 && defined(__ARMEL__) - vrev64.8 , -#endif - veor d25,d24 - vbsl d29,d22,d23 @ Ch(e,f,g) - vshr.u64 d24,d17,#28 - veor d26,d25 @ Sigma1(e) - vadd.i64 d27,d29,d16 - vshr.u64 d25,d17,#34 - vsli.64 d24,d17,#36 - vadd.i64 d27,d26 - vshr.u64 d26,d17,#39 - vadd.i64 d28,d15 - vsli.64 d25,d17,#30 - veor d30,d17,d18 - vsli.64 d26,d17,#25 - veor d16,d24,d25 - vadd.i64 d27,d28 - vbsl d30,d19,d18 @ Maj(a,b,c) - veor d16,d26 @ Sigma0(a) - vadd.i64 d20,d27 - vadd.i64 d30,d27 - @ vadd.i64 d16,d30 - bne .L16_79_neon - - vadd.i64 d16,d30 @ h+=Maj from the past - vldmia r0,{d24-d31} @ load context to temp - vadd.i64 q8,q12 @ vectorized accumulate - vadd.i64 q9,q13 - vadd.i64 q10,q14 - vadd.i64 q11,q15 - vstmia r0,{d16-d23} @ save context - teq r1,r2 - sub r3,#640 @ rewind K512 - bne .Loop_neon - - VFP_ABI_POP - bx lr @ .word 0xe12fff1e -.size sha512_block_data_order_neon,.-sha512_block_data_order_neon -#endif -.asciz "SHA512 block transform for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>" -.align 2 -#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) -.comm OPENSSL_armcap_P,4,4 -#endif diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h index 455eb19a5ac1..db8512d9a918 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h @@ -22,8 +22,8 @@ * strex/ldrex monitor on some implementations. The reason we can use it for * atomic_set() is the clrex or dummy strex done on every exception return. 
*/ -#define atomic_read(v) READ_ONCE((v)->counter) -#define atomic_set(v,i) WRITE_ONCE(((v)->counter), (i)) +#define arch_atomic_read(v) READ_ONCE((v)->counter) +#define arch_atomic_set(v,i) WRITE_ONCE(((v)->counter), (i)) #if __LINUX_ARM_ARCH__ >= 6 @@ -34,7 +34,7 @@ */ #define ATOMIC_OP(op, c_op, asm_op) \ -static inline void atomic_##op(int i, atomic_t *v) \ +static inline void arch_atomic_##op(int i, atomic_t *v) \ { \ unsigned long tmp; \ int result; \ @@ -52,7 +52,7 @@ static inline void atomic_##op(int i, atomic_t *v) \ } \ #define ATOMIC_OP_RETURN(op, c_op, asm_op) \ -static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \ +static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \ { \ unsigned long tmp; \ int result; \ @@ -73,7 +73,7 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \ } #define ATOMIC_FETCH_OP(op, c_op, asm_op) \ -static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \ +static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \ { \ unsigned long tmp; \ int result, val; \ @@ -93,17 +93,17 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \ return result; \ } -#define atomic_add_return_relaxed atomic_add_return_relaxed -#define atomic_sub_return_relaxed atomic_sub_return_relaxed -#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed -#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed +#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed +#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed +#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed +#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed -#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed -#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed -#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed -#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed +#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed +#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed +#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed +#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed -static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new) +static inline int arch_atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new) { int oldval; unsigned long res; @@ -123,9 +123,9 @@ static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new) return oldval; } -#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed +#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed -static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) +static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u) { int oldval, newval; unsigned long tmp; @@ -151,7 +151,7 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) return oldval; } -#define atomic_fetch_add_unless atomic_fetch_add_unless +#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless #else /* ARM_ARCH_6 */ @@ -160,7 +160,7 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) #endif #define ATOMIC_OP(op, c_op, asm_op) \ -static inline void atomic_##op(int i, atomic_t *v) \ +static inline void arch_atomic_##op(int i, atomic_t *v) \ { \ unsigned long flags; \ \ @@ -170,7 +170,7 @@ static inline void atomic_##op(int i, atomic_t *v) \ } \ #define ATOMIC_OP_RETURN(op, c_op, asm_op) \ -static inline int atomic_##op##_return(int i, atomic_t *v) \ 
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \ { \ unsigned long flags; \ int val; \ @@ -184,7 +184,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ } #define ATOMIC_FETCH_OP(op, c_op, asm_op) \ -static inline int atomic_fetch_##op(int i, atomic_t *v) \ +static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \ { \ unsigned long flags; \ int val; \ @@ -197,7 +197,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \ return val; \ } -static inline int atomic_cmpxchg(atomic_t *v, int old, int new) +static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new) { int ret; unsigned long flags; @@ -211,7 +211,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new) return ret; } -#define atomic_fetch_andnot atomic_fetch_andnot +#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot #endif /* __LINUX_ARM_ARCH__ */ @@ -223,7 +223,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new) ATOMIC_OPS(add, +=, add) ATOMIC_OPS(sub, -=, sub) -#define atomic_andnot atomic_andnot +#define arch_atomic_andnot arch_atomic_andnot #undef ATOMIC_OPS #define ATOMIC_OPS(op, c_op, asm_op) \ @@ -240,7 +240,7 @@ ATOMIC_OPS(xor, ^=, eor) #undef ATOMIC_OP_RETURN #undef ATOMIC_OP -#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) +#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new)) #ifndef CONFIG_GENERIC_ATOMIC64 typedef struct { @@ -250,7 +250,7 @@ typedef struct { #define ATOMIC64_INIT(i) { (i) } #ifdef CONFIG_ARM_LPAE -static inline s64 atomic64_read(const atomic64_t *v) +static inline s64 arch_atomic64_read(const atomic64_t *v) { s64 result; @@ -263,7 +263,7 @@ static inline s64 atomic64_read(const atomic64_t *v) return result; } -static inline void atomic64_set(atomic64_t *v, s64 i) +static inline void arch_atomic64_set(atomic64_t *v, s64 i) { __asm__ __volatile__("@ atomic64_set\n" " strd %2, %H2, [%1]" @@ -272,7 +272,7 @@ static inline void atomic64_set(atomic64_t *v, s64 i) ); } #else -static inline s64 atomic64_read(const atomic64_t *v) +static inline s64 arch_atomic64_read(const atomic64_t *v) { s64 result; @@ -285,7 +285,7 @@ static inline s64 atomic64_read(const atomic64_t *v) return result; } -static inline void atomic64_set(atomic64_t *v, s64 i) +static inline void arch_atomic64_set(atomic64_t *v, s64 i) { s64 tmp; @@ -302,7 +302,7 @@ static inline void atomic64_set(atomic64_t *v, s64 i) #endif #define ATOMIC64_OP(op, op1, op2) \ -static inline void atomic64_##op(s64 i, atomic64_t *v) \ +static inline void arch_atomic64_##op(s64 i, atomic64_t *v) \ { \ s64 result; \ unsigned long tmp; \ @@ -322,7 +322,7 @@ static inline void atomic64_##op(s64 i, atomic64_t *v) \ #define ATOMIC64_OP_RETURN(op, op1, op2) \ static inline s64 \ -atomic64_##op##_return_relaxed(s64 i, atomic64_t *v) \ +arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t *v) \ { \ s64 result; \ unsigned long tmp; \ @@ -345,7 +345,7 @@ atomic64_##op##_return_relaxed(s64 i, atomic64_t *v) \ #define ATOMIC64_FETCH_OP(op, op1, op2) \ static inline s64 \ -atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v) \ +arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v) \ { \ s64 result, val; \ unsigned long tmp; \ @@ -374,34 +374,34 @@ atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v) \ ATOMIC64_OPS(add, adds, adc) ATOMIC64_OPS(sub, subs, sbc) -#define atomic64_add_return_relaxed atomic64_add_return_relaxed -#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed -#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed -#define 
atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed +#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed +#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed +#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed +#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed #undef ATOMIC64_OPS #define ATOMIC64_OPS(op, op1, op2) \ ATOMIC64_OP(op, op1, op2) \ ATOMIC64_FETCH_OP(op, op1, op2) -#define atomic64_andnot atomic64_andnot +#define arch_atomic64_andnot arch_atomic64_andnot ATOMIC64_OPS(and, and, and) ATOMIC64_OPS(andnot, bic, bic) ATOMIC64_OPS(or, orr, orr) ATOMIC64_OPS(xor, eor, eor) -#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed -#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed -#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed -#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed +#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed +#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed +#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed +#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed #undef ATOMIC64_OPS #undef ATOMIC64_FETCH_OP #undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP -static inline s64 atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new) +static inline s64 arch_atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new) { s64 oldval; unsigned long res; @@ -422,9 +422,9 @@ static inline s64 atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new) return oldval; } -#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed +#define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg_relaxed -static inline s64 atomic64_xchg_relaxed(atomic64_t *ptr, s64 new) +static inline s64 arch_atomic64_xchg_relaxed(atomic64_t *ptr, s64 new) { s64 result; unsigned long tmp; @@ -442,9 +442,9 @@ static inline s64 atomic64_xchg_relaxed(atomic64_t *ptr, s64 new) return result; } -#define atomic64_xchg_relaxed atomic64_xchg_relaxed +#define arch_atomic64_xchg_relaxed arch_atomic64_xchg_relaxed -static inline s64 atomic64_dec_if_positive(atomic64_t *v) +static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v) { s64 result; unsigned long tmp; @@ -470,9 +470,9 @@ static inline s64 atomic64_dec_if_positive(atomic64_t *v) return result; } -#define atomic64_dec_if_positive atomic64_dec_if_positive +#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive -static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) +static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) { s64 oldval, newval; unsigned long tmp; @@ -500,7 +500,7 @@ static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) return oldval; } -#define atomic64_fetch_add_unless atomic64_fetch_add_unless +#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless #endif /* !CONFIG_GENERIC_ATOMIC64 */ #endif diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h index 8b701f8e175c..4dfe538dfc68 100644 --- a/arch/arm/include/asm/cmpxchg.h +++ b/arch/arm/include/asm/cmpxchg.h @@ -114,7 +114,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size return ret; } -#define xchg_relaxed(ptr, x) ({ \ +#define arch_xchg_relaxed(ptr, x) ({ \ (__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \ sizeof(*(ptr))); \ }) @@ -128,20 +128,20 @@ static inline unsigned long __xchg(unsigned long x, volatile void 
*ptr, int size #error "SMP is not supported on this platform" #endif -#define xchg xchg_relaxed +#define arch_xchg arch_xchg_relaxed /* * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make * them available. */ -#define cmpxchg_local(ptr, o, n) ({ \ - (__typeof(*ptr))__cmpxchg_local_generic((ptr), \ +#define arch_cmpxchg_local(ptr, o, n) ({ \ + (__typeof(*ptr))__generic_cmpxchg_local((ptr), \ (unsigned long)(o), \ (unsigned long)(n), \ sizeof(*(ptr))); \ }) -#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) +#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n)) #include <asm-generic/cmpxchg.h> @@ -207,7 +207,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, return oldval; } -#define cmpxchg_relaxed(ptr,o,n) ({ \ +#define arch_cmpxchg_relaxed(ptr,o,n) ({ \ (__typeof__(*(ptr)))__cmpxchg((ptr), \ (unsigned long)(o), \ (unsigned long)(n), \ @@ -224,7 +224,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, #ifdef CONFIG_CPU_V6 /* min ARCH == ARMv6 */ case 1: case 2: - ret = __cmpxchg_local_generic(ptr, old, new, size); + ret = __generic_cmpxchg_local(ptr, old, new, size); break; #endif default: @@ -234,7 +234,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, return ret; } -#define cmpxchg_local(ptr, o, n) ({ \ +#define arch_cmpxchg_local(ptr, o, n) ({ \ (__typeof(*ptr))__cmpxchg_local((ptr), \ (unsigned long)(o), \ (unsigned long)(n), \ @@ -266,13 +266,13 @@ static inline unsigned long long __cmpxchg64(unsigned long long *ptr, return oldval; } -#define cmpxchg64_relaxed(ptr, o, n) ({ \ +#define arch_cmpxchg64_relaxed(ptr, o, n) ({ \ (__typeof__(*(ptr)))__cmpxchg64((ptr), \ (unsigned long long)(o), \ (unsigned long long)(n)); \ }) -#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n)) +#define arch_cmpxchg64_local(ptr, o, n) arch_cmpxchg64_relaxed((ptr), (o), (n)) #endif /* __LINUX_ARM_ARCH__ >= 6 */ diff --git a/arch/arm/include/asm/cpuidle.h b/arch/arm/include/asm/cpuidle.h index 0d67ed682e07..397be5ed30e7 100644 --- a/arch/arm/include/asm/cpuidle.h +++ b/arch/arm/include/asm/cpuidle.h @@ -7,9 +7,11 @@ #ifdef CONFIG_CPU_IDLE extern int arm_cpuidle_simple_enter(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index); +#define __cpuidle_method_section __used __section("__cpuidle_method_of_table") #else static inline int arm_cpuidle_simple_enter(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { return -ENODEV; } +#define __cpuidle_method_section __maybe_unused /* drop silently */ #endif /* Common ARM WFI state */ @@ -42,11 +44,15 @@ struct of_cpuidle_method { #define CPUIDLE_METHOD_OF_DECLARE(name, _method, _ops) \ static const struct of_cpuidle_method __cpuidle_method_of_table_##name \ - __used __section("__cpuidle_method_of_table") \ - = { .method = _method, .ops = _ops } + __cpuidle_method_section = { .method = _method, .ops = _ops } extern int arm_cpuidle_suspend(int index); extern int arm_cpuidle_init(int cpu); +struct arm_cpuidle_irq_context { }; + +#define arm_cpuidle_save_irq_context(c) (void)c +#define arm_cpuidle_restore_irq_context(c) (void)c + #endif diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h index 48ec1d0337da..a4dbac07e4ef 100644 --- a/arch/arm/include/asm/ftrace.h +++ b/arch/arm/include/asm/ftrace.h @@ -15,6 +15,9 @@ extern void __gnu_mcount_nc(void); #ifdef CONFIG_DYNAMIC_FTRACE struct dyn_arch_ftrace { +#ifdef CONFIG_ARM_MODULE_PLTS + struct module *mod; 
+#endif }; static inline unsigned long ftrace_call_adjust(unsigned long addr) diff --git a/arch/arm/include/asm/insn.h b/arch/arm/include/asm/insn.h index f20e08ac85ae..5475cbf9fb6b 100644 --- a/arch/arm/include/asm/insn.h +++ b/arch/arm/include/asm/insn.h @@ -13,18 +13,18 @@ arm_gen_nop(void) } unsigned long -__arm_gen_branch(unsigned long pc, unsigned long addr, bool link); +__arm_gen_branch(unsigned long pc, unsigned long addr, bool link, bool warn); static inline unsigned long arm_gen_branch(unsigned long pc, unsigned long addr) { - return __arm_gen_branch(pc, addr, false); + return __arm_gen_branch(pc, addr, false, true); } static inline unsigned long -arm_gen_branch_link(unsigned long pc, unsigned long addr) +arm_gen_branch_link(unsigned long pc, unsigned long addr, bool warn) { - return __arm_gen_branch(pc, addr, true); + return __arm_gen_branch(pc, addr, true, warn); } #endif diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index a711322d9f40..cfc9dfd70aad 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -20,8 +20,14 @@ #endif #include <asm/kasan_def.h> -/* PAGE_OFFSET - the virtual address of the start of the kernel image */ +/* + * PAGE_OFFSET: the virtual address of the start of lowmem, memory above + * the virtual address range for userspace. + * KERNEL_OFFSET: the virtual address of the start of the kernel image. + * we may further offset this with TEXT_OFFSET in practice. + */ #define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET) +#define KERNEL_OFFSET (PAGE_OFFSET) #ifdef CONFIG_MMU @@ -153,6 +159,13 @@ extern unsigned long vectors_base; #ifndef __ASSEMBLY__ /* + * Physical start and end address of the kernel sections. These addresses are + * 2MB-aligned to match the section mappings placed over the kernel. + */ +extern u32 kernel_sec_start; +extern u32 kernel_sec_end; + +/* * Physical vs virtual RAM address space conversion. These are * private definitions which should NOT be used outside memory.h * files. Use virt_to_phys/phys_to_virt/__pa/__va instead. 
diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h index 4b0df09cbe67..cfffae67c04e 100644 --- a/arch/arm/include/asm/module.h +++ b/arch/arm/include/asm/module.h @@ -19,8 +19,18 @@ enum { }; #endif +#define PLT_ENT_STRIDE L1_CACHE_BYTES +#define PLT_ENT_COUNT (PLT_ENT_STRIDE / sizeof(u32)) +#define PLT_ENT_SIZE (sizeof(struct plt_entries) / PLT_ENT_COUNT) + +struct plt_entries { + u32 ldr[PLT_ENT_COUNT]; + u32 lit[PLT_ENT_COUNT]; +}; + struct mod_plt_sec { struct elf32_shdr *plt; + struct plt_entries *plt_ent; int plt_count; }; diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h index fdee1f04f4f3..a17f01235c29 100644 --- a/arch/arm/include/asm/pgalloc.h +++ b/arch/arm/include/asm/pgalloc.h @@ -143,7 +143,6 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep) __pmd_populate(pmdp, page_to_phys(ptep), prot); } -#define pmd_pgtable(pmd) pmd_page(pmd) #endif /* CONFIG_MMU */ diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h index d4edab51a77c..eabe72ff7381 100644 --- a/arch/arm/include/asm/pgtable-3level.h +++ b/arch/arm/include/asm/pgtable-3level.h @@ -130,7 +130,7 @@ flush_pmd_entry(pudp); \ } while (0) -static inline pmd_t *pud_page_vaddr(pud_t pud) +static inline pmd_t *pud_pgtable(pud_t pud) { return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK); } diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index d63a5bb6bd0c..cd1f84bb40ae 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -306,7 +306,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) #define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) }) #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) -#define __swp_entry_to_pte(swp) ((pte_t) { (swp).val }) +#define __swp_entry_to_pte(swp) __pte((swp).val | PTE_TYPE_FAULT) /* * It is an error for the kernel to have more swap files than we can diff --git a/arch/arm/include/asm/sync_bitops.h b/arch/arm/include/asm/sync_bitops.h index 39ff217136d1..6f5d627c44a3 100644 --- a/arch/arm/include/asm/sync_bitops.h +++ b/arch/arm/include/asm/sync_bitops.h @@ -21,7 +21,7 @@ #define sync_test_and_clear_bit(nr, p) _test_and_clear_bit(nr, p) #define sync_test_and_change_bit(nr, p) _test_and_change_bit(nr, p) #define sync_test_bit(nr, addr) test_bit(nr, addr) -#define sync_cmpxchg cmpxchg +#define arch_sync_cmpxchg arch_cmpxchg #endif diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h index 66f6a3ae68d2..98b37340376b 100644 --- a/arch/arm/include/asm/system_misc.h +++ b/arch/arm/include/asm/system_misc.h @@ -13,7 +13,6 @@ extern void cpu_init(void); void soft_restart(unsigned long); -extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); extern void (*arm_pm_idle)(void); #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index 24cbfc112dfa..0ccc985b90af 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -253,7 +253,7 @@ extern struct cpu_tlb_fns cpu_tlb; * space. * - mm - mm_struct describing address space * - * flush_tlb_range(mm,start,end) + * flush_tlb_range(vma,start,end) * * Invalidate a range of TLB entries in the specified * address space. 
@@ -261,18 +261,11 @@ extern struct cpu_tlb_fns cpu_tlb; * - start - start address (may not be aligned) * - end - end address (exclusive, may not be aligned) * - * flush_tlb_page(vaddr,vma) + * flush_tlb_page(vma, uaddr) * * Invalidate the specified page in the specified address range. + * - vma - vm_area_struct describing address range * - vaddr - virtual address (may not be aligned) - * - vma - vma_struct describing address range - * - * flush_kern_tlb_page(kaddr) - * - * Invalidate the TLB entry for the specified page. The address - * will be in the kernels virtual memory space. Current uses - * only require the D-TLB to be invalidated. - * - kaddr - Kernel virtual memory address */ /* diff --git a/arch/arm/include/asm/unaligned.h b/arch/arm/include/asm/unaligned.h deleted file mode 100644 index ab905ffcf193..000000000000 --- a/arch/arm/include/asm/unaligned.h +++ /dev/null @@ -1,27 +0,0 @@ -#ifndef __ASM_ARM_UNALIGNED_H -#define __ASM_ARM_UNALIGNED_H - -/* - * We generally want to set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS on ARMv6+, - * but we don't want to use linux/unaligned/access_ok.h since that can lead - * to traps on unaligned stm/ldm or strd/ldrd. - */ -#include <asm/byteorder.h> - -#if defined(__LITTLE_ENDIAN) -# include <linux/unaligned/le_struct.h> -# include <linux/unaligned/be_byteshift.h> -# include <linux/unaligned/generic.h> -# define get_unaligned __get_unaligned_le -# define put_unaligned __put_unaligned_le -#elif defined(__BIG_ENDIAN) -# include <linux/unaligned/be_struct.h> -# include <linux/unaligned/le_byteshift.h> -# include <linux/unaligned/generic.h> -# define get_unaligned __get_unaligned_be -# define put_unaligned __put_unaligned_be -#else -# error need to define endianess -#endif - -#endif /* __ASM_ARM_UNALIGNED_H */ diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c index 9a79ef6b1876..3c83b5d29697 100644 --- a/arch/arm/kernel/ftrace.c +++ b/arch/arm/kernel/ftrace.c @@ -68,9 +68,10 @@ int ftrace_arch_code_modify_post_process(void) return 0; } -static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr) +static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr, + bool warn) { - return arm_gen_branch_link(pc, addr); + return arm_gen_branch_link(pc, addr, warn); } static int ftrace_modify_code(unsigned long pc, unsigned long old, @@ -104,14 +105,14 @@ int ftrace_update_ftrace_func(ftrace_func_t func) int ret; pc = (unsigned long)&ftrace_call; - new = ftrace_call_replace(pc, (unsigned long)func); + new = ftrace_call_replace(pc, (unsigned long)func, true); ret = ftrace_modify_code(pc, 0, new, false); #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS if (!ret) { pc = (unsigned long)&ftrace_regs_call; - new = ftrace_call_replace(pc, (unsigned long)func); + new = ftrace_call_replace(pc, (unsigned long)func, true); ret = ftrace_modify_code(pc, 0, new, false); } @@ -124,10 +125,22 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { unsigned long new, old; unsigned long ip = rec->ip; + unsigned long aaddr = adjust_address(rec, addr); + struct module *mod = NULL; + +#ifdef CONFIG_ARM_MODULE_PLTS + mod = rec->arch.mod; +#endif old = ftrace_nop_replace(rec); - new = ftrace_call_replace(ip, adjust_address(rec, addr)); + new = ftrace_call_replace(ip, aaddr, !mod); +#ifdef CONFIG_ARM_MODULE_PLTS + if (!new && mod) { + aaddr = get_module_plt(mod, ip, aaddr); + new = ftrace_call_replace(ip, aaddr, true); + } +#endif return ftrace_modify_code(rec->ip, old, new, true); } @@ -140,9 +153,9 @@ int 
ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long new, old; unsigned long ip = rec->ip; - old = ftrace_call_replace(ip, adjust_address(rec, old_addr)); + old = ftrace_call_replace(ip, adjust_address(rec, old_addr), true); - new = ftrace_call_replace(ip, adjust_address(rec, addr)); + new = ftrace_call_replace(ip, adjust_address(rec, addr), true); return ftrace_modify_code(rec->ip, old, new, true); } @@ -152,12 +165,29 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) { + unsigned long aaddr = adjust_address(rec, addr); unsigned long ip = rec->ip; unsigned long old; unsigned long new; int ret; - old = ftrace_call_replace(ip, adjust_address(rec, addr)); +#ifdef CONFIG_ARM_MODULE_PLTS + /* mod is only supplied during module loading */ + if (!mod) + mod = rec->arch.mod; + else + rec->arch.mod = mod; +#endif + + old = ftrace_call_replace(ip, aaddr, + !IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || !mod); +#ifdef CONFIG_ARM_MODULE_PLTS + if (!old && mod) { + aaddr = get_module_plt(mod, ip, aaddr); + old = ftrace_call_replace(ip, aaddr, true); + } +#endif + new = ftrace_nop_replace(rec); ret = ftrace_modify_code(ip, old, new, true); diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 7f62c5eccdf3..9eb0b4dbcc12 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -23,7 +23,6 @@ #if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING) #include CONFIG_DEBUG_LL_INCLUDE #endif - /* * swapper_pg_dir is the virtual address of the initial page table. * We place the page tables 16K below KERNEL_RAM_VADDR. Therefore, we must @@ -31,7 +30,7 @@ * the least significant 16 bits to be 0x8000, but we could probably * relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000. */ -#define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET) +#define KERNEL_RAM_VADDR (KERNEL_OFFSET + TEXT_OFFSET) #if (KERNEL_RAM_VADDR & 0xffff) != 0x8000 #error KERNEL_RAM_VADDR must start at 0xXXXX8000 #endif @@ -48,6 +47,20 @@ .globl swapper_pg_dir .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE + /* + * This needs to be assigned at runtime when the linker symbols are + * resolved. + */ + .pushsection .data + .align 2 + .globl kernel_sec_start + .globl kernel_sec_end +kernel_sec_start: + .long 0 +kernel_sec_end: + .long 0 + .popsection + .macro pgtbl, rd, phys add \rd, \phys, #TEXT_OFFSET sub \rd, \rd, #PG_DIR_SIZE @@ -230,16 +243,23 @@ __create_page_tables: blo 1b /* - * Map our RAM from the start to the end of the kernel .bss section. + * The main matter: map in the kernel using section mappings, and + * set two variables to indicate the physical start and end of the + * kernel. 
*/ - add r0, r4, #PAGE_OFFSET >> (SECTION_SHIFT - PMD_ORDER) + add r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ORDER) ldr r6, =(_end - 1) - orr r3, r8, r7 + adr_l r5, kernel_sec_start @ _pa(kernel_sec_start) + str r8, [r5] @ Save physical start of kernel + orr r3, r8, r7 @ Add the MMU flags add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER) 1: str r3, [r0], #1 << PMD_ORDER add r3, r3, #1 << SECTION_SHIFT cmp r0, r6 bls 1b + eor r3, r3, r7 @ Remove the MMU flags + adr_l r5, kernel_sec_end @ _pa(kernel_sec_end) + str r3, [r5] @ Save physical end of kernel #ifdef CONFIG_XIP_KERNEL /* diff --git a/arch/arm/kernel/insn.c b/arch/arm/kernel/insn.c index 2e844b70386b..db0acbb7d7a0 100644 --- a/arch/arm/kernel/insn.c +++ b/arch/arm/kernel/insn.c @@ -3,8 +3,9 @@ #include <linux/kernel.h> #include <asm/opcodes.h> -static unsigned long -__arm_gen_branch_thumb2(unsigned long pc, unsigned long addr, bool link) +static unsigned long __arm_gen_branch_thumb2(unsigned long pc, + unsigned long addr, bool link, + bool warn) { unsigned long s, j1, j2, i1, i2, imm10, imm11; unsigned long first, second; @@ -12,7 +13,7 @@ __arm_gen_branch_thumb2(unsigned long pc, unsigned long addr, bool link) offset = (long)addr - (long)(pc + 4); if (offset < -16777216 || offset > 16777214) { - WARN_ON_ONCE(1); + WARN_ON_ONCE(warn); return 0; } @@ -33,8 +34,8 @@ __arm_gen_branch_thumb2(unsigned long pc, unsigned long addr, bool link) return __opcode_thumb32_compose(first, second); } -static unsigned long -__arm_gen_branch_arm(unsigned long pc, unsigned long addr, bool link) +static unsigned long __arm_gen_branch_arm(unsigned long pc, unsigned long addr, + bool link, bool warn) { unsigned long opcode = 0xea000000; long offset; @@ -44,7 +45,7 @@ __arm_gen_branch_arm(unsigned long pc, unsigned long addr, bool link) offset = (long)addr - (long)(pc + 8); if (unlikely(offset < -33554432 || offset > 33554428)) { - WARN_ON_ONCE(1); + WARN_ON_ONCE(warn); return 0; } @@ -54,10 +55,10 @@ __arm_gen_branch_arm(unsigned long pc, unsigned long addr, bool link) } unsigned long -__arm_gen_branch(unsigned long pc, unsigned long addr, bool link) +__arm_gen_branch(unsigned long pc, unsigned long addr, bool link, bool warn) { if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) - return __arm_gen_branch_thumb2(pc, addr, link); + return __arm_gen_branch_thumb2(pc, addr, link, warn); else - return __arm_gen_branch_arm(pc, addr, link); + return __arm_gen_branch_arm(pc, addr, link, warn); } diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 698b6f636156..20ab1e607522 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -63,7 +63,27 @@ int arch_show_interrupts(struct seq_file *p, int prec) */ void handle_IRQ(unsigned int irq, struct pt_regs *regs) { - __handle_domain_irq(NULL, irq, false, regs); + struct pt_regs *old_regs = set_irq_regs(regs); + struct irq_desc *desc; + + irq_enter(); + + /* + * Some hardware gives randomly wrong interrupts. Rather + * than crashing, do something sensible. 
+ */ + if (unlikely(!irq || irq >= nr_irqs)) + desc = NULL; + else + desc = irq_to_desc(irq); + + if (likely(desc)) + handle_irq_desc(desc); + else + ack_bad_irq(irq); + + irq_exit(); + set_irq_regs(old_regs); } /* diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c index 6e626abaefc5..1fc309b41f94 100644 --- a/arch/arm/kernel/module-plts.c +++ b/arch/arm/kernel/module-plts.c @@ -4,6 +4,7 @@ */ #include <linux/elf.h> +#include <linux/ftrace.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/sort.h> @@ -12,10 +13,6 @@ #include <asm/cache.h> #include <asm/opcodes.h> -#define PLT_ENT_STRIDE L1_CACHE_BYTES -#define PLT_ENT_COUNT (PLT_ENT_STRIDE / sizeof(u32)) -#define PLT_ENT_SIZE (sizeof(struct plt_entries) / PLT_ENT_COUNT) - #ifdef CONFIG_THUMB2_KERNEL #define PLT_ENT_LDR __opcode_to_mem_thumb32(0xf8dff000 | \ (PLT_ENT_STRIDE - 4)) @@ -24,9 +21,11 @@ (PLT_ENT_STRIDE - 8)) #endif -struct plt_entries { - u32 ldr[PLT_ENT_COUNT]; - u32 lit[PLT_ENT_COUNT]; +static const u32 fixed_plts[] = { +#ifdef CONFIG_DYNAMIC_FTRACE + FTRACE_ADDR, + MCOUNT_ADDR, +#endif }; static bool in_init(const struct module *mod, unsigned long loc) @@ -34,14 +33,40 @@ static bool in_init(const struct module *mod, unsigned long loc) return loc - (u32)mod->init_layout.base < mod->init_layout.size; } +static void prealloc_fixed(struct mod_plt_sec *pltsec, struct plt_entries *plt) +{ + int i; + + if (!ARRAY_SIZE(fixed_plts) || pltsec->plt_count) + return; + pltsec->plt_count = ARRAY_SIZE(fixed_plts); + + for (i = 0; i < ARRAY_SIZE(plt->ldr); ++i) + plt->ldr[i] = PLT_ENT_LDR; + + BUILD_BUG_ON(sizeof(fixed_plts) > sizeof(plt->lit)); + memcpy(plt->lit, fixed_plts, sizeof(fixed_plts)); +} + u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val) { struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core : &mod->arch.init; + struct plt_entries *plt; + int idx; + + /* cache the address, ELF header is available only during module load */ + if (!pltsec->plt_ent) + pltsec->plt_ent = (struct plt_entries *)pltsec->plt->sh_addr; + plt = pltsec->plt_ent; - struct plt_entries *plt = (struct plt_entries *)pltsec->plt->sh_addr; - int idx = 0; + prealloc_fixed(pltsec, plt); + + for (idx = 0; idx < ARRAY_SIZE(fixed_plts); ++idx) + if (plt->lit[idx] == val) + return (u32)&plt->ldr[idx]; + idx = 0; /* * Look for an existing entry pointing to 'val'. Given that the * relocations are sorted, this will be the last entry we allocated. 
@@ -189,8 +214,8 @@ static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base, int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings, struct module *mod) { - unsigned long core_plts = 0; - unsigned long init_plts = 0; + unsigned long core_plts = ARRAY_SIZE(fixed_plts); + unsigned long init_plts = ARRAY_SIZE(fixed_plts); Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum; Elf32_Sym *syms = NULL; @@ -245,6 +270,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, mod->arch.core.plt->sh_size = round_up(core_plts * PLT_ENT_SIZE, sizeof(struct plt_entries)); mod->arch.core.plt_count = 0; + mod->arch.core.plt_ent = NULL; mod->arch.init.plt->sh_type = SHT_NOBITS; mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC; @@ -252,6 +278,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, mod->arch.init.plt->sh_size = round_up(init_plts * PLT_ENT_SIZE, sizeof(struct plt_entries)); mod->arch.init.plt_count = 0; + mod->arch.init.plt_ent = NULL; pr_debug("%s: plt=%x, init.plt=%x\n", __func__, mod->arch.core.plt->sh_size, mod->arch.init.plt->sh_size); diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index 2924d7910b10..eb2190477da1 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c @@ -773,10 +773,10 @@ static inline void armv7pmu_write_counter(struct perf_event *event, u64 value) pr_err("CPU%u writing wrong counter %d\n", smp_processor_id(), idx); } else if (idx == ARMV7_IDX_CYCLE_COUNTER) { - asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value)); + asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" ((u32)value)); } else { armv7_pmnc_select_counter(idx); - asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value)); + asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" ((u32)value)); } } diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 6324f4db9b02..fc9e8b37eaa8 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -288,7 +288,7 @@ unsigned long get_wchan(struct task_struct *p) struct stackframe frame; unsigned long stack_page; int count = 0; - if (!p || p == current || p->state == TASK_RUNNING) + if (!p || p == current || task_is_running(p)) return 0; frame.fp = thread_saved_fp(p); diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c index 0ce388f15422..3044fcb8d073 100644 --- a/arch/arm/kernel/reboot.c +++ b/arch/arm/kernel/reboot.c @@ -18,7 +18,6 @@ typedef void (*phys_reset_t)(unsigned long, bool); /* * Function pointers to optional machine specific functions */ -void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); void (*pm_power_off)(void); EXPORT_SYMBOL(pm_power_off); @@ -138,10 +137,7 @@ void machine_restart(char *cmd) local_irq_disable(); smp_send_stop(); - if (arm_pm_restart) - arm_pm_restart(reboot_mode, cmd); - else - do_kernel_restart(cmd); + do_kernel_restart(cmd); /* Give a grace period for failure to restart of 1s */ mdelay(1000); diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 1a5edf562e85..f97eb2371672 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -545,9 +545,11 @@ void notrace cpu_init(void) * In Thumb-2, msr with an immediate value is not allowed. 
*/ #ifdef CONFIG_THUMB2_KERNEL -#define PLC "r" +#define PLC_l "l" +#define PLC_r "r" #else -#define PLC "I" +#define PLC_l "I" +#define PLC_r "I" #endif /* @@ -569,15 +571,15 @@ void notrace cpu_init(void) "msr cpsr_c, %9" : : "r" (stk), - PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE), + PLC_r (PSR_F_BIT | PSR_I_BIT | IRQ_MODE), "I" (offsetof(struct stack, irq[0])), - PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE), + PLC_r (PSR_F_BIT | PSR_I_BIT | ABT_MODE), "I" (offsetof(struct stack, abt[0])), - PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE), + PLC_r (PSR_F_BIT | PSR_I_BIT | UND_MODE), "I" (offsetof(struct stack, und[0])), - PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE), + PLC_r (PSR_F_BIT | PSR_I_BIT | FIQ_MODE), "I" (offsetof(struct stack, fiq[0])), - PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE) + PLC_l (PSR_F_BIT | PSR_I_BIT | SVC_MODE) : "r14"); #endif } @@ -1081,6 +1083,20 @@ void __init hyp_mode_check(void) #endif } +static void (*__arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); + +static int arm_restart(struct notifier_block *nb, unsigned long action, + void *data) +{ + __arm_pm_restart(action, data); + return NOTIFY_DONE; +} + +static struct notifier_block arm_restart_nb = { + .notifier_call = arm_restart, + .priority = 128, +}; + void __init setup_arch(char **cmdline_p) { const struct machine_desc *mdesc = NULL; @@ -1114,10 +1130,7 @@ void __init setup_arch(char **cmdline_p) if (mdesc->reboot_mode != REBOOT_HARD) reboot_mode = mdesc->reboot_mode; - init_mm.start_code = (unsigned long) _text; - init_mm.end_code = (unsigned long) _etext; - init_mm.end_data = (unsigned long) _edata; - init_mm.brk = (unsigned long) _end; + setup_initial_init_mm(_text, _etext, _edata, _end); /* populate cmd_line too for later use, preserving boot_command_line */ strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE); @@ -1149,8 +1162,10 @@ void __init setup_arch(char **cmdline_p) kasan_init(); request_standard_resources(mdesc); - if (mdesc->restart) - arm_pm_restart = mdesc->restart; + if (mdesc->restart) { + __arm_pm_restart = mdesc->restart; + register_restart_handler(&arm_restart_nb); + } unflatten_device_tree(); diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 74679240a9d8..c7bb168b0d97 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -432,7 +432,6 @@ asmlinkage void secondary_start_kernel(void) #endif pr_debug("CPU%u: Booted secondary processor\n", cpu); - preempt_disable(); trace_hardirqs_off(); /* diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index f7f4620d59c3..20c4f6d20c7a 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -47,7 +47,7 @@ SECTIONS #endif } - . = PAGE_OFFSET + TEXT_OFFSET; + . 
= KERNEL_OFFSET + TEXT_OFFSET; .head.text : { _text = .; HEAD_TEXT diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c index aeaaa65ad757..b5eadd70d903 100644 --- a/arch/arm/mach-ixp4xx/common.c +++ b/arch/arm/mach-ixp4xx/common.c @@ -276,12 +276,38 @@ static struct platform_device *ixp46x_devices[] __initdata = { unsigned long ixp4xx_exp_bus_size; EXPORT_SYMBOL(ixp4xx_exp_bus_size); +static struct platform_device_info ixp_dev_info __initdata = { + .name = "ixp4xx_crypto", + .id = 0, + .dma_mask = DMA_BIT_MASK(32), +}; + +static int __init ixp_crypto_register(void) +{ + struct platform_device *pdev; + + if (!(~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH | + IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) { + printk(KERN_ERR "ixp_crypto: No HW crypto available\n"); + return -ENODEV; + } + + pdev = platform_device_register_full(&ixp_dev_info); + if (IS_ERR(pdev)) + return PTR_ERR(pdev); + + return 0; +} + void __init ixp4xx_sys_init(void) { ixp4xx_exp_bus_size = SZ_16M; platform_add_devices(ixp4xx_devices, ARRAY_SIZE(ixp4xx_devices)); + if (IS_ENABLED(CONFIG_CRYPTO_DEV_IXP4XX)) + ixp_crypto_register(); + if (cpu_is_ixp46x()) { int region; diff --git a/arch/arm/mach-zynq/Kconfig b/arch/arm/mach-zynq/Kconfig index 43fb941dcd07..a56748d671c4 100644 --- a/arch/arm/mach-zynq/Kconfig +++ b/arch/arm/mach-zynq/Kconfig @@ -6,7 +6,7 @@ config ARCH_ZYNQ select ARCH_SUPPORTS_BIG_ENDIAN select ARM_AMBA select ARM_GIC - select ARM_GLOBAL_TIMER if !CPU_FREQ + select ARM_GLOBAL_TIMER select CADENCE_TTC_TIMER select HAVE_ARM_SCU if SMP select HAVE_ARM_TWD if SMP diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 35f43d0aa056..8355c3895894 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -601,8 +601,6 @@ config CPU_TLB_V6 config CPU_TLB_V7 bool -config VERIFY_PERMISSION_FAULT - bool endif config CPU_HAS_ASID diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S index f7cc5d68444b..f81bceacc660 100644 --- a/arch/arm/mm/abort-ev7.S +++ b/arch/arm/mm/abort-ev7.S @@ -17,31 +17,5 @@ ENTRY(v7_early_abort) mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR uaccess_disable ip @ disable userspace access - - /* - * V6 code adjusts the returned DFSR. - * New designs should not need to patch up faults. - */ - -#if defined(CONFIG_VERIFY_PERMISSION_FAULT) - /* - * Detect erroneous permission failures and fix - */ - ldr r3, =0x40d @ On permission fault - and r3, r1, r3 - cmp r3, #0x0d - bne do_DataAbort - - mcr p15, 0, r0, c7, c8, 0 @ Retranslate FAR - isb - mrc p15, 0, ip, c7, c4, 0 @ Read the PAR - and r3, ip, #0x7b @ On translation fault - cmp r3, #0x0b - bne do_DataAbort - bic r1, r1, #0xf @ Fix up FSR FS[5:0] - and ip, ip, #0x7e - orr r1, r1, ip, LSR #1 -#endif - b do_DataAbort ENDPROC(v7_early_abort) diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 9d4744a632c6..6162a070a410 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -125,11 +125,22 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max_low, int pfn_valid(unsigned long pfn) { phys_addr_t addr = __pfn_to_phys(pfn); + unsigned long pageblock_size = PAGE_SIZE * pageblock_nr_pages; if (__phys_to_pfn(addr) != pfn) return 0; - return memblock_is_map_memory(addr); + /* + * If address less than pageblock_size bytes away from a present + * memory chunk there still will be a memory map entry for it + * because we round freed memory map to the pageblock boundaries. 
+ */ + if (memblock_overlaps_region(&memblock.memory, + ALIGN_DOWN(addr, pageblock_size), + pageblock_size)) + return 1; + + return 0; } EXPORT_SYMBOL(pfn_valid); #endif diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index 000e8210000b..80fb5a4a5c05 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c @@ -27,6 +27,7 @@ #include <linux/vmalloc.h> #include <linux/io.h> #include <linux/sizes.h> +#include <linux/memblock.h> #include <asm/cp15.h> #include <asm/cputype.h> @@ -284,7 +285,8 @@ static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, * Don't allow RAM to be mapped with mismatched attributes - this * causes problems with ARMv6+ */ - if (WARN_ON(pfn_valid(pfn) && mtype != MT_MEMORY_RW)) + if (WARN_ON(memblock_is_map_memory(PFN_PHYS(pfn)) && + mtype != MT_MEMORY_RW)) return NULL; area = get_vm_area_caller(size, VM_IOREMAP, caller); diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index c1e12aab67b8..7583bda5ea7d 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -1121,31 +1121,32 @@ void __init debug_ll_io_init(void) } #endif -static void * __initdata vmalloc_min = - (void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET); +static unsigned long __initdata vmalloc_size = 240 * SZ_1M; /* * vmalloc=size forces the vmalloc area to be exactly 'size' * bytes. This can be used to increase (or decrease) the vmalloc - * area - the default is 240m. + * area - the default is 240MiB. */ static int __init early_vmalloc(char *arg) { unsigned long vmalloc_reserve = memparse(arg, NULL); + unsigned long vmalloc_max; if (vmalloc_reserve < SZ_16M) { vmalloc_reserve = SZ_16M; - pr_warn("vmalloc area too small, limiting to %luMB\n", + pr_warn("vmalloc area is too small, limiting to %luMiB\n", vmalloc_reserve >> 20); } - if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) { - vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M); - pr_warn("vmalloc area is too big, limiting to %luMB\n", + vmalloc_max = VMALLOC_END - (PAGE_OFFSET + SZ_32M + VMALLOC_OFFSET); + if (vmalloc_reserve > vmalloc_max) { + vmalloc_reserve = vmalloc_max; + pr_warn("vmalloc area is too big, limiting to %luMiB\n", vmalloc_reserve >> 20); } - vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve); + vmalloc_size = vmalloc_reserve; return 0; } early_param("vmalloc", early_vmalloc); @@ -1165,7 +1166,8 @@ void __init adjust_lowmem_bounds(void) * and may itself be outside the valid range for which phys_addr_t * and therefore __pa() is defined. */ - vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET; + vmalloc_limit = (u64)VMALLOC_END - vmalloc_size - VMALLOC_OFFSET - + PAGE_OFFSET + PHYS_OFFSET; /* * The first usable region must be PMD aligned. 
Mark its start @@ -1246,7 +1248,7 @@ void __init adjust_lowmem_bounds(void) memblock_set_current_limit(memblock_limit); } -static inline void prepare_page_table(void) +static __init void prepare_page_table(void) { unsigned long addr; phys_addr_t end; @@ -1457,8 +1459,6 @@ static void __init kmap_init(void) static void __init map_lowmem(void) { - phys_addr_t kernel_x_start = round_down(__pa(KERNEL_START), SECTION_SIZE); - phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE); phys_addr_t start, end; u64 i; @@ -1466,55 +1466,126 @@ static void __init map_lowmem(void) for_each_mem_range(i, &start, &end) { struct map_desc map; + pr_debug("map lowmem start: 0x%08llx, end: 0x%08llx\n", + (long long)start, (long long)end); if (end > arm_lowmem_limit) end = arm_lowmem_limit; if (start >= end) break; - if (end < kernel_x_start) { - map.pfn = __phys_to_pfn(start); - map.virtual = __phys_to_virt(start); - map.length = end - start; - map.type = MT_MEMORY_RWX; + /* + * If our kernel image is in the VMALLOC area we need to remove + * the kernel physical memory from lowmem since the kernel will + * be mapped separately. + * + * The kernel will typically be at the very start of lowmem, + * but any placement relative to memory ranges is possible. + * + * If the memblock contains the kernel, we have to chisel out + * the kernel memory from it and map each part separately. We + * get 6 different theoretical cases: + * + * +--------+ +--------+ + * +-- start --+ +--------+ | Kernel | | Kernel | + * | | | Kernel | | case 2 | | case 5 | + * | | | case 1 | +--------+ | | +--------+ + * | Memory | +--------+ | | | Kernel | + * | range | +--------+ | | | case 6 | + * | | | Kernel | +--------+ | | +--------+ + * | | | case 3 | | Kernel | | | + * +-- end ----+ +--------+ | case 4 | | | + * +--------+ +--------+ + */ - create_mapping(&map); - } else if (start >= kernel_x_end) { - map.pfn = __phys_to_pfn(start); - map.virtual = __phys_to_virt(start); - map.length = end - start; - map.type = MT_MEMORY_RW; + /* Case 5: kernel covers range, don't map anything, should be rare */ + if ((start > kernel_sec_start) && (end < kernel_sec_end)) + break; - create_mapping(&map); - } else { - /* This better cover the entire kernel */ - if (start < kernel_x_start) { + /* Cases where the kernel is starting inside the range */ + if ((kernel_sec_start >= start) && (kernel_sec_start <= end)) { + /* Case 6: kernel is embedded in the range, we need two mappings */ + if ((start < kernel_sec_start) && (end > kernel_sec_end)) { + /* Map memory below the kernel */ map.pfn = __phys_to_pfn(start); map.virtual = __phys_to_virt(start); - map.length = kernel_x_start - start; + map.length = kernel_sec_start - start; map.type = MT_MEMORY_RW; - create_mapping(&map); - } - - map.pfn = __phys_to_pfn(kernel_x_start); - map.virtual = __phys_to_virt(kernel_x_start); - map.length = kernel_x_end - kernel_x_start; - map.type = MT_MEMORY_RWX; - - create_mapping(&map); - - if (kernel_x_end < end) { - map.pfn = __phys_to_pfn(kernel_x_end); - map.virtual = __phys_to_virt(kernel_x_end); - map.length = end - kernel_x_end; + /* Map memory above the kernel */ + map.pfn = __phys_to_pfn(kernel_sec_end); + map.virtual = __phys_to_virt(kernel_sec_end); + map.length = end - kernel_sec_end; map.type = MT_MEMORY_RW; - create_mapping(&map); + break; } + /* Case 1: kernel and range start at the same address, should be common */ + if (kernel_sec_start == start) + start = kernel_sec_end; + /* Case 3: kernel and range end at the same address, should be rare */ + 
if (kernel_sec_end == end) + end = kernel_sec_start; + } else if ((kernel_sec_start < start) && (kernel_sec_end > start) && (kernel_sec_end < end)) { + /* Case 2: kernel ends inside range, starts below it */ + start = kernel_sec_end; + } else if ((kernel_sec_start > start) && (kernel_sec_start < end) && (kernel_sec_end > end)) { + /* Case 4: kernel starts inside range, ends above it */ + end = kernel_sec_start; } + map.pfn = __phys_to_pfn(start); + map.virtual = __phys_to_virt(start); + map.length = end - start; + map.type = MT_MEMORY_RW; + create_mapping(&map); } } +static void __init map_kernel(void) +{ + /* + * We use the well known kernel section start and end and split the area in the + * middle like this: + * . . + * | RW memory | + * +----------------+ kernel_x_start + * | Executable | + * | kernel memory | + * +----------------+ kernel_x_end / kernel_nx_start + * | Non-executable | + * | kernel memory | + * +----------------+ kernel_nx_end + * | RW memory | + * . . + * + * Notice that we are dealing with section sized mappings here so all of this + * will be bumped to the closest section boundary. This means that some of the + * non-executable part of the kernel memory is actually mapped as executable. + * This will only persist until we turn on proper memory management later on + * and we remap the whole kernel with page granularity. + */ + phys_addr_t kernel_x_start = kernel_sec_start; + phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE); + phys_addr_t kernel_nx_start = kernel_x_end; + phys_addr_t kernel_nx_end = kernel_sec_end; + struct map_desc map; + + map.pfn = __phys_to_pfn(kernel_x_start); + map.virtual = __phys_to_virt(kernel_x_start); + map.length = kernel_x_end - kernel_x_start; + map.type = MT_MEMORY_RWX; + create_mapping(&map); + + /* If the nx part is small it may end up covered by the tail of the RWX section */ + if (kernel_x_end == kernel_nx_end) + return; + + map.pfn = __phys_to_pfn(kernel_nx_start); + map.virtual = __phys_to_virt(kernel_nx_start); + map.length = kernel_nx_end - kernel_nx_start; + map.type = MT_MEMORY_RW; + create_mapping(&map); +} + #ifdef CONFIG_ARM_PV_FIXUP typedef void pgtables_remap(long long offset, unsigned long pgd); pgtables_remap lpae_pgtables_remap_asm; @@ -1645,9 +1716,18 @@ void __init paging_init(const struct machine_desc *mdesc) { void *zero_page; + pr_debug("physical kernel sections: 0x%08x-0x%08x\n", + kernel_sec_start, kernel_sec_end); + prepare_page_table(); map_lowmem(); memblock_set_current_limit(arm_lowmem_limit); + pr_debug("lowmem limit is %08llx\n", (long long)arm_lowmem_limit); + /* + * After this point early_alloc(), i.e. 
the memblock allocator, can + * be used + */ + map_kernel(); dma_contiguous_remap(); early_fixmap_shutdown(); devicemaps_init(mdesc); diff --git a/arch/arm/mm/tlb-v6.S b/arch/arm/mm/tlb-v6.S index 5335b9687297..74f4b383afe3 100644 --- a/arch/arm/mm/tlb-v6.S +++ b/arch/arm/mm/tlb-v6.S @@ -24,7 +24,7 @@ * * - start - start address (may not be aligned) * - end - end address (exclusive, may not be aligned) - * - vma - vma_struct describing address range + * - vma - vm_area_struct describing address range * * It is assumed that: * - the "Invalidate single entry" instruction will invalidate diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S index 1bb28d7db567..87bf4ab17721 100644 --- a/arch/arm/mm/tlb-v7.S +++ b/arch/arm/mm/tlb-v7.S @@ -23,7 +23,7 @@ * * - start - start address (may not be aligned) * - end - end address (exclusive, may not be aligned) - * - vma - vma_struct describing address range + * - vma - vm_area_struct describing address range * * It is assumed that: * - the "Invalidate single entry" instruction will invalidate diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c index a9653117ca0d..27e0af78e88b 100644 --- a/arch/arm/probes/kprobes/core.c +++ b/arch/arm/probes/kprobes/core.c @@ -348,29 +348,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr) reset_current_kprobe(); } break; - - case KPROBE_HIT_ACTIVE: - case KPROBE_HIT_SSDONE: - /* - * We increment the nmissed count for accounting, - * we can also use npre/npostfault count for accounting - * these specific fault cases. - */ - kprobes_inc_nmissed_count(cur); - - /* - * We come here because instructions in the pre/post - * handler caused the page_fault, this could happen - * if handler tries to access user space by - * copy_from_user(), get_user() etc. Let the - * user-specified handler try to fix it. 
- */ - if (cur->fault_handler && cur->fault_handler(cur, regs, fsr)) - return 1; - break; - - default: - break; } return 0; diff --git a/arch/arm/probes/kprobes/test-thumb.c b/arch/arm/probes/kprobes/test-thumb.c index 456c181a7bfe..4e11f0b760f8 100644 --- a/arch/arm/probes/kprobes/test-thumb.c +++ b/arch/arm/probes/kprobes/test-thumb.c @@ -441,21 +441,21 @@ void kprobe_thumb32_test_cases(void) "3: mvn r0, r0 \n\t" "2: nop \n\t") - TEST_RX("tbh [pc, r",7, (9f-(1f+4))>>1,"]", + TEST_RX("tbh [pc, r",7, (9f-(1f+4))>>1,", lsl #1]", "9: \n\t" ".short (2f-1b-4)>>1 \n\t" ".short (3f-1b-4)>>1 \n\t" "3: mvn r0, r0 \n\t" "2: nop \n\t") - TEST_RX("tbh [pc, r",12, ((9f-(1f+4))>>1)+1,"]", + TEST_RX("tbh [pc, r",12, ((9f-(1f+4))>>1)+1,", lsl #1]", "9: \n\t" ".short (2f-1b-4)>>1 \n\t" ".short (3f-1b-4)>>1 \n\t" "3: mvn r0, r0 \n\t" "2: nop \n\t") - TEST_RRX("tbh [r",1,9f, ", r",14,1,"]", + TEST_RRX("tbh [r",1,9f, ", r",14,1,", lsl #1]", "9: \n\t" ".short (2f-1b-4)>>1 \n\t" ".short (3f-1b-4)>>1 \n\t" @@ -468,10 +468,10 @@ void kprobe_thumb32_test_cases(void) TEST_UNSUPPORTED("strexb r0, r1, [r2]") TEST_UNSUPPORTED("strexh r0, r1, [r2]") - TEST_UNSUPPORTED("strexd r0, r1, [r2]") + TEST_UNSUPPORTED("strexd r0, r1, r2, [r2]") TEST_UNSUPPORTED("ldrexb r0, [r1]") TEST_UNSUPPORTED("ldrexh r0, [r1]") - TEST_UNSUPPORTED("ldrexd r0, [r1]") + TEST_UNSUPPORTED("ldrexd r0, r1, [r1]") TEST_GROUP("Data-processing (shifted register) and (modified immediate)") diff --git a/arch/arm/tools/Makefile b/arch/arm/tools/Makefile index 87de1f63f649..4a5c50f67ced 100644 --- a/arch/arm/tools/Makefile +++ b/arch/arm/tools/Makefile @@ -33,39 +33,26 @@ _dummy := $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') \ $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') quiet_cmd_gen_mach = GEN $@ - cmd_gen_mach = mkdir -p $(dir $@) && \ - $(AWK) -f $(filter-out $(PHONY),$^) > $@ + cmd_gen_mach = $(AWK) -f $(real-prereqs) > $@ $(kapi)/mach-types.h: $(src)/gen-mach-types $(src)/mach-types FORCE $(call if_changed,gen_mach) quiet_cmd_syshdr = SYSHDR $@ - cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --abis $(abis) \ + cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --abis common,$* \ --offset __NR_SYSCALL_BASE $< $@ quiet_cmd_systbl = SYSTBL $@ - cmd_systbl = $(CONFIG_SHELL) $(systbl) --abis $(abis) $< $@ + cmd_systbl = $(CONFIG_SHELL) $(systbl) --abis common,$* $< $@ quiet_cmd_sysnr = SYSNR $@ - cmd_sysnr = $(CONFIG_SHELL) '$(sysnr)' '$<' '$@' \ - '$(syshdr_abi_$(basetarget))' + cmd_sysnr = $(CONFIG_SHELL) $(sysnr) $< $@ -$(uapi)/unistd-oabi.h: abis := common,oabi -$(uapi)/unistd-oabi.h: $(syscall) $(syshdr) FORCE +$(uapi)/unistd-%.h: $(syscall) $(syshdr) FORCE $(call if_changed,syshdr) -$(uapi)/unistd-eabi.h: abis := common,eabi -$(uapi)/unistd-eabi.h: $(syscall) $(syshdr) FORCE - $(call if_changed,syshdr) - -sysnr_abi_unistd-nr := common,oabi,eabi,compat $(kapi)/unistd-nr.h: $(syscall) $(sysnr) FORCE $(call if_changed,sysnr) -$(gen)/calls-oabi.S: abis := common,oabi -$(gen)/calls-oabi.S: $(syscall) $(systbl) FORCE - $(call if_changed,systbl) - -$(gen)/calls-eabi.S: abis := common,eabi -$(gen)/calls-eabi.S: $(syscall) $(systbl) FORCE +$(gen)/calls-%.S: $(syscall) $(systbl) FORCE $(call if_changed,systbl) diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl index 28e03b5fec00..c5df1179fc5d 100644 --- a/arch/arm/tools/syscall.tbl +++ b/arch/arm/tools/syscall.tbl @@ -456,7 +456,7 @@ 440 common process_madvise sys_process_madvise 441 common epoll_pwait2 sys_epoll_pwait2 442 common mount_setattr sys_mount_setattr -# 443 reserved for quotactl_path +443 
common quotactl_fd sys_quotactl_fd 444 common landlock_create_ruleset sys_landlock_create_ruleset 445 common landlock_add_rule sys_landlock_add_rule 446 common landlock_restrict_self sys_landlock_restrict_self diff --git a/arch/arm/tools/syscallnr.sh b/arch/arm/tools/syscallnr.sh index df3ccd0ca831..9e386770b6b3 100644 --- a/arch/arm/tools/syscallnr.sh +++ b/arch/arm/tools/syscallnr.sh @@ -2,14 +2,13 @@ # SPDX-License-Identifier: GPL-2.0 in="$1" out="$2" -my_abis=`echo "($3)" | tr ',' '|'` align=1 fileguard=_ASM_ARM_`basename "$out" | sed \ -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | tail -n1 | ( +grep -E "^[0-9A-Fa-fXx]+[[:space:]]+" "$in" | sort -n | tail -n1 | ( echo "#ifndef ${fileguard} #define ${fileguard} 1 diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c index 8ad576ecd0f1..7f1c106b746f 100644 --- a/arch/arm/xen/enlighten.c +++ b/arch/arm/xen/enlighten.c @@ -29,6 +29,7 @@ #include <linux/cpu.h> #include <linux/console.h> #include <linux/pvclock_gtod.h> +#include <linux/reboot.h> #include <linux/time64.h> #include <linux/timekeeping.h> #include <linux/timekeeper_internal.h> @@ -181,11 +182,18 @@ void xen_reboot(int reason) BUG_ON(rc); } -static void xen_restart(enum reboot_mode reboot_mode, const char *cmd) +static int xen_restart(struct notifier_block *nb, unsigned long action, + void *data) { xen_reboot(SHUTDOWN_reboot); + + return NOTIFY_DONE; } +static struct notifier_block xen_restart_nb = { + .notifier_call = xen_restart, + .priority = 192, +}; static void xen_power_off(void) { @@ -404,7 +412,7 @@ static int __init xen_pm_init(void) return -ENODEV; pm_power_off = xen_power_off; - arm_pm_restart = xen_restart; + register_restart_handler(&xen_restart_nb); if (!xen_initial_domain()) { struct timespec64 ts; xen_read_wallclock(&ts); |
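
Note on the restart-handler conversion: the reboot.c, setup.c and xen/enlighten.c hunks above all drop the global arm_pm_restart function pointer in favour of the generic restart notifier chain, so machine_restart() can simply call do_kernel_restart(). As a minimal sketch of what a machine port now does instead of assigning arm_pm_restart — the board name, reset action and initcall level below are illustrative only, and the priority mirrors the value setup.c registers with:

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

/* Illustrative restart hook: 'action' carries the reboot_mode, 'data' the command string */
static int myboard_restart(struct notifier_block *nb, unsigned long action,
			   void *data)
{
	/* ... poke the board-specific reset register here ... */
	return NOTIFY_DONE;
}

static struct notifier_block myboard_restart_nb = {
	.notifier_call	= myboard_restart,
	.priority	= 128,	/* default priority, same as the setup.c handler above */
};

static int __init myboard_restart_init(void)
{
	return register_restart_handler(&myboard_restart_nb);
}
core_initcall(myboard_restart_init);	/* initcall level chosen for illustration */

Priorities let several handlers coexist on the chain: the Xen hunk registers at 192 so it is tried before a default-priority (128) board handler such as the one installed from setup_arch().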
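
Note on the insn.c/ftrace changes: the new 'warn' argument exists because ftrace_make_nop() may now legitimately ask for a branch that is out of range. An ARM-mode branch encodes a signed 24-bit word offset, i.e. roughly +/-32 MiB from pc+8 (hence the -33554432..33554428 check above; the Thumb-2 encoding gives roughly +/-16 MiB), and a module loaded far from the core kernel cannot reach FTRACE_ADDR or MCOUNT_ADDR directly, so the CONFIG_ARM_MODULE_PLTS path falls back to one of the preallocated fixed PLT entries instead. A minimal sketch of the range test, using an illustrative helper name that is not a kernel API:

#include <linux/types.h>

/* Can an ARM-mode branch at 'pc' reach 'addr' directly? (sketch only) */
static bool arm_branch_in_range(unsigned long pc, unsigned long addr)
{
	long offset = (long)addr - (long)(pc + 8);	/* ARM pc reads ahead by 8 */

	/* signed 24-bit immediate, scaled by 4: -(1 << 25) .. (1 << 25) - 4 */
	return offset >= -33554432 && offset <= 33554428;
}

When this test fails and a module is involved, the ftrace_make_nop() hunk above rewrites the expected old instruction as a branch to get_module_plt(mod, ip, aaddr) rather than to the final target.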
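
Note on the early_vmalloc() change: storing vmalloc_size instead of vmalloc_min, and subtracting VMALLOC_OFFSET when clamping, keeps the guard gap below the vmalloc area from being eaten by a large vmalloc= value. A worked example of the new arithmetic, assuming the common 3G/1G split defaults (PAGE_OFFSET 0xc0000000, VMALLOC_END 0xff800000, VMALLOC_OFFSET 8 MiB — these values are configuration dependent, so treat the numbers as illustrative):

	vmalloc_max   = VMALLOC_END - (PAGE_OFFSET + SZ_32M + VMALLOC_OFFSET)
	              = 0xff800000 - (0xc0000000 + 0x02000000 + 0x00800000)
	              = 0x3d000000				/* 976 MiB cap on vmalloc= */

	vmalloc_limit = VMALLOC_END - vmalloc_size - VMALLOC_OFFSET
	                - PAGE_OFFSET + PHYS_OFFSET
	              = 0xff800000 - 0x0f000000 - 0x00800000
	                - 0xc0000000 + PHYS_OFFSET	/* default vmalloc_size = 240 MiB */
	              = PHYS_OFFSET + 0x30000000	/* i.e. 768 MiB of lowmem */

In other words, with the default 240 MiB vmalloc area the physical lowmem limit ends up 768 MiB above PHYS_OFFSET, unchanged from before; what changes is that oversized vmalloc= requests are now clamped so that the VMALLOC_OFFSET hole is always preserved.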