author	Linus Torvalds <torvalds@linux-foundation.org>	2020-03-31 10:05:01 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-03-31 10:05:01 -0700
commit	3cd86a58f7734bf9cef38f6f899608ebcaa3da13 (patch)
tree	6ae5b8109011ee40deef645a9701e2d8dc4e4fce /arch/arm64/crypto
parent	a8222fd5b80c7ec83f257060670becbeea9b50b9 (diff)
parent	b2a84de2a2deb76a6a51609845341f508c518c03 (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Catalin Marinas:
 "The bulk is in-kernel pointer authentication, activity monitors and
  lots of asm symbol annotations. I also queued the sys_mremap() patch
  commenting the asymmetry in the address untagging.

  Summary:

   - In-kernel Pointer Authentication support (previously only offered
     to user space).

   - ARM Activity Monitors (AMU) extension support allowing better CPU
     utilisation numbers for the scheduler (frequency invariance).

   - Memory hot-remove support for arm64.

   - Lots of asm annotations (SYM_*) in preparation for the in-kernel
     Branch Target Identification (BTI) support.

   - arm64 perf updates: ARMv8.5-PMU 64-bit counters, refactoring the
     PMU init callbacks, support for new DT compatibles.

   - IPv6 header checksum optimisation.

   - Fixes: SDEI (software delegated exception interface) double-lock
     on hibernate with shared events.

   - Minor clean-ups and refactoring: cpu_ops accessor,
     cpu_do_switch_mm() converted to C, cpufeature finalisation helper.

   - sys_mremap() comment explaining the asymmetric address untagging
     behaviour"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (81 commits)
  mm/mremap: Add comment explaining the untagging behaviour of mremap()
  arm64: head: Convert install_el2_stub to SYM_INNER_LABEL
  arm64: Introduce get_cpu_ops() helper function
  arm64: Rename cpu_read_ops() to init_cpu_ops()
  arm64: Declare ACPI parking protocol CPU operation if needed
  arm64: move kimage_vaddr to .rodata
  arm64: use mov_q instead of literal ldr
  arm64: Kconfig: verify binutils support for ARM64_PTR_AUTH
  lkdtm: arm64: test kernel pointer authentication
  arm64: compile the kernel with ptrauth return address signing
  kconfig: Add support for 'as-option'
  arm64: suspend: restore the kernel ptrauth keys
  arm64: __show_regs: strip PAC from lr in printk
  arm64: unwind: strip PAC from kernel addresses
  arm64: mask PAC bits of __builtin_return_address
  arm64: initialize ptrauth keys for kernel booting task
  arm64: initialize and switch ptrauth kernel keys
  arm64: enable ptrauth earlier
  arm64: cpufeature: handle conflicts based on capability
  arm64: cpufeature: Move cpu capability helpers inside C file
  ...
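A note on the asm annotations this diff applies to arch/arm64/crypto: the SYM_* macros come from <linux/linkage.h> and replace the older ENTRY/ENDPROC pair (and, in these files, the local AES_ENTRY/AES_ENDPROC wrappers), giving assembly functions consistent symbol type and size annotations that the in-kernel BTI work mentioned above builds on. A minimal sketch of the new style follows; the function names here are hypothetical and not taken from this patch:

	#include <linux/linkage.h>

	/* Globally visible assembly function (previously ENTRY/ENDPROC). */
	SYM_FUNC_START(example_func)
		mov	x0, #0			// return 0
		ret
	SYM_FUNC_END(example_func)

	/* File-local helper, not exported (previously a bare label). */
	SYM_FUNC_START_LOCAL(example_helper)
		ret
	SYM_FUNC_END(example_helper)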
Diffstat (limited to 'arch/arm64/crypto')
-rw-r--r--	arch/arm64/crypto/aes-ce.S	 4
-rw-r--r--	arch/arm64/crypto/aes-modes.S	48
-rw-r--r--	arch/arm64/crypto/aes-neon.S	 4
-rw-r--r--	arch/arm64/crypto/ghash-ce-core.S	16
4 files changed, 36 insertions(+), 36 deletions(-)
diff --git a/arch/arm64/crypto/aes-ce.S b/arch/arm64/crypto/aes-ce.S
index 45062553467f..1dc5bbbfeed2 100644
--- a/arch/arm64/crypto/aes-ce.S
+++ b/arch/arm64/crypto/aes-ce.S
@@ -9,8 +9,8 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
-#define AES_ENTRY(func) SYM_FUNC_START(ce_ ## func)
-#define AES_ENDPROC(func) SYM_FUNC_END(ce_ ## func)
+#define AES_FUNC_START(func) SYM_FUNC_START(ce_ ## func)
+#define AES_FUNC_END(func) SYM_FUNC_END(ce_ ## func)
.arch armv8-a+crypto
diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
index 8a2faa42b57e..cf618d8f6cec 100644
--- a/arch/arm64/crypto/aes-modes.S
+++ b/arch/arm64/crypto/aes-modes.S
@@ -51,7 +51,7 @@ SYM_FUNC_END(aes_decrypt_block5x)
* int blocks)
*/
-AES_ENTRY(aes_ecb_encrypt)
+AES_FUNC_START(aes_ecb_encrypt)
stp x29, x30, [sp, #-16]!
mov x29, sp
@@ -79,10 +79,10 @@ ST5( st1 {v4.16b}, [x0], #16 )
.Lecbencout:
ldp x29, x30, [sp], #16
ret
-AES_ENDPROC(aes_ecb_encrypt)
+AES_FUNC_END(aes_ecb_encrypt)
-AES_ENTRY(aes_ecb_decrypt)
+AES_FUNC_START(aes_ecb_decrypt)
stp x29, x30, [sp, #-16]!
mov x29, sp
@@ -110,7 +110,7 @@ ST5( st1 {v4.16b}, [x0], #16 )
.Lecbdecout:
ldp x29, x30, [sp], #16
ret
-AES_ENDPROC(aes_ecb_decrypt)
+AES_FUNC_END(aes_ecb_decrypt)
/*
@@ -126,7 +126,7 @@ AES_ENDPROC(aes_ecb_decrypt)
* u32 const rk2[]);
*/
-AES_ENTRY(aes_essiv_cbc_encrypt)
+AES_FUNC_START(aes_essiv_cbc_encrypt)
ld1 {v4.16b}, [x5] /* get iv */
mov w8, #14 /* AES-256: 14 rounds */
@@ -135,7 +135,7 @@ AES_ENTRY(aes_essiv_cbc_encrypt)
enc_switch_key w3, x2, x6
b .Lcbcencloop4x
-AES_ENTRY(aes_cbc_encrypt)
+AES_FUNC_START(aes_cbc_encrypt)
ld1 {v4.16b}, [x5] /* get iv */
enc_prepare w3, x2, x6
@@ -167,10 +167,10 @@ AES_ENTRY(aes_cbc_encrypt)
.Lcbcencout:
st1 {v4.16b}, [x5] /* return iv */
ret
-AES_ENDPROC(aes_cbc_encrypt)
-AES_ENDPROC(aes_essiv_cbc_encrypt)
+AES_FUNC_END(aes_cbc_encrypt)
+AES_FUNC_END(aes_essiv_cbc_encrypt)
-AES_ENTRY(aes_essiv_cbc_decrypt)
+AES_FUNC_START(aes_essiv_cbc_decrypt)
stp x29, x30, [sp, #-16]!
mov x29, sp
@@ -181,7 +181,7 @@ AES_ENTRY(aes_essiv_cbc_decrypt)
encrypt_block cbciv, w8, x6, x7, w9
b .Lessivcbcdecstart
-AES_ENTRY(aes_cbc_decrypt)
+AES_FUNC_START(aes_cbc_decrypt)
stp x29, x30, [sp, #-16]!
mov x29, sp
@@ -238,8 +238,8 @@ ST5( st1 {v4.16b}, [x0], #16 )
st1 {cbciv.16b}, [x5] /* return iv */
ldp x29, x30, [sp], #16
ret
-AES_ENDPROC(aes_cbc_decrypt)
-AES_ENDPROC(aes_essiv_cbc_decrypt)
+AES_FUNC_END(aes_cbc_decrypt)
+AES_FUNC_END(aes_essiv_cbc_decrypt)
/*
@@ -249,7 +249,7 @@ AES_ENDPROC(aes_essiv_cbc_decrypt)
* int rounds, int bytes, u8 const iv[])
*/
-AES_ENTRY(aes_cbc_cts_encrypt)
+AES_FUNC_START(aes_cbc_cts_encrypt)
adr_l x8, .Lcts_permute_table
sub x4, x4, #16
add x9, x8, #32
@@ -276,9 +276,9 @@ AES_ENTRY(aes_cbc_cts_encrypt)
st1 {v0.16b}, [x4] /* overlapping stores */
st1 {v1.16b}, [x0]
ret
-AES_ENDPROC(aes_cbc_cts_encrypt)
+AES_FUNC_END(aes_cbc_cts_encrypt)
-AES_ENTRY(aes_cbc_cts_decrypt)
+AES_FUNC_START(aes_cbc_cts_decrypt)
adr_l x8, .Lcts_permute_table
sub x4, x4, #16
add x9, x8, #32
@@ -305,7 +305,7 @@ AES_ENTRY(aes_cbc_cts_decrypt)
st1 {v2.16b}, [x4] /* overlapping stores */
st1 {v0.16b}, [x0]
ret
-AES_ENDPROC(aes_cbc_cts_decrypt)
+AES_FUNC_END(aes_cbc_cts_decrypt)
.section ".rodata", "a"
.align 6
@@ -324,7 +324,7 @@ AES_ENDPROC(aes_cbc_cts_decrypt)
* int blocks, u8 ctr[])
*/
-AES_ENTRY(aes_ctr_encrypt)
+AES_FUNC_START(aes_ctr_encrypt)
stp x29, x30, [sp, #-16]!
mov x29, sp
@@ -409,7 +409,7 @@ ST5( st1 {v4.16b}, [x0], #16 )
rev x7, x7
ins vctr.d[0], x7
b .Lctrcarrydone
-AES_ENDPROC(aes_ctr_encrypt)
+AES_FUNC_END(aes_ctr_encrypt)
/*
@@ -433,7 +433,7 @@ AES_ENDPROC(aes_ctr_encrypt)
uzp1 xtsmask.4s, xtsmask.4s, \tmp\().4s
.endm
-AES_ENTRY(aes_xts_encrypt)
+AES_FUNC_START(aes_xts_encrypt)
stp x29, x30, [sp, #-16]!
mov x29, sp
@@ -518,9 +518,9 @@ AES_ENTRY(aes_xts_encrypt)
st1 {v2.16b}, [x4] /* overlapping stores */
mov w4, wzr
b .Lxtsencctsout
-AES_ENDPROC(aes_xts_encrypt)
+AES_FUNC_END(aes_xts_encrypt)
-AES_ENTRY(aes_xts_decrypt)
+AES_FUNC_START(aes_xts_decrypt)
stp x29, x30, [sp, #-16]!
mov x29, sp
@@ -612,13 +612,13 @@ AES_ENTRY(aes_xts_decrypt)
st1 {v2.16b}, [x4] /* overlapping stores */
mov w4, wzr
b .Lxtsdecctsout
-AES_ENDPROC(aes_xts_decrypt)
+AES_FUNC_END(aes_xts_decrypt)
/*
* aes_mac_update(u8 const in[], u32 const rk[], int rounds,
* int blocks, u8 dg[], int enc_before, int enc_after)
*/
-AES_ENTRY(aes_mac_update)
+AES_FUNC_START(aes_mac_update)
frame_push 6
mov x19, x0
@@ -676,4 +676,4 @@ AES_ENTRY(aes_mac_update)
ld1 {v0.16b}, [x23] /* get dg */
enc_prepare w21, x20, x0
b .Lmacloop4x
-AES_ENDPROC(aes_mac_update)
+AES_FUNC_END(aes_mac_update)
diff --git a/arch/arm64/crypto/aes-neon.S b/arch/arm64/crypto/aes-neon.S
index 247d34ddaab0..e47d3ec2cfb4 100644
--- a/arch/arm64/crypto/aes-neon.S
+++ b/arch/arm64/crypto/aes-neon.S
@@ -8,8 +8,8 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
-#define AES_ENTRY(func) SYM_FUNC_START(neon_ ## func)
-#define AES_ENDPROC(func) SYM_FUNC_END(neon_ ## func)
+#define AES_FUNC_START(func) SYM_FUNC_START(neon_ ## func)
+#define AES_FUNC_END(func) SYM_FUNC_END(neon_ ## func)
xtsmask .req v7
cbciv .req v7
diff --git a/arch/arm64/crypto/ghash-ce-core.S b/arch/arm64/crypto/ghash-ce-core.S
index 084c6a30b03a..6b958dcdf136 100644
--- a/arch/arm64/crypto/ghash-ce-core.S
+++ b/arch/arm64/crypto/ghash-ce-core.S
@@ -587,20 +587,20 @@ CPU_LE( rev w8, w8 )
* struct ghash_key const *k, u64 dg[], u8 ctr[],
* int rounds, u8 tag)
*/
-ENTRY(pmull_gcm_encrypt)
+SYM_FUNC_START(pmull_gcm_encrypt)
pmull_gcm_do_crypt 1
-ENDPROC(pmull_gcm_encrypt)
+SYM_FUNC_END(pmull_gcm_encrypt)
/*
* void pmull_gcm_decrypt(int blocks, u8 dst[], const u8 src[],
* struct ghash_key const *k, u64 dg[], u8 ctr[],
* int rounds, u8 tag)
*/
-ENTRY(pmull_gcm_decrypt)
+SYM_FUNC_START(pmull_gcm_decrypt)
pmull_gcm_do_crypt 0
-ENDPROC(pmull_gcm_decrypt)
+SYM_FUNC_END(pmull_gcm_decrypt)
-pmull_gcm_ghash_4x:
+SYM_FUNC_START_LOCAL(pmull_gcm_ghash_4x)
movi MASK.16b, #0xe1
shl MASK.2d, MASK.2d, #57
@@ -681,9 +681,9 @@ pmull_gcm_ghash_4x:
eor XL.16b, XL.16b, T2.16b
ret
-ENDPROC(pmull_gcm_ghash_4x)
+SYM_FUNC_END(pmull_gcm_ghash_4x)
-pmull_gcm_enc_4x:
+SYM_FUNC_START_LOCAL(pmull_gcm_enc_4x)
ld1 {KS0.16b}, [x5] // load upper counter
sub w10, w8, #4
sub w11, w8, #3
@@ -746,7 +746,7 @@ pmull_gcm_enc_4x:
eor INP3.16b, INP3.16b, KS3.16b
ret
-ENDPROC(pmull_gcm_enc_4x)
+SYM_FUNC_END(pmull_gcm_enc_4x)
.section ".rodata", "a"
.align 6