author     Ard Biesheuvel <ardb@kernel.org>          2023-04-12 13:00:35 +0200
committer  Herbert Xu <herbert@gondor.apana.org.au>  2023-04-20 18:20:04 +0800
commit     94330fbe082acfd7ac9f2a348933944ba78b14dc (patch)
tree       b646a9bf65704f611dde3d56f10ab5970cba24c7 /arch/x86/crypto/sha256-avx-asm.S
parent     9ac589cf3cdf2344a5240d7df04ccb37070d7e96 (diff)
crypto: x86/sha - Use local .L symbols for code
Avoid cluttering up the kallsyms symbol table with entries that should not
end up in things like backtraces, as they have undescriptive and generated
identifiers.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
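As background for the change: the GNU assembler treats any label whose name
begins with ".L" as a local symbol, which is resolved within the object file
and never emitted into the symbol table, so it cannot show up in
/proc/kallsyms or in a backtrace. A bare label such as "loop0:", by contrast,
becomes a regular symbol. Below is a minimal illustrative sketch of the
pattern the patch applies; the function and label names are invented for
illustration (not taken from the patch), and SYM_FUNC_START/SYM_FUNC_END and
RET are the kernel's assembly linkage macros:

SYM_FUNC_START(example_transform)
	test	%rdx, %rdx		# any blocks left to process?
	jz	.Ldone			# ".L" prefix keeps this label out of the symbol table
.Lloop:					# local label: invisible to kallsyms and backtraces
	# ... process one 64-byte block (illustrative placeholder) ...
	add	$64, %rsi
	dec	%rdx
	jnz	.Lloop
.Ldone:
	RET
SYM_FUNC_END(example_transform)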
Diffstat (limited to 'arch/x86/crypto/sha256-avx-asm.S')
-rw-r--r--  arch/x86/crypto/sha256-avx-asm.S  16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
index 5555b5d5215a..53de72bdd851 100644
--- a/arch/x86/crypto/sha256-avx-asm.S
+++ b/arch/x86/crypto/sha256-avx-asm.S
@@ -360,7 +360,7 @@ SYM_TYPED_FUNC_START(sha256_transform_avx)
and $~15, %rsp # align stack pointer
shl $6, NUM_BLKS # convert to bytes
- jz done_hash
+ jz .Ldone_hash
add INP, NUM_BLKS # pointer to end of data
mov NUM_BLKS, _INP_END(%rsp)
@@ -377,7 +377,7 @@ SYM_TYPED_FUNC_START(sha256_transform_avx)
vmovdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK
vmovdqa _SHUF_00BA(%rip), SHUF_00BA
vmovdqa _SHUF_DC00(%rip), SHUF_DC00
-loop0:
+.Lloop0:
lea K256(%rip), TBL
## byte swap first 16 dwords
@@ -391,7 +391,7 @@ loop0:
## schedule 48 input dwords, by doing 3 rounds of 16 each
mov $3, SRND
.align 16
-loop1:
+.Lloop1:
vpaddd (TBL), X0, XFER
vmovdqa XFER, _XFER(%rsp)
FOUR_ROUNDS_AND_SCHED
@@ -410,10 +410,10 @@ loop1:
FOUR_ROUNDS_AND_SCHED
sub $1, SRND
- jne loop1
+ jne .Lloop1
mov $2, SRND
-loop2:
+.Lloop2:
vpaddd (TBL), X0, XFER
vmovdqa XFER, _XFER(%rsp)
DO_ROUND 0
@@ -433,7 +433,7 @@ loop2:
vmovdqa X3, X1
sub $1, SRND
- jne loop2
+ jne .Lloop2
addm (4*0)(CTX),a
addm (4*1)(CTX),b
@@ -447,9 +447,9 @@ loop2:
mov _INP(%rsp), INP
add $64, INP
cmp _INP_END(%rsp), INP
- jne loop0
+ jne .Lloop0
-done_hash:
+.Ldone_hash:
mov %rbp, %rsp
popq %rbp