author		Linus Torvalds <torvalds@linux-foundation.org>	2016-10-10 14:04:16 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-10-10 14:04:16 -0700
commit		30066ce675d3af350bc5a53858991c0b518dda00 (patch)
tree		75db2274cd0887b11b4e297771287f0fb4c14b81 /arch
parent		6763afe4b9f39142bda2a92d69e62fe85f67251c (diff)
parent		c3afafa47898e34eb49828ec4ac92bcdc81c8f0c (diff)
download	lwn-30066ce675d3af350bc5a53858991c0b518dda00.tar.gz
		lwn-30066ce675d3af350bc5a53858991c0b518dda00.zip
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 4.9:

  API:
   - The crypto engine code now supports hashes.

  Algorithms:
   - Allow keys >= 2048 bits in FIPS mode for RSA.

  Drivers:
   - Memory overwrite fix for vmx ghash.
   - Add support for building ARM sha1-neon in Thumb2 mode.
   - Reenable ARM ghash-ce code by adding import/export.
   - Reenable img-hash by adding import/export.
   - Add support for multiple cores in omap-aes.
   - Add little-endian support for sha1-powerpc.
   - Add Cavium HWRNG driver for ThunderX SoC"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (137 commits)
  crypto: caam - treat SGT address pointer as u64
  crypto: ccp - Make syslog errors human-readable
  crypto: ccp - clean up data structure
  crypto: vmx - Ensure ghash-generic is enabled
  crypto: testmgr - add guard to dst buffer for ahash_export
  crypto: caam - Unmap region obtained by of_iomap
  crypto: sha1-powerpc - little-endian support
  crypto: gcm - Fix IV buffer size in crypto_gcm_setkey
  crypto: vmx - Fix memory corruption caused by p8_ghash
  crypto: ghash-generic - move common definitions to a new header file
  crypto: caam - fix sg dump
  hwrng: omap - Only fail if pm_runtime_get_sync returns < 0
  crypto: omap-sham - shrink the internal buffer size
  crypto: omap-sham - add support for export/import
  crypto: omap-sham - convert driver logic to use sgs for data xmit
  crypto: omap-sham - change the DMA threshold value to a define
  crypto: omap-sham - add support functions for sg based data handling
  crypto: omap-sham - rename sgl to sgl_tmp for deprecation
  crypto: omap-sham - align algorithms on word offset
  crypto: omap-sham - add context export/import stubs
  ...
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/crypto/ghash-ce-glue.c		26
-rw-r--r--	arch/arm/crypto/sha1-armv7-neon.S	1
-rw-r--r--	arch/powerpc/crypto/sha1-powerpc-asm.S	13
3 files changed, 36 insertions, 4 deletions
diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c
index 1568cb5cd870..7546b3c02466 100644
--- a/arch/arm/crypto/ghash-ce-glue.c
+++ b/arch/arm/crypto/ghash-ce-glue.c
@@ -138,7 +138,7 @@ static struct shash_alg ghash_alg = {
 	.setkey = ghash_setkey,
 	.descsize = sizeof(struct ghash_desc_ctx),
 	.base = {
-		.cra_name = "ghash",
+		.cra_name = "__ghash",
 		.cra_driver_name = "__driver-ghash-ce",
 		.cra_priority = 0,
 		.cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_INTERNAL,
@@ -220,6 +220,27 @@ static int ghash_async_digest(struct ahash_request *req)
 	}
 }
 
+static int ghash_async_import(struct ahash_request *req, const void *in)
+{
+	struct ahash_request *cryptd_req = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
+
+	desc->tfm = cryptd_ahash_child(ctx->cryptd_tfm);
+	desc->flags = req->base.flags;
+
+	return crypto_shash_import(desc, in);
+}
+
+static int ghash_async_export(struct ahash_request *req, void *out)
+{
+	struct ahash_request *cryptd_req = ahash_request_ctx(req);
+	struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
+
+	return crypto_shash_export(desc, out);
+}
+
 static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
 			      unsigned int keylen)
 {
@@ -268,7 +289,10 @@ static struct ahash_alg ghash_async_alg = {
 	.final = ghash_async_final,
 	.setkey = ghash_async_setkey,
 	.digest = ghash_async_digest,
+	.import = ghash_async_import,
+	.export = ghash_async_export,
 	.halg.digestsize = GHASH_DIGEST_SIZE,
+	.halg.statesize = sizeof(struct ghash_desc_ctx),
 	.halg.base = {
 		.cra_name = "ghash",
 		.cra_driver_name = "ghash-ce",
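
The hunks above give the cryptd-backed async wrapper working export/import callbacks and a non-zero halg.statesize, which generic users rely on to checkpoint and resume a partial hash; the absence of these callbacks is why the pull request describes ghash-ce as being "re-enabled". Below is a hypothetical, kernel-style sketch of that usage pattern through the generic ahash API; the helper name and the simplified synchronous error handling are illustrative assumptions, not part of this patch:

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/*
 * Illustrative only: hash data in two steps, checkpointing the partial
 * state in between via export/import.  Assumes synchronous completion;
 * a real caller would handle -EINPROGRESS/-EBUSY with a completion
 * callback.  "example_split_ghash" is not part of this patch.
 */
static int example_split_ghash(const u8 *key, unsigned int keylen,
			       struct scatterlist *sg1, unsigned int len1,
			       struct scatterlist *sg2, unsigned int len2,
			       u8 *digest)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req = NULL;
	void *state = NULL;
	int err;

	tfm = crypto_alloc_ahash("ghash", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
	if (!req || !state) {
		err = -ENOMEM;
		goto out;
	}

	err = crypto_ahash_setkey(tfm, key, keylen);
	if (err)
		goto out;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	/* Hash the first chunk, then save the partial state. */
	ahash_request_set_crypt(req, sg1, digest, len1);
	err = crypto_ahash_init(req) ?:
	      crypto_ahash_update(req) ?:
	      crypto_ahash_export(req, state);	/* needs halg.statesize */
	if (err)
		goto out;

	/* Resume from the saved state and finish with the second chunk. */
	ahash_request_set_crypt(req, sg2, digest, len2);
	err = crypto_ahash_import(req, state) ?:
	      crypto_ahash_update(req) ?:
	      crypto_ahash_final(req);
out:
	kfree(state);
	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}

In-kernel users of this pattern include the crypto self-tests' partial-hash cases and algif_hash, which exports the state when a hash socket is accept()ed.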
diff --git a/arch/arm/crypto/sha1-armv7-neon.S b/arch/arm/crypto/sha1-armv7-neon.S
index dcd01f3f0bb0..2468fade49cf 100644
--- a/arch/arm/crypto/sha1-armv7-neon.S
+++ b/arch/arm/crypto/sha1-armv7-neon.S
@@ -12,7 +12,6 @@
 #include <asm/assembler.h>
 
 .syntax unified
-.code 32
 .fpu neon
 
 .text
diff --git a/arch/powerpc/crypto/sha1-powerpc-asm.S b/arch/powerpc/crypto/sha1-powerpc-asm.S
index 125e16520061..82ddc9bdfeb1 100644
--- a/arch/powerpc/crypto/sha1-powerpc-asm.S
+++ b/arch/powerpc/crypto/sha1-powerpc-asm.S
@@ -7,6 +7,15 @@
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 
+#ifdef __BIG_ENDIAN__
+#define LWZ(rt, d, ra)	\
+	lwz	rt,d(ra)
+#else
+#define LWZ(rt, d, ra)	\
+	li	rt,d;	\
+	lwbrx	rt,rt,ra
+#endif
+
 /*
  * We roll the registers for T, A, B, C, D, E around on each
  * iteration; T on iteration t is A on iteration t+1, and so on.
@@ -23,7 +32,7 @@
 #define W(t)	(((t)%16)+16)
 
 #define LOADW(t)	\
-	lwz	W(t),(t)*4(r4)
+	LWZ(W(t),(t)*4,r4)
 
 #define STEPD0_LOAD(t)	\
 	andc	r0,RD(t),RB(t);	\
@@ -33,7 +42,7 @@
 	add	r0,RE(t),r15;	\
 	add	RT(t),RT(t),r6;	\
 	add	r14,r0,W(t);	\
-	lwz	W((t)+4),((t)+4)*4(r4);	\
+	LWZ(W((t)+4),((t)+4)*4,r4);	\
 	rotlwi	RB(t),RB(t),30;	\
 	add	RT(t),RT(t),r14
 
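
For context on the sha1-powerpc hunks: SHA-1 consumes its message block as big-endian 32-bit words, so the plain lwz load is only correct on big-endian kernels; the little-endian branch of LWZ materialises the offset with li and then uses lwbrx, the byte-reversed indexed load, to get the same big-endian interpretation in a single memory access. A rough C model of the macro's semantics, purely illustrative and not part of the patch:

#include <stdint.h>
#include <string.h>

/*
 * Rough C model of the LWZ(rt, d, ra) macro above.  SHA-1 interprets
 * each 32-bit message word as big-endian, so a plain load (lwz) is
 * enough on big-endian PowerPC, while a little-endian kernel needs a
 * byte-reversed load, which lwbrx performs in one instruction.
 * __BIG_ENDIAN__ is the same GCC-defined macro the assembly tests.
 */
static inline uint32_t sha1_load_be32(const uint8_t *base, unsigned int offset)
{
	uint32_t w;

	memcpy(&w, base + offset, sizeof(w));
#ifdef __BIG_ENDIAN__
	return w;			/* lwz: value is already big-endian */
#else
	return __builtin_bswap32(w);	/* li + lwbrx: swap bytes on load */
#endif
}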