author    Mike Marshall <hubcap@omnibond.com>    2016-03-14 15:39:42 -0400
committer Mike Marshall <hubcap@omnibond.com>    2016-03-14 15:39:42 -0400
commit    ab6652524aaf834d5dcdb46dd7695813b8d63da5 (patch)
tree      bb3876a9b61254be902416f7dbf578deb28e9f22 /crypto
parent    acfcbaf1925f2dc5c46c61de69d756dec92a2ff8 (diff)
parent    b562e44f507e863c6792946e4e1b1449fbbac85d (diff)
Orangefs: merge to v4.5

Merge tag 'v4.5' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux into current

Linux 4.5
Diffstat (limited to 'crypto')
-rw-r--r--  crypto/Kconfig                            |    2
-rw-r--r--  crypto/Makefile                           |    1
-rw-r--r--  crypto/af_alg.c                           |   55
-rw-r--r--  crypto/ahash.c                            |    5
-rw-r--r--  crypto/akcipher.c                         |   34
-rw-r--r--  crypto/algapi.c                           |    9
-rw-r--r--  crypto/algif_aead.c                       |   10
-rw-r--r--  crypto/algif_hash.c                       |  169
-rw-r--r--  crypto/algif_skcipher.c                   |  266
-rw-r--r--  crypto/asymmetric_keys/pkcs7_parser.c     |    4
-rw-r--r--  crypto/asymmetric_keys/signature.c        |    2
-rw-r--r--  crypto/asymmetric_keys/x509_public_key.c  |    2
-rw-r--r--  crypto/chacha20poly1305.c                 |    8
-rw-r--r--  crypto/crc32c_generic.c                   |    1
-rw-r--r--  crypto/cryptd.c                           |    4
-rw-r--r--  crypto/crypto_user.c                      |    6
-rw-r--r--  crypto/drbg.c                             |    6
-rw-r--r--  crypto/hash_info.c                        |    2
-rw-r--r--  crypto/mcryptd.c                          |    8
-rw-r--r--  crypto/md5.c                              |    6
-rw-r--r--  crypto/rsa-pkcs1pad.c                     |  628
-rw-r--r--  crypto/rsa.c                              |   40
-rw-r--r--  crypto/sha1_generic.c                     |    7
-rw-r--r--  crypto/sha256_generic.c                   |   16
-rw-r--r--  crypto/shash.c                            |    5
-rw-r--r--  crypto/skcipher.c                         |    2
-rw-r--r--  crypto/tcrypt.c                           |    2
27 files changed, 1168 insertions, 132 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 7240821137fd..3be07ad1d80d 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -472,11 +472,13 @@ config CRYPTO_CRCT10DIF_PCLMUL
config CRYPTO_GHASH
tristate "GHASH digest algorithm"
select CRYPTO_GF128MUL
+ select CRYPTO_HASH
help
GHASH is message digest algorithm for GCM (Galois/Counter Mode).
config CRYPTO_POLY1305
tristate "Poly1305 authenticator algorithm"
+ select CRYPTO_HASH
help
Poly1305 authenticator algorithm, RFC7539.
diff --git a/crypto/Makefile b/crypto/Makefile
index f7aba923458d..2acdbbd30475 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -40,6 +40,7 @@ rsa_generic-y := rsapubkey-asn1.o
rsa_generic-y += rsaprivkey-asn1.o
rsa_generic-y += rsa.o
rsa_generic-y += rsa_helper.o
+rsa_generic-y += rsa-pkcs1pad.o
obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o
cryptomgr-y := algboss.o testmgr.o
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index a8e7aa3e257b..f5e18c2a4852 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -76,6 +76,8 @@ int af_alg_register_type(const struct af_alg_type *type)
goto unlock;
type->ops->owner = THIS_MODULE;
+ if (type->ops_nokey)
+ type->ops_nokey->owner = THIS_MODULE;
node->type = type;
list_add(&node->list, &alg_types);
err = 0;
@@ -125,6 +127,26 @@ int af_alg_release(struct socket *sock)
}
EXPORT_SYMBOL_GPL(af_alg_release);
+void af_alg_release_parent(struct sock *sk)
+{
+ struct alg_sock *ask = alg_sk(sk);
+ unsigned int nokey = ask->nokey_refcnt;
+ bool last = nokey && !ask->refcnt;
+
+ sk = ask->parent;
+ ask = alg_sk(sk);
+
+ lock_sock(sk);
+ ask->nokey_refcnt -= nokey;
+ if (!last)
+ last = !--ask->refcnt;
+ release_sock(sk);
+
+ if (last)
+ sock_put(sk);
+}
+EXPORT_SYMBOL_GPL(af_alg_release_parent);
+
static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
const u32 forbidden = CRYPTO_ALG_INTERNAL;
@@ -133,6 +155,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
struct sockaddr_alg *sa = (void *)uaddr;
const struct af_alg_type *type;
void *private;
+ int err;
if (sock->state == SS_CONNECTED)
return -EINVAL;
@@ -160,16 +183,22 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
return PTR_ERR(private);
}
+ err = -EBUSY;
lock_sock(sk);
+ if (ask->refcnt | ask->nokey_refcnt)
+ goto unlock;
swap(ask->type, type);
swap(ask->private, private);
+ err = 0;
+
+unlock:
release_sock(sk);
alg_do_release(type, private);
- return 0;
+ return err;
}
static int alg_setkey(struct sock *sk, char __user *ukey,
@@ -202,11 +231,15 @@ static int alg_setsockopt(struct socket *sock, int level, int optname,
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
const struct af_alg_type *type;
- int err = -ENOPROTOOPT;
+ int err = -EBUSY;
lock_sock(sk);
+ if (ask->refcnt)
+ goto unlock;
+
type = ask->type;
+ err = -ENOPROTOOPT;
if (level != SOL_ALG || !type)
goto unlock;
@@ -238,6 +271,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
struct alg_sock *ask = alg_sk(sk);
const struct af_alg_type *type;
struct sock *sk2;
+ unsigned int nokey;
int err;
lock_sock(sk);
@@ -257,20 +291,29 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
security_sk_clone(sk, sk2);
err = type->accept(ask->private, sk2);
- if (err) {
- sk_free(sk2);
+
+ nokey = err == -ENOKEY;
+ if (nokey && type->accept_nokey)
+ err = type->accept_nokey(ask->private, sk2);
+
+ if (err)
goto unlock;
- }
sk2->sk_family = PF_ALG;
- sock_hold(sk);
+ if (nokey || !ask->refcnt++)
+ sock_hold(sk);
+ ask->nokey_refcnt += nokey;
alg_sk(sk2)->parent = sk;
alg_sk(sk2)->type = type;
+ alg_sk(sk2)->nokey_refcnt = nokey;
newsock->ops = type->ops;
newsock->state = SS_CONNECTED;
+ if (nokey)
+ newsock->ops = type->ops_nokey;
+
err = 0;
unlock:
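The af_alg.c hunks above tie key state into socket lifetime: alg_bind() now refuses to swap the transform once request sockets exist (returning -EBUSY), alg_setsockopt() likewise rejects key changes while keyed children are live, and af_alg_accept() hands a keyless parent the type's ops_nokey vtable so I/O is fenced off until a key arrives. A minimal userspace sketch of the bind/accept flow these reference counts guard, assuming a kernel with these patches; sha256 takes no key, so the nokey path never triggers, and error checking is elided:

#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",		/* served by algif_type_hash */
		.salg_name   = "sha256",	/* unkeyed, so accept() succeeds */
	};
	char digest[32];
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));

	/* accept() takes the parent reference counted above */
	opfd = accept(tfmfd, NULL, 0);

	write(opfd, "abc", 3);
	read(opfd, digest, sizeof(digest));	/* SHA-256 of "abc" */

	close(opfd);
	close(tfmfd);	/* last ref: af_alg_release_parent() drops the parent */
	return 0;
}

With the new -EBUSY check, calling bind() again on tfmfd after accept() now fails instead of silently swapping the tfm out from under the child socket.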
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 9c1dc8d6106a..d19b52324cf5 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -451,6 +451,7 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
struct ahash_alg *alg = crypto_ahash_alg(hash);
hash->setkey = ahash_nosetkey;
+ hash->has_setkey = false;
hash->export = ahash_no_export;
hash->import = ahash_no_import;
@@ -463,8 +464,10 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
hash->finup = alg->finup ?: ahash_def_finup;
hash->digest = alg->digest;
- if (alg->setkey)
+ if (alg->setkey) {
hash->setkey = alg->setkey;
+ hash->has_setkey = true;
+ }
if (alg->export)
hash->export = alg->export;
if (alg->import)
diff --git a/crypto/akcipher.c b/crypto/akcipher.c
index 120ec042ec9e..def301ed1288 100644
--- a/crypto/akcipher.c
+++ b/crypto/akcipher.c
@@ -21,6 +21,7 @@
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include <crypto/akcipher.h>
+#include <crypto/internal/akcipher.h>
#include "internal.h"
#ifdef CONFIG_NET
@@ -75,9 +76,17 @@ static int crypto_akcipher_init_tfm(struct crypto_tfm *tfm)
return 0;
}
+static void crypto_akcipher_free_instance(struct crypto_instance *inst)
+{
+ struct akcipher_instance *akcipher = akcipher_instance(inst);
+
+ akcipher->free(akcipher);
+}
+
static const struct crypto_type crypto_akcipher_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_akcipher_init_tfm,
+ .free = crypto_akcipher_free_instance,
#ifdef CONFIG_PROC_FS
.show = crypto_akcipher_show,
#endif
@@ -88,6 +97,14 @@ static const struct crypto_type crypto_akcipher_type = {
.tfmsize = offsetof(struct crypto_akcipher, base),
};
+int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn, const char *name,
+ u32 type, u32 mask)
+{
+ spawn->base.frontend = &crypto_akcipher_type;
+ return crypto_grab_spawn(&spawn->base, name, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_grab_akcipher);
+
struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type,
u32 mask)
{
@@ -95,13 +112,20 @@ struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type,
}
EXPORT_SYMBOL_GPL(crypto_alloc_akcipher);
-int crypto_register_akcipher(struct akcipher_alg *alg)
+static void akcipher_prepare_alg(struct akcipher_alg *alg)
{
struct crypto_alg *base = &alg->base;
base->cra_type = &crypto_akcipher_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_AKCIPHER;
+}
+
+int crypto_register_akcipher(struct akcipher_alg *alg)
+{
+ struct crypto_alg *base = &alg->base;
+
+ akcipher_prepare_alg(alg);
return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_akcipher);
@@ -112,5 +136,13 @@ void crypto_unregister_akcipher(struct akcipher_alg *alg)
}
EXPORT_SYMBOL_GPL(crypto_unregister_akcipher);
+int akcipher_register_instance(struct crypto_template *tmpl,
+ struct akcipher_instance *inst)
+{
+ akcipher_prepare_alg(&inst->alg);
+ return crypto_register_instance(tmpl, akcipher_crypto_instance(inst));
+}
+EXPORT_SYMBOL_GPL(akcipher_register_instance);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic public key cipher type");
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 59bf491fe3d8..7be76aa31579 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -93,16 +93,15 @@ static struct list_head *crypto_more_spawns(struct crypto_alg *alg,
{
struct crypto_spawn *spawn, *n;
- if (list_empty(stack))
+ spawn = list_first_entry_or_null(stack, struct crypto_spawn, list);
+ if (!spawn)
return NULL;
- spawn = list_first_entry(stack, struct crypto_spawn, list);
- n = list_entry(spawn->list.next, struct crypto_spawn, list);
+ n = list_next_entry(spawn, list);
if (spawn->alg && &n->list != stack && !n->alg)
n->alg = (n->list.next == stack) ? alg :
- &list_entry(n->list.next, struct crypto_spawn,
- list)->inst->alg;
+ &list_next_entry(n, list)->inst->alg;
list_move(&spawn->list, secondary_spawns);
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 6d4d4569447e..147069c9afd0 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -106,7 +106,7 @@ static void aead_wmem_wakeup(struct sock *sk)
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
- if (wq_has_sleeper(wq))
+ if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
POLLRDNORM |
POLLRDBAND);
@@ -157,7 +157,7 @@ static void aead_data_wakeup(struct sock *sk)
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
- if (wq_has_sleeper(wq))
+ if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
POLLRDNORM |
POLLRDBAND);
@@ -213,7 +213,7 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
}
while (size) {
- unsigned long len = size;
+ size_t len = size;
struct scatterlist *sg = NULL;
/* use the existing memory in an allocated page */
@@ -247,7 +247,7 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
/* allocate a new page */
len = min_t(unsigned long, size, aead_sndbuf(sk));
while (len) {
- int plen = 0;
+ size_t plen = 0;
if (sgl->cur >= ALG_MAX_PAGES) {
aead_put_sgl(sk);
@@ -256,7 +256,7 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
}
sg = sgl->sg + sgl->cur;
- plen = min_t(int, len, PAGE_SIZE);
+ plen = min_t(size_t, len, PAGE_SIZE);
sg_assign_page(sg, alloc_page(GFP_KERNEL));
err = -ENOMEM;
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index b4c24fe3dcfb..68a5ceaa04c8 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -34,6 +34,11 @@ struct hash_ctx {
struct ahash_request req;
};
+struct algif_hash_tfm {
+ struct crypto_ahash *hash;
+ bool has_key;
+};
+
static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
size_t ignored)
{
@@ -49,7 +54,8 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
lock_sock(sk);
if (!ctx->more) {
- err = crypto_ahash_init(&ctx->req);
+ err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req),
+ &ctx->completion);
if (err)
goto unlock;
}
@@ -120,6 +126,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
} else {
if (!ctx->more) {
err = crypto_ahash_init(&ctx->req);
+ err = af_alg_wait_for_completion(err, &ctx->completion);
if (err)
goto unlock;
}
@@ -235,19 +242,151 @@ static struct proto_ops algif_hash_ops = {
.accept = hash_accept,
};
+static int hash_check_key(struct socket *sock)
+{
+ int err = 0;
+ struct sock *psk;
+ struct alg_sock *pask;
+ struct algif_hash_tfm *tfm;
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+
+ lock_sock(sk);
+ if (ask->refcnt)
+ goto unlock_child;
+
+ psk = ask->parent;
+ pask = alg_sk(ask->parent);
+ tfm = pask->private;
+
+ err = -ENOKEY;
+ lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
+ if (!tfm->has_key)
+ goto unlock;
+
+ if (!pask->refcnt++)
+ sock_hold(psk);
+
+ ask->refcnt = 1;
+ sock_put(psk);
+
+ err = 0;
+
+unlock:
+ release_sock(psk);
+unlock_child:
+ release_sock(sk);
+
+ return err;
+}
+
+static int hash_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
+ size_t size)
+{
+ int err;
+
+ err = hash_check_key(sock);
+ if (err)
+ return err;
+
+ return hash_sendmsg(sock, msg, size);
+}
+
+static ssize_t hash_sendpage_nokey(struct socket *sock, struct page *page,
+ int offset, size_t size, int flags)
+{
+ int err;
+
+ err = hash_check_key(sock);
+ if (err)
+ return err;
+
+ return hash_sendpage(sock, page, offset, size, flags);
+}
+
+static int hash_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
+ size_t ignored, int flags)
+{
+ int err;
+
+ err = hash_check_key(sock);
+ if (err)
+ return err;
+
+ return hash_recvmsg(sock, msg, ignored, flags);
+}
+
+static int hash_accept_nokey(struct socket *sock, struct socket *newsock,
+ int flags)
+{
+ int err;
+
+ err = hash_check_key(sock);
+ if (err)
+ return err;
+
+ return hash_accept(sock, newsock, flags);
+}
+
+static struct proto_ops algif_hash_ops_nokey = {
+ .family = PF_ALG,
+
+ .connect = sock_no_connect,
+ .socketpair = sock_no_socketpair,
+ .getname = sock_no_getname,
+ .ioctl = sock_no_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .getsockopt = sock_no_getsockopt,
+ .mmap = sock_no_mmap,
+ .bind = sock_no_bind,
+ .setsockopt = sock_no_setsockopt,
+ .poll = sock_no_poll,
+
+ .release = af_alg_release,
+ .sendmsg = hash_sendmsg_nokey,
+ .sendpage = hash_sendpage_nokey,
+ .recvmsg = hash_recvmsg_nokey,
+ .accept = hash_accept_nokey,
+};
+
static void *hash_bind(const char *name, u32 type, u32 mask)
{
- return crypto_alloc_ahash(name, type, mask);
+ struct algif_hash_tfm *tfm;
+ struct crypto_ahash *hash;
+
+ tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
+ if (!tfm)
+ return ERR_PTR(-ENOMEM);
+
+ hash = crypto_alloc_ahash(name, type, mask);
+ if (IS_ERR(hash)) {
+ kfree(tfm);
+ return ERR_CAST(hash);
+ }
+
+ tfm->hash = hash;
+
+ return tfm;
}
static void hash_release(void *private)
{
- crypto_free_ahash(private);
+ struct algif_hash_tfm *tfm = private;
+
+ crypto_free_ahash(tfm->hash);
+ kfree(tfm);
}
static int hash_setkey(void *private, const u8 *key, unsigned int keylen)
{
- return crypto_ahash_setkey(private, key, keylen);
+ struct algif_hash_tfm *tfm = private;
+ int err;
+
+ err = crypto_ahash_setkey(tfm->hash, key, keylen);
+ tfm->has_key = !err;
+
+ return err;
}
static void hash_sock_destruct(struct sock *sk)
@@ -261,12 +400,14 @@ static void hash_sock_destruct(struct sock *sk)
af_alg_release_parent(sk);
}
-static int hash_accept_parent(void *private, struct sock *sk)
+static int hash_accept_parent_nokey(void *private, struct sock *sk)
{
struct hash_ctx *ctx;
struct alg_sock *ask = alg_sk(sk);
- unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(private);
- unsigned ds = crypto_ahash_digestsize(private);
+ struct algif_hash_tfm *tfm = private;
+ struct crypto_ahash *hash = tfm->hash;
+ unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash);
+ unsigned ds = crypto_ahash_digestsize(hash);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
@@ -286,7 +427,7 @@ static int hash_accept_parent(void *private, struct sock *sk)
ask->private = ctx;
- ahash_request_set_tfm(&ctx->req, private);
+ ahash_request_set_tfm(&ctx->req, hash);
ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
af_alg_complete, &ctx->completion);
@@ -295,12 +436,24 @@ static int hash_accept_parent(void *private, struct sock *sk)
return 0;
}
+static int hash_accept_parent(void *private, struct sock *sk)
+{
+ struct algif_hash_tfm *tfm = private;
+
+ if (!tfm->has_key && crypto_ahash_has_setkey(tfm->hash))
+ return -ENOKEY;
+
+ return hash_accept_parent_nokey(private, sk);
+}
+
static const struct af_alg_type algif_type_hash = {
.bind = hash_bind,
.release = hash_release,
.setkey = hash_setkey,
.accept = hash_accept_parent,
+ .accept_nokey = hash_accept_parent_nokey,
.ops = &algif_hash_ops,
+ .ops_nokey = &algif_hash_ops_nokey,
.name = "hash",
.owner = THIS_MODULE
};
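The algif_hash conversion wraps the bare crypto_ahash in an algif_hash_tfm that remembers whether a key was ever set; accept() on a keyed algorithm without a key falls back to hash_accept_parent_nokey(), and every nokey op re-runs hash_check_key() so the child socket is upgraded once the parent gets a key. A hedged userspace sketch of that -ENOKEY round trip (hypothetical test program, assuming these patches; error checks trimmed):

#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "hmac(sha256)",	/* keyed: has_setkey is true */
	};
	static const char key[32] = "0123456789abcdef0123456789abcdef";
	char mac[32];
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));

	/* No key yet, so accept() lands on algif_hash_ops_nokey ... */
	opfd = accept(tfmfd, NULL, 0);
	if (write(opfd, "abc", 3) < 0 && errno == ENOKEY)
		printf("write before setkey: ENOKEY, as expected\n");

	/* ... setting the key on the parent flips tfm->has_key ... */
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));

	/* ... and hash_check_key() now lets the same child proceed */
	write(opfd, "abc", 3);
	read(opfd, mac, sizeof(mac));	/* HMAC-SHA256 of "abc" */

	close(opfd);
	close(tfmfd);
	return 0;
}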
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 634b4d1ab681..28556fce4267 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -31,6 +31,11 @@ struct skcipher_sg_list {
struct scatterlist sg[0];
};
+struct skcipher_tfm {
+ struct crypto_skcipher *skcipher;
+ bool has_key;
+};
+
struct skcipher_ctx {
struct list_head tsgl;
struct af_alg_sgl rsgl;
@@ -40,7 +45,7 @@ struct skcipher_ctx {
struct af_alg_completion completion;
atomic_t inflight;
- unsigned used;
+ size_t used;
unsigned int len;
bool more;
@@ -60,18 +65,10 @@ struct skcipher_async_req {
struct skcipher_async_rsgl first_sgl;
struct list_head list;
struct scatterlist *tsg;
- char iv[];
+ atomic_t *inflight;
+ struct skcipher_request req;
};
-#define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \
- crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req)))
-
-#define GET_REQ_SIZE(ctx) \
- crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req))
-
-#define GET_IV_SIZE(ctx) \
- crypto_skcipher_ivsize(crypto_skcipher_reqtfm(&ctx->req))
-
#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
sizeof(struct scatterlist) - 1)
@@ -97,15 +94,12 @@ static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
static void skcipher_async_cb(struct crypto_async_request *req, int err)
{
- struct sock *sk = req->data;
- struct alg_sock *ask = alg_sk(sk);
- struct skcipher_ctx *ctx = ask->private;
- struct skcipher_async_req *sreq = GET_SREQ(req, ctx);
+ struct skcipher_async_req *sreq = req->data;
struct kiocb *iocb = sreq->iocb;
- atomic_dec(&ctx->inflight);
+ atomic_dec(sreq->inflight);
skcipher_free_async_sgls(sreq);
- kfree(req);
+ kzfree(sreq);
iocb->ki_complete(iocb, err, err);
}
@@ -153,7 +147,7 @@ static int skcipher_alloc_sgl(struct sock *sk)
return 0;
}
-static void skcipher_pull_sgl(struct sock *sk, int used, int put)
+static void skcipher_pull_sgl(struct sock *sk, size_t used, int put)
{
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
@@ -167,7 +161,7 @@ static void skcipher_pull_sgl(struct sock *sk, int used, int put)
sg = sgl->sg;
for (i = 0; i < sgl->cur; i++) {
- int plen = min_t(int, used, sg[i].length);
+ size_t plen = min_t(size_t, used, sg[i].length);
if (!sg_page(sg + i))
continue;
@@ -238,7 +232,7 @@ static void skcipher_wmem_wakeup(struct sock *sk)
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
- if (wq_has_sleeper(wq))
+ if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
POLLRDNORM |
POLLRDBAND);
@@ -288,7 +282,7 @@ static void skcipher_data_wakeup(struct sock *sk)
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
- if (wq_has_sleeper(wq))
+ if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
POLLRDNORM |
POLLRDBAND);
@@ -301,8 +295,11 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
+ struct sock *psk = ask->parent;
+ struct alg_sock *pask = alg_sk(psk);
struct skcipher_ctx *ctx = ask->private;
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);
+ struct skcipher_tfm *skc = pask->private;
+ struct crypto_skcipher *tfm = skc->skcipher;
unsigned ivsize = crypto_skcipher_ivsize(tfm);
struct skcipher_sg_list *sgl;
struct af_alg_control con = {};
@@ -348,7 +345,7 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
while (size) {
struct scatterlist *sg;
unsigned long len = size;
- int plen;
+ size_t plen;
if (ctx->merge) {
sgl = list_entry(ctx->tsgl.prev,
@@ -387,10 +384,11 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
sg = sgl->sg;
- sg_unmark_end(sg + sgl->cur);
+ if (sgl->cur)
+ sg_unmark_end(sg + sgl->cur - 1);
do {
i = sgl->cur;
- plen = min_t(int, len, PAGE_SIZE);
+ plen = min_t(size_t, len, PAGE_SIZE);
sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
err = -ENOMEM;
@@ -503,37 +501,43 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
+ struct sock *psk = ask->parent;
+ struct alg_sock *pask = alg_sk(psk);
struct skcipher_ctx *ctx = ask->private;
+ struct skcipher_tfm *skc = pask->private;
+ struct crypto_skcipher *tfm = skc->skcipher;
struct skcipher_sg_list *sgl;
struct scatterlist *sg;
struct skcipher_async_req *sreq;
struct skcipher_request *req;
struct skcipher_async_rsgl *last_rsgl = NULL;
- unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx);
- unsigned int reqlen = sizeof(struct skcipher_async_req) +
- GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx);
+ unsigned int txbufs = 0, len = 0, tx_nents;
+ unsigned int reqsize = crypto_skcipher_reqsize(tfm);
+ unsigned int ivsize = crypto_skcipher_ivsize(tfm);
int err = -ENOMEM;
bool mark = false;
+ char *iv;
- lock_sock(sk);
- req = kmalloc(reqlen, GFP_KERNEL);
- if (unlikely(!req))
- goto unlock;
+ sreq = kzalloc(sizeof(*sreq) + reqsize + ivsize, GFP_KERNEL);
+ if (unlikely(!sreq))
+ goto out;
- sreq = GET_SREQ(req, ctx);
+ req = &sreq->req;
+ iv = (char *)(req + 1) + reqsize;
sreq->iocb = msg->msg_iocb;
- memset(&sreq->first_sgl, '\0', sizeof(struct skcipher_async_rsgl));
INIT_LIST_HEAD(&sreq->list);
+ sreq->inflight = &ctx->inflight;
+
+ lock_sock(sk);
+ tx_nents = skcipher_all_sg_nents(ctx);
sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
- if (unlikely(!sreq->tsg)) {
- kfree(req);
+ if (unlikely(!sreq->tsg))
goto unlock;
- }
sg_init_table(sreq->tsg, tx_nents);
- memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx));
- skcipher_request_set_tfm(req, crypto_skcipher_reqtfm(&ctx->req));
- skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- skcipher_async_cb, sk);
+ memcpy(iv, ctx->iv, ivsize);
+ skcipher_request_set_tfm(req, tfm);
+ skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
+ skcipher_async_cb, sreq);
while (iov_iter_count(&msg->msg_iter)) {
struct skcipher_async_rsgl *rsgl;
@@ -609,20 +613,22 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
sg_mark_end(sreq->tsg + txbufs - 1);
skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
- len, sreq->iv);
+ len, iv);
err = ctx->enc ? crypto_skcipher_encrypt(req) :
crypto_skcipher_decrypt(req);
if (err == -EINPROGRESS) {
atomic_inc(&ctx->inflight);
err = -EIOCBQUEUED;
+ sreq = NULL;
goto unlock;
}
free:
skcipher_free_async_sgls(sreq);
- kfree(req);
unlock:
skcipher_wmem_wakeup(sk);
release_sock(sk);
+ kzfree(sreq);
+out:
return err;
}
@@ -631,9 +637,12 @@ static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
+ struct sock *psk = ask->parent;
+ struct alg_sock *pask = alg_sk(psk);
struct skcipher_ctx *ctx = ask->private;
- unsigned bs = crypto_skcipher_blocksize(crypto_skcipher_reqtfm(
- &ctx->req));
+ struct skcipher_tfm *skc = pask->private;
+ struct crypto_skcipher *tfm = skc->skcipher;
+ unsigned bs = crypto_skcipher_blocksize(tfm);
struct skcipher_sg_list *sgl;
struct scatterlist *sg;
int err = -EAGAIN;
@@ -642,13 +651,6 @@ static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
lock_sock(sk);
while (msg_data_left(msg)) {
- sgl = list_first_entry(&ctx->tsgl,
- struct skcipher_sg_list, list);
- sg = sgl->sg;
-
- while (!sg->length)
- sg++;
-
if (!ctx->used) {
err = skcipher_wait_for_data(sk, flags);
if (err)
@@ -669,6 +671,13 @@ static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
if (!used)
goto free;
+ sgl = list_first_entry(&ctx->tsgl,
+ struct skcipher_sg_list, list);
+ sg = sgl->sg;
+
+ while (!sg->length)
+ sg++;
+
skcipher_request_set_crypt(&ctx->req, sg, ctx->rsgl.sg, used,
ctx->iv);
@@ -748,19 +757,139 @@ static struct proto_ops algif_skcipher_ops = {
.poll = skcipher_poll,
};
+static int skcipher_check_key(struct socket *sock)
+{
+ int err = 0;
+ struct sock *psk;
+ struct alg_sock *pask;
+ struct skcipher_tfm *tfm;
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+
+ lock_sock(sk);
+ if (ask->refcnt)
+ goto unlock_child;
+
+ psk = ask->parent;
+ pask = alg_sk(ask->parent);
+ tfm = pask->private;
+
+ err = -ENOKEY;
+ lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
+ if (!tfm->has_key)
+ goto unlock;
+
+ if (!pask->refcnt++)
+ sock_hold(psk);
+
+ ask->refcnt = 1;
+ sock_put(psk);
+
+ err = 0;
+
+unlock:
+ release_sock(psk);
+unlock_child:
+ release_sock(sk);
+
+ return err;
+}
+
+static int skcipher_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
+ size_t size)
+{
+ int err;
+
+ err = skcipher_check_key(sock);
+ if (err)
+ return err;
+
+ return skcipher_sendmsg(sock, msg, size);
+}
+
+static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page,
+ int offset, size_t size, int flags)
+{
+ int err;
+
+ err = skcipher_check_key(sock);
+ if (err)
+ return err;
+
+ return skcipher_sendpage(sock, page, offset, size, flags);
+}
+
+static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
+ size_t ignored, int flags)
+{
+ int err;
+
+ err = skcipher_check_key(sock);
+ if (err)
+ return err;
+
+ return skcipher_recvmsg(sock, msg, ignored, flags);
+}
+
+static struct proto_ops algif_skcipher_ops_nokey = {
+ .family = PF_ALG,
+
+ .connect = sock_no_connect,
+ .socketpair = sock_no_socketpair,
+ .getname = sock_no_getname,
+ .ioctl = sock_no_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .getsockopt = sock_no_getsockopt,
+ .mmap = sock_no_mmap,
+ .bind = sock_no_bind,
+ .accept = sock_no_accept,
+ .setsockopt = sock_no_setsockopt,
+
+ .release = af_alg_release,
+ .sendmsg = skcipher_sendmsg_nokey,
+ .sendpage = skcipher_sendpage_nokey,
+ .recvmsg = skcipher_recvmsg_nokey,
+ .poll = skcipher_poll,
+};
+
static void *skcipher_bind(const char *name, u32 type, u32 mask)
{
- return crypto_alloc_skcipher(name, type, mask);
+ struct skcipher_tfm *tfm;
+ struct crypto_skcipher *skcipher;
+
+ tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
+ if (!tfm)
+ return ERR_PTR(-ENOMEM);
+
+ skcipher = crypto_alloc_skcipher(name, type, mask);
+ if (IS_ERR(skcipher)) {
+ kfree(tfm);
+ return ERR_CAST(skcipher);
+ }
+
+ tfm->skcipher = skcipher;
+
+ return tfm;
}
static void skcipher_release(void *private)
{
- crypto_free_skcipher(private);
+ struct skcipher_tfm *tfm = private;
+
+ crypto_free_skcipher(tfm->skcipher);
+ kfree(tfm);
}
static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
- return crypto_skcipher_setkey(private, key, keylen);
+ struct skcipher_tfm *tfm = private;
+ int err;
+
+ err = crypto_skcipher_setkey(tfm->skcipher, key, keylen);
+ tfm->has_key = !err;
+
+ return err;
}
static void skcipher_wait(struct sock *sk)
@@ -788,24 +917,26 @@ static void skcipher_sock_destruct(struct sock *sk)
af_alg_release_parent(sk);
}
-static int skcipher_accept_parent(void *private, struct sock *sk)
+static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
{
struct skcipher_ctx *ctx;
struct alg_sock *ask = alg_sk(sk);
- unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(private);
+ struct skcipher_tfm *tfm = private;
+ struct crypto_skcipher *skcipher = tfm->skcipher;
+ unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(skcipher);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
return -ENOMEM;
- ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(private),
+ ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(skcipher),
GFP_KERNEL);
if (!ctx->iv) {
sock_kfree_s(sk, ctx, len);
return -ENOMEM;
}
- memset(ctx->iv, 0, crypto_skcipher_ivsize(private));
+ memset(ctx->iv, 0, crypto_skcipher_ivsize(skcipher));
INIT_LIST_HEAD(&ctx->tsgl);
ctx->len = len;
@@ -818,8 +949,9 @@ static int skcipher_accept_parent(void *private, struct sock *sk)
ask->private = ctx;
- skcipher_request_set_tfm(&ctx->req, private);
- skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ skcipher_request_set_tfm(&ctx->req, skcipher);
+ skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_SLEEP |
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
af_alg_complete, &ctx->completion);
sk->sk_destruct = skcipher_sock_destruct;
@@ -827,12 +959,24 @@ static int skcipher_accept_parent(void *private, struct sock *sk)
return 0;
}
+static int skcipher_accept_parent(void *private, struct sock *sk)
+{
+ struct skcipher_tfm *tfm = private;
+
+ if (!tfm->has_key && crypto_skcipher_has_setkey(tfm->skcipher))
+ return -ENOKEY;
+
+ return skcipher_accept_parent_nokey(private, sk);
+}
+
static const struct af_alg_type algif_type_skcipher = {
.bind = skcipher_bind,
.release = skcipher_release,
.setkey = skcipher_setkey,
.accept = skcipher_accept_parent,
+ .accept_nokey = skcipher_accept_parent_nokey,
.ops = &algif_skcipher_ops,
+ .ops_nokey = &algif_skcipher_ops_nokey,
.name = "skcipher",
.owner = THIS_MODULE
};
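algif_skcipher gets the same treatment, plus size_t fixes in the sendmsg accounting and a self-contained skcipher_async_req that embeds the request instead of the old GET_SREQ pointer arithmetic. For reference, a hedged sketch of the normal keyed path those nokey ops protect — one cbc(aes) block over AF_ALG, assuming these patches, error checking elided:

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "skcipher",
		.salg_name   = "cbc(aes)",
	};
	static const unsigned char key[16] = "sixteen byte key";
	unsigned char pt[16] = "single AES block", ct[16];
	char cbuf[CMSG_SPACE(4) + CMSG_SPACE(sizeof(struct af_alg_iv) + 16)] = {0};
	struct iovec iov = { .iov_base = pt, .iov_len = sizeof(pt) };
	struct msghdr msg = {
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
		.msg_iov = &iov, .msg_iovlen = 1,
	};
	struct cmsghdr *cmsg;
	struct af_alg_iv *ivm;
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	/* Without this, skcipher_check_key() fails the I/O with -ENOKEY */
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	opfd = accept(tfmfd, NULL, 0);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_OP;
	cmsg->cmsg_len = CMSG_LEN(4);
	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_IV;
	cmsg->cmsg_len = CMSG_LEN(sizeof(*ivm) + 16);
	ivm = (void *)CMSG_DATA(cmsg);
	ivm->ivlen = 16;
	memset(ivm->iv, 0, 16);		/* zero IV, demo only */

	sendmsg(opfd, &msg, 0);
	read(opfd, ct, sizeof(ct));	/* ciphertext out */

	close(opfd);
	close(tfmfd);
	return 0;
}

Dropping the ALG_SET_KEY line makes skcipher_check_key() fail the sendmsg() with -ENOKEY, which is exactly the hole the ops_nokey vtable closes.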
diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c
index 758acabf2d81..8f3056cd0399 100644
--- a/crypto/asymmetric_keys/pkcs7_parser.c
+++ b/crypto/asymmetric_keys/pkcs7_parser.c
@@ -547,9 +547,7 @@ int pkcs7_sig_note_set_of_authattrs(void *context, size_t hdrlen,
struct pkcs7_signed_info *sinfo = ctx->sinfo;
if (!test_bit(sinfo_has_content_type, &sinfo->aa_set) ||
- !test_bit(sinfo_has_message_digest, &sinfo->aa_set) ||
- (ctx->msg->data_type == OID_msIndirectData &&
- !test_bit(sinfo_has_ms_opus_info, &sinfo->aa_set))) {
+ !test_bit(sinfo_has_message_digest, &sinfo->aa_set)) {
pr_warn("Missing required AuthAttr\n");
return -EBADMSG;
}
diff --git a/crypto/asymmetric_keys/signature.c b/crypto/asymmetric_keys/signature.c
index 9441240f7d2a..004d5fc8e56b 100644
--- a/crypto/asymmetric_keys/signature.c
+++ b/crypto/asymmetric_keys/signature.c
@@ -13,7 +13,7 @@
#define pr_fmt(fmt) "SIG: "fmt
#include <keys/asymmetric-subtype.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/err.h>
#include <crypto/public_key.h>
#include "asymmetric_keys.h"
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index 2a44b3752471..9e9e5a6a9ed6 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -321,6 +321,8 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
goto error_free_cert;
} else if (!prep->trusted) {
ret = x509_validate_trust(cert, get_system_trusted_keyring());
+ if (ret)
+ ret = x509_validate_trust(cert, get_ima_mok_keyring());
if (!ret)
prep->trusted = 1;
}
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index 99c3cce01290..7b6b935cef23 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -130,6 +130,9 @@ static int chacha_decrypt(struct aead_request *req)
struct scatterlist *src, *dst;
int err;
+ if (rctx->cryptlen == 0)
+ goto skip;
+
chacha_iv(creq->iv, req, 1);
sg_init_table(rctx->src, 2);
@@ -150,6 +153,7 @@ static int chacha_decrypt(struct aead_request *req)
if (err)
return err;
+skip:
return poly_verify_tag(req);
}
@@ -415,6 +419,9 @@ static int chacha_encrypt(struct aead_request *req)
struct scatterlist *src, *dst;
int err;
+ if (req->cryptlen == 0)
+ goto skip;
+
chacha_iv(creq->iv, req, 1);
sg_init_table(rctx->src, 2);
@@ -435,6 +442,7 @@ static int chacha_encrypt(struct aead_request *req)
if (err)
return err;
+skip:
return poly_genkey(req);
}
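The chacha20poly1305 change makes zero-length requests legal: when there is no payload, decryption jumps straight to tag verification and encryption to Poly1305 key generation, instead of issuing a zero-length ChaCha20 request, so an rfc7539 AEAD can authenticate associated data alone. A hedged in-kernel sketch of such a request (hypothetical test module, names are ours, error handling trimmed):

#include <crypto/aead.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/scatterlist.h>

static int __init test_empty_aead_init(void)
{
	u8 key[32] = {}, iv[12] = {};
	u8 buf[8 + 16] = "metadata";	/* 8 bytes of AD + room for the tag */
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	int err;

	/* Mask CRYPTO_ALG_ASYNC so encrypt() completes synchronously */
	tfm = crypto_alloc_aead("rfc7539(chacha20,poly1305)", 0,
				CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	crypto_aead_setkey(tfm, key, sizeof(key));
	crypto_aead_setauthsize(tfm, 16);

	req = aead_request_alloc(tfm, GFP_KERNEL);
	sg_init_one(&sg, buf, sizeof(buf));
	aead_request_set_ad(req, 8);
	/* cryptlen = 0: before this patch the ChaCha20 pass ran anyway */
	aead_request_set_crypt(req, &sg, &sg, 0, iv);
	err = crypto_aead_encrypt(req);	/* tag lands at buf + 8 */

	aead_request_free(req);
	crypto_free_aead(tfm);
	return err;
}

static void __exit test_empty_aead_exit(void) { }

module_init(test_empty_aead_init);
module_exit(test_empty_aead_exit);
MODULE_LICENSE("GPL");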
diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c
index 06f1b60f02b2..4c0a0e271876 100644
--- a/crypto/crc32c_generic.c
+++ b/crypto/crc32c_generic.c
@@ -172,4 +172,3 @@ MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations wrapper for lib/crc32c");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("crc32c");
MODULE_ALIAS_CRYPTO("crc32c-generic");
-MODULE_SOFTDEP("pre: crc32c");
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index c81861b1350b..7921251cdb13 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -637,6 +637,7 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
inst->alg.halg.base.cra_flags = type;
inst->alg.halg.digestsize = salg->digestsize;
+ inst->alg.halg.statesize = salg->statesize;
inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
@@ -887,8 +888,7 @@ struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
"cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
return ERR_PTR(-EINVAL);
- type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
- type |= CRYPTO_ALG_TYPE_BLKCIPHER;
+ type = crypto_skcipher_type(type);
mask &= ~CRYPTO_ALG_TYPE_MASK;
mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index 237f3795cfaa..43fe85f20d57 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -499,6 +499,7 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
if (link->dump == NULL)
return -EINVAL;
+ down_read(&crypto_alg_sem);
list_for_each_entry(alg, &crypto_alg_list, cra_list)
dump_alloc += CRYPTO_REPORT_MAXSIZE;
@@ -508,8 +509,11 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
.done = link->done,
.min_dump_alloc = dump_alloc,
};
- return netlink_dump_start(crypto_nlsk, skb, nlh, &c);
+ err = netlink_dump_start(crypto_nlsk, skb, nlh, &c);
}
+ up_read(&crypto_alg_sem);
+
+ return err;
}
err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX,
diff --git a/crypto/drbg.c b/crypto/drbg.c
index a7c23146b87f..ab6ef1d08568 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -626,7 +626,7 @@ out:
return len;
}
-static struct drbg_state_ops drbg_ctr_ops = {
+static const struct drbg_state_ops drbg_ctr_ops = {
.update = drbg_ctr_update,
.generate = drbg_ctr_generate,
.crypto_init = drbg_init_sym_kernel,
@@ -752,7 +752,7 @@ static int drbg_hmac_generate(struct drbg_state *drbg,
return len;
}
-static struct drbg_state_ops drbg_hmac_ops = {
+static const struct drbg_state_ops drbg_hmac_ops = {
.update = drbg_hmac_update,
.generate = drbg_hmac_generate,
.crypto_init = drbg_init_hash_kernel,
@@ -1032,7 +1032,7 @@ out:
* scratchpad usage: as update and generate are used isolated, both
* can use the scratchpad
*/
-static struct drbg_state_ops drbg_hash_ops = {
+static const struct drbg_state_ops drbg_hash_ops = {
.update = drbg_hash_update,
.generate = drbg_hash_generate,
.crypto_init = drbg_init_hash_kernel,
diff --git a/crypto/hash_info.c b/crypto/hash_info.c
index 3e7ff46f26e8..7b1e0b188ce6 100644
--- a/crypto/hash_info.c
+++ b/crypto/hash_info.c
@@ -31,6 +31,7 @@ const char *const hash_algo_name[HASH_ALGO__LAST] = {
[HASH_ALGO_TGR_128] = "tgr128",
[HASH_ALGO_TGR_160] = "tgr160",
[HASH_ALGO_TGR_192] = "tgr192",
+ [HASH_ALGO_SM3_256] = "sm3-256",
};
EXPORT_SYMBOL_GPL(hash_algo_name);
@@ -52,5 +53,6 @@ const int hash_digest_size[HASH_ALGO__LAST] = {
[HASH_ALGO_TGR_128] = TGR128_DIGEST_SIZE,
[HASH_ALGO_TGR_160] = TGR160_DIGEST_SIZE,
[HASH_ALGO_TGR_192] = TGR192_DIGEST_SIZE,
+ [HASH_ALGO_SM3_256] = SM3256_DIGEST_SIZE,
};
EXPORT_SYMBOL_GPL(hash_digest_size);
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index fe5b495a434d..f78d4fc4e38a 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -128,13 +128,9 @@ static void mcryptd_opportunistic_flush(void)
flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
while (single_task_running()) {
mutex_lock(&flist->lock);
- if (list_empty(&flist->list)) {
- mutex_unlock(&flist->lock);
- return;
- }
- cstate = list_entry(flist->list.next,
+ cstate = list_first_entry_or_null(&flist->list,
struct mcryptd_alg_cstate, flush_list);
- if (!cstate->flusher_engaged) {
+ if (!cstate || !cstate->flusher_engaged) {
mutex_unlock(&flist->lock);
return;
}
diff --git a/crypto/md5.c b/crypto/md5.c
index 33d17e9a8702..2355a7c25c45 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -24,6 +24,12 @@
#include <linux/cryptohash.h>
#include <asm/byteorder.h>
+const u8 md5_zero_message_hash[MD5_DIGEST_SIZE] = {
+ 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
+ 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
+};
+EXPORT_SYMBOL_GPL(md5_zero_message_hash);
+
/* XXX: this stuff can be optimized */
static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
{
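The md5.c hunk exports a precomputed digest of the empty message, mirrored further down for SHA-1, SHA-224, and SHA-256; the extern declarations land under include/crypto/ in the same series, outside this crypto/-only diffstat. The intended consumers are drivers whose hardware rejects zero-length jobs. A hedged sketch of the pattern (hypothetical driver code, not taken from this merge):

#include <crypto/hash.h>
#include <crypto/md5.h>
#include <linux/string.h>

static int example_md5_digest(struct ahash_request *req)
{
	/* Many hardware engines refuse zero-length jobs outright, so
	 * answer from the exported table without touching the device. */
	if (!req->nbytes) {
		memcpy(req->result, md5_zero_message_hash, MD5_DIGEST_SIZE);
		return 0;
	}

	/* ... queue the real job to the engine here ... */
	return -EINPROGRESS;
}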
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
new file mode 100644
index 000000000000..50f5c97e1087
--- /dev/null
+++ b/crypto/rsa-pkcs1pad.c
@@ -0,0 +1,628 @@
+/*
+ * RSA padding templates.
+ *
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/akcipher.h>
+#include <crypto/internal/akcipher.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/random.h>
+
+struct pkcs1pad_ctx {
+ struct crypto_akcipher *child;
+
+ unsigned int key_size;
+};
+
+struct pkcs1pad_request {
+ struct akcipher_request child_req;
+
+ struct scatterlist in_sg[3], out_sg[2];
+ uint8_t *in_buf, *out_buf;
+};
+
+static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+ int err, size;
+
+ err = crypto_akcipher_set_pub_key(ctx->child, key, keylen);
+
+ if (!err) {
+ /* Find out new modulus size from rsa implementation */
+ size = crypto_akcipher_maxsize(ctx->child);
+
+ ctx->key_size = size > 0 ? size : 0;
+ if (size <= 0)
+ err = size;
+ }
+
+ return err;
+}
+
+static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+ int err, size;
+
+ err = crypto_akcipher_set_priv_key(ctx->child, key, keylen);
+
+ if (!err) {
+ /* Find out new modulus size from rsa implementation */
+ size = crypto_akcipher_maxsize(ctx->child);
+
+ ctx->key_size = size > 0 ? size : 0;
+ if (size <= 0)
+ err = size;
+ }
+
+ return err;
+}
+
+static int pkcs1pad_get_max_size(struct crypto_akcipher *tfm)
+{
+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ /*
+ * The maximum destination buffer size for the encrypt/sign operations
+ * will be the same as for RSA, even though it's smaller for
+ * decrypt/verify.
+ */
+
+ return ctx->key_size ?: -EINVAL;
+}
+
+static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len,
+ struct scatterlist *next)
+{
+ int nsegs = next ? 1 : 0;
+
+ if (offset_in_page(buf) + len <= PAGE_SIZE) {
+ nsegs += 1;
+ sg_init_table(sg, nsegs);
+ sg_set_buf(sg, buf, len);
+ } else {
+ nsegs += 2;
+ sg_init_table(sg, nsegs);
+ sg_set_buf(sg + 0, buf, PAGE_SIZE - offset_in_page(buf));
+ sg_set_buf(sg + 1, buf + PAGE_SIZE - offset_in_page(buf),
+ offset_in_page(buf) + len - PAGE_SIZE);
+ }
+
+ if (next)
+ sg_chain(sg, nsegs, next);
+}
+
+static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+ size_t pad_len = ctx->key_size - req_ctx->child_req.dst_len;
+ size_t chunk_len, pad_left;
+ struct sg_mapping_iter miter;
+
+ if (!err) {
+ if (pad_len) {
+ sg_miter_start(&miter, req->dst,
+ sg_nents_for_len(req->dst, pad_len),
+ SG_MITER_ATOMIC | SG_MITER_TO_SG);
+
+ pad_left = pad_len;
+ while (pad_left) {
+ sg_miter_next(&miter);
+
+ chunk_len = min(miter.length, pad_left);
+ memset(miter.addr, 0, chunk_len);
+ pad_left -= chunk_len;
+ }
+
+ sg_miter_stop(&miter);
+ }
+
+ sg_pcopy_from_buffer(req->dst,
+ sg_nents_for_len(req->dst, ctx->key_size),
+ req_ctx->out_buf, req_ctx->child_req.dst_len,
+ pad_len);
+ }
+ req->dst_len = ctx->key_size;
+
+ kfree(req_ctx->in_buf);
+ kzfree(req_ctx->out_buf);
+
+ return err;
+}
+
+static void pkcs1pad_encrypt_sign_complete_cb(
+ struct crypto_async_request *child_async_req, int err)
+{
+ struct akcipher_request *req = child_async_req->data;
+ struct crypto_async_request async_req;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ async_req.data = req->base.data;
+ async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
+ async_req.flags = child_async_req->flags;
+ req->base.complete(&async_req,
+ pkcs1pad_encrypt_sign_complete(req, err));
+}
+
+static int pkcs1pad_encrypt(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+ int err;
+ unsigned int i, ps_end;
+
+ if (!ctx->key_size)
+ return -EINVAL;
+
+ if (req->src_len > ctx->key_size - 11)
+ return -EOVERFLOW;
+
+ if (req->dst_len < ctx->key_size) {
+ req->dst_len = ctx->key_size;
+ return -EOVERFLOW;
+ }
+
+ if (ctx->key_size > PAGE_SIZE)
+ return -ENOTSUPP;
+
+ /*
+ * Replace both input and output to add the padding in the input and
+ * the potential missing leading zeros in the output.
+ */
+ req_ctx->child_req.src = req_ctx->in_sg;
+ req_ctx->child_req.src_len = ctx->key_size - 1;
+ req_ctx->child_req.dst = req_ctx->out_sg;
+ req_ctx->child_req.dst_len = ctx->key_size;
+
+ req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!req_ctx->in_buf)
+ return -ENOMEM;
+
+ ps_end = ctx->key_size - req->src_len - 2;
+ req_ctx->in_buf[0] = 0x02;
+ for (i = 1; i < ps_end; i++)
+ req_ctx->in_buf[i] = 1 + prandom_u32_max(255);
+ req_ctx->in_buf[ps_end] = 0x00;
+
+ pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
+ ctx->key_size - 1 - req->src_len, req->src);
+
+ req_ctx->out_buf = kmalloc(ctx->key_size,
+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!req_ctx->out_buf) {
+ kfree(req_ctx->in_buf);
+ return -ENOMEM;
+ }
+
+ pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
+ ctx->key_size, NULL);
+
+ akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
+ akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
+ pkcs1pad_encrypt_sign_complete_cb, req);
+
+ err = crypto_akcipher_encrypt(&req_ctx->child_req);
+ if (err != -EINPROGRESS &&
+ (err != -EBUSY ||
+ !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ return pkcs1pad_encrypt_sign_complete(req, err);
+
+ return err;
+}
+
+static int pkcs1pad_decrypt_complete(struct akcipher_request *req, int err)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+ unsigned int pos;
+
+ if (err == -EOVERFLOW)
+ /* Decrypted value had no leading 0 byte */
+ err = -EINVAL;
+
+ if (err)
+ goto done;
+
+ if (req_ctx->child_req.dst_len != ctx->key_size - 1) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ if (req_ctx->out_buf[0] != 0x02) {
+ err = -EINVAL;
+ goto done;
+ }
+ for (pos = 1; pos < req_ctx->child_req.dst_len; pos++)
+ if (req_ctx->out_buf[pos] == 0x00)
+ break;
+ if (pos < 9 || pos == req_ctx->child_req.dst_len) {
+ err = -EINVAL;
+ goto done;
+ }
+ pos++;
+
+ if (req->dst_len < req_ctx->child_req.dst_len - pos)
+ err = -EOVERFLOW;
+ req->dst_len = req_ctx->child_req.dst_len - pos;
+
+ if (!err)
+ sg_copy_from_buffer(req->dst,
+ sg_nents_for_len(req->dst, req->dst_len),
+ req_ctx->out_buf + pos, req->dst_len);
+
+done:
+ kzfree(req_ctx->out_buf);
+
+ return err;
+}
+
+static void pkcs1pad_decrypt_complete_cb(
+ struct crypto_async_request *child_async_req, int err)
+{
+ struct akcipher_request *req = child_async_req->data;
+ struct crypto_async_request async_req;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ async_req.data = req->base.data;
+ async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
+ async_req.flags = child_async_req->flags;
+ req->base.complete(&async_req, pkcs1pad_decrypt_complete(req, err));
+}
+
+static int pkcs1pad_decrypt(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+ int err;
+
+ if (!ctx->key_size || req->src_len != ctx->key_size)
+ return -EINVAL;
+
+ if (ctx->key_size > PAGE_SIZE)
+ return -ENOTSUPP;
+
+ /* Reuse input buffer, output to a new buffer */
+ req_ctx->child_req.src = req->src;
+ req_ctx->child_req.src_len = req->src_len;
+ req_ctx->child_req.dst = req_ctx->out_sg;
+ req_ctx->child_req.dst_len = ctx->key_size - 1;
+
+ req_ctx->out_buf = kmalloc(ctx->key_size - 1,
+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!req_ctx->out_buf)
+ return -ENOMEM;
+
+ pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
+ ctx->key_size - 1, NULL);
+
+ akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
+ akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
+ pkcs1pad_decrypt_complete_cb, req);
+
+ err = crypto_akcipher_decrypt(&req_ctx->child_req);
+ if (err != -EINPROGRESS &&
+ (err != -EBUSY ||
+ !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ return pkcs1pad_decrypt_complete(req, err);
+
+ return err;
+}
+
+static int pkcs1pad_sign(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+ int err;
+ unsigned int ps_end;
+
+ if (!ctx->key_size)
+ return -EINVAL;
+
+ if (req->src_len > ctx->key_size - 11)
+ return -EOVERFLOW;
+
+ if (req->dst_len < ctx->key_size) {
+ req->dst_len = ctx->key_size;
+ return -EOVERFLOW;
+ }
+
+ if (ctx->key_size > PAGE_SIZE)
+ return -ENOTSUPP;
+
+ /*
+ * Replace both input and output to add the padding in the input and
+ * the potential missing leading zeros in the output.
+ */
+ req_ctx->child_req.src = req_ctx->in_sg;
+ req_ctx->child_req.src_len = ctx->key_size - 1;
+ req_ctx->child_req.dst = req_ctx->out_sg;
+ req_ctx->child_req.dst_len = ctx->key_size;
+
+ req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!req_ctx->in_buf)
+ return -ENOMEM;
+
+ ps_end = ctx->key_size - req->src_len - 2;
+ req_ctx->in_buf[0] = 0x01;
+ memset(req_ctx->in_buf + 1, 0xff, ps_end - 1);
+ req_ctx->in_buf[ps_end] = 0x00;
+
+ pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
+ ctx->key_size - 1 - req->src_len, req->src);
+
+ req_ctx->out_buf = kmalloc(ctx->key_size,
+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!req_ctx->out_buf) {
+ kfree(req_ctx->in_buf);
+ return -ENOMEM;
+ }
+
+ pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
+ ctx->key_size, NULL);
+
+ akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
+ akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
+ pkcs1pad_encrypt_sign_complete_cb, req);
+
+ err = crypto_akcipher_sign(&req_ctx->child_req);
+ if (err != -EINPROGRESS &&
+ (err != -EBUSY ||
+ !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ return pkcs1pad_encrypt_sign_complete(req, err);
+
+ return err;
+}
+
+static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+ unsigned int pos;
+
+ if (err == -EOVERFLOW)
+ /* Decrypted value had no leading 0 byte */
+ err = -EINVAL;
+
+ if (err)
+ goto done;
+
+ if (req_ctx->child_req.dst_len != ctx->key_size - 1) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ if (req_ctx->out_buf[0] != 0x01) {
+ err = -EINVAL;
+ goto done;
+ }
+ for (pos = 1; pos < req_ctx->child_req.dst_len; pos++)
+ if (req_ctx->out_buf[pos] != 0xff)
+ break;
+ if (pos < 9 || pos == req_ctx->child_req.dst_len ||
+ req_ctx->out_buf[pos] != 0x00) {
+ err = -EINVAL;
+ goto done;
+ }
+ pos++;
+
+ if (req->dst_len < req_ctx->child_req.dst_len - pos)
+ err = -EOVERFLOW;
+ req->dst_len = req_ctx->child_req.dst_len - pos;
+
+ if (!err)
+ sg_copy_from_buffer(req->dst,
+ sg_nents_for_len(req->dst, req->dst_len),
+ req_ctx->out_buf + pos, req->dst_len);
+
+done:
+ kzfree(req_ctx->out_buf);
+
+ return err;
+}
+
+static void pkcs1pad_verify_complete_cb(
+ struct crypto_async_request *child_async_req, int err)
+{
+ struct akcipher_request *req = child_async_req->data;
+ struct crypto_async_request async_req;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ async_req.data = req->base.data;
+ async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
+ async_req.flags = child_async_req->flags;
+ req->base.complete(&async_req, pkcs1pad_verify_complete(req, err));
+}
+
+/*
+ * The verify operation is here for completeness similar to the verification
+ * defined in RFC2313 section 10.2 except that block type 0 is not accepted,
+ * as in RFC2437. RFC2437 section 9.2 doesn't define any operation to
+ * retrieve the DigestInfo from a signature, instead the user is expected
+ * to call the sign operation to generate the expected signature and compare
+ * signatures instead of the message-digests.
+ */
+static int pkcs1pad_verify(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+ int err;
+
+ if (!ctx->key_size || req->src_len != ctx->key_size)
+ return -EINVAL;
+
+ if (ctx->key_size > PAGE_SIZE)
+ return -ENOTSUPP;
+
+ /* Reuse input buffer, output to a new buffer */
+ req_ctx->child_req.src = req->src;
+ req_ctx->child_req.src_len = req->src_len;
+ req_ctx->child_req.dst = req_ctx->out_sg;
+ req_ctx->child_req.dst_len = ctx->key_size - 1;
+
+ req_ctx->out_buf = kmalloc(ctx->key_size - 1,
+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!req_ctx->out_buf)
+ return -ENOMEM;
+
+ pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
+ ctx->key_size - 1, NULL);
+
+ akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
+ akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
+ pkcs1pad_verify_complete_cb, req);
+
+ err = crypto_akcipher_verify(&req_ctx->child_req);
+ if (err != -EINPROGRESS &&
+ (err != -EBUSY ||
+ !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ return pkcs1pad_verify_complete(req, err);
+
+ return err;
+}
+
+static int pkcs1pad_init_tfm(struct crypto_akcipher *tfm)
+{
+ struct akcipher_instance *inst = akcipher_alg_instance(tfm);
+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct crypto_akcipher *child_tfm;
+
+ child_tfm = crypto_spawn_akcipher(akcipher_instance_ctx(inst));
+ if (IS_ERR(child_tfm))
+ return PTR_ERR(child_tfm);
+
+ ctx->child = child_tfm;
+
+ return 0;
+}
+
+static void pkcs1pad_exit_tfm(struct crypto_akcipher *tfm)
+{
+ struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ crypto_free_akcipher(ctx->child);
+}
+
+static void pkcs1pad_free(struct akcipher_instance *inst)
+{
+ struct crypto_akcipher_spawn *spawn = akcipher_instance_ctx(inst);
+
+ crypto_drop_akcipher(spawn);
+
+ kfree(inst);
+}
+
+static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+ struct crypto_attr_type *algt;
+ struct akcipher_instance *inst;
+ struct crypto_akcipher_spawn *spawn;
+ struct akcipher_alg *rsa_alg;
+ const char *rsa_alg_name;
+ int err;
+
+ algt = crypto_get_attr_type(tb);
+ if (IS_ERR(algt))
+ return PTR_ERR(algt);
+
+ if ((algt->type ^ CRYPTO_ALG_TYPE_AKCIPHER) & algt->mask)
+ return -EINVAL;
+
+ rsa_alg_name = crypto_attr_alg_name(tb[1]);
+ if (IS_ERR(rsa_alg_name))
+ return PTR_ERR(rsa_alg_name);
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ spawn = akcipher_instance_ctx(inst);
+ crypto_set_spawn(&spawn->base, akcipher_crypto_instance(inst));
+ err = crypto_grab_akcipher(spawn, rsa_alg_name, 0,
+ crypto_requires_sync(algt->type, algt->mask));
+ if (err)
+ goto out_free_inst;
+
+ rsa_alg = crypto_spawn_akcipher_alg(spawn);
+
+ err = -ENAMETOOLONG;
+ if (snprintf(inst->alg.base.cra_name,
+ CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
+ rsa_alg->base.cra_name) >=
+ CRYPTO_MAX_ALG_NAME ||
+ snprintf(inst->alg.base.cra_driver_name,
+ CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
+ rsa_alg->base.cra_driver_name) >=
+ CRYPTO_MAX_ALG_NAME)
+ goto out_drop_alg;
+
+ inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC;
+ inst->alg.base.cra_priority = rsa_alg->base.cra_priority;
+ inst->alg.base.cra_ctxsize = sizeof(struct pkcs1pad_ctx);
+
+ inst->alg.init = pkcs1pad_init_tfm;
+ inst->alg.exit = pkcs1pad_exit_tfm;
+
+ inst->alg.encrypt = pkcs1pad_encrypt;
+ inst->alg.decrypt = pkcs1pad_decrypt;
+ inst->alg.sign = pkcs1pad_sign;
+ inst->alg.verify = pkcs1pad_verify;
+ inst->alg.set_pub_key = pkcs1pad_set_pub_key;
+ inst->alg.set_priv_key = pkcs1pad_set_priv_key;
+ inst->alg.max_size = pkcs1pad_get_max_size;
+ inst->alg.reqsize = sizeof(struct pkcs1pad_request) + rsa_alg->reqsize;
+
+ inst->free = pkcs1pad_free;
+
+ err = akcipher_register_instance(tmpl, inst);
+ if (err)
+ goto out_drop_alg;
+
+ return 0;
+
+out_drop_alg:
+ crypto_drop_akcipher(spawn);
+out_free_inst:
+ kfree(inst);
+ return err;
+}
+
+struct crypto_template rsa_pkcs1pad_tmpl = {
+ .name = "pkcs1pad",
+ .create = pkcs1pad_create,
+ .module = THIS_MODULE,
+};
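The new pkcs1pad template wraps any akcipher (in practice rsa) with RFC 2313 block type 01/02 padding, which is why rsa.c below starts registering rsa_pkcs1pad_tmpl alongside the bare algorithm. A hedged sketch of a kernel-side caller — names and error handling are ours; the key blob must be in the BER form crypto/rsa_helper.c parses, and src_len may be at most the modulus size minus the 11-byte 0x00 || 0x02 || PS || 0x00 overhead:

#include <crypto/akcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_pkcs1_encrypt(const void *key, unsigned int keylen,
				 void *msg, unsigned int msglen,
				 void *out, unsigned int outlen)
{
	struct crypto_akcipher *tfm;
	struct akcipher_request *req;
	struct scatterlist src, dst;
	int err;

	/* "pkcs1pad(rsa)" stacks the template above on the bare rsa alg */
	tfm = crypto_alloc_akcipher("pkcs1pad(rsa)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_akcipher_set_pub_key(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = akcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&src, msg, msglen);
	sg_init_one(&dst, out, outlen);	/* needs crypto_akcipher_maxsize() bytes */
	akcipher_request_set_crypt(req, &src, &dst, msglen, outlen);

	/* An async backend would return -EINPROGRESS; rsa-generic is sync */
	err = crypto_akcipher_encrypt(req);

	akcipher_request_free(req);
out_free_tfm:
	crypto_free_akcipher(tfm);
	return err;
}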
diff --git a/crypto/rsa.c b/crypto/rsa.c
index 1093e041db03..77d737f52147 100644
--- a/crypto/rsa.c
+++ b/crypto/rsa.c
@@ -13,6 +13,7 @@
#include <crypto/internal/rsa.h>
#include <crypto/internal/akcipher.h>
#include <crypto/akcipher.h>
+#include <crypto/algapi.h>
/*
* RSAEP function [RFC3447 sec 5.1.1]
@@ -91,12 +92,6 @@ static int rsa_enc(struct akcipher_request *req)
goto err_free_c;
}
- if (req->dst_len < mpi_get_size(pkey->n)) {
- req->dst_len = mpi_get_size(pkey->n);
- ret = -EOVERFLOW;
- goto err_free_c;
- }
-
ret = -ENOMEM;
m = mpi_read_raw_from_sgl(req->src, req->src_len);
if (!m)
@@ -136,12 +131,6 @@ static int rsa_dec(struct akcipher_request *req)
goto err_free_m;
}
- if (req->dst_len < mpi_get_size(pkey->n)) {
- req->dst_len = mpi_get_size(pkey->n);
- ret = -EOVERFLOW;
- goto err_free_m;
- }
-
ret = -ENOMEM;
c = mpi_read_raw_from_sgl(req->src, req->src_len);
if (!c)
@@ -180,12 +169,6 @@ static int rsa_sign(struct akcipher_request *req)
goto err_free_s;
}
- if (req->dst_len < mpi_get_size(pkey->n)) {
- req->dst_len = mpi_get_size(pkey->n);
- ret = -EOVERFLOW;
- goto err_free_s;
- }
-
ret = -ENOMEM;
m = mpi_read_raw_from_sgl(req->src, req->src_len);
if (!m)
@@ -225,12 +208,6 @@ static int rsa_verify(struct akcipher_request *req)
goto err_free_m;
}
- if (req->dst_len < mpi_get_size(pkey->n)) {
- req->dst_len = mpi_get_size(pkey->n);
- ret = -EOVERFLOW;
- goto err_free_m;
- }
-
ret = -ENOMEM;
s = mpi_read_raw_from_sgl(req->src, req->src_len);
if (!s) {
@@ -339,11 +316,24 @@ static struct akcipher_alg rsa = {
static int rsa_init(void)
{
- return crypto_register_akcipher(&rsa);
+ int err;
+
+ err = crypto_register_akcipher(&rsa);
+ if (err)
+ return err;
+
+ err = crypto_register_template(&rsa_pkcs1pad_tmpl);
+ if (err) {
+ crypto_unregister_akcipher(&rsa);
+ return err;
+ }
+
+ return 0;
}
static void rsa_exit(void)
{
+ crypto_unregister_template(&rsa_pkcs1pad_tmpl);
crypto_unregister_akcipher(&rsa);
}
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
index 39e3acc438d9..6877cbb9105f 100644
--- a/crypto/sha1_generic.c
+++ b/crypto/sha1_generic.c
@@ -26,6 +26,13 @@
#include <crypto/sha1_base.h>
#include <asm/byteorder.h>
+const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE] = {
+ 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
+ 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
+ 0xaf, 0xd8, 0x07, 0x09
+};
+EXPORT_SYMBOL_GPL(sha1_zero_message_hash);
+
static void sha1_generic_block_fn(struct sha1_state *sst, u8 const *src,
int blocks)
{
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
index 78431163ed3c..8f9c47e1a96e 100644
--- a/crypto/sha256_generic.c
+++ b/crypto/sha256_generic.c
@@ -27,6 +27,22 @@
#include <asm/byteorder.h>
#include <asm/unaligned.h>
+const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE] = {
+ 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
+ 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
+ 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
+ 0x2f
+};
+EXPORT_SYMBOL_GPL(sha224_zero_message_hash);
+
+const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE] = {
+ 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
+ 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
+ 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
+ 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
+};
+EXPORT_SYMBOL_GPL(sha256_zero_message_hash);
+
static inline u32 Ch(u32 x, u32 y, u32 z)
{
return z ^ (x & (y ^ z));
diff --git a/crypto/shash.c b/crypto/shash.c
index ecb1e3d39bf0..359754591653 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -354,9 +354,10 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
crt->final = shash_async_final;
crt->finup = shash_async_finup;
crt->digest = shash_async_digest;
+ crt->setkey = shash_async_setkey;
+
+ crt->has_setkey = alg->setkey != shash_no_setkey;
- if (alg->setkey)
- crt->setkey = shash_async_setkey;
if (alg->export)
crt->export = shash_async_export;
if (alg->import)
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 7591928be7ca..d199c0b1751c 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -118,6 +118,7 @@ static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
skcipher->decrypt = skcipher_decrypt_blkcipher;
skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
+ skcipher->has_setkey = calg->cra_blkcipher.max_keysize;
return 0;
}
@@ -210,6 +211,7 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
sizeof(struct ablkcipher_request);
+ skcipher->has_setkey = calg->cra_ablkcipher.max_keysize;
return 0;
}
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 46a4a757d478..270bc4b82bd9 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1789,7 +1789,7 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
test_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec,
NULL, 0, 16, 16, aead_speed_template_20);
test_aead_speed("gcm(aes)", ENCRYPT, sec,
- NULL, 0, 16, 8, aead_speed_template_20);
+ NULL, 0, 16, 8, speed_template_16_24_32);
break;
case 212: