From 2ced26078fcff26db532d6300a1b5f8ffd11a5e1 Mon Sep 17 00:00:00 2001 From: Corentin Labbe Date: Thu, 29 Nov 2018 14:42:16 +0000 Subject: crypto: user - made crypto_user_stat optional Even if CRYPTO_STATS is set to n, some part of CRYPTO_STATS are compiled. This patch made all part of crypto_user_stat uncompiled in that case. Signed-off-by: Corentin Labbe Signed-off-by: Herbert Xu --- include/linux/crypto.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 3634ad6fe202..3e05053b8d57 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -515,6 +515,7 @@ struct crypto_alg { struct module *cra_module; +#ifdef CONFIG_CRYPTO_STATS union { atomic_t encrypt_cnt; atomic_t compress_cnt; @@ -552,6 +553,7 @@ struct crypto_alg { atomic_t compute_shared_secret_cnt; }; atomic_t sign_cnt; +#endif /* CONFIG_CRYPTO_STATS */ } CRYPTO_MINALIGN_ATTR; -- cgit v1.2.3 From 6e8e72cd206e2ba68801e4f2490f639d41808c8d Mon Sep 17 00:00:00 2001 From: Corentin Labbe Date: Thu, 29 Nov 2018 14:42:18 +0000 Subject: crypto: user - convert all stats from u32 to u64 All the 32-bit fields need to be 64-bit. In some cases, UINT32_MAX crypto operations can be done in seconds. Reported-by: Eric Biggers Signed-off-by: Corentin Labbe Signed-off-by: Herbert Xu --- crypto/algapi.c | 10 ++-- crypto/crypto_user_stat.c | 114 +++++++++++++++++++--------------------- include/crypto/acompress.h | 8 +-- include/crypto/aead.h | 8 +-- include/crypto/akcipher.h | 16 +++--- include/crypto/hash.h | 6 +-- include/crypto/kpp.h | 12 ++--- include/crypto/rng.h | 8 +-- include/crypto/skcipher.h | 8 +-- include/linux/crypto.h | 46 ++++++++-------- include/uapi/linux/cryptouser.h | 38 +++++++------- 11 files changed, 133 insertions(+), 141 deletions(-) (limited to 'include/linux') diff --git a/crypto/algapi.c b/crypto/algapi.c index f5396c88e8cd..42fe316f80ee 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -259,13 +259,13 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg) list_add(&larval->alg.cra_list, &crypto_alg_list); #ifdef CONFIG_CRYPTO_STATS - atomic_set(&alg->encrypt_cnt, 0); - atomic_set(&alg->decrypt_cnt, 0); + atomic64_set(&alg->encrypt_cnt, 0); + atomic64_set(&alg->decrypt_cnt, 0); atomic64_set(&alg->encrypt_tlen, 0); atomic64_set(&alg->decrypt_tlen, 0); - atomic_set(&alg->verify_cnt, 0); - atomic_set(&alg->cipher_err_cnt, 0); - atomic_set(&alg->sign_cnt, 0); + atomic64_set(&alg->verify_cnt, 0); + atomic64_set(&alg->cipher_err_cnt, 0); + atomic64_set(&alg->sign_cnt, 0); #endif out: diff --git a/crypto/crypto_user_stat.c b/crypto/crypto_user_stat.c index a6fb2e6f618d..352569f378a0 100644 --- a/crypto/crypto_user_stat.c +++ b/crypto/crypto_user_stat.c @@ -35,22 +35,21 @@ static int crypto_report_aead(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_stat raead; u64 v64; - u32 v32; memset(&raead, 0, sizeof(raead)); strscpy(raead.type, "aead", sizeof(raead.type)); - v32 = atomic_read(&alg->encrypt_cnt); - raead.stat_encrypt_cnt = v32; + v64 = atomic64_read(&alg->encrypt_cnt); + raead.stat_encrypt_cnt = v64; v64 = atomic64_read(&alg->encrypt_tlen); raead.stat_encrypt_tlen = v64; - v32 = atomic_read(&alg->decrypt_cnt); - raead.stat_decrypt_cnt = v32; + v64 = atomic64_read(&alg->decrypt_cnt); + raead.stat_decrypt_cnt = v64; v64 = atomic64_read(&alg->decrypt_tlen); raead.stat_decrypt_tlen = v64; - v32 = atomic_read(&alg->aead_err_cnt); - raead.stat_aead_err_cnt = v32; + v64 = 
atomic64_read(&alg->aead_err_cnt); + raead.stat_aead_err_cnt = v64; return nla_put(skb, CRYPTOCFGA_STAT_AEAD, sizeof(raead), &raead); } @@ -59,22 +58,21 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_stat rcipher; u64 v64; - u32 v32; memset(&rcipher, 0, sizeof(rcipher)); strscpy(rcipher.type, "cipher", sizeof(rcipher.type)); - v32 = atomic_read(&alg->encrypt_cnt); - rcipher.stat_encrypt_cnt = v32; + v64 = atomic64_read(&alg->encrypt_cnt); + rcipher.stat_encrypt_cnt = v64; v64 = atomic64_read(&alg->encrypt_tlen); rcipher.stat_encrypt_tlen = v64; - v32 = atomic_read(&alg->decrypt_cnt); - rcipher.stat_decrypt_cnt = v32; + v64 = atomic64_read(&alg->decrypt_cnt); + rcipher.stat_decrypt_cnt = v64; v64 = atomic64_read(&alg->decrypt_tlen); rcipher.stat_decrypt_tlen = v64; - v32 = atomic_read(&alg->cipher_err_cnt); - rcipher.stat_cipher_err_cnt = v32; + v64 = atomic64_read(&alg->cipher_err_cnt); + rcipher.stat_cipher_err_cnt = v64; return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher); } @@ -83,21 +81,20 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_stat rcomp; u64 v64; - u32 v32; memset(&rcomp, 0, sizeof(rcomp)); strscpy(rcomp.type, "compression", sizeof(rcomp.type)); - v32 = atomic_read(&alg->compress_cnt); - rcomp.stat_compress_cnt = v32; + v64 = atomic64_read(&alg->compress_cnt); + rcomp.stat_compress_cnt = v64; v64 = atomic64_read(&alg->compress_tlen); rcomp.stat_compress_tlen = v64; - v32 = atomic_read(&alg->decompress_cnt); - rcomp.stat_decompress_cnt = v32; + v64 = atomic64_read(&alg->decompress_cnt); + rcomp.stat_decompress_cnt = v64; v64 = atomic64_read(&alg->decompress_tlen); rcomp.stat_decompress_tlen = v64; - v32 = atomic_read(&alg->cipher_err_cnt); - rcomp.stat_compress_err_cnt = v32; + v64 = atomic64_read(&alg->cipher_err_cnt); + rcomp.stat_compress_err_cnt = v64; return nla_put(skb, CRYPTOCFGA_STAT_COMPRESS, sizeof(rcomp), &rcomp); } @@ -106,21 +103,20 @@ static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_stat racomp; u64 v64; - u32 v32; memset(&racomp, 0, sizeof(racomp)); strscpy(racomp.type, "acomp", sizeof(racomp.type)); - v32 = atomic_read(&alg->compress_cnt); - racomp.stat_compress_cnt = v32; + v64 = atomic64_read(&alg->compress_cnt); + racomp.stat_compress_cnt = v64; v64 = atomic64_read(&alg->compress_tlen); racomp.stat_compress_tlen = v64; - v32 = atomic_read(&alg->decompress_cnt); - racomp.stat_decompress_cnt = v32; + v64 = atomic64_read(&alg->decompress_cnt); + racomp.stat_decompress_cnt = v64; v64 = atomic64_read(&alg->decompress_tlen); racomp.stat_decompress_tlen = v64; - v32 = atomic_read(&alg->cipher_err_cnt); - racomp.stat_compress_err_cnt = v32; + v64 = atomic64_read(&alg->cipher_err_cnt); + racomp.stat_compress_err_cnt = v64; return nla_put(skb, CRYPTOCFGA_STAT_ACOMP, sizeof(racomp), &racomp); } @@ -129,25 +125,24 @@ static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_stat rakcipher; u64 v64; - u32 v32; memset(&rakcipher, 0, sizeof(rakcipher)); strscpy(rakcipher.type, "akcipher", sizeof(rakcipher.type)); - v32 = atomic_read(&alg->encrypt_cnt); - rakcipher.stat_encrypt_cnt = v32; + v64 = atomic64_read(&alg->encrypt_cnt); + rakcipher.stat_encrypt_cnt = v64; v64 = atomic64_read(&alg->encrypt_tlen); rakcipher.stat_encrypt_tlen = v64; - v32 = atomic_read(&alg->decrypt_cnt); - rakcipher.stat_decrypt_cnt = v32; + v64 = atomic64_read(&alg->decrypt_cnt); + 
rakcipher.stat_decrypt_cnt = v64; v64 = atomic64_read(&alg->decrypt_tlen); rakcipher.stat_decrypt_tlen = v64; - v32 = atomic_read(&alg->sign_cnt); - rakcipher.stat_sign_cnt = v32; - v32 = atomic_read(&alg->verify_cnt); - rakcipher.stat_verify_cnt = v32; - v32 = atomic_read(&alg->akcipher_err_cnt); - rakcipher.stat_akcipher_err_cnt = v32; + v64 = atomic64_read(&alg->sign_cnt); + rakcipher.stat_sign_cnt = v64; + v64 = atomic64_read(&alg->verify_cnt); + rakcipher.stat_verify_cnt = v64; + v64 = atomic64_read(&alg->akcipher_err_cnt); + rakcipher.stat_akcipher_err_cnt = v64; return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER, sizeof(rakcipher), &rakcipher); @@ -156,19 +151,19 @@ static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg) static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_stat rkpp; - u32 v; + u64 v; memset(&rkpp, 0, sizeof(rkpp)); strscpy(rkpp.type, "kpp", sizeof(rkpp.type)); - v = atomic_read(&alg->setsecret_cnt); + v = atomic64_read(&alg->setsecret_cnt); rkpp.stat_setsecret_cnt = v; - v = atomic_read(&alg->generate_public_key_cnt); + v = atomic64_read(&alg->generate_public_key_cnt); rkpp.stat_generate_public_key_cnt = v; - v = atomic_read(&alg->compute_shared_secret_cnt); + v = atomic64_read(&alg->compute_shared_secret_cnt); rkpp.stat_compute_shared_secret_cnt = v; - v = atomic_read(&alg->kpp_err_cnt); + v = atomic64_read(&alg->kpp_err_cnt); rkpp.stat_kpp_err_cnt = v; return nla_put(skb, CRYPTOCFGA_STAT_KPP, sizeof(rkpp), &rkpp); @@ -178,18 +173,17 @@ static int crypto_report_ahash(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_stat rhash; u64 v64; - u32 v32; memset(&rhash, 0, sizeof(rhash)); strscpy(rhash.type, "ahash", sizeof(rhash.type)); - v32 = atomic_read(&alg->hash_cnt); - rhash.stat_hash_cnt = v32; + v64 = atomic64_read(&alg->hash_cnt); + rhash.stat_hash_cnt = v64; v64 = atomic64_read(&alg->hash_tlen); rhash.stat_hash_tlen = v64; - v32 = atomic_read(&alg->hash_err_cnt); - rhash.stat_hash_err_cnt = v32; + v64 = atomic64_read(&alg->hash_err_cnt); + rhash.stat_hash_err_cnt = v64; return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash); } @@ -198,18 +192,17 @@ static int crypto_report_shash(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_stat rhash; u64 v64; - u32 v32; memset(&rhash, 0, sizeof(rhash)); strscpy(rhash.type, "shash", sizeof(rhash.type)); - v32 = atomic_read(&alg->hash_cnt); - rhash.stat_hash_cnt = v32; + v64 = atomic64_read(&alg->hash_cnt); + rhash.stat_hash_cnt = v64; v64 = atomic64_read(&alg->hash_tlen); rhash.stat_hash_tlen = v64; - v32 = atomic_read(&alg->hash_err_cnt); - rhash.stat_hash_err_cnt = v32; + v64 = atomic64_read(&alg->hash_err_cnt); + rhash.stat_hash_err_cnt = v64; return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash); } @@ -218,20 +211,19 @@ static int crypto_report_rng(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_stat rrng; u64 v64; - u32 v32; memset(&rrng, 0, sizeof(rrng)); strscpy(rrng.type, "rng", sizeof(rrng.type)); - v32 = atomic_read(&alg->generate_cnt); - rrng.stat_generate_cnt = v32; + v64 = atomic64_read(&alg->generate_cnt); + rrng.stat_generate_cnt = v64; v64 = atomic64_read(&alg->generate_tlen); rrng.stat_generate_tlen = v64; - v32 = atomic_read(&alg->seed_cnt); - rrng.stat_seed_cnt = v32; - v32 = atomic_read(&alg->hash_err_cnt); - rrng.stat_rng_err_cnt = v32; + v64 = atomic64_read(&alg->seed_cnt); + rrng.stat_seed_cnt = v64; + v64 = atomic64_read(&alg->hash_err_cnt); + rrng.stat_rng_err_cnt = v64; return nla_put(skb, 
CRYPTOCFGA_STAT_RNG, sizeof(rrng), &rrng); } diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h index 22e6f412c595..f79918196811 100644 --- a/include/crypto/acompress.h +++ b/include/crypto/acompress.h @@ -240,9 +240,9 @@ static inline void crypto_stat_compress(struct acomp_req *req, int ret) struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic_inc(&tfm->base.__crt_alg->compress_err_cnt); + atomic64_inc(&tfm->base.__crt_alg->compress_err_cnt); } else { - atomic_inc(&tfm->base.__crt_alg->compress_cnt); + atomic64_inc(&tfm->base.__crt_alg->compress_cnt); atomic64_add(req->slen, &tfm->base.__crt_alg->compress_tlen); } #endif @@ -254,9 +254,9 @@ static inline void crypto_stat_decompress(struct acomp_req *req, int ret) struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic_inc(&tfm->base.__crt_alg->compress_err_cnt); + atomic64_inc(&tfm->base.__crt_alg->compress_err_cnt); } else { - atomic_inc(&tfm->base.__crt_alg->decompress_cnt); + atomic64_inc(&tfm->base.__crt_alg->decompress_cnt); atomic64_add(req->slen, &tfm->base.__crt_alg->decompress_tlen); } #endif diff --git a/include/crypto/aead.h b/include/crypto/aead.h index 0d765d7bfb82..99afd78c665d 100644 --- a/include/crypto/aead.h +++ b/include/crypto/aead.h @@ -312,9 +312,9 @@ static inline void crypto_stat_aead_encrypt(struct aead_request *req, int ret) struct crypto_aead *tfm = crypto_aead_reqtfm(req); if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic_inc(&tfm->base.__crt_alg->aead_err_cnt); + atomic64_inc(&tfm->base.__crt_alg->aead_err_cnt); } else { - atomic_inc(&tfm->base.__crt_alg->encrypt_cnt); + atomic64_inc(&tfm->base.__crt_alg->encrypt_cnt); atomic64_add(req->cryptlen, &tfm->base.__crt_alg->encrypt_tlen); } #endif @@ -326,9 +326,9 @@ static inline void crypto_stat_aead_decrypt(struct aead_request *req, int ret) struct crypto_aead *tfm = crypto_aead_reqtfm(req); if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic_inc(&tfm->base.__crt_alg->aead_err_cnt); + atomic64_inc(&tfm->base.__crt_alg->aead_err_cnt); } else { - atomic_inc(&tfm->base.__crt_alg->decrypt_cnt); + atomic64_inc(&tfm->base.__crt_alg->decrypt_cnt); atomic64_add(req->cryptlen, &tfm->base.__crt_alg->decrypt_tlen); } #endif diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h index afac71119396..3dc05cf7e0a9 100644 --- a/include/crypto/akcipher.h +++ b/include/crypto/akcipher.h @@ -278,9 +278,9 @@ static inline void crypto_stat_akcipher_encrypt(struct akcipher_request *req, struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt); + atomic64_inc(&tfm->base.__crt_alg->akcipher_err_cnt); } else { - atomic_inc(&tfm->base.__crt_alg->encrypt_cnt); + atomic64_inc(&tfm->base.__crt_alg->encrypt_cnt); atomic64_add(req->src_len, &tfm->base.__crt_alg->encrypt_tlen); } #endif @@ -293,9 +293,9 @@ static inline void crypto_stat_akcipher_decrypt(struct akcipher_request *req, struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt); + atomic64_inc(&tfm->base.__crt_alg->akcipher_err_cnt); } else { - atomic_inc(&tfm->base.__crt_alg->decrypt_cnt); + atomic64_inc(&tfm->base.__crt_alg->decrypt_cnt); atomic64_add(req->src_len, &tfm->base.__crt_alg->decrypt_tlen); } #endif @@ -308,9 +308,9 @@ static inline void 
crypto_stat_akcipher_sign(struct akcipher_request *req, struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); if (ret && ret != -EINPROGRESS && ret != -EBUSY) - atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt); + atomic64_inc(&tfm->base.__crt_alg->akcipher_err_cnt); else - atomic_inc(&tfm->base.__crt_alg->sign_cnt); + atomic64_inc(&tfm->base.__crt_alg->sign_cnt); #endif } @@ -321,9 +321,9 @@ static inline void crypto_stat_akcipher_verify(struct akcipher_request *req, struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); if (ret && ret != -EINPROGRESS && ret != -EBUSY) - atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt); + atomic64_inc(&tfm->base.__crt_alg->akcipher_err_cnt); else - atomic_inc(&tfm->base.__crt_alg->verify_cnt); + atomic64_inc(&tfm->base.__crt_alg->verify_cnt); #endif } diff --git a/include/crypto/hash.h b/include/crypto/hash.h index bc7796600338..52920bed05ba 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -418,7 +418,7 @@ static inline void crypto_stat_ahash_update(struct ahash_request *req, int ret) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); if (ret && ret != -EINPROGRESS && ret != -EBUSY) - atomic_inc(&tfm->base.__crt_alg->hash_err_cnt); + atomic64_inc(&tfm->base.__crt_alg->hash_err_cnt); else atomic64_add(req->nbytes, &tfm->base.__crt_alg->hash_tlen); #endif @@ -430,9 +430,9 @@ static inline void crypto_stat_ahash_final(struct ahash_request *req, int ret) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic_inc(&tfm->base.__crt_alg->hash_err_cnt); + atomic64_inc(&tfm->base.__crt_alg->hash_err_cnt); } else { - atomic_inc(&tfm->base.__crt_alg->hash_cnt); + atomic64_inc(&tfm->base.__crt_alg->hash_cnt); atomic64_add(req->nbytes, &tfm->base.__crt_alg->hash_tlen); } #endif diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h index f517ba6d3a27..bd5103a80919 100644 --- a/include/crypto/kpp.h +++ b/include/crypto/kpp.h @@ -272,9 +272,9 @@ static inline void crypto_stat_kpp_set_secret(struct crypto_kpp *tfm, int ret) { #ifdef CONFIG_CRYPTO_STATS if (ret) - atomic_inc(&tfm->base.__crt_alg->kpp_err_cnt); + atomic64_inc(&tfm->base.__crt_alg->kpp_err_cnt); else - atomic_inc(&tfm->base.__crt_alg->setsecret_cnt); + atomic64_inc(&tfm->base.__crt_alg->setsecret_cnt); #endif } @@ -285,9 +285,9 @@ static inline void crypto_stat_kpp_generate_public_key(struct kpp_request *req, struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); if (ret) - atomic_inc(&tfm->base.__crt_alg->kpp_err_cnt); + atomic64_inc(&tfm->base.__crt_alg->kpp_err_cnt); else - atomic_inc(&tfm->base.__crt_alg->generate_public_key_cnt); + atomic64_inc(&tfm->base.__crt_alg->generate_public_key_cnt); #endif } @@ -298,9 +298,9 @@ static inline void crypto_stat_kpp_compute_shared_secret(struct kpp_request *req struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); if (ret) - atomic_inc(&tfm->base.__crt_alg->kpp_err_cnt); + atomic64_inc(&tfm->base.__crt_alg->kpp_err_cnt); else - atomic_inc(&tfm->base.__crt_alg->compute_shared_secret_cnt); + atomic64_inc(&tfm->base.__crt_alg->compute_shared_secret_cnt); #endif } diff --git a/include/crypto/rng.h b/include/crypto/rng.h index 6d258f5b68f1..966615bba45e 100644 --- a/include/crypto/rng.h +++ b/include/crypto/rng.h @@ -126,9 +126,9 @@ static inline void crypto_stat_rng_seed(struct crypto_rng *tfm, int ret) { #ifdef CONFIG_CRYPTO_STATS if (ret && ret != -EINPROGRESS && ret != -EBUSY) - atomic_inc(&tfm->base.__crt_alg->rng_err_cnt); + atomic64_inc(&tfm->base.__crt_alg->rng_err_cnt); else - 
atomic_inc(&tfm->base.__crt_alg->seed_cnt); + atomic64_inc(&tfm->base.__crt_alg->seed_cnt); #endif } @@ -137,9 +137,9 @@ static inline void crypto_stat_rng_generate(struct crypto_rng *tfm, { #ifdef CONFIG_CRYPTO_STATS if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic_inc(&tfm->base.__crt_alg->rng_err_cnt); + atomic64_inc(&tfm->base.__crt_alg->rng_err_cnt); } else { - atomic_inc(&tfm->base.__crt_alg->generate_cnt); + atomic64_inc(&tfm->base.__crt_alg->generate_cnt); atomic64_add(dlen, &tfm->base.__crt_alg->generate_tlen); } #endif diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index 925f547cdcfa..dff54731ddf4 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -491,9 +491,9 @@ static inline void crypto_stat_skcipher_encrypt(struct skcipher_request *req, { #ifdef CONFIG_CRYPTO_STATS if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic_inc(&alg->cipher_err_cnt); + atomic64_inc(&alg->cipher_err_cnt); } else { - atomic_inc(&alg->encrypt_cnt); + atomic64_inc(&alg->encrypt_cnt); atomic64_add(req->cryptlen, &alg->encrypt_tlen); } #endif @@ -504,9 +504,9 @@ static inline void crypto_stat_skcipher_decrypt(struct skcipher_request *req, { #ifdef CONFIG_CRYPTO_STATS if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic_inc(&alg->cipher_err_cnt); + atomic64_inc(&alg->cipher_err_cnt); } else { - atomic_inc(&alg->decrypt_cnt); + atomic64_inc(&alg->decrypt_cnt); atomic64_add(req->cryptlen, &alg->decrypt_tlen); } #endif diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 3e05053b8d57..b109b50906e7 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -517,11 +517,11 @@ struct crypto_alg { #ifdef CONFIG_CRYPTO_STATS union { - atomic_t encrypt_cnt; - atomic_t compress_cnt; - atomic_t generate_cnt; - atomic_t hash_cnt; - atomic_t setsecret_cnt; + atomic64_t encrypt_cnt; + atomic64_t compress_cnt; + atomic64_t generate_cnt; + atomic64_t hash_cnt; + atomic64_t setsecret_cnt; }; union { atomic64_t encrypt_tlen; @@ -530,29 +530,29 @@ struct crypto_alg { atomic64_t hash_tlen; }; union { - atomic_t akcipher_err_cnt; - atomic_t cipher_err_cnt; - atomic_t compress_err_cnt; - atomic_t aead_err_cnt; - atomic_t hash_err_cnt; - atomic_t rng_err_cnt; - atomic_t kpp_err_cnt; + atomic64_t akcipher_err_cnt; + atomic64_t cipher_err_cnt; + atomic64_t compress_err_cnt; + atomic64_t aead_err_cnt; + atomic64_t hash_err_cnt; + atomic64_t rng_err_cnt; + atomic64_t kpp_err_cnt; }; union { - atomic_t decrypt_cnt; - atomic_t decompress_cnt; - atomic_t seed_cnt; - atomic_t generate_public_key_cnt; + atomic64_t decrypt_cnt; + atomic64_t decompress_cnt; + atomic64_t seed_cnt; + atomic64_t generate_public_key_cnt; }; union { atomic64_t decrypt_tlen; atomic64_t decompress_tlen; }; union { - atomic_t verify_cnt; - atomic_t compute_shared_secret_cnt; + atomic64_t verify_cnt; + atomic64_t compute_shared_secret_cnt; }; - atomic_t sign_cnt; + atomic64_t sign_cnt; #endif /* CONFIG_CRYPTO_STATS */ } CRYPTO_MINALIGN_ATTR; @@ -983,9 +983,9 @@ static inline void crypto_stat_ablkcipher_encrypt(struct ablkcipher_request *req crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic_inc(&crt->base->base.__crt_alg->cipher_err_cnt); + atomic64_inc(&crt->base->base.__crt_alg->cipher_err_cnt); } else { - atomic_inc(&crt->base->base.__crt_alg->encrypt_cnt); + atomic64_inc(&crt->base->base.__crt_alg->encrypt_cnt); atomic64_add(req->nbytes, &crt->base->base.__crt_alg->encrypt_tlen); } #endif @@ -999,9 +999,9 @@ 
static inline void crypto_stat_ablkcipher_decrypt(struct ablkcipher_request *req crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic_inc(&crt->base->base.__crt_alg->cipher_err_cnt); + atomic64_inc(&crt->base->base.__crt_alg->cipher_err_cnt); } else { - atomic_inc(&crt->base->base.__crt_alg->decrypt_cnt); + atomic64_inc(&crt->base->base.__crt_alg->decrypt_cnt); atomic64_add(req->nbytes, &crt->base->base.__crt_alg->decrypt_tlen); } #endif diff --git a/include/uapi/linux/cryptouser.h b/include/uapi/linux/cryptouser.h index 6dafbc3e4414..9f8187077ce4 100644 --- a/include/uapi/linux/cryptouser.h +++ b/include/uapi/linux/cryptouser.h @@ -79,11 +79,11 @@ struct crypto_user_alg { struct crypto_stat { char type[CRYPTO_MAX_NAME]; union { - __u32 stat_encrypt_cnt; - __u32 stat_compress_cnt; - __u32 stat_generate_cnt; - __u32 stat_hash_cnt; - __u32 stat_setsecret_cnt; + __u64 stat_encrypt_cnt; + __u64 stat_compress_cnt; + __u64 stat_generate_cnt; + __u64 stat_hash_cnt; + __u64 stat_setsecret_cnt; }; union { __u64 stat_encrypt_tlen; @@ -92,29 +92,29 @@ struct crypto_stat { __u64 stat_hash_tlen; }; union { - __u32 stat_akcipher_err_cnt; - __u32 stat_cipher_err_cnt; - __u32 stat_compress_err_cnt; - __u32 stat_aead_err_cnt; - __u32 stat_hash_err_cnt; - __u32 stat_rng_err_cnt; - __u32 stat_kpp_err_cnt; + __u64 stat_akcipher_err_cnt; + __u64 stat_cipher_err_cnt; + __u64 stat_compress_err_cnt; + __u64 stat_aead_err_cnt; + __u64 stat_hash_err_cnt; + __u64 stat_rng_err_cnt; + __u64 stat_kpp_err_cnt; }; union { - __u32 stat_decrypt_cnt; - __u32 stat_decompress_cnt; - __u32 stat_seed_cnt; - __u32 stat_generate_public_key_cnt; + __u64 stat_decrypt_cnt; + __u64 stat_decompress_cnt; + __u64 stat_seed_cnt; + __u64 stat_generate_public_key_cnt; }; union { __u64 stat_decrypt_tlen; __u64 stat_decompress_tlen; }; union { - __u32 stat_verify_cnt; - __u32 stat_compute_shared_secret_cnt; + __u64 stat_verify_cnt; + __u64 stat_compute_shared_secret_cnt; }; - __u32 stat_sign_cnt; + __u64 stat_sign_cnt; }; struct crypto_report_larval { -- cgit v1.2.3 From f7d76e05d058b832b373237566cc1af8251371b5 Mon Sep 17 00:00:00 2001 From: Corentin Labbe Date: Thu, 29 Nov 2018 14:42:21 +0000 Subject: crypto: user - fix use_after_free of struct xxx_request All crypto_stats functions use the struct xxx_request for feeding stats, but in some case this structure could already be freed. For fixing this, the needed parameters (len and alg) will be stored before the request being executed. 
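The pattern, sketched below in simplified kernel C, is: snapshot the values the accounting code needs and take a reference on the algorithm before the operation is dispatched, then report statistics only from those snapshots once the operation returns. The helper names mirror those introduced by this patch; the wrapper function itself is purely illustrative and not part of the change.

	/*
	 * Illustrative sketch only, not part of the patch: capture everything
	 * the stats code needs while the request is still guaranteed valid.
	 */
	static int example_final_with_stats(struct ahash_request *req)
	{
		struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
		struct crypto_alg *alg = tfm->base.__crt_alg;
		unsigned int nbytes = req->nbytes;	/* snapshot while req is alive */
		int ret;

		crypto_stats_get(alg);			/* hold a ref so alg outlives the op */
		ret = crypto_ahash_op(req, tfm->final);	/* req may already be freed here */
		crypto_stats_ahash_final(nbytes, ret, alg); /* uses only the snapshots, drops the ref */
		return ret;
	}
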
Fixes: cac5818c25d0 ("crypto: user - Implement a generic crypto statistics") Reported-by: syzbot Signed-off-by: Corentin Labbe Signed-off-by: Herbert Xu --- crypto/ahash.c | 17 +++- crypto/algapi.c | 233 +++++++++++++++++++++++++++++++++++++++++++++ crypto/rng.c | 4 +- include/crypto/acompress.h | 38 ++------ include/crypto/aead.h | 38 ++------ include/crypto/akcipher.h | 74 +++----------- include/crypto/hash.h | 32 +------ include/crypto/kpp.h | 48 ++-------- include/crypto/rng.h | 27 +----- include/crypto/skcipher.h | 36 ++----- include/linux/crypto.h | 105 +++++++++++++------- 11 files changed, 376 insertions(+), 276 deletions(-) (limited to 'include/linux') diff --git a/crypto/ahash.c b/crypto/ahash.c index 3a348fbcf8f9..5d320a811f75 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -364,20 +364,28 @@ static int crypto_ahash_op(struct ahash_request *req, int crypto_ahash_final(struct ahash_request *req) { + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct crypto_alg *alg = tfm->base.__crt_alg; + unsigned int nbytes = req->nbytes; int ret; + crypto_stats_get(alg); ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final); - crypto_stat_ahash_final(req, ret); + crypto_stats_ahash_final(nbytes, ret, alg); return ret; } EXPORT_SYMBOL_GPL(crypto_ahash_final); int crypto_ahash_finup(struct ahash_request *req) { + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct crypto_alg *alg = tfm->base.__crt_alg; + unsigned int nbytes = req->nbytes; int ret; + crypto_stats_get(alg); ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup); - crypto_stat_ahash_final(req, ret); + crypto_stats_ahash_final(nbytes, ret, alg); return ret; } EXPORT_SYMBOL_GPL(crypto_ahash_finup); @@ -385,13 +393,16 @@ EXPORT_SYMBOL_GPL(crypto_ahash_finup); int crypto_ahash_digest(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct crypto_alg *alg = tfm->base.__crt_alg; + unsigned int nbytes = req->nbytes; int ret; + crypto_stats_get(alg); if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) ret = -ENOKEY; else ret = crypto_ahash_op(req, tfm->digest); - crypto_stat_ahash_final(req, ret); + crypto_stats_ahash_final(nbytes, ret, alg); return ret; } EXPORT_SYMBOL_GPL(crypto_ahash_digest); diff --git a/crypto/algapi.c b/crypto/algapi.c index 42fe316f80ee..4c1e6079d271 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -1078,6 +1078,239 @@ int crypto_type_has_alg(const char *name, const struct crypto_type *frontend, } EXPORT_SYMBOL_GPL(crypto_type_has_alg); +#ifdef CONFIG_CRYPTO_STATS +void crypto_stats_get(struct crypto_alg *alg) +{ + crypto_alg_get(alg); +} +EXPORT_SYMBOL_GPL(crypto_stats_get); + +void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, + struct crypto_alg *alg) +{ + if (ret && ret != -EINPROGRESS && ret != -EBUSY) { + atomic64_inc(&alg->cipher_err_cnt); + } else { + atomic64_inc(&alg->encrypt_cnt); + atomic64_add(nbytes, &alg->encrypt_tlen); + } + crypto_alg_put(alg); +} +EXPORT_SYMBOL_GPL(crypto_stats_ablkcipher_encrypt); + +void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, + struct crypto_alg *alg) +{ + if (ret && ret != -EINPROGRESS && ret != -EBUSY) { + atomic64_inc(&alg->cipher_err_cnt); + } else { + atomic64_inc(&alg->decrypt_cnt); + atomic64_add(nbytes, &alg->decrypt_tlen); + } + crypto_alg_put(alg); +} +EXPORT_SYMBOL_GPL(crypto_stats_ablkcipher_decrypt); + +void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, + int ret) +{ + if (ret && ret != -EINPROGRESS && ret != -EBUSY) { + 
atomic64_inc(&alg->aead_err_cnt); + } else { + atomic64_inc(&alg->encrypt_cnt); + atomic64_add(cryptlen, &alg->encrypt_tlen); + } + crypto_alg_put(alg); +} +EXPORT_SYMBOL_GPL(crypto_stats_aead_encrypt); + +void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, + int ret) +{ + if (ret && ret != -EINPROGRESS && ret != -EBUSY) { + atomic64_inc(&alg->aead_err_cnt); + } else { + atomic64_inc(&alg->decrypt_cnt); + atomic64_add(cryptlen, &alg->decrypt_tlen); + } + crypto_alg_put(alg); +} +EXPORT_SYMBOL_GPL(crypto_stats_aead_decrypt); + +void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, + struct crypto_alg *alg) +{ + if (ret && ret != -EINPROGRESS && ret != -EBUSY) { + atomic64_inc(&alg->akcipher_err_cnt); + } else { + atomic64_inc(&alg->encrypt_cnt); + atomic64_add(src_len, &alg->encrypt_tlen); + } + crypto_alg_put(alg); +} +EXPORT_SYMBOL_GPL(crypto_stats_akcipher_encrypt); + +void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, + struct crypto_alg *alg) +{ + if (ret && ret != -EINPROGRESS && ret != -EBUSY) { + atomic64_inc(&alg->akcipher_err_cnt); + } else { + atomic64_inc(&alg->decrypt_cnt); + atomic64_add(src_len, &alg->decrypt_tlen); + } + crypto_alg_put(alg); +} +EXPORT_SYMBOL_GPL(crypto_stats_akcipher_decrypt); + +void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg) +{ + if (ret && ret != -EINPROGRESS && ret != -EBUSY) + atomic64_inc(&alg->akcipher_err_cnt); + else + atomic64_inc(&alg->sign_cnt); + crypto_alg_put(alg); +} +EXPORT_SYMBOL_GPL(crypto_stats_akcipher_sign); + +void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg) +{ + if (ret && ret != -EINPROGRESS && ret != -EBUSY) + atomic64_inc(&alg->akcipher_err_cnt); + else + atomic64_inc(&alg->verify_cnt); + crypto_alg_put(alg); +} +EXPORT_SYMBOL_GPL(crypto_stats_akcipher_verify); + +void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg) +{ + if (ret && ret != -EINPROGRESS && ret != -EBUSY) { + atomic64_inc(&alg->compress_err_cnt); + } else { + atomic64_inc(&alg->compress_cnt); + atomic64_add(slen, &alg->compress_tlen); + } + crypto_alg_put(alg); +} +EXPORT_SYMBOL_GPL(crypto_stats_compress); + +void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg) +{ + if (ret && ret != -EINPROGRESS && ret != -EBUSY) { + atomic64_inc(&alg->compress_err_cnt); + } else { + atomic64_inc(&alg->decompress_cnt); + atomic64_add(slen, &alg->decompress_tlen); + } + crypto_alg_put(alg); +} +EXPORT_SYMBOL_GPL(crypto_stats_decompress); + +void crypto_stats_ahash_update(unsigned int nbytes, int ret, + struct crypto_alg *alg) +{ + if (ret && ret != -EINPROGRESS && ret != -EBUSY) + atomic64_inc(&alg->hash_err_cnt); + else + atomic64_add(nbytes, &alg->hash_tlen); + crypto_alg_put(alg); +} +EXPORT_SYMBOL_GPL(crypto_stats_ahash_update); + +void crypto_stats_ahash_final(unsigned int nbytes, int ret, + struct crypto_alg *alg) +{ + if (ret && ret != -EINPROGRESS && ret != -EBUSY) { + atomic64_inc(&alg->hash_err_cnt); + } else { + atomic64_inc(&alg->hash_cnt); + atomic64_add(nbytes, &alg->hash_tlen); + } + crypto_alg_put(alg); +} +EXPORT_SYMBOL_GPL(crypto_stats_ahash_final); + +void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret) +{ + if (ret) + atomic64_inc(&alg->kpp_err_cnt); + else + atomic64_inc(&alg->setsecret_cnt); + crypto_alg_put(alg); +} +EXPORT_SYMBOL_GPL(crypto_stats_kpp_set_secret); + +void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret) +{ + if (ret) + atomic64_inc(&alg->kpp_err_cnt); + else + 
atomic64_inc(&alg->generate_public_key_cnt); + crypto_alg_put(alg); +} +EXPORT_SYMBOL_GPL(crypto_stats_kpp_generate_public_key); + +void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret) +{ + if (ret) + atomic64_inc(&alg->kpp_err_cnt); + else + atomic64_inc(&alg->compute_shared_secret_cnt); + crypto_alg_put(alg); +} +EXPORT_SYMBOL_GPL(crypto_stats_kpp_compute_shared_secret); + +void crypto_stats_rng_seed(struct crypto_alg *alg, int ret) +{ + if (ret && ret != -EINPROGRESS && ret != -EBUSY) + atomic64_inc(&alg->rng_err_cnt); + else + atomic64_inc(&alg->seed_cnt); + crypto_alg_put(alg); +} +EXPORT_SYMBOL_GPL(crypto_stats_rng_seed); + +void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, + int ret) +{ + if (ret && ret != -EINPROGRESS && ret != -EBUSY) { + atomic64_inc(&alg->rng_err_cnt); + } else { + atomic64_inc(&alg->generate_cnt); + atomic64_add(dlen, &alg->generate_tlen); + } + crypto_alg_put(alg); +} +EXPORT_SYMBOL_GPL(crypto_stats_rng_generate); + +void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, + struct crypto_alg *alg) +{ + if (ret && ret != -EINPROGRESS && ret != -EBUSY) { + atomic64_inc(&alg->cipher_err_cnt); + } else { + atomic64_inc(&alg->encrypt_cnt); + atomic64_add(cryptlen, &alg->encrypt_tlen); + } + crypto_alg_put(alg); +} +EXPORT_SYMBOL_GPL(crypto_stats_skcipher_encrypt); + +void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, + struct crypto_alg *alg) +{ + if (ret && ret != -EINPROGRESS && ret != -EBUSY) { + atomic64_inc(&alg->cipher_err_cnt); + } else { + atomic64_inc(&alg->decrypt_cnt); + atomic64_add(cryptlen, &alg->decrypt_tlen); + } + crypto_alg_put(alg); +} +EXPORT_SYMBOL_GPL(crypto_stats_skcipher_decrypt); +#endif + static int __init crypto_algapi_init(void) { crypto_init_proc(); diff --git a/crypto/rng.c b/crypto/rng.c index 2406501b90b7..33c38a72bff5 100644 --- a/crypto/rng.c +++ b/crypto/rng.c @@ -35,9 +35,11 @@ static int crypto_default_rng_refcnt; int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) { + struct crypto_alg *alg = tfm->base.__crt_alg; u8 *buf = NULL; int err; + crypto_stats_get(alg); if (!seed && slen) { buf = kmalloc(slen, GFP_KERNEL); if (!buf) @@ -50,7 +52,7 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) } err = crypto_rng_alg(tfm)->seed(tfm, seed, slen); - crypto_stat_rng_seed(tfm, err); + crypto_stats_rng_seed(alg, err); out: kzfree(buf); return err; diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h index f79918196811..a3e766dff917 100644 --- a/include/crypto/acompress.h +++ b/include/crypto/acompress.h @@ -234,34 +234,6 @@ static inline void acomp_request_set_params(struct acomp_req *req, req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT; } -static inline void crypto_stat_compress(struct acomp_req *req, int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); - - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&tfm->base.__crt_alg->compress_err_cnt); - } else { - atomic64_inc(&tfm->base.__crt_alg->compress_cnt); - atomic64_add(req->slen, &tfm->base.__crt_alg->compress_tlen); - } -#endif -} - -static inline void crypto_stat_decompress(struct acomp_req *req, int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); - - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&tfm->base.__crt_alg->compress_err_cnt); - } else { - atomic64_inc(&tfm->base.__crt_alg->decompress_cnt); - 
atomic64_add(req->slen, &tfm->base.__crt_alg->decompress_tlen); - } -#endif -} - /** * crypto_acomp_compress() -- Invoke asynchronous compress operation * @@ -274,10 +246,13 @@ static inline void crypto_stat_decompress(struct acomp_req *req, int ret) static inline int crypto_acomp_compress(struct acomp_req *req) { struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); + struct crypto_alg *alg = tfm->base.__crt_alg; + unsigned int slen = req->slen; int ret; + crypto_stats_get(alg); ret = tfm->compress(req); - crypto_stat_compress(req, ret); + crypto_stats_compress(slen, ret, alg); return ret; } @@ -293,10 +268,13 @@ static inline int crypto_acomp_compress(struct acomp_req *req) static inline int crypto_acomp_decompress(struct acomp_req *req) { struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); + struct crypto_alg *alg = tfm->base.__crt_alg; + unsigned int slen = req->slen; int ret; + crypto_stats_get(alg); ret = tfm->decompress(req); - crypto_stat_decompress(req, ret); + crypto_stats_decompress(slen, ret, alg); return ret; } diff --git a/include/crypto/aead.h b/include/crypto/aead.h index 99afd78c665d..b7b8d24cf765 100644 --- a/include/crypto/aead.h +++ b/include/crypto/aead.h @@ -306,34 +306,6 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) return __crypto_aead_cast(req->base.tfm); } -static inline void crypto_stat_aead_encrypt(struct aead_request *req, int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&tfm->base.__crt_alg->aead_err_cnt); - } else { - atomic64_inc(&tfm->base.__crt_alg->encrypt_cnt); - atomic64_add(req->cryptlen, &tfm->base.__crt_alg->encrypt_tlen); - } -#endif -} - -static inline void crypto_stat_aead_decrypt(struct aead_request *req, int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&tfm->base.__crt_alg->aead_err_cnt); - } else { - atomic64_inc(&tfm->base.__crt_alg->decrypt_cnt); - atomic64_add(req->cryptlen, &tfm->base.__crt_alg->decrypt_tlen); - } -#endif -} - /** * crypto_aead_encrypt() - encrypt plaintext * @req: reference to the aead_request handle that holds all information @@ -356,13 +328,16 @@ static inline void crypto_stat_aead_decrypt(struct aead_request *req, int ret) static inline int crypto_aead_encrypt(struct aead_request *req) { struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct crypto_alg *alg = aead->base.__crt_alg; + unsigned int cryptlen = req->cryptlen; int ret; + crypto_stats_get(alg); if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY) ret = -ENOKEY; else ret = crypto_aead_alg(aead)->encrypt(req); - crypto_stat_aead_encrypt(req, ret); + crypto_stats_aead_encrypt(cryptlen, alg, ret); return ret; } @@ -391,15 +366,18 @@ static inline int crypto_aead_encrypt(struct aead_request *req) static inline int crypto_aead_decrypt(struct aead_request *req) { struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct crypto_alg *alg = aead->base.__crt_alg; + unsigned int cryptlen = req->cryptlen; int ret; + crypto_stats_get(alg); if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY) ret = -ENOKEY; else if (req->cryptlen < crypto_aead_authsize(aead)) ret = -EINVAL; else ret = crypto_aead_alg(aead)->decrypt(req); - crypto_stat_aead_decrypt(req, ret); + crypto_stats_aead_decrypt(cryptlen, alg, ret); return ret; } diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h index 
3dc05cf7e0a9..2d690494568c 100644 --- a/include/crypto/akcipher.h +++ b/include/crypto/akcipher.h @@ -271,62 +271,6 @@ static inline unsigned int crypto_akcipher_maxsize(struct crypto_akcipher *tfm) return alg->max_size(tfm); } -static inline void crypto_stat_akcipher_encrypt(struct akcipher_request *req, - int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&tfm->base.__crt_alg->akcipher_err_cnt); - } else { - atomic64_inc(&tfm->base.__crt_alg->encrypt_cnt); - atomic64_add(req->src_len, &tfm->base.__crt_alg->encrypt_tlen); - } -#endif -} - -static inline void crypto_stat_akcipher_decrypt(struct akcipher_request *req, - int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&tfm->base.__crt_alg->akcipher_err_cnt); - } else { - atomic64_inc(&tfm->base.__crt_alg->decrypt_cnt); - atomic64_add(req->src_len, &tfm->base.__crt_alg->decrypt_tlen); - } -#endif -} - -static inline void crypto_stat_akcipher_sign(struct akcipher_request *req, - int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - - if (ret && ret != -EINPROGRESS && ret != -EBUSY) - atomic64_inc(&tfm->base.__crt_alg->akcipher_err_cnt); - else - atomic64_inc(&tfm->base.__crt_alg->sign_cnt); -#endif -} - -static inline void crypto_stat_akcipher_verify(struct akcipher_request *req, - int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - - if (ret && ret != -EINPROGRESS && ret != -EBUSY) - atomic64_inc(&tfm->base.__crt_alg->akcipher_err_cnt); - else - atomic64_inc(&tfm->base.__crt_alg->verify_cnt); -#endif -} - /** * crypto_akcipher_encrypt() - Invoke public key encrypt operation * @@ -341,10 +285,13 @@ static inline int crypto_akcipher_encrypt(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + struct crypto_alg *calg = tfm->base.__crt_alg; + unsigned int src_len = req->src_len; int ret; + crypto_stats_get(calg); ret = alg->encrypt(req); - crypto_stat_akcipher_encrypt(req, ret); + crypto_stats_akcipher_encrypt(src_len, ret, calg); return ret; } @@ -362,10 +309,13 @@ static inline int crypto_akcipher_decrypt(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + struct crypto_alg *calg = tfm->base.__crt_alg; + unsigned int src_len = req->src_len; int ret; + crypto_stats_get(calg); ret = alg->decrypt(req); - crypto_stat_akcipher_decrypt(req, ret); + crypto_stats_akcipher_decrypt(src_len, ret, calg); return ret; } @@ -383,10 +333,12 @@ static inline int crypto_akcipher_sign(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + struct crypto_alg *calg = tfm->base.__crt_alg; int ret; + crypto_stats_get(calg); ret = alg->sign(req); - crypto_stat_akcipher_sign(req, ret); + crypto_stats_akcipher_sign(ret, calg); return ret; } @@ -404,10 +356,12 @@ static inline int crypto_akcipher_verify(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + struct crypto_alg *calg = tfm->base.__crt_alg; int ret; + crypto_stats_get(calg); ret = alg->verify(req); - 
crypto_stat_akcipher_verify(req, ret); + crypto_stats_akcipher_verify(ret, calg); return ret; } diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 52920bed05ba..3b31c1b349ae 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -412,32 +412,6 @@ static inline void *ahash_request_ctx(struct ahash_request *req) int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen); -static inline void crypto_stat_ahash_update(struct ahash_request *req, int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - - if (ret && ret != -EINPROGRESS && ret != -EBUSY) - atomic64_inc(&tfm->base.__crt_alg->hash_err_cnt); - else - atomic64_add(req->nbytes, &tfm->base.__crt_alg->hash_tlen); -#endif -} - -static inline void crypto_stat_ahash_final(struct ahash_request *req, int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&tfm->base.__crt_alg->hash_err_cnt); - } else { - atomic64_inc(&tfm->base.__crt_alg->hash_cnt); - atomic64_add(req->nbytes, &tfm->base.__crt_alg->hash_tlen); - } -#endif -} - /** * crypto_ahash_finup() - update and finalize message digest * @req: reference to the ahash_request handle that holds all information @@ -552,10 +526,14 @@ static inline int crypto_ahash_init(struct ahash_request *req) */ static inline int crypto_ahash_update(struct ahash_request *req) { + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct crypto_alg *alg = tfm->base.__crt_alg; + unsigned int nbytes = req->nbytes; int ret; + crypto_stats_get(alg); ret = crypto_ahash_reqtfm(req)->update(req); - crypto_stat_ahash_update(req, ret); + crypto_stats_ahash_update(nbytes, ret, alg); return ret; } diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h index bd5103a80919..1a97e1601422 100644 --- a/include/crypto/kpp.h +++ b/include/crypto/kpp.h @@ -268,42 +268,6 @@ struct kpp_secret { unsigned short len; }; -static inline void crypto_stat_kpp_set_secret(struct crypto_kpp *tfm, int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - if (ret) - atomic64_inc(&tfm->base.__crt_alg->kpp_err_cnt); - else - atomic64_inc(&tfm->base.__crt_alg->setsecret_cnt); -#endif -} - -static inline void crypto_stat_kpp_generate_public_key(struct kpp_request *req, - int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); - - if (ret) - atomic64_inc(&tfm->base.__crt_alg->kpp_err_cnt); - else - atomic64_inc(&tfm->base.__crt_alg->generate_public_key_cnt); -#endif -} - -static inline void crypto_stat_kpp_compute_shared_secret(struct kpp_request *req, - int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); - - if (ret) - atomic64_inc(&tfm->base.__crt_alg->kpp_err_cnt); - else - atomic64_inc(&tfm->base.__crt_alg->compute_shared_secret_cnt); -#endif -} - /** * crypto_kpp_set_secret() - Invoke kpp operation * @@ -323,10 +287,12 @@ static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm, const void *buffer, unsigned int len) { struct kpp_alg *alg = crypto_kpp_alg(tfm); + struct crypto_alg *calg = tfm->base.__crt_alg; int ret; + crypto_stats_get(calg); ret = alg->set_secret(tfm, buffer, len); - crypto_stat_kpp_set_secret(tfm, ret); + crypto_stats_kpp_set_secret(calg, ret); return ret; } @@ -347,10 +313,12 @@ static inline int crypto_kpp_generate_public_key(struct kpp_request *req) { struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); struct kpp_alg *alg = crypto_kpp_alg(tfm); + struct 
crypto_alg *calg = tfm->base.__crt_alg; int ret; + crypto_stats_get(calg); ret = alg->generate_public_key(req); - crypto_stat_kpp_generate_public_key(req, ret); + crypto_stats_kpp_generate_public_key(calg, ret); return ret; } @@ -368,10 +336,12 @@ static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req) { struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); struct kpp_alg *alg = crypto_kpp_alg(tfm); + struct crypto_alg *calg = tfm->base.__crt_alg; int ret; + crypto_stats_get(calg); ret = alg->compute_shared_secret(req); - crypto_stat_kpp_compute_shared_secret(req, ret); + crypto_stats_kpp_compute_shared_secret(calg, ret); return ret; } diff --git a/include/crypto/rng.h b/include/crypto/rng.h index 966615bba45e..022a1b896b47 100644 --- a/include/crypto/rng.h +++ b/include/crypto/rng.h @@ -122,29 +122,6 @@ static inline void crypto_free_rng(struct crypto_rng *tfm) crypto_destroy_tfm(tfm, crypto_rng_tfm(tfm)); } -static inline void crypto_stat_rng_seed(struct crypto_rng *tfm, int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - if (ret && ret != -EINPROGRESS && ret != -EBUSY) - atomic64_inc(&tfm->base.__crt_alg->rng_err_cnt); - else - atomic64_inc(&tfm->base.__crt_alg->seed_cnt); -#endif -} - -static inline void crypto_stat_rng_generate(struct crypto_rng *tfm, - unsigned int dlen, int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&tfm->base.__crt_alg->rng_err_cnt); - } else { - atomic64_inc(&tfm->base.__crt_alg->generate_cnt); - atomic64_add(dlen, &tfm->base.__crt_alg->generate_tlen); - } -#endif -} - /** * crypto_rng_generate() - get random number * @tfm: cipher handle @@ -163,10 +140,12 @@ static inline int crypto_rng_generate(struct crypto_rng *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int dlen) { + struct crypto_alg *alg = tfm->base.__crt_alg; int ret; + crypto_stats_get(alg); ret = crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen); - crypto_stat_rng_generate(tfm, dlen, ret); + crypto_stats_rng_generate(alg, dlen, ret); return ret; } diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index dff54731ddf4..480f8301a47d 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -486,32 +486,6 @@ static inline struct crypto_sync_skcipher *crypto_sync_skcipher_reqtfm( return container_of(tfm, struct crypto_sync_skcipher, base); } -static inline void crypto_stat_skcipher_encrypt(struct skcipher_request *req, - int ret, struct crypto_alg *alg) -{ -#ifdef CONFIG_CRYPTO_STATS - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->cipher_err_cnt); - } else { - atomic64_inc(&alg->encrypt_cnt); - atomic64_add(req->cryptlen, &alg->encrypt_tlen); - } -#endif -} - -static inline void crypto_stat_skcipher_decrypt(struct skcipher_request *req, - int ret, struct crypto_alg *alg) -{ -#ifdef CONFIG_CRYPTO_STATS - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->cipher_err_cnt); - } else { - atomic64_inc(&alg->decrypt_cnt); - atomic64_add(req->cryptlen, &alg->decrypt_tlen); - } -#endif -} - /** * crypto_skcipher_encrypt() - encrypt plaintext * @req: reference to the skcipher_request handle that holds all information @@ -526,13 +500,16 @@ static inline void crypto_stat_skcipher_decrypt(struct skcipher_request *req, static inline int crypto_skcipher_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct crypto_alg *alg = tfm->base.__crt_alg; + unsigned int cryptlen = req->cryptlen; int ret; + 
crypto_stats_get(alg); if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) ret = -ENOKEY; else ret = tfm->encrypt(req); - crypto_stat_skcipher_encrypt(req, ret, tfm->base.__crt_alg); + crypto_stats_skcipher_encrypt(cryptlen, ret, alg); return ret; } @@ -550,13 +527,16 @@ static inline int crypto_skcipher_encrypt(struct skcipher_request *req) static inline int crypto_skcipher_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct crypto_alg *alg = tfm->base.__crt_alg; + unsigned int cryptlen = req->cryptlen; int ret; + crypto_stats_get(alg); if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) ret = -ENOKEY; else ret = tfm->decrypt(req); - crypto_stat_skcipher_decrypt(req, ret, tfm->base.__crt_alg); + crypto_stats_skcipher_decrypt(cryptlen, ret, alg); return ret; } diff --git a/include/linux/crypto.h b/include/linux/crypto.h index b109b50906e7..e2fd24714e00 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -557,6 +557,69 @@ struct crypto_alg { } CRYPTO_MINALIGN_ATTR; +#ifdef CONFIG_CRYPTO_STATS +void crypto_stats_get(struct crypto_alg *alg); +void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, struct crypto_alg *alg); +void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, struct crypto_alg *alg); +void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret); +void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret); +void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg); +void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg); +void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg); +void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg); +void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg); +void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg); +void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg); +void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg); +void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret); +void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret); +void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret); +void crypto_stats_rng_seed(struct crypto_alg *alg, int ret); +void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret); +void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg); +void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg); +#else +static inline void crypto_stats_get(struct crypto_alg *alg) +{} +static inline void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, struct crypto_alg *alg) +{} +static inline void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, struct crypto_alg *alg) +{} +static inline void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret) +{} +static inline void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret) +{} +static inline void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg) +{} +static inline void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg) +{} +static inline void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg) +{} +static inline 
void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg) +{} +static inline void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg) +{} +static inline void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg) +{} +static inline void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg) +{} +static inline void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg) +{} +static inline void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret) +{} +static inline void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret) +{} +static inline void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret) +{} +static inline void crypto_stats_rng_seed(struct crypto_alg *alg, int ret) +{} +static inline void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret) +{} +static inline void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg) +{} +static inline void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg) +{} +#endif /* * A helper struct for waiting for completion of async crypto ops */ @@ -975,38 +1038,6 @@ static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm( return __crypto_ablkcipher_cast(req->base.tfm); } -static inline void crypto_stat_ablkcipher_encrypt(struct ablkcipher_request *req, - int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - struct ablkcipher_tfm *crt = - crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); - - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&crt->base->base.__crt_alg->cipher_err_cnt); - } else { - atomic64_inc(&crt->base->base.__crt_alg->encrypt_cnt); - atomic64_add(req->nbytes, &crt->base->base.__crt_alg->encrypt_tlen); - } -#endif -} - -static inline void crypto_stat_ablkcipher_decrypt(struct ablkcipher_request *req, - int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - struct ablkcipher_tfm *crt = - crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); - - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&crt->base->base.__crt_alg->cipher_err_cnt); - } else { - atomic64_inc(&crt->base->base.__crt_alg->decrypt_cnt); - atomic64_add(req->nbytes, &crt->base->base.__crt_alg->decrypt_tlen); - } -#endif -} - /** * crypto_ablkcipher_encrypt() - encrypt plaintext * @req: reference to the ablkcipher_request handle that holds all information @@ -1022,10 +1053,13 @@ static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req) { struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); + struct crypto_alg *alg = crt->base->base.__crt_alg; + unsigned int nbytes = req->nbytes; int ret; + crypto_stats_get(alg); ret = crt->encrypt(req); - crypto_stat_ablkcipher_encrypt(req, ret); + crypto_stats_ablkcipher_encrypt(nbytes, ret, alg); return ret; } @@ -1044,10 +1078,13 @@ static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req) { struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); + struct crypto_alg *alg = crt->base->base.__crt_alg; + unsigned int nbytes = req->nbytes; int ret; + crypto_stats_get(alg); ret = crt->decrypt(req); - crypto_stat_ablkcipher_decrypt(req, ret); + crypto_stats_ablkcipher_decrypt(nbytes, ret, alg); return ret; } -- cgit v1.2.3 From 17c18f9e33282a170458cb5ea20759bfcb0da7d8 Mon Sep 17 00:00:00 2001 From: Corentin Labbe Date: Thu, 29 Nov 2018 14:42:24 +0000 Subject: crypto: user - Split stats in multiple structures Like 
for userspace, this patch splits stats into multiple structures, one for each algorithm class. Signed-off-by: Corentin Labbe Signed-off-by: Herbert Xu --- crypto/algapi.c | 108 +++++++++++++--------------- crypto/crypto_user_stat.c | 82 ++++++++++----------- include/linux/crypto.h | 180 ++++++++++++++++++++++++++++++---------------- 3 files changed, 210 insertions(+), 160 deletions(-) (limited to 'include/linux') diff --git a/crypto/algapi.c b/crypto/algapi.c index 4c1e6079d271..a8cb5aed0069 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -259,13 +259,7 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg) list_add(&larval->alg.cra_list, &crypto_alg_list); #ifdef CONFIG_CRYPTO_STATS - atomic64_set(&alg->encrypt_cnt, 0); - atomic64_set(&alg->decrypt_cnt, 0); - atomic64_set(&alg->encrypt_tlen, 0); - atomic64_set(&alg->decrypt_tlen, 0); - atomic64_set(&alg->verify_cnt, 0); - atomic64_set(&alg->cipher_err_cnt, 0); - atomic64_set(&alg->sign_cnt, 0); + memset(&alg->stats, 0, sizeof(alg->stats)); #endif out: @@ -1089,10 +1083,10 @@ void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, struct crypto_alg *alg) { if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->cipher_err_cnt); + atomic64_inc(&alg->stats.cipher.cipher_err_cnt); } else { - atomic64_inc(&alg->encrypt_cnt); - atomic64_add(nbytes, &alg->encrypt_tlen); + atomic64_inc(&alg->stats.cipher.encrypt_cnt); + atomic64_add(nbytes, &alg->stats.cipher.encrypt_tlen); } crypto_alg_put(alg); } @@ -1102,10 +1096,10 @@ void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, struct crypto_alg *alg) { if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->cipher_err_cnt); + atomic64_inc(&alg->stats.cipher.cipher_err_cnt); } else { - atomic64_inc(&alg->decrypt_cnt); - atomic64_add(nbytes, &alg->decrypt_tlen); + atomic64_inc(&alg->stats.cipher.decrypt_cnt); + atomic64_add(nbytes, &alg->stats.cipher.decrypt_tlen); } crypto_alg_put(alg); } @@ -1115,10 +1109,10 @@ void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret) { if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->aead_err_cnt); + atomic64_inc(&alg->stats.aead.aead_err_cnt); } else { - atomic64_inc(&alg->encrypt_cnt); - atomic64_add(cryptlen, &alg->encrypt_tlen); + atomic64_inc(&alg->stats.aead.encrypt_cnt); + atomic64_add(cryptlen, &alg->stats.aead.encrypt_tlen); } crypto_alg_put(alg); } @@ -1128,10 +1122,10 @@ void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret) { if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->aead_err_cnt); + atomic64_inc(&alg->stats.aead.aead_err_cnt); } else { - atomic64_inc(&alg->decrypt_cnt); - atomic64_add(cryptlen, &alg->decrypt_tlen); + atomic64_inc(&alg->stats.aead.decrypt_cnt); + atomic64_add(cryptlen, &alg->stats.aead.decrypt_tlen); } crypto_alg_put(alg); } @@ -1141,10 +1135,10 @@ void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg) { if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->akcipher_err_cnt); + atomic64_inc(&alg->stats.akcipher.akcipher_err_cnt); } else { - atomic64_inc(&alg->encrypt_cnt); - atomic64_add(src_len, &alg->encrypt_tlen); + atomic64_inc(&alg->stats.akcipher.encrypt_cnt); + atomic64_add(src_len, &alg->stats.akcipher.encrypt_tlen); } crypto_alg_put(alg); } @@ -1154,10 +1148,10 @@ void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg) { if (ret && ret != 
-EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->akcipher_err_cnt); + atomic64_inc(&alg->stats.akcipher.akcipher_err_cnt); } else { - atomic64_inc(&alg->decrypt_cnt); - atomic64_add(src_len, &alg->decrypt_tlen); + atomic64_inc(&alg->stats.akcipher.decrypt_cnt); + atomic64_add(src_len, &alg->stats.akcipher.decrypt_tlen); } crypto_alg_put(alg); } @@ -1166,9 +1160,9 @@ EXPORT_SYMBOL_GPL(crypto_stats_akcipher_decrypt); void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg) { if (ret && ret != -EINPROGRESS && ret != -EBUSY) - atomic64_inc(&alg->akcipher_err_cnt); + atomic64_inc(&alg->stats.akcipher.akcipher_err_cnt); else - atomic64_inc(&alg->sign_cnt); + atomic64_inc(&alg->stats.akcipher.sign_cnt); crypto_alg_put(alg); } EXPORT_SYMBOL_GPL(crypto_stats_akcipher_sign); @@ -1176,9 +1170,9 @@ EXPORT_SYMBOL_GPL(crypto_stats_akcipher_sign); void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg) { if (ret && ret != -EINPROGRESS && ret != -EBUSY) - atomic64_inc(&alg->akcipher_err_cnt); + atomic64_inc(&alg->stats.akcipher.akcipher_err_cnt); else - atomic64_inc(&alg->verify_cnt); + atomic64_inc(&alg->stats.akcipher.verify_cnt); crypto_alg_put(alg); } EXPORT_SYMBOL_GPL(crypto_stats_akcipher_verify); @@ -1186,10 +1180,10 @@ EXPORT_SYMBOL_GPL(crypto_stats_akcipher_verify); void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg) { if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->compress_err_cnt); + atomic64_inc(&alg->stats.compress.compress_err_cnt); } else { - atomic64_inc(&alg->compress_cnt); - atomic64_add(slen, &alg->compress_tlen); + atomic64_inc(&alg->stats.compress.compress_cnt); + atomic64_add(slen, &alg->stats.compress.compress_tlen); } crypto_alg_put(alg); } @@ -1198,10 +1192,10 @@ EXPORT_SYMBOL_GPL(crypto_stats_compress); void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg) { if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->compress_err_cnt); + atomic64_inc(&alg->stats.compress.compress_err_cnt); } else { - atomic64_inc(&alg->decompress_cnt); - atomic64_add(slen, &alg->decompress_tlen); + atomic64_inc(&alg->stats.compress.decompress_cnt); + atomic64_add(slen, &alg->stats.compress.decompress_tlen); } crypto_alg_put(alg); } @@ -1211,9 +1205,9 @@ void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg) { if (ret && ret != -EINPROGRESS && ret != -EBUSY) - atomic64_inc(&alg->hash_err_cnt); + atomic64_inc(&alg->stats.hash.hash_err_cnt); else - atomic64_add(nbytes, &alg->hash_tlen); + atomic64_add(nbytes, &alg->stats.hash.hash_tlen); crypto_alg_put(alg); } EXPORT_SYMBOL_GPL(crypto_stats_ahash_update); @@ -1222,10 +1216,10 @@ void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg) { if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->hash_err_cnt); + atomic64_inc(&alg->stats.hash.hash_err_cnt); } else { - atomic64_inc(&alg->hash_cnt); - atomic64_add(nbytes, &alg->hash_tlen); + atomic64_inc(&alg->stats.hash.hash_cnt); + atomic64_add(nbytes, &alg->stats.hash.hash_tlen); } crypto_alg_put(alg); } @@ -1234,9 +1228,9 @@ EXPORT_SYMBOL_GPL(crypto_stats_ahash_final); void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret) { if (ret) - atomic64_inc(&alg->kpp_err_cnt); + atomic64_inc(&alg->stats.kpp.kpp_err_cnt); else - atomic64_inc(&alg->setsecret_cnt); + atomic64_inc(&alg->stats.kpp.setsecret_cnt); crypto_alg_put(alg); } EXPORT_SYMBOL_GPL(crypto_stats_kpp_set_secret); @@ -1244,9 +1238,9 @@ 
EXPORT_SYMBOL_GPL(crypto_stats_kpp_set_secret); void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret) { if (ret) - atomic64_inc(&alg->kpp_err_cnt); + atomic64_inc(&alg->stats.kpp.kpp_err_cnt); else - atomic64_inc(&alg->generate_public_key_cnt); + atomic64_inc(&alg->stats.kpp.generate_public_key_cnt); crypto_alg_put(alg); } EXPORT_SYMBOL_GPL(crypto_stats_kpp_generate_public_key); @@ -1254,9 +1248,9 @@ EXPORT_SYMBOL_GPL(crypto_stats_kpp_generate_public_key); void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret) { if (ret) - atomic64_inc(&alg->kpp_err_cnt); + atomic64_inc(&alg->stats.kpp.kpp_err_cnt); else - atomic64_inc(&alg->compute_shared_secret_cnt); + atomic64_inc(&alg->stats.kpp.compute_shared_secret_cnt); crypto_alg_put(alg); } EXPORT_SYMBOL_GPL(crypto_stats_kpp_compute_shared_secret); @@ -1264,9 +1258,9 @@ EXPORT_SYMBOL_GPL(crypto_stats_kpp_compute_shared_secret); void crypto_stats_rng_seed(struct crypto_alg *alg, int ret) { if (ret && ret != -EINPROGRESS && ret != -EBUSY) - atomic64_inc(&alg->rng_err_cnt); + atomic64_inc(&alg->stats.rng.rng_err_cnt); else - atomic64_inc(&alg->seed_cnt); + atomic64_inc(&alg->stats.rng.seed_cnt); crypto_alg_put(alg); } EXPORT_SYMBOL_GPL(crypto_stats_rng_seed); @@ -1275,10 +1269,10 @@ void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret) { if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->rng_err_cnt); + atomic64_inc(&alg->stats.rng.rng_err_cnt); } else { - atomic64_inc(&alg->generate_cnt); - atomic64_add(dlen, &alg->generate_tlen); + atomic64_inc(&alg->stats.rng.generate_cnt); + atomic64_add(dlen, &alg->stats.rng.generate_tlen); } crypto_alg_put(alg); } @@ -1288,10 +1282,10 @@ void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg) { if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->cipher_err_cnt); + atomic64_inc(&alg->stats.cipher.cipher_err_cnt); } else { - atomic64_inc(&alg->encrypt_cnt); - atomic64_add(cryptlen, &alg->encrypt_tlen); + atomic64_inc(&alg->stats.cipher.encrypt_cnt); + atomic64_add(cryptlen, &alg->stats.cipher.encrypt_tlen); } crypto_alg_put(alg); } @@ -1301,10 +1295,10 @@ void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg) { if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->cipher_err_cnt); + atomic64_inc(&alg->stats.cipher.cipher_err_cnt); } else { - atomic64_inc(&alg->decrypt_cnt); - atomic64_add(cryptlen, &alg->decrypt_tlen); + atomic64_inc(&alg->stats.cipher.decrypt_cnt); + atomic64_add(cryptlen, &alg->stats.cipher.decrypt_tlen); } crypto_alg_put(alg); } diff --git a/crypto/crypto_user_stat.c b/crypto/crypto_user_stat.c index 7b668c659122..113bf1691560 100644 --- a/crypto/crypto_user_stat.c +++ b/crypto/crypto_user_stat.c @@ -39,11 +39,11 @@ static int crypto_report_aead(struct sk_buff *skb, struct crypto_alg *alg) strscpy(raead.type, "aead", sizeof(raead.type)); - raead.stat_encrypt_cnt = atomic64_read(&alg->encrypt_cnt); - raead.stat_encrypt_tlen = atomic64_read(&alg->encrypt_tlen); - raead.stat_decrypt_cnt = atomic64_read(&alg->decrypt_cnt); - raead.stat_decrypt_tlen = atomic64_read(&alg->decrypt_tlen); - raead.stat_aead_err_cnt = atomic64_read(&alg->aead_err_cnt); + raead.stat_encrypt_cnt = atomic64_read(&alg->stats.aead.encrypt_cnt); + raead.stat_encrypt_tlen = atomic64_read(&alg->stats.aead.encrypt_tlen); + raead.stat_decrypt_cnt = atomic64_read(&alg->stats.aead.decrypt_cnt); + raead.stat_decrypt_tlen = 
atomic64_read(&alg->stats.aead.decrypt_tlen); + raead.stat_aead_err_cnt = atomic64_read(&alg->stats.aead.aead_err_cnt); return nla_put(skb, CRYPTOCFGA_STAT_AEAD, sizeof(raead), &raead); } @@ -56,11 +56,11 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg) strscpy(rcipher.type, "cipher", sizeof(rcipher.type)); - rcipher.stat_encrypt_cnt = atomic64_read(&alg->encrypt_cnt); - rcipher.stat_encrypt_tlen = atomic64_read(&alg->encrypt_tlen); - rcipher.stat_decrypt_cnt = atomic64_read(&alg->decrypt_cnt); - rcipher.stat_decrypt_tlen = atomic64_read(&alg->decrypt_tlen); - rcipher.stat_cipher_err_cnt = atomic64_read(&alg->cipher_err_cnt); + rcipher.stat_encrypt_cnt = atomic64_read(&alg->stats.cipher.encrypt_cnt); + rcipher.stat_encrypt_tlen = atomic64_read(&alg->stats.cipher.encrypt_tlen); + rcipher.stat_decrypt_cnt = atomic64_read(&alg->stats.cipher.decrypt_cnt); + rcipher.stat_decrypt_tlen = atomic64_read(&alg->stats.cipher.decrypt_tlen); + rcipher.stat_cipher_err_cnt = atomic64_read(&alg->stats.cipher.cipher_err_cnt); return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher); } @@ -72,11 +72,11 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg) memset(&rcomp, 0, sizeof(rcomp)); strscpy(rcomp.type, "compression", sizeof(rcomp.type)); - rcomp.stat_compress_cnt = atomic64_read(&alg->compress_cnt); - rcomp.stat_compress_tlen = atomic64_read(&alg->compress_tlen); - rcomp.stat_decompress_cnt = atomic64_read(&alg->decompress_cnt); - rcomp.stat_decompress_tlen = atomic64_read(&alg->decompress_tlen); - rcomp.stat_compress_err_cnt = atomic64_read(&alg->compress_err_cnt); + rcomp.stat_compress_cnt = atomic64_read(&alg->stats.compress.compress_cnt); + rcomp.stat_compress_tlen = atomic64_read(&alg->stats.compress.compress_tlen); + rcomp.stat_decompress_cnt = atomic64_read(&alg->stats.compress.decompress_cnt); + rcomp.stat_decompress_tlen = atomic64_read(&alg->stats.compress.decompress_tlen); + rcomp.stat_compress_err_cnt = atomic64_read(&alg->stats.compress.compress_err_cnt); return nla_put(skb, CRYPTOCFGA_STAT_COMPRESS, sizeof(rcomp), &rcomp); } @@ -88,11 +88,11 @@ static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg) memset(&racomp, 0, sizeof(racomp)); strscpy(racomp.type, "acomp", sizeof(racomp.type)); - racomp.stat_compress_cnt = atomic64_read(&alg->compress_cnt); - racomp.stat_compress_tlen = atomic64_read(&alg->compress_tlen); - racomp.stat_decompress_cnt = atomic64_read(&alg->decompress_cnt); - racomp.stat_decompress_tlen = atomic64_read(&alg->decompress_tlen); - racomp.stat_compress_err_cnt = atomic64_read(&alg->compress_err_cnt); + racomp.stat_compress_cnt = atomic64_read(&alg->stats.compress.compress_cnt); + racomp.stat_compress_tlen = atomic64_read(&alg->stats.compress.compress_tlen); + racomp.stat_decompress_cnt = atomic64_read(&alg->stats.compress.decompress_cnt); + racomp.stat_decompress_tlen = atomic64_read(&alg->stats.compress.decompress_tlen); + racomp.stat_compress_err_cnt = atomic64_read(&alg->stats.compress.compress_err_cnt); return nla_put(skb, CRYPTOCFGA_STAT_ACOMP, sizeof(racomp), &racomp); } @@ -104,13 +104,13 @@ static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg) memset(&rakcipher, 0, sizeof(rakcipher)); strscpy(rakcipher.type, "akcipher", sizeof(rakcipher.type)); - rakcipher.stat_encrypt_cnt = atomic64_read(&alg->encrypt_cnt); - rakcipher.stat_encrypt_tlen = atomic64_read(&alg->encrypt_tlen); - rakcipher.stat_decrypt_cnt = atomic64_read(&alg->decrypt_cnt); - 
rakcipher.stat_decrypt_tlen = atomic64_read(&alg->decrypt_tlen); - rakcipher.stat_sign_cnt = atomic64_read(&alg->sign_cnt); - rakcipher.stat_verify_cnt = atomic64_read(&alg->verify_cnt); - rakcipher.stat_akcipher_err_cnt = atomic64_read(&alg->akcipher_err_cnt); + rakcipher.stat_encrypt_cnt = atomic64_read(&alg->stats.akcipher.encrypt_cnt); + rakcipher.stat_encrypt_tlen = atomic64_read(&alg->stats.akcipher.encrypt_tlen); + rakcipher.stat_decrypt_cnt = atomic64_read(&alg->stats.akcipher.decrypt_cnt); + rakcipher.stat_decrypt_tlen = atomic64_read(&alg->stats.akcipher.decrypt_tlen); + rakcipher.stat_sign_cnt = atomic64_read(&alg->stats.akcipher.sign_cnt); + rakcipher.stat_verify_cnt = atomic64_read(&alg->stats.akcipher.verify_cnt); + rakcipher.stat_akcipher_err_cnt = atomic64_read(&alg->stats.akcipher.akcipher_err_cnt); return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER, sizeof(rakcipher), &rakcipher); @@ -124,10 +124,10 @@ static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg) strscpy(rkpp.type, "kpp", sizeof(rkpp.type)); - rkpp.stat_setsecret_cnt = atomic64_read(&alg->setsecret_cnt); - rkpp.stat_generate_public_key_cnt = atomic64_read(&alg->generate_public_key_cnt); - rkpp.stat_compute_shared_secret_cnt = atomic64_read(&alg->compute_shared_secret_cnt); - rkpp.stat_kpp_err_cnt = atomic64_read(&alg->kpp_err_cnt); + rkpp.stat_setsecret_cnt = atomic64_read(&alg->stats.kpp.setsecret_cnt); + rkpp.stat_generate_public_key_cnt = atomic64_read(&alg->stats.kpp.generate_public_key_cnt); + rkpp.stat_compute_shared_secret_cnt = atomic64_read(&alg->stats.kpp.compute_shared_secret_cnt); + rkpp.stat_kpp_err_cnt = atomic64_read(&alg->stats.kpp.kpp_err_cnt); return nla_put(skb, CRYPTOCFGA_STAT_KPP, sizeof(rkpp), &rkpp); } @@ -140,9 +140,9 @@ static int crypto_report_ahash(struct sk_buff *skb, struct crypto_alg *alg) strscpy(rhash.type, "ahash", sizeof(rhash.type)); - rhash.stat_hash_cnt = atomic64_read(&alg->hash_cnt); - rhash.stat_hash_tlen = atomic64_read(&alg->hash_tlen); - rhash.stat_hash_err_cnt = atomic64_read(&alg->hash_err_cnt); + rhash.stat_hash_cnt = atomic64_read(&alg->stats.hash.hash_cnt); + rhash.stat_hash_tlen = atomic64_read(&alg->stats.hash.hash_tlen); + rhash.stat_hash_err_cnt = atomic64_read(&alg->stats.hash.hash_err_cnt); return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash); } @@ -155,9 +155,9 @@ static int crypto_report_shash(struct sk_buff *skb, struct crypto_alg *alg) strscpy(rhash.type, "shash", sizeof(rhash.type)); - rhash.stat_hash_cnt = atomic64_read(&alg->hash_cnt); - rhash.stat_hash_tlen = atomic64_read(&alg->hash_tlen); - rhash.stat_hash_err_cnt = atomic64_read(&alg->hash_err_cnt); + rhash.stat_hash_cnt = atomic64_read(&alg->stats.hash.hash_cnt); + rhash.stat_hash_tlen = atomic64_read(&alg->stats.hash.hash_tlen); + rhash.stat_hash_err_cnt = atomic64_read(&alg->stats.hash.hash_err_cnt); return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash); } @@ -170,10 +170,10 @@ static int crypto_report_rng(struct sk_buff *skb, struct crypto_alg *alg) strscpy(rrng.type, "rng", sizeof(rrng.type)); - rrng.stat_generate_cnt = atomic64_read(&alg->generate_cnt); - rrng.stat_generate_tlen = atomic64_read(&alg->generate_tlen); - rrng.stat_seed_cnt = atomic64_read(&alg->seed_cnt); - rrng.stat_rng_err_cnt = atomic64_read(&alg->rng_err_cnt); + rrng.stat_generate_cnt = atomic64_read(&alg->stats.rng.generate_cnt); + rrng.stat_generate_tlen = atomic64_read(&alg->stats.rng.generate_tlen); + rrng.stat_seed_cnt = atomic64_read(&alg->stats.rng.seed_cnt); + rrng.stat_rng_err_cnt 
= atomic64_read(&alg->stats.rng.rng_err_cnt); return nla_put(skb, CRYPTOCFGA_STAT_RNG, sizeof(rrng), &rrng); } diff --git a/include/linux/crypto.h b/include/linux/crypto.h index e2fd24714e00..8a46ab35479e 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -369,6 +369,115 @@ struct compress_alg { unsigned int slen, u8 *dst, unsigned int *dlen); }; +#ifdef CONFIG_CRYPTO_STATS +/* + * struct crypto_istat_aead - statistics for AEAD algorithm + * @encrypt_cnt: number of encrypt requests + * @encrypt_tlen: total data size handled by encrypt requests + * @decrypt_cnt: number of decrypt requests + * @decrypt_tlen: total data size handled by decrypt requests + * @aead_err_cnt: number of error for AEAD requests + */ +struct crypto_istat_aead { + atomic64_t encrypt_cnt; + atomic64_t encrypt_tlen; + atomic64_t decrypt_cnt; + atomic64_t decrypt_tlen; + atomic64_t aead_err_cnt; +}; + +/* + * struct crypto_istat_akcipher - statistics for akcipher algorithm + * @encrypt_cnt: number of encrypt requests + * @encrypt_tlen: total data size handled by encrypt requests + * @decrypt_cnt: number of decrypt requests + * @decrypt_tlen: total data size handled by decrypt requests + * @verify_cnt: number of verify operation + * @sign_cnt: number of sign requests + * @akcipher_err_cnt: number of error for akcipher requests + */ +struct crypto_istat_akcipher { + atomic64_t encrypt_cnt; + atomic64_t encrypt_tlen; + atomic64_t decrypt_cnt; + atomic64_t decrypt_tlen; + atomic64_t verify_cnt; + atomic64_t sign_cnt; + atomic64_t akcipher_err_cnt; +}; + +/* + * struct crypto_istat_cipher - statistics for cipher algorithm + * @encrypt_cnt: number of encrypt requests + * @encrypt_tlen: total data size handled by encrypt requests + * @decrypt_cnt: number of decrypt requests + * @decrypt_tlen: total data size handled by decrypt requests + * @cipher_err_cnt: number of error for cipher requests + */ +struct crypto_istat_cipher { + atomic64_t encrypt_cnt; + atomic64_t encrypt_tlen; + atomic64_t decrypt_cnt; + atomic64_t decrypt_tlen; + atomic64_t cipher_err_cnt; +}; + +/* + * struct crypto_istat_compress - statistics for compress algorithm + * @compress_cnt: number of compress requests + * @compress_tlen: total data size handled by compress requests + * @decompress_cnt: number of decompress requests + * @decompress_tlen: total data size handled by decompress requests + * @compress_err_cnt: number of error for compress requests + */ +struct crypto_istat_compress { + atomic64_t compress_cnt; + atomic64_t compress_tlen; + atomic64_t decompress_cnt; + atomic64_t decompress_tlen; + atomic64_t compress_err_cnt; +}; + +/* + * struct crypto_istat_hash - statistics for has algorithm + * @hash_cnt: number of hash requests + * @hash_tlen: total data size hashed + * @hash_err_cnt: number of error for hash requests + */ +struct crypto_istat_hash { + atomic64_t hash_cnt; + atomic64_t hash_tlen; + atomic64_t hash_err_cnt; +}; + +/* + * struct crypto_istat_kpp - statistics for KPP algorithm + * @setsecret_cnt: number of setsecrey operation + * @generate_public_key_cnt: number of generate_public_key operation + * @compute_shared_secret_cnt: number of compute_shared_secret operation + * @kpp_err_cnt: number of error for KPP requests + */ +struct crypto_istat_kpp { + atomic64_t setsecret_cnt; + atomic64_t generate_public_key_cnt; + atomic64_t compute_shared_secret_cnt; + atomic64_t kpp_err_cnt; +}; + +/* + * struct crypto_istat_rng: statistics for RNG algorithm + * @generate_cnt: number of RNG generate requests + * @generate_tlen: 
total data size of generated data by the RNG + * @seed_cnt: number of times the RNG was seeded + * @rng_err_cnt: number of error for RNG requests + */ +struct crypto_istat_rng { + atomic64_t generate_cnt; + atomic64_t generate_tlen; + atomic64_t seed_cnt; + atomic64_t rng_err_cnt; +}; +#endif /* CONFIG_CRYPTO_STATS */ #define cra_ablkcipher cra_u.ablkcipher #define cra_blkcipher cra_u.blkcipher @@ -454,32 +563,7 @@ struct compress_alg { * @cra_refcnt: internally used * @cra_destroy: internally used * - * All following statistics are for this crypto_alg - * @encrypt_cnt: number of encrypt requests - * @decrypt_cnt: number of decrypt requests - * @compress_cnt: number of compress requests - * @decompress_cnt: number of decompress requests - * @generate_cnt: number of RNG generate requests - * @seed_cnt: number of times the rng was seeded - * @hash_cnt: number of hash requests - * @sign_cnt: number of sign requests - * @setsecret_cnt: number of setsecrey operation - * @generate_public_key_cnt: number of generate_public_key operation - * @verify_cnt: number of verify operation - * @compute_shared_secret_cnt: number of compute_shared_secret operation - * @encrypt_tlen: total data size handled by encrypt requests - * @decrypt_tlen: total data size handled by decrypt requests - * @compress_tlen: total data size handled by compress requests - * @decompress_tlen: total data size handled by decompress requests - * @generate_tlen: total data size of generated data by the RNG - * @hash_tlen: total data size hashed - * @akcipher_err_cnt: number of error for akcipher requests - * @cipher_err_cnt: number of error for akcipher requests - * @compress_err_cnt: number of error for akcipher requests - * @aead_err_cnt: number of error for akcipher requests - * @hash_err_cnt: number of error for akcipher requests - * @rng_err_cnt: number of error for akcipher requests - * @kpp_err_cnt: number of error for akcipher requests + * @stats: union of all possible crypto_istat_xxx structures * * The struct crypto_alg describes a generic Crypto API algorithm and is common * for all of the transformations. 
Any variable not documented here shall not @@ -517,42 +601,14 @@ struct crypto_alg { #ifdef CONFIG_CRYPTO_STATS union { - atomic64_t encrypt_cnt; - atomic64_t compress_cnt; - atomic64_t generate_cnt; - atomic64_t hash_cnt; - atomic64_t setsecret_cnt; - }; - union { - atomic64_t encrypt_tlen; - atomic64_t compress_tlen; - atomic64_t generate_tlen; - atomic64_t hash_tlen; - }; - union { - atomic64_t akcipher_err_cnt; - atomic64_t cipher_err_cnt; - atomic64_t compress_err_cnt; - atomic64_t aead_err_cnt; - atomic64_t hash_err_cnt; - atomic64_t rng_err_cnt; - atomic64_t kpp_err_cnt; - }; - union { - atomic64_t decrypt_cnt; - atomic64_t decompress_cnt; - atomic64_t seed_cnt; - atomic64_t generate_public_key_cnt; - }; - union { - atomic64_t decrypt_tlen; - atomic64_t decompress_tlen; - }; - union { - atomic64_t verify_cnt; - atomic64_t compute_shared_secret_cnt; - }; - atomic64_t sign_cnt; + struct crypto_istat_aead aead; + struct crypto_istat_akcipher akcipher; + struct crypto_istat_cipher cipher; + struct crypto_istat_compress compress; + struct crypto_istat_hash hash; + struct crypto_istat_rng rng; + struct crypto_istat_kpp kpp; + } stats; #endif /* CONFIG_CRYPTO_STATS */ } CRYPTO_MINALIGN_ATTR; -- cgit v1.2.3 From 44f13133cb03ec32fc88a533673248ef5c0617e3 Mon Sep 17 00:00:00 2001 From: Corentin Labbe Date: Thu, 29 Nov 2018 14:42:25 +0000 Subject: crypto: user - rename err_cnt parameter Since now all crypto stats are on their own structures, it is now useless to have the algorithm name in the err_cnt member. Signed-off-by: Corentin Labbe Signed-off-by: Herbert Xu --- crypto/algapi.c | 38 +++++++++++++++++++------------------- crypto/crypto_user_stat.c | 18 +++++++++--------- include/linux/crypto.h | 28 ++++++++++++++-------------- include/uapi/linux/cryptouser.h | 14 +++++++------- tools/crypto/getstat.c | 18 +++++++++--------- 5 files changed, 58 insertions(+), 58 deletions(-) (limited to 'include/linux') diff --git a/crypto/algapi.c b/crypto/algapi.c index a8cb5aed0069..c0d4f9ef6b0f 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -1083,7 +1083,7 @@ void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, struct crypto_alg *alg) { if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->stats.cipher.cipher_err_cnt); + atomic64_inc(&alg->stats.cipher.err_cnt); } else { atomic64_inc(&alg->stats.cipher.encrypt_cnt); atomic64_add(nbytes, &alg->stats.cipher.encrypt_tlen); @@ -1096,7 +1096,7 @@ void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, struct crypto_alg *alg) { if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->stats.cipher.cipher_err_cnt); + atomic64_inc(&alg->stats.cipher.err_cnt); } else { atomic64_inc(&alg->stats.cipher.decrypt_cnt); atomic64_add(nbytes, &alg->stats.cipher.decrypt_tlen); @@ -1109,7 +1109,7 @@ void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret) { if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->stats.aead.aead_err_cnt); + atomic64_inc(&alg->stats.aead.err_cnt); } else { atomic64_inc(&alg->stats.aead.encrypt_cnt); atomic64_add(cryptlen, &alg->stats.aead.encrypt_tlen); @@ -1122,7 +1122,7 @@ void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret) { if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->stats.aead.aead_err_cnt); + atomic64_inc(&alg->stats.aead.err_cnt); } else { atomic64_inc(&alg->stats.aead.decrypt_cnt); atomic64_add(cryptlen, &alg->stats.aead.decrypt_tlen); @@ -1135,7 +1135,7 @@ void 
crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg) { if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->stats.akcipher.akcipher_err_cnt); + atomic64_inc(&alg->stats.akcipher.err_cnt); } else { atomic64_inc(&alg->stats.akcipher.encrypt_cnt); atomic64_add(src_len, &alg->stats.akcipher.encrypt_tlen); @@ -1148,7 +1148,7 @@ void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg) { if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->stats.akcipher.akcipher_err_cnt); + atomic64_inc(&alg->stats.akcipher.err_cnt); } else { atomic64_inc(&alg->stats.akcipher.decrypt_cnt); atomic64_add(src_len, &alg->stats.akcipher.decrypt_tlen); @@ -1160,7 +1160,7 @@ EXPORT_SYMBOL_GPL(crypto_stats_akcipher_decrypt); void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg) { if (ret && ret != -EINPROGRESS && ret != -EBUSY) - atomic64_inc(&alg->stats.akcipher.akcipher_err_cnt); + atomic64_inc(&alg->stats.akcipher.err_cnt); else atomic64_inc(&alg->stats.akcipher.sign_cnt); crypto_alg_put(alg); @@ -1170,7 +1170,7 @@ EXPORT_SYMBOL_GPL(crypto_stats_akcipher_sign); void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg) { if (ret && ret != -EINPROGRESS && ret != -EBUSY) - atomic64_inc(&alg->stats.akcipher.akcipher_err_cnt); + atomic64_inc(&alg->stats.akcipher.err_cnt); else atomic64_inc(&alg->stats.akcipher.verify_cnt); crypto_alg_put(alg); @@ -1180,7 +1180,7 @@ EXPORT_SYMBOL_GPL(crypto_stats_akcipher_verify); void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg) { if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->stats.compress.compress_err_cnt); + atomic64_inc(&alg->stats.compress.err_cnt); } else { atomic64_inc(&alg->stats.compress.compress_cnt); atomic64_add(slen, &alg->stats.compress.compress_tlen); @@ -1192,7 +1192,7 @@ EXPORT_SYMBOL_GPL(crypto_stats_compress); void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg) { if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->stats.compress.compress_err_cnt); + atomic64_inc(&alg->stats.compress.err_cnt); } else { atomic64_inc(&alg->stats.compress.decompress_cnt); atomic64_add(slen, &alg->stats.compress.decompress_tlen); @@ -1205,7 +1205,7 @@ void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg) { if (ret && ret != -EINPROGRESS && ret != -EBUSY) - atomic64_inc(&alg->stats.hash.hash_err_cnt); + atomic64_inc(&alg->stats.hash.err_cnt); else atomic64_add(nbytes, &alg->stats.hash.hash_tlen); crypto_alg_put(alg); @@ -1216,7 +1216,7 @@ void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg) { if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->stats.hash.hash_err_cnt); + atomic64_inc(&alg->stats.hash.err_cnt); } else { atomic64_inc(&alg->stats.hash.hash_cnt); atomic64_add(nbytes, &alg->stats.hash.hash_tlen); @@ -1228,7 +1228,7 @@ EXPORT_SYMBOL_GPL(crypto_stats_ahash_final); void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret) { if (ret) - atomic64_inc(&alg->stats.kpp.kpp_err_cnt); + atomic64_inc(&alg->stats.kpp.err_cnt); else atomic64_inc(&alg->stats.kpp.setsecret_cnt); crypto_alg_put(alg); @@ -1238,7 +1238,7 @@ EXPORT_SYMBOL_GPL(crypto_stats_kpp_set_secret); void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret) { if (ret) - atomic64_inc(&alg->stats.kpp.kpp_err_cnt); + atomic64_inc(&alg->stats.kpp.err_cnt); else 
atomic64_inc(&alg->stats.kpp.generate_public_key_cnt); crypto_alg_put(alg); @@ -1248,7 +1248,7 @@ EXPORT_SYMBOL_GPL(crypto_stats_kpp_generate_public_key); void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret) { if (ret) - atomic64_inc(&alg->stats.kpp.kpp_err_cnt); + atomic64_inc(&alg->stats.kpp.err_cnt); else atomic64_inc(&alg->stats.kpp.compute_shared_secret_cnt); crypto_alg_put(alg); @@ -1258,7 +1258,7 @@ EXPORT_SYMBOL_GPL(crypto_stats_kpp_compute_shared_secret); void crypto_stats_rng_seed(struct crypto_alg *alg, int ret) { if (ret && ret != -EINPROGRESS && ret != -EBUSY) - atomic64_inc(&alg->stats.rng.rng_err_cnt); + atomic64_inc(&alg->stats.rng.err_cnt); else atomic64_inc(&alg->stats.rng.seed_cnt); crypto_alg_put(alg); @@ -1269,7 +1269,7 @@ void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret) { if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->stats.rng.rng_err_cnt); + atomic64_inc(&alg->stats.rng.err_cnt); } else { atomic64_inc(&alg->stats.rng.generate_cnt); atomic64_add(dlen, &alg->stats.rng.generate_tlen); @@ -1282,7 +1282,7 @@ void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg) { if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->stats.cipher.cipher_err_cnt); + atomic64_inc(&alg->stats.cipher.err_cnt); } else { atomic64_inc(&alg->stats.cipher.encrypt_cnt); atomic64_add(cryptlen, &alg->stats.cipher.encrypt_tlen); @@ -1295,7 +1295,7 @@ void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg) { if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic64_inc(&alg->stats.cipher.cipher_err_cnt); + atomic64_inc(&alg->stats.cipher.err_cnt); } else { atomic64_inc(&alg->stats.cipher.decrypt_cnt); atomic64_add(cryptlen, &alg->stats.cipher.decrypt_tlen); diff --git a/crypto/crypto_user_stat.c b/crypto/crypto_user_stat.c index 113bf1691560..0ba00aaeb810 100644 --- a/crypto/crypto_user_stat.c +++ b/crypto/crypto_user_stat.c @@ -43,7 +43,7 @@ static int crypto_report_aead(struct sk_buff *skb, struct crypto_alg *alg) raead.stat_encrypt_tlen = atomic64_read(&alg->stats.aead.encrypt_tlen); raead.stat_decrypt_cnt = atomic64_read(&alg->stats.aead.decrypt_cnt); raead.stat_decrypt_tlen = atomic64_read(&alg->stats.aead.decrypt_tlen); - raead.stat_aead_err_cnt = atomic64_read(&alg->stats.aead.aead_err_cnt); + raead.stat_err_cnt = atomic64_read(&alg->stats.aead.err_cnt); return nla_put(skb, CRYPTOCFGA_STAT_AEAD, sizeof(raead), &raead); } @@ -60,7 +60,7 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg) rcipher.stat_encrypt_tlen = atomic64_read(&alg->stats.cipher.encrypt_tlen); rcipher.stat_decrypt_cnt = atomic64_read(&alg->stats.cipher.decrypt_cnt); rcipher.stat_decrypt_tlen = atomic64_read(&alg->stats.cipher.decrypt_tlen); - rcipher.stat_cipher_err_cnt = atomic64_read(&alg->stats.cipher.cipher_err_cnt); + rcipher.stat_err_cnt = atomic64_read(&alg->stats.cipher.err_cnt); return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher); } @@ -76,7 +76,7 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg) rcomp.stat_compress_tlen = atomic64_read(&alg->stats.compress.compress_tlen); rcomp.stat_decompress_cnt = atomic64_read(&alg->stats.compress.decompress_cnt); rcomp.stat_decompress_tlen = atomic64_read(&alg->stats.compress.decompress_tlen); - rcomp.stat_compress_err_cnt = atomic64_read(&alg->stats.compress.compress_err_cnt); + rcomp.stat_err_cnt = 
atomic64_read(&alg->stats.compress.err_cnt); return nla_put(skb, CRYPTOCFGA_STAT_COMPRESS, sizeof(rcomp), &rcomp); } @@ -92,7 +92,7 @@ static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg) racomp.stat_compress_tlen = atomic64_read(&alg->stats.compress.compress_tlen); racomp.stat_decompress_cnt = atomic64_read(&alg->stats.compress.decompress_cnt); racomp.stat_decompress_tlen = atomic64_read(&alg->stats.compress.decompress_tlen); - racomp.stat_compress_err_cnt = atomic64_read(&alg->stats.compress.compress_err_cnt); + racomp.stat_err_cnt = atomic64_read(&alg->stats.compress.err_cnt); return nla_put(skb, CRYPTOCFGA_STAT_ACOMP, sizeof(racomp), &racomp); } @@ -110,7 +110,7 @@ static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg) rakcipher.stat_decrypt_tlen = atomic64_read(&alg->stats.akcipher.decrypt_tlen); rakcipher.stat_sign_cnt = atomic64_read(&alg->stats.akcipher.sign_cnt); rakcipher.stat_verify_cnt = atomic64_read(&alg->stats.akcipher.verify_cnt); - rakcipher.stat_akcipher_err_cnt = atomic64_read(&alg->stats.akcipher.akcipher_err_cnt); + rakcipher.stat_err_cnt = atomic64_read(&alg->stats.akcipher.err_cnt); return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER, sizeof(rakcipher), &rakcipher); @@ -127,7 +127,7 @@ static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg) rkpp.stat_setsecret_cnt = atomic64_read(&alg->stats.kpp.setsecret_cnt); rkpp.stat_generate_public_key_cnt = atomic64_read(&alg->stats.kpp.generate_public_key_cnt); rkpp.stat_compute_shared_secret_cnt = atomic64_read(&alg->stats.kpp.compute_shared_secret_cnt); - rkpp.stat_kpp_err_cnt = atomic64_read(&alg->stats.kpp.kpp_err_cnt); + rkpp.stat_err_cnt = atomic64_read(&alg->stats.kpp.err_cnt); return nla_put(skb, CRYPTOCFGA_STAT_KPP, sizeof(rkpp), &rkpp); } @@ -142,7 +142,7 @@ static int crypto_report_ahash(struct sk_buff *skb, struct crypto_alg *alg) rhash.stat_hash_cnt = atomic64_read(&alg->stats.hash.hash_cnt); rhash.stat_hash_tlen = atomic64_read(&alg->stats.hash.hash_tlen); - rhash.stat_hash_err_cnt = atomic64_read(&alg->stats.hash.hash_err_cnt); + rhash.stat_err_cnt = atomic64_read(&alg->stats.hash.err_cnt); return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash); } @@ -157,7 +157,7 @@ static int crypto_report_shash(struct sk_buff *skb, struct crypto_alg *alg) rhash.stat_hash_cnt = atomic64_read(&alg->stats.hash.hash_cnt); rhash.stat_hash_tlen = atomic64_read(&alg->stats.hash.hash_tlen); - rhash.stat_hash_err_cnt = atomic64_read(&alg->stats.hash.hash_err_cnt); + rhash.stat_err_cnt = atomic64_read(&alg->stats.hash.err_cnt); return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash); } @@ -173,7 +173,7 @@ static int crypto_report_rng(struct sk_buff *skb, struct crypto_alg *alg) rrng.stat_generate_cnt = atomic64_read(&alg->stats.rng.generate_cnt); rrng.stat_generate_tlen = atomic64_read(&alg->stats.rng.generate_tlen); rrng.stat_seed_cnt = atomic64_read(&alg->stats.rng.seed_cnt); - rrng.stat_rng_err_cnt = atomic64_read(&alg->stats.rng.rng_err_cnt); + rrng.stat_err_cnt = atomic64_read(&alg->stats.rng.err_cnt); return nla_put(skb, CRYPTOCFGA_STAT_RNG, sizeof(rrng), &rrng); } diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 8a46ab35479e..a2967c1a08b1 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -376,14 +376,14 @@ struct compress_alg { * @encrypt_tlen: total data size handled by encrypt requests * @decrypt_cnt: number of decrypt requests * @decrypt_tlen: total data size handled by decrypt requests - * @aead_err_cnt: number of 
error for AEAD requests + * @err_cnt: number of error for AEAD requests */ struct crypto_istat_aead { atomic64_t encrypt_cnt; atomic64_t encrypt_tlen; atomic64_t decrypt_cnt; atomic64_t decrypt_tlen; - atomic64_t aead_err_cnt; + atomic64_t err_cnt; }; /* @@ -394,7 +394,7 @@ struct crypto_istat_aead { * @decrypt_tlen: total data size handled by decrypt requests * @verify_cnt: number of verify operation * @sign_cnt: number of sign requests - * @akcipher_err_cnt: number of error for akcipher requests + * @err_cnt: number of error for akcipher requests */ struct crypto_istat_akcipher { atomic64_t encrypt_cnt; @@ -403,7 +403,7 @@ struct crypto_istat_akcipher { atomic64_t decrypt_tlen; atomic64_t verify_cnt; atomic64_t sign_cnt; - atomic64_t akcipher_err_cnt; + atomic64_t err_cnt; }; /* @@ -412,14 +412,14 @@ struct crypto_istat_akcipher { * @encrypt_tlen: total data size handled by encrypt requests * @decrypt_cnt: number of decrypt requests * @decrypt_tlen: total data size handled by decrypt requests - * @cipher_err_cnt: number of error for cipher requests + * @err_cnt: number of error for cipher requests */ struct crypto_istat_cipher { atomic64_t encrypt_cnt; atomic64_t encrypt_tlen; atomic64_t decrypt_cnt; atomic64_t decrypt_tlen; - atomic64_t cipher_err_cnt; + atomic64_t err_cnt; }; /* @@ -428,26 +428,26 @@ struct crypto_istat_cipher { * @compress_tlen: total data size handled by compress requests * @decompress_cnt: number of decompress requests * @decompress_tlen: total data size handled by decompress requests - * @compress_err_cnt: number of error for compress requests + * @err_cnt: number of error for compress requests */ struct crypto_istat_compress { atomic64_t compress_cnt; atomic64_t compress_tlen; atomic64_t decompress_cnt; atomic64_t decompress_tlen; - atomic64_t compress_err_cnt; + atomic64_t err_cnt; }; /* * struct crypto_istat_hash - statistics for has algorithm * @hash_cnt: number of hash requests * @hash_tlen: total data size hashed - * @hash_err_cnt: number of error for hash requests + * @err_cnt: number of error for hash requests */ struct crypto_istat_hash { atomic64_t hash_cnt; atomic64_t hash_tlen; - atomic64_t hash_err_cnt; + atomic64_t err_cnt; }; /* @@ -455,13 +455,13 @@ struct crypto_istat_hash { * @setsecret_cnt: number of setsecrey operation * @generate_public_key_cnt: number of generate_public_key operation * @compute_shared_secret_cnt: number of compute_shared_secret operation - * @kpp_err_cnt: number of error for KPP requests + * @err_cnt: number of error for KPP requests */ struct crypto_istat_kpp { atomic64_t setsecret_cnt; atomic64_t generate_public_key_cnt; atomic64_t compute_shared_secret_cnt; - atomic64_t kpp_err_cnt; + atomic64_t err_cnt; }; /* @@ -469,13 +469,13 @@ struct crypto_istat_kpp { * @generate_cnt: number of RNG generate requests * @generate_tlen: total data size of generated data by the RNG * @seed_cnt: number of times the RNG was seeded - * @rng_err_cnt: number of error for RNG requests + * @err_cnt: number of error for RNG requests */ struct crypto_istat_rng { atomic64_t generate_cnt; atomic64_t generate_tlen; atomic64_t seed_cnt; - atomic64_t rng_err_cnt; + atomic64_t err_cnt; }; #endif /* CONFIG_CRYPTO_STATS */ diff --git a/include/uapi/linux/cryptouser.h b/include/uapi/linux/cryptouser.h index 3a70f025e27d..4dc1603919ce 100644 --- a/include/uapi/linux/cryptouser.h +++ b/include/uapi/linux/cryptouser.h @@ -82,7 +82,7 @@ struct crypto_stat_aead { __u64 stat_encrypt_tlen; __u64 stat_decrypt_cnt; __u64 stat_decrypt_tlen; - __u64 
stat_aead_err_cnt; + __u64 stat_err_cnt; }; struct crypto_stat_akcipher { @@ -93,7 +93,7 @@ struct crypto_stat_akcipher { __u64 stat_decrypt_tlen; __u64 stat_verify_cnt; __u64 stat_sign_cnt; - __u64 stat_akcipher_err_cnt; + __u64 stat_err_cnt; }; struct crypto_stat_cipher { @@ -102,7 +102,7 @@ struct crypto_stat_cipher { __u64 stat_encrypt_tlen; __u64 stat_decrypt_cnt; __u64 stat_decrypt_tlen; - __u64 stat_cipher_err_cnt; + __u64 stat_err_cnt; }; struct crypto_stat_compress { @@ -111,14 +111,14 @@ struct crypto_stat_compress { __u64 stat_compress_tlen; __u64 stat_decompress_cnt; __u64 stat_decompress_tlen; - __u64 stat_compress_err_cnt; + __u64 stat_err_cnt; }; struct crypto_stat_hash { char type[CRYPTO_MAX_NAME]; __u64 stat_hash_cnt; __u64 stat_hash_tlen; - __u64 stat_hash_err_cnt; + __u64 stat_err_cnt; }; struct crypto_stat_kpp { @@ -126,7 +126,7 @@ struct crypto_stat_kpp { __u64 stat_setsecret_cnt; __u64 stat_generate_public_key_cnt; __u64 stat_compute_shared_secret_cnt; - __u64 stat_kpp_err_cnt; + __u64 stat_err_cnt; }; struct crypto_stat_rng { @@ -134,7 +134,7 @@ struct crypto_stat_rng { __u64 stat_generate_cnt; __u64 stat_generate_tlen; __u64 stat_seed_cnt; - __u64 stat_rng_err_cnt; + __u64 stat_err_cnt; }; struct crypto_stat_larval { diff --git a/tools/crypto/getstat.c b/tools/crypto/getstat.c index 57fbb94608d4..9e8ff76420fa 100644 --- a/tools/crypto/getstat.c +++ b/tools/crypto/getstat.c @@ -157,7 +157,7 @@ static int get_stat(const char *drivername) printf("%s\tHash\n\tHash: %llu bytes: %llu\n\tErrors: %llu\n", drivername, rhash->stat_hash_cnt, rhash->stat_hash_tlen, - rhash->stat_hash_err_cnt); + rhash->stat_err_cnt); } else if (tb[CRYPTOCFGA_STAT_COMPRESS]) { struct rtattr *rta = tb[CRYPTOCFGA_STAT_COMPRESS]; struct crypto_stat_compress *rblk = @@ -166,7 +166,7 @@ static int get_stat(const char *drivername) drivername, rblk->stat_compress_cnt, rblk->stat_compress_tlen, rblk->stat_decompress_cnt, rblk->stat_decompress_tlen, - rblk->stat_compress_err_cnt); + rblk->stat_err_cnt); } else if (tb[CRYPTOCFGA_STAT_ACOMP]) { struct rtattr *rta = tb[CRYPTOCFGA_STAT_ACOMP]; struct crypto_stat_compress *rcomp = @@ -175,7 +175,7 @@ static int get_stat(const char *drivername) drivername, rcomp->stat_compress_cnt, rcomp->stat_compress_tlen, rcomp->stat_decompress_cnt, rcomp->stat_decompress_tlen, - rcomp->stat_compress_err_cnt); + rcomp->stat_err_cnt); } else if (tb[CRYPTOCFGA_STAT_AEAD]) { struct rtattr *rta = tb[CRYPTOCFGA_STAT_AEAD]; struct crypto_stat_aead *raead = @@ -184,7 +184,7 @@ static int get_stat(const char *drivername) drivername, raead->stat_encrypt_cnt, raead->stat_encrypt_tlen, raead->stat_decrypt_cnt, raead->stat_decrypt_tlen, - raead->stat_aead_err_cnt); + raead->stat_err_cnt); } else if (tb[CRYPTOCFGA_STAT_BLKCIPHER]) { struct rtattr *rta = tb[CRYPTOCFGA_STAT_BLKCIPHER]; struct crypto_stat_cipher *rblk = @@ -193,7 +193,7 @@ static int get_stat(const char *drivername) drivername, rblk->stat_encrypt_cnt, rblk->stat_encrypt_tlen, rblk->stat_decrypt_cnt, rblk->stat_decrypt_tlen, - rblk->stat_cipher_err_cnt); + rblk->stat_err_cnt); } else if (tb[CRYPTOCFGA_STAT_AKCIPHER]) { struct rtattr *rta = tb[CRYPTOCFGA_STAT_AKCIPHER]; struct crypto_stat_akcipher *rblk = @@ -203,7 +203,7 @@ static int get_stat(const char *drivername) rblk->stat_encrypt_cnt, rblk->stat_encrypt_tlen, rblk->stat_decrypt_cnt, rblk->stat_decrypt_tlen, rblk->stat_sign_cnt, rblk->stat_verify_cnt, - rblk->stat_akcipher_err_cnt); + rblk->stat_err_cnt); } else if (tb[CRYPTOCFGA_STAT_CIPHER]) { struct rtattr *rta = 
tb[CRYPTOCFGA_STAT_CIPHER]; struct crypto_stat_cipher *rblk = @@ -212,7 +212,7 @@ static int get_stat(const char *drivername) drivername, rblk->stat_encrypt_cnt, rblk->stat_encrypt_tlen, rblk->stat_decrypt_cnt, rblk->stat_decrypt_tlen, - rblk->stat_cipher_err_cnt); + rblk->stat_err_cnt); } else if (tb[CRYPTOCFGA_STAT_RNG]) { struct rtattr *rta = tb[CRYPTOCFGA_STAT_RNG]; struct crypto_stat_rng *rrng = @@ -221,7 +221,7 @@ static int get_stat(const char *drivername) drivername, rrng->stat_seed_cnt, rrng->stat_generate_cnt, rrng->stat_generate_tlen, - rrng->stat_rng_err_cnt); + rrng->stat_err_cnt); } else if (tb[CRYPTOCFGA_STAT_KPP]) { struct rtattr *rta = tb[CRYPTOCFGA_STAT_KPP]; struct crypto_stat_kpp *rkpp = @@ -231,7 +231,7 @@ static int get_stat(const char *drivername) rkpp->stat_setsecret_cnt, rkpp->stat_generate_public_key_cnt, rkpp->stat_compute_shared_secret_cnt, - rkpp->stat_kpp_err_cnt); + rkpp->stat_err_cnt); } else { fprintf(stderr, "%s is of an unknown algorithm\n", drivername); } -- cgit v1.2.3 From 1f6669b9716c6c98391b0f756e060892b32b8ca7 Mon Sep 17 00:00:00 2001 From: Corentin Labbe Date: Thu, 29 Nov 2018 14:42:26 +0000 Subject: crypto: user - Add crypto_stats_init This patch add the crypto_stats_init() function. This will permit to remove some ifdef from __crypto_register_alg(). Signed-off-by: Corentin Labbe Signed-off-by: Herbert Xu --- crypto/algapi.c | 10 +++++++--- include/linux/crypto.h | 3 +++ 2 files changed, 10 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/crypto/algapi.c b/crypto/algapi.c index c0d4f9ef6b0f..8b65ada33e5d 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -258,9 +258,7 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg) list_add(&alg->cra_list, &crypto_alg_list); list_add(&larval->alg.cra_list, &crypto_alg_list); -#ifdef CONFIG_CRYPTO_STATS - memset(&alg->stats, 0, sizeof(alg->stats)); -#endif + crypto_stats_init(alg); out: return larval; @@ -1073,6 +1071,12 @@ int crypto_type_has_alg(const char *name, const struct crypto_type *frontend, EXPORT_SYMBOL_GPL(crypto_type_has_alg); #ifdef CONFIG_CRYPTO_STATS +void crypto_stats_init(struct crypto_alg *alg) +{ + memset(&alg->stats, 0, sizeof(alg->stats)); +} +EXPORT_SYMBOL_GPL(crypto_stats_init); + void crypto_stats_get(struct crypto_alg *alg) { crypto_alg_get(alg); diff --git a/include/linux/crypto.h b/include/linux/crypto.h index a2967c1a08b1..9850b41e38ae 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -614,6 +614,7 @@ struct crypto_alg { } CRYPTO_MINALIGN_ATTR; #ifdef CONFIG_CRYPTO_STATS +void crypto_stats_init(struct crypto_alg *alg); void crypto_stats_get(struct crypto_alg *alg); void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, struct crypto_alg *alg); void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, struct crypto_alg *alg); @@ -635,6 +636,8 @@ void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int re void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg); void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg); #else +static inline void crypto_stats_init(struct crypto_alg *alg) +{} static inline void crypto_stats_get(struct crypto_alg *alg) {} static inline void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, struct crypto_alg *alg) -- cgit v1.2.3 From bfad6cb3f8295559216690e1eb9c99003a79b3a0 Mon Sep 17 00:00:00 2001 From: Corentin Labbe Date: Thu, 13 Dec 2018 08:36:38 +0000 Subject: 
crypto: api - document missing stats member This patchs adds missing member of stats documentation. Signed-off-by: Corentin Labbe Signed-off-by: Herbert Xu --- include/linux/crypto.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'include/linux') diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 9850b41e38ae..81e178fb9ed8 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -564,6 +564,13 @@ struct crypto_istat_rng { * @cra_destroy: internally used * * @stats: union of all possible crypto_istat_xxx structures + * @stats.aead: statistics for AEAD algorithm + * @stats.akcipher: statistics for akcipher algorithm + * @stats.cipher: statistics for cipher algorithm + * @stats.compress: statistics for compress algorithm + * @stats.hash: statistics for hash algorithm + * @stats.rng: statistics for rng algorithm + * @stats.kpp: statistics for KPP algorithm * * The struct crypto_alg describes a generic Crypto API algorithm and is common * for all of the transformations. Any variable not documented here shall not -- cgit v1.2.3 From c79b411eaa7257204f89c30651c45cea22278769 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 16 Dec 2018 15:55:06 -0800 Subject: crypto: skcipher - remove remnants of internal IV generators Remove dead code related to internal IV generators, which are no longer used since they've been replaced with the "seqiv" and "echainiv" templates. The removed code includes: - The "givcipher" (GIVCIPHER) algorithm type. No algorithms are registered with this type anymore, so it's unneeded. - The "const char *geniv" member of aead_alg, ablkcipher_alg, and blkcipher_alg. A few algorithms still set this, but it isn't used anymore except to show via /proc/crypto and CRYPTO_MSG_GETALG. Just hardcode "" or "" in those cases. - The 'skcipher_givcrypt_request' structure, which is never used. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- Documentation/crypto/api.rst | 9 ----- Documentation/crypto/architecture.rst | 31 +++----------- crypto/ablkcipher.c | 76 +---------------------------------- crypto/blkcipher.c | 6 +-- crypto/cryptd.c | 4 +- crypto/ctr.c | 2 - crypto/skcipher.c | 6 +-- drivers/crypto/bcm/cipher.c | 1 - drivers/crypto/chelsio/chcr_algo.c | 1 - drivers/crypto/ixp4xx_crypto.c | 5 --- drivers/crypto/nx/nx-aes-ctr.c | 1 - drivers/crypto/omap-aes.c | 1 - drivers/crypto/picoxcell_crypto.c | 3 +- drivers/crypto/talitos.c | 1 - include/crypto/aead.h | 3 -- include/crypto/internal/skcipher.h | 2 - include/crypto/skcipher.h | 13 ------ include/linux/crypto.h | 34 ++-------------- 18 files changed, 17 insertions(+), 182 deletions(-) (limited to 'include/linux') diff --git a/Documentation/crypto/api.rst b/Documentation/crypto/api.rst index 2e519193ab4a..b91b31736df8 100644 --- a/Documentation/crypto/api.rst +++ b/Documentation/crypto/api.rst @@ -1,15 +1,6 @@ Programming Interface ===================== -Please note that the kernel crypto API contains the AEAD givcrypt API -(crypto_aead_giv\* and aead_givcrypt\* function calls in -include/crypto/aead.h). This API is obsolete and will be removed in the -future. To obtain the functionality of an AEAD cipher with internal IV -generation, use the IV generator as a regular cipher. For example, -rfc4106(gcm(aes)) is the AEAD cipher with external IV generation and -seqniv(rfc4106(gcm(aes))) implies that the kernel crypto API generates -the IV. Different IV generators are available. - .. 
class:: toc-title Table of contents diff --git a/Documentation/crypto/architecture.rst b/Documentation/crypto/architecture.rst index ca2d09b991f5..ee8ff0762d7f 100644 --- a/Documentation/crypto/architecture.rst +++ b/Documentation/crypto/architecture.rst @@ -157,10 +157,6 @@ applicable to a cipher, it is not displayed: - rng for random number generator - - givcipher for cipher with associated IV generator (see the geniv - entry below for the specification of the IV generator type used by - the cipher implementation) - - kpp for a Key-agreement Protocol Primitive (KPP) cipher such as an ECDH or DH implementation @@ -174,16 +170,7 @@ applicable to a cipher, it is not displayed: - digestsize: output size of the message digest -- geniv: IV generation type: - - - eseqiv for encrypted sequence number based IV generation - - - seqiv for sequence number based IV generation - - - chainiv for chain iv generation - - - is a marker that the cipher implements IV generation and - handling as it is specific to the given cipher +- geniv: IV generator (obsolete) Key Sizes --------- @@ -218,10 +205,6 @@ the aforementioned cipher types: - CRYPTO_ALG_TYPE_ABLKCIPHER Asynchronous multi-block cipher -- CRYPTO_ALG_TYPE_GIVCIPHER Asynchronous multi-block cipher packed - together with an IV generator (see geniv field in the /proc/crypto - listing for the known IV generators) - - CRYPTO_ALG_TYPE_KPP Key-agreement Protocol Primitive (KPP) such as an ECDH or DH implementation @@ -338,18 +321,14 @@ uses the API applicable to the cipher type specified for the block. The following call sequence is applicable when the IPSEC layer triggers an encryption operation with the esp_output function. During -configuration, the administrator set up the use of rfc4106(gcm(aes)) as -the cipher for ESP. The following call sequence is now depicted in the -ASCII art above: +configuration, the administrator set up the use of seqiv(rfc4106(gcm(aes))) +as the cipher for ESP. The following call sequence is now depicted in +the ASCII art above: 1. esp_output() invokes crypto_aead_encrypt() to trigger an encryption operation of the AEAD cipher with IV generator. - In case of GCM, the SEQIV implementation is registered as GIVCIPHER - in crypto_rfc4106_alloc(). - - The SEQIV performs its operation to generate an IV where the core - function is seqiv_geniv(). + The SEQIV generates the IV. 2. Now, SEQIV uses the AEAD API function calls to invoke the associated AEAD cipher. 
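(Illustrative sketch, not part of this patch: with the GIVCIPHER type gone, a caller that wants kernel-side IV generation simply asks for the template by name, exactly as the updated architecture.rst text above describes for ESP. The function name below is hypothetical; crypto_alloc_aead() and crypto_free_aead() are the existing AEAD API calls.)

#include <linux/err.h>
#include <crypto/aead.h>

static int seqiv_gcm_example(void)
{
        struct crypto_aead *tfm;

        /* Request the IV-generating wrapper purely by template name. */
        tfm = crypto_alloc_aead("seqiv(rfc4106(gcm(aes)))", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        /* ... set the key and issue requests; seqiv supplies the IVs ... */

        crypto_free_aead(tfm);
        return 0;
}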
In our case, during the instantiation of SEQIV, the diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c index b5e9ce19d324..b339587073c3 100644 --- a/crypto/ablkcipher.c +++ b/crypto/ablkcipher.c @@ -368,8 +368,7 @@ static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg) memset(&rblkcipher, 0, sizeof(rblkcipher)); strscpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type)); - strscpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "", - sizeof(rblkcipher.geniv)); + strscpy(rblkcipher.geniv, "", sizeof(rblkcipher.geniv)); rblkcipher.blocksize = alg->cra_blocksize; rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize; @@ -399,7 +398,7 @@ static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg) seq_printf(m, "min keysize : %u\n", ablkcipher->min_keysize); seq_printf(m, "max keysize : %u\n", ablkcipher->max_keysize); seq_printf(m, "ivsize : %u\n", ablkcipher->ivsize); - seq_printf(m, "geniv : %s\n", ablkcipher->geniv ?: ""); + seq_printf(m, "geniv : \n"); } const struct crypto_type crypto_ablkcipher_type = { @@ -411,74 +410,3 @@ const struct crypto_type crypto_ablkcipher_type = { .report = crypto_ablkcipher_report, }; EXPORT_SYMBOL_GPL(crypto_ablkcipher_type); - -static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type, - u32 mask) -{ - struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher; - struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher; - - if (alg->ivsize > PAGE_SIZE / 8) - return -EINVAL; - - crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ? - alg->setkey : setkey; - crt->encrypt = alg->encrypt; - crt->decrypt = alg->decrypt; - crt->base = __crypto_ablkcipher_cast(tfm); - crt->ivsize = alg->ivsize; - - return 0; -} - -#ifdef CONFIG_NET -static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg) -{ - struct crypto_report_blkcipher rblkcipher; - - memset(&rblkcipher, 0, sizeof(rblkcipher)); - - strscpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type)); - strscpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "", - sizeof(rblkcipher.geniv)); - - rblkcipher.blocksize = alg->cra_blocksize; - rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize; - rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize; - rblkcipher.ivsize = alg->cra_ablkcipher.ivsize; - - return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER, - sizeof(rblkcipher), &rblkcipher); -} -#else -static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg) -{ - return -ENOSYS; -} -#endif - -static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg) - __maybe_unused; -static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg) -{ - struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher; - - seq_printf(m, "type : givcipher\n"); - seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ? 
- "yes" : "no"); - seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); - seq_printf(m, "min keysize : %u\n", ablkcipher->min_keysize); - seq_printf(m, "max keysize : %u\n", ablkcipher->max_keysize); - seq_printf(m, "ivsize : %u\n", ablkcipher->ivsize); - seq_printf(m, "geniv : %s\n", ablkcipher->geniv ?: ""); -} - -const struct crypto_type crypto_givcipher_type = { - .ctxsize = crypto_ablkcipher_ctxsize, - .init = crypto_init_givcipher_ops, -#ifdef CONFIG_PROC_FS - .show = crypto_givcipher_show, -#endif - .report = crypto_givcipher_report, -}; -EXPORT_SYMBOL_GPL(crypto_givcipher_type); diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index 193237514e90..c5398bd54942 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c @@ -510,8 +510,7 @@ static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg) memset(&rblkcipher, 0, sizeof(rblkcipher)); strscpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type)); - strscpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "", - sizeof(rblkcipher.geniv)); + strscpy(rblkcipher.geniv, "", sizeof(rblkcipher.geniv)); rblkcipher.blocksize = alg->cra_blocksize; rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize; @@ -537,8 +536,7 @@ static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg) seq_printf(m, "min keysize : %u\n", alg->cra_blkcipher.min_keysize); seq_printf(m, "max keysize : %u\n", alg->cra_blkcipher.max_keysize); seq_printf(m, "ivsize : %u\n", alg->cra_blkcipher.ivsize); - seq_printf(m, "geniv : %s\n", alg->cra_blkcipher.geniv ?: - ""); + seq_printf(m, "geniv : \n"); } const struct crypto_type crypto_blkcipher_type = { diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 7118fb5efbaa..5640e5db7bdb 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -422,8 +422,6 @@ static int cryptd_create_blkcipher(struct crypto_template *tmpl, inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize; inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize; - inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv; - inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx); inst->alg.cra_init = cryptd_blkcipher_init_tfm; @@ -1174,7 +1172,7 @@ struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name, return ERR_PTR(-EINVAL); type = crypto_skcipher_type(type); mask &= ~CRYPTO_ALG_TYPE_MASK; - mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK); + mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK; tfm = crypto_alloc_base(cryptd_alg_name, type, mask); if (IS_ERR(tfm)) return ERR_CAST(tfm); diff --git a/crypto/ctr.c b/crypto/ctr.c index 435b75bd619e..30f3946efc6d 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c @@ -233,8 +233,6 @@ static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb) inst->alg.cra_blkcipher.encrypt = crypto_ctr_crypt; inst->alg.cra_blkcipher.decrypt = crypto_ctr_crypt; - inst->alg.cra_blkcipher.geniv = "chainiv"; - out: crypto_mod_put(alg); return inst; diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 41b4f7f27f45..2a969296bc24 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -579,8 +579,7 @@ static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg) if (alg->cra_type == &crypto_blkcipher_type) return sizeof(struct crypto_blkcipher *); - if (alg->cra_type == &crypto_ablkcipher_type || - alg->cra_type == &crypto_givcipher_type) + if (alg->cra_type == &crypto_ablkcipher_type) return sizeof(struct crypto_ablkcipher *); return crypto_alg_extsize(alg); @@ -844,8 +843,7 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm 
 	if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
 		return crypto_init_skcipher_ops_blkcipher(tfm);
 
-	if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type ||
-	    tfm->__crt_alg->cra_type == &crypto_givcipher_type)
+	if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type)
 		return crypto_init_skcipher_ops_ablkcipher(tfm);
 
 	skcipher->setkey = skcipher_setkey;
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 2ce3a16d3d10..c9393ffb70ed 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -3868,7 +3868,6 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_driver_name = "ctr-aes-iproc",
 			.cra_blocksize = AES_BLOCK_SIZE,
 			.cra_ablkcipher = {
-				/* .geniv = "chainiv", */
 				.min_keysize = AES_MIN_KEY_SIZE,
 				.max_keysize = AES_MAX_KEY_SIZE,
 				.ivsize = AES_BLOCK_SIZE,
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index eedc33128da4..bcef76508dfa 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -3816,7 +3816,6 @@ static struct chcr_alg_template driver_algs[] = {
 			.setkey = chcr_aes_rfc3686_setkey,
 			.encrypt = chcr_aes_encrypt,
 			.decrypt = chcr_aes_decrypt,
-			.geniv = "seqiv",
 			}
 		}
 	},
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 27f7dad2d45d..19fba998b86b 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -1194,7 +1194,6 @@ static struct ixp_alg ixp4xx_algos[] = {
 			.min_keysize = DES_KEY_SIZE,
 			.max_keysize = DES_KEY_SIZE,
 			.ivsize = DES_BLOCK_SIZE,
-			.geniv = "eseqiv",
 			}
 		}
 	},
@@ -1221,7 +1220,6 @@ static struct ixp_alg ixp4xx_algos[] = {
 			.min_keysize = DES3_EDE_KEY_SIZE,
 			.max_keysize = DES3_EDE_KEY_SIZE,
 			.ivsize = DES3_EDE_BLOCK_SIZE,
-			.geniv = "eseqiv",
 			}
 		}
 	},
@@ -1247,7 +1245,6 @@ static struct ixp_alg ixp4xx_algos[] = {
 			.min_keysize = AES_MIN_KEY_SIZE,
 			.max_keysize = AES_MAX_KEY_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
-			.geniv = "eseqiv",
 			}
 		}
 	},
@@ -1273,7 +1270,6 @@ static struct ixp_alg ixp4xx_algos[] = {
 			.min_keysize = AES_MIN_KEY_SIZE,
 			.max_keysize = AES_MAX_KEY_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
-			.geniv = "eseqiv",
 			}
 		}
 	},
@@ -1287,7 +1283,6 @@ static struct ixp_alg ixp4xx_algos[] = {
 			.min_keysize = AES_MIN_KEY_SIZE,
 			.max_keysize = AES_MAX_KEY_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
-			.geniv = "eseqiv",
 			.setkey = ablk_rfc3686_setkey,
 			.encrypt = ablk_rfc3686_crypt,
 			.decrypt = ablk_rfc3686_crypt }
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
index 898c0a280511..5a26fcd75d2d 100644
--- a/drivers/crypto/nx/nx-aes-ctr.c
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -159,7 +159,6 @@ struct crypto_alg nx_ctr3686_aes_alg = {
 		.min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
 		.max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
 		.ivsize = CTR_RFC3686_IV_SIZE,
-		.geniv = "seqiv",
 		.setkey = ctr3686_aes_nx_set_key,
 		.encrypt = ctr3686_aes_nx_crypt,
 		.decrypt = ctr3686_aes_nx_crypt,
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 4c0ea8142923..0120feb2d746 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -749,7 +749,6 @@ static struct crypto_alg algs_ctr[] = {
 	.cra_u.ablkcipher = {
 		.min_keysize = AES_MIN_KEY_SIZE,
 		.max_keysize = AES_MAX_KEY_SIZE,
-		.geniv = "eseqiv",
 		.ivsize = AES_BLOCK_SIZE,
 		.setkey = omap_aes_setkey,
 		.encrypt = omap_aes_ctr_encrypt,
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index a28f1d18fe01..17068b55fea5 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -1585,8 +1585,7 @@ static struct spacc_alg l2_engine_algs[] = {
 		.cra_name = "f8(kasumi)",
 		.cra_driver_name = "f8-kasumi-picoxcell",
 		.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
-		.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER |
-			     CRYPTO_ALG_ASYNC |
+		.cra_flags = CRYPTO_ALG_ASYNC |
 			     CRYPTO_ALG_KERN_DRIVER_ONLY,
 		.cra_blocksize = 8,
 		.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 6988012deca4..45e20707cef8 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -3155,7 +3155,6 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
 		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
 		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
 		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
-		alg->cra_ablkcipher.geniv = "eseqiv";
 		break;
 	case CRYPTO_ALG_TYPE_AEAD:
 		alg = &t_alg->algt.alg.aead.base;
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index b7b8d24cf765..9ad595f97c65 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -115,7 +115,6 @@ struct aead_request {
 * @setkey: see struct skcipher_alg
 * @encrypt: see struct skcipher_alg
 * @decrypt: see struct skcipher_alg
- * @geniv: see struct skcipher_alg
 * @ivsize: see struct skcipher_alg
 * @chunksize: see struct skcipher_alg
 * @init: Initialize the cryptographic transformation object. This function
@@ -142,8 +141,6 @@ struct aead_alg {
 	int (*init)(struct crypto_aead *tfm);
 	void (*exit)(struct crypto_aead *tfm);
 
-	const char *geniv;
-
 	unsigned int ivsize;
 	unsigned int maxauthsize;
 	unsigned int chunksize;
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index e42f7063f245..453e867b4bd9 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -70,8 +70,6 @@ struct skcipher_walk {
 	unsigned int alignmask;
 };
 
-extern const struct crypto_type crypto_givcipher_type;
-
 static inline struct crypto_instance *skcipher_crypto_instance(
 	struct skcipher_instance *inst)
 {
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 480f8301a47d..e555294ed77f 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -39,19 +39,6 @@ struct skcipher_request {
 	void *__ctx[] CRYPTO_MINALIGN_ATTR;
 };
 
-/**
- * struct skcipher_givcrypt_request - Crypto request with IV generation
- * @seq: Sequence number for IV generation
- * @giv: Space for generated IV
- * @creq: The crypto request itself
- */
-struct skcipher_givcrypt_request {
-	u64 seq;
-	u8 *giv;
-
-	struct ablkcipher_request creq;
-};
-
 struct crypto_skcipher {
 	int (*setkey)(struct crypto_skcipher *tfm, const u8 *key,
 		      unsigned int keylen);
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 81e178fb9ed8..902ec171fc6d 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -49,7 +49,6 @@
 #define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004
 #define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005
 #define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005
-#define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006
 #define CRYPTO_ALG_TYPE_KPP 0x00000008
 #define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a
 #define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b
@@ -76,12 +75,6 @@
 */
 #define CRYPTO_ALG_NEED_FALLBACK 0x00000100
 
-/*
- * This bit is set for symmetric key ciphers that have already been wrapped
- * with a generic IV generator to prevent them from being wrapped again.
- */
-#define CRYPTO_ALG_GENIV 0x00000200
-
 /*
 * Set if the algorithm has passed automated run-time testing. Note that
 * if there is no run-time testing for a given algorithm it is considered
@@ -157,7 +150,6 @@ struct crypto_async_request;
 struct crypto_blkcipher;
 struct crypto_tfm;
 struct crypto_type;
-struct skcipher_givcrypt_request;
 
 typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
 
@@ -246,31 +238,16 @@ struct cipher_desc {
 *	be called in parallel with the same transformation object.
 * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt
 *	and the conditions are exactly the same.
- * @givencrypt: Update the IV for encryption. With this function, a cipher
- *	implementation may provide the function on how to update the IV
- *	for encryption.
- * @givdecrypt: Update the IV for decryption. This is the reverse of
- *	@givencrypt .
- * @geniv: The transformation implementation may use an "IV generator" provided
- *	by the kernel crypto API. Several use cases have a predefined
- *	approach how IVs are to be updated. For such use cases, the kernel
- *	crypto API provides ready-to-use implementations that can be
- *	referenced with this variable.
 * @ivsize: IV size applicable for transformation. The consumer must provide an
 *	IV of exactly that size to perform the encrypt or decrypt operation.
 *
- * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are
- * mandatory and must be filled.
+ * All fields except @ivsize are mandatory and must be filled.
 */
 struct ablkcipher_alg {
 	int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
 		      unsigned int keylen);
 	int (*encrypt)(struct ablkcipher_request *req);
 	int (*decrypt)(struct ablkcipher_request *req);
-	int (*givencrypt)(struct skcipher_givcrypt_request *req);
-	int (*givdecrypt)(struct skcipher_givcrypt_request *req);
-
-	const char *geniv;
 
 	unsigned int min_keysize;
 	unsigned int max_keysize;
@@ -284,10 +261,9 @@ struct ablkcipher_alg {
 * @setkey: see struct ablkcipher_alg
 * @encrypt: see struct ablkcipher_alg
 * @decrypt: see struct ablkcipher_alg
- * @geniv: see struct ablkcipher_alg
 * @ivsize: see struct ablkcipher_alg
 *
- * All fields except @geniv and @ivsize are mandatory and must be filled.
+ * All fields except @ivsize are mandatory and must be filled.
 */
 struct blkcipher_alg {
 	int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
@@ -299,8 +275,6 @@ struct blkcipher_alg {
 			  struct scatterlist *dst, struct scatterlist *src,
 			  unsigned int nbytes);
 
-	const char *geniv;
-
 	unsigned int min_keysize;
 	unsigned int max_keysize;
 	unsigned int ivsize;
@@ -931,14 +905,14 @@ static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast(
 
 static inline u32 crypto_skcipher_type(u32 type)
 {
-	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
+	type &= ~CRYPTO_ALG_TYPE_MASK;
 	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
 	return type;
 }
 
 static inline u32 crypto_skcipher_mask(u32 mask)
 {
-	mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
+	mask &= ~CRYPTO_ALG_TYPE_MASK;
 	mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK;
 	return mask;
 }
--
cgit v1.2.3
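With the geniv/givcipher plumbing removed by the patch above, a consumer of the symmetric-cipher API generates or tracks its own IV and passes it explicitly to each request through the skcipher interface. The following is a minimal sketch of that usage, not code from this patch: the function name example_ctr_encrypt, the choice of "ctr(aes)", and the single-buffer handling are illustrative assumptions.

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/scatterlist.h>

/* Encrypt @len bytes in place; @buf must be a DMA-able (e.g. kmalloc'd) buffer. */
static int example_ctr_encrypt(const u8 *key, unsigned int keylen,
			       u8 *buf, unsigned int len)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 iv[16];	/* AES_BLOCK_SIZE for ctr(aes) */
	int err;

	tfm = crypto_alloc_skcipher("ctr(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* The caller now owns IV generation; no geniv template does it. */
	get_random_bytes(iv, sizeof(iv));

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* Wait synchronously even if the implementation is asynchronous. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}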