From 0a596b0682a7ce37e26c36629816f105c6459d06 Mon Sep 17 00:00:00 2001
From: Herbert Xu
Date: Sat, 26 Aug 2023 16:36:41 +0800
Subject: KEYS: Include linux/errno.h in linux/verification.h

Add inclusion of linux/errno.h as otherwise the reference to EINVAL
may be invalid.

Fixes: f3cf4134c5c6 ("bpf: Add bpf_lookup_*_key() and bpf_key_put() kfuncs")
Reported-by: kernel test robot
Closes: https://lore.kernel.org/oe-kbuild-all/202308261414.HKw1Mrip-lkp@intel.com/
Signed-off-by: Herbert Xu
---
 include/linux/verification.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include/linux')

diff --git a/include/linux/verification.h b/include/linux/verification.h
index f34e50ebcf60..cb2d47f28091 100644
--- a/include/linux/verification.h
+++ b/include/linux/verification.h
@@ -8,6 +8,7 @@
 #ifndef _LINUX_VERIFICATION_H
 #define _LINUX_VERIFICATION_H
 
+#include <linux/errno.h>
 #include <linux/types.h>
 
 /*
-- 
cgit v1.2.3

From b58a36008bfa1aadf55f516bcbfae40c779eb54b Mon Sep 17 00:00:00 2001
From: Stefan Wahren
Date: Wed, 6 Sep 2023 01:27:57 +0200
Subject: hwrng: bcm2835 - Fix hwrng throughput regression

The last RCU stall fix caused a massive throughput regression of the
hwrng on Raspberry Pi 0 - 3. hwrng_msleep doesn't sleep precisely enough
and usleep_range doesn't allow scheduling. So try to restore the best
possible throughput by introducing hwrng_yield, which sleeps
interruptibly for one jiffy.

Some performance measurements on Raspberry Pi 3B+ (arm64/defconfig):

  sudo dd if=/dev/hwrng of=/dev/null count=1 bs=10000

  cpu_relax           ~138025 Bytes / sec
  hwrng_msleep(1000)      ~13 Bytes / sec
  hwrng_yield           ~2510 Bytes / sec

Fixes: 96cb9d055445 ("hwrng: bcm2835 - use hwrng_msleep() instead of cpu_relax()")
Link: https://lore.kernel.org/linux-arm-kernel/bc97ece5-44a3-4c4e-77da-2db3eb66b128@gmx.net/
Signed-off-by: Stefan Wahren
Reviewed-by: Jason A.
Donenfeld Signed-off-by: Herbert Xu --- drivers/char/hw_random/bcm2835-rng.c | 2 +- drivers/char/hw_random/core.c | 6 ++++++ include/linux/hw_random.h | 1 + 3 files changed, 8 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c index eb04b12f9f01..b03e80300627 100644 --- a/drivers/char/hw_random/bcm2835-rng.c +++ b/drivers/char/hw_random/bcm2835-rng.c @@ -70,7 +70,7 @@ static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max, while ((rng_readl(priv, RNG_STATUS) >> 24) == 0) { if (!wait) return 0; - hwrng_msleep(rng, 1000); + hwrng_yield(rng); } num_words = rng_readl(priv, RNG_STATUS) >> 24; diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index e3598ec9cfca..420f155d251f 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -678,6 +678,12 @@ long hwrng_msleep(struct hwrng *rng, unsigned int msecs) } EXPORT_SYMBOL_GPL(hwrng_msleep); +long hwrng_yield(struct hwrng *rng) +{ + return wait_for_completion_interruptible_timeout(&rng->dying, 1); +} +EXPORT_SYMBOL_GPL(hwrng_yield); + static int __init hwrng_modinit(void) { int ret; diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h index 8a3115516a1b..136e9842120e 100644 --- a/include/linux/hw_random.h +++ b/include/linux/hw_random.h @@ -63,5 +63,6 @@ extern void hwrng_unregister(struct hwrng *rng); extern void devm_hwrng_unregister(struct device *dve, struct hwrng *rng); extern long hwrng_msleep(struct hwrng *rng, unsigned int msecs); +extern long hwrng_yield(struct hwrng *rng); #endif /* LINUX_HWRANDOM_H_ */ -- cgit v1.2.3 From b64d143b752932ef483d0ed8d00958f1832dd6bc Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 14 Sep 2023 16:28:23 +0800 Subject: crypto: hash - Hide CRYPTO_ALG_TYPE_AHASH_MASK Move the macro CRYPTO_ALG_TYPE_AHASH_MASK out of linux/crypto.h and into crypto/ahash.c so that it's not visible to users of the Crypto API. Also remove the unused CRYPTO_ALG_TYPE_HASH_MASK macro. Signed-off-by: Herbert Xu --- crypto/ahash.c | 2 ++ include/linux/crypto.h | 2 -- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/crypto/ahash.c b/crypto/ahash.c index 709ef0940799..213bb3e9f245 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -21,6 +21,8 @@ #include "hash.h" +#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e + static const struct crypto_type crypto_ahash_type; struct ahash_request_priv { diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 31f6fee0c36c..a0780deb017a 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -35,8 +35,6 @@ #define CRYPTO_ALG_TYPE_SHASH 0x0000000e #define CRYPTO_ALG_TYPE_AHASH 0x0000000f -#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e -#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e #define CRYPTO_ALG_TYPE_ACOMPRESS_MASK 0x0000000e #define CRYPTO_ALG_LARVAL 0x00000010 -- cgit v1.2.3 From 31865c4c4db2b742fec6ccbff80483fa3e7ab9b9 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 14 Sep 2023 16:28:24 +0800 Subject: crypto: skcipher - Add lskcipher Add a new API type lskcipher designed for taking straight kernel pointers instead of SG lists. Its relationship to skcipher will be analogous to that between shash and ahash. 
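
To make the intended calling convention concrete, here is a minimal,
hypothetical consumer of the API this patch introduces, sketched purely
from the declarations added below. The "cbc(aes)" name, the zeroed key,
and demo_lskcipher_encrypt() are illustrative assumptions, and the sketch
presumes that an lskcipher implementation of the requested algorithm is
actually registered:

#include <crypto/skcipher.h>
#include <linux/err.h>

static int demo_lskcipher_encrypt(u8 *buf, unsigned int len)
{
        u8 key[16] = {};        /* illustrative all-zero AES-128 key */
        u8 iv[16] = {};         /* must be crypto_lskcipher_ivsize() bytes */
        struct crypto_lskcipher *tfm;
        int err;

        tfm = crypto_alloc_lskcipher("cbc(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_lskcipher_setkey(tfm, key, sizeof(key));
        if (!err)
                /*
                 * src and dst are plain kernel pointers; encrypt returns
                 * the number of unprocessed trailing bytes (0 for
                 * block-aligned input) or a negative errno.
                 */
                err = crypto_lskcipher_encrypt(tfm, buf, buf, len, iv);

        crypto_free_lskcipher(tfm);
        return err;
}

The point of the sketch is the src/dst arguments: linear buffers in and
out, with none of the scatterlist setup an skcipher request requires.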
Signed-off-by: Herbert Xu --- crypto/Makefile | 6 +- crypto/cryptd.c | 2 +- crypto/lskcipher.c | 594 +++++++++++++++++++++++++++++++++++++ crypto/skcipher.c | 75 +++-- crypto/skcipher.h | 30 ++ include/crypto/internal/skcipher.h | 114 ++++++- include/crypto/skcipher.h | 309 ++++++++++++++++++- include/linux/crypto.h | 1 + 8 files changed, 1086 insertions(+), 45 deletions(-) create mode 100644 crypto/lskcipher.c create mode 100644 crypto/skcipher.h (limited to 'include/linux') diff --git a/crypto/Makefile b/crypto/Makefile index 953a7e105e58..5ac6876f935a 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -16,7 +16,11 @@ obj-$(CONFIG_CRYPTO_ALGAPI2) += crypto_algapi.o obj-$(CONFIG_CRYPTO_AEAD2) += aead.o obj-$(CONFIG_CRYPTO_GENIV) += geniv.o -obj-$(CONFIG_CRYPTO_SKCIPHER2) += skcipher.o +crypto_skcipher-y += lskcipher.o +crypto_skcipher-y += skcipher.o + +obj-$(CONFIG_CRYPTO_SKCIPHER2) += crypto_skcipher.o + obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o obj-$(CONFIG_CRYPTO_ECHAINIV) += echainiv.o diff --git a/crypto/cryptd.c b/crypto/cryptd.c index bbcc368b6a55..194a92d677b9 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -929,7 +929,7 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb) return PTR_ERR(algt); switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { - case CRYPTO_ALG_TYPE_SKCIPHER: + case CRYPTO_ALG_TYPE_LSKCIPHER: return cryptd_create_skcipher(tmpl, tb, algt, &queue); case CRYPTO_ALG_TYPE_HASH: return cryptd_create_hash(tmpl, tb, algt, &queue); diff --git a/crypto/lskcipher.c b/crypto/lskcipher.c new file mode 100644 index 000000000000..3343c6d955da --- /dev/null +++ b/crypto/lskcipher.c @@ -0,0 +1,594 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Linear symmetric key cipher operations. + * + * Generic encrypt/decrypt wrapper for ciphers. 
+ *
+ * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+
+#include <crypto/internal/skcipher.h>
+#include <linux/cryptouser.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <net/netlink.h>
+#include "skcipher.h"
+
+static inline struct crypto_lskcipher *__crypto_lskcipher_cast(
+        struct crypto_tfm *tfm)
+{
+        return container_of(tfm, struct crypto_lskcipher, base);
+}
+
+static inline struct lskcipher_alg *__crypto_lskcipher_alg(
+        struct crypto_alg *alg)
+{
+        return container_of(alg, struct lskcipher_alg, co.base);
+}
+
+static inline struct crypto_istat_cipher *lskcipher_get_stat(
+        struct lskcipher_alg *alg)
+{
+        return skcipher_get_stat_common(&alg->co);
+}
+
+static inline int crypto_lskcipher_errstat(struct lskcipher_alg *alg, int err)
+{
+        struct crypto_istat_cipher *istat = lskcipher_get_stat(alg);
+
+        if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+                return err;
+
+        if (err)
+                atomic64_inc(&istat->err_cnt);
+
+        return err;
+}
+
+static int lskcipher_setkey_unaligned(struct crypto_lskcipher *tfm,
+                                      const u8 *key, unsigned int keylen)
+{
+        unsigned long alignmask = crypto_lskcipher_alignmask(tfm);
+        struct lskcipher_alg *cipher = crypto_lskcipher_alg(tfm);
+        u8 *buffer, *alignbuffer;
+        unsigned long absize;
+        int ret;
+
+        absize = keylen + alignmask;
+        buffer = kmalloc(absize, GFP_ATOMIC);
+        if (!buffer)
+                return -ENOMEM;
+
+        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
+        memcpy(alignbuffer, key, keylen);
+        ret = cipher->setkey(tfm, alignbuffer, keylen);
+        kfree_sensitive(buffer);
+        return ret;
+}
+
+int crypto_lskcipher_setkey(struct crypto_lskcipher *tfm, const u8 *key,
+                            unsigned int keylen)
+{
+        unsigned long alignmask = crypto_lskcipher_alignmask(tfm);
+        struct lskcipher_alg *cipher = crypto_lskcipher_alg(tfm);
+
+        if (keylen < cipher->co.min_keysize || keylen > cipher->co.max_keysize)
+                return -EINVAL;
+
+        if ((unsigned long)key & alignmask)
+                return lskcipher_setkey_unaligned(tfm, key, keylen);
+        else
+                return cipher->setkey(tfm, key, keylen);
+}
+EXPORT_SYMBOL_GPL(crypto_lskcipher_setkey);
+
+static int crypto_lskcipher_crypt_unaligned(
+        struct crypto_lskcipher *tfm, const u8 *src, u8 *dst, unsigned len,
+        u8 *iv, int (*crypt)(struct crypto_lskcipher *tfm, const u8 *src,
+                             u8 *dst, unsigned len, u8 *iv, bool final))
+{
+        unsigned ivsize = crypto_lskcipher_ivsize(tfm);
+        unsigned bs = crypto_lskcipher_blocksize(tfm);
+        unsigned cs = crypto_lskcipher_chunksize(tfm);
+        int err;
+        u8 *tiv;
+        u8 *p;
+
+        BUILD_BUG_ON(MAX_CIPHER_BLOCKSIZE > PAGE_SIZE ||
+                     MAX_CIPHER_ALIGNMASK >= PAGE_SIZE);
+
+        tiv = kmalloc(PAGE_SIZE, GFP_ATOMIC);
+        if (!tiv)
+                return -ENOMEM;
+
+        memcpy(tiv, iv, ivsize);
+
+        p = kmalloc(PAGE_SIZE, GFP_ATOMIC);
+        err = -ENOMEM;
+        if (!p)
+                goto out;
+
+        while (len >= bs) {
+                unsigned chunk = min((unsigned)PAGE_SIZE, len);
+                int err;
+
+                if (chunk > cs)
+                        chunk &= ~(cs - 1);
+
+                memcpy(p, src, chunk);
+                err = crypt(tfm, p, p, chunk, tiv, true);
+                if (err)
+                        goto out;
+
+                memcpy(dst, p, chunk);
+                src += chunk;
+                dst += chunk;
+                len -= chunk;
+        }
+
+        err = len ?
-EINVAL : 0; + +out: + memcpy(iv, tiv, ivsize); + kfree_sensitive(p); + kfree_sensitive(tiv); + return err; +} + +static int crypto_lskcipher_crypt(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv, + int (*crypt)(struct crypto_lskcipher *tfm, + const u8 *src, u8 *dst, + unsigned len, u8 *iv, + bool final)) +{ + unsigned long alignmask = crypto_lskcipher_alignmask(tfm); + struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm); + int ret; + + if (((unsigned long)src | (unsigned long)dst | (unsigned long)iv) & + alignmask) { + ret = crypto_lskcipher_crypt_unaligned(tfm, src, dst, len, iv, + crypt); + goto out; + } + + ret = crypt(tfm, src, dst, len, iv, true); + +out: + return crypto_lskcipher_errstat(alg, ret); +} + +int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv) +{ + struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm); + + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { + struct crypto_istat_cipher *istat = lskcipher_get_stat(alg); + + atomic64_inc(&istat->encrypt_cnt); + atomic64_add(len, &istat->encrypt_tlen); + } + + return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->encrypt); +} +EXPORT_SYMBOL_GPL(crypto_lskcipher_encrypt); + +int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv) +{ + struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm); + + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { + struct crypto_istat_cipher *istat = lskcipher_get_stat(alg); + + atomic64_inc(&istat->decrypt_cnt); + atomic64_add(len, &istat->decrypt_tlen); + } + + return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->decrypt); +} +EXPORT_SYMBOL_GPL(crypto_lskcipher_decrypt); + +int crypto_lskcipher_setkey_sg(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct crypto_lskcipher **ctx = crypto_skcipher_ctx(tfm); + + return crypto_lskcipher_setkey(*ctx, key, keylen); +} + +static int crypto_lskcipher_crypt_sg(struct skcipher_request *req, + int (*crypt)(struct crypto_lskcipher *tfm, + const u8 *src, u8 *dst, + unsigned len, u8 *iv, + bool final)) +{ + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher); + struct crypto_lskcipher *tfm = *ctx; + struct skcipher_walk walk; + int err; + + err = skcipher_walk_virt(&walk, req, false); + + while (walk.nbytes) { + err = crypt(tfm, walk.src.virt.addr, walk.dst.virt.addr, + walk.nbytes, walk.iv, walk.nbytes == walk.total); + err = skcipher_walk_done(&walk, err); + } + + return err; +} + +int crypto_lskcipher_encrypt_sg(struct skcipher_request *req) +{ + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher); + struct lskcipher_alg *alg = crypto_lskcipher_alg(*ctx); + + return crypto_lskcipher_crypt_sg(req, alg->encrypt); +} + +int crypto_lskcipher_decrypt_sg(struct skcipher_request *req) +{ + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher); + struct lskcipher_alg *alg = crypto_lskcipher_alg(*ctx); + + return crypto_lskcipher_crypt_sg(req, alg->decrypt); +} + +static void crypto_lskcipher_exit_tfm(struct crypto_tfm *tfm) +{ + struct crypto_lskcipher *skcipher = __crypto_lskcipher_cast(tfm); + struct lskcipher_alg *alg = crypto_lskcipher_alg(skcipher); + + alg->exit(skcipher); +} + +static int crypto_lskcipher_init_tfm(struct crypto_tfm *tfm) +{ + struct crypto_lskcipher *skcipher = 
__crypto_lskcipher_cast(tfm); + struct lskcipher_alg *alg = crypto_lskcipher_alg(skcipher); + + if (alg->exit) + skcipher->base.exit = crypto_lskcipher_exit_tfm; + + if (alg->init) + return alg->init(skcipher); + + return 0; +} + +static void crypto_lskcipher_free_instance(struct crypto_instance *inst) +{ + struct lskcipher_instance *skcipher = + container_of(inst, struct lskcipher_instance, s.base); + + skcipher->free(skcipher); +} + +static void __maybe_unused crypto_lskcipher_show( + struct seq_file *m, struct crypto_alg *alg) +{ + struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg); + + seq_printf(m, "type : lskcipher\n"); + seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); + seq_printf(m, "min keysize : %u\n", skcipher->co.min_keysize); + seq_printf(m, "max keysize : %u\n", skcipher->co.max_keysize); + seq_printf(m, "ivsize : %u\n", skcipher->co.ivsize); + seq_printf(m, "chunksize : %u\n", skcipher->co.chunksize); +} + +static int __maybe_unused crypto_lskcipher_report( + struct sk_buff *skb, struct crypto_alg *alg) +{ + struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg); + struct crypto_report_blkcipher rblkcipher; + + memset(&rblkcipher, 0, sizeof(rblkcipher)); + + strscpy(rblkcipher.type, "lskcipher", sizeof(rblkcipher.type)); + strscpy(rblkcipher.geniv, "", sizeof(rblkcipher.geniv)); + + rblkcipher.blocksize = alg->cra_blocksize; + rblkcipher.min_keysize = skcipher->co.min_keysize; + rblkcipher.max_keysize = skcipher->co.max_keysize; + rblkcipher.ivsize = skcipher->co.ivsize; + + return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER, + sizeof(rblkcipher), &rblkcipher); +} + +static int __maybe_unused crypto_lskcipher_report_stat( + struct sk_buff *skb, struct crypto_alg *alg) +{ + struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg); + struct crypto_istat_cipher *istat; + struct crypto_stat_cipher rcipher; + + istat = lskcipher_get_stat(skcipher); + + memset(&rcipher, 0, sizeof(rcipher)); + + strscpy(rcipher.type, "cipher", sizeof(rcipher.type)); + + rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt); + rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen); + rcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt); + rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen); + rcipher.stat_err_cnt = atomic64_read(&istat->err_cnt); + + return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher); +} + +static const struct crypto_type crypto_lskcipher_type = { + .extsize = crypto_alg_extsize, + .init_tfm = crypto_lskcipher_init_tfm, + .free = crypto_lskcipher_free_instance, +#ifdef CONFIG_PROC_FS + .show = crypto_lskcipher_show, +#endif +#if IS_ENABLED(CONFIG_CRYPTO_USER) + .report = crypto_lskcipher_report, +#endif +#ifdef CONFIG_CRYPTO_STATS + .report_stat = crypto_lskcipher_report_stat, +#endif + .maskclear = ~CRYPTO_ALG_TYPE_MASK, + .maskset = CRYPTO_ALG_TYPE_MASK, + .type = CRYPTO_ALG_TYPE_LSKCIPHER, + .tfmsize = offsetof(struct crypto_lskcipher, base), +}; + +static void crypto_lskcipher_exit_tfm_sg(struct crypto_tfm *tfm) +{ + struct crypto_lskcipher **ctx = crypto_tfm_ctx(tfm); + + crypto_free_lskcipher(*ctx); +} + +int crypto_init_lskcipher_ops_sg(struct crypto_tfm *tfm) +{ + struct crypto_lskcipher **ctx = crypto_tfm_ctx(tfm); + struct crypto_alg *calg = tfm->__crt_alg; + struct crypto_lskcipher *skcipher; + + if (!crypto_mod_get(calg)) + return -EAGAIN; + + skcipher = crypto_create_tfm(calg, &crypto_lskcipher_type); + if (IS_ERR(skcipher)) { + crypto_mod_put(calg); + return PTR_ERR(skcipher); + } + + 
*ctx = skcipher; + tfm->exit = crypto_lskcipher_exit_tfm_sg; + + return 0; +} + +int crypto_grab_lskcipher(struct crypto_lskcipher_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask) +{ + spawn->base.frontend = &crypto_lskcipher_type; + return crypto_grab_spawn(&spawn->base, inst, name, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_grab_lskcipher); + +struct crypto_lskcipher *crypto_alloc_lskcipher(const char *alg_name, + u32 type, u32 mask) +{ + return crypto_alloc_tfm(alg_name, &crypto_lskcipher_type, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_alloc_lskcipher); + +static int lskcipher_prepare_alg(struct lskcipher_alg *alg) +{ + struct crypto_alg *base = &alg->co.base; + int err; + + err = skcipher_prepare_alg_common(&alg->co); + if (err) + return err; + + if (alg->co.chunksize & (alg->co.chunksize - 1)) + return -EINVAL; + + base->cra_type = &crypto_lskcipher_type; + base->cra_flags |= CRYPTO_ALG_TYPE_LSKCIPHER; + + return 0; +} + +int crypto_register_lskcipher(struct lskcipher_alg *alg) +{ + struct crypto_alg *base = &alg->co.base; + int err; + + err = lskcipher_prepare_alg(alg); + if (err) + return err; + + return crypto_register_alg(base); +} +EXPORT_SYMBOL_GPL(crypto_register_lskcipher); + +void crypto_unregister_lskcipher(struct lskcipher_alg *alg) +{ + crypto_unregister_alg(&alg->co.base); +} +EXPORT_SYMBOL_GPL(crypto_unregister_lskcipher); + +int crypto_register_lskciphers(struct lskcipher_alg *algs, int count) +{ + int i, ret; + + for (i = 0; i < count; i++) { + ret = crypto_register_lskcipher(&algs[i]); + if (ret) + goto err; + } + + return 0; + +err: + for (--i; i >= 0; --i) + crypto_unregister_lskcipher(&algs[i]); + + return ret; +} +EXPORT_SYMBOL_GPL(crypto_register_lskciphers); + +void crypto_unregister_lskciphers(struct lskcipher_alg *algs, int count) +{ + int i; + + for (i = count - 1; i >= 0; --i) + crypto_unregister_lskcipher(&algs[i]); +} +EXPORT_SYMBOL_GPL(crypto_unregister_lskciphers); + +int lskcipher_register_instance(struct crypto_template *tmpl, + struct lskcipher_instance *inst) +{ + int err; + + if (WARN_ON(!inst->free)) + return -EINVAL; + + err = lskcipher_prepare_alg(&inst->alg); + if (err) + return err; + + return crypto_register_instance(tmpl, lskcipher_crypto_instance(inst)); +} +EXPORT_SYMBOL_GPL(lskcipher_register_instance); + +static int lskcipher_setkey_simple(struct crypto_lskcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct crypto_lskcipher *cipher = lskcipher_cipher_simple(tfm); + + crypto_lskcipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK); + crypto_lskcipher_set_flags(cipher, crypto_lskcipher_get_flags(tfm) & + CRYPTO_TFM_REQ_MASK); + return crypto_lskcipher_setkey(cipher, key, keylen); +} + +static int lskcipher_init_tfm_simple(struct crypto_lskcipher *tfm) +{ + struct lskcipher_instance *inst = lskcipher_alg_instance(tfm); + struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm); + struct crypto_lskcipher_spawn *spawn; + struct crypto_lskcipher *cipher; + + spawn = lskcipher_instance_ctx(inst); + cipher = crypto_spawn_lskcipher(spawn); + if (IS_ERR(cipher)) + return PTR_ERR(cipher); + + *ctx = cipher; + return 0; +} + +static void lskcipher_exit_tfm_simple(struct crypto_lskcipher *tfm) +{ + struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm); + + crypto_free_lskcipher(*ctx); +} + +static void lskcipher_free_instance_simple(struct lskcipher_instance *inst) +{ + crypto_drop_lskcipher(lskcipher_instance_ctx(inst)); + kfree(inst); +} + +/** + * lskcipher_alloc_instance_simple - allocate instance of 
simple block cipher + * + * Allocate an lskcipher_instance for a simple block cipher mode of operation, + * e.g. cbc or ecb. The instance context will have just a single crypto_spawn, + * that for the underlying cipher. The {min,max}_keysize, ivsize, blocksize, + * alignmask, and priority are set from the underlying cipher but can be + * overridden if needed. The tfm context defaults to + * struct crypto_lskcipher *, and default ->setkey(), ->init(), and + * ->exit() methods are installed. + * + * @tmpl: the template being instantiated + * @tb: the template parameters + * + * Return: a pointer to the new instance, or an ERR_PTR(). The caller still + * needs to register the instance. + */ +struct lskcipher_instance *lskcipher_alloc_instance_simple( + struct crypto_template *tmpl, struct rtattr **tb) +{ + u32 mask; + struct lskcipher_instance *inst; + struct crypto_lskcipher_spawn *spawn; + struct lskcipher_alg *cipher_alg; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_LSKCIPHER, &mask); + if (err) + return ERR_PTR(err); + + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) + return ERR_PTR(-ENOMEM); + + spawn = lskcipher_instance_ctx(inst); + err = crypto_grab_lskcipher(spawn, + lskcipher_crypto_instance(inst), + crypto_attr_alg_name(tb[1]), 0, mask); + if (err) + goto err_free_inst; + cipher_alg = crypto_lskcipher_spawn_alg(spawn); + + err = crypto_inst_setname(lskcipher_crypto_instance(inst), tmpl->name, + &cipher_alg->co.base); + if (err) + goto err_free_inst; + + /* Don't allow nesting. */ + err = -ELOOP; + if ((cipher_alg->co.base.cra_flags & CRYPTO_ALG_INSTANCE)) + goto err_free_inst; + + err = -EINVAL; + if (cipher_alg->co.ivsize) + goto err_free_inst; + + inst->free = lskcipher_free_instance_simple; + + /* Default algorithm properties, can be overridden */ + inst->alg.co.base.cra_blocksize = cipher_alg->co.base.cra_blocksize; + inst->alg.co.base.cra_alignmask = cipher_alg->co.base.cra_alignmask; + inst->alg.co.base.cra_priority = cipher_alg->co.base.cra_priority; + inst->alg.co.min_keysize = cipher_alg->co.min_keysize; + inst->alg.co.max_keysize = cipher_alg->co.max_keysize; + inst->alg.co.ivsize = cipher_alg->co.base.cra_blocksize; + + /* Use struct crypto_lskcipher * by default, can be overridden */ + inst->alg.co.base.cra_ctxsize = sizeof(struct crypto_lskcipher *); + inst->alg.setkey = lskcipher_setkey_simple; + inst->alg.init = lskcipher_init_tfm_simple; + inst->alg.exit = lskcipher_exit_tfm_simple; + + return inst; + +err_free_inst: + lskcipher_free_instance_simple(inst); + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(lskcipher_alloc_instance_simple); diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 7b275716cf4e..b9496dc8a609 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -24,8 +24,9 @@ #include #include #include +#include "skcipher.h" -#include "internal.h" +#define CRYPTO_ALG_TYPE_SKCIPHER_MASK 0x0000000e enum { SKCIPHER_WALK_PHYS = 1 << 0, @@ -43,6 +44,8 @@ struct skcipher_walk_buffer { u8 buffer[]; }; +static const struct crypto_type crypto_skcipher_type; + static int skcipher_walk_next(struct skcipher_walk *walk); static inline void skcipher_map_src(struct skcipher_walk *walk) @@ -89,11 +92,7 @@ static inline struct skcipher_alg *__crypto_skcipher_alg( static inline struct crypto_istat_cipher *skcipher_get_stat( struct skcipher_alg *alg) { -#ifdef CONFIG_CRYPTO_STATS - return &alg->stat; -#else - return NULL; -#endif + return skcipher_get_stat_common(&alg->co); } static inline int crypto_skcipher_errstat(struct 
skcipher_alg *alg, int err) @@ -468,6 +467,7 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk, struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); walk->total = req->cryptlen; walk->nbytes = 0; @@ -485,10 +485,14 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk, SKCIPHER_WALK_SLEEP : 0; walk->blocksize = crypto_skcipher_blocksize(tfm); - walk->stride = crypto_skcipher_walksize(tfm); walk->ivsize = crypto_skcipher_ivsize(tfm); walk->alignmask = crypto_skcipher_alignmask(tfm); + if (alg->co.base.cra_type != &crypto_skcipher_type) + walk->stride = alg->co.chunksize; + else + walk->stride = alg->walksize; + return skcipher_walk_first(walk); } @@ -616,6 +620,11 @@ int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned long alignmask = crypto_skcipher_alignmask(tfm); int err; + if (cipher->co.base.cra_type != &crypto_skcipher_type) { + err = crypto_lskcipher_setkey_sg(tfm, key, keylen); + goto out; + } + if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) return -EINVAL; @@ -624,6 +633,7 @@ int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, else err = cipher->setkey(tfm, key, keylen); +out: if (unlikely(err)) { skcipher_set_needkey(tfm); return err; @@ -649,6 +659,8 @@ int crypto_skcipher_encrypt(struct skcipher_request *req) if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) ret = -ENOKEY; + else if (alg->co.base.cra_type != &crypto_skcipher_type) + ret = crypto_lskcipher_encrypt_sg(req); else ret = alg->encrypt(req); @@ -671,6 +683,8 @@ int crypto_skcipher_decrypt(struct skcipher_request *req) if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) ret = -ENOKEY; + else if (alg->co.base.cra_type != &crypto_skcipher_type) + ret = crypto_lskcipher_decrypt_sg(req); else ret = alg->decrypt(req); @@ -693,6 +707,9 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm) skcipher_set_needkey(skcipher); + if (tfm->__crt_alg->cra_type != &crypto_skcipher_type) + return crypto_init_lskcipher_ops_sg(tfm); + if (alg->exit) skcipher->base.exit = crypto_skcipher_exit_tfm; @@ -702,6 +719,14 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm) return 0; } +static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg) +{ + if (alg->cra_type != &crypto_skcipher_type) + return sizeof(struct crypto_lskcipher *); + + return crypto_alg_extsize(alg); +} + static void crypto_skcipher_free_instance(struct crypto_instance *inst) { struct skcipher_instance *skcipher = @@ -770,7 +795,7 @@ static int __maybe_unused crypto_skcipher_report_stat( } static const struct crypto_type crypto_skcipher_type = { - .extsize = crypto_alg_extsize, + .extsize = crypto_skcipher_extsize, .init_tfm = crypto_skcipher_init_tfm, .free = crypto_skcipher_free_instance, #ifdef CONFIG_PROC_FS @@ -783,7 +808,7 @@ static const struct crypto_type crypto_skcipher_type = { .report_stat = crypto_skcipher_report_stat, #endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, - .maskset = CRYPTO_ALG_TYPE_MASK, + .maskset = CRYPTO_ALG_TYPE_SKCIPHER_MASK, .type = CRYPTO_ALG_TYPE_SKCIPHER, .tfmsize = offsetof(struct crypto_skcipher, base), }; @@ -834,23 +859,18 @@ int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask) } EXPORT_SYMBOL_GPL(crypto_has_skcipher); -static int skcipher_prepare_alg(struct skcipher_alg *alg) +int skcipher_prepare_alg_common(struct skcipher_alg_common *alg) { - struct crypto_istat_cipher *istat = skcipher_get_stat(alg); + 
struct crypto_istat_cipher *istat = skcipher_get_stat_common(alg);
 	struct crypto_alg *base = &alg->base;
 
-	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
-	    alg->walksize > PAGE_SIZE / 8)
+	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8)
 		return -EINVAL;
 
 	if (!alg->chunksize)
 		alg->chunksize = base->cra_blocksize;
 
-	if (!alg->walksize)
-		alg->walksize = alg->chunksize;
-
 	base->cra_type = &crypto_skcipher_type;
 	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
-	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;
 
 	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
 		memset(istat, 0, sizeof(*istat));
@@ -858,6 +878,27 @@ static int skcipher_prepare_alg(struct skcipher_alg *alg)
 	return 0;
 }
 
+static int skcipher_prepare_alg(struct skcipher_alg *alg)
+{
+	struct crypto_alg *base = &alg->base;
+	int err;
+
+	err = skcipher_prepare_alg_common(&alg->co);
+	if (err)
+		return err;
+
+	if (alg->walksize > PAGE_SIZE / 8)
+		return -EINVAL;
+
+	if (!alg->walksize)
+		alg->walksize = alg->chunksize;
+
+	base->cra_type = &crypto_skcipher_type;
+	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;
+
+	return 0;
+}
+
 int crypto_register_skcipher(struct skcipher_alg *alg)
 {
 	struct crypto_alg *base = &alg->base;
diff --git a/crypto/skcipher.h b/crypto/skcipher.h
new file mode 100644
index 000000000000..6f1295f0fef2
--- /dev/null
+++ b/crypto/skcipher.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Cryptographic API.
+ *
+ * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+#ifndef _LOCAL_CRYPTO_SKCIPHER_H
+#define _LOCAL_CRYPTO_SKCIPHER_H
+
+#include <crypto/internal/skcipher.h>
+#include "internal.h"
+
+static inline struct crypto_istat_cipher *skcipher_get_stat_common(
+	struct skcipher_alg_common *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+	return &alg->stat;
+#else
+	return NULL;
+#endif
+}
+
+int crypto_lskcipher_setkey_sg(struct crypto_skcipher *tfm, const u8 *key,
+			       unsigned int keylen);
+int crypto_lskcipher_encrypt_sg(struct skcipher_request *req);
+int crypto_lskcipher_decrypt_sg(struct skcipher_request *req);
+int crypto_init_lskcipher_ops_sg(struct crypto_tfm *tfm);
+int skcipher_prepare_alg_common(struct skcipher_alg_common *alg);
+
+#endif	/* _LOCAL_CRYPTO_SKCIPHER_H */
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index fb3d9e899f52..4382fd707b8a 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -36,10 +36,25 @@ struct skcipher_instance {
 	};
 };
 
+struct lskcipher_instance {
+	void (*free)(struct lskcipher_instance *inst);
+	union {
+		struct {
+			char head[offsetof(struct lskcipher_alg, co.base)];
+			struct crypto_instance base;
+		} s;
+		struct lskcipher_alg alg;
+	};
+};
+
 struct crypto_skcipher_spawn {
 	struct crypto_spawn base;
 };
 
+struct crypto_lskcipher_spawn {
+	struct crypto_spawn base;
+};
+
 struct skcipher_walk {
 	union {
 		struct {
@@ -80,6 +95,12 @@ static inline struct crypto_instance *skcipher_crypto_instance(
 	return &inst->s.base;
 }
 
+static inline struct crypto_instance *lskcipher_crypto_instance(
+	struct lskcipher_instance *inst)
+{
+	return &inst->s.base;
+}
+
 static inline struct skcipher_instance *skcipher_alg_instance(
 	struct crypto_skcipher *skcipher)
 {
@@ -87,11 +108,23 @@ static inline struct skcipher_instance *skcipher_alg_instance(
 	struct skcipher_instance, alg);
 }
 
+static inline struct lskcipher_instance *lskcipher_alg_instance(
+	struct crypto_lskcipher *lskcipher)
+{
+	return container_of(crypto_lskcipher_alg(lskcipher),
+			    struct lskcipher_instance, alg);
+}
+
 static inline void *skcipher_instance_ctx(struct
skcipher_instance *inst) { return crypto_instance_ctx(skcipher_crypto_instance(inst)); } +static inline void *lskcipher_instance_ctx(struct lskcipher_instance *inst) +{ + return crypto_instance_ctx(lskcipher_crypto_instance(inst)); +} + static inline void skcipher_request_complete(struct skcipher_request *req, int err) { crypto_request_complete(&req->base, err); @@ -101,29 +134,56 @@ int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, struct crypto_instance *inst, const char *name, u32 type, u32 mask); +int crypto_grab_lskcipher(struct crypto_lskcipher_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask); + static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn) { crypto_drop_spawn(&spawn->base); } +static inline void crypto_drop_lskcipher(struct crypto_lskcipher_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + static inline struct skcipher_alg *crypto_skcipher_spawn_alg( struct crypto_skcipher_spawn *spawn) { return container_of(spawn->base.alg, struct skcipher_alg, base); } +static inline struct lskcipher_alg *crypto_lskcipher_spawn_alg( + struct crypto_lskcipher_spawn *spawn) +{ + return container_of(spawn->base.alg, struct lskcipher_alg, co.base); +} + static inline struct skcipher_alg *crypto_spawn_skcipher_alg( struct crypto_skcipher_spawn *spawn) { return crypto_skcipher_spawn_alg(spawn); } +static inline struct lskcipher_alg *crypto_spawn_lskcipher_alg( + struct crypto_lskcipher_spawn *spawn) +{ + return crypto_lskcipher_spawn_alg(spawn); +} + static inline struct crypto_skcipher *crypto_spawn_skcipher( struct crypto_skcipher_spawn *spawn) { return crypto_spawn_tfm2(&spawn->base); } +static inline struct crypto_lskcipher *crypto_spawn_lskcipher( + struct crypto_lskcipher_spawn *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + static inline void crypto_skcipher_set_reqsize( struct crypto_skcipher *skcipher, unsigned int reqsize) { @@ -144,6 +204,13 @@ void crypto_unregister_skciphers(struct skcipher_alg *algs, int count); int skcipher_register_instance(struct crypto_template *tmpl, struct skcipher_instance *inst); +int crypto_register_lskcipher(struct lskcipher_alg *alg); +void crypto_unregister_lskcipher(struct lskcipher_alg *alg); +int crypto_register_lskciphers(struct lskcipher_alg *algs, int count); +void crypto_unregister_lskciphers(struct lskcipher_alg *algs, int count); +int lskcipher_register_instance(struct crypto_template *tmpl, + struct lskcipher_instance *inst); + int skcipher_walk_done(struct skcipher_walk *walk, int err); int skcipher_walk_virt(struct skcipher_walk *walk, struct skcipher_request *req, @@ -166,6 +233,11 @@ static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm) return crypto_tfm_ctx(&tfm->base); } +static inline void *crypto_lskcipher_ctx(struct crypto_lskcipher *tfm) +{ + return crypto_tfm_ctx(&tfm->base); +} + static inline void *crypto_skcipher_ctx_dma(struct crypto_skcipher *tfm) { return crypto_tfm_ctx_dma(&tfm->base); @@ -209,21 +281,16 @@ static inline unsigned int crypto_skcipher_alg_walksize( return alg->walksize; } -/** - * crypto_skcipher_walksize() - obtain walk size - * @tfm: cipher handle - * - * In some cases, algorithms can only perform optimally when operating on - * multiple blocks in parallel. 
This is reflected by the walksize, which - * must be a multiple of the chunksize (or equal if the concern does not - * apply) - * - * Return: walk size in bytes - */ -static inline unsigned int crypto_skcipher_walksize( - struct crypto_skcipher *tfm) +static inline unsigned int crypto_lskcipher_alg_min_keysize( + struct lskcipher_alg *alg) +{ + return alg->co.min_keysize; +} + +static inline unsigned int crypto_lskcipher_alg_max_keysize( + struct lskcipher_alg *alg) { - return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm)); + return alg->co.max_keysize; } /* Helpers for simple block cipher modes of operation */ @@ -249,5 +316,24 @@ static inline struct crypto_alg *skcipher_ialg_simple( return crypto_spawn_cipher_alg(spawn); } +static inline struct crypto_lskcipher *lskcipher_cipher_simple( + struct crypto_lskcipher *tfm) +{ + struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm); + + return *ctx; +} + +struct lskcipher_instance *lskcipher_alloc_instance_simple( + struct crypto_template *tmpl, struct rtattr **tb); + +static inline struct lskcipher_alg *lskcipher_ialg_simple( + struct lskcipher_instance *inst) +{ + struct crypto_lskcipher_spawn *spawn = lskcipher_instance_ctx(inst); + + return crypto_lskcipher_spawn_alg(spawn); +} + #endif /* _CRYPTO_INTERNAL_SKCIPHER_H */ diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index 080d1ba3611d..a648ef5ce897 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -49,6 +49,10 @@ struct crypto_sync_skcipher { struct crypto_skcipher base; }; +struct crypto_lskcipher { + struct crypto_tfm base; +}; + /* * struct crypto_istat_cipher - statistics for cipher algorithm * @encrypt_cnt: number of encrypt requests @@ -65,6 +69,43 @@ struct crypto_istat_cipher { atomic64_t err_cnt; }; +#ifdef CONFIG_CRYPTO_STATS +#define SKCIPHER_ALG_COMMON_STAT struct crypto_istat_cipher stat; +#else +#define SKCIPHER_ALG_COMMON_STAT +#endif + +/* + * struct skcipher_alg_common - common properties of skcipher_alg + * @min_keysize: Minimum key size supported by the transformation. This is the + * smallest key length supported by this transformation algorithm. + * This must be set to one of the pre-defined values as this is + * not hardware specific. Possible values for this field can be + * found via git grep "_MIN_KEY_SIZE" include/crypto/ + * @max_keysize: Maximum key size supported by the transformation. This is the + * largest key length supported by this transformation algorithm. + * This must be set to one of the pre-defined values as this is + * not hardware specific. Possible values for this field can be + * found via git grep "_MAX_KEY_SIZE" include/crypto/ + * @ivsize: IV size applicable for transformation. The consumer must provide an + * IV of exactly that size to perform the encrypt or decrypt operation. + * @chunksize: Equal to the block size except for stream ciphers such as + * CTR where it is set to the underlying block size. + * @stat: Statistics for cipher algorithm + * @base: Definition of a generic crypto algorithm. + */ +#define SKCIPHER_ALG_COMMON { \ + unsigned int min_keysize; \ + unsigned int max_keysize; \ + unsigned int ivsize; \ + unsigned int chunksize; \ + \ + SKCIPHER_ALG_COMMON_STAT \ + \ + struct crypto_alg base; \ +} +struct skcipher_alg_common SKCIPHER_ALG_COMMON; + /** * struct skcipher_alg - symmetric key cipher definition * @min_keysize: Minimum key size supported by the transformation. This is the @@ -120,6 +161,7 @@ struct crypto_istat_cipher { * in parallel. 
Should be a multiple of chunksize. * @stat: Statistics for cipher algorithm * @base: Definition of a generic crypto algorithm. + * @co: see struct skcipher_alg_common * * All fields except @ivsize are mandatory and must be filled. */ @@ -131,17 +173,55 @@ struct skcipher_alg { int (*init)(struct crypto_skcipher *tfm); void (*exit)(struct crypto_skcipher *tfm); - unsigned int min_keysize; - unsigned int max_keysize; - unsigned int ivsize; - unsigned int chunksize; unsigned int walksize; -#ifdef CONFIG_CRYPTO_STATS - struct crypto_istat_cipher stat; -#endif + union { + struct SKCIPHER_ALG_COMMON; + struct skcipher_alg_common co; + }; +}; - struct crypto_alg base; +/** + * struct lskcipher_alg - linear symmetric key cipher definition + * @setkey: Set key for the transformation. This function is used to either + * program a supplied key into the hardware or store the key in the + * transformation context for programming it later. Note that this + * function does modify the transformation context. This function can + * be called multiple times during the existence of the transformation + * object, so one must make sure the key is properly reprogrammed into + * the hardware. This function is also responsible for checking the key + * length for validity. In case a software fallback was put in place in + * the @cra_init call, this function might need to use the fallback if + * the algorithm doesn't support all of the key sizes. + * @encrypt: Encrypt a number of bytes. This function is used to encrypt + * the supplied data. This function shall not modify + * the transformation context, as this function may be called + * in parallel with the same transformation object. Data + * may be left over if length is not a multiple of blocks + * and there is more to come (final == false). The number of + * left-over bytes should be returned in case of success. + * @decrypt: Decrypt a number of bytes. This is a reverse counterpart to + * @encrypt and the conditions are exactly the same. + * @init: Initialize the cryptographic transformation object. This function + * is used to initialize the cryptographic transformation object. + * This function is called only once at the instantiation time, right + * after the transformation context was allocated. + * @exit: Deinitialize the cryptographic transformation object. This is a + * counterpart to @init, used to remove various changes set in + * @init. + * @co: see struct skcipher_alg_common + */ +struct lskcipher_alg { + int (*setkey)(struct crypto_lskcipher *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv, bool final); + int (*decrypt)(struct crypto_lskcipher *tfm, const u8 *src, + u8 *dst, unsigned len, u8 *iv, bool final); + int (*init)(struct crypto_lskcipher *tfm); + void (*exit)(struct crypto_lskcipher *tfm); + + struct skcipher_alg_common co; }; #define MAX_SYNC_SKCIPHER_REQSIZE 384 @@ -213,12 +293,36 @@ struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name, struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(const char *alg_name, u32 type, u32 mask); + +/** + * crypto_alloc_lskcipher() - allocate linear symmetric key cipher handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * lskcipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for an lskcipher. 
The returned struct + * crypto_lskcipher is the cipher handle that is required for any subsequent + * API invocation for that lskcipher. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ +struct crypto_lskcipher *crypto_alloc_lskcipher(const char *alg_name, + u32 type, u32 mask); + static inline struct crypto_tfm *crypto_skcipher_tfm( struct crypto_skcipher *tfm) { return &tfm->base; } +static inline struct crypto_tfm *crypto_lskcipher_tfm( + struct crypto_lskcipher *tfm) +{ + return &tfm->base; +} + /** * crypto_free_skcipher() - zeroize and free cipher handle * @tfm: cipher handle to be freed @@ -235,6 +339,17 @@ static inline void crypto_free_sync_skcipher(struct crypto_sync_skcipher *tfm) crypto_free_skcipher(&tfm->base); } +/** + * crypto_free_lskcipher() - zeroize and free cipher handle + * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. + */ +static inline void crypto_free_lskcipher(struct crypto_lskcipher *tfm) +{ + crypto_destroy_tfm(tfm, crypto_lskcipher_tfm(tfm)); +} + /** * crypto_has_skcipher() - Search for the availability of an skcipher. * @alg_name: is the cra_name / name or cra_driver_name / driver name of the @@ -253,6 +368,19 @@ static inline const char *crypto_skcipher_driver_name( return crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)); } +static inline const char *crypto_lskcipher_driver_name( + struct crypto_lskcipher *tfm) +{ + return crypto_tfm_alg_driver_name(crypto_lskcipher_tfm(tfm)); +} + +static inline struct skcipher_alg_common *crypto_skcipher_alg_common( + struct crypto_skcipher *tfm) +{ + return container_of(crypto_skcipher_tfm(tfm)->__crt_alg, + struct skcipher_alg_common, base); +} + static inline struct skcipher_alg *crypto_skcipher_alg( struct crypto_skcipher *tfm) { @@ -260,11 +388,24 @@ static inline struct skcipher_alg *crypto_skcipher_alg( struct skcipher_alg, base); } +static inline struct lskcipher_alg *crypto_lskcipher_alg( + struct crypto_lskcipher *tfm) +{ + return container_of(crypto_lskcipher_tfm(tfm)->__crt_alg, + struct lskcipher_alg, co.base); +} + static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg) { return alg->ivsize; } +static inline unsigned int crypto_lskcipher_alg_ivsize( + struct lskcipher_alg *alg) +{ + return alg->co.ivsize; +} + /** * crypto_skcipher_ivsize() - obtain IV size * @tfm: cipher handle @@ -276,7 +417,7 @@ static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg) */ static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm) { - return crypto_skcipher_alg(tfm)->ivsize; + return crypto_skcipher_alg_common(tfm)->ivsize; } static inline unsigned int crypto_sync_skcipher_ivsize( @@ -285,6 +426,21 @@ static inline unsigned int crypto_sync_skcipher_ivsize( return crypto_skcipher_ivsize(&tfm->base); } +/** + * crypto_lskcipher_ivsize() - obtain IV size + * @tfm: cipher handle + * + * The size of the IV for the lskcipher referenced by the cipher handle is + * returned. This IV size may be zero if the cipher does not need an IV. 
+ * + * Return: IV size in bytes + */ +static inline unsigned int crypto_lskcipher_ivsize( + struct crypto_lskcipher *tfm) +{ + return crypto_lskcipher_alg(tfm)->co.ivsize; +} + /** * crypto_skcipher_blocksize() - obtain block size of cipher * @tfm: cipher handle @@ -301,12 +457,34 @@ static inline unsigned int crypto_skcipher_blocksize( return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm)); } +/** + * crypto_lskcipher_blocksize() - obtain block size of cipher + * @tfm: cipher handle + * + * The block size for the lskcipher referenced with the cipher handle is + * returned. The caller may use that information to allocate appropriate + * memory for the data returned by the encryption or decryption operation + * + * Return: block size of cipher + */ +static inline unsigned int crypto_lskcipher_blocksize( + struct crypto_lskcipher *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_lskcipher_tfm(tfm)); +} + static inline unsigned int crypto_skcipher_alg_chunksize( struct skcipher_alg *alg) { return alg->chunksize; } +static inline unsigned int crypto_lskcipher_alg_chunksize( + struct lskcipher_alg *alg) +{ + return alg->co.chunksize; +} + /** * crypto_skcipher_chunksize() - obtain chunk size * @tfm: cipher handle @@ -321,7 +499,24 @@ static inline unsigned int crypto_skcipher_alg_chunksize( static inline unsigned int crypto_skcipher_chunksize( struct crypto_skcipher *tfm) { - return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm)); + return crypto_skcipher_alg_common(tfm)->chunksize; +} + +/** + * crypto_lskcipher_chunksize() - obtain chunk size + * @tfm: cipher handle + * + * The block size is set to one for ciphers such as CTR. However, + * you still need to provide incremental updates in multiples of + * the underlying block size as the IV does not have sub-block + * granularity. This is known in this API as the chunk size. 
+ * + * Return: chunk size in bytes + */ +static inline unsigned int crypto_lskcipher_chunksize( + struct crypto_lskcipher *tfm) +{ + return crypto_lskcipher_alg_chunksize(crypto_lskcipher_alg(tfm)); } static inline unsigned int crypto_sync_skcipher_blocksize( @@ -336,6 +531,12 @@ static inline unsigned int crypto_skcipher_alignmask( return crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm)); } +static inline unsigned int crypto_lskcipher_alignmask( + struct crypto_lskcipher *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_lskcipher_tfm(tfm)); +} + static inline u32 crypto_skcipher_get_flags(struct crypto_skcipher *tfm) { return crypto_tfm_get_flags(crypto_skcipher_tfm(tfm)); @@ -371,6 +572,23 @@ static inline void crypto_sync_skcipher_clear_flags( crypto_skcipher_clear_flags(&tfm->base, flags); } +static inline u32 crypto_lskcipher_get_flags(struct crypto_lskcipher *tfm) +{ + return crypto_tfm_get_flags(crypto_lskcipher_tfm(tfm)); +} + +static inline void crypto_lskcipher_set_flags(struct crypto_lskcipher *tfm, + u32 flags) +{ + crypto_tfm_set_flags(crypto_lskcipher_tfm(tfm), flags); +} + +static inline void crypto_lskcipher_clear_flags(struct crypto_lskcipher *tfm, + u32 flags) +{ + crypto_tfm_clear_flags(crypto_lskcipher_tfm(tfm), flags); +} + /** * crypto_skcipher_setkey() - set key for cipher * @tfm: cipher handle @@ -396,16 +614,47 @@ static inline int crypto_sync_skcipher_setkey(struct crypto_sync_skcipher *tfm, return crypto_skcipher_setkey(&tfm->base, key, keylen); } +/** + * crypto_lskcipher_setkey() - set key for cipher + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the lskcipher referenced by the cipher + * handle. + * + * Note, the key length determines the cipher type. Many block ciphers implement + * different cipher modes depending on the key size, such as AES-128 vs AES-192 + * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 + * is performed. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ +int crypto_lskcipher_setkey(struct crypto_lskcipher *tfm, + const u8 *key, unsigned int keylen); + static inline unsigned int crypto_skcipher_min_keysize( struct crypto_skcipher *tfm) { - return crypto_skcipher_alg(tfm)->min_keysize; + return crypto_skcipher_alg_common(tfm)->min_keysize; } static inline unsigned int crypto_skcipher_max_keysize( struct crypto_skcipher *tfm) { - return crypto_skcipher_alg(tfm)->max_keysize; + return crypto_skcipher_alg_common(tfm)->max_keysize; +} + +static inline unsigned int crypto_lskcipher_min_keysize( + struct crypto_lskcipher *tfm) +{ + return crypto_lskcipher_alg(tfm)->co.min_keysize; +} + +static inline unsigned int crypto_lskcipher_max_keysize( + struct crypto_lskcipher *tfm) +{ + return crypto_lskcipher_alg(tfm)->co.max_keysize; } /** @@ -457,6 +706,42 @@ int crypto_skcipher_encrypt(struct skcipher_request *req); */ int crypto_skcipher_decrypt(struct skcipher_request *req); +/** + * crypto_lskcipher_encrypt() - encrypt plaintext + * @tfm: lskcipher handle + * @src: source buffer + * @dst: destination buffer + * @len: number of bytes to process + * @iv: IV for the cipher operation which must comply with the IV size defined + * by crypto_lskcipher_ivsize + * + * Encrypt plaintext data using the lskcipher handle. 
+ *
+ * Return: >=0 if the cipher operation was successful; if positive,
+ *	   this many bytes have been left unprocessed;
+ *	   < 0 if an error occurred
+ */
+int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
+			     u8 *dst, unsigned len, u8 *iv);
+
+/**
+ * crypto_lskcipher_decrypt() - decrypt ciphertext
+ * @tfm: lskcipher handle
+ * @src: source buffer
+ * @dst: destination buffer
+ * @len: number of bytes to process
+ * @iv: IV for the cipher operation which must comply with the IV size defined
+ *      by crypto_lskcipher_ivsize
+ *
+ * Decrypt ciphertext data using the lskcipher handle.
+ *
+ * Return: >=0 if the cipher operation was successful; if positive,
+ *	   this many bytes have been left unprocessed;
+ *	   < 0 if an error occurred
+ */
+int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src,
+			     u8 *dst, unsigned len, u8 *iv);
+
 /**
  * DOC: Symmetric Key Cipher Request Handle
  *
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index a0780deb017a..f3c3a3b27fac 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -24,6 +24,7 @@
 #define CRYPTO_ALG_TYPE_CIPHER		0x00000001
 #define CRYPTO_ALG_TYPE_COMPRESS	0x00000002
 #define CRYPTO_ALG_TYPE_AEAD		0x00000003
+#define CRYPTO_ALG_TYPE_LSKCIPHER	0x00000004
 #define CRYPTO_ALG_TYPE_SKCIPHER	0x00000005
 #define CRYPTO_ALG_TYPE_AKCIPHER	0x00000006
 #define CRYPTO_ALG_TYPE_SIG		0x00000007
-- 
cgit v1.2.3

From 5831fc1fd4a578232fea708b82de0c666ed17153 Mon Sep 17 00:00:00 2001
From: Longfang Liu
Date: Thu, 28 Sep 2023 16:57:22 +0800
Subject: crypto: hisilicon/qm - fix PF queue parameter issue

If the queue isolation feature is enabled, the number of queues
supported by the device changes. When the PF is probed with the current
default number of queues, that default may exceed the number the device
supports, and the PF then fails to bind to the driver.

After this change, if the queue isolation feature is enabled and the
default queue parameter exceeds the number supported by the device, the
number of enabled queues is reduced to the number the device supports,
so that the PF and driver can be properly bound.
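
In short, the new policy in qm_get_qp_num() distinguishes an explicit
module-parameter request from the built-in default. The following is a
condensed sketch of that logic, not the literal upstream code, and the
check_qp_num() wrapper name is hypothetical:

static int check_qp_num(struct hisi_qm *qm)
{
        if (qm->qp_num <= qm->max_qp_num)
                return 0;                       /* requested count fits */

        if (test_bit(QM_MODULE_PARAM, &qm->misc_ctl))
                return -EINVAL;                 /* user explicitly asked for too many */

        qm->qp_num = qm->max_qp_num;            /* silently clamp the default */
        return 0;
}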
Fixes: 8bbecfb402f7 ("crypto: hisilicon/qm - add queue isolation support for Kunpeng930") Signed-off-by: Longfang Liu Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 5 +++++ drivers/crypto/hisilicon/qm.c | 18 ++++++++++++------ drivers/crypto/hisilicon/qm_common.h | 1 - drivers/crypto/hisilicon/sec2/sec_main.c | 5 +++++ drivers/crypto/hisilicon/zip/zip_main.c | 5 +++++ include/linux/hisi_acc_qm.h | 7 +++++++ 6 files changed, 34 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index db44d889438a..3dce35debf63 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -433,8 +433,11 @@ static u32 uacce_mode = UACCE_MODE_NOUACCE; module_param_cb(uacce_mode, &hpre_uacce_mode_ops, &uacce_mode, 0444); MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC); +static bool pf_q_num_flag; static int pf_q_num_set(const char *val, const struct kernel_param *kp) { + pf_q_num_flag = true; + return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_HPRE_PF); } @@ -1157,6 +1160,8 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->qp_num = pf_q_num; qm->debug.curr_qm_qp_num = pf_q_num; qm->qm_list = &hpre_devices; + if (pf_q_num_flag) + set_bit(QM_MODULE_PARAM, &qm->misc_ctl); } ret = hisi_qm_init(qm); diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index a99fd589445c..1638c0a7df31 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -206,8 +206,6 @@ #define WAIT_PERIOD 20 #define REMOVE_WAIT_DELAY 10 -#define QM_DRIVER_REMOVING 0 -#define QM_RST_SCHED 1 #define QM_QOS_PARAM_NUM 2 #define QM_QOS_MAX_VAL 1000 #define QM_QOS_RATE 100 @@ -2824,7 +2822,6 @@ static void hisi_qm_pre_init(struct hisi_qm *qm) mutex_init(&qm->mailbox_lock); init_rwsem(&qm->qps_lock); qm->qp_in_used = 0; - qm->misc_ctl = false; if (test_bit(QM_SUPPORT_RPM, &qm->caps)) { if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev))) dev_info(&pdev->dev, "_PS0 and _PR0 are not defined"); @@ -5093,6 +5090,7 @@ free_eq_irq: static int qm_get_qp_num(struct hisi_qm *qm) { + struct device *dev = &qm->pdev->dev; bool is_db_isolation; /* VF's qp_num assigned by PF in v2, and VF can get qp_num by vft. 
*/ @@ -5109,13 +5107,21 @@ static int qm_get_qp_num(struct hisi_qm *qm) qm->max_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, QM_FUNC_MAX_QP_CAP, is_db_isolation); - /* check if qp number is valid */ - if (qm->qp_num > qm->max_qp_num) { - dev_err(&qm->pdev->dev, "qp num(%u) is more than max qp num(%u)!\n", + if (qm->qp_num <= qm->max_qp_num) + return 0; + + if (test_bit(QM_MODULE_PARAM, &qm->misc_ctl)) { + /* Check whether the set qp number is valid */ + dev_err(dev, "qp num(%u) is more than max qp num(%u)!\n", qm->qp_num, qm->max_qp_num); return -EINVAL; } + dev_info(dev, "Default qp num(%u) is too big, reset it to Function's max qp num(%u)!\n", + qm->qp_num, qm->max_qp_num); + qm->qp_num = qm->max_qp_num; + qm->debug.curr_qm_qp_num = qm->qp_num; + return 0; } diff --git a/drivers/crypto/hisilicon/qm_common.h b/drivers/crypto/hisilicon/qm_common.h index 1406a422d455..8e36aa9c681b 100644 --- a/drivers/crypto/hisilicon/qm_common.h +++ b/drivers/crypto/hisilicon/qm_common.h @@ -4,7 +4,6 @@ #define QM_COMMON_H #define QM_DBG_READ_LEN 256 -#define QM_RESETTING 2 struct qm_cqe { __le32 rsvd0; diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 77f9f131b850..62bd8936a915 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -311,8 +311,11 @@ static int sec_diff_regs_show(struct seq_file *s, void *unused) } DEFINE_SHOW_ATTRIBUTE(sec_diff_regs); +static bool pf_q_num_flag; static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp) { + pf_q_num_flag = true; + return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_SEC_PF); } @@ -1120,6 +1123,8 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->qp_num = pf_q_num; qm->debug.curr_qm_qp_num = pf_q_num; qm->qm_list = &sec_devices; + if (pf_q_num_flag) + set_bit(QM_MODULE_PARAM, &qm->misc_ctl); } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) { /* * have no way to get qm configure in VM in v1 hardware, diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 0d5d1ee363e4..945ab3648a87 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -364,8 +364,11 @@ static u32 uacce_mode = UACCE_MODE_NOUACCE; module_param_cb(uacce_mode, &zip_uacce_mode_ops, &uacce_mode, 0444); MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC); +static bool pf_q_num_flag; static int pf_q_num_set(const char *val, const struct kernel_param *kp) { + pf_q_num_flag = true; + return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_ZIP_PF); } @@ -1139,6 +1142,8 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->qp_num = pf_q_num; qm->debug.curr_qm_qp_num = pf_q_num; qm->qm_list = &zip_devices; + if (pf_q_num_flag) + set_bit(QM_MODULE_PARAM, &qm->misc_ctl); } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) { /* * have no way to get qm configure in VM in v1 hardware, diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index 39fbfb4be944..9da4f3f1e6d6 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -144,6 +144,13 @@ enum qm_vf_state { QM_NOT_READY, }; +enum qm_misc_ctl_bits { + QM_DRIVER_REMOVING = 0x0, + QM_RST_SCHED, + QM_RESETTING, + QM_MODULE_PARAM, +}; + enum qm_cap_bits { QM_SUPPORT_DB_ISOLATION = 0x0, QM_SUPPORT_FUNC_QOS, -- cgit v1.2.3 From b42ab1c61a77832040ad42ebf9adf237360e49f7 Mon Sep 17 00:00:00 2001 From: Weili Qian Date: Thu, 28 Sep 2023 17:21:47 +0800 Subject: crypto: 
From b42ab1c61a77832040ad42ebf9adf237360e49f7 Mon Sep 17 00:00:00 2001 From: Weili Qian Date: Thu, 28 Sep 2023 17:21:47 +0800 Subject: crypto: hisilicon/qm - check function qp num before alg register Tasks that the Kunpeng accelerator executes, such as encryption and decryption, have minimum requirements on the number of device queues. If the number of queues does not meet the requirement, process initialization will fail. Therefore, the driver checks the number of queues on the device before registering the algorithms. If the number does not meet the requirements, the driver does not register the algorithms to the crypto subsystem; the device is still added to the qm_list. Signed-off-by: Weili Qian Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_crypto.c | 25 +++++++++++++-- drivers/crypto/hisilicon/hpre/hpre_main.c | 14 ++++++--- drivers/crypto/hisilicon/qm.c | 47 ++++++++++------------------- drivers/crypto/hisilicon/sec2/sec_crypto.c | 31 +++++++++++++++++-- drivers/crypto/hisilicon/sec2/sec_main.c | 24 ++++++--------- drivers/crypto/hisilicon/zip/zip_crypto.c | 24 ++++++++++++++- drivers/crypto/hisilicon/zip/zip_main.c | 14 ++++++--- include/linux/hisi_acc_qm.h | 18 +++++++++-- 8 files changed, 134 insertions(+), 63 deletions(-) (limited to 'include/linux') diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c index 9a1c61be32cc..764532a6ca82 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -57,6 +57,9 @@ struct hpre_ctx; #define HPRE_DRV_ECDH_MASK_CAP BIT(2) #define HPRE_DRV_X25519_MASK_CAP BIT(5) +static DEFINE_MUTEX(hpre_algs_lock); +static unsigned int hpre_available_devs; typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe); struct hpre_rsa_ctx { @@ -2202,11 +2205,17 @@ static void hpre_unregister_x25519(struct hisi_qm *qm) int hpre_algs_register(struct hisi_qm *qm) { - int ret; + int ret = 0; + + mutex_lock(&hpre_algs_lock); + if (hpre_available_devs) { + hpre_available_devs++; + goto unlock; + } ret = hpre_register_rsa(qm); if (ret) - return ret; + goto unlock; ret = hpre_register_dh(qm); if (ret) @@ -2220,6 +2229,9 @@ int hpre_algs_register(struct hisi_qm *qm) if (ret) goto unreg_ecdh; + hpre_available_devs++; + mutex_unlock(&hpre_algs_lock); + return ret; unreg_ecdh: @@ -2228,13 +2240,22 @@ unreg_dh: hpre_unregister_dh(qm); unreg_rsa: hpre_unregister_rsa(qm); +unlock: + mutex_unlock(&hpre_algs_lock); return ret; } void hpre_algs_unregister(struct hisi_qm *qm) { + mutex_lock(&hpre_algs_lock); + if (--hpre_available_devs) + goto unlock; + hpre_unregister_x25519(qm); hpre_unregister_ecdh(qm); hpre_unregister_dh(qm); hpre_unregister_rsa(qm); + +unlock: + mutex_unlock(&hpre_algs_lock); } diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 3dce35debf63..56777099ef69 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -107,6 +107,7 @@ #define HPRE_VIA_MSI_DSM 1 #define HPRE_SQE_MASK_OFFSET 8 #define HPRE_SQE_MASK_LEN 24 +#define HPRE_CTX_Q_NUM_DEF 1 #define HPRE_DFX_BASE 0x301000 #define HPRE_DFX_COMMON1 0x301400 @@ -1399,10 +1400,11 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) dev_warn(&pdev->dev, "init debugfs fail!\n"); - ret = hisi_qm_alg_register(qm, &hpre_devices); + hisi_qm_add_list(qm, &hpre_devices); + ret = hisi_qm_alg_register(qm, &hpre_devices, HPRE_CTX_Q_NUM_DEF); if (ret < 0) { pci_err(pdev, "fail to register algs to crypto!\n"); - goto err_with_qm_start; + goto err_qm_del_list; } if (qm->uacce) { @@ -1424,9 +1426,10 @@ static int 
hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; err_with_alg_register: - hisi_qm_alg_unregister(qm, &hpre_devices); + hisi_qm_alg_unregister(qm, &hpre_devices, HPRE_CTX_Q_NUM_DEF); -err_with_qm_start: +err_qm_del_list: + hisi_qm_del_list(qm, &hpre_devices); hpre_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); @@ -1446,7 +1449,8 @@ static void hpre_remove(struct pci_dev *pdev) hisi_qm_pm_uninit(qm); hisi_qm_wait_task_finish(qm, &hpre_devices); - hisi_qm_alg_unregister(qm, &hpre_devices); + hisi_qm_alg_unregister(qm, &hpre_devices, HPRE_CTX_Q_NUM_DEF); + hisi_qm_del_list(qm, &hpre_devices); if (qm->fun_type == QM_HW_PF && qm->vfs_num) hisi_qm_sriov_disable(pdev, true); diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 458d1fe42a24..f3b55c044dd3 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -4860,63 +4860,48 @@ static void qm_cmd_process(struct work_struct *cmd_process) } /** - * hisi_qm_alg_register() - Register alg to crypto and add qm to qm_list. + * hisi_qm_alg_register() - Register alg to crypto. * @qm: The qm needs add. * @qm_list: The qm list. + * @guard: Guard of qp_num. * - * This function adds qm to qm list, and will register algorithm to - * crypto when the qm list is empty. + * Register algorithm to crypto when the function is satisfy guard. */ -int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list) +int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard) { struct device *dev = &qm->pdev->dev; - int flag = 0; - int ret = 0; - - mutex_lock(&qm_list->lock); - if (list_empty(&qm_list->list)) - flag = 1; - list_add_tail(&qm->list, &qm_list->list); - mutex_unlock(&qm_list->lock); if (qm->ver <= QM_HW_V2 && qm->use_sva) { dev_info(dev, "HW V2 not both use uacce sva mode and hardware crypto algs.\n"); return 0; } - if (flag) { - ret = qm_list->register_to_crypto(qm); - if (ret) { - mutex_lock(&qm_list->lock); - list_del(&qm->list); - mutex_unlock(&qm_list->lock); - } + if (qm->qp_num < guard) { + dev_info(dev, "qp_num is less than task need.\n"); + return 0; } - return ret; + return qm_list->register_to_crypto(qm); } EXPORT_SYMBOL_GPL(hisi_qm_alg_register); /** - * hisi_qm_alg_unregister() - Unregister alg from crypto and delete qm from - * qm list. + * hisi_qm_alg_unregister() - Unregister alg from crypto. * @qm: The qm needs delete. * @qm_list: The qm list. + * @guard: Guard of qp_num. * - * This function deletes qm from qm list, and will unregister algorithm - * from crypto when the qm list is empty. + * Unregister algorithm from crypto when the last function is satisfy guard. 
*/ -void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list) +void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard) { - mutex_lock(&qm_list->lock); - list_del(&qm->list); - mutex_unlock(&qm_list->lock); - if (qm->ver <= QM_HW_V2 && qm->use_sva) return; - if (list_empty(&qm_list->list)) - qm_list->unregister_from_crypto(qm); + if (qm->qp_num < guard) + return; + + qm_list->unregister_from_crypto(qm); } EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister); diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index ed77711e809e..6fcabbc87860 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -104,6 +104,9 @@ #define IV_CTR_INIT 0x1 #define IV_BYTE_OFFSET 0x8 +static DEFINE_MUTEX(sec_algs_lock); +static unsigned int sec_available_devs; + struct sec_skcipher { u64 alg_msk; struct skcipher_alg alg; @@ -2545,16 +2548,31 @@ err: int sec_register_to_crypto(struct hisi_qm *qm) { u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW); - int ret; + int ret = 0; + + mutex_lock(&sec_algs_lock); + if (sec_available_devs) { + sec_available_devs++; + goto unlock; + } ret = sec_register_skcipher(alg_mask); if (ret) - return ret; + goto unlock; ret = sec_register_aead(alg_mask); if (ret) - sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers)); + goto unreg_skcipher; + sec_available_devs++; + mutex_unlock(&sec_algs_lock); + + return 0; + +unreg_skcipher: + sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers)); +unlock: + mutex_unlock(&sec_algs_lock); return ret; } @@ -2562,6 +2580,13 @@ void sec_unregister_from_crypto(struct hisi_qm *qm) { u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW); + mutex_lock(&sec_algs_lock); + if (--sec_available_devs) + goto unlock; + sec_unregister_aead(alg_mask, ARRAY_SIZE(sec_aeads)); sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers)); + +unlock: + mutex_unlock(&sec_algs_lock); } diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 62bd8936a915..0e56a47eb862 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -1234,15 +1234,11 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) pci_warn(pdev, "Failed to init debugfs!\n"); - if (qm->qp_num >= ctx_q_num) { - ret = hisi_qm_alg_register(qm, &sec_devices); - if (ret < 0) { - pr_err("Failed to register driver to crypto.\n"); - goto err_qm_stop; - } - } else { - pci_warn(qm->pdev, - "Failed to use kernel mode, qp not enough!\n"); + hisi_qm_add_list(qm, &sec_devices); + ret = hisi_qm_alg_register(qm, &sec_devices, ctx_q_num); + if (ret < 0) { + pr_err("Failed to register driver to crypto.\n"); + goto err_qm_del_list; } if (qm->uacce) { @@ -1264,9 +1260,9 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; err_alg_unregister: - if (qm->qp_num >= ctx_q_num) - hisi_qm_alg_unregister(qm, &sec_devices); -err_qm_stop: + hisi_qm_alg_unregister(qm, &sec_devices, ctx_q_num); +err_qm_del_list: + hisi_qm_del_list(qm, &sec_devices); sec_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); err_probe_uninit: @@ -1283,8 +1279,8 @@ static void sec_remove(struct pci_dev *pdev) hisi_qm_pm_uninit(qm); hisi_qm_wait_task_finish(qm, &sec_devices); - if (qm->qp_num >= ctx_q_num) - hisi_qm_alg_unregister(qm, &sec_devices); + hisi_qm_alg_unregister(qm, 
&sec_devices, ctx_q_num); + hisi_qm_del_list(qm, &sec_devices); if (qm->fun_type == QM_HW_PF && qm->vfs_num) hisi_qm_sriov_disable(pdev, true); diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index 636ac794ebb7..c650c741a18d 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -25,6 +25,9 @@ #define HZIP_ALG_DEFLATE GENMASK(5, 4) +static DEFINE_MUTEX(zip_algs_lock); +static unsigned int zip_available_devs; + enum hisi_zip_alg_type { HZIP_ALG_TYPE_COMP = 0, HZIP_ALG_TYPE_DECOMP = 1, @@ -618,10 +621,29 @@ static void hisi_zip_unregister_deflate(struct hisi_qm *qm) int hisi_zip_register_to_crypto(struct hisi_qm *qm) { - return hisi_zip_register_deflate(qm); + int ret = 0; + + mutex_lock(&zip_algs_lock); + if (zip_available_devs++) + goto unlock; + + ret = hisi_zip_register_deflate(qm); + if (ret) + zip_available_devs--; + +unlock: + mutex_unlock(&zip_algs_lock); + return ret; } void hisi_zip_unregister_from_crypto(struct hisi_qm *qm) { + mutex_lock(&zip_algs_lock); + if (--zip_available_devs) + goto unlock; + hisi_zip_unregister_deflate(qm); + +unlock: + mutex_unlock(&zip_algs_lock); } diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 945ab3648a87..db4c964cd649 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -66,6 +66,7 @@ #define HZIP_SQE_SIZE 128 #define HZIP_PF_DEF_Q_NUM 64 #define HZIP_PF_DEF_Q_BASE 0 +#define HZIP_CTX_Q_NUM_DEF 2 #define HZIP_SOFT_CTRL_CNT_CLR_CE 0x301000 #define HZIP_SOFT_CTRL_CNT_CLR_CE_BIT BIT(0) @@ -1231,10 +1232,11 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) pci_err(pdev, "failed to init debugfs (%d)!\n", ret); - ret = hisi_qm_alg_register(qm, &zip_devices); + hisi_qm_add_list(qm, &zip_devices); + ret = hisi_qm_alg_register(qm, &zip_devices, HZIP_CTX_Q_NUM_DEF); if (ret < 0) { pci_err(pdev, "failed to register driver to crypto!\n"); - goto err_qm_stop; + goto err_qm_del_list; } if (qm->uacce) { @@ -1256,9 +1258,10 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; err_qm_alg_unregister: - hisi_qm_alg_unregister(qm, &zip_devices); + hisi_qm_alg_unregister(qm, &zip_devices, HZIP_CTX_Q_NUM_DEF); -err_qm_stop: +err_qm_del_list: + hisi_qm_del_list(qm, &zip_devices); hisi_zip_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); @@ -1278,7 +1281,8 @@ static void hisi_zip_remove(struct pci_dev *pdev) hisi_qm_pm_uninit(qm); hisi_qm_wait_task_finish(qm, &zip_devices); - hisi_qm_alg_unregister(qm, &zip_devices); + hisi_qm_alg_unregister(qm, &zip_devices, HZIP_CTX_Q_NUM_DEF); + hisi_qm_del_list(qm, &zip_devices); if (qm->fun_type == QM_HW_PF && qm->vfs_num) hisi_qm_sriov_disable(pdev, true); diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index 9da4f3f1e6d6..34c64a02712c 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -478,6 +478,20 @@ static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list) mutex_init(&qm_list->lock); } +static inline void hisi_qm_add_list(struct hisi_qm *qm, struct hisi_qm_list *qm_list) +{ + mutex_lock(&qm_list->lock); + list_add_tail(&qm->list, &qm_list->list); + mutex_unlock(&qm_list->lock); +} + +static inline void hisi_qm_del_list(struct hisi_qm *qm, struct hisi_qm_list *qm_list) +{ + mutex_lock(&qm_list->lock); + list_del(&qm->list); + mutex_unlock(&qm_list->lock); +} + int hisi_qm_init(struct hisi_qm 
*qm); void hisi_qm_uninit(struct hisi_qm *qm); int hisi_qm_start(struct hisi_qm *qm); @@ -523,8 +537,8 @@ int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num, void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num); void hisi_qm_dev_shutdown(struct pci_dev *pdev); void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list); -int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list); -void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list); +int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard); +void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard); int hisi_qm_resume(struct device *dev); int hisi_qm_suspend(struct device *dev); void hisi_qm_pm_uninit(struct hisi_qm *qm); -- cgit v1.2.3 From 8468516f9f93a41dc65158b6428a1a1039c68f20 Mon Sep 17 00:00:00 2001 From: Dimitri John Ledkov Date: Mon, 2 Oct 2023 00:57:15 +0100 Subject: crypto: pkcs7 - remove md4 md5 x.509 support Remove support for md4 and md5 hashes and signatures in the x.509 certificate parser, pkcs7 signature parser and authenticode parser. All of these are insecure or broken, and everyone migrated to alternative hash implementations long ago. Also remove the md2 and md3 OIDs, which already had no support. This is also likely the last user of md4 in the kernel, and thus crypto/md4.c and its related tests in tcrypt and testmgr can likely be removed. Other users such as cifs, smbfs, ext, modpost and sumversions have their own internal implementations as needed. Signed-off-by: Dimitri John Ledkov Reviewed-by: Jarkko Sakkinen Signed-off-by: Herbert Xu --- crypto/asymmetric_keys/mscode_parser.c | 6 ------ crypto/asymmetric_keys/pkcs7_parser.c | 6 ------ crypto/asymmetric_keys/x509_cert_parser.c | 6 ------ include/linux/oid_registry.h | 8 -------- 4 files changed, 26 deletions(-) (limited to 'include/linux') diff --git a/crypto/asymmetric_keys/mscode_parser.c b/crypto/asymmetric_keys/mscode_parser.c index 839591ad21ac..690405ebe77b 100644 --- a/crypto/asymmetric_keys/mscode_parser.c +++ b/crypto/asymmetric_keys/mscode_parser.c @@ -75,12 +75,6 @@ int mscode_note_digest_algo(void *context, size_t hdrlen, oid = look_up_OID(value, vlen); switch (oid) { - case OID_md4: - ctx->digest_algo = "md4"; - break; - case OID_md5: - ctx->digest_algo = "md5"; - break; case OID_sha1: ctx->digest_algo = "sha1"; break; diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c index 277482bb1777..cf4caab9620f 100644 --- a/crypto/asymmetric_keys/pkcs7_parser.c +++ b/crypto/asymmetric_keys/pkcs7_parser.c @@ -227,12 +227,6 @@ int pkcs7_sig_note_digest_algo(void *context, size_t hdrlen, struct pkcs7_parse_context *ctx = context; switch (ctx->last_oid) { - case OID_md4: - ctx->sinfo->sig->hash_algo = "md4"; - break; - case OID_md5: - ctx->sinfo->sig->hash_algo = "md5"; - break; case OID_sha1: ctx->sinfo->sig->hash_algo = "sha1"; break; diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c index 0a7049b470c1..2c30928621b7 100644 --- a/crypto/asymmetric_keys/x509_cert_parser.c +++ b/crypto/asymmetric_keys/x509_cert_parser.c @@ -195,15 +195,9 @@ int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag, pr_debug("PubKey Algo: %u\n", ctx->last_oid); switch (ctx->last_oid) { - case OID_md2WithRSAEncryption: - case OID_md3WithRSAEncryption: default: return -ENOPKG; /* Unsupported combination */ - case OID_md4WithRSAEncryption: - 
ctx->cert->sig->hash_algo = "md4"; - goto rsa_pkcs1; - case OID_sha1WithRSAEncryption: ctx->cert->sig->hash_algo = "sha1"; goto rsa_pkcs1; diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h index f86a08ba0207..4d04fa5d1eec 100644 --- a/include/linux/oid_registry.h +++ b/include/linux/oid_registry.h @@ -30,9 +30,6 @@ enum OID { /* PKCS#1 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-1(1)} */ OID_rsaEncryption, /* 1.2.840.113549.1.1.1 */ - OID_md2WithRSAEncryption, /* 1.2.840.113549.1.1.2 */ - OID_md3WithRSAEncryption, /* 1.2.840.113549.1.1.3 */ - OID_md4WithRSAEncryption, /* 1.2.840.113549.1.1.4 */ OID_sha1WithRSAEncryption, /* 1.2.840.113549.1.1.5 */ OID_sha256WithRSAEncryption, /* 1.2.840.113549.1.1.11 */ OID_sha384WithRSAEncryption, /* 1.2.840.113549.1.1.12 */ @@ -49,11 +46,6 @@ enum OID { OID_smimeCapabilites, /* 1.2.840.113549.1.9.15 */ OID_smimeAuthenticatedAttrs, /* 1.2.840.113549.1.9.16.2.11 */ - /* {iso(1) member-body(2) us(840) rsadsi(113549) digestAlgorithm(2)} */ - OID_md2, /* 1.2.840.113549.2.2 */ - OID_md4, /* 1.2.840.113549.2.4 */ - OID_md5, /* 1.2.840.113549.2.5 */ - OID_mskrb5, /* 1.2.840.48018.1.2.2 */ OID_krb5, /* 1.2.840.113554.1.2.2 */ OID_krb5u2u, /* 1.2.840.113554.1.2.2.3 */ -- cgit v1.2.3 From 5b90073defd1a52aa8120403d79f6e0fc10c87ee Mon Sep 17 00:00:00 2001 From: Weili Qian Date: Sun, 8 Oct 2023 20:36:17 +0800 Subject: crypto: hisilicon/qm - alloc buffer to set and get xqc If the temporarily applied memory is used to set or get the xqc information, the driver releases the memory immediately after the hardware mailbox operation time exceeds the driver waiting time. However, the hardware does not cancel the operation, so the hardware may write data to released memory. Therefore, when the driver is bound to a device, the driver reserves memory for the xqc configuration. The subsequent xqc configuration uses the reserved memory to prevent hardware from accessing the released memory. 
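The scheme is the reserve-once pattern: allocate a coherent DMA buffer at bind time, then reuse it for every mailbox transaction under a lock, so a timed-out operation can never target freed memory. A minimal sketch under that assumption (qm_dma_buf, my_probe, my_set_ctx and my_mailbox_send are illustrative names, not part of the patch):

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/mutex.h>
    #include <linux/string.h>

    int my_mailbox_send(dma_addr_t dma, size_t len);	/* hypothetical mailbox op */

    struct qm_dma_buf {
        void *va;
        dma_addr_t dma;
        size_t size;
        struct mutex lock;	/* serializes mailbox users */
    };

    static int my_probe(struct device *dev, struct qm_dma_buf *buf, size_t size)
    {
        /* Reserved for the device's lifetime; freed only once the
         * hardware can no longer issue writes (device removal). */
        buf->va = dma_alloc_coherent(dev, size, &buf->dma, GFP_KERNEL);
        if (!buf->va)
            return -ENOMEM;
        buf->size = size;
        mutex_init(&buf->lock);
        return 0;
    }

    static int my_set_ctx(struct qm_dma_buf *buf, const void *ctx, size_t len)
    {
        int ret;

        mutex_lock(&buf->lock);
        memcpy(buf->va, ctx, len);
        ret = my_mailbox_send(buf->dma, len);
        /* Even if the send timed out, buf stays valid for late HW writes. */
        mutex_unlock(&buf->lock);
        return ret;
    }

This mirrors hisi_qm_alloc_rsv_buf() and qm_set_and_get_xqc() in the diff below.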
Signed-off-by: Weili Qian Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/debugfs.c | 75 ++++---- drivers/crypto/hisilicon/qm.c | 332 ++++++++++++++++------------------- drivers/crypto/hisilicon/qm_common.h | 5 +- include/linux/hisi_acc_qm.h | 13 ++ 4 files changed, 191 insertions(+), 234 deletions(-) (limited to 'include/linux') diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c index 2cc1591949db..7e8186fe0512 100644 --- a/drivers/crypto/hisilicon/debugfs.c +++ b/drivers/crypto/hisilicon/debugfs.c @@ -137,8 +137,8 @@ static void dump_show(struct hisi_qm *qm, void *info, static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name) { struct device *dev = &qm->pdev->dev; - struct qm_sqc *sqc, *sqc_curr; - dma_addr_t sqc_dma; + struct qm_sqc *sqc_curr; + struct qm_sqc sqc; u32 qp_id; int ret; @@ -151,35 +151,29 @@ static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name) return -EINVAL; } - sqc = hisi_qm_ctx_alloc(qm, sizeof(*sqc), &sqc_dma); - if (IS_ERR(sqc)) - return PTR_ERR(sqc); + ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 1); + if (!ret) { + dump_show(qm, &sqc, sizeof(struct qm_sqc), name); - ret = hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 1); - if (ret) { - down_read(&qm->qps_lock); - if (qm->sqc) { - sqc_curr = qm->sqc + qp_id; + return 0; + } - dump_show(qm, sqc_curr, sizeof(*sqc), "SOFT SQC"); - } - up_read(&qm->qps_lock); + down_read(&qm->qps_lock); + if (qm->sqc) { + sqc_curr = qm->sqc + qp_id; - goto free_ctx; + dump_show(qm, sqc_curr, sizeof(*sqc_curr), "SOFT SQC"); } + up_read(&qm->qps_lock); - dump_show(qm, sqc, sizeof(*sqc), name); - -free_ctx: - hisi_qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma); return 0; } static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name) { struct device *dev = &qm->pdev->dev; - struct qm_cqc *cqc, *cqc_curr; - dma_addr_t cqc_dma; + struct qm_cqc *cqc_curr; + struct qm_cqc cqc; u32 qp_id; int ret; @@ -192,34 +186,29 @@ static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name) return -EINVAL; } - cqc = hisi_qm_ctx_alloc(qm, sizeof(*cqc), &cqc_dma); - if (IS_ERR(cqc)) - return PTR_ERR(cqc); + ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 1); + if (!ret) { + dump_show(qm, &cqc, sizeof(struct qm_cqc), name); - ret = hisi_qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 1); - if (ret) { - down_read(&qm->qps_lock); - if (qm->cqc) { - cqc_curr = qm->cqc + qp_id; + return 0; + } - dump_show(qm, cqc_curr, sizeof(*cqc), "SOFT CQC"); - } - up_read(&qm->qps_lock); + down_read(&qm->qps_lock); + if (qm->cqc) { + cqc_curr = qm->cqc + qp_id; - goto free_ctx; + dump_show(qm, cqc_curr, sizeof(*cqc_curr), "SOFT CQC"); } + up_read(&qm->qps_lock); - dump_show(qm, cqc, sizeof(*cqc), name); - -free_ctx: - hisi_qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma); return 0; } static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, char *name) { struct device *dev = &qm->pdev->dev; - dma_addr_t xeqc_dma; + struct qm_aeqc aeqc; + struct qm_eqc eqc; size_t size; void *xeqc; int ret; @@ -233,23 +222,19 @@ static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, char *name) if (!strcmp(name, "EQC")) { cmd = QM_MB_CMD_EQC; size = sizeof(struct qm_eqc); + xeqc = &eqc; } else { cmd = QM_MB_CMD_AEQC; size = sizeof(struct qm_aeqc); + xeqc = &aeqc; } - xeqc = hisi_qm_ctx_alloc(qm, size, &xeqc_dma); - if (IS_ERR(xeqc)) - return PTR_ERR(xeqc); - - ret = hisi_qm_mb(qm, cmd, xeqc_dma, 0, 1); + ret = qm_set_and_get_xqc(qm, cmd, xeqc, 0, 1); if (ret) - goto err_free_ctx; + return ret; dump_show(qm, xeqc, size, 
name); -err_free_ctx: - hisi_qm_ctx_free(qm, size, xeqc, &xeqc_dma); return ret; } diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index f3b55c044dd3..a1d0473f1931 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -46,7 +46,7 @@ #define QM_QC_PASID_ENABLE_SHIFT 7 #define QM_SQ_TYPE_MASK GENMASK(3, 0) -#define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc)->w11) >> 6) & 0x1) +#define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc).w11) >> 6) & 0x1) /* cqc shift */ #define QM_CQ_HOP_NUM_SHIFT 0 @@ -58,7 +58,7 @@ #define QM_CQE_PHASE(cqe) (le16_to_cpu((cqe)->w7) & 0x1) #define QM_QC_CQE_SIZE 4 -#define QM_CQ_TAIL_IDX(cqc) ((le16_to_cpu((cqc)->w11) >> 6) & 0x1) +#define QM_CQ_TAIL_IDX(cqc) ((le16_to_cpu((cqc).w11) >> 6) & 0x1) /* eqc shift */ #define QM_EQE_AEQE_SIZE (2UL << 12) @@ -252,19 +252,6 @@ #define QM_MK_SQC_DW3_V2(sqe_sz, sq_depth) \ ((((u32)sq_depth) - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT)) -#define INIT_QC_COMMON(qc, base, pasid) do { \ - (qc)->head = 0; \ - (qc)->tail = 0; \ - (qc)->base_l = cpu_to_le32(lower_32_bits(base)); \ - (qc)->base_h = cpu_to_le32(upper_32_bits(base)); \ - (qc)->dw3 = 0; \ - (qc)->w8 = 0; \ - (qc)->rsvd0 = 0; \ - (qc)->pasid = cpu_to_le16(pasid); \ - (qc)->w11 = 0; \ - (qc)->rsvd1 = 0; \ -} while (0) - enum vft_type { SQC_VFT = 0, CQC_VFT, @@ -686,6 +673,59 @@ int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, } EXPORT_SYMBOL_GPL(hisi_qm_mb); +/* op 0: set xqc information to hardware, 1: get xqc information from hardware. */ +int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op) +{ + struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); + struct qm_mailbox mailbox; + dma_addr_t xqc_dma; + void *tmp_xqc; + size_t size; + int ret; + + switch (cmd) { + case QM_MB_CMD_SQC: + size = sizeof(struct qm_sqc); + tmp_xqc = qm->xqc_buf.sqc; + xqc_dma = qm->xqc_buf.sqc_dma; + break; + case QM_MB_CMD_CQC: + size = sizeof(struct qm_cqc); + tmp_xqc = qm->xqc_buf.cqc; + xqc_dma = qm->xqc_buf.cqc_dma; + break; + case QM_MB_CMD_EQC: + size = sizeof(struct qm_eqc); + tmp_xqc = qm->xqc_buf.eqc; + xqc_dma = qm->xqc_buf.eqc_dma; + break; + case QM_MB_CMD_AEQC: + size = sizeof(struct qm_aeqc); + tmp_xqc = qm->xqc_buf.aeqc; + xqc_dma = qm->xqc_buf.aeqc_dma; + break; + } + + /* Setting xqc will fail if master OOO is blocked. 
*/ + if (qm_check_dev_error(pf_qm)) { + dev_err(&qm->pdev->dev, "failed to send mailbox since qm is stop!\n"); + return -EIO; + } + + mutex_lock(&qm->mailbox_lock); + if (!op) + memcpy(tmp_xqc, xqc, size); + + qm_mb_pre_init(&mailbox, cmd, xqc_dma, qp_id, op); + ret = qm_mb_nolock(qm, &mailbox); + if (!ret && op) + memcpy(xqc, tmp_xqc, size); + + mutex_unlock(&qm->mailbox_lock); + + return ret; +} + static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) { u64 doorbell; @@ -1321,45 +1361,6 @@ static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number) return 0; } -void *hisi_qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size, - dma_addr_t *dma_addr) -{ - struct device *dev = &qm->pdev->dev; - void *ctx_addr; - - ctx_addr = kzalloc(ctx_size, GFP_KERNEL); - if (!ctx_addr) - return ERR_PTR(-ENOMEM); - - *dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE); - if (dma_mapping_error(dev, *dma_addr)) { - dev_err(dev, "DMA mapping error!\n"); - kfree(ctx_addr); - return ERR_PTR(-ENOMEM); - } - - return ctx_addr; -} - -void hisi_qm_ctx_free(struct hisi_qm *qm, size_t ctx_size, - const void *ctx_addr, dma_addr_t *dma_addr) -{ - struct device *dev = &qm->pdev->dev; - - dma_unmap_single(dev, *dma_addr, ctx_size, DMA_FROM_DEVICE); - kfree(ctx_addr); -} - -static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id) -{ - return hisi_qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1); -} - -static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id) -{ - return hisi_qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1); -} - static void qm_hw_error_init_v1(struct hisi_qm *qm) { writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK); @@ -1952,84 +1953,51 @@ static void hisi_qm_release_qp(struct hisi_qp *qp) static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid) { struct hisi_qm *qm = qp->qm; - struct device *dev = &qm->pdev->dev; enum qm_hw_ver ver = qm->ver; - struct qm_sqc *sqc; - dma_addr_t sqc_dma; - int ret; + struct qm_sqc sqc = {0}; - sqc = kzalloc(sizeof(struct qm_sqc), GFP_KERNEL); - if (!sqc) - return -ENOMEM; - - INIT_QC_COMMON(sqc, qp->sqe_dma, pasid); if (ver == QM_HW_V1) { - sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size)); - sqc->w8 = cpu_to_le16(qp->sq_depth - 1); + sqc.dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size)); + sqc.w8 = cpu_to_le16(qp->sq_depth - 1); } else { - sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth)); - sqc->w8 = 0; /* rand_qc */ + sqc.dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth)); + sqc.w8 = 0; /* rand_qc */ } - sqc->cq_num = cpu_to_le16(qp_id); - sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type)); + sqc.w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type)); + sqc.base_l = cpu_to_le32(lower_32_bits(qp->sqe_dma)); + sqc.base_h = cpu_to_le32(upper_32_bits(qp->sqe_dma)); + sqc.cq_num = cpu_to_le16(qp_id); + sqc.pasid = cpu_to_le16(pasid); if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel) - sqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE << - QM_QC_PASID_ENABLE_SHIFT); - - sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc), - DMA_TO_DEVICE); - if (dma_mapping_error(dev, sqc_dma)) { - kfree(sqc); - return -ENOMEM; - } + sqc.w11 = cpu_to_le16(QM_QC_PASID_ENABLE << + QM_QC_PASID_ENABLE_SHIFT); - ret = hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0); - dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE); - kfree(sqc); - - return ret; + return qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, 
qp_id, 0); } static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid) { struct hisi_qm *qm = qp->qm; - struct device *dev = &qm->pdev->dev; enum qm_hw_ver ver = qm->ver; - struct qm_cqc *cqc; - dma_addr_t cqc_dma; - int ret; - - cqc = kzalloc(sizeof(struct qm_cqc), GFP_KERNEL); - if (!cqc) - return -ENOMEM; + struct qm_cqc cqc = {0}; - INIT_QC_COMMON(cqc, qp->cqe_dma, pasid); if (ver == QM_HW_V1) { - cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, - QM_QC_CQE_SIZE)); - cqc->w8 = cpu_to_le16(qp->cq_depth - 1); + cqc.dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, QM_QC_CQE_SIZE)); + cqc.w8 = cpu_to_le16(qp->cq_depth - 1); } else { - cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth)); - cqc->w8 = 0; /* rand_qc */ + cqc.dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth)); + cqc.w8 = 0; /* rand_qc */ } - cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT); + cqc.dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT); + cqc.base_l = cpu_to_le32(lower_32_bits(qp->cqe_dma)); + cqc.base_h = cpu_to_le32(upper_32_bits(qp->cqe_dma)); + cqc.pasid = cpu_to_le16(pasid); if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel) - cqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE); - - cqc_dma = dma_map_single(dev, cqc, sizeof(struct qm_cqc), - DMA_TO_DEVICE); - if (dma_mapping_error(dev, cqc_dma)) { - kfree(cqc); - return -ENOMEM; - } + cqc.w11 = cpu_to_le16(QM_QC_PASID_ENABLE); - ret = hisi_qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0); - dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE); - kfree(cqc); - - return ret; + return qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 0); } static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid) @@ -2119,14 +2087,11 @@ static void qp_stop_fail_cb(struct hisi_qp *qp) */ static int qm_drain_qp(struct hisi_qp *qp) { - size_t size = sizeof(struct qm_sqc) + sizeof(struct qm_cqc); struct hisi_qm *qm = qp->qm; struct device *dev = &qm->pdev->dev; - struct qm_sqc *sqc; - struct qm_cqc *cqc; - dma_addr_t dma_addr; - int ret = 0, i = 0; - void *addr; + struct qm_sqc sqc; + struct qm_cqc cqc; + int ret, i = 0; /* No need to judge if master OOO is blocked. 
*/ if (qm_check_dev_error(qm)) @@ -2140,44 +2105,32 @@ static int qm_drain_qp(struct hisi_qp *qp) return ret; } - addr = hisi_qm_ctx_alloc(qm, size, &dma_addr); - if (IS_ERR(addr)) { - dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n"); - return -ENOMEM; - } - while (++i) { - ret = qm_dump_sqc_raw(qm, dma_addr, qp->qp_id); + ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp->qp_id, 1); if (ret) { dev_err_ratelimited(dev, "Failed to dump sqc!\n"); - break; + return ret; } - sqc = addr; - ret = qm_dump_cqc_raw(qm, (dma_addr + sizeof(struct qm_sqc)), - qp->qp_id); + ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp->qp_id, 1); if (ret) { dev_err_ratelimited(dev, "Failed to dump cqc!\n"); - break; + return ret; } - cqc = addr + sizeof(struct qm_sqc); - if ((sqc->tail == cqc->tail) && + if ((sqc.tail == cqc.tail) && (QM_SQ_TAIL_IDX(sqc) == QM_CQ_TAIL_IDX(cqc))) break; if (i == MAX_WAIT_COUNTS) { dev_err(dev, "Fail to empty queue %u!\n", qp->qp_id); - ret = -EBUSY; - break; + return -EBUSY; } usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX); } - hisi_qm_ctx_free(qm, size, addr, &dma_addr); - - return ret; + return 0; } static int qm_stop_qp_nolock(struct hisi_qp *qp) @@ -2889,11 +2842,20 @@ static void hisi_qm_unint_work(struct hisi_qm *qm) destroy_workqueue(qm->wq); } +static void hisi_qm_free_rsv_buf(struct hisi_qm *qm) +{ + struct qm_dma *xqc_dma = &qm->xqc_buf.qcdma; + struct device *dev = &qm->pdev->dev; + + dma_free_coherent(dev, xqc_dma->size, xqc_dma->va, xqc_dma->dma); +} + static void hisi_qm_memory_uninit(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; hisi_qp_memory_uninit(qm, qm->qp_num); + hisi_qm_free_rsv_buf(qm); if (qm->qdma.va) { hisi_qm_cache_wb(qm); dma_free_coherent(dev, qm->qdma.size, @@ -3015,62 +2977,26 @@ static void qm_disable_eq_aeq_interrupts(struct hisi_qm *qm) static int qm_eq_ctx_cfg(struct hisi_qm *qm) { - struct device *dev = &qm->pdev->dev; - struct qm_eqc *eqc; - dma_addr_t eqc_dma; - int ret; - - eqc = kzalloc(sizeof(struct qm_eqc), GFP_KERNEL); - if (!eqc) - return -ENOMEM; + struct qm_eqc eqc = {0}; - eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma)); - eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma)); + eqc.base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma)); + eqc.base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma)); if (qm->ver == QM_HW_V1) - eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE); - eqc->dw6 = cpu_to_le32(((u32)qm->eq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); + eqc.dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE); + eqc.dw6 = cpu_to_le32(((u32)qm->eq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); - eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc), - DMA_TO_DEVICE); - if (dma_mapping_error(dev, eqc_dma)) { - kfree(eqc); - return -ENOMEM; - } - - ret = hisi_qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0); - dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE); - kfree(eqc); - - return ret; + return qm_set_and_get_xqc(qm, QM_MB_CMD_EQC, &eqc, 0, 0); } static int qm_aeq_ctx_cfg(struct hisi_qm *qm) { - struct device *dev = &qm->pdev->dev; - struct qm_aeqc *aeqc; - dma_addr_t aeqc_dma; - int ret; + struct qm_aeqc aeqc = {0}; - aeqc = kzalloc(sizeof(struct qm_aeqc), GFP_KERNEL); - if (!aeqc) - return -ENOMEM; - - aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma)); - aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma)); - aeqc->dw6 = cpu_to_le32(((u32)qm->aeq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); + aeqc.base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma)); + aeqc.base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma)); + aeqc.dw6 = 
cpu_to_le32(((u32)qm->aeq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); - aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc), - DMA_TO_DEVICE); - if (dma_mapping_error(dev, aeqc_dma)) { - kfree(aeqc); - return -ENOMEM; - } - - ret = hisi_qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0); - dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE); - kfree(aeqc); - - return ret; + return qm_set_and_get_xqc(qm, QM_MB_CMD_AEQC, &aeqc, 0, 0); } static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm) @@ -5296,6 +5222,36 @@ err_init_qp_mem: return ret; } +static int hisi_qm_alloc_rsv_buf(struct hisi_qm *qm) +{ + struct qm_rsv_buf *xqc_buf = &qm->xqc_buf; + struct qm_dma *xqc_dma = &xqc_buf->qcdma; + struct device *dev = &qm->pdev->dev; + size_t off = 0; + +#define QM_XQC_BUF_INIT(xqc_buf, type) do { \ + (xqc_buf)->type = ((xqc_buf)->qcdma.va + (off)); \ + (xqc_buf)->type##_dma = (xqc_buf)->qcdma.dma + (off); \ + off += QMC_ALIGN(sizeof(struct qm_##type)); \ +} while (0) + + xqc_dma->size = QMC_ALIGN(sizeof(struct qm_eqc)) + + QMC_ALIGN(sizeof(struct qm_aeqc)) + + QMC_ALIGN(sizeof(struct qm_sqc)) + + QMC_ALIGN(sizeof(struct qm_cqc)); + xqc_dma->va = dma_alloc_coherent(dev, xqc_dma->size, + &xqc_dma->dma, GFP_KERNEL); + if (!xqc_dma->va) + return -ENOMEM; + + QM_XQC_BUF_INIT(xqc_buf, eqc); + QM_XQC_BUF_INIT(xqc_buf, aeqc); + QM_XQC_BUF_INIT(xqc_buf, sqc); + QM_XQC_BUF_INIT(xqc_buf, cqc); + + return 0; +} + static int hisi_qm_memory_init(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; @@ -5337,13 +5293,19 @@ static int hisi_qm_memory_init(struct hisi_qm *qm) QM_INIT_BUF(qm, sqc, qm->qp_num); QM_INIT_BUF(qm, cqc, qm->qp_num); + ret = hisi_qm_alloc_rsv_buf(qm); + if (ret) + goto err_free_qdma; + ret = hisi_qp_alloc_memory(qm); if (ret) - goto err_alloc_qp_array; + goto err_free_reserve_buf; return 0; -err_alloc_qp_array: +err_free_reserve_buf: + hisi_qm_free_rsv_buf(qm); +err_free_qdma: dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma); err_destroy_idr: idr_destroy(&qm->qp_idr); diff --git a/drivers/crypto/hisilicon/qm_common.h b/drivers/crypto/hisilicon/qm_common.h index 8e36aa9c681b..7b0b15c83ec1 100644 --- a/drivers/crypto/hisilicon/qm_common.h +++ b/drivers/crypto/hisilicon/qm_common.h @@ -76,10 +76,7 @@ static const char * const qm_s[] = { "init", "start", "close", "stop", }; -void *hisi_qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size, - dma_addr_t *dma_addr); -void hisi_qm_ctx_free(struct hisi_qm *qm, size_t ctx_size, - const void *ctx_addr, dma_addr_t *dma_addr); +int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op); void hisi_qm_show_last_dfx_regs(struct hisi_qm *qm); void hisi_qm_set_algqos_init(struct hisi_qm *qm); diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index 34c64a02712c..44e0c44a2e20 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -292,6 +292,18 @@ struct qm_err_isolate { struct list_head qm_hw_errs; }; +struct qm_rsv_buf { + struct qm_sqc *sqc; + struct qm_cqc *cqc; + struct qm_eqc *eqc; + struct qm_aeqc *aeqc; + dma_addr_t sqc_dma; + dma_addr_t cqc_dma; + dma_addr_t eqc_dma; + dma_addr_t aeqc_dma; + struct qm_dma qcdma; +}; + struct hisi_qm { enum qm_hw_ver ver; enum qm_fun_type fun_type; @@ -324,6 +336,7 @@ struct hisi_qm { dma_addr_t cqc_dma; dma_addr_t eqe_dma; dma_addr_t aeqe_dma; + struct qm_rsv_buf xqc_buf; struct hisi_qm_status status; const struct hisi_qm_err_ini *err_ini; -- cgit v1.2.3 From 16ab7cb5825fc3425c16ad2c6e53d827f382d7c6 Mon Sep 17 00:00:00 2001 
From: Dimitri John Ledkov Date: Tue, 10 Oct 2023 22:22:38 +0100 Subject: crypto: pkcs7 - remove sha1 support Removes support for sha1 signed kernel modules, importing sha1 signed x.509 certificates. rsa-pkcs1pad keeps sha1 padding support, which seems to be used by virtio driver. sha1 remains available as there are many drivers and subsystems using it. Note only hmac(sha1) with secret keys remains cryptographically secure. In the kernel there are filesystems, IMA, tpm/pcr that appear to be using sha1. Maybe they can all start to be slowly upgraded to something else i.e. blake3, ParallelHash, SHAKE256 as needed. Signed-off-by: Dimitri John Ledkov Signed-off-by: Herbert Xu --- crypto/asymmetric_keys/mscode_parser.c | 3 -- crypto/asymmetric_keys/pkcs7_parser.c | 4 -- crypto/asymmetric_keys/public_key.c | 3 +- crypto/asymmetric_keys/signature.c | 2 +- crypto/asymmetric_keys/x509_cert_parser.c | 8 ---- crypto/testmgr.h | 80 ------------------------------- include/linux/oid_registry.h | 4 -- kernel/module/Kconfig | 5 -- 8 files changed, 2 insertions(+), 107 deletions(-) (limited to 'include/linux') diff --git a/crypto/asymmetric_keys/mscode_parser.c b/crypto/asymmetric_keys/mscode_parser.c index 690405ebe77b..6416bded0e07 100644 --- a/crypto/asymmetric_keys/mscode_parser.c +++ b/crypto/asymmetric_keys/mscode_parser.c @@ -75,9 +75,6 @@ int mscode_note_digest_algo(void *context, size_t hdrlen, oid = look_up_OID(value, vlen); switch (oid) { - case OID_sha1: - ctx->digest_algo = "sha1"; - break; case OID_sha256: ctx->digest_algo = "sha256"; break; diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c index cf4caab9620f..ab647cb4d766 100644 --- a/crypto/asymmetric_keys/pkcs7_parser.c +++ b/crypto/asymmetric_keys/pkcs7_parser.c @@ -227,9 +227,6 @@ int pkcs7_sig_note_digest_algo(void *context, size_t hdrlen, struct pkcs7_parse_context *ctx = context; switch (ctx->last_oid) { - case OID_sha1: - ctx->sinfo->sig->hash_algo = "sha1"; - break; case OID_sha256: ctx->sinfo->sig->hash_algo = "sha256"; break; @@ -272,7 +269,6 @@ int pkcs7_sig_note_pkey_algo(void *context, size_t hdrlen, ctx->sinfo->sig->pkey_algo = "rsa"; ctx->sinfo->sig->encoding = "pkcs1"; break; - case OID_id_ecdsa_with_sha1: case OID_id_ecdsa_with_sha224: case OID_id_ecdsa_with_sha256: case OID_id_ecdsa_with_sha384: diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index abeecb8329b3..5bf0452c17af 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c @@ -116,8 +116,7 @@ software_key_determine_akcipher(const struct public_key *pkey, */ if (!hash_algo) return -EINVAL; - if (strcmp(hash_algo, "sha1") != 0 && - strcmp(hash_algo, "sha224") != 0 && + if (strcmp(hash_algo, "sha224") != 0 && strcmp(hash_algo, "sha256") != 0 && strcmp(hash_algo, "sha384") != 0 && strcmp(hash_algo, "sha512") != 0) diff --git a/crypto/asymmetric_keys/signature.c b/crypto/asymmetric_keys/signature.c index 2deff81f8af5..398983be77e8 100644 --- a/crypto/asymmetric_keys/signature.c +++ b/crypto/asymmetric_keys/signature.c @@ -115,7 +115,7 @@ EXPORT_SYMBOL_GPL(decrypt_blob); * Sign the specified data blob using the private key specified by params->key. * The signature is wrapped in an encoding if params->encoding is specified * (eg. "pkcs1"). If the encoding needs to know the digest type, this can be - * passed through params->hash_algo (eg. "sha1"). + * passed through params->hash_algo (eg. "sha512"). 
* * Returns the length of the data placed in the signature buffer or an error. */ diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c index 2c30928621b7..68ef1ffbbef6 100644 --- a/crypto/asymmetric_keys/x509_cert_parser.c +++ b/crypto/asymmetric_keys/x509_cert_parser.c @@ -198,10 +198,6 @@ int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag, default: return -ENOPKG; /* Unsupported combination */ - case OID_sha1WithRSAEncryption: - ctx->cert->sig->hash_algo = "sha1"; - goto rsa_pkcs1; - case OID_sha256WithRSAEncryption: ctx->cert->sig->hash_algo = "sha256"; goto rsa_pkcs1; @@ -218,10 +214,6 @@ int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag, ctx->cert->sig->hash_algo = "sha224"; goto rsa_pkcs1; - case OID_id_ecdsa_with_sha1: - ctx->cert->sig->hash_algo = "sha1"; - goto ecdsa; - case OID_id_ecdsa_with_sha224: ctx->cert->sig->hash_algo = "sha224"; goto ecdsa; diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 0cd6e0600255..d7e98397549b 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -653,30 +653,6 @@ static const struct akcipher_testvec rsa_tv_template[] = { static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = { { .key = - "\x04\xf7\x46\xf8\x2f\x15\xf6\x22\x8e\xd7\x57\x4f\xcc\xe7\xbb\xc1" - "\xd4\x09\x73\xcf\xea\xd0\x15\x07\x3d\xa5\x8a\x8a\x95\x43\xe4\x68" - "\xea\xc6\x25\xc1\xc1\x01\x25\x4c\x7e\xc3\x3c\xa6\x04\x0a\xe7\x08" - "\x98", - .key_len = 49, - .params = - "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48" - "\xce\x3d\x03\x01\x01", - .param_len = 21, - .m = - "\xcd\xb9\xd2\x1c\xb7\x6f\xcd\x44\xb3\xfd\x63\xea\xa3\x66\x7f\xae" - "\x63\x85\xe7\x82", - .m_size = 20, - .algo = OID_id_ecdsa_with_sha1, - .c = - "\x30\x35\x02\x19\x00\xba\xe5\x93\x83\x6e\xb6\x3b\x63\xa0\x27\x91" - "\xc6\xf6\x7f\xc3\x09\xad\x59\xad\x88\x27\xd6\x92\x6b\x02\x18\x10" - "\x68\x01\x9d\xba\xce\x83\x08\xef\x95\x52\x7b\xa0\x0f\xe4\x18\x86" - "\x80\x6f\xa5\x79\x77\xda\xd0", - .c_size = 55, - .public_key_vec = true, - .siggen_sigver_test = true, - }, { - .key = "\x04\xb6\x4b\xb1\xd1\xac\xba\x24\x8f\x65\xb2\x60\x00\x90\xbf\xbd" "\x78\x05\x73\xe9\x79\x1d\x6f\x7c\x0b\xd2\xc3\x93\xa7\x28\xe1\x75" "\xf7\xd5\x95\x1d\x28\x10\xc0\x75\x50\x5c\x1a\x4f\x3f\x8f\xa5\xee" @@ -780,32 +756,6 @@ static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = { static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = { { .key = - "\x04\xb9\x7b\xbb\xd7\x17\x64\xd2\x7e\xfc\x81\x5d\x87\x06\x83\x41" - "\x22\xd6\x9a\xaa\x87\x17\xec\x4f\x63\x55\x2f\x94\xba\xdd\x83\xe9" - "\x34\x4b\xf3\xe9\x91\x13\x50\xb6\xcb\xca\x62\x08\xe7\x3b\x09\xdc" - "\xc3\x63\x4b\x2d\xb9\x73\x53\xe4\x45\xe6\x7c\xad\xe7\x6b\xb0\xe8" - "\xaf", - .key_len = 65, - .params = - "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48" - "\xce\x3d\x03\x01\x07", - .param_len = 21, - .m = - "\xc2\x2b\x5f\x91\x78\x34\x26\x09\x42\x8d\x6f\x51\xb2\xc5\xaf\x4c" - "\x0b\xde\x6a\x42", - .m_size = 20, - .algo = OID_id_ecdsa_with_sha1, - .c = - "\x30\x46\x02\x21\x00\xf9\x25\xce\x9f\x3a\xa6\x35\x81\xcf\xd4\xe7" - "\xb7\xf0\x82\x56\x41\xf7\xd4\xad\x8d\x94\x5a\x69\x89\xee\xca\x6a" - "\x52\x0e\x48\x4d\xcc\x02\x21\x00\xd7\xe4\xef\x52\x66\xd3\x5b\x9d" - "\x8a\xfa\x54\x93\x29\xa7\x70\x86\xf1\x03\x03\xf3\x3b\xe2\x73\xf7" - "\xfb\x9d\x8b\xde\xd4\x8d\x6f\xad", - .c_size = 72, - .public_key_vec = true, - .siggen_sigver_test = true, - }, { - .key = "\x04\x8b\x6d\xc0\x33\x8e\x2d\x8b\x67\xf5\xeb\xc4\x7f\xa0\xf5\xd9" 
"\x7b\x03\xa5\x78\x9a\xb5\xea\x14\xe4\x23\xd0\xaf\xd7\x0e\x2e\xa0" "\xc9\x8b\xdb\x95\xf8\xb3\xaf\xac\x00\x2c\x2c\x1f\x7a\xfd\x95\x88" @@ -916,36 +866,6 @@ static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = { static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = { { - .key = /* secp384r1(sha1) */ - "\x04\x89\x25\xf3\x97\x88\xcb\xb0\x78\xc5\x72\x9a\x14\x6e\x7a\xb1" - "\x5a\xa5\x24\xf1\x95\x06\x9e\x28\xfb\xc4\xb9\xbe\x5a\x0d\xd9\x9f" - "\xf3\xd1\x4d\x2d\x07\x99\xbd\xda\xa7\x66\xec\xbb\xea\xba\x79\x42" - "\xc9\x34\x89\x6a\xe7\x0b\xc3\xf2\xfe\x32\x30\xbe\xba\xf9\xdf\x7e" - "\x4b\x6a\x07\x8e\x26\x66\x3f\x1d\xec\xa2\x57\x91\x51\xdd\x17\x0e" - "\x0b\x25\xd6\x80\x5c\x3b\xe6\x1a\x98\x48\x91\x45\x7a\x73\xb0\xc3" - "\xf1", - .key_len = 97, - .params = - "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04" - "\x00\x22", - .param_len = 18, - .m = - "\x12\x55\x28\xf0\x77\xd5\xb6\x21\x71\x32\x48\xcd\x28\xa8\x25\x22" - "\x3a\x69\xc1\x93", - .m_size = 20, - .algo = OID_id_ecdsa_with_sha1, - .c = - "\x30\x66\x02\x31\x00\xf5\x0f\x24\x4c\x07\x93\x6f\x21\x57\x55\x07" - "\x20\x43\x30\xde\xa0\x8d\x26\x8e\xae\x63\x3f\xbc\x20\x3a\xc6\xf1" - "\x32\x3c\xce\x70\x2b\x78\xf1\x4c\x26\xe6\x5b\x86\xcf\xec\x7c\x7e" - "\xd0\x87\xd7\xd7\x6e\x02\x31\x00\xcd\xbb\x7e\x81\x5d\x8f\x63\xc0" - "\x5f\x63\xb1\xbe\x5e\x4c\x0e\xa1\xdf\x28\x8c\x1b\xfa\xf9\x95\x88" - "\x74\xa0\x0f\xbf\xaf\xc3\x36\x76\x4a\xa1\x59\xf1\x1c\xa4\x58\x26" - "\x79\x12\x2a\xb7\xc5\x15\x92\xc5", - .c_size = 104, - .public_key_vec = true, - .siggen_sigver_test = true, - }, { .key = /* secp384r1(sha224) */ "\x04\x69\x6c\xcf\x62\xee\xd0\x0d\xe5\xb5\x2f\x70\x54\xcf\x26\xa0" "\xd9\x98\x8d\x92\x2a\xab\x9b\x11\xcb\x48\x18\xa1\xa9\x0d\xd5\x18" diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h index 4d04fa5d1eec..8b79e55cfcec 100644 --- a/include/linux/oid_registry.h +++ b/include/linux/oid_registry.h @@ -17,12 +17,10 @@ * build_OID_registry.pl to generate the data for look_up_OID(). */ enum OID { - OID_id_dsa_with_sha1, /* 1.2.840.10030.4.3 */ OID_id_dsa, /* 1.2.840.10040.4.1 */ OID_id_ecPublicKey, /* 1.2.840.10045.2.1 */ OID_id_prime192v1, /* 1.2.840.10045.3.1.1 */ OID_id_prime256v1, /* 1.2.840.10045.3.1.7 */ - OID_id_ecdsa_with_sha1, /* 1.2.840.10045.4.1 */ OID_id_ecdsa_with_sha224, /* 1.2.840.10045.4.3.1 */ OID_id_ecdsa_with_sha256, /* 1.2.840.10045.4.3.2 */ OID_id_ecdsa_with_sha384, /* 1.2.840.10045.4.3.3 */ @@ -30,7 +28,6 @@ enum OID { /* PKCS#1 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-1(1)} */ OID_rsaEncryption, /* 1.2.840.113549.1.1.1 */ - OID_sha1WithRSAEncryption, /* 1.2.840.113549.1.1.5 */ OID_sha256WithRSAEncryption, /* 1.2.840.113549.1.1.11 */ OID_sha384WithRSAEncryption, /* 1.2.840.113549.1.1.12 */ OID_sha512WithRSAEncryption, /* 1.2.840.113549.1.1.13 */ @@ -67,7 +64,6 @@ enum OID { OID_PKU2U, /* 1.3.5.1.5.2.7 */ OID_Scram, /* 1.3.6.1.5.5.14 */ OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */ - OID_sha1, /* 1.3.14.3.2.26 */ OID_id_ansip384r1, /* 1.3.132.0.34 */ OID_sha256, /* 2.16.840.1.101.3.4.2.1 */ OID_sha384, /* 2.16.840.1.101.3.4.2.2 */ diff --git a/kernel/module/Kconfig b/kernel/module/Kconfig index 33a2e991f608..19a53d5e7736 100644 --- a/kernel/module/Kconfig +++ b/kernel/module/Kconfig @@ -236,10 +236,6 @@ choice possible to load a signed module containing the algorithm to check the signature on that module. 
-config MODULE_SIG_SHA1 - bool "Sign modules with SHA-1" - select CRYPTO_SHA1 - config MODULE_SIG_SHA224 bool "Sign modules with SHA-224" select CRYPTO_SHA256 @@ -261,7 +257,6 @@ endchoice config MODULE_SIG_HASH string depends on MODULE_SIG || IMA_APPRAISE_MODSIG - default "sha1" if MODULE_SIG_SHA1 default "sha224" if MODULE_SIG_SHA224 default "sha256" if MODULE_SIG_SHA256 default "sha384" if MODULE_SIG_SHA384 -- cgit v1.2.3 From 5acab6eb592387191c1bb745ba9b815e1e076db5 Mon Sep 17 00:00:00 2001 From: Longfang Liu Date: Fri, 13 Oct 2023 11:49:57 +0800 Subject: crypto: hisilicon/qm - fix EQ/AEQ interrupt issue During hisilicon accelerator live migration, the migration driver triggers an EQ/AEQ doorbell at the end of the migration to prevent EQ/AEQ interrupts from being lost. This operation can deliver a second interrupt for EQ/AEQ events that have already been handled. Update the EQ/AEQ interrupt handling so that such repeated interrupt events are processed correctly. Fixes: b0eed085903e ("hisi_acc_vfio_pci: Add support for VFIO live migration") Signed-off-by: Longfang Liu Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 105 ++++++++++++++++-------------------------- include/linux/hisi_acc_qm.h | 1 + 2 files changed, 41 insertions(+), 65 deletions(-) (limited to 'include/linux') diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index a1d0473f1931..6ee24313d851 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -894,47 +894,15 @@ static void qm_poll_req_cb(struct hisi_qp *qp) qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1); } -static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data) -{ - struct hisi_qm *qm = poll_data->qm; - struct qm_eqe *eqe = qm->eqe + qm->status.eq_head; - u16 eq_depth = qm->eq_depth; - int eqe_num = 0; - u16 cqn; - - while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) { - cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK; - poll_data->qp_finish_id[eqe_num] = cqn; - eqe_num++; - - if (qm->status.eq_head == eq_depth - 1) { - qm->status.eqc_phase = !qm->status.eqc_phase; - eqe = qm->eqe; - qm->status.eq_head = 0; - } else { - eqe++; - qm->status.eq_head++; - } - - if (eqe_num == (eq_depth >> 1) - 1) - break; - } - - qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); - - return eqe_num; -} - static void qm_work_process(struct work_struct *work) { struct hisi_qm_poll_data *poll_data = container_of(work, struct hisi_qm_poll_data, work); struct hisi_qm *qm = poll_data->qm; + u16 eqe_num = poll_data->eqe_num; struct hisi_qp *qp; - int eqe_num, i; + int i; - /* Get qp id of completed tasks and re-enable the interrupt. 
*/ - eqe_num = qm_get_complete_eqe_num(poll_data); for (i = eqe_num - 1; i >= 0; i--) { qp = &qm->qp_array[poll_data->qp_finish_id[i]]; if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP)) @@ -950,39 +918,55 @@ static void qm_work_process(struct work_struct *work) } } -static bool do_qm_eq_irq(struct hisi_qm *qm) +static void qm_get_complete_eqe_num(struct hisi_qm *qm) { struct qm_eqe *eqe = qm->eqe + qm->status.eq_head; - struct hisi_qm_poll_data *poll_data; - u16 cqn; + struct hisi_qm_poll_data *poll_data = NULL; + u16 eq_depth = qm->eq_depth; + u16 cqn, eqe_num = 0; - if (!readl(qm->io_base + QM_VF_EQ_INT_SOURCE)) - return false; + if (QM_EQE_PHASE(eqe) != qm->status.eqc_phase) { + atomic64_inc(&qm->debug.dfx.err_irq_cnt); + qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); + return; + } - if (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) { + cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK; + if (unlikely(cqn >= qm->qp_num)) + return; + poll_data = &qm->poll_data[cqn]; + + while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) { cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK; - poll_data = &qm->poll_data[cqn]; - queue_work(qm->wq, &poll_data->work); + poll_data->qp_finish_id[eqe_num] = cqn; + eqe_num++; + + if (qm->status.eq_head == eq_depth - 1) { + qm->status.eqc_phase = !qm->status.eqc_phase; + eqe = qm->eqe; + qm->status.eq_head = 0; + } else { + eqe++; + qm->status.eq_head++; + } - return true; + if (eqe_num == (eq_depth >> 1) - 1) + break; } - return false; + poll_data->eqe_num = eqe_num; + queue_work(qm->wq, &poll_data->work); + qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); } static irqreturn_t qm_eq_irq(int irq, void *data) { struct hisi_qm *qm = data; - bool ret; - - ret = do_qm_eq_irq(qm); - if (ret) - return IRQ_HANDLED; - atomic64_inc(&qm->debug.dfx.err_irq_cnt); - qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); + /* Get qp id of completed tasks and re-enable the interrupt */ + qm_get_complete_eqe_num(qm); - return IRQ_NONE; + return IRQ_HANDLED; } static irqreturn_t qm_mb_cmd_irq(int irq, void *data) @@ -1064,6 +1048,8 @@ static irqreturn_t qm_aeq_thread(int irq, void *data) u16 aeq_depth = qm->aeq_depth; u32 type, qp_id; + atomic64_inc(&qm->debug.dfx.aeq_irq_cnt); + while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) { type = (le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT) & QM_AEQE_TYPE_MASK; @@ -1102,17 +1088,6 @@ static irqreturn_t qm_aeq_thread(int irq, void *data) return IRQ_HANDLED; } -static irqreturn_t qm_aeq_irq(int irq, void *data) -{ - struct hisi_qm *qm = data; - - atomic64_inc(&qm->debug.dfx.aeq_irq_cnt); - if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE)) - return IRQ_NONE; - - return IRQ_WAKE_THREAD; -} - static void qm_init_qp_status(struct hisi_qp *qp) { struct hisi_qp_status *qp_status = &qp->qp_status; @@ -4923,8 +4898,8 @@ static int qm_register_aeq_irq(struct hisi_qm *qm) return 0; irq_vector = val & QM_IRQ_VECTOR_MASK; - ret = request_threaded_irq(pci_irq_vector(pdev, irq_vector), qm_aeq_irq, - qm_aeq_thread, 0, qm->dev_name, qm); + ret = request_threaded_irq(pci_irq_vector(pdev, irq_vector), NULL, + qm_aeq_thread, IRQF_ONESHOT, qm->dev_name, qm); if (ret) dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret); diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index 44e0c44a2e20..ddc7ebb70523 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -276,6 +276,7 @@ struct hisi_qm_poll_data { struct hisi_qm *qm; struct work_struct work; u16 *qp_finish_id; + u16 eqe_num; }; /** -- 
cgit v1.2.3 From e8eed5f7366f1f5decb694168bd06fb59ef6b12c Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Fri, 20 Oct 2023 15:49:25 +0200 Subject: units: Add BYTES_PER_*BIT There is going to be a new user of the BYTES_PER_[K/M/G]BIT definition besides possibly existing ones. Add them to the header. Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Reviewed-by: Tero Kristo Signed-off-by: Herbert Xu --- include/linux/units.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include/linux') diff --git a/include/linux/units.h b/include/linux/units.h index 2793a41e73a2..ff1bd6b5f5b3 100644 --- a/include/linux/units.h +++ b/include/linux/units.h @@ -31,6 +31,10 @@ #define MICROWATT_PER_MILLIWATT 1000UL #define MICROWATT_PER_WATT 1000000UL +#define BYTES_PER_KBIT (KILO / BITS_PER_BYTE) +#define BYTES_PER_MBIT (MEGA / BITS_PER_BYTE) +#define BYTES_PER_GBIT (GIGA / BITS_PER_BYTE) + #define ABSOLUTE_ZERO_MILLICELSIUS -273150 static inline long milli_kelvin_to_millicelsius(long t) -- cgit v1.2.3 From c626910f3f1bbce6ad18bc613d895d2a089ed95e Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 22 Oct 2023 01:10:44 -0700 Subject: crypto: ahash - remove support for nonzero alignmask Currently, the ahash API checks the alignment of all key and result buffers against the algorithm's declared alignmask, and for any unaligned buffers it falls back to manually aligned temporary buffers. This is virtually useless, however. First, since it does not apply to the message, its effect is much more limited than e.g. is the case for the alignmask for "skcipher". Second, the key and result buffers are given as virtual addresses and cannot (in general) be DMA'ed into, so drivers end up having to copy to/from them in software anyway. As a result it's easy to use memcpy() or the unaligned access helpers. The crypto_hash_walk_*() helper functions do use the alignmask to align the message. But with one exception those are only used for shash algorithms being exposed via the ahash API, not for native ahashes, and aligning the message is not required in this case, especially now that alignmask support has been removed from shash. The exception is the n2_core driver, which doesn't set an alignmask. In any case, no ahash algorithms actually set a nonzero alignmask anymore. Therefore, remove support for it from ahash. The benefit is that all the code to handle "misaligned" buffers in the ahash API goes away, reducing the overhead of the ahash API. This follows the same change that was made to shash. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- Documentation/crypto/devel-algos.rst | 4 +- crypto/ahash.c | 117 +++-------------------------------- crypto/shash.c | 8 +-- include/crypto/internal/hash.h | 4 +- include/linux/crypto.h | 27 ++++---- 5 files changed, 28 insertions(+), 132 deletions(-) (limited to 'include/linux') diff --git a/Documentation/crypto/devel-algos.rst b/Documentation/crypto/devel-algos.rst index 3506899ef83e..9b7782f4f6e0 100644 --- a/Documentation/crypto/devel-algos.rst +++ b/Documentation/crypto/devel-algos.rst @@ -235,6 +235,4 @@ Specifics Of Asynchronous HASH Transformation Some of the drivers will want to use the Generic ScatterWalk in case the implementation needs to be fed separate chunks of the scatterlist which -contains the input data. The buffer containing the resulting hash will -always be properly aligned to .cra_alignmask so there is no need to -worry about this. +contains the input data. 
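With the fallback gone, an ahash driver can no longer assume the API hands it an aligned result buffer: req->result is a plain kernel virtual address, so the driver stores the digest itself via memcpy() or the unaligned accessors. A minimal sketch under that assumption (mydrv_reqctx and its five-word state are illustrative, not taken from this patch):

    #include <asm/unaligned.h>
    #include <crypto/internal/hash.h>

    /* Hypothetical per-request driver state. */
    struct mydrv_reqctx {
        u32 state[5];
    };

    static int mydrv_final(struct ahash_request *req)
    {
        struct mydrv_reqctx *rctx = ahash_request_ctx(req);
        int i;

        /* req->result carries no alignment guarantee any more, so
         * write the digest words unaligned instead of relying on the
         * removed alignmask fallback. */
        for (i = 0; i < 5; i++)
            put_unaligned_be32(rctx->state[i], req->result + 4 * i);

        return 0;
    }

The same reasoning applies to setkey(): the key is given as a virtual address that in general cannot be DMA'ed from, so a driver copies it into its own context rather than expecting any particular alignment.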
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 213bb3e9f245..744fd3b8ea25 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -35,21 +35,12 @@ struct ahash_request_priv {

 static int hash_walk_next(struct crypto_hash_walk *walk)
 {
-	unsigned int alignmask = walk->alignmask;
 	unsigned int offset = walk->offset;
 	unsigned int nbytes = min(walk->entrylen,
 				  ((unsigned int)(PAGE_SIZE)) - offset);

 	walk->data = kmap_local_page(walk->pg);
 	walk->data += offset;
-
-	if (offset & alignmask) {
-		unsigned int unaligned = alignmask + 1 - (offset & alignmask);
-
-		if (nbytes > unaligned)
-			nbytes = unaligned;
-	}
-
 	walk->entrylen -= nbytes;
 	return nbytes;
 }
@@ -73,23 +64,8 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)

 int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
 {
-	unsigned int alignmask = walk->alignmask;
-
 	walk->data -= walk->offset;

-	if (walk->entrylen && (walk->offset & alignmask) && !err) {
-		unsigned int nbytes;
-
-		walk->offset = ALIGN(walk->offset, alignmask + 1);
-		nbytes = min(walk->entrylen,
-			     (unsigned int)(PAGE_SIZE - walk->offset));
-		if (nbytes) {
-			walk->entrylen -= nbytes;
-			walk->data += walk->offset;
-			return nbytes;
-		}
-	}
-
 	kunmap_local(walk->data);
 	crypto_yield(walk->flags);
@@ -121,7 +97,6 @@ int crypto_hash_walk_first(struct ahash_request *req,
 		return 0;
 	}

-	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
 	walk->sg = req->src;
 	walk->flags = req->base.flags;
@@ -129,26 +104,6 @@
 }
 EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

-static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
-				  unsigned int keylen)
-{
-	unsigned long alignmask = crypto_ahash_alignmask(tfm);
-	int ret;
-	u8 *buffer, *alignbuffer;
-	unsigned long absize;
-
-	absize = keylen + alignmask;
-	buffer = kmalloc(absize, GFP_KERNEL);
-	if (!buffer)
-		return -ENOMEM;
-
-	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
-	memcpy(alignbuffer, key, keylen);
-	ret = tfm->setkey(tfm, alignbuffer, keylen);
-	kfree_sensitive(buffer);
-	return ret;
-}
-
 static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
 			  unsigned int keylen)
 {
@@ -167,13 +122,7 @@ static void ahash_set_needkey(struct crypto_ahash *tfm)
 int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
 			unsigned int keylen)
 {
-	unsigned long alignmask = crypto_ahash_alignmask(tfm);
-	int err;
-
-	if ((unsigned long)key & alignmask)
-		err = ahash_setkey_unaligned(tfm, key, keylen);
-	else
-		err = tfm->setkey(tfm, key, keylen);
+	int err = tfm->setkey(tfm, key, keylen);

 	if (unlikely(err)) {
 		ahash_set_needkey(tfm);
@@ -189,7 +138,6 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt,
			  bool has_state)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	unsigned long alignmask = crypto_ahash_alignmask(tfm);
 	unsigned int ds = crypto_ahash_digestsize(tfm);
 	struct ahash_request *subreq;
 	unsigned int subreq_size;
@@ -203,7 +151,6 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt,
 	reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment());
 	subreq_size += reqsize;
 	subreq_size += ds;
-	subreq_size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);

 	flags = ahash_request_flags(req);
 	gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC;
@@ -215,7 +162,6 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt,
 	ahash_request_set_callback(subreq, flags, cplt, req);

 	result = (u8 *)(subreq + 1) + reqsize;
-	result = PTR_ALIGN(result, alignmask + 1);

 	ahash_request_set_crypt(subreq, req->src, result, req->nbytes);
@@ -251,56 +197,6 @@ static void ahash_restore_req(struct ahash_request *req, int err)
 	kfree_sensitive(subreq);
 }

-static void ahash_op_unaligned_done(void *data, int err)
-{
-	struct ahash_request *areq = data;
-
-	if (err == -EINPROGRESS)
-		goto out;
-
-	/* First copy req->result into req->priv.result */
-	ahash_restore_req(areq, err);
-
-out:
-	/* Complete the ORIGINAL request. */
-	ahash_request_complete(areq, err);
-}
-
-static int ahash_op_unaligned(struct ahash_request *req,
-			      int (*op)(struct ahash_request *),
-			      bool has_state)
-{
-	int err;
-
-	err = ahash_save_req(req, ahash_op_unaligned_done, has_state);
-	if (err)
-		return err;
-
-	err = op(req->priv);
-	if (err == -EINPROGRESS || err == -EBUSY)
-		return err;
-
-	ahash_restore_req(req, err);
-
-	return err;
-}
-
-static int crypto_ahash_op(struct ahash_request *req,
-			   int (*op)(struct ahash_request *),
-			   bool has_state)
-{
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	unsigned long alignmask = crypto_ahash_alignmask(tfm);
-	int err;
-
-	if ((unsigned long)req->result & alignmask)
-		err = ahash_op_unaligned(req, op, has_state);
-	else
-		err = op(req);
-
-	return crypto_hash_errstat(crypto_hash_alg_common(tfm), err);
-}
-
 int crypto_ahash_final(struct ahash_request *req)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -309,7 +205,7 @@ int crypto_ahash_final(struct ahash_request *req)
 	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
 		atomic64_inc(&hash_get_stat(alg)->hash_cnt);

-	return crypto_ahash_op(req, tfm->final, true);
+	return crypto_hash_errstat(alg, tfm->final(req));
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_final);
@@ -325,7 +221,7 @@ int crypto_ahash_finup(struct ahash_request *req)
 		atomic64_add(req->nbytes, &istat->hash_tlen);
 	}

-	return crypto_ahash_op(req, tfm->finup, true);
+	return crypto_hash_errstat(alg, tfm->finup(req));
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_finup);
@@ -333,6 +229,7 @@ int crypto_ahash_digest(struct ahash_request *req)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
+	int err;

 	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
 		struct crypto_istat_hash *istat = hash_get_stat(alg);
@@ -342,9 +239,11 @@
 	}

 	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
-		return crypto_hash_errstat(alg, -ENOKEY);
+		err = -ENOKEY;
+	else
+		err = tfm->digest(req);

-	return crypto_ahash_op(req, tfm->digest, false);
+	return crypto_hash_errstat(alg, err);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_digest);

diff --git a/crypto/shash.c b/crypto/shash.c
index 409b33f9c97c..359702c2cd02 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -541,6 +541,10 @@ int hash_prepare_alg(struct hash_alg_common *alg)
 	if (alg->digestsize > HASH_MAX_DIGESTSIZE)
 		return -EINVAL;

+	/* alignmask is not useful for hashes, so it is not supported. */
+	if (base->cra_alignmask)
+		return -EINVAL;
+
 	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;

 	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
@@ -557,10 +561,6 @@ static int shash_prepare_alg(struct shash_alg *alg)
 	if (alg->descsize > HASH_MAX_DESCSIZE)
 		return -EINVAL;

-	/* alignmask is not useful for shash, so it is not supported. */
-	if (base->cra_alignmask)
-		return -EINVAL;
-
 	if ((alg->export && !alg->import) || (alg->import && !alg->export))
 		return -EINVAL;

diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 8d0cd0c591a0..59c707e4dea4 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -18,15 +18,13 @@ struct crypto_hash_walk {
 	char *data;

 	unsigned int offset;
-	unsigned int alignmask;
+	unsigned int flags;

 	struct page *pg;
 	unsigned int entrylen;

 	unsigned int total;
 	struct scatterlist *sg;
-
-	unsigned int flags;
 };

 struct ahash_instance {

diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index f3c3a3b27fac..b164da5e129e 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -110,7 +110,6 @@
 * crypto_aead_walksize() (with the remainder going at the end), no chunk
 * can cross a page boundary or a scatterlist element boundary.
 * ahash:
- *	- The result buffer must be aligned to the algorithm's alignmask.
 *	- crypto_ahash_finup() must not be used unless the algorithm implements
 *	  ->finup() natively.
 */
@@ -278,18 +277,20 @@ struct compress_alg {
 * @cra_ctxsize: Size of the operational context of the transformation. This
 *		 value informs the kernel crypto API about the memory size
 *		 needed to be allocated for the transformation context.
- * @cra_alignmask: Alignment mask for the input and output data buffer. The data
- *		   buffer containing the input data for the algorithm must be
- *		   aligned to this alignment mask. The data buffer for the
- *		   output data must be aligned to this alignment mask. Note that
- *		   the Crypto API will do the re-alignment in software, but
- *		   only under special conditions and there is a performance hit.
- *		   The re-alignment happens at these occasions for different
- *		   @cra_u types: cipher -- For both input data and output data
- *		   buffer; ahash -- For output hash destination buf; shash --
- *		   For output hash destination buf.
- *		   This is needed on hardware which is flawed by design and
- *		   cannot pick data from arbitrary addresses.
+ * @cra_alignmask: For cipher, skcipher, lskcipher, and aead algorithms this is
+ *		   1 less than the alignment, in bytes, that the algorithm
+ *		   implementation requires for input and output buffers. When
+ *		   the crypto API is invoked with buffers that are not aligned
+ *		   to this alignment, the crypto API automatically utilizes
+ *		   appropriately aligned temporary buffers to comply with what
+ *		   the algorithm needs. (For scatterlists this happens only if
+ *		   the algorithm uses the skcipher_walk helper functions.) This
+ *		   misalignment handling carries a performance penalty, so it is
+ *		   preferred that algorithms do not set a nonzero alignmask.
+ *		   Also, crypto API users may wish to allocate buffers aligned
+ *		   to the alignmask of the algorithm being used, in order to
+ *		   avoid the API having to realign them. Note: the alignmask is
+ *		   not supported for hash algorithms and is always 0 for them.
 * @cra_priority: Priority of this transformation implementation. In case
 *		  multiple transformations with same @cra_name are available to
 *		  the Crypto API, the kernel will use the one with highest
--
cgit v1.2.3


From 2ee7c1bcf3d1c91ede9d914c52fa2f56c449b75a Mon Sep 17 00:00:00 2001
From: Dimitri John Ledkov
Date: Sun, 22 Oct 2023 19:22:03 +0100
Subject: x509: Add OIDs for FIPS 202 SHA-3 hash and signatures

Add OIDs for the FIPS 202 SHA-3 family of hash functions, and for RSA &
ECDSA signatures using them. Limit them to sizes of 256 bits or larger,
for interoperability reasons; 224 is too weak for any practical use.
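As a rough illustration of how these OIDs might be consumed (an
editorial sketch, not part of this patch), a signature verifier could
resolve a raw DER-encoded OID to a kernel hash algorithm name via the
existing look_up_OID() helper; the function below is hypothetical:

	#include <linux/oid_registry.h>
	#include <linux/types.h>

	/* Hypothetical helper: map a signature OID to its hash name. */
	static const char *sha3_sig_hash_name(const void *oid, size_t len)
	{
		switch (look_up_OID(oid, len)) {
		case OID_id_ecdsa_with_sha3_256:
		case OID_id_rsassa_pkcs1_v1_5_with_sha3_256:
			return "sha3-256";
		case OID_id_ecdsa_with_sha3_384:
		case OID_id_rsassa_pkcs1_v1_5_with_sha3_384:
			return "sha3-384";
		case OID_id_ecdsa_with_sha3_512:
		case OID_id_rsassa_pkcs1_v1_5_with_sha3_512:
			return "sha3-512";
		default:
			return NULL;	/* unsupported or unknown OID */
		}
	}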
Signed-off-by: Dimitri John Ledkov
Signed-off-by: Herbert Xu
---
 include/linux/oid_registry.h | 11 +++++++++++
 1 file changed, 11 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h
index 8b79e55cfcec..3921fbed0b28 100644
--- a/include/linux/oid_registry.h
+++ b/include/linux/oid_registry.h
@@ -129,6 +129,17 @@ enum OID {
 	OID_TPMImportableKey,	/* 2.23.133.10.1.4 */
 	OID_TPMSealedData,	/* 2.23.133.10.1.5 */

+	/* CSOR FIPS-202 SHA-3 */
+	OID_sha3_256,	/* 2.16.840.1.101.3.4.2.8 */
+	OID_sha3_384,	/* 2.16.840.1.101.3.4.2.9 */
+	OID_sha3_512,	/* 2.16.840.1.101.3.4.2.10 */
+	OID_id_ecdsa_with_sha3_256,	/* 2.16.840.1.101.3.4.3.10 */
+	OID_id_ecdsa_with_sha3_384,	/* 2.16.840.1.101.3.4.3.11 */
+	OID_id_ecdsa_with_sha3_512,	/* 2.16.840.1.101.3.4.3.12 */
+	OID_id_rsassa_pkcs1_v1_5_with_sha3_256,	/* 2.16.840.1.101.3.4.3.14 */
+	OID_id_rsassa_pkcs1_v1_5_with_sha3_384,	/* 2.16.840.1.101.3.4.3.15 */
+	OID_id_rsassa_pkcs1_v1_5_with_sha3_512,	/* 2.16.840.1.101.3.4.3.16 */
+
 	OID__NR
 };
--
cgit v1.2.3
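Illustrative sketch (not part of the series): the BYTES_PER_*BIT macros
added by the units patch above make rate conversions read naturally.
A minimal example, assuming a caller holding a link rate in Mbit/s (the
function name is made up):

	#include <linux/types.h>
	#include <linux/units.h>

	/* Convert a rate in Mbit/s to bytes/s; 1 Mbit = 125000 bytes. */
	static inline u64 mbps_to_bytes_per_sec(u64 rate_mbps)
	{
		return rate_mbps * BYTES_PER_MBIT;
	}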
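Illustrative sketch (not part of the series): with the ahash alignmask
removed, a driver can serve a possibly unaligned req->result with
memcpy() or the unaligned access helpers, as the alignmask-removal
commit message above suggests. The sketch assumes hypothetical hardware
exposing a 256-bit digest in eight little-endian 32-bit MMIO registers;
all driver names here are invented:

	#include <asm/unaligned.h>
	#include <crypto/hash.h>
	#include <linux/io.h>

	/* Hypothetical: copy a 256-bit digest out of device registers. */
	static void xyz_read_digest(struct ahash_request *req,
				    void __iomem *digest_regs)
	{
		u8 *out = req->result;	/* no alignment guarantee needed */
		int i;

		for (i = 0; i < 8; i++)
			put_unaligned_le32(readl(digest_regs + 4 * i),
					   out + 4 * i);
	}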